2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
36 #include "iredges_t.h"
47 #include "../belive_t.h"
48 #include "../besched_t.h"
49 #include "../benode_t.h"
50 #include "ia32_new_nodes.h"
51 #include "gen_ia32_new_nodes.h"
52 #include "gen_ia32_regalloc_if.h"
/** Wrap a stack index into the circular st[] array (N_x87_REGS is a power of two). */
#define MASK_TOS(x)		((x) & (N_x87_REGS - 1))

/** the debug handle */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* Forward declaration. */
typedef struct _x87_simulator x87_simulator;

/**
 * An exchange template.
 * Note that our virtual functions have the same inputs
 * and attributes as the real ones, so we can simple exchange
 * Further, x87 supports inverse instructions, so we can handle them.
 */
typedef struct _exchange_tmpl {
	ir_op *normal_op;      /**< the normal one */
	ir_op *reverse_op;     /**< the reverse one if exists */
	ir_op *normal_pop_op;  /**< the normal one with tos pop */
	ir_op *reverse_pop_op; /**< the reverse one with tos pop */

/**
 * An entry on the simulated x87 stack.
 */
typedef struct _st_entry {
	int     reg_idx; /**< the virtual register index of this stack value */
	ir_node *node;   /**< the node that produced this value */

/* The x87 state: a model of the FPU register stack during simulation. */
typedef struct _x87_state {
	st_entry st[N_x87_REGS]; /**< the register stack */
	int depth;               /**< the current stack depth */
	int tos;                 /**< position of the tos */
	x87_simulator *sim;      /**< The simulator. */

/** An empty state, used for blocks without fp instructions. */
static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
static x87_state *empty = (x87_state *)&_empty;

	NO_NODE_ADDED = 0, /**< No node was added. */
	NODE_ADDED = 1     /**< A node was added by the simulator in the schedule. */

/**
 * The type of an instruction simulator function.
 *
 * @param state  the x87 state
 * @param n      the node to be simulated
 *
 * @return NODE_ADDED if a node was added AFTER n in schedule,
 */
typedef int (*sim_func)(x87_state *state, ir_node *n);

/**
 * A block state: Every block has a x87 state at the beginning and at the end.
 */
typedef struct _blk_state {
	x87_state *begin; /**< state at the begin or NULL if not assigned */
	x87_state *end;   /**< state at the end or NULL if not assigned */

#define PTR_TO_BLKSTATE(p)	((blk_state *)(p))

/** liveness bitset for vfp registers. */
typedef unsigned char vfp_liveness;

/* The x87 simulator: global context for one irg traversal. */
struct _x87_simulator {
	struct obstack obst;        /**< An obstack for fast allocating. */
	pmap *blk_states;           /**< Map blocks to states. */
	const arch_env_t *arch_env; /**< The architecture environment. */
	be_lv_t *lv;                /**< intrablock liveness. */
	vfp_liveness *live;         /**< Liveness information. */
	unsigned n_idx;             /**< The cached get_irg_last_idx() result. */
	waitq *worklist;            /**< Worklist of blocks that must be processed. */
/**
 * Returns the current stack depth.
 *
 * @param state  the x87 state
 *
 * @return the x87 stack depth
 */
static int x87_get_depth(const x87_state *state) {
	/* NOTE(review): body not visible in this chunk -- presumably returns state->depth; confirm */
} /* x87_get_depth */
158 * Return the virtual register index at st(pos).
160 * @param state the x87 state
161 * @param pos a stack position
163 * @return the vfp register index that produced the value at st(pos)
165 static int x87_get_st_reg(const x87_state *state, int pos) {
166 assert(pos < state->depth);
167 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
168 } /* x87_get_st_reg */
171 * Return the node at st(pos).
173 * @param state the x87 state
174 * @param pos a stack position
176 * @return the IR node that produced the value at st(pos)
178 static ir_node *x87_get_st_node(const x87_state *state, int pos) {
179 assert(pos < state->depth);
180 return state->st[MASK_TOS(state->tos + pos)].node;
181 } /* x87_get_st_node */
/**
 * Dump the stack for debugging.
 *
 * @param state  the x87 state
 */
static void x87_dump_stack(const x87_state *state) {
	/* print from the deepest slot up to the tos */
	for (i = state->depth - 1; i >= 0; --i) {
		DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
			x87_get_st_node(state, i)));
	DB((dbg, LEVEL_2, "<-- TOS\n"));
} /* x87_dump_stack */
#endif /* DEBUG_libfirm */

/**
 * Set a virtual register to st(pos).
 *
 * @param state    the x87 state
 * @param reg_idx  the vfp register index that should be set
 * @param node     the IR node that produces the value of the vfp register
 * @param pos      the stack position where the new value should be entered
 */
static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos) {
	assert(0 < state->depth);
	state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
	state->st[MASK_TOS(state->tos + pos)].node    = node;

	DB((dbg, LEVEL_2, "After SET_REG: "));
	DEBUG_ONLY(x87_dump_stack(state));
/**
 * Set the tos virtual register.
 *
 * @param state    the x87 state
 * @param reg_idx  the vfp register index that should be set
 * @param node     the IR node that produces the value of the vfp register
 */
static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node) {
	x87_set_st(state, reg_idx, node, 0);

/**
 * Swap st(0) with st(pos) in the simulated stack (models an fxch).
 *
 * @param state  the x87 state
 * @param pos    the stack position to change the tos with
 */
static void x87_fxch(x87_state *state, int pos) {
	assert(pos < state->depth);

	entry = state->st[MASK_TOS(state->tos + pos)];
	state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
	state->st[MASK_TOS(state->tos)] = entry;

	DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
/**
 * Convert a virtual register to the stack index.
 *
 * @param state    the x87 state
 * @param reg_idx  the register vfp index
 *
 * @return the stack position where the register is stacked
 *         or -1 if the virtual register was not found
 */
static int x87_on_stack(const x87_state *state, int reg_idx) {
	int i, tos = state->tos;

	/* linear scan of the occupied slots, tos outward */
	for (i = 0; i < state->depth; ++i)
		if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
/**
 * Push a virtual Register onto the stack, double pushed allowed.
 *
 * @param state    the x87 state
 * @param reg_idx  the register vfp index
 * @param node     the node that produces the value of the vfp register
 */
static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node) {
	assert(state->depth < N_x87_REGS && "stack overrun");

	/* tos moves downward (wraps via MASK_TOS); new value lands at the new tos */
	state->tos = MASK_TOS(state->tos - 1);
	state->st[state->tos].reg_idx = reg_idx;
	state->st[state->tos].node    = node;

	DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));

/**
 * Push a virtual Register onto the stack, double pushes are NOT allowed.
 *
 * @param state     the x87 state
 * @param reg_idx   the register vfp index
 * @param node      the node that produces the value of the vfp register
 * @param dbl_push  if != 0 double pushes are allowed
 */
static void x87_push(x87_state *state, int reg_idx, ir_node *node) {
	assert(x87_on_stack(state, reg_idx) == -1 && "double push");

	x87_push_dbl(state, reg_idx, node);

/**
 * Pop a virtual Register from the stack.
 *
 * @param state  the x87 state
 */
static void x87_pop(x87_state *state) {
	assert(state->depth > 0 && "stack underrun");

	/* tos moves upward; NOTE(review): depth decrement not visible in this chunk */
	state->tos = MASK_TOS(state->tos + 1);

	DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
/**
 * Returns the block state of a block, creating an empty one on first request.
 *
 * @param sim    the x87 simulator handle
 * @param block  the current block
 *
 * @return the block state
 */
static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block) {
	pmap_entry *entry = pmap_find(sim->blk_states, block);

		/* not seen before: allocate a fresh, unassigned state on the obstack */
		blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
		bl_state->begin = NULL;
		bl_state->end   = NULL;

		pmap_insert(sim->blk_states, block, bl_state);

	return PTR_TO_BLKSTATE(entry->value);
} /* x87_get_bl_state */

/**
 * Creates a new x87 state.
 *
 * @param sim  the x87 simulator handle
 *
 * @return a new x87 state
 */
static x87_state *x87_alloc_state(x87_simulator *sim) {
	x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));

} /* x87_alloc_state */

/**
 * Clone an existing x87 state.
 *
 * @param sim  the x87 simulator handle
 * @param src  the x87 state that will be cloned
 *
 * @return a cloned copy of the src state
 */
static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src) {
	x87_state *res = x87_alloc_state(sim);

	memcpy(res, src, sizeof(*res));
} /* x87_clone_state */
/**
 * Patch a virtual instruction into a x87 one and return
 * the node representing the result value.
 *
 * @param n   the IR node to patch
 * @param op  the x87 opcode to patch in
 */
static ir_node *x87_patch_insn(ir_node *n, ir_op *op) {
	ir_mode *mode = get_irn_mode(n);

	if (mode == mode_T) {
		/* patch all Proj's */
		const ir_edge_t *edge;

		foreach_out_edge(n, edge) {
			ir_node *proj = get_edge_src_irn(edge);

			mode = get_irn_mode(proj);
			if (mode_is_float(mode)) {
				/* float results live on the x87 stack as 80-bit extended (mode_E) */
				set_irn_mode(proj, mode_E);
	} else if (mode_is_float(mode))
		set_irn_mode(n, mode_E);
} /* x87_patch_insn */

/**
 * Returns the first Proj of a mode_T node having a given mode.
 *
 * @param n  the mode_T node
 * @param m  the desired mode of the Proj
 * @return The first Proj of mode @p m found or NULL.
 */
static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m) {
	const ir_edge_t *edge;

	assert(get_irn_mode(n) == mode_T && "Need mode_T node");

	foreach_out_edge(n, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		if (get_irn_mode(proj) == m)
} /* get_irn_Proj_for_mode */
/**
 * Wrap the arch_* function here so we can check for errors.
 * Asserts that the queried register really belongs to the vfp class.
 */
static INLINE const arch_register_t *x87_get_irn_register(x87_simulator *sim, const ir_node *irn) {
	const arch_register_t *res;

	res = arch_get_irn_register(sim->arch_env, irn);
	assert(res->reg_class->regs == ia32_vfp_regs);
} /* x87_get_irn_register */

/* -------------- x87 perm --------------- */
/**
 * Creates a fxch for shuffle.
 *
 * @param state  the x87 state
 * @param pos    parameter for fxch
 * @param block  the block were fxch is inserted
 *
 * Creates a new fxch node and reroute the user of the old node
 *
 * @return the fxch node
 */
static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block) {
	ia32_x87_attr_t *attr;

	fxch = new_rd_ia32_fxch(NULL, get_irn_irg(block), block, mode_E);
	attr = get_ia32_x87_attr(fxch);
	attr->x87[0] = &ia32_st_regs[pos]; /* exchange st(pos) ... */
	attr->x87[2] = &ia32_st_regs[0];   /* ... with st(0) */

	/* keep the simulated stack in sync with the emitted fxch */
	x87_fxch(state, pos);
} /* x87_fxch_shuffle */
/**
 * Calculate the necessary permutations to reach dst_state.
 *
 * These permutations are done with fxch instructions and placed
 * at the end of the block.
 *
 * Note that critical edges are removed here, so we need only
 * a shuffle if the current block has only one successor.
 *
 * @param sim        the simulator handle
 * @param block      the current block
 * @param state      the current x87 stack state, might be modified
 * @param dst_block  the destination block
 * @param dst_state  destination state
 */
static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
                              x87_state *state, ir_node *dst_block,
                              const x87_state *dst_state)
	int      i, n_cycles, k, ri;
	unsigned cycles[4], all_mask;
	char     cycle_idx[4][8];
	ir_node  *fxch, *before, *after;

	assert(state->depth == dst_state->depth);

	/* Some mathematics here:
	   If we have a cycle of length n that includes the tos,
	   we need n-1 exchange operations.
	   We can always add the tos and restore it, so we need
	   n+1 exchange operations for a cycle not containing the tos.
	   So, the maximum of needed operations is for a cycle of 7
	   not including the tos == 8.
	   This is the same number of ops we would need for using stores,
	   so exchange is cheaper (we save the loads).
	   On the other hand, we might need an additional exchange
	   in the next block to bring one operand on top, so the
	   number of ops in the first case is identical.
	   Further, no more than 4 cycles can exists (4 x 2).
	*/
	all_mask = (1 << (state->depth)) - 1;

	/* decompose the permutation state -> dst_state into disjoint cycles */
	for (n_cycles = 0; all_mask; ++n_cycles) {
		int src_idx, dst_idx;

		/* find the first free slot */
		for (i = 0; i < state->depth; ++i) {
			if (all_mask & (1 << i)) {
				all_mask &= ~(1 << i);

				/* check if there are differences here */
				if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))

		/* no more cycles found */

		cycles[n_cycles] = (1 << i);
		cycle_idx[n_cycles][k++] = i;
		/* follow the cycle: where must the value at src_idx go? */
		for (src_idx = i; ; src_idx = dst_idx) {
			dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));

			if ((all_mask & (1 << dst_idx)) == 0)

			cycle_idx[n_cycles][k++] = dst_idx;
			cycles[n_cycles] |=  (1 << dst_idx);
			all_mask       &= ~(1 << dst_idx);
		cycle_idx[n_cycles][k] = -1; /* sentinel terminates each cycle list */

	/* no permutation needed */

	/* Hmm: permutation needed */
	DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
	DEBUG_ONLY(x87_dump_stack(state));
	DB((dbg, LEVEL_2, "                  to\n"));
	DEBUG_ONLY(x87_dump_stack(dst_state));

	DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
	for (ri = 0; ri < n_cycles; ++ri) {
		DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
		for (k = 0; cycle_idx[ri][k] != -1; ++k)
			DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
		DB((dbg, LEVEL_2, "\n"));

	/*
	 * Find the place node must be insert.
	 * We have only one successor block, so the last instruction should
	 */
	before = sched_last(block);
	assert(is_cfop(before));

	/* now do the permutations */
	for (ri = 0; ri < n_cycles; ++ri) {
		if ((cycles[ri] & 1) == 0) {
			/* this cycle does not include the tos */
			fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
				sched_add_after(after, fxch);
				sched_add_before(before, fxch);
		for (k = 1; cycle_idx[ri][k] != -1; ++k) {
			fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
				sched_add_after(after, fxch);
				sched_add_before(before, fxch);
		if ((cycles[ri] & 1) == 0) {
			/* this cycle does not include the tos: restore the tos with a final fxch */
			fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
			sched_add_after(after, fxch);
/**
 * Create a fxch node before another node.
 *
 * @param state  the x87 state
 * @param n      the node after the fxch
 * @param pos    exchange st(pos) with st(0)
 */
static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
	ia32_x87_attr_t *attr;
	ir_graph *irg   = get_irn_irg(n);
	ir_node  *block = get_nodes_block(n);

	/* update the simulated stack first, then emit the matching instruction */
	x87_fxch(state, pos);

	fxch = new_rd_ia32_fxch(NULL, irg, block, mode_E);
	attr = get_ia32_x87_attr(fxch);
	attr->x87[0] = &ia32_st_regs[pos];
	attr->x87[2] = &ia32_st_regs[0];

	sched_add_before(n, fxch);
	DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
} /* x87_create_fxch */

/**
 * Create a fpush before node n.
 *
 * @param state   the x87 state
 * @param n       the node after the fpush
 * @param pos     push st(pos) on stack
 * @param op_idx  replace input op_idx of n with the fpush result
 */
static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx) {
	ir_node               *fpush, *pred = get_irn_n(n, op_idx);
	ia32_x87_attr_t       *attr;
	const arch_register_t *out = x87_get_irn_register(state->sim, pred);

	/* duplicate push: the same vfp register now occupies two slots */
	x87_push_dbl(state, arch_register_get_index(out), pred);

	fpush = new_rd_ia32_fpush(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
	attr  = get_ia32_x87_attr(fpush);
	attr->x87[0] = &ia32_st_regs[pos];
	attr->x87[2] = &ia32_st_regs[0];

	sched_add_before(n, fpush);

	DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
} /* x87_create_fpush */
/**
 * Create a fpop before node n.
 *
 * @param state  the x87 state
 * @param n      the node after the fpop
 * @param num    pop 1 or 2 values
 *
 * @return the fpop node
 */
static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
	ia32_x87_attr_t *attr;

	fpop = new_rd_ia32_fpop(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
	attr = get_ia32_x87_attr(fpop);
	attr->x87[0] = &ia32_st_regs[0];
	attr->x87[1] = &ia32_st_regs[0];
	attr->x87[2] = &ia32_st_regs[0];

	sched_add_before(n, fpop);
	DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
} /* x87_create_fpop */

/**
 * Creates an fldz before node n
 *
 * @param state  the x87 state
 * @param n      the node after the fldz
 *
 * @return the fldz node
 */
static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx) {
	ir_graph *irg   = get_irn_irg(n);
	ir_node  *block = get_nodes_block(n);

	fldz = new_rd_ia32_fldz(NULL, irg, block, mode_E);

	sched_add_before(n, fldz);
	DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));

	/* record the freshly loaded zero on the simulated stack */
	x87_push(state, regidx, fldz);

/* --------------------------------- liveness ------------------------------------------ */
/**
 * The liveness transfer function.
 * Updates a live set over a single step from a given node to its predecessor.
 * Everything defined at the node is removed from the set, the uses of the node get inserted.
 *
 * @param sim   The simulator handle.
 * @param irn   The node at which liveness should be computed.
 * @param live  The bitset of registers live before @p irn. This set gets modified by updating it to
 *              the registers live after irn.
 *
 * @return The live bitset.
 */
static vfp_liveness vfp_liveness_transfer(x87_simulator *sim, ir_node *irn, vfp_liveness live)
	const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
	const arch_env_t *arch_env = sim->arch_env;

	if (get_irn_mode(irn) == mode_T) {
		const ir_edge_t *edge;

		/* kill: clear the bits of all vfp results defined via Projs */
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);

			if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
				const arch_register_t *reg = x87_get_irn_register(sim, proj);
				live &= ~(1 << arch_register_get_index(reg));

	/* kill: clear the bit of a direct vfp result */
	if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
		const arch_register_t *reg = x87_get_irn_register(sim, irn);
		live &= ~(1 << arch_register_get_index(reg));

	/* gen: set the bits of all vfp operands */
	for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
		ir_node *op = get_irn_n(irn, i);

		if (mode_is_float(get_irn_mode(op)) && arch_irn_consider_in_reg_alloc(arch_env, cls, op)) {
			const arch_register_t *reg = x87_get_irn_register(sim, op);
			live |= 1 << arch_register_get_index(reg);
} /* vfp_liveness_transfer */

/**
 * Put all live virtual registers at the end of a block into a bitset.
 *
 * @param sim  the simulator handle
 * @param lv   the liveness information
 * @param bl   the block
 *
 * @return The live bitset at the end of this block
 */
static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
	vfp_liveness live = 0;
	const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
	const arch_env_t *arch_env = sim->arch_env;
	const be_lv_t *lv = sim->lv;

	be_lv_foreach(lv, block, be_lv_state_end, i) {
		const arch_register_t *reg;
		const ir_node *node = be_lv_get_irn(lv, block, i);
		if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))

		reg = x87_get_irn_register(sim, node);
		live |= 1 << arch_register_get_index(reg);

} /* vfp_liveness_end_of_block */
/** get the register mask from an arch_register */
#define REGMASK(reg)	(1 << (arch_register_get_index(reg)))

/**
 * Return a bitset of argument registers which are live at the end of a node.
 *
 * @param sim   the simulator handle
 * @param pos   the node
 * @param kill  kill mask for the output registers
 *
 * @return The live bitset.
 */
static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
	unsigned idx = get_irn_idx(pos);

	assert(idx < sim->n_idx);
	/* sim->live was filled per node index by update_liveness() */
	return sim->live[idx] & ~kill;
} /* vfp_live_args_after */
/**
 * Calculate the liveness for a whole block and cache it.
 *
 * @param sim    the simulator handle
 * @param lv     the liveness handle
 * @param block  the block
 */
static void update_liveness(x87_simulator *sim, ir_node *block) {
	vfp_liveness live = vfp_liveness_end_of_block(sim, block);

	/* now iterate through the block backward and cache the results */
	sched_foreach_reverse(block, irn) {
		/* stop at the first Phi: this produces the live-in */

		idx = get_irn_idx(irn);
		sim->live[idx] = live;

		live = vfp_liveness_transfer(sim, irn, live);
	/* the block index caches the live-in set */
	idx = get_irn_idx(block);
	sim->live[idx] = live;
} /* update_liveness */

/**
 * Returns true if a register is live in a set.
 *
 * @param reg_idx  the vfp register index
 * @param live     a live bitset
 */
#define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))

/**
 * Dump liveness info.
 *
 * @param live  the live bitset
 */
static void vfp_dump_live(vfp_liveness live) {
	DB((dbg, LEVEL_2, "Live after: "));
	for (i = 0; i < 8; ++i) {
		if (live & (1 << i)) {
			DB((dbg, LEVEL_2, "vf%d ", i));
	DB((dbg, LEVEL_2, "\n"));
} /* vfp_dump_live */
#endif /* DEBUG_libfirm */

/* --------------------------------- simulators ---------------------------------------- */

#define XCHG(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
/**
 * Simulate a virtual binop.
 *
 * Chooses between the normal, reverse, pop and reverse-pop x87 opcode
 * depending on operand stack positions and liveness after the node.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param tmpl   the template containing the 4 possible x87 opcodes
 *
 * @return NO_NODE_ADDED
 */
static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl) {
	int op2_idx = 0, op1_idx;
	int out_idx, do_pop = 0;
	ia32_x87_attr_t *attr;
	ir_node *patched_insn;
	x87_simulator         *sim     = state->sim;
	ir_node               *op1     = get_irn_n(n, n_ia32_binary_left);
	ir_node               *op2     = get_irn_n(n, n_ia32_binary_right);
	const arch_register_t *op1_reg = x87_get_irn_register(sim, op1);
	const arch_register_t *op2_reg = x87_get_irn_register(sim, op2);
	const arch_register_t *out     = x87_get_irn_register(sim, n);
	int reg_index_1                = arch_register_get_index(op1_reg);
	int reg_index_2                = arch_register_get_index(op2_reg);
	vfp_liveness           live    = vfp_live_args_after(sim, n, REGMASK(out));

	DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
		arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
		arch_register_get_name(out)));
	DEBUG_ONLY(vfp_dump_live(live));
	DB((dbg, LEVEL_1, "Stack before: "));
	DEBUG_ONLY(x87_dump_stack(state));

	if(reg_index_1 == REG_VFP_UKNWN) {

	op1_idx = x87_on_stack(state, reg_index_1);
	assert(op1_idx >= 0);
	op1_live_after = is_vfp_live(arch_register_get_index(op1_reg), live);

	if (reg_index_2 != REG_VFP_NOREG) {
		if(reg_index_2 == REG_VFP_UKNWN) {

		/* second operand is a vfp register */
		op2_idx = x87_on_stack(state, reg_index_2);
		assert(op2_idx >= 0);
			= is_vfp_live(arch_register_get_index(op2_reg), live);

		if (op2_live_after) {
			/* Second operand is live. */

			if (op1_live_after) {
				/* Both operands are live: push the first one.
				   This works even for op1 == op2. */
				x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
				/* now do fxxx (tos=tos X op) */
				dst = tmpl->normal_op;
				/* Second live, first operand is dead here, bring it to tos. */
					x87_create_fxch(state, n, op1_idx);
				/* now do fxxx (tos=tos X op) */
				dst = tmpl->normal_op;
			/* Second operand is dead. */
			if (op1_live_after) {
				/* First operand is live: bring second to tos. */
					x87_create_fxch(state, n, op2_idx);
				/* now do fxxxr (tos = op X tos) */
				dst = tmpl->reverse_op;
				/* Both operands are dead here, pop them from the stack. */
					/* Both are identically and on tos, no pop needed. */
					/* here fxxx (tos = tos X tos) */
					dst = tmpl->normal_op;
					/* now do fxxxp (op = op X tos, pop) */
					dst = tmpl->normal_pop_op;
			} else if (op1_idx == 0) {
				assert(op1_idx != op2_idx);
				/* now do fxxxrp (op = tos X op, pop) */
				dst = tmpl->reverse_pop_op;
				/* Bring the second on top. */
				x87_create_fxch(state, n, op2_idx);
				if (op1_idx == op2_idx) {
					/* Both are identically and on tos now, no pop needed. */
					/* use fxxx (tos = tos X tos) */
					dst = tmpl->normal_op;
				/* op2 is on tos now */
					/* use fxxxp (op = op X tos, pop) */
					dst = tmpl->normal_pop_op;
		/* second operand is an address mode */
		if (op1_live_after) {
			/* first operand is live: push it here */
			x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
			/* use fxxx (tos = tos X mem) */
			dst = tmpl->normal_op;
			/* first operand is dead: bring it to tos */
				x87_create_fxch(state, n, op1_idx);
			/* use fxxxp (tos = tos X mem) */
			dst = tmpl->normal_op;

	patched_insn = x87_patch_insn(n, dst);
	x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);

	/* patch the operation */
	attr = get_ia32_x87_attr(n);
	attr->x87[0] = op1_reg = &ia32_st_regs[op1_idx];
	if (reg_index_2 != REG_VFP_NOREG) {
		attr->x87[1] = op2_reg = &ia32_st_regs[op2_idx];
	attr->x87[2] = out = &ia32_st_regs[out_idx];

	if (reg_index_2 != REG_VFP_NOREG) {
		DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
			arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
			arch_register_get_name(out)));
		DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
			arch_register_get_name(op1_reg),
			arch_register_get_name(out)));

	return NO_NODE_ADDED;
/**
 * Simulate a virtual Unop.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param op     the x87 opcode that will replace n's opcode
 *
 * @return NO_NODE_ADDED
 */
static int sim_unop(x87_state *state, ir_node *n, ir_op *op) {
	int op1_idx, out_idx;
	x87_simulator         *sim = state->sim;
	const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, UNOP_IDX));
	const arch_register_t *out = x87_get_irn_register(sim, n);
	ia32_x87_attr_t *attr;
	unsigned live = vfp_live_args_after(sim, n, REGMASK(out));

	DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
	DEBUG_ONLY(vfp_dump_live(live));

	op1_idx = x87_on_stack(state, arch_register_get_index(op1));

	if (is_vfp_live(arch_register_get_index(op1), live)) {
		/* push the operand here */
		x87_create_fpush(state, n, op1_idx, UNOP_IDX);
		/* operand is dead, bring it to tos */
			x87_create_fxch(state, n, op1_idx);

	/* the unop consumes and produces the tos */
	x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));

	attr = get_ia32_x87_attr(n);
	attr->x87[0] = op1 = &ia32_st_regs[0];
	attr->x87[2] = out = &ia32_st_regs[0];
	DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));

	return NO_NODE_ADDED;

/**
 * Simulate a virtual Load instruction.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param op     the x87 opcode that will replace n's opcode
 *
 * @return NO_NODE_ADDED
 */
static int sim_load(x87_state *state, ir_node *n, ir_op *op) {
	const arch_register_t *out = x87_get_irn_register(state->sim, n);
	ia32_x87_attr_t *attr;

	DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
	/* a load pushes its result onto the x87 stack */
	x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
	assert(out == x87_get_irn_register(state->sim, n));
	attr = get_ia32_x87_attr(n);
	attr->x87[2] = out = &ia32_st_regs[0];
	DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));

	return NO_NODE_ADDED;
/**
 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
 *
 * @param store    The store
 * @param old_val  The former value
 * @param new_val  The new value
 */
static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val) {
	const ir_edge_t *edge, *ne;

	/* safe iteration: set_irn_n() below mutates the edge list */
	foreach_out_edge_safe(old_val, edge, ne) {
		ir_node *user = get_edge_src_irn(edge);

		if (! user || user == store)

		/* if the user is scheduled after the store: rewire */
		if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
			/* find the input of the user pointing to the old value */
			for (i = get_irn_arity(user) - 1; i >= 0; i--) {
				if (get_irn_n(user, i) == old_val)
					set_irn_n(user, i, new_val);
} /* collect_and_rewire_users */
/**
 * Simulate a virtual Store.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param op     the x87 store opcode
 * @param op_p   the x87 store and pop opcode
 */
static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p) {
	x87_simulator         *sim = state->sim;
	ir_node               *val = get_irn_n(n, n_ia32_vfst_val);
	const arch_register_t *op2 = x87_get_irn_register(sim, val);
	unsigned              live = vfp_live_args_after(sim, n, 0);
	int                   insn = NO_NODE_ADDED;
	ia32_x87_attr_t *attr;
	int op2_reg_idx, op2_idx, depth;
	int live_after_node;

	op2_reg_idx = arch_register_get_index(op2);
	if (op2_reg_idx == REG_VFP_UKNWN) {
		/* just take any value from stack */
		if(state->depth > 0) {
			DEBUG_ONLY(op2 = NULL);
			live_after_node = 1;
			/* produce a new value which we will consume immediately */
			x87_create_fldz(state, n, op2_reg_idx);
			live_after_node = 0;
			op2_idx = x87_on_stack(state, op2_reg_idx);
			assert(op2_idx >= 0);
		op2_idx = x87_on_stack(state, op2_reg_idx);
		live_after_node = is_vfp_live(arch_register_get_index(op2), live);
		DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
		assert(op2_idx >= 0);

	mode  = get_ia32_ls_mode(n);
	depth = x87_get_depth(state);

	if (live_after_node) {
		/*
		   Problem: fst doesn't support mode_E (spills), only fstp does
		   - stack not full: push value and fstp
		   - stack full: fstp value and load again
		*/
		if (mode == mode_E) {
			if (depth < N_x87_REGS) {
				/* ok, we have a free register: push + fstp */
				x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
				x87_patch_insn(n, op_p);
				ir_node *vfld, *mem, *block, *rproj, *mproj;

				/* stack full here: need fstp + load */
				x87_patch_insn(n, op_p);

				block = get_nodes_block(n);
				irg   = get_irn_irg(n);
				vfld  = new_rd_ia32_vfld(NULL, irg, block, get_irn_n(n, 0), get_irn_n(n, 1), new_rd_NoMem(irg), get_ia32_ls_mode(n));

				/* copy all attributes */
				set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
				if (is_ia32_use_frame(n))
					set_ia32_use_frame(vfld);
				set_ia32_op_type(vfld, ia32_am_Source);
				add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
				set_ia32_am_sc(vfld, get_ia32_am_sc(n));
				set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));

				rproj = new_r_Proj(irg, block, vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
				mproj = new_r_Proj(irg, block, vfld, mode_M, pn_ia32_vfld_M);
				mem   = get_irn_Proj_for_mode(n, mode_M);

				assert(mem && "Store memory not found");

				arch_set_irn_register(sim->arch_env, rproj, op2);

				/* reroute all former users of the store memory to the load memory */
				edges_reroute(mem, mproj, irg);
				/* set the memory input of the load to the store memory */
				set_irn_n(vfld, n_ia32_vfld_mem, mem);

				sched_add_after(n, vfld);
				sched_add_after(vfld, rproj);

				/* rewire all users, scheduled after the store, to the loaded value */
				collect_and_rewire_users(n, val, rproj);
			/* we can only store the tos to memory */
				x87_create_fxch(state, n, op2_idx);

			/* mode != mode_E -> use normal fst */
			x87_patch_insn(n, op);
		/* we can only store the tos to memory */
			x87_create_fxch(state, n, op2_idx);

		x87_patch_insn(n, op_p);

	attr = get_ia32_x87_attr(n);
	attr->x87[1] = op2 = &ia32_st_regs[0];
	DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/* Macro templates that stamp out the per-opcode sim_<op>() wrappers, each
 * forwarding to the generic sim_binop/sim_load/sim_unop/sim_store handler.
 * NOTE(review): this extract elides source lines (gaps in the embedded line
 * numbers) — the closing braces of the generated functions are not visible
 * here, so the trailing backslashes currently glue lines together; verify
 * against the full source before editing. */
1272 #define _GEN_BINOP(op, rev) \
1273 static int sim_##op(x87_state *state, ir_node *n) { \
1274 exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
1275 return sim_binop(state, n, &tmpl); \
1278 #define GEN_BINOP(op) _GEN_BINOP(op, op)
/* GEN_BINOPR: variant whose reverse opcode is the ##r form of op */
1279 #define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
/* Load wrappers: sim_<op>() delegates to sim_load with the real opcode */
1281 #define GEN_LOAD2(op, nop) \
1282 static int sim_##op(x87_state *state, ir_node *n) { \
1283 return sim_load(state, n, op_ia32_##nop); \
1286 #define GEN_LOAD(op) GEN_LOAD2(op, op)
/* Unary-op wrapper: sim_<op>() delegates to sim_unop */
1288 #define GEN_UNOP(op) \
1289 static int sim_##op(x87_state *state, ir_node *n) { \
1290 return sim_unop(state, n, op_ia32_##op); \
1293 #define GEN_STORE(op) \
1294 static int sim_##op(x87_state *state, ir_node *n) { \
1295 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
1317 * Simulate a fCondJmp.
1319 * @param state the x87 state
1320 * @param n the node that should be simulated (and patched)
1322 * @return NO_NODE_ADDED
/* NOTE(review): this extract elides source lines (gaps in the embedded line
 * numbers); several else-branches and closing braces of the decision tree
 * below are not visible here — confirm against the full source.
 * The function selects a real fcom* opcode variant (plain / reversed /
 * one-pop / two-pop) depending on which operands are live after n and where
 * they sit on the simulated x87 stack, inserting fxch nodes as needed. */
1324 static int sim_fCmpJmp(x87_state *state, ir_node *n) {
1328 ia32_x87_attr_t *attr;
1330 x87_simulator *sim = state->sim;
1331 ir_node *op1_node = get_irn_n(n, n_ia32_vfCmpJmp_left);
1332 ir_node *op2_node = get_irn_n(n, n_ia32_vfCmpJmp_right);
1333 const arch_register_t *op1 = x87_get_irn_register(sim, op1_node);
1334 const arch_register_t *op2 = x87_get_irn_register(sim, op2_node);
1335 int reg_index_1 = arch_register_get_index(op1);
1336 int reg_index_2 = arch_register_get_index(op2);
1337 unsigned live = vfp_live_args_after(sim, n, 0);
1339 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1340 arch_register_get_name(op1), arch_register_get_name(op2)));
1341 DEBUG_ONLY(vfp_dump_live(live));
1342 DB((dbg, LEVEL_1, "Stack before: "));
1343 DEBUG_ONLY(x87_dump_stack(state));
1345 op1_idx = x87_on_stack(state, reg_index_1);
1346 assert(op1_idx >= 0);
1348 /* BEWARE: check for comp a,a cases, they might happen */
1349 if (reg_index_2 != REG_VFP_NOREG) {
1350 /* second operand is a vfp register */
1351 op2_idx = x87_on_stack(state, reg_index_2);
1352 assert(op2_idx >= 0);
1354 if (is_vfp_live(arch_register_get_index(op2), live)) {
1355 /* second operand is live */
1357 if (is_vfp_live(arch_register_get_index(op1), live)) {
1358 /* both operands are live */
1361 /* res = tos X op */
1362 dst = op_ia32_fcomJmp;
1363 } else if (op2_idx == 0) {
1364 /* res = op X tos */
1365 dst = op_ia32_fcomrJmp;
1367 /* bring the first one to tos */
1368 x87_create_fxch(state, n, op1_idx);
1372 /* res = tos X op */
1373 dst = op_ia32_fcomJmp;
1376 /* second live, first operand is dead here, bring it to tos.
1377 This means further, op1_idx != op2_idx. */
1378 assert(op1_idx != op2_idx);
1380 x87_create_fxch(state, n, op1_idx);
1385 /* res = tos X op, pop */
1386 dst = op_ia32_fcompJmp;
1390 /* second operand is dead */
1391 if (is_vfp_live(arch_register_get_index(op1), live)) {
1392 /* first operand is live: bring second to tos.
1393 This means further, op1_idx != op2_idx. */
1394 assert(op1_idx != op2_idx);
1396 x87_create_fxch(state, n, op2_idx);
1401 /* res = op X tos, pop */
1402 dst = op_ia32_fcomrpJmp;
1405 /* both operands are dead here, check first for identity. */
1406 if (op1_idx == op2_idx) {
1407 /* identically, one pop needed */
1409 x87_create_fxch(state, n, op1_idx);
1413 /* res = tos X op, pop */
1414 dst = op_ia32_fcompJmp;
1417 /* different, move them to st and st(1) and pop both.
1418 The tricky part is to get one into st(1).*/
1419 else if (op2_idx == 1) {
1420 /* good, second operand is already in the right place, move the first */
1422 /* bring the first on top */
1423 x87_create_fxch(state, n, op1_idx);
1424 assert(op2_idx != 0);
1427 /* res = tos X op, pop, pop */
1428 dst = op_ia32_fcomppJmp;
1430 } else if (op1_idx == 1) {
1431 /* good, first operand is already in the right place, move the second */
1433 /* bring the first on top */
1434 x87_create_fxch(state, n, op2_idx);
1435 assert(op1_idx != 0);
1438 dst = op_ia32_fcomrppJmp;
1441 /* if one is already the TOS, we need two fxch */
1443 /* first one is TOS, move to st(1) */
1444 x87_create_fxch(state, n, 1);
1445 assert(op2_idx != 1);
1447 x87_create_fxch(state, n, op2_idx);
1449 /* res = op X tos, pop, pop */
1450 dst = op_ia32_fcomrppJmp;
1452 } else if (op2_idx == 0) {
1453 /* second one is TOS, move to st(1) */
1454 x87_create_fxch(state, n, 1);
1455 assert(op1_idx != 1);
1457 x87_create_fxch(state, n, op1_idx);
1459 /* res = tos X op, pop, pop */
1460 dst = op_ia32_fcomppJmp;
1463 /* none of them is either TOS or st(1), 3 fxch needed */
1464 x87_create_fxch(state, n, op2_idx);
1465 assert(op1_idx != 0);
1466 x87_create_fxch(state, n, 1);
1468 x87_create_fxch(state, n, op1_idx);
1470 /* res = tos X op, pop, pop */
1471 dst = op_ia32_fcomppJmp;
1478 /* second operand is an address mode */
1479 if (is_vfp_live(arch_register_get_index(op1), live)) {
1480 /* first operand is live: bring it to TOS */
1482 x87_create_fxch(state, n, op1_idx);
1485 dst = op_ia32_fcomJmp;
1487 /* first operand is dead: bring it to tos */
1489 x87_create_fxch(state, n, op1_idx);
1492 dst = op_ia32_fcompJmp;
/* rewrite the virtual node into the chosen real fcom* variant */
1497 x87_patch_insn(n, dst);
1498 assert(pop_cnt < 3);
1504 /* patch the operation */
1505 attr = get_ia32_x87_attr(n);
1506 op1 = &ia32_st_regs[op1_idx];
1509 op2 = &ia32_st_regs[op2_idx];
1512 attr->x87[2] = NULL;
1515 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1516 arch_register_get_name(op1), arch_register_get_name(op2)));
1518 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1519 arch_register_get_name(op1)));
1521 return NO_NODE_ADDED;
1522 } /* sim_fCondJmp */
/**
 * Simulate a be_Keep: for every vfp-class operand that is still on the
 * simulated x87 stack but no longer live after the Keep, insert an fpop
 * after the node.
 *
 * @param state  the x87 state
 * @param node   the Keep node
 *
 * NOTE(review): lines are elided in this extract (gaps in the embedded line
 * numbers) — the `continue` after the class check, loop-closing braces and
 * the final return of node_added are not visible here.
 */
1525 int sim_Keep(x87_state *state, ir_node *node)
1528 const arch_register_t *op_reg;
1533 int node_added = NO_NODE_ADDED;
1535 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1537 arity = get_irn_arity(node);
1538 for(i = 0; i < arity; ++i) {
1539 op = get_irn_n(node, i);
1540 op_reg = arch_get_irn_register(state->sim->arch_env, op);
/* only x87 (vfp) operands are of interest */
1541 if(arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1544 reg_id = arch_register_get_index(op_reg);
1545 live = vfp_live_args_after(state->sim, node, 0);
1547 op_stack_idx = x87_on_stack(state, reg_id);
/* value still on the stack but dead after the Keep: pop it */
1548 if(op_stack_idx >= 0 && !is_vfp_live(reg_id, live)) {
1549 x87_create_fpop(state, sched_next(node), 1);
1550 node_added = NODE_ADDED;
1554 DB((dbg, LEVEL_1, "Stack after: "));
1555 DEBUG_ONLY(x87_dump_stack(state));
/**
 * Create a be_Keep for a float node and schedule it directly after the node,
 * so the value is not considered dead by later passes.
 *
 * @param state  the x87 state (used to reach the arch environment)
 * @param node   the node to keep alive
 *
 * NOTE(review): the declaration/initialization of the `in` array passed to
 * be_new_Keep is elided from this extract — verify against the full source.
 */
1561 void keep_float_node_alive(x87_state *state, ir_node *node)
1567 const arch_register_class_t *cls;
1569 irg = get_irn_irg(node);
1570 block = get_nodes_block(node);
1571 cls = arch_get_irn_reg_class(state->sim->arch_env, node, -1);
1573 keep = be_new_Keep(cls, irg, block, 1, in);
1575 assert(sched_is_scheduled(node));
1576 sched_add_after(node, keep);
1580 * Create a copy of a node. Recreate the node if it's a constant.
1582 * @param state the x87 state
1583 * @param n the node to be copied
1585 * @return the copy of n
/* NOTE(review): this extract elides source lines (gaps in the embedded line
 * numbers) — the `break` statements of the switch, some case labels, the
 * else keyword before the fpushCopy path and the final return are not
 * visible here. */
1587 static ir_node *create_Copy(x87_state *state, ir_node *n) {
1588 x87_simulator *sim = state->sim;
1589 ir_graph *irg = get_irn_irg(n);
1590 dbg_info *n_dbg = get_irn_dbg_info(n);
1591 ir_mode *mode = get_irn_mode(n);
1592 ir_node *block = get_nodes_block(n);
1593 ir_node *pred = get_irn_n(n, 0);
/* constructor used when the predecessor is a recreatable fp constant */
1594 ir_node *(*cnstr)(dbg_info *, ir_graph *, ir_node *, ir_mode *) = NULL;
1596 const arch_register_t *out;
1597 const arch_register_t *op1;
1598 ia32_x87_attr_t *attr;
1600 /* Do not copy constants, recreate them. */
1601 switch (get_ia32_irn_opcode(pred)) {
1602 case iro_ia32_Unknown_VFP:
1604 cnstr = new_rd_ia32_fldz;
1607 cnstr = new_rd_ia32_fld1;
1609 case iro_ia32_fldpi:
1610 cnstr = new_rd_ia32_fldpi;
1612 case iro_ia32_fldl2e:
1613 cnstr = new_rd_ia32_fldl2e;
1615 case iro_ia32_fldl2t:
1616 cnstr = new_rd_ia32_fldl2t;
1618 case iro_ia32_fldlg2:
1619 cnstr = new_rd_ia32_fldlg2;
1621 case iro_ia32_fldln2:
1622 cnstr = new_rd_ia32_fldln2;
1628 out = x87_get_irn_register(sim, n);
1629 op1 = x87_get_irn_register(sim, pred);
1631 if (cnstr != NULL) {
1632 /* copy a constant */
1633 res = (*cnstr)(n_dbg, irg, block, mode);
1635 x87_push(state, arch_register_get_index(out), res);
1637 attr = get_ia32_x87_attr(res);
1638 attr->x87[2] = &ia32_st_regs[0];
/* non-constant: emit an fpushCopy of the predecessor's stack slot */
1640 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1642 res = new_rd_ia32_fpushCopy(n_dbg, irg, block, pred, mode);
1644 x87_push(state, arch_register_get_index(out), res);
1646 attr = get_ia32_x87_attr(res);
1647 attr->x87[0] = &ia32_st_regs[op1_idx];
1648 attr->x87[2] = &ia32_st_regs[0];
1650 arch_set_irn_register(sim->arch_env, res, out);
1656 * Simulate a be_Copy.
1658 * @param state the x87 state
1659 * @param n the node that should be simulated (and patched)
1661 * @return NO_NODE_ADDED
/* NOTE(review): lines are elided in this extract (gaps in the embedded line
 * numbers) — several else keywords, closing braces and early returns of the
 * branches below are not visible here; confirm against the full source. */
1663 static int sim_Copy(x87_state *state, ir_node *n) {
1664 x87_simulator *sim = state->sim;
1666 const arch_register_t *out;
1667 const arch_register_t *op1;
1668 const arch_register_class_t *class;
1669 ir_node *node, *next;
1670 ia32_x87_attr_t *attr;
1671 int op1_idx, out_idx;
/* only vfp-class copies concern the x87 simulator */
1674 class = arch_get_irn_reg_class(sim->arch_env, n, -1);
1675 if (class->regs != ia32_vfp_regs)
1678 pred = get_irn_n(n, 0);
1679 out = x87_get_irn_register(sim, n);
1680 op1 = x87_get_irn_register(sim, pred);
1681 live = vfp_live_args_after(sim, n, REGMASK(out));
1683 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1684 arch_register_get_name(op1), arch_register_get_name(out)));
1685 DEBUG_ONLY(vfp_dump_live(live));
1687 /* handle the infamous unknown value */
1688 if (arch_register_get_index(op1) == REG_VFP_UKNWN) {
1689 /* Operand is still live, a real copy. We need here an fpush that can
1690 hold a a register, so use the fpushCopy or recreate constants */
1691 node = create_Copy(state, n);
1693 assert(is_ia32_fldz(node));
1694 next = sched_next(n);
1697 sched_add_before(next, node);
1699 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1700 arch_get_irn_register(sim->arch_env, node)->name));
1701 return NO_NODE_ADDED;
1704 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1706 if (is_vfp_live(arch_register_get_index(op1), live)) {
1707 ir_node *pred = get_irn_n(n, 0);
1709 /* Operand is still live, a real copy. We need here an fpush that can
1710 hold a a register, so use the fpushCopy or recreate constants */
1711 node = create_Copy(state, n);
1713 /* We have to make sure the old value doesn't go dead (which can happen
1714 * when we recreate constants). As the simulator expected that value in
1715 * the pred blocks. This is unfortunate as removing it would save us 1
1716 * instruction, but we would have to rerun all the simulation to get
1719 next = sched_next(n);
1722 sched_add_before(next, node);
1724 if(get_irn_n_edges(pred) == 0) {
1725 keep_float_node_alive(state, pred);
1728 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1729 arch_get_irn_register(sim->arch_env, node)->name));
/* operand is dead: the copy kills op1 and renames it to out */
1731 out_idx = x87_on_stack(state, arch_register_get_index(out));
1733 if (out_idx >= 0 && out_idx != op1_idx) {
1734 /* Matze: out already on stack? how can this happen? */
1737 /* op1 must be killed and placed where out is */
1739 /* best case, simple remove and rename */
1740 x87_patch_insn(n, op_ia32_Pop);
1741 attr = get_ia32_x87_attr(n);
1742 attr->x87[0] = op1 = &ia32_st_regs[0];
1745 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1747 /* move op1 to tos, store and pop it */
1749 x87_create_fxch(state, n, op1_idx);
1752 x87_patch_insn(n, op_ia32_Pop);
1753 attr = get_ia32_x87_attr(n);
1754 attr->x87[0] = op1 = &ia32_st_regs[out_idx];
1757 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1759 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1761 /* just a virtual copy */
1762 x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
1763 /* don't remove the node to keep the verifier quiet :),
1764 the emitter won't emit any code for the node */
1767 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1768 exchange(n, get_unop_op(n));
1772 return NO_NODE_ADDED;
1776 * Returns the result proj of the call
/* NOTE(review): the `return proj;` inside the loop and the fallback return
 * at the end are elided in this extract (gaps in the embedded line numbers).
 * As visible, the function scans the call's out-edges for the Proj whose
 * number is pn_be_Call_first_res. */
1778 static ir_node *get_call_result_proj(ir_node *call) {
1779 const ir_edge_t *edge;
1781 /* search the result proj */
1782 foreach_out_edge(call, edge) {
1783 ir_node *proj = get_edge_src_irn(edge);
1784 long pn = get_Proj_proj(proj);
1786 if (pn == pn_be_Call_first_res) {
1792 } /* get_call_result_proj */
1795 * Simulate a be_Call.
1797 * @param state the x87 state
1798 * @param n the node that should be simulated
1799 * @param arch_env the architecture environment
1801 * @return NO_NODE_ADDED
/* NOTE(review): early-return bodies and closing braces are elided in this
 * extract (gaps in the embedded line numbers). */
1803 static int sim_Call(x87_state *state, ir_node *n, const arch_env_t *arch_env)
1805 ir_type *call_tp = be_Call_get_type(n);
1809 const arch_register_t *reg;
1812 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1814 /* at the begin of a call the x87 state should be empty */
1815 assert(state->depth == 0 && "stack not empty before call");
1817 if (get_method_n_ress(call_tp) <= 0)
1821 * If the called function returns a float, it is returned in st(0).
1822 * This even happens if the return value is NOT used.
1823 * Moreover, only one return result is supported.
1825 res_type = get_method_res_type(call_tp, 0);
1826 mode = get_type_mode(res_type);
1828 if (mode == NULL || !mode_is_float(mode))
/* the float result materializes in st(0): push it onto the simulated stack */
1831 resproj = get_call_result_proj(n);
1832 assert(resproj != NULL);
1834 reg = x87_get_irn_register(state->sim, resproj);
1835 x87_push(state, arch_register_get_index(reg), resproj);
1838 DB((dbg, LEVEL_1, "Stack after: "));
1839 DEBUG_ONLY(x87_dump_stack(state));
1841 return NO_NODE_ADDED;
1845 * Simulate a be_Spill.
1847 * @param state the x87 state
1848 * @param n the node that should be simulated (and patched)
1850 * Should not happen, spills are lowered before x87 simulator see them.
/* Defensive handler: asserts in debug builds, falls back to fst handling. */
1852 static int sim_Spill(x87_state *state, ir_node *n) {
1853 assert(0 && "Spill not lowered");
1854 return sim_fst(state, n);
1858 * Simulate a be_Reload.
1860 * @param state the x87 state
1861 * @param n the node that should be simulated (and patched)
1863 * Should not happen, reloads are lowered before x87 simulator see them.
/* Defensive handler: asserts in debug builds, falls back to fld handling. */
1865 static int sim_Reload(x87_state *state, ir_node *n) {
1866 assert(0 && "Reload not lowered");
1867 return sim_fld(state, n);
1871 * Simulate a be_Return.
1873 * @param state the x87 state
1874 * @param n the node that should be simulated (and patched)
1876 * @return NO_NODE_ADDED
/* NOTE(review): the increment of n_float_res and the pop call in the second
 * loop are elided in this extract (gaps in the embedded line numbers).
 * Counts the float return values and virtually pops them off the simulated
 * stack; asserts the stack holds exactly those values at the return. */
1878 static int sim_Return(x87_state *state, ir_node *n) {
1879 int n_res = be_Return_get_n_rets(n);
1880 int i, n_float_res = 0;
1882 /* only floating point return values must resist on stack */
1883 for (i = 0; i < n_res; ++i) {
1884 ir_node *res = get_irn_n(n, be_pos_Return_val + i);
1886 if (mode_is_float(get_irn_mode(res)))
1889 assert(x87_get_depth(state) == n_float_res);
1891 /* pop them virtually */
1892 for (i = n_float_res - 1; i >= 0; --i)
1895 return NO_NODE_ADDED;
/* Pairs an input register with its output register for Perm handling.
 * NOTE(review): the closing "} perm_data_t;" of this typedef is elided in
 * this extract. */
1898 typedef struct _perm_data_t {
1899 const arch_register_t *in;
1900 const arch_register_t *out;
1904 * Simulate a be_Perm.
1906 * @param state the x87 state
1907 * @param irn the node that should be simulated (and patched)
1909 * @return NO_NODE_ADDED
/* NOTE(review): some declarations and the assignment into stack_pos[] are
 * elided in this extract (gaps in the embedded line numbers). */
1911 static int sim_Perm(x87_state *state, ir_node *irn) {
1913 x87_simulator *sim = state->sim;
1914 ir_node *pred = get_irn_n(irn, 0);
1916 const ir_edge_t *edge;
1918 /* handle only floating point Perms */
1919 if (! mode_is_float(get_irn_mode(pred)))
1920 return NO_NODE_ADDED;
1922 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1924 /* Perm is a pure virtual instruction on x87.
1925 All inputs must be on the FPU stack and are pairwise
1926 different from each other.
1927 So, all we need to do is to permutate the stack state. */
1928 n = get_irn_arity(irn);
1929 NEW_ARR_A(int, stack_pos, n);
1931 /* collect old stack positions */
1932 for (i = 0; i < n; ++i) {
1933 const arch_register_t *inreg = x87_get_irn_register(sim, get_irn_n(irn, i));
1934 int idx = x87_on_stack(state, arch_register_get_index(inreg));
1936 assert(idx >= 0 && "Perm argument not on x87 stack");
1940 /* now do the permutation */
1941 foreach_out_edge(irn, edge) {
1942 ir_node *proj = get_edge_src_irn(edge);
1943 const arch_register_t *out = x87_get_irn_register(sim, proj);
1944 long num = get_Proj_proj(proj);
1946 assert(0 <= num && num < n && "More Proj's than Perm inputs");
1947 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
1949 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1951 return NO_NODE_ADDED;
/**
 * Simulate a Barrier: materialize every ia32_Unknown_VFP input by pushing
 * an fldz before the node and rerouting the input to it.
 *
 * @param state  the x87 state
 * @param node   the Barrier node
 * @return NO_NODE_ADDED
 *
 * NOTE(review): some declarations, a `continue`, and closing braces are
 * elided in this extract (gaps in the embedded line numbers).
 */
1954 static int sim_Barrier(x87_state *state, ir_node *node) {
1955 //const arch_env_t *arch_env = state->sim->arch_env;
1958 /* materialize unknown if needed */
1959 arity = get_irn_arity(node);
1960 for(i = 0; i < arity; ++i) {
1961 const arch_register_t *reg;
1964 ia32_x87_attr_t *attr;
1965 ir_node *in = get_irn_n(node, i);
1967 if(!is_ia32_Unknown_VFP(in))
1970 /* TODO: not completely correct... */
1971 reg = &ia32_vfp_regs[REG_VFP_UKNWN];
1974 block = get_nodes_block(node);
1975 zero = new_rd_ia32_fldz(NULL, current_ir_graph, block, mode_E);
1976 x87_push(state, arch_register_get_index(reg), zero);
1978 attr = get_ia32_x87_attr(zero);
1979 attr->x87[2] = &ia32_st_regs[0];
1981 sched_add_before(node, zero);
1983 set_irn_n(node, i, zero);
1986 return NO_NODE_ADDED;
1991 * Kill any dead registers at block start by popping them from the stack.
1993 * @param sim the simulator handle
1994 * @param block the current block
1995 * @param start_state the x87 state at the begin of the block
1997 * @return the x87 state after dead register killed
/* NOTE(review): the kill_mask declaration, the early-out when no register is
 * dead, the pop loop head and several closing braces are elided in this
 * extract (gaps in the embedded line numbers).
 * Strategy as visible: build a bitmask of dead stack slots, fxch dead values
 * to the top when needed, then emit single or double fpops. */
1999 static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state) {
2000 x87_state *state = start_state;
2001 ir_node *first_insn = sched_first(block);
2002 ir_node *keep = NULL;
2003 unsigned live = vfp_live_args_after(sim, block, 0);
2005 int i, depth, num_pop;
2008 depth = x87_get_depth(state);
2009 for (i = depth - 1; i >= 0; --i) {
2010 int reg = x87_get_st_reg(state, i);
2012 if (! is_vfp_live(reg, live))
2013 kill_mask |= (1 << i);
2017 /* create a new state, will be changed */
2018 state = x87_clone_state(sim, state);
2020 DB((dbg, LEVEL_1, "Killing deads:\n"));
2021 DEBUG_ONLY(vfp_dump_live(live));
2022 DEBUG_ONLY(x87_dump_stack(state));
2024 /* now kill registers */
2026 /* we can only kill from TOS, so bring them up */
2027 if (! (kill_mask & 1)) {
2028 /* search from behind, because we can to a double-pop */
2029 for (i = depth - 1; i >= 0; --i) {
2030 if (kill_mask & (1 << i)) {
2031 kill_mask &= ~(1 << i);
2038 x87_set_st(state, -1, keep, i);
2039 x87_create_fxch(state, first_insn, i);
2042 if ((kill_mask & 3) == 3) {
2043 /* we can do a double-pop */
2047 /* only a single pop */
2052 kill_mask >>= num_pop;
2053 keep = x87_create_fpop(state, first_insn, num_pop);
2058 } /* x87_kill_deads */
2061 * If we have PhiEs with unknown operands then we have to make sure that some
2062 * value is actually put onto the stack.
/* NOTE(review): parameter-list context, some declarations, a Phi check and
 * closing braces are elided in this extract (gaps in the embedded numbers).
 * For each Phi operand at position pos that is an Unknown_VFP, an fldz is
 * created at the end of the predecessor block and wired in as the operand. */
2064 static void fix_unknown_phis(x87_state *state, ir_node *block,
2065 ir_node *pred_block, int pos)
2069 sched_foreach(block, node) {
2071 const arch_register_t *reg;
2072 ia32_x87_attr_t *attr;
2077 op = get_Phi_pred(node, pos);
2078 if(!is_ia32_Unknown_VFP(op))
2081 reg = arch_get_irn_register(state->sim->arch_env, node);
2083 /* create a zero at end of pred block */
2084 zero = new_rd_ia32_fldz(NULL, current_ir_graph, pred_block, mode_E);
2085 x87_push(state, arch_register_get_index(reg), zero);
2087 attr = get_ia32_x87_attr(zero);
2088 attr->x87[2] = &ia32_st_regs[0];
2090 assert(is_ia32_fldz(zero));
2091 sched_add_before(sched_last(pred_block), zero);
2093 set_Phi_pred(node, pos, zero);
2098 * Run a simulation and fix all virtual instructions for a block.
2100 * @param sim the simulator handle
2101 * @param block the current block
/* NOTE(review): several declarations, `continue`/`return` statements and
 * closing braces are elided in this extract (gaps in the embedded numbers).
 * Flow as visible: kill dead registers at block entry, run the per-opcode
 * sim_func over the schedule, then propagate or shuffle the end state into
 * each successor block. */
2103 static void x87_simulate_block(x87_simulator *sim, ir_node *block) {
2105 blk_state *bl_state = x87_get_bl_state(sim, block);
2106 x87_state *state = bl_state->begin;
2107 const ir_edge_t *edge;
2108 ir_node *start_block;
2110 assert(state != NULL);
2111 /* already processed? */
2112 if (bl_state->end != NULL)
2115 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2116 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2117 DEBUG_ONLY(x87_dump_stack(state));
2119 /* at block begin, kill all dead registers */
2120 state = x87_kill_deads(sim, block, state);
2121 /* create a new state, will be changed */
2122 state = x87_clone_state(sim, state);
2124 /* beware, n might change */
2125 for (n = sched_first(block); !sched_is_end(n); n = next) {
2128 ir_op *op = get_irn_op(n);
2130 next = sched_next(n);
/* only opcodes with a registered sim_func need patching */
2131 if (op->ops.generic == NULL)
2134 func = (sim_func)op->ops.generic;
2137 node_inserted = (*func)(state, n);
2140 sim_func might have added an additional node after n,
2142 beware: n must not be changed by sim_func
2143 (i.e. removed from schedule) in this case
2145 if (node_inserted != NO_NODE_ADDED)
2146 next = sched_next(n);
2149 start_block = get_irg_start_block(get_irn_irg(block));
2151 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));
2153 /* check if the state must be shuffled */
2154 foreach_block_succ(block, edge) {
2155 ir_node *succ = get_edge_src_irn(edge);
2156 blk_state *succ_state;
2158 if (succ == start_block)
2161 succ_state = x87_get_bl_state(sim, succ);
2163 fix_unknown_phis(state, succ, block, get_edge_src_pos(edge));
2165 if (succ_state->begin == NULL) {
2166 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2167 DEBUG_ONLY(x87_dump_stack(state));
2168 succ_state->begin = state;
2170 waitq_put(sim->worklist, succ);
2172 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2173 /* There is already a begin state for the successor, bad.
2174 Do the necessary permutations.
2175 Note that critical edges are removed, so this is always possible:
2176 If the successor has more than one possible input, then it must
2179 x87_shuffle(sim, block, state, succ, succ_state->begin);
2182 bl_state->end = state;
2183 } /* x87_simulate_block */
2186 * Create a new x87 simulator.
2188 * @param sim a simulator handle, will be initialized
2189 * @param irg the current graph
2190 * @param arch_env the architecture environment
/* NOTE(review): most of the ASSOC(...) registration table is elided in this
 * extract (gaps in the embedded numbers); only ASSOC_IA32(fCmpJmp) remains
 * visible. The ASSOC* macros hook a sim_<op> handler into the generic
 * function pointer of the corresponding opcode. */
2192 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg,
2193 const arch_env_t *arch_env)
2195 obstack_init(&sim->obst);
2196 sim->blk_states = pmap_create();
2197 sim->arch_env = arch_env;
2198 sim->n_idx = get_irg_last_idx(irg);
/* one liveness cache entry per node index */
2199 sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);
2201 DB((dbg, LEVEL_1, "--------------------------------\n"
2202 "x87 Simulator started for %+F\n", irg));
2204 /* set the generic function pointer of instruction we must simulate */
2205 clear_irp_opcodes_generic_func();
2207 #define ASSOC(op) (op_ ## op)->ops.generic = (op_func)(sim_##op)
2208 #define ASSOC_IA32(op) (op_ia32_v ## op)->ops.generic = (op_func)(sim_##op)
2209 #define ASSOC_BE(op) (op_be_ ## op)->ops.generic = (op_func)(sim_##op)
2223 ASSOC_IA32(fCmpJmp);
2235 } /* x87_init_simulator */
2238 * Destroy a x87 simulator.
2240 * @param sim the simulator handle
2242 static void x87_destroy_simulator(x87_simulator *sim) {
2243 pmap_destroy(sim->blk_states);
2244 obstack_free(&sim->obst, NULL);
2245 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2246 } /* x87_destroy_simulator */
2249 * Pre-block walker: calculate the liveness information for the block
2250 * and store it into the sim->live cache.
2252 static void update_liveness_walker(ir_node *block, void *data) {
2253 x87_simulator *sim = data;
2254 update_liveness(sim, block);
2255 } /* update_liveness_walker */
2258 * Run a simulation and fix all virtual instructions for a graph.
2260 * @param env the architecture environment
2261 * @param irg the current graph
2263 * Needs a block-schedule.
/* NOTE(review): the x87_simulator/x87_state declarations, the `do {` of the
 * worklist loop and some braces are elided in this extract (gaps in the
 * embedded line numbers). Driver: initialize the simulator, seed the
 * worklist with the start block, precompute liveness for all blocks, then
 * simulate blocks until the worklist is empty. */
2265 void x87_simulate_graph(const arch_env_t *arch_env, be_irg_t *birg) {
2266 ir_node *block, *start_block;
2267 blk_state *bl_state;
2269 ir_graph *irg = be_get_birg_irg(birg);
2271 /* create the simulator */
2272 x87_init_simulator(&sim, irg, arch_env);
2274 start_block = get_irg_start_block(irg);
2275 bl_state = x87_get_bl_state(&sim, start_block);
2277 /* start with the empty state */
2278 bl_state->begin = empty;
2281 sim.worklist = new_waitq();
2282 waitq_put(sim.worklist, start_block);
2284 be_assure_liveness(birg);
2285 sim.lv = be_get_birg_liveness(birg);
2286 // sim.lv = be_liveness(be_get_birg_irg(birg));
2287 be_liveness_assure_sets(sim.lv);
2289 /* Calculate the liveness for all nodes. We must precalculate this info,
2290 * because the simulator adds new nodes (possible before Phi nodes) which
2291 * would let a lazy calculation fail.
2292 * On the other hand we reduce the computation amount due to
2293 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2295 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
2299 block = waitq_get(sim.worklist);
2300 x87_simulate_block(&sim, block);
2301 } while (! waitq_empty(sim.worklist));
2304 del_waitq(sim.worklist);
2305 x87_destroy_simulator(&sim);
2306 } /* x87_simulate_graph */
2308 void ia32_init_x87(void) {
2309 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
2310 } /* ia32_init_x87 */