2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
36 #include "iredges_t.h"
47 #include "../belive_t.h"
48 #include "../besched_t.h"
49 #include "../benode_t.h"
50 #include "ia32_new_nodes.h"
51 #include "gen_ia32_new_nodes.h"
52 #include "gen_ia32_regalloc_if.h"
/* Operand-index constants and the ring-buffer mask for the simulated x87
 * stack.  MASK_TOS wraps an index into [0, N_x87_REGS).
 * NOTE(review): this dump appears to be missing lines here (the BINOP
 * index defines announced by the first comment are not visible) and every
 * line carries a stray leading number -- confirm against the pristine file. */
57 /* first and second binop index */
64 /* the store val index */
65 #define STORE_VAL_IDX 2
67 #define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
69 /** the debug handle */
70 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
72 /* Forward declaration. */
73 typedef struct _x87_simulator x87_simulator;
/**
 * An exchange template: the four x87 opcode variants (normal, reverse,
 * and the TOS-popping forms of each) that may implement one virtual binop.
 */
76 * An exchange template.
77 * Note that our virtual functions have the same inputs
78 * and attributes as the real ones, so we can simple exchange
80 * Further, x87 supports inverse instructions, so we can handle them.
82 typedef struct _exchange_tmpl {
83 ir_op *normal_op; /**< the normal one */
84 ir_op *reverse_op; /**< the reverse one if exists */
85 ir_op *normal_pop_op; /**< the normal one with tos pop */
86 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
/* NOTE(review): the closing "} exchange_tmpl;" line is not visible in this
 * dump -- presumably elided; verify against the original source. */
/* One slot of the simulated x87 register stack. */
90 * An entry on the simulated x87 stack.
92 typedef struct _st_entry {
93 int reg_idx; /**< the virtual register index of this stack value */
94 ir_node *node; /**< the node that produced this value */
/* The simulated x87 state: a ring buffer of N_x87_REGS entries, the
 * current depth, the TOS position inside the ring, and a back-pointer
 * to the owning simulator. */
100 typedef struct _x87_state {
101 st_entry st[N_x87_REGS]; /**< the register stack */
102 int depth; /**< the current stack depth */
103 int tos; /**< position of the tos */
104 x87_simulator *sim; /**< The simulator. */
107 /** An empty state, used for blocks without fp instructions. */
108 static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
109 static x87_state *empty = (x87_state *)&_empty;
/* Result codes returned by sim_func implementations: whether simulating
 * a node inserted an extra node AFTER it in the schedule. */
112 NO_NODE_ADDED = 0, /**< No node was added. */
113 NODE_ADDED = 1 /**< A node was added by the simulator in the schedule. */
/* Signature of a per-opcode simulator callback (may patch the node). */
117 * The type of an instruction simulator function.
119 * @param state the x87 state
120 * @param n the node to be simulated
122 * @return NODE_ADDED if a node was added AFTER n in schedule,
125 typedef int (*sim_func)(x87_state *state, ir_node *n);
/* Per-block record of the x87 stack state at block entry and exit;
 * each pointer stays NULL until that state has been computed. */
128 * A block state: Every block has a x87 state at the beginning and at the end.
130 typedef struct _blk_state {
131 x87_state *begin; /**< state at the begin or NULL if not assigned */
132 x87_state *end; /**< state at the end or NULL if not assigned */
135 #define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
137 /** liveness bitset for vfp registers. */
138 typedef unsigned char vfp_liveness;
/* The x87 simulator: per-irg working data -- obstack allocator, the
 * block -> blk_state map, cached liveness bitsets indexed by node idx,
 * and the worklist of blocks still to be processed.
 * NOTE(review): closing "};" not visible in this dump. */
143 struct _x87_simulator {
144 struct obstack obst; /**< An obstack for fast allocating. */
145 pmap *blk_states; /**< Map blocks to states. */
146 const arch_env_t *arch_env; /**< The architecture environment. */
147 be_lv_t *lv; /**< intrablock liveness. */
148 vfp_liveness *live; /**< Liveness information. */
149 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
150 waitq *worklist; /**< Worklist of blocks that must be processed. */
154 * Returns the current stack depth.
156 * @param state the x87 state
158 * @return the x87 stack depth
/* Returns the current simulated stack depth.
 * NOTE(review): the body ("return state->depth;") is not visible in this
 * dump -- presumably elided. */
160 static int x87_get_depth(const x87_state *state) {
162 } /* x87_get_depth */
165 * Return the virtual register index at st(pos).
167 * @param state the x87 state
168 * @param pos a stack position
170 * @return the vfp register index that produced the value at st(pos)
/* Returns the vfp register index of the value sitting at st(pos);
 * pos is relative to the TOS, wrapped through the ring buffer. */
172 static int x87_get_st_reg(const x87_state *state, int pos) {
173 assert(pos < state->depth);
174 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
175 } /* x87_get_st_reg */
178 * Return the node at st(pos).
180 * @param state the x87 state
181 * @param pos a stack position
183 * @return the IR node that produced the value at st(pos)
/* Returns the IR node that produced the value sitting at st(pos). */
185 static ir_node *x87_get_st_node(const x87_state *state, int pos) {
186 assert(pos < state->depth);
187 return state->st[MASK_TOS(state->tos + pos)].node;
188 } /* x87_get_st_node */
192 * Dump the stack for debugging.
194 * @param state the x87 state
/* Dumps the simulated stack to the debug output, deepest entry first,
 * TOS last.  Compiled only under DEBUG_libfirm (note the #endif). */
196 static void x87_dump_stack(const x87_state *state) {
199 for (i = state->depth - 1; i >= 0; --i) {
200 DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
201 x87_get_st_node(state, i)));
203 DB((dbg, LEVEL_2, "<-- TOS\n"));
204 } /* x87_dump_stack */
205 #endif /* DEBUG_libfirm */
208 * Set a virtual register to st(pos).
210 * @param state the x87 state
211 * @param reg_idx the vfp register index that should be set
212 * @param node the IR node that produces the value of the vfp register
213 * @param pos the stack position where the new value should be entered
/* Records (reg_idx, node) as the value at st(pos) in the simulated stack. */
215 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos) {
216 assert(0 < state->depth);
217 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
218 state->st[MASK_TOS(state->tos + pos)].node = node;
220 DB((dbg, LEVEL_2, "After SET_REG: "));
221 DEBUG_ONLY(x87_dump_stack(state));
225 * Set the tos virtual register.
227 * @param state the x87 state
228 * @param reg_idx the vfp register index that should be set
229 * @param node the IR node that produces the value of the vfp register
/* Convenience wrapper: set the value at st(0). */
231 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node) {
232 x87_set_st(state, reg_idx, node, 0);
236 * Swap st(0) with st(pos).
238 * @param state the x87 state
239 * @param pos the stack position to change the tos with
/* Simulates an fxch: swaps the entries for st(0) and st(pos). */
241 static void x87_fxch(x87_state *state, int pos) {
243 assert(pos < state->depth);
245 entry = state->st[MASK_TOS(state->tos + pos)];
246 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
247 state->st[MASK_TOS(state->tos)] = entry;
249 DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
253 * Convert a virtual register to the stack index.
255 * @param state the x87 state
256 * @param reg_idx the register vfp index
258 * @return the stack position where the register is stacked
259 * or -1 if the virtual register was not found
/* Linear scan for reg_idx on the simulated stack; per the header comment
 * it yields the st() position or -1 when not stacked.
 * NOTE(review): the "return i;" / "return -1;" lines are not visible in
 * this dump -- presumably elided. */
261 static int x87_on_stack(const x87_state *state, int reg_idx) {
262 int i, tos = state->tos;
264 for (i = 0; i < state->depth; ++i)
265 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
271 * Push a virtual Register onto the stack, double pushed allowed.
273 * @param state the x87 state
274 * @param reg_idx the register vfp index
275 * @param node the node that produces the value of the vfp register
/* Pushes (reg_idx, node) onto the simulated stack by moving the TOS ring
 * index backwards; the same register may already be stacked (double push). */
277 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node) {
278 assert(state->depth < N_x87_REGS && "stack overrun");
281 state->tos = MASK_TOS(state->tos - 1);
282 state->st[state->tos].reg_idx = reg_idx;
283 state->st[state->tos].node = node;
285 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
289 * Push a virtual Register onto the stack, double pushes are NOT allowed.
291 * @param state the x87 state
292 * @param reg_idx the register vfp index
293 * @param node the node that produces the value of the vfp register
294 * @param dbl_push if != 0 double pushes are allowed
/* Push variant that asserts the register is not already on the stack. */
296 static void x87_push(x87_state *state, int reg_idx, ir_node *node) {
297 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
299 x87_push_dbl(state, reg_idx, node);
303 * Pop a virtual Register from the stack.
305 * @param state the x87 state
/* Pops one value: advances the TOS ring index (the slot itself is left
 * stale; depth bookkeeping lines are not visible in this dump). */
307 static void x87_pop(x87_state *state) {
308 assert(state->depth > 0 && "stack underrun");
311 state->tos = MASK_TOS(state->tos + 1);
313 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
317 * Returns the block state of a block.
319 * @param sim the x87 simulator handle
320 * @param block the current block
322 * @return the block state
/* Returns the blk_state for a block, lazily creating a zeroed entry on
 * the obstack and registering it in the map on first use. */
324 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block) {
325 pmap_entry *entry = pmap_find(sim->blk_states, block);
328 blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
329 bl_state->begin = NULL;
330 bl_state->end = NULL;
332 pmap_insert(sim->blk_states, block, bl_state);
336 return PTR_TO_BLKSTATE(entry->value);
337 } /* x87_get_bl_state */
340 * Creates a new x87 state.
342 * @param sim the x87 simulator handle
344 * @return a new x87 state
/* Allocates a new x87_state on the simulator obstack.
 * NOTE(review): initialization/return lines appear elided in this dump. */
346 static x87_state *x87_alloc_state(x87_simulator *sim) {
347 x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));
351 } /* x87_alloc_state */
356 * @param sim the x87 simulator handle
357 * @param src the x87 state that will be cloned
359 * @return a cloned copy of the src state
/* Returns a copy of src, freshly allocated on the simulator obstack. */
361 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src) {
362 x87_state *res = x87_alloc_state(sim);
364 memcpy(res, src, sizeof(*res));
366 } /* x87_clone_state */
369 * Patch a virtual instruction into a x87 one and return
370 * the node representing the result value.
372 * @param n the IR node to patch
373 * @param op the x87 opcode to patch in
/* Exchanges the opcode of virtual node n for the concrete x87 opcode op
 * and switches all float results (the node itself, or its float Projs
 * for a mode_T node) to mode_E, the 80-bit x87 mode.
 * NOTE(review): the op-setting and result-return lines are not visible in
 * this dump -- presumably elided. */
375 static ir_node *x87_patch_insn(ir_node *n, ir_op *op) {
376 ir_mode *mode = get_irn_mode(n);
381 if (mode == mode_T) {
382 /* patch all Proj's */
383 const ir_edge_t *edge;
385 foreach_out_edge(n, edge) {
386 ir_node *proj = get_edge_src_irn(edge);
388 mode = get_irn_mode(proj);
389 if (mode_is_float(mode)) {
391 set_irn_mode(proj, mode_E);
395 } else if (mode_is_float(mode))
396 set_irn_mode(n, mode_E);
398 } /* x87_patch_insn */
401 * Returns the first Proj of a mode_T node having a given mode.
403 * @param n the mode_T node
404 * @param m the desired mode of the Proj
405 * @return The first Proj of mode @p m found or NULL.
/* Scans the out-edges of mode_T node n for the first Proj carrying mode m;
 * per the header comment, NULL when none exists (returns elided here). */
407 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m) {
408 const ir_edge_t *edge;
410 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
412 foreach_out_edge(n, edge) {
413 ir_node *proj = get_edge_src_irn(edge);
414 if (get_irn_mode(proj) == m)
419 } /* get_irn_Proj_for_mode */
422 * Wrap the arch_* function here so we can check for errors.
/* arch_get_irn_register() wrapper that asserts the assigned register
 * belongs to the ia32 vfp class before handing it back. */
424 static INLINE const arch_register_t *x87_get_irn_register(x87_simulator *sim, const ir_node *irn) {
425 const arch_register_t *res;
427 res = arch_get_irn_register(sim->arch_env, irn);
428 assert(res->reg_class->regs == ia32_vfp_regs);
430 } /* x87_get_irn_register */
432 /* -------------- x87 perm --------------- */
435 * Creates a fxch for shuffle.
437 * @param state the x87 state
438 * @param pos parameter for fxch
439 * @param block the block were fxch is inserted
441 * Creates a new fxch node and reroute the user of the old node
444 * @return the fxch node
/* Builds a real ia32 fxch node (st(pos) <-> st(0)) for the shuffle pass
 * and mirrors the swap in the simulated state.  x87[0]/x87[2] carry the
 * in/out stack registers in the node attributes. */
446 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block) {
448 ia32_x87_attr_t *attr;
450 fxch = new_rd_ia32_fxch(NULL, get_irn_irg(block), block, mode_E);
451 attr = get_ia32_x87_attr(fxch);
452 attr->x87[0] = &ia32_st_regs[pos];
453 attr->x87[2] = &ia32_st_regs[0];
457 x87_fxch(state, pos);
459 } /* x87_fxch_shuffle */
462 * Calculate the necessary permutations to reach dst_state.
464 * These permutations are done with fxch instructions and placed
465 * at the end of the block.
467 * Note that critical edges are removed here, so we need only
468 * a shuffle if the current block has only one successor.
470 * @param sim the simulator handle
471 * @param block the current block
472 * @param state the current x87 stack state, might be modified
473 * @param dst_block the destination block
474 * @param dst_state destination state
/* Emits the fxch sequence that permutes the current stack state into
 * dst_state at the end of a block.  The algorithm decomposes the required
 * permutation into cycles (at most 4) over the <= 8 stack slots, then
 * realizes each cycle with fxch instructions scheduled before the final
 * control-flow op; a cycle not containing the TOS needs an extra leading
 * and trailing fxch to rotate the TOS in and out.
 * NOTE(review): many lines of this function (loop closers, the early
 * "no permutation" return, the final return) are not visible in this
 * sampled dump -- verify any change against the pristine source. */
478 static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
479 x87_state *state, ir_node *dst_block,
480 const x87_state *dst_state)
482 int i, n_cycles, k, ri;
483 unsigned cycles[4], all_mask;
484 char cycle_idx[4][8];
485 ir_node *fxch, *before, *after;
489 assert(state->depth == dst_state->depth);
491 /* Some mathematics here:
492 If we have a cycle of length n that includes the tos,
493 we need n-1 exchange operations.
494 We can always add the tos and restore it, so we need
495 n+1 exchange operations for a cycle not containing the tos.
496 So, the maximum of needed operations is for a cycle of 7
497 not including the tos == 8.
498 This is the same number of ops we would need for using stores,
499 so exchange is cheaper (we save the loads).
500 On the other hand, we might need an additional exchange
501 in the next block to bring one operand on top, so the
502 number of ops in the first case is identical.
503 Further, no more than 4 cycles can exists (4 x 2).
505 all_mask = (1 << (state->depth)) - 1;
507 for (n_cycles = 0; all_mask; ++n_cycles) {
508 int src_idx, dst_idx;
510 /* find the first free slot */
511 for (i = 0; i < state->depth; ++i) {
512 if (all_mask & (1 << i)) {
513 all_mask &= ~(1 << i);
515 /* check if there are differences here */
516 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
522 /* no more cycles found */
527 cycles[n_cycles] = (1 << i);
528 cycle_idx[n_cycles][k++] = i;
529 for (src_idx = i; ; src_idx = dst_idx) {
530 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
532 if ((all_mask & (1 << dst_idx)) == 0)
535 cycle_idx[n_cycles][k++] = dst_idx;
536 cycles[n_cycles] |= (1 << dst_idx);
537 all_mask &= ~(1 << dst_idx);
539 cycle_idx[n_cycles][k] = -1;
543 /* no permutation needed */
547 /* Hmm: permutation needed */
548 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
549 DEBUG_ONLY(x87_dump_stack(state));
550 DB((dbg, LEVEL_2, " to\n"));
551 DEBUG_ONLY(x87_dump_stack(dst_state));
555 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
556 for (ri = 0; ri < n_cycles; ++ri) {
557 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
558 for (k = 0; cycle_idx[ri][k] != -1; ++k)
559 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
560 DB((dbg, LEVEL_2, "\n"));
567 * Find the place node must be insert.
568 * We have only one successor block, so the last instruction should
571 before = sched_last(block);
572 assert(is_cfop(before));
574 /* now do the permutations */
575 for (ri = 0; ri < n_cycles; ++ri) {
576 if ((cycles[ri] & 1) == 0) {
577 /* this cycle does not include the tos */
578 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
580 sched_add_after(after, fxch);
582 sched_add_before(before, fxch);
585 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
586 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
588 sched_add_after(after, fxch);
590 sched_add_before(before, fxch);
593 if ((cycles[ri] & 1) == 0) {
594 /* this cycle does not include the tos */
595 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
596 sched_add_after(after, fxch);
603 * Create a fxch node before another node.
605 * @param state the x87 state
606 * @param n the node after the fxch
607 * @param pos exchange st(pos) with st(0)
/* Creates an fxch node exchanging st(pos) with st(0), schedules it
 * immediately before n, and mirrors the swap in the simulated state. */
611 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
614 ia32_x87_attr_t *attr;
615 ir_graph *irg = get_irn_irg(n);
616 ir_node *block = get_nodes_block(n);
618 x87_fxch(state, pos);
620 fxch = new_rd_ia32_fxch(NULL, irg, block, mode_E);
621 attr = get_ia32_x87_attr(fxch);
622 attr->x87[0] = &ia32_st_regs[pos];
623 attr->x87[2] = &ia32_st_regs[0];
627 sched_add_before(n, fxch);
628 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
630 } /* x87_create_fxch */
633 * Create a fpush before node n.
635 * @param state the x87 state
636 * @param n the node after the fpush
637 * @param pos push st(pos) on stack
638 * @param op_idx replace input op_idx of n with the fpush result
/* Creates an fpush (fld st(pos)) before n, duplicating operand op_idx of
 * n on the stack top; the simulated state gets a double push of the same
 * vfp register (hence x87_push_dbl). */
640 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx) {
641 ir_node *fpush, *pred = get_irn_n(n, op_idx);
642 ia32_x87_attr_t *attr;
643 const arch_register_t *out = x87_get_irn_register(state->sim, pred);
645 x87_push_dbl(state, arch_register_get_index(out), pred);
647 fpush = new_rd_ia32_fpush(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
648 attr = get_ia32_x87_attr(fpush);
649 attr->x87[0] = &ia32_st_regs[pos];
650 attr->x87[2] = &ia32_st_regs[0];
653 sched_add_before(n, fpush);
655 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
656 } /* x87_create_fpush */
659 * Create a fpop before node n.
661 * @param state the x87 state
662 * @param n the node after the fpop
663 * @param num pop 1 or 2 values
665 * @return the fpop node
/* Creates an fpop node before n and schedules it; all three x87 attribute
 * slots point at st(0) since fpop only discards the stack top.
 * NOTE(review): the x87_pop() call, the num-recursion and the return are
 * not visible in this dump -- presumably elided. */
667 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
670 ia32_x87_attr_t *attr;
674 fpop = new_rd_ia32_fpop(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
675 attr = get_ia32_x87_attr(fpop);
676 attr->x87[0] = &ia32_st_regs[0];
677 attr->x87[1] = &ia32_st_regs[0];
678 attr->x87[2] = &ia32_st_regs[0];
681 sched_add_before(n, fpop);
682 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
687 } /* x87_create_fpop */
690 * Creates an fldz before node n
692 * @param state the x87 state
693 * @param n the node after the fldz
695 * @return the fldz node
/* Creates an fldz (load +0.0) before n and pushes its value, registered
 * under regidx, onto the simulated stack. */
697 static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx) {
698 ir_graph *irg = get_irn_irg(n);
699 ir_node *block = get_nodes_block(n);
702 fldz = new_rd_ia32_fldz(NULL, irg, block, mode_E);
704 sched_add_before(n, fldz);
705 DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));
708 x87_push(state, regidx, fldz);
713 /* --------------------------------- liveness ------------------------------------------ */
716 * The liveness transfer function.
717 * Updates a live set over a single step from a given node to its predecessor.
718 * Everything defined at the node is removed from the set, the uses of the node get inserted.
720 * @param sim The simulator handle.
721 * @param irn The node at which liveness should be computed.
722 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
723 * the registers live after irn.
725 * @return The live bitset.
/* Backward liveness transfer over one node: clears the bits of registers
 * defined here (the node itself, or its vfp Projs for mode_T), then sets
 * the bits of all float operands it uses.  Returns the updated bitset
 * (the return line itself is not visible in this dump). */
727 static vfp_liveness vfp_liveness_transfer(x87_simulator *sim, ir_node *irn, vfp_liveness live)
730 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
731 const arch_env_t *arch_env = sim->arch_env;
733 if (get_irn_mode(irn) == mode_T) {
734 const ir_edge_t *edge;
736 foreach_out_edge(irn, edge) {
737 ir_node *proj = get_edge_src_irn(edge);
739 if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
740 const arch_register_t *reg = x87_get_irn_register(sim, proj);
741 live &= ~(1 << arch_register_get_index(reg));
746 if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
747 const arch_register_t *reg = x87_get_irn_register(sim, irn);
748 live &= ~(1 << arch_register_get_index(reg));
751 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
752 ir_node *op = get_irn_n(irn, i);
754 if (mode_is_float(get_irn_mode(op)) && arch_irn_consider_in_reg_alloc(arch_env, cls, op)) {
755 const arch_register_t *reg = x87_get_irn_register(sim, op);
756 live |= 1 << arch_register_get_index(reg);
760 } /* vfp_liveness_transfer */
763 * Put all live virtual registers at the end of a block into a bitset.
765 * @param sim the simulator handle
766 * @param lv the liveness information
767 * @param bl the block
769 * @return The live bitset at the end of this block
/* Collects the vfp registers live at the end of a block into a bitset by
 * walking the be_lv live-end set and keeping only vfp-class nodes. */
771 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
774 vfp_liveness live = 0;
775 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
776 const arch_env_t *arch_env = sim->arch_env;
777 const be_lv_t *lv = sim->lv;
779 be_lv_foreach(lv, block, be_lv_state_end, i) {
780 const arch_register_t *reg;
781 const ir_node *node = be_lv_get_irn(lv, block, i);
782 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
785 reg = x87_get_irn_register(sim, node);
786 live |= 1 << arch_register_get_index(reg);
790 } /* vfp_liveness_end_of_block */
792 /** get the register mask from an arch_register */
793 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
796 * Return a bitset of argument registers which are live at the end of a node.
798 * @param sim the simulator handle
799 * @param pos the node
800 * @param kill kill mask for the output registers
802 * @return The live bitset.
/* Looks up the cached liveness bitset after node pos and masks out the
 * registers in kill (typically the node's own result register). */
804 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
806 unsigned idx = get_irn_idx(pos);
808 assert(idx < sim->n_idx);
809 return sim->live[idx] & ~kill;
810 } /* vfp_live_args_after */
813 * Calculate the liveness for a whole block and cache it.
815 * @param sim the simulator handle
816 * @param lv the liveness handle
817 * @param block the block
/* Computes and caches the liveness bitset for every node of a block by
 * walking the schedule backwards from the block-end liveness; the final
 * (live-in) set is cached under the block's own index. */
819 static void update_liveness(x87_simulator *sim, ir_node *block) {
820 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
824 /* now iterate through the block backward and cache the results */
825 sched_foreach_reverse(block, irn) {
826 /* stop at the first Phi: this produces the live-in */
830 idx = get_irn_idx(irn);
831 sim->live[idx] = live;
833 live = vfp_liveness_transfer(sim, irn, live);
835 idx = get_irn_idx(block);
836 sim->live[idx] = live;
837 } /* update_liveness */
840 * Returns true if a register is live in a set.
842 * @param reg_idx the vfp register index
843 * @param live a live bitset
845 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
849 * Dump liveness info.
851 * @param live the live bitset
/* Prints the set bits of a liveness bitset as "vfN" names to the debug
 * output.  Compiled only under DEBUG_libfirm (note the #endif). */
853 static void vfp_dump_live(vfp_liveness live) {
856 DB((dbg, LEVEL_2, "Live after: "));
857 for (i = 0; i < 8; ++i) {
858 if (live & (1 << i)) {
859 DB((dbg, LEVEL_2, "vf%d ", i));
862 DB((dbg, LEVEL_2, "\n"));
863 } /* vfp_dump_live */
864 #endif /* DEBUG_libfirm */
866 /* --------------------------------- simulators ---------------------------------------- */
868 #define XCHG(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
880 * Simulate a virtual binop.
882 * @param state the x87 state
883 * @param n the node that should be simulated (and patched)
884 * @param tmpl the template containing the 4 possible x87 opcodes
886 * @return NO_NODE_ADDED
/* Simulates a virtual float binop.  Depending on which operands remain
 * live after n and whether either already sits at the TOS, it inserts
 * fpush/fxch fixups and then selects one of the four template opcodes:
 * normal (tos = tos X op), reverse (tos = op X tos), or their popping
 * variants.  An op2 register index of REG_VFP_NOREG means the second
 * operand comes from an address mode instead of the stack.  Finally the
 * node is patched to the chosen opcode and its x87 attributes filled in.
 * NOTE(review): this dump is missing many lines of the decision tree
 * (else-branches, out_idx/do_pop updates, closing braces); treat the
 * visible structure as indicative only and consult the pristine file. */
888 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl) {
889 int op2_idx = 0, op1_idx;
890 int out_idx, do_pop = 0;
891 ia32_x87_attr_t *attr;
892 ir_node *patched_insn;
894 x87_simulator *sim = state->sim;
895 ir_node *op1 = get_irn_n(n, BINOP_IDX_1);
896 ir_node *op2 = get_irn_n(n, BINOP_IDX_2);
897 const arch_register_t *op1_reg = x87_get_irn_register(sim, op1);
898 const arch_register_t *op2_reg = x87_get_irn_register(sim, op2);
899 const arch_register_t *out = x87_get_irn_register(sim, n);
900 int reg_index_1 = arch_register_get_index(op1_reg);
901 int reg_index_2 = arch_register_get_index(op2_reg);
902 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
906 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
907 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
908 arch_register_get_name(out)));
909 DEBUG_ONLY(vfp_dump_live(live));
910 DB((dbg, LEVEL_1, "Stack before: "));
911 DEBUG_ONLY(x87_dump_stack(state));
913 if(reg_index_1 == REG_VFP_UKNWN) {
917 op1_idx = x87_on_stack(state, reg_index_1);
918 assert(op1_idx >= 0);
919 op1_live_after = is_vfp_live(arch_register_get_index(op1_reg), live);
922 if (reg_index_2 != REG_VFP_NOREG) {
923 if(reg_index_2 == REG_VFP_UKNWN) {
927 /* second operand is a vfp register */
928 op2_idx = x87_on_stack(state, reg_index_2);
929 assert(op2_idx >= 0);
931 = is_vfp_live(arch_register_get_index(op2_reg), live);
934 if (op2_live_after) {
935 /* Second operand is live. */
937 if (op1_live_after) {
938 /* Both operands are live: push the first one.
939 This works even for op1 == op2. */
940 x87_create_fpush(state, n, op1_idx, BINOP_IDX_2);
941 /* now do fxxx (tos=tos X op) */
945 dst = tmpl->normal_op;
947 /* Second live, first operand is dead here, bring it to tos. */
949 x87_create_fxch(state, n, op1_idx);
954 /* now do fxxx (tos=tos X op) */
956 dst = tmpl->normal_op;
959 /* Second operand is dead. */
960 if (op1_live_after) {
961 /* First operand is live: bring second to tos. */
963 x87_create_fxch(state, n, op2_idx);
968 /* now do fxxxr (tos = op X tos) */
970 dst = tmpl->reverse_op;
972 /* Both operands are dead here, pop them from the stack. */
975 /* Both are identically and on tos, no pop needed. */
976 /* here fxxx (tos = tos X tos) */
977 dst = tmpl->normal_op;
980 /* now do fxxxp (op = op X tos, pop) */
981 dst = tmpl->normal_pop_op;
985 } else if (op1_idx == 0) {
986 assert(op1_idx != op2_idx);
987 /* now do fxxxrp (op = tos X op, pop) */
988 dst = tmpl->reverse_pop_op;
992 /* Bring the second on top. */
993 x87_create_fxch(state, n, op2_idx);
994 if (op1_idx == op2_idx) {
995 /* Both are identically and on tos now, no pop needed. */
998 /* use fxxx (tos = tos X tos) */
999 dst = tmpl->normal_op;
1002 /* op2 is on tos now */
1004 /* use fxxxp (op = op X tos, pop) */
1005 dst = tmpl->normal_pop_op;
1013 /* second operand is an address mode */
1014 if (op1_live_after) {
1015 /* first operand is live: push it here */
1016 x87_create_fpush(state, n, op1_idx, BINOP_IDX_1);
1018 /* use fxxx (tos = tos X mem) */
1019 dst = tmpl->normal_op;
1022 /* first operand is dead: bring it to tos */
1024 x87_create_fxch(state, n, op1_idx);
1028 /* use fxxxp (tos = tos X mem) */
1029 dst = tmpl->normal_op;
1034 patched_insn = x87_patch_insn(n, dst);
1035 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
1040 /* patch the operation */
1041 attr = get_ia32_x87_attr(n);
1042 attr->x87[0] = op1_reg = &ia32_st_regs[op1_idx];
1043 if (reg_index_2 != REG_VFP_NOREG) {
1044 attr->x87[1] = op2_reg = &ia32_st_regs[op2_idx];
1046 attr->x87[2] = out = &ia32_st_regs[out_idx];
1048 if (reg_index_2 != REG_VFP_NOREG) {
1049 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1050 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
1051 arch_register_get_name(out)));
1053 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1054 arch_register_get_name(op1_reg),
1055 arch_register_get_name(out)));
1058 return NO_NODE_ADDED;
1062 * Simulate a virtual Unop.
1064 * @param state the x87 state
1065 * @param n the node that should be simulated (and patched)
1066 * @param op the x87 opcode that will replace n's opcode
1068 * @return NO_NODE_ADDED
/* Simulates a virtual float unop: the operand must end up at st(0) --
 * pushed (fpush) when it stays live, or exchanged to the top (fxch) when
 * dead -- then n is patched to the concrete opcode and the result is
 * recorded at the TOS. */
1070 static int sim_unop(x87_state *state, ir_node *n, ir_op *op) {
1071 int op1_idx, out_idx;
1072 x87_simulator *sim = state->sim;
1073 const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, UNOP_IDX));
1074 const arch_register_t *out = x87_get_irn_register(sim, n);
1075 ia32_x87_attr_t *attr;
1076 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1078 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1079 DEBUG_ONLY(vfp_dump_live(live));
1081 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1083 if (is_vfp_live(arch_register_get_index(op1), live)) {
1084 /* push the operand here */
1085 x87_create_fpush(state, n, op1_idx, UNOP_IDX);
1089 /* operand is dead, bring it to tos */
1091 x87_create_fxch(state, n, op1_idx);
1096 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1098 attr = get_ia32_x87_attr(n);
1099 attr->x87[0] = op1 = &ia32_st_regs[0];
1100 attr->x87[2] = out = &ia32_st_regs[0];
1101 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1103 return NO_NODE_ADDED;
1107 * Simulate a virtual Load instruction.
1109 * @param state the x87 state
1110 * @param n the node that should be simulated (and patched)
1111 * @param op the x87 opcode that will replace n's opcode
1113 * @return NO_NODE_ADDED
/* Simulates a virtual float load: patches n to the concrete x87 load
 * opcode and pushes its result onto the simulated stack (result always
 * appears at st(0)). */
1115 static int sim_load(x87_state *state, ir_node *n, ir_op *op) {
1116 const arch_register_t *out = x87_get_irn_register(state->sim, n);
1117 ia32_x87_attr_t *attr;
1119 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
1120 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1121 assert(out == x87_get_irn_register(state->sim, n));
1122 attr = get_ia32_x87_attr(n);
1123 attr->x87[2] = out = &ia32_st_regs[0];
1124 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1126 return NO_NODE_ADDED;
1130 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1132 * @param store The store
1133 * @param old_val The former value
1134 * @param new_val The new value
/* Rewires every user of old_val that is scheduled after the given store
 * to use new_val instead (used after a spill-reload sequence replaces a
 * stack value). */
1136 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val) {
1137 const ir_edge_t *edge, *ne;
1139 foreach_out_edge_safe(old_val, edge, ne) {
1140 ir_node *user = get_edge_src_irn(edge);
1142 if (! user || user == store)
1145 /* if the user is scheduled after the store: rewire */
1146 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1148 /* find the input of the user pointing to the old value */
1149 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1150 if (get_irn_n(user, i) == old_val)
1151 set_irn_n(user, i, new_val);
1155 } /* collect_and_rewire_users */
1158 * Simulate a virtual Store.
1160 * @param state the x87 state
1161 * @param n the node that should be simulated (and patched)
1162 * @param op the x87 store opcode
1163 * @param op_p the x87 store and pop opcode
/* Simulates a virtual float store.  The value must be at st(0) (fxch is
 * inserted if not).  When the value stays live after the store and the
 * mode is the 80-bit mode_E -- which only the popping fstp supports --
 * it either duplicates the value first (fpush + fstp, stack not full) or
 * emits fstp followed by a reload (vfld) whose result replaces the old
 * value for all later users.  Otherwise a plain fst / fstp is used.
 * NOTE(review): several branch/return/closing lines are elided in this
 * dump; consult the pristine source before modifying. */
1165 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p) {
1166 x87_simulator *sim = state->sim;
1167 ir_node *val = get_irn_n(n, STORE_VAL_IDX);
1168 const arch_register_t *op2 = x87_get_irn_register(sim, val);
1169 unsigned live = vfp_live_args_after(sim, n, 0);
1170 int insn = NO_NODE_ADDED;
1171 ia32_x87_attr_t *attr;
1172 int op2_reg_idx, op2_idx, depth;
1173 int live_after_node;
1176 op2_reg_idx = arch_register_get_index(op2);
1177 if (op2_reg_idx == REG_VFP_UKNWN) {
1178 /* just take any value from stack */
1179 if(state->depth > 0) {
1181 DEBUG_ONLY(op2 = NULL);
1182 live_after_node = 1;
1184 /* produce a new value which we will consume immediately */
1185 x87_create_fldz(state, n, op2_reg_idx);
1186 live_after_node = 0;
1187 op2_idx = x87_on_stack(state, op2_reg_idx);
1188 assert(op2_idx >= 0);
1191 op2_idx = x87_on_stack(state, op2_reg_idx);
1192 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1193 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1194 assert(op2_idx >= 0);
1197 mode = get_ia32_ls_mode(n);
1198 depth = x87_get_depth(state);
1200 if (live_after_node) {
1202 Problem: fst doesn't support mode_E (spills), only fstp does
1204 - stack not full: push value and fstp
1205 - stack full: fstp value and load again
1207 if (mode == mode_E) {
1208 if (depth < N_x87_REGS) {
1209 /* ok, we have a free register: push + fstp */
1210 x87_create_fpush(state, n, op2_idx, STORE_VAL_IDX);
1212 x87_patch_insn(n, op_p);
1214 ir_node *vfld, *mem, *block, *rproj, *mproj;
1217 /* stack full here: need fstp + load */
1219 x87_patch_insn(n, op_p);
1221 block = get_nodes_block(n);
1222 irg = get_irn_irg(n);
1223 vfld = new_rd_ia32_vfld(NULL, irg, block, get_irn_n(n, 0), get_irn_n(n, 1), new_rd_NoMem(irg), get_ia32_ls_mode(n));
1225 /* copy all attributes */
1226 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1227 if (is_ia32_use_frame(n))
1228 set_ia32_use_frame(vfld);
1229 set_ia32_am_flavour(vfld, get_ia32_am_flavour(n));
1230 set_ia32_op_type(vfld, ia32_am_Source);
1231 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1232 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1233 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1235 rproj = new_r_Proj(irg, block, vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1236 mproj = new_r_Proj(irg, block, vfld, mode_M, pn_ia32_vfld_M);
1237 mem = get_irn_Proj_for_mode(n, mode_M);
1239 assert(mem && "Store memory not found");
1241 arch_set_irn_register(sim->arch_env, rproj, op2);
1243 /* reroute all former users of the store memory to the load memory */
1244 edges_reroute(mem, mproj, irg);
1245 /* set the memory input of the load to the store memory */
1246 set_irn_n(vfld, 2, mem);
1248 sched_add_after(n, vfld);
1249 sched_add_after(vfld, rproj);
1251 /* rewire all users, scheduled after the store, to the loaded value */
1252 collect_and_rewire_users(n, val, rproj);
1257 /* we can only store the tos to memory */
1259 x87_create_fxch(state, n, op2_idx);
1261 /* mode != mode_E -> use normal fst */
1262 x87_patch_insn(n, op);
1265 /* we can only store the tos to memory */
1267 x87_create_fxch(state, n, op2_idx);
1270 x87_patch_insn(n, op_p);
1273 attr = get_ia32_x87_attr(n);
1274 attr->x87[1] = op2 = &ia32_st_regs[0];
1275 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/* Macro templates that stamp out the per-opcode simulator callbacks.
 * Each generated sim_<op>() wrapper forwards to the matching generic
 * routine (sim_binop / sim_load / sim_unop / sim_store) with the
 * concrete ia32 opcode(s) plugged in.
 * NOTE(review): the embedded source line numbers jump here, so the
 * closing lines of the macro bodies are not visible in this chunk;
 * the visible lines are kept verbatim. */
1280 #define _GEN_BINOP(op, rev) \
1281 static int sim_##op(x87_state *state, ir_node *n) { \
1282 exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
1283 return sim_binop(state, n, &tmpl); \
/* GEN_BINOP: commutative binop — the "reverse" template entry reuses the
 * same opcode; GEN_BINOPR: non-commutative — uses the ##r reverse form. */
1286 #define GEN_BINOP(op) _GEN_BINOP(op, op)
1287 #define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
/* GEN_LOAD2 lets the simulated node name differ from the patched opcode. */
1289 #define GEN_LOAD2(op, nop) \
1290 static int sim_##op(x87_state *state, ir_node *n) { \
1291 return sim_load(state, n, op_ia32_##nop); \
1294 #define GEN_LOAD(op) GEN_LOAD2(op, op)
/* Unary ops have a single opcode, no reverse/pop variants. */
1296 #define GEN_UNOP(op) \
1297 static int sim_##op(x87_state *state, ir_node *n) { \
1298 return sim_unop(state, n, op_ia32_##op); \
/* Stores need both the plain opcode and its pop variant (##p). */
1301 #define GEN_STORE(op) \
1302 static int sim_##op(x87_state *state, ir_node *n) { \
1303 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
1325 * Simulate a fCondJmp.
1327 * @param state the x87 state
1328 * @param n the node that should be simulated (and patched)
1330 * @return NO_NODE_ADDED
1332 static int sim_fCondJmp(x87_state *state, ir_node *n) {
/* Maps the virtual compare-and-jump onto a concrete x87 compare opcode
 * (fcom/fcomr and their p/pp pop variants), inserting fxch exchanges so
 * the operands land in st(0)/st(1) as the chosen opcode requires, and
 * popping operands that are dead after the compare.
 * NOTE(review): several interior lines of this function are missing from
 * this listing (the embedded line numbers jump); the visible code is
 * kept verbatim. */
1336 ia32_x87_attr_t *attr;
1338 x87_simulator *sim = state->sim;
1339 ir_node *op1_node = get_irn_n(n, n_ia32_vfCondJmp_left);
1340 ir_node *op2_node = get_irn_n(n, n_ia32_vfCondJmp_right);
1341 const arch_register_t *op1 = x87_get_irn_register(sim, op1_node);
1342 const arch_register_t *op2 = x87_get_irn_register(sim, op2_node);
1343 int reg_index_1 = arch_register_get_index(op1);
1344 int reg_index_2 = arch_register_get_index(op2);
/* liveness of the vfp registers after this node decides which pop
 * variant of the compare we may use */
1345 unsigned live = vfp_live_args_after(sim, n, 0);
1347 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1348 arch_register_get_name(op1), arch_register_get_name(op2)));
1349 DEBUG_ONLY(vfp_dump_live(live));
1350 DB((dbg, LEVEL_1, "Stack before: "));
1351 DEBUG_ONLY(x87_dump_stack(state));
1353 op1_idx = x87_on_stack(state, reg_index_1);
1354 assert(op1_idx >= 0);
1356 /* BEWARE: check for comp a,a cases, they might happen */
1357 if (reg_index_2 != REG_VFP_NOREG) {
1358 /* second operand is a vfp register */
1359 op2_idx = x87_on_stack(state, reg_index_2);
1360 assert(op2_idx >= 0);
1362 if (is_vfp_live(arch_register_get_index(op2), live)) {
1363 /* second operand is live */
1365 if (is_vfp_live(arch_register_get_index(op1), live)) {
/* both live: no pop allowed — pick direction by which operand is
 * already at TOS, or exchange the first one up */
1366 /* both operands are live */
1369 /* res = tos X op */
1370 dst = op_ia32_fcomJmp;
1371 } else if (op2_idx == 0) {
1372 /* res = op X tos */
1373 dst = op_ia32_fcomrJmp;
1375 /* bring the first one to tos */
1376 x87_create_fxch(state, n, op1_idx);
1380 /* res = tos X op */
1381 dst = op_ia32_fcomJmp;
1384 /* second live, first operand is dead here, bring it to tos.
1385 This means further, op1_idx != op2_idx. */
1386 assert(op1_idx != op2_idx);
1388 x87_create_fxch(state, n, op1_idx);
1393 /* res = tos X op, pop */
1394 dst = op_ia32_fcompJmp;
1398 /* second operand is dead */
1399 if (is_vfp_live(arch_register_get_index(op1), live)) {
1400 /* first operand is live: bring second to tos.
1401 This means further, op1_idx != op2_idx. */
1402 assert(op1_idx != op2_idx);
1404 x87_create_fxch(state, n, op2_idx);
1409 /* res = op X tos, pop */
1410 dst = op_ia32_fcomrpJmp;
1413 /* both operands are dead here, check first for identity. */
1414 if (op1_idx == op2_idx) {
1415 /* identically, one pop needed */
1417 x87_create_fxch(state, n, op1_idx);
1421 /* res = tos X op, pop */
1422 dst = op_ia32_fcompJmp;
1425 /* different, move them to st and st(1) and pop both.
1426 The tricky part is to get one into st(1).*/
1427 else if (op2_idx == 1) {
1428 /* good, second operand is already in the right place, move the first */
1430 /* bring the first on top */
1431 x87_create_fxch(state, n, op1_idx);
1432 assert(op2_idx != 0);
1435 /* res = tos X op, pop, pop */
1436 dst = op_ia32_fcomppJmp;
1438 } else if (op1_idx == 1) {
1439 /* good, first operand is already in the right place, move the second */
1441 /* bring the first on top */
1442 x87_create_fxch(state, n, op2_idx);
1443 assert(op1_idx != 0);
1446 dst = op_ia32_fcomrppJmp;
1449 /* if one is already the TOS, we need two fxch */
1451 /* first one is TOS, move to st(1) */
1452 x87_create_fxch(state, n, 1);
1453 assert(op2_idx != 1);
1455 x87_create_fxch(state, n, op2_idx);
1457 /* res = op X tos, pop, pop */
1458 dst = op_ia32_fcomrppJmp;
1460 } else if (op2_idx == 0) {
1461 /* second one is TOS, move to st(1) */
1462 x87_create_fxch(state, n, 1);
1463 assert(op1_idx != 1);
1465 x87_create_fxch(state, n, op1_idx);
1467 /* res = tos X op, pop, pop */
1468 dst = op_ia32_fcomppJmp;
1471 /* none of them is either TOS or st(1), 3 fxch needed */
1472 x87_create_fxch(state, n, op2_idx);
1473 assert(op1_idx != 0);
1474 x87_create_fxch(state, n, 1);
1476 x87_create_fxch(state, n, op1_idx);
1478 /* res = tos X op, pop, pop */
1479 dst = op_ia32_fcomppJmp;
/* second operand came from memory (address mode): only the plain and
 * single-pop forms exist, operand 1 must be at TOS */
1486 /* second operand is an address mode */
1487 if (is_vfp_live(arch_register_get_index(op1), live)) {
1488 /* first operand is live: bring it to TOS */
1490 x87_create_fxch(state, n, op1_idx);
1493 dst = op_ia32_fcomJmp;
1495 /* first operand is dead: bring it to tos */
1497 x87_create_fxch(state, n, op1_idx);
1500 dst = op_ia32_fcompJmp;
/* rewrite the virtual node into the selected concrete compare */
1505 x87_patch_insn(n, dst);
1506 assert(pop_cnt < 3);
1512 /* patch the operation */
1513 attr = get_ia32_x87_attr(n);
1514 op1 = &ia32_st_regs[op1_idx];
1517 op2 = &ia32_st_regs[op2_idx];
1520 attr->x87[2] = NULL;
1523 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1524 arch_register_get_name(op1), arch_register_get_name(op2)));
1526 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1527 arch_register_get_name(op1)));
1529 return NO_NODE_ADDED;
1530 } /* sim_fCondJmp */
1533 int sim_Keep(x87_state *state, ir_node *node)
/* Simulate a be_Keep: for every kept vfp operand that is still on the
 * x87 stack but no longer live, schedule an fpop after the Keep so the
 * stack does not silently retain dead values.
 * Returns NODE_ADDED when at least one fpop was inserted, NO_NODE_ADDED
 * otherwise. (Tail of the function is not visible in this listing.) */
1536 const arch_register_t *op_reg;
1541 int node_added = NO_NODE_ADDED;
1543 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1545 arity = get_irn_arity(node);
1546 for(i = 0; i < arity; ++i) {
1547 op = get_irn_n(node, i);
1548 op_reg = arch_get_irn_register(state->sim->arch_env, op);
/* only vfp operands are of interest to the x87 simulator */
1549 if(arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1552 reg_id = arch_register_get_index(op_reg);
1553 live = vfp_live_args_after(state->sim, node, 0);
1555 op_stack_idx = x87_on_stack(state, reg_id);
/* on the stack but dead afterwards: pop it right after the Keep */
1556 if(op_stack_idx >= 0 && !is_vfp_live(reg_id, live)) {
1557 x87_create_fpop(state, sched_next(node), 1);
1558 node_added = NODE_ADDED;
1562 DB((dbg, LEVEL_1, "Stack after: "));
1563 DEBUG_ONLY(x87_dump_stack(state));
1569 void keep_float_node_alive(x87_state *state, ir_node *node)
/* Attach a be_Keep to `node` so the (otherwise edge-less) float value is
 * not removed by dead-node elimination; the Keep is scheduled directly
 * after the node. NOTE(review): the `in` array passed to be_new_Keep is
 * declared on a line not visible in this listing — presumably
 * `ir_node *in[1] = { node };` — confirm against the full source. */
1575 const arch_register_class_t *cls;
1577 irg = get_irn_irg(node);
1578 block = get_nodes_block(node);
1579 cls = arch_get_irn_reg_class(state->sim->arch_env, node, -1);
1581 keep = be_new_Keep(cls, irg, block, 1, in);
/* node must already be in the schedule so the Keep can follow it */
1583 assert(sched_is_scheduled(node));
1584 sched_add_after(node, keep);
1588 * Create a copy of a node. Recreate the node if it's a constant.
1590 * @param state the x87 state
1591 * @param n the node to be copied
1593 * @return the copy of n
1595 static ir_node *create_Copy(x87_state *state, ir_node *n) {
/* Copies on the x87 are pushes: constants are cheaper to re-load than
 * to duplicate, so a known fld-constant predecessor is recreated via
 * its constructor; anything else is duplicated with fpushCopy.
 * Either way the result is pushed onto the simulated stack and its
 * x87 attribute slots are filled in.
 * NOTE(review): the `break;` statements between the switch cases and
 * the final `return` are on lines not visible in this listing. */
1596 x87_simulator *sim = state->sim;
1597 ir_graph *irg = get_irn_irg(n);
1598 dbg_info *n_dbg = get_irn_dbg_info(n);
1599 ir_mode *mode = get_irn_mode(n);
1600 ir_node *block = get_nodes_block(n);
1601 ir_node *pred = get_irn_n(n, 0);
/* constructor for the recreated constant; stays NULL for non-constants */
1602 ir_node *(*cnstr)(dbg_info *, ir_graph *, ir_node *, ir_mode *) = NULL;
1604 const arch_register_t *out;
1605 const arch_register_t *op1;
1606 ia32_x87_attr_t *attr;
1608 /* Do not copy constants, recreate them. */
1609 switch (get_ia32_irn_opcode(pred)) {
1610 case iro_ia32_Unknown_VFP:
1612 cnstr = new_rd_ia32_fldz;
1615 cnstr = new_rd_ia32_fld1;
1617 case iro_ia32_fldpi:
1618 cnstr = new_rd_ia32_fldpi;
1620 case iro_ia32_fldl2e:
1621 cnstr = new_rd_ia32_fldl2e;
1623 case iro_ia32_fldl2t:
1624 cnstr = new_rd_ia32_fldl2t;
1626 case iro_ia32_fldlg2:
1627 cnstr = new_rd_ia32_fldlg2;
1629 case iro_ia32_fldln2:
1630 cnstr = new_rd_ia32_fldln2;
1636 out = x87_get_irn_register(sim, n);
1637 op1 = x87_get_irn_register(sim, pred);
1639 if (cnstr != NULL) {
1640 /* copy a constant */
1641 res = (*cnstr)(n_dbg, irg, block, mode);
1643 x87_push(state, arch_register_get_index(out), res);
/* x87[2] is the output slot: the pushed value sits at st(0) */
1645 attr = get_ia32_x87_attr(res);
1646 attr->x87[2] = &ia32_st_regs[0];
1648 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1650 res = new_rd_ia32_fpushCopy(n_dbg, irg, block, pred, mode);
1652 x87_push(state, arch_register_get_index(out), res);
/* x87[0] is the source stack slot, x87[2] the destination (st(0)) */
1654 attr = get_ia32_x87_attr(res);
1655 attr->x87[0] = &ia32_st_regs[op1_idx];
1656 attr->x87[2] = &ia32_st_regs[0];
1658 arch_set_irn_register(sim->arch_env, res, out);
1664 * Simulate a be_Copy.
1666 * @param state the x87 state
1667 * @param n the node that should be simulated (and patched)
1669 * @return NO_NODE_ADDED
1671 static int sim_Copy(x87_state *state, ir_node *n) {
/* Three cases: (1) source still live -> a real duplicate is needed, use
 * create_Copy (fpushCopy or constant recreation); (2) source dead and
 * the output register already occupies a stack slot -> kill the source
 * by patching the Copy into a Pop (with an fxch first if it is not at
 * TOS); (3) source dead, output not on stack -> purely virtual copy:
 * just rename the stack slot and remove the node via exchange().
 * NOTE(review): some interior lines (declarations, closing braces) are
 * not visible in this listing; visible code kept verbatim. */
1672 x87_simulator *sim = state->sim;
1674 const arch_register_t *out;
1675 const arch_register_t *op1;
1676 const arch_register_class_t *class;
1677 ir_node *node, *next;
1678 ia32_x87_attr_t *attr;
1679 int op1_idx, out_idx;
/* only vfp copies concern the x87 simulator */
1682 class = arch_get_irn_reg_class(sim->arch_env, n, -1);
1683 if (class->regs != ia32_vfp_regs)
1686 pred = get_irn_n(n, 0);
1687 out = x87_get_irn_register(sim, n);
1688 op1 = x87_get_irn_register(sim, pred);
1689 live = vfp_live_args_after(sim, n, REGMASK(out));
1691 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1692 arch_register_get_name(op1), arch_register_get_name(out)));
1693 DEBUG_ONLY(vfp_dump_live(live));
1695 /* handle the infamous unknown value */
1696 if (arch_register_get_index(op1) == REG_VFP_UKNWN) {
1697 /* Operand is still live, a real copy. We need here an fpush that can
1698 hold a a register, so use the fpushCopy or recreate constants */
1699 node = create_Copy(state, n);
/* unknown sources are materialized as fldz by create_Copy */
1701 assert(is_ia32_fldz(node));
1702 next = sched_next(n);
1705 sched_add_before(next, node);
1707 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1708 arch_get_irn_register(sim->arch_env, node)->name));
1709 return NO_NODE_ADDED;
1712 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1714 if (is_vfp_live(arch_register_get_index(op1), live)) {
1715 ir_node *pred = get_irn_n(n, 0);
1717 /* Operand is still live, a real copy. We need here an fpush that can
1718 hold a a register, so use the fpushCopy or recreate constants */
1719 node = create_Copy(state, n);
1721 /* We have to make sure the old value doesn't go dead (which can happen
1722 * when we recreate constants). As the simulator expected that value in
1723 * the pred blocks. This is unfortunate as removing it would save us 1
1724 * instruction, but we would have to rerun all the simulation to get
1727 next = sched_next(n);
1730 sched_add_before(next, node);
/* recreated constant left the old value edge-less: pin it with a Keep */
1732 if(get_irn_n_edges(pred) == 0) {
1733 keep_float_node_alive(state, pred);
1736 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1737 arch_get_irn_register(sim->arch_env, node)->name));
1739 out_idx = x87_on_stack(state, arch_register_get_index(out));
1741 if (out_idx >= 0 && out_idx != op1_idx) {
1742 /* Matze: out already on stack? how can this happen? */
1745 /* op1 must be killed and placed where out is */
1747 /* best case, simple remove and rename */
1748 x87_patch_insn(n, op_ia32_Pop);
1749 attr = get_ia32_x87_attr(n);
1750 attr->x87[0] = op1 = &ia32_st_regs[0];
1753 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1755 /* move op1 to tos, store and pop it */
1757 x87_create_fxch(state, n, op1_idx);
1760 x87_patch_insn(n, op_ia32_Pop);
1761 attr = get_ia32_x87_attr(n);
1762 attr->x87[0] = op1 = &ia32_st_regs[out_idx];
1765 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1767 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1769 /* just a virtual copy */
1770 x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
1771 /* don't remove the node to keep the verifier quiet :),
1772 the emitter won't emit any code for the node */
1775 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1776 exchange(n, get_unop_op(n));
1780 return NO_NODE_ADDED;
1784 * Returns the result proj of the call
1786 static ir_node *get_call_result_proj(ir_node *call) {
/* Walks the out-edges of the call and returns the Proj carrying the
 * first call result (pn_be_Call_first_res). The return statements are
 * on lines not visible in this listing; presumably the matching proj
 * is returned, NULL otherwise — confirm against the full source. */
1787 const ir_edge_t *edge;
1789 /* search the result proj */
1790 foreach_out_edge(call, edge) {
1791 ir_node *proj = get_edge_src_irn(edge);
1792 long pn = get_Proj_proj(proj);
1794 if (pn == pn_be_Call_first_res) {
1800 } /* get_call_result_proj */
1803 * Simulate a be_Call.
1805 * @param state the x87 state
1806 * @param n the node that should be simulated
1807 * @param arch_env the architecture environment
1809 * @return NO_NODE_ADDED
1811 static int sim_Call(x87_state *state, ir_node *n, const arch_env_t *arch_env)
/* The x87 stack must be empty across calls; if the callee returns a
 * float, that value appears in st(0), so the result proj is pushed onto
 * the simulated stack to mirror the hardware state. */
1813 ir_type *call_tp = be_Call_get_type(n);
1817 const arch_register_t *reg;
1820 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1822 /* at the begin of a call the x87 state should be empty */
1823 assert(state->depth == 0 && "stack not empty before call");
/* no results -> nothing lands on the fp stack */
1825 if (get_method_n_ress(call_tp) <= 0)
1829 * If the called function returns a float, it is returned in st(0).
1830 * This even happens if the return value is NOT used.
1831 * Moreover, only one return result is supported.
1833 res_type = get_method_res_type(call_tp, 0);
1834 mode = get_type_mode(res_type);
/* non-float results never touch the x87 stack */
1836 if (mode == NULL || !mode_is_float(mode))
1839 resproj = get_call_result_proj(n);
1840 assert(resproj != NULL);
1842 reg = x87_get_irn_register(state->sim, resproj);
1843 x87_push(state, arch_register_get_index(reg), resproj);
1846 DB((dbg, LEVEL_1, "Stack after: "));
1847 DEBUG_ONLY(x87_dump_stack(state));
1849 return NO_NODE_ADDED;
1853 * Simulate a be_Spill.
1855 * @param state the x87 state
1856 * @param n the node that should be simulated (and patched)
1858 * Should not happen, spills are lowered before x87 simulator see them.
1860 static int sim_Spill(x87_state *state, ir_node *n) {
/* Defensive stub: spills must already have been lowered to fst nodes.
 * The assert fires in debug builds; release builds fall through to the
 * store handler as a best-effort recovery. */
1861 assert(0 && "Spill not lowered");
1862 return sim_fst(state, n);
1866 * Simulate a be_Reload.
1868 * @param state the x87 state
1869 * @param n the node that should be simulated (and patched)
1871 * Should not happen, reloads are lowered before x87 simulator see them.
1873 static int sim_Reload(x87_state *state, ir_node *n) {
/* Defensive stub mirroring sim_Spill: reloads must already have been
 * lowered to fld nodes; falls through to the load handler. */
1874 assert(0 && "Reload not lowered");
1875 return sim_fld(state, n);
1879 * Simulate a be_Return.
1881 * @param state the x87 state
1882 * @param n the node that should be simulated (and patched)
1884 * @return NO_NODE_ADDED
1886 static int sim_Return(x87_state *state, ir_node *n) {
/* Counts the floating point return values; exactly those must remain on
 * the x87 stack at the return (asserted), then they are popped from the
 * simulated state. NOTE(review): the counter increment and the pop call
 * inside the loops are on lines not visible in this listing. */
1887 int n_res = be_Return_get_n_rets(n);
1888 int i, n_float_res = 0;
1890 /* only floating point return values must resist on stack */
1891 for (i = 0; i < n_res; ++i) {
1892 ir_node *res = get_irn_n(n, be_pos_Return_val + i);
1894 if (mode_is_float(get_irn_mode(res)))
/* stack depth must equal the number of float results here */
1897 assert(x87_get_depth(state) == n_float_res);
1899 /* pop them virtually */
1900 for (i = n_float_res - 1; i >= 0; --i)
1903 return NO_NODE_ADDED;
/* Describes one lane of a Perm: the register entering the permutation
 * and the register it must leave as. (The closing of this typedef is
 * not visible in this chunk.) */
1906 typedef struct _perm_data_t {
1907 const arch_register_t *in;
1908 const arch_register_t *out;
1912 * Simulate a be_Perm.
1914 * @param state the x87 state
1915 * @param irn the node that should be simulated (and patched)
1917 * @return NO_NODE_ADDED
1919 static int sim_Perm(x87_state *state, ir_node *irn) {
/* A Perm of vfp registers emits no x87 code at all: every input already
 * sits on the FPU stack, so it suffices to record the new owner of each
 * stack slot in the simulated state (one x87_set_st per out-Proj). */
1921 x87_simulator *sim = state->sim;
1922 ir_node *pred = get_irn_n(irn, 0);
1924 const ir_edge_t *edge;
1926 /* handle only floating point Perms */
1927 if (! mode_is_float(get_irn_mode(pred)))
1928 return NO_NODE_ADDED;
1930 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1932 /* Perm is a pure virtual instruction on x87.
1933 All inputs must be on the FPU stack and are pairwise
1934 different from each other.
1935 So, all we need to do is to permutate the stack state. */
1936 n = get_irn_arity(irn);
/* stack_pos is stack-allocated (NEW_ARR_A), no cleanup needed */
1937 NEW_ARR_A(int, stack_pos, n);
1939 /* collect old stack positions */
1940 for (i = 0; i < n; ++i) {
1941 const arch_register_t *inreg = x87_get_irn_register(sim, get_irn_n(irn, i));
1942 int idx = x87_on_stack(state, arch_register_get_index(inreg));
1944 assert(idx >= 0 && "Perm argument not on x87 stack");
1948 /* now do the permutation */
1949 foreach_out_edge(irn, edge) {
1950 ir_node *proj = get_edge_src_irn(edge);
1951 const arch_register_t *out = x87_get_irn_register(sim, proj);
1952 long num = get_Proj_proj(proj);
1954 assert(0 <= num && num < n && "More Proj's than Perm inputs");
1955 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
1957 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1959 return NO_NODE_ADDED;
1962 static int sim_Barrier(x87_state *state, ir_node *node) {
/* Simulate a Barrier: any Unknown_VFP input must correspond to a real
 * value on the FPU stack, so an fldz is materialized before the
 * Barrier, pushed onto the simulated stack, and wired in as the new
 * input. Returns NO_NODE_ADDED (the fldz is inserted BEFORE node, so
 * the scheduler's iteration is unaffected). */
1963 //const arch_env_t *arch_env = state->sim->arch_env;
1966 /* materialize unknown if needed */
1967 arity = get_irn_arity(node);
1968 for(i = 0; i < arity; ++i) {
1969 const arch_register_t *reg;
1972 ia32_x87_attr_t *attr;
1973 ir_node *in = get_irn_n(node, i);
1975 if(!is_ia32_Unknown_VFP(in))
1978 /* TODO: not completely correct... */
1979 reg = &ia32_vfp_regs[REG_VFP_UKNWN];
1982 block = get_nodes_block(node);
1983 zero = new_rd_ia32_fldz(NULL, current_ir_graph, block, mode_E);
1984 x87_push(state, arch_register_get_index(reg), zero);
/* fldz produces its value in st(0) */
1986 attr = get_ia32_x87_attr(zero);
1987 attr->x87[2] = &ia32_st_regs[0];
1989 sched_add_before(node, zero);
1991 set_irn_n(node, i, zero);
1994 return NO_NODE_ADDED;
1999 * Kill any dead registers at block start by popping them from the stack.
2001 * @param sim the simulator handle
2002 * @param block the current block
2003 * @param start_state the x87 state at the begin of the block
2005 * @return the x87 state after dead register killed
2007 static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state) {
/* Builds a bitmask of stack slots whose registers are not live at block
 * entry, then pops them from TOS downward, using fxch to bring dead
 * values up and ffreep-style double-pops where two adjacent dead slots
 * allow it. A fresh cloned state is returned; start_state is untouched.
 * NOTE(review): the kill_mask declaration, the early-return for an
 * empty mask and parts of the pop loop are on lines not visible here. */
2008 x87_state *state = start_state;
2009 ir_node *first_insn = sched_first(block);
2010 ir_node *keep = NULL;
2011 unsigned live = vfp_live_args_after(sim, block, 0);
2013 int i, depth, num_pop;
2016 depth = x87_get_depth(state);
/* bit i of kill_mask marks stack slot st(i) as dead */
2017 for (i = depth - 1; i >= 0; --i) {
2018 int reg = x87_get_st_reg(state, i);
2020 if (! is_vfp_live(reg, live))
2021 kill_mask |= (1 << i);
2025 /* create a new state, will be changed */
2026 state = x87_clone_state(sim, state);
2028 DB((dbg, LEVEL_1, "Killing deads:\n"));
2029 DEBUG_ONLY(vfp_dump_live(live));
2030 DEBUG_ONLY(x87_dump_stack(state));
2032 /* now kill registers */
2034 /* we can only kill from TOS, so bring them up */
2035 if (! (kill_mask & 1)) {
2036 /* search from behind, because we can to a double-pop */
2037 for (i = depth - 1; i >= 0; --i) {
2038 if (kill_mask & (1 << i)) {
2039 kill_mask &= ~(1 << i);
/* -1 marks the slot as "dead placeholder" before the fxch */
2046 x87_set_st(state, -1, keep, i);
2047 x87_create_fxch(state, first_insn, i);
2050 if ((kill_mask & 3) == 3) {
2051 /* we can do a double-pop */
2055 /* only a single pop */
2060 kill_mask >>= num_pop;
2061 keep = x87_create_fpop(state, first_insn, num_pop);
2066 } /* x87_kill_deads */
2069 * If we have PhiEs with unknown operands then we have to make sure that some
2070 * value is actually put onto the stack.
2072 static void fix_unknown_phis(x87_state *state, ir_node *block,
2073 ir_node *pred_block, int pos)
/* For every Phi in `block` whose operand along predecessor `pos` is an
 * Unknown_VFP, materialize an fldz at the end of pred_block, push it
 * onto the simulated stack, and substitute it as the Phi operand.
 * (The Phi-check/continue lines are not visible in this listing.) */
2077 sched_foreach(block, node) {
2079 const arch_register_t *reg;
2080 ia32_x87_attr_t *attr;
2085 op = get_Phi_pred(node, pos);
2086 if(!is_ia32_Unknown_VFP(op))
2089 reg = arch_get_irn_register(state->sim->arch_env, node);
2091 /* create a zero at end of pred block */
2092 zero = new_rd_ia32_fldz(NULL, current_ir_graph, pred_block, mode_E);
2093 x87_push(state, arch_register_get_index(reg), zero);
/* fldz result lives in st(0) */
2095 attr = get_ia32_x87_attr(zero);
2096 attr->x87[2] = &ia32_st_regs[0];
2098 assert(is_ia32_fldz(zero));
/* schedule before the block-terminating jump */
2099 sched_add_before(sched_last(pred_block), zero);
2101 set_Phi_pred(node, pos, zero);
2106 * Run a simulation and fix all virtual instructions for a block.
2108 * @param sim the simulator handle
2109 * @param block the current block
2111 static void x87_simulate_block(x87_simulator *sim, ir_node *block) {
/* Core per-block driver: takes the recorded entry state, kills dead
 * registers, then walks the schedule invoking the per-opcode sim_func
 * registered in op->ops.generic for each node. Afterwards the end
 * state is propagated to each successor: first-visit successors get it
 * as their begin state and are queued; already-visited successors get
 * an x87_shuffle to permute into their recorded begin state. */
2113 blk_state *bl_state = x87_get_bl_state(sim, block);
2114 x87_state *state = bl_state->begin;
2115 const ir_edge_t *edge;
2116 ir_node *start_block;
2118 assert(state != NULL);
2119 /* already processed? */
2120 if (bl_state->end != NULL)
2123 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2124 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2125 DEBUG_ONLY(x87_dump_stack(state));
2127 /* at block begin, kill all dead registers */
2128 state = x87_kill_deads(sim, block, state);
2129 /* create a new state, will be changed */
2130 state = x87_clone_state(sim, state);
2132 /* beware, n might change */
2133 for (n = sched_first(block); !sched_is_end(n); n = next) {
2136 ir_op *op = get_irn_op(n);
2138 next = sched_next(n);
/* nodes without a registered simulator callback need no handling */
2139 if (op->ops.generic == NULL)
2142 func = (sim_func)op->ops.generic;
2145 node_inserted = (*func)(state, n);
2148 sim_func might have added an additional node after n,
2150 beware: n must not be changed by sim_func
2151 (i.e. removed from schedule) in this case
2153 if (node_inserted != NO_NODE_ADDED)
2154 next = sched_next(n);
2157 start_block = get_irg_start_block(get_irn_irg(block));
2159 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));
2161 /* check if the state must be shuffled */
2162 foreach_block_succ(block, edge) {
2163 ir_node *succ = get_edge_src_irn(edge);
2164 blk_state *succ_state;
/* the start block is never a real successor to process */
2166 if (succ == start_block)
2169 succ_state = x87_get_bl_state(sim, succ);
2171 fix_unknown_phis(state, succ, block, get_edge_src_pos(edge));
2173 if (succ_state->begin == NULL) {
2174 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2175 DEBUG_ONLY(x87_dump_stack(state));
2176 succ_state->begin = state;
2178 waitq_put(sim->worklist, succ);
2180 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2181 /* There is already a begin state for the successor, bad.
2182 Do the necessary permutations.
2183 Note that critical edges are removed, so this is always possible:
2184 If the successor has more than one possible input, then it must
2187 x87_shuffle(sim, block, state, succ, succ_state->begin);
2190 bl_state->end = state;
2191 } /* x87_simulate_block */
2194 * Create a new x87 simulator.
2196 * @param sim a simulator handle, will be initialized
2197 * @param irg the current graph
2198 * @param arch_env the architecture environment
2200 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg,
2201 const arch_env_t *arch_env)
/* Sets up the obstack, the per-block state map and the liveness cache
 * (one entry per node index), then registers the sim_* callbacks in the
 * generic function pointer of every opcode that must be simulated.
 * NOTE(review): most ASSOC/ASSOC_IA32/ASSOC_BE invocations and the
 * #undef lines are not visible in this listing. */
2203 obstack_init(&sim->obst);
2204 sim->blk_states = pmap_create();
2205 sim->arch_env = arch_env;
2206 sim->n_idx = get_irg_last_idx(irg);
/* liveness cache is obstack-allocated; freed with the obstack */
2207 sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);
2209 DB((dbg, LEVEL_1, "--------------------------------\n"
2210 "x87 Simulator started for %+F\n", irg));
2212 /* set the generic function pointer of instruction we must simulate */
2213 clear_irp_opcodes_generic_func();
2215 #define ASSOC(op) (op_ ## op)->ops.generic = (op_func)(sim_##op)
2216 #define ASSOC_IA32(op) (op_ia32_v ## op)->ops.generic = (op_func)(sim_##op)
2217 #define ASSOC_BE(op) (op_be_ ## op)->ops.generic = (op_func)(sim_##op)
2231 ASSOC_IA32(fCondJmp);
2243 } /* x87_init_simulator */
2246 * Destroy a x87 simulator.
2248 * @param sim the simulator handle
2250 static void x87_destroy_simulator(x87_simulator *sim) {
2251 pmap_destroy(sim->blk_states);
2252 obstack_free(&sim->obst, NULL);
2253 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2254 } /* x87_destroy_simulator */
2257 * Pre-block walker: calculate the liveness information for the block
2258 * and store it into the sim->live cache.
2260 static void update_liveness_walker(ir_node *block, void *data) {
2261 x87_simulator *sim = data;
2262 update_liveness(sim, block);
2263 } /* update_liveness_walker */
2266 * Run a simulation and fix all virtual instructions for a graph.
2268 * @param env the architecture environment
2269 * @param irg the current graph
2271 * Needs a block-schedule.
2273 void x87_simulate_graph(const arch_env_t *arch_env, be_irg_t *birg) {
/* Graph-level driver: initializes the simulator, seeds the worklist
 * with the start block (empty stack state), precomputes liveness for
 * all blocks, then drains the worklist with x87_simulate_block until a
 * fixpoint is reached, and finally tears the simulator down.
 * NOTE(review): the `x87_simulator sim;` declaration, the empty-state
 * creation and the `do {` opener are on lines not visible here. */
2274 ir_node *block, *start_block;
2275 blk_state *bl_state;
2277 ir_graph *irg = be_get_birg_irg(birg);
2279 /* create the simulator */
2280 x87_init_simulator(&sim, irg, arch_env);
2282 start_block = get_irg_start_block(irg);
2283 bl_state = x87_get_bl_state(&sim, start_block);
2285 /* start with the empty state */
2286 bl_state->begin = empty;
2289 sim.worklist = new_waitq();
2290 waitq_put(sim.worklist, start_block);
2292 be_assure_liveness(birg);
2293 sim.lv = be_get_birg_liveness(birg);
2294 // sim.lv = be_liveness(be_get_birg_irg(birg));
2295 be_liveness_assure_sets(sim.lv);
2297 /* Calculate the liveness for all nodes. We must precalculate this info,
2298 * because the simulator adds new nodes (possible before Phi nodes) which
2299 * would let a lazy calculation fail.
2300 * On the other hand we reduce the computation amount due to
2301 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2303 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
2307 block = waitq_get(sim.worklist);
2308 x87_simulate_block(&sim, block);
2309 } while (! waitq_empty(sim.worklist));
2312 del_waitq(sim.worklist);
2313 x87_destroy_simulator(&sim);
2314 } /* x87_simulate_graph */
/* Register the debug module handle used by all DB() output above. */
2316 void ia32_init_x87(void) {
2317 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
2318 } /* ia32_init_x87 */