 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
36 #include "iredges_t.h"
47 #include "../belive_t.h"
48 #include "../besched_t.h"
49 #include "../benode_t.h"
50 #include "ia32_new_nodes.h"
51 #include "gen_ia32_new_nodes.h"
52 #include "gen_ia32_regalloc_if.h"
57 /* first and second binop index */
64 /* the store val index */
65 #define STORE_VAL_IDX 2
67 #define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
69 /** the debug handle */
70 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
72 /* Forward declaration. */
73 typedef struct _x87_simulator x87_simulator;
76 * An exchange template.
77 * Note that our virtual functions have the same inputs
78 * and attributes as the real ones, so we can simple exchange
80 * Further, x87 supports inverse instructions, so we can handle them.
82 typedef struct _exchange_tmpl {
83 ir_op *normal_op; /**< the normal one */
84 ir_op *reverse_op; /**< the reverse one if exists */
85 ir_op *normal_pop_op; /**< the normal one with tos pop */
86 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
90 * An entry on the simulated x87 stack.
92 typedef struct _st_entry {
93 int reg_idx; /**< the virtual register index of this stack value */
94 ir_node *node; /**< the node that produced this value */
100 typedef struct _x87_state {
101 st_entry st[N_x87_REGS]; /**< the register stack */
102 int depth; /**< the current stack depth */
103 int tos; /**< position of the tos */
104 x87_simulator *sim; /**< The simulator. */
107 /** An empty state, used for blocks without fp instructions. */
108 static x87_state _empty = { { {0, NULL}, }, 0, 0 };
109 static x87_state *empty = (x87_state *)&_empty;
112 NO_NODE_ADDED = 0, /**< No node was added. */
113 NODE_ADDED = 1 /**< A node was added by the simulator in the schedule. */
117 * The type of an instruction simulator function.
119 * @param state the x87 state
120 * @param n the node to be simulated
122 * @return NODE_ADDED if a node was added AFTER n in schedule,
125 typedef int (*sim_func)(x87_state *state, ir_node *n);
128 * A block state: Every block has a x87 state at the beginning and at the end.
130 typedef struct _blk_state {
131 x87_state *begin; /**< state at the begin or NULL if not assigned */
132 x87_state *end; /**< state at the end or NULL if not assigned */
135 #define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
137 /** liveness bitset for vfp registers. */
138 typedef unsigned char vfp_liveness;
143 struct _x87_simulator {
144 struct obstack obst; /**< An obstack for fast allocating. */
145 pmap *blk_states; /**< Map blocks to states. */
146 const arch_env_t *arch_env; /**< The architecture environment. */
147 be_lv_t *lv; /**< intrablock liveness. */
148 vfp_liveness *live; /**< Liveness information. */
149 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
150 waitq *worklist; /**< Worklist of blocks that must be processed. */
154 * Returns the current stack depth.
156 * @param state the x87 state
158 * @return the x87 stack depth
160 static int x87_get_depth(const x87_state *state) {
162 } /* x87_get_depth */
165 * Return the virtual register index at st(pos).
167 * @param state the x87 state
168 * @param pos a stack position
170 * @return the vfp register index that produced the value at st(pos)
172 static int x87_get_st_reg(const x87_state *state, int pos) {
173 assert(pos < state->depth);
174 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
175 } /* x87_get_st_reg */
178 * Return the node at st(pos).
180 * @param state the x87 state
181 * @param pos a stack position
183 * @return the IR node that produced the value at st(pos)
185 static ir_node *x87_get_st_node(const x87_state *state, int pos) {
186 assert(pos < state->depth);
187 return state->st[MASK_TOS(state->tos + pos)].node;
188 } /* x87_get_st_node */
192 * Dump the stack for debugging.
194 * @param state the x87 state
196 static void x87_dump_stack(const x87_state *state) {
199 for (i = state->depth - 1; i >= 0; --i) {
200 DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
201 x87_get_st_node(state, i)));
203 DB((dbg, LEVEL_2, "<-- TOS\n"));
204 } /* x87_dump_stack */
205 #endif /* DEBUG_libfirm */
208 * Set a virtual register to st(pos).
210 * @param state the x87 state
211 * @param reg_idx the vfp register index that should be set
212 * @param node the IR node that produces the value of the vfp register
213 * @param pos the stack position where the new value should be entered
215 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos) {
216 assert(0 < state->depth);
217 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
218 state->st[MASK_TOS(state->tos + pos)].node = node;
220 DB((dbg, LEVEL_2, "After SET_REG: "));
221 DEBUG_ONLY(x87_dump_stack(state));
225 * Set the tos virtual register.
227 * @param state the x87 state
228 * @param reg_idx the vfp register index that should be set
229 * @param node the IR node that produces the value of the vfp register
231 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node) {
232 x87_set_st(state, reg_idx, node, 0);
236 * Swap st(0) with st(pos).
238 * @param state the x87 state
239 * @param pos the stack position to change the tos with
241 static void x87_fxch(x87_state *state, int pos) {
243 assert(pos < state->depth);
245 entry = state->st[MASK_TOS(state->tos + pos)];
246 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
247 state->st[MASK_TOS(state->tos)] = entry;
249 DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
253 * Convert a virtual register to the stack index.
255 * @param state the x87 state
256 * @param reg_idx the register vfp index
258 * @return the stack position where the register is stacked
259 * or -1 if the virtual register was not found
261 static int x87_on_stack(const x87_state *state, int reg_idx) {
262 int i, tos = state->tos;
264 for (i = 0; i < state->depth; ++i)
265 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
271 * Push a virtual Register onto the stack, double pushed allowed.
273 * @param state the x87 state
274 * @param reg_idx the register vfp index
275 * @param node the node that produces the value of the vfp register
277 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node) {
278 assert(state->depth < N_x87_REGS && "stack overrun");
281 state->tos = MASK_TOS(state->tos - 1);
282 state->st[state->tos].reg_idx = reg_idx;
283 state->st[state->tos].node = node;
285 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
289 * Push a virtual Register onto the stack, double pushes are NOT allowed.
291 * @param state the x87 state
292 * @param reg_idx the register vfp index
293 * @param node the node that produces the value of the vfp register
 * Asserts that the register is not already on the stack.
296 static void x87_push(x87_state *state, int reg_idx, ir_node *node) {
297 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
299 x87_push_dbl(state, reg_idx, node);
303 * Pop a virtual Register from the stack.
305 * @param state the x87 state
307 static void x87_pop(x87_state *state) {
308 assert(state->depth > 0 && "stack underrun");
311 state->tos = MASK_TOS(state->tos + 1);
313 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
317 * Returns the block state of a block.
319 * @param sim the x87 simulator handle
320 * @param block the current block
322 * @return the block state
324 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block) {
325 pmap_entry *entry = pmap_find(sim->blk_states, block);
328 blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
329 bl_state->begin = NULL;
330 bl_state->end = NULL;
332 pmap_insert(sim->blk_states, block, bl_state);
336 return PTR_TO_BLKSTATE(entry->value);
337 } /* x87_get_bl_state */
340 * Creates a new x87 state.
342 * @param sim the x87 simulator handle
344 * @return a new x87 state
346 static x87_state *x87_alloc_state(x87_simulator *sim) {
347 x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));
351 } /* x87_alloc_state */
356 * @param sim the x87 simulator handle
357 * @param src the x87 state that will be cloned
359 * @return a cloned copy of the src state
361 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src) {
362 x87_state *res = x87_alloc_state(sim);
364 memcpy(res, src, sizeof(*res));
366 } /* x87_clone_state */
369 * Patch a virtual instruction into a x87 one and return
370 * the node representing the result value.
372 * @param n the IR node to patch
373 * @param op the x87 opcode to patch in
375 static ir_node *x87_patch_insn(ir_node *n, ir_op *op) {
376 ir_mode *mode = get_irn_mode(n);
381 if (mode == mode_T) {
382 /* patch all Proj's */
383 const ir_edge_t *edge;
385 foreach_out_edge(n, edge) {
386 ir_node *proj = get_edge_src_irn(edge);
388 mode = get_irn_mode(proj);
389 if (mode_is_float(mode)) {
391 set_irn_mode(proj, mode_E);
395 } else if (mode_is_float(mode))
396 set_irn_mode(n, mode_E);
398 } /* x87_patch_insn */
401 * Returns the first Proj of a mode_T node having a given mode.
403 * @param n the mode_T node
404 * @param m the desired mode of the Proj
405 * @return The first Proj of mode @p m found or NULL.
407 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m) {
408 const ir_edge_t *edge;
410 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
412 foreach_out_edge(n, edge) {
413 ir_node *proj = get_edge_src_irn(edge);
414 if (get_irn_mode(proj) == m)
419 } /* get_irn_Proj_for_mode */
422 * Wrap the arch_* function here so we can check for errors.
424 static INLINE const arch_register_t *x87_get_irn_register(x87_simulator *sim, const ir_node *irn) {
425 const arch_register_t *res;
427 res = arch_get_irn_register(sim->arch_env, irn);
428 assert(res->reg_class->regs == ia32_vfp_regs);
430 } /* x87_get_irn_register */
432 /* -------------- x87 perm --------------- */
435 * Creates a fxch for shuffle.
437 * @param state the x87 state
438 * @param pos parameter for fxch
439 * @param block the block were fxch is inserted
441 * Creates a new fxch node and reroute the user of the old node
444 * @return the fxch node
446 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block) {
450 fxch = new_rd_ia32_fxch(NULL, get_irn_irg(block), block, mode_E);
451 attr = get_ia32_attr(fxch);
452 attr->x87[0] = &ia32_st_regs[pos];
453 attr->x87[2] = &ia32_st_regs[0];
457 x87_fxch(state, pos);
459 } /* x87_fxch_shuffle */
462 * Calculate the necessary permutations to reach dst_state.
464 * These permutations are done with fxch instructions and placed
465 * at the end of the block.
467 * Note that critical edges are removed here, so we need only
468 * a shuffle if the current block has only one successor.
470 * @param sim the simulator handle
471 * @param block the current block
472 * @param state the current x87 stack state, might be modified
473 * @param dst_block the destination block
474 * @param dst_state destination state
478 static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block, x87_state *state, ir_node *dst_block, const x87_state *dst_state) {
479 int i, n_cycles, k, ri;
480 unsigned cycles[4], all_mask;
481 char cycle_idx[4][8];
482 ir_node *fxch, *before, *after;
484 assert(state->depth == dst_state->depth);
486 /* Some mathematics here:
487 If we have a cycle of length n that includes the tos,
488 we need n-1 exchange operations.
489 We can always add the tos and restore it, so we need
490 n+1 exchange operations for a cycle not containing the tos.
491 So, the maximum of needed operations is for a cycle of 7
492 not including the tos == 8.
493 This is the same number of ops we would need for using stores,
494 so exchange is cheaper (we save the loads).
495 On the other hand, we might need an additional exchange
496 in the next block to bring one operand on top, so the
497 number of ops in the first case is identical.
498 Further, no more than 4 cycles can exists (4 x 2).
500 all_mask = (1 << (state->depth)) - 1;
502 for (n_cycles = 0; all_mask; ++n_cycles) {
503 int src_idx, dst_idx;
505 /* find the first free slot */
506 for (i = 0; i < state->depth; ++i) {
507 if (all_mask & (1 << i)) {
508 all_mask &= ~(1 << i);
510 /* check if there are differences here */
511 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
517 /* no more cycles found */
522 cycles[n_cycles] = (1 << i);
523 cycle_idx[n_cycles][k++] = i;
524 for (src_idx = i; ; src_idx = dst_idx) {
525 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
527 if ((all_mask & (1 << dst_idx)) == 0)
530 cycle_idx[n_cycles][k++] = dst_idx;
531 cycles[n_cycles] |= (1 << dst_idx);
532 all_mask &= ~(1 << dst_idx);
534 cycle_idx[n_cycles][k] = -1;
538 /* no permutation needed */
542 /* Hmm: permutation needed */
543 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
544 DEBUG_ONLY(x87_dump_stack(state));
545 DB((dbg, LEVEL_2, " to\n"));
546 DEBUG_ONLY(x87_dump_stack(dst_state));
550 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
551 for (ri = 0; ri < n_cycles; ++ri) {
552 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
553 for (k = 0; cycle_idx[ri][k] != -1; ++k)
554 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
555 DB((dbg, LEVEL_2, "\n"));
562 * Find the place node must be insert.
563 * We have only one successor block, so the last instruction should
566 before = sched_last(block);
567 assert(is_cfop(before));
569 /* now do the permutations */
570 for (ri = 0; ri < n_cycles; ++ri) {
571 if ((cycles[ri] & 1) == 0) {
572 /* this cycle does not include the tos */
573 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
575 sched_add_after(after, fxch);
577 sched_add_before(before, fxch);
580 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
581 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
583 sched_add_after(after, fxch);
585 sched_add_before(before, fxch);
588 if ((cycles[ri] & 1) == 0) {
589 /* this cycle does not include the tos */
590 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
591 sched_add_after(after, fxch);
598 * Create a fxch node before another node.
600 * @param state the x87 state
601 * @param n the node after the fxch
602 * @param pos exchange st(pos) with st(0)
603 * @param op_idx if >= 0, replace input op_idx of n with the fxch result
607 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos, int op_idx) {
610 ir_graph *irg = get_irn_irg(n);
611 ir_node *block = get_nodes_block(n);
613 x87_fxch(state, pos);
615 fxch = new_rd_ia32_fxch(NULL, irg, block, mode_E);
616 attr = get_ia32_attr(fxch);
617 attr->x87[0] = &ia32_st_regs[pos];
618 attr->x87[2] = &ia32_st_regs[0];
622 sched_add_before(n, fxch);
623 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
625 } /* x87_create_fxch */
628 * Create a fpush before node n.
630 * @param state the x87 state
631 * @param n the node after the fpush
632 * @param pos push st(pos) on stack
633 * @param op_idx replace input op_idx of n with the fpush result
635 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx) {
636 ir_node *fpush, *pred = get_irn_n(n, op_idx);
638 const arch_register_t *out = x87_get_irn_register(state->sim, pred);
640 x87_push_dbl(state, arch_register_get_index(out), pred);
642 fpush = new_rd_ia32_fpush(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
643 attr = get_ia32_attr(fpush);
644 attr->x87[0] = &ia32_st_regs[pos];
645 attr->x87[2] = &ia32_st_regs[0];
648 sched_add_before(n, fpush);
650 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
651 } /* x87_create_fpush */
654 * Create a fpop before node n.
656 * @param state the x87 state
657 * @param n the node after the fpop
658 * @param num pop 1 or 2 values
659 * @param pred node to use as predecessor of the fpop
661 * @return the fpop node
663 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num, ir_node *pred) {
664 ir_node *fpop = pred;
671 fpop = new_rd_ia32_fpop(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
672 attr = get_ia32_attr(fpop);
673 attr->x87[0] = &ia32_st_regs[0];
674 attr->x87[1] = &ia32_st_regs[0];
675 attr->x87[2] = &ia32_st_regs[0];
678 sched_add_before(n, fpop);
679 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
685 } /* x87_create_fpop */
688 * Creates an fldz before node n
690 * @param state the x87 state
691 * @param n the node after the fldz
693 * @return the fldz node
695 static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx) {
696 ir_graph *irg = get_irn_irg(n);
697 ir_node *block = get_nodes_block(n);
700 fldz = new_rd_ia32_fldz(NULL, irg, block, mode_E);
702 sched_add_before(n, fldz);
703 DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));
706 x87_push(state, regidx, fldz);
711 /* --------------------------------- liveness ------------------------------------------ */
714 * The liveness transfer function.
715 * Updates a live set over a single step from a given node to its predecessor.
716 * Everything defined at the node is removed from the set, the uses of the node get inserted.
718 * @param sim The simulator handle.
719 * @param irn The node at which liveness should be computed.
720 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
721 * the registers live after irn.
723 * @return The live bitset.
725 static vfp_liveness vfp_liveness_transfer(x87_simulator *sim, ir_node *irn, vfp_liveness live)
728 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
729 const arch_env_t *arch_env = sim->arch_env;
731 if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
732 const arch_register_t *reg = x87_get_irn_register(sim, irn);
733 live &= ~(1 << arch_register_get_index(reg));
736 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
737 ir_node *op = get_irn_n(irn, i);
739 if (mode_is_float(get_irn_mode(op)) && arch_irn_consider_in_reg_alloc(arch_env, cls, op)) {
740 const arch_register_t *reg = x87_get_irn_register(sim, op);
741 live |= 1 << arch_register_get_index(reg);
745 } /* vfp_liveness_transfer */
748 * Put all live virtual registers at the end of a block into a bitset.
750 * @param sim the simulator handle
751 * @param lv the liveness information
752 * @param bl the block
754 * @return The live bitset at the end of this block
756 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
759 vfp_liveness live = 0;
760 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
761 const arch_env_t *arch_env = sim->arch_env;
762 const be_lv_t *lv = sim->lv;
764 be_lv_foreach(lv, block, be_lv_state_end, i) {
765 const arch_register_t *reg;
766 const ir_node *node = be_lv_get_irn(lv, block, i);
767 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
770 reg = x87_get_irn_register(sim, node);
771 live |= 1 << arch_register_get_index(reg);
775 } /* vfp_liveness_end_of_block */
777 /** get the register mask from an arch_register */
778 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
781 * Return a bitset of argument registers which are live at the end of a node.
783 * @param sim the simulator handle
784 * @param pos the node
785 * @param kill kill mask for the output registers
787 * @return The live bitset.
789 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
791 unsigned idx = get_irn_idx(pos);
793 assert(idx < sim->n_idx);
794 return sim->live[idx] & ~kill;
795 } /* vfp_live_args_after */
798 * Calculate the liveness for a whole block and cache it.
800 * @param sim the simulator handle
801 * @param lv the liveness handle
802 * @param block the block
804 static void update_liveness(x87_simulator *sim, ir_node *block) {
805 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
809 /* now iterate through the block backward and cache the results */
810 sched_foreach_reverse(block, irn) {
811 /* stop at the first Phi: this produces the live-in */
815 idx = get_irn_idx(irn);
816 sim->live[idx] = live;
818 live = vfp_liveness_transfer(sim, irn, live);
820 idx = get_irn_idx(block);
821 sim->live[idx] = live;
822 } /* update_liveness */
825 * Returns true if a register is live in a set.
827 * @param reg_idx the vfp register index
828 * @param live a live bitset
830 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
834 * Dump liveness info.
836 * @param live the live bitset
838 static void vfp_dump_live(vfp_liveness live) {
841 DB((dbg, LEVEL_2, "Live after: "));
842 for (i = 0; i < 8; ++i) {
843 if (live & (1 << i)) {
844 DB((dbg, LEVEL_2, "vf%d ", i));
847 DB((dbg, LEVEL_2, "\n"));
848 } /* vfp_dump_live */
849 #endif /* DEBUG_libfirm */
851 /* --------------------------------- simulators ---------------------------------------- */
853 #define XCHG(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
856 * Simulate a virtual binop.
858 * @param state the x87 state
859 * @param n the node that should be simulated (and patched)
860 * @param tmpl the template containing the 4 possible x87 opcodes
862 * @return NO_NODE_ADDED
864 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl) {
865 int op2_idx = 0, op1_idx;
866 int out_idx, do_pop = 0;
868 ir_node *patched_insn;
870 x87_simulator *sim = state->sim;
871 const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, BINOP_IDX_1));
872 const arch_register_t *op2 = x87_get_irn_register(sim, get_irn_n(n, BINOP_IDX_2));
873 const arch_register_t *out = x87_get_irn_register(sim, n);
874 int reg_index_1 = arch_register_get_index(op1);
875 int reg_index_2 = arch_register_get_index(op2);
876 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
878 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
879 arch_register_get_name(op1), arch_register_get_name(op2),
880 arch_register_get_name(out)));
881 DEBUG_ONLY(vfp_dump_live(live));
882 DB((dbg, LEVEL_1, "Stack before: "));
883 DEBUG_ONLY(x87_dump_stack(state));
885 op1_idx = x87_on_stack(state, reg_index_1);
886 assert(op1_idx >= 0);
888 if (reg_index_2 != REG_VFP_NOREG) {
889 /* second operand is a vfp register */
890 op2_idx = x87_on_stack(state, reg_index_2);
891 assert(op2_idx >= 0);
893 if (is_vfp_live(arch_register_get_index(op2), live)) {
894 /* Second operand is live. */
896 if (is_vfp_live(arch_register_get_index(op1), live)) {
897 /* Both operands are live: push the first one.
898 This works even for op1 == op2. */
899 x87_create_fpush(state, n, op1_idx, BINOP_IDX_2);
900 /* now do fxxx (tos=tos X op) */
904 dst = tmpl->normal_op;
906 /* Second live, first operand is dead here, bring it to tos. */
908 x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
913 /* now do fxxx (tos=tos X op) */
915 dst = tmpl->normal_op;
918 /* Second operand is dead. */
919 if (is_vfp_live(arch_register_get_index(op1), live)) {
920 /* First operand is live: bring second to tos. */
922 x87_create_fxch(state, n, op2_idx, BINOP_IDX_2);
927 /* now do fxxxr (tos = op X tos) */
929 dst = tmpl->reverse_op;
931 /* Both operands are dead here, pop them from the stack. */
934 /* Both are identically and on tos, no pop needed. */
935 /* here fxxx (tos = tos X tos) */
936 dst = tmpl->normal_op;
939 /* now do fxxxp (op = op X tos, pop) */
940 dst = tmpl->normal_pop_op;
944 } else if (op1_idx == 0) {
945 assert(op1_idx != op2_idx);
946 /* now do fxxxrp (op = tos X op, pop) */
947 dst = tmpl->reverse_pop_op;
951 /* Bring the second on top. */
952 x87_create_fxch(state, n, op2_idx, BINOP_IDX_2);
953 if (op1_idx == op2_idx) {
954 /* Both are identically and on tos now, no pop needed. */
957 /* use fxxx (tos = tos X tos) */
958 dst = tmpl->normal_op;
961 /* op2 is on tos now */
963 /* use fxxxp (op = op X tos, pop) */
964 dst = tmpl->normal_pop_op;
972 /* second operand is an address mode */
973 if (is_vfp_live(arch_register_get_index(op1), live)) {
974 /* first operand is live: push it here */
975 x87_create_fpush(state, n, op1_idx, BINOP_IDX_1);
977 /* use fxxx (tos = tos X mem) */
978 dst = tmpl->normal_op;
981 /* first operand is dead: bring it to tos */
983 x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
987 /* use fxxxp (tos = tos X mem) */
988 dst = tmpl->normal_op;
993 patched_insn = x87_patch_insn(n, dst);
994 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
999 /* patch the operation */
1000 attr = get_ia32_attr(n);
1001 attr->x87[0] = op1 = &ia32_st_regs[op1_idx];
1002 if (reg_index_2 != REG_VFP_NOREG) {
1003 attr->x87[1] = op2 = &ia32_st_regs[op2_idx];
1005 attr->x87[2] = out = &ia32_st_regs[out_idx];
1007 if (reg_index_2 != REG_VFP_NOREG) {
1008 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1009 arch_register_get_name(op1), arch_register_get_name(op2),
1010 arch_register_get_name(out)));
1012 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1013 arch_register_get_name(op1),
1014 arch_register_get_name(out)));
1017 return NO_NODE_ADDED;
1021 * Simulate a virtual Unop.
1023 * @param state the x87 state
1024 * @param n the node that should be simulated (and patched)
1025 * @param op the x87 opcode that will replace n's opcode
1027 * @return NO_NODE_ADDED
1029 static int sim_unop(x87_state *state, ir_node *n, ir_op *op) {
1030 int op1_idx, out_idx;
1031 x87_simulator *sim = state->sim;
1032 const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, UNOP_IDX));
1033 const arch_register_t *out = x87_get_irn_register(sim, n);
1035 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1037 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1038 DEBUG_ONLY(vfp_dump_live(live));
1040 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1042 if (is_vfp_live(arch_register_get_index(op1), live)) {
1043 /* push the operand here */
1044 x87_create_fpush(state, n, op1_idx, UNOP_IDX);
1048 /* operand is dead, bring it to tos */
1050 x87_create_fxch(state, n, op1_idx, UNOP_IDX);
1055 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1057 attr = get_ia32_attr(n);
1058 attr->x87[0] = op1 = &ia32_st_regs[0];
1059 attr->x87[2] = out = &ia32_st_regs[0];
1060 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1062 return NO_NODE_ADDED;
1066 * Simulate a virtual Load instruction.
1068 * @param state the x87 state
1069 * @param n the node that should be simulated (and patched)
1070 * @param op the x87 opcode that will replace n's opcode
1072 * @return NO_NODE_ADDED
1074 static int sim_load(x87_state *state, ir_node *n, ir_op *op) {
1075 const arch_register_t *out = x87_get_irn_register(state->sim, n);
1078 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
1079 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1080 assert(out == x87_get_irn_register(state->sim, n));
1081 attr = get_ia32_attr(n);
1082 attr->x87[2] = out = &ia32_st_regs[0];
1083 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1085 return NO_NODE_ADDED;
1089 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1091 * @param store The store
1092 * @param old_val The former value
1093 * @param new_val The new value
1095 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val) {
1096 const ir_edge_t *edge, *ne;
1098 foreach_out_edge_safe(old_val, edge, ne) {
1099 ir_node *user = get_edge_src_irn(edge);
1101 if (! user || user == store)
1104 /* if the user is scheduled after the store: rewire */
1105 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1107 /* find the input of the user pointing to the old value */
1108 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1109 if (get_irn_n(user, i) == old_val)
1110 set_irn_n(user, i, new_val);
1114 } /* collect_and_rewire_users */
1117 * Simulate a virtual Store.
1119 * @param state the x87 state
1120 * @param n the node that should be simulated (and patched)
1121 * @param op the x87 store opcode
1122 * @param op_p the x87 store and pop opcode
1124 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p) {
1125 x87_simulator *sim = state->sim;
1126 ir_node *val = get_irn_n(n, STORE_VAL_IDX);
1127 const arch_register_t *op2 = x87_get_irn_register(sim, val);
1128 unsigned live = vfp_live_args_after(sim, n, 0);
1129 int insn = NO_NODE_ADDED;
1131 int op2_reg_idx, op2_idx, depth;
1132 int live_after_node;
1135 op2_reg_idx = arch_register_get_index(op2);
1136 if (op2_reg_idx == REG_VFP_UKNWN) {
1137 /* just take any value from stack */
1138 if(state->depth > 0) {
1140 DEBUG_ONLY(op2 = NULL);
1141 live_after_node = 1;
1143 /* produce a new value which we will consume immediately */
1144 x87_create_fldz(state, n, op2_reg_idx);
1145 live_after_node = 0;
1146 op2_idx = x87_on_stack(state, op2_reg_idx);
1147 assert(op2_idx >= 0);
1150 op2_idx = x87_on_stack(state, op2_reg_idx);
1151 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1152 assert(op2_idx >= 0);
1153 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1156 mode = get_ia32_ls_mode(n);
1157 depth = x87_get_depth(state);
1159 if (live_after_node) {
1161 Problem: fst doesn't support mode_E (spills), only fstp does
1163 - stack not full: push value and fstp
1164 - stack full: fstp value and load again
1166 if (mode == mode_E) {
1167 if (depth < N_x87_REGS) {
1168 /* ok, we have a free register: push + fstp */
1169 x87_create_fpush(state, n, op2_idx, STORE_VAL_IDX);
1171 x87_patch_insn(n, op_p);
1173 ir_node *vfld, *mem, *block, *rproj, *mproj;
1176 /* stack full here: need fstp + load */
1178 x87_patch_insn(n, op_p);
1180 block = get_nodes_block(n);
1181 irg = get_irn_irg(n);
1182 vfld = new_rd_ia32_vfld(NULL, irg, block, get_irn_n(n, 0), get_irn_n(n, 1), new_rd_NoMem(irg));
1184 /* copy all attributes */
1185 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1186 if (is_ia32_use_frame(n))
1187 set_ia32_use_frame(vfld);
1188 set_ia32_am_flavour(vfld, get_ia32_am_flavour(n));
1189 set_ia32_op_type(vfld, ia32_am_Source);
1190 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1191 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1192 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1194 rproj = new_r_Proj(irg, block, vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1195 mproj = new_r_Proj(irg, block, vfld, mode_M, pn_ia32_vfld_M);
1196 mem = get_irn_Proj_for_mode(n, mode_M);
1198 assert(mem && "Store memory not found");
1200 arch_set_irn_register(sim->arch_env, rproj, op2);
1202 /* reroute all former users of the store memory to the load memory */
1203 edges_reroute(mem, mproj, irg);
1204 /* set the memory input of the load to the store memory */
1205 set_irn_n(vfld, 2, mem);
1207 sched_add_after(n, vfld);
1208 sched_add_after(vfld, rproj);
1210 /* rewire all users, scheduled after the store, to the loaded value */
1211 collect_and_rewire_users(n, val, rproj);
1216 /* we can only store the tos to memory */
1218 x87_create_fxch(state, n, op2_idx, STORE_VAL_IDX);
1220 /* mode != mode_E -> use normal fst */
1221 x87_patch_insn(n, op);
1224 /* we can only store the tos to memory */
1226 x87_create_fxch(state, n, op2_idx, STORE_VAL_IDX);
1229 x87_patch_insn(n, op_p);
1232 attr = get_ia32_attr(n);
1233 attr->x87[1] = op2 = &ia32_st_regs[0];
1234 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
1240 * Simulate a virtual Phi.
1241 * Just for cosmetic reasons change the mode of Phi nodes to mode_E.
1243 * @param state the x87 state
1244 * @param n the node that should be simulated (and patched)
1245 * @param arch_env the architecture environment
1247 * @return NO_NODE_ADDED
static int sim_Phi(x87_state *state, ir_node *n, const arch_env_t *arch_env) {
	ir_mode *mode = get_irn_mode(n);

	/* x87 values live on the fp stack in extended precision, so every
	   float Phi is normalized to mode_E */
	if (mode_is_float(mode))
		set_irn_mode(n, mode_E);

	return NO_NODE_ADDED;
/* Generator macro for binop simulators: emits sim_<op>() which fills an
   exchange template with the normal/reverse and popping opcode variants
   and delegates to sim_binop(). */
#define _GEN_BINOP(op, rev) \
static int sim_##op(x87_state *state, ir_node *n) { \
	exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
	return sim_binop(state, n, &tmpl); \
#define GEN_BINOP(op)   _GEN_BINOP(op, op)
#define GEN_BINOPR(op)  _GEN_BINOP(op, op##r)

/* Generator macro for load simulators: sim_<op>() delegates to sim_load()
   with the concrete ia32 opcode. */
#define GEN_LOAD2(op, nop) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_load(state, n, op_ia32_##nop); \
#define GEN_LOAD(op)    GEN_LOAD2(op, op)

/* Generator macros for unop and store simulators, delegating to sim_unop()
   and sim_store() respectively. */
#define GEN_UNOP(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_unop(state, n, op_ia32_##op); \
#define GEN_STORE(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
1306 * Simulate a fCondJmp.
1308 * @param state the x87 state
1309 * @param n the node that should be simulated (and patched)
1311 * @return NO_NODE_ADDED
static int sim_fCondJmp(x87_state *state, ir_node *n) {
	x87_simulator *sim = state->sim;
	const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, BINOP_IDX_1));
	const arch_register_t *op2 = x87_get_irn_register(sim, get_irn_n(n, BINOP_IDX_2));
	int reg_index_1 = arch_register_get_index(op1);
	int reg_index_2 = arch_register_get_index(op2);
	/* liveness after n decides whether an operand may be popped by the compare */
	unsigned live = vfp_live_args_after(sim, n, 0);

	DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
		arch_register_get_name(op1), arch_register_get_name(op2)));
	DEBUG_ONLY(vfp_dump_live(live));
	DB((dbg, LEVEL_1, "Stack before: "));
	DEBUG_ONLY(x87_dump_stack(state));

	/* the first operand must already reside on the simulated fp stack */
	op1_idx = x87_on_stack(state, reg_index_1);
	assert(op1_idx >= 0);

	/* BEWARE: check for comp a,a cases, they might happen */
	if (reg_index_2 != REG_VFP_NOREG) {
		/* second operand is a vfp register */
		op2_idx = x87_on_stack(state, reg_index_2);
		assert(op2_idx >= 0);

		if (is_vfp_live(arch_register_get_index(op2), live)) {
			/* second operand is live */
			if (is_vfp_live(arch_register_get_index(op1), live)) {
				/* both operands are live: no pop variant may be used */
					/* res = tos X op */
					dst = op_ia32_fcomJmp;
				} else if (op2_idx == 0) {
					/* res = op X tos */
					dst = op_ia32_fcomrJmp;
					/* bring the first one to tos */
					x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
					/* res = tos X op */
					dst = op_ia32_fcomJmp;
				/* second live, first operand is dead here, bring it to tos.
				   This means further, op1_idx != op2_idx. */
				assert(op1_idx != op2_idx);
				x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
				/* res = tos X op, pop */
				dst = op_ia32_fcompJmp;
			/* second operand is dead */
			if (is_vfp_live(arch_register_get_index(op1), live)) {
				/* first operand is live: bring second to tos.
				   This means further, op1_idx != op2_idx. */
				assert(op1_idx != op2_idx);
				x87_create_fxch(state, n, op2_idx, BINOP_IDX_2);
				/* res = op X tos, pop */
				dst = op_ia32_fcomrpJmp;
			/* both operands are dead here, check first for identity. */
			if (op1_idx == op2_idx) {
				/* identical operands, one pop needed */
				x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
				/* res = tos X op, pop */
				dst = op_ia32_fcompJmp;
			/* different, move them to st and st(1) and pop both.
			   The tricky part is to get one into st(1).*/
			else if (op2_idx == 1) {
				/* good, second operand is already in the right place, move the first */
				/* bring the first on top */
				x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
				assert(op2_idx != 0);
				/* res = tos X op, pop, pop */
				dst = op_ia32_fcomppJmp;
			} else if (op1_idx == 1) {
				/* good, first operand is already in the right place, move the second */
				/* bring the first on top */
				x87_create_fxch(state, n, op2_idx, BINOP_IDX_2);
				assert(op1_idx != 0);
				dst = op_ia32_fcomrppJmp;
				/* if one is already the TOS, we need two fxch */
				/* first one is TOS, move to st(1) */
				x87_create_fxch(state, n, 1, BINOP_IDX_1);
				assert(op2_idx != 1);
				x87_create_fxch(state, n, op2_idx, BINOP_IDX_2);
				/* res = op X tos, pop, pop */
				dst = op_ia32_fcomrppJmp;
			} else if (op2_idx == 0) {
				/* second one is TOS, move to st(1) */
				x87_create_fxch(state, n, 1, BINOP_IDX_2);
				assert(op1_idx != 1);
				x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
				/* res = tos X op, pop, pop */
				dst = op_ia32_fcomppJmp;
				/* none of them is either TOS or st(1), 3 fxch needed */
				x87_create_fxch(state, n, op2_idx, BINOP_IDX_2);
				assert(op1_idx != 0);
				x87_create_fxch(state, n, 1, BINOP_IDX_2);
				x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
				/* res = tos X op, pop, pop */
				dst = op_ia32_fcomppJmp;
		/* second operand is an address mode */
		if (is_vfp_live(arch_register_get_index(op1), live)) {
			/* first operand is live: bring it to TOS */
			x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
			dst = op_ia32_fcomJmp;
			/* first operand is dead: bring it to tos */
			x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
			dst = op_ia32_fcompJmp;

	/* replace the virtual compare by the chosen concrete fcom variant */
	x87_patch_insn(n, dst);
	assert(pop_cnt < 3);

	/* patch the operation */
	attr = get_ia32_attr(n);
	op1 = &ia32_st_regs[op1_idx];
	op2 = &ia32_st_regs[op2_idx];
	attr->x87[2] = NULL;

	DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
		arch_register_get_name(op1), arch_register_get_name(op2)));
	DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
		arch_register_get_name(op1)));

	return NO_NODE_ADDED;
} /* sim_fCondJmp */
1512 * Create a copy of a node. Recreate the node if it's a constant.
1514 * @param state the x87 state
1515 * @param n the node to be copied
1517 * @return the copy of n
static ir_node *create_Copy(x87_state *state, ir_node *n) {
	x87_simulator *sim = state->sim;
	ir_graph *irg = get_irn_irg(n);
	dbg_info *n_dbg = get_irn_dbg_info(n);
	ir_mode *mode = get_irn_mode(n);
	ir_node *block = get_nodes_block(n);
	ir_node *pred = get_irn_n(n, 0);
	/* constructor used when the copied value is a recreatable fp constant */
	ir_node *(*cnstr)(dbg_info *, ir_graph *, ir_node *, ir_mode *) = NULL;
	const arch_register_t *out;
	const arch_register_t *op1;

	/* Do not copy constants, recreate them. */
	switch (get_ia32_irn_opcode(pred)) {
	case iro_ia32_Unknown_VFP:
		/* an unknown value is recreated by an fldz */
		cnstr = new_rd_ia32_fldz;
		cnstr = new_rd_ia32_fld1;
	case iro_ia32_fldpi:
		cnstr = new_rd_ia32_fldpi;
	case iro_ia32_fldl2e:
		cnstr = new_rd_ia32_fldl2e;
	case iro_ia32_fldl2t:
		cnstr = new_rd_ia32_fldl2t;
	case iro_ia32_fldlg2:
		cnstr = new_rd_ia32_fldlg2;
	case iro_ia32_fldln2:
		cnstr = new_rd_ia32_fldln2;

	out = x87_get_irn_register(sim, n);
	op1 = x87_get_irn_register(sim, pred);

	if (cnstr != NULL) {
		/* copy a constant: emit a fresh fld of the same constant */
		res = (*cnstr)(n_dbg, irg, block, mode);

		/* the new value occupies the output register on the simulated stack */
		x87_push(state, arch_register_get_index(out), res);

		attr = get_ia32_attr(res);
		attr->x87[2] = &ia32_st_regs[0];
		int op1_idx = x87_on_stack(state, arch_register_get_index(op1));

		/* non-constant: emit an fpush-style copy of the operand */
		res = new_rd_ia32_fpushCopy(n_dbg, irg, block, pred, mode);

		x87_push(state, arch_register_get_index(out), res);

		attr = get_ia32_attr(res);
		attr->x87[0] = &ia32_st_regs[op1_idx];
		attr->x87[2] = &ia32_st_regs[0];
	arch_set_irn_register(sim->arch_env, res, out);
1588 * Simulate a be_Copy.
1590 * @param state the x87 state
1591 * @param n the node that should be simulated (and patched)
1593 * @return NO_NODE_ADDED
static int sim_Copy(x87_state *state, ir_node *n) {
	const arch_register_t *out;
	const arch_register_t *op1;
	ir_node *node, *next;
	int op1_idx, out_idx;
	ir_mode *mode = get_irn_mode(n);

	/* only floating point copies concern the x87 simulator */
	if (!mode_is_float(mode))

	pred = get_irn_n(n, 0);
	out = x87_get_irn_register(sim, n);
	op1 = x87_get_irn_register(sim, pred);
	live = vfp_live_args_after(sim, n, REGMASK(out));

	DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
		arch_register_get_name(op1), arch_register_get_name(out)));
	DEBUG_ONLY(vfp_dump_live(live));

	/* handle the infamous unknown value */
	if (arch_register_get_index(op1) == REG_VFP_UKNWN) {
		/* Operand is still live, a real copy. We need here an fpush that can
		   hold a register, so use the fpushCopy or recreate constants */
		node = create_Copy(state, n);

		assert(is_ia32_fldz(node));
		next = sched_next(n);

		/* keep the schedule consistent: the new node replaces n's slot */
		sched_add_before(next, node);

		DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
			arch_get_irn_register(sim->arch_env, node)->name));
		return NO_NODE_ADDED;

	op1_idx = x87_on_stack(state, arch_register_get_index(op1));

	if (is_vfp_live(arch_register_get_index(op1), live)) {
		/* Operand is still live, a real copy. We need here an fpush that can
		   hold a register, so use the fpushCopy or recreate constants */
		node = create_Copy(state, n);

		next = sched_next(n);
		sched_add_before(next, node);
		DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
			arch_get_irn_register(sim->arch_env, node)->name));

		out_idx = x87_on_stack(state, arch_register_get_index(out));

		if (out_idx >= 0 && out_idx != op1_idx) {
			/* Matze: out already on stack? how can this happen? */

			/* op1 must be killed and placed where out is */
				/* best case, simple remove and rename */
				x87_patch_insn(n, op_ia32_Pop);
				attr = get_ia32_attr(n);
				attr->x87[0] = op1 = &ia32_st_regs[0];

				x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
				/* move op1 to tos, store and pop it */
					x87_create_fxch(state, n, op1_idx, 0);

				x87_patch_insn(n, op_ia32_Pop);
				attr = get_ia32_attr(n);
				attr->x87[0] = op1 = &ia32_st_regs[out_idx];

				x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);

			DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
			/* just a virtual copy */
			x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
			/* don't remove the node to keep the verifier quiet :),
			   the emitter won't emit any code for the node */

			DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
			exchange(n, get_unop_op(n));

	return NO_NODE_ADDED;
1696 * Returns the result proj of the call, or NULL if the result is not used
static ir_node *get_call_result_proj(ir_node *call) {
	const ir_edge_t *edge;
	ir_node *resproj = NULL;

	/* search the result proj */
	foreach_out_edge(call, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		long pn = get_Proj_proj(proj);

		/* only the first call result is of interest here */
		if (pn == pn_be_Call_first_res) {

	if (resproj == NULL) {

	/* the result proj is connected to a Keep and maybe other nodes */
	foreach_out_edge(resproj, edge) {
		ir_node *pred = get_edge_src_irn(edge);
		if (!be_is_Keep(pred)) {

	/* only be_Keep found, so result is not used */
} /* get_call_result_proj */
1729 * Simulate a be_Call.
1731 * @param state the x87 state
1732 * @param n the node that should be simulated
1733 * @param arch_env the architecture environment
1735 * @return NO_NODE_ADDED
static int sim_Call(x87_state *state, ir_node *n, const arch_env_t *arch_env) {
	ir_type *call_tp = be_Call_get_type(n);
	const arch_register_t *reg;

	/* at the begin of a call the x87 state should be empty */
	assert(state->depth == 0 && "stack not empty before call");

	/* calls without results leave the (empty) stack untouched */
	if (get_method_n_ress(call_tp) <= 0)
		return NO_NODE_ADDED;

	/*
	 * If the called function returns a float, it is returned in st(0).
	 * This even happens if the return value is NOT used.
	 * Moreover, only one return result is supported.
	 */
	res_type = get_method_res_type(call_tp, 0);
	mode = get_type_mode(res_type);

	if (mode == NULL || !mode_is_float(mode))
		return NO_NODE_ADDED;

	resproj = get_call_result_proj(n);
	if (resproj == NULL)
		return NO_NODE_ADDED;

	/* model the result in st(0) by pushing it on the simulated stack */
	reg = x87_get_irn_register(state->sim, resproj);
	x87_push(state, arch_register_get_index(reg), resproj);

	return NO_NODE_ADDED;
1772 * Simulate a be_Spill.
1774 * @param state the x87 state
1775 * @param n the node that should be simulated (and patched)
 * Should not happen, spills are lowered before the x87 simulator sees them.
static int sim_Spill(x87_state *state, ir_node *n) {
	/* spills must already have been lowered to fst nodes at this point */
	assert(0 && "Spill not lowered");
	return sim_fst(state, n);
1785 * Simulate a be_Reload.
1787 * @param state the x87 state
1788 * @param n the node that should be simulated (and patched)
 * Should not happen, reloads are lowered before the x87 simulator sees them.
static int sim_Reload(x87_state *state, ir_node *n) {
	/* reloads must already have been lowered to fld nodes at this point */
	assert(0 && "Reload not lowered");
	return sim_fld(state, n);
1798 * Simulate a be_Return.
1800 * @param state the x87 state
1801 * @param n the node that should be simulated (and patched)
1803 * @return NO_NODE_ADDED
static int sim_Return(x87_state *state, ir_node *n) {
	int n_res = be_Return_get_n_rets(n);
	int i, n_float_res = 0;

	/* only floating point return values must reside on the stack */
	for (i = 0; i < n_res; ++i) {
		ir_node *res = get_irn_n(n, be_pos_Return_val + i);

		if (mode_is_float(get_irn_mode(res)))

	/* the simulated stack must hold exactly the float results here */
	assert(x87_get_depth(state) == n_float_res);

	/* pop them virtually */
	for (i = n_float_res - 1; i >= 0; --i)

	return NO_NODE_ADDED;
typedef struct _perm_data_t {
	const arch_register_t *in;   /**< the input register */
	const arch_register_t *out;  /**< the output register */
1831 * Simulate a be_Perm.
1833 * @param state the x87 state
1834 * @param irn the node that should be simulated (and patched)
1836 * @return NO_NODE_ADDED
static int sim_Perm(x87_state *state, ir_node *irn) {
	x87_simulator *sim = state->sim;
	ir_node *pred = get_irn_n(irn, 0);
	const ir_edge_t *edge;

	/* handle only floating point Perms */
	if (! mode_is_float(get_irn_mode(pred)))
		return NO_NODE_ADDED;

	DB((dbg, LEVEL_1, ">>> %+F\n", irn));

	/* Perm is a pure virtual instruction on x87.
	   All inputs must be on the FPU stack and are pairwise
	   different from each other.
	   So, all we need to do is to permutate the stack state. */
	n = get_irn_arity(irn);
	NEW_ARR_A(int, stack_pos, n);

	/* collect old stack positions */
	for (i = 0; i < n; ++i) {
		const arch_register_t *inreg = x87_get_irn_register(sim, get_irn_n(irn, i));
		int idx = x87_on_stack(state, arch_register_get_index(inreg));

		assert(idx >= 0 && "Perm argument not on x87 stack");

	/* now do the permutation */
	foreach_out_edge(irn, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		const arch_register_t *out = x87_get_irn_register(sim, proj);
		long num = get_Proj_proj(proj);

		/* each Proj's register takes the stack slot of the matching input */
		assert(0 <= num && num < n && "More Proj's than Perm inputs");
		x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);

	DB((dbg, LEVEL_1, "<<< %+F\n", irn));

	return NO_NODE_ADDED;
1882 * Kill any dead registers at block start by popping them from the stack.
1884 * @param sim the simulator handle
1885 * @param block the current block
1886 * @param start_state the x87 state at the begin of the block
1888 * @return the x87 state after dead register killed
static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state) {
	x87_state *state = start_state;
	ir_node *first_insn = sched_first(block);
	ir_node *keep = NULL;
	unsigned live = vfp_live_args_after(sim, block, 0);
	int i, depth, num_pop;

	/* mark all stack slots whose register is not live at block begin */
	depth = x87_get_depth(state);
	for (i = depth - 1; i >= 0; --i) {
		int reg = x87_get_st_reg(state, i);

		if (! is_vfp_live(reg, live))
			kill_mask |= (1 << i);

	/* create a new state, will be changed */
	state = x87_clone_state(sim, state);

	DB((dbg, LEVEL_1, "Killing deads:\n"));
	DEBUG_ONLY(vfp_dump_live(live));
	DEBUG_ONLY(x87_dump_stack(state));

	/* now kill registers */
		/* we can only kill from TOS, so bring them up */
		if (! (kill_mask & 1)) {
			/* search from behind, because we can do a double-pop */
			for (i = depth - 1; i >= 0; --i) {
				if (kill_mask & (1 << i)) {
					kill_mask &= ~(1 << i);

			/* bring the dead value to TOS via an fxch before the first insn */
			x87_set_st(state, -1, keep, i);
			keep = x87_create_fxch(state, first_insn, i, -1);

			keep = x87_get_st_node(state, 0);

		if ((kill_mask & 3) == 3) {
			/* we can do a double-pop */
			/* only a single pop */

		kill_mask >>= num_pop;
		keep = x87_create_fpop(state, first_insn, num_pop, keep);
} /* x87_kill_deads */
1954 * Run a simulation and fix all virtual instructions for a block.
1956 * @param sim the simulator handle
1957 * @param block the current block
static void x87_simulate_block(x87_simulator *sim, ir_node *block) {
	blk_state *bl_state = x87_get_bl_state(sim, block);
	x87_state *state = bl_state->begin;
	const ir_edge_t *edge;
	ir_node *start_block;

	assert(state != NULL);
	/* already processed? */
	if (bl_state->end != NULL)

	DB((dbg, LEVEL_1, "Simulate %+F\n", block));
	DB((dbg, LEVEL_2, "State at Block begin:\n "));
	DEBUG_ONLY(x87_dump_stack(state));

	/* at block begin, kill all dead registers */
	state = x87_kill_deads(sim, block, state);

	/* beware, n might change */
	for (n = sched_first(block); !sched_is_end(n); n = next) {
		ir_op *op = get_irn_op(n);

		/* remember the scheduled successor before the sim_func may insert nodes */
		next = sched_next(n);
		if (op->ops.generic == NULL)

		func = (sim_func)op->ops.generic;

		/* have work to do */
		if (state == bl_state->begin) {
			/* create a new state, will be changed */
			state = x87_clone_state(sim, state);

		node_inserted = (*func)(state, n);

		/*
		 * sim_func might have added an additional node after n,
		 * beware: n must not be changed by sim_func
		 * (i.e. removed from schedule) in this case
		 */
		if (node_inserted != NO_NODE_ADDED)
			next = sched_next(n);

	start_block = get_irg_start_block(get_irn_irg(block));

	/* check if the state must be shuffled */
	foreach_block_succ(block, edge) {
		ir_node *succ = get_edge_src_irn(edge);
		blk_state *succ_state;

		if (succ == start_block)

		succ_state = x87_get_bl_state(sim, succ);

		if (succ_state->begin == NULL) {
			/* first visit: propagate our end state as the successor's begin */
			succ_state->begin = state;
			waitq_put(sim->worklist, succ);
			/* There is already a begin state for the successor, bad.
			   Do the necessary permutations.
			   Note that critical edges are removed, so this is always possible:
			   If the successor has more than one possible input, then it must */
			x87_shuffle(sim, block, state, succ, succ_state->begin);

	bl_state->end = state;

	DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));
} /* x87_simulate_block */
2040 * Create a new x87 simulator.
2042 * @param sim a simulator handle, will be initialized
2043 * @param irg the current graph
2044 * @param arch_env the architecture environment
static void x87_init_simulator(x87_simulator *sim, ir_graph *irg,
                               const arch_env_t *arch_env)
	obstack_init(&sim->obst);
	sim->blk_states = pmap_create();
	sim->arch_env = arch_env;
	sim->n_idx = get_irg_last_idx(irg);
	/* one liveness cache entry per node index of the graph */
	sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);

	DB((dbg, LEVEL_1, "--------------------------------\n"
		"x87 Simulator started for %+F\n", irg));

	/* set the generic function pointer of the instructions we must simulate */
	clear_irp_opcodes_generic_func();

#define ASSOC(op)       (op_ ## op)->ops.generic = (op_func)(sim_##op)
#define ASSOC_IA32(op)  (op_ia32_v ## op)->ops.generic = (op_func)(sim_##op)
#define ASSOC_BE(op)    (op_be_ ## op)->ops.generic = (op_func)(sim_##op)
	ASSOC_IA32(fCondJmp);
} /* x87_init_simulator */
2094 * Destroy a x87 simulator.
2096 * @param sim the simulator handle
2098 static void x87_destroy_simulator(x87_simulator *sim) {
2099 pmap_destroy(sim->blk_states);
2100 obstack_free(&sim->obst, NULL);
2101 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2102 } /* x87_destroy_simulator */
2105 * Pre-block walker: calculate the liveness information for the block
2106 * and store it into the sim->live cache.
2108 static void update_liveness_walker(ir_node *block, void *data) {
2109 x87_simulator *sim = data;
2110 update_liveness(sim, block);
2111 } /* update_liveness_walker */
2114 * Run a simulation and fix all virtual instructions for a graph.
2116 * @param env the architecture environment
2117 * @param irg the current graph
2119 * Needs a block-schedule.
void x87_simulate_graph(const arch_env_t *arch_env, be_irg_t *birg) {
	ir_node *block, *start_block;
	blk_state *bl_state;
	ir_graph *irg = be_get_birg_irg(birg);

	/* create the simulator */
	x87_init_simulator(&sim, irg, arch_env);

	start_block = get_irg_start_block(irg);
	bl_state = x87_get_bl_state(&sim, start_block);

	/* start with the empty state */
	bl_state->begin = empty;

	/* seed the worklist with the start block */
	sim.worklist = new_waitq();
	waitq_put(sim.worklist, start_block);

	/* liveness info must be up to date before the precomputation below */
	be_invalidate_liveness(birg);
	be_assure_liveness(birg);
	sim.lv = be_get_birg_liveness(birg);

	/* Calculate the liveness for all nodes. We must precalculate this info,
	 * because the simulator adds new nodes (possible before Phi nodes) which
	 * would let a lazy calculation fail.
	 * On the other hand we reduce the computation amount due to
	 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
	 */
	irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);

		/* process blocks until every reachable block has been simulated */
		block = waitq_get(sim.worklist);
		x87_simulate_block(&sim, block);
	} while (! waitq_empty(sim.worklist));

	del_waitq(sim.worklist);
	x87_destroy_simulator(&sim);
} /* x87_simulate_graph */
void ia32_init_x87(void) {
	/* register the debug channel used by the DB() macros in this file */
	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
} /* ia32_init_x87 */