2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
36 #include "iredges_t.h"
47 #include "../belive_t.h"
48 #include "../besched_t.h"
49 #include "../benode_t.h"
50 #include "bearch_ia32_t.h"
51 #include "ia32_new_nodes.h"
52 #include "gen_ia32_new_nodes.h"
53 #include "gen_ia32_regalloc_if.h"
/** Map a logical stack slot onto the circular st[] array (assumes N_x87_REGS is a power of two). */
#define MASK_TOS(x) ((x) & (N_x87_REGS - 1))

/** the debug handle */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Forward declaration. */
typedef struct _x87_simulator x87_simulator;

/**
 * An exchange template.
 * Note that our virtual functions have the same inputs
 * and attributes as the real ones, so we can simple exchange
 * them. (NOTE(review): a line of this comment appears to be missing here.)
 * Further, x87 supports inverse instructions, so we can handle them.
 */
typedef struct _exchange_tmpl {
	ir_op *normal_op;       /**< the normal one */
	ir_op *reverse_op;      /**< the reverse one if exists */
	ir_op *normal_pop_op;   /**< the normal one with tos pop */
	ir_op *reverse_pop_op;  /**< the reverse one with tos pop */
/**
 * An entry on the simulated x87 stack.
 */
typedef struct _st_entry {
	int reg_idx;   /**< the virtual register index of this stack value */
	ir_node *node; /**< the node that produced this value */

/**
 * The model of the x87 floating point register stack.
 */
typedef struct _x87_state {
	st_entry st[N_x87_REGS]; /**< the register stack */
	int depth;               /**< the current stack depth */
	int tos;                 /**< position of the tos */
	x87_simulator *sim;      /**< The simulator. */
/** An empty state, used for blocks without fp instructions. */
static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
static x87_state *empty = (x87_state *)&_empty;

/** Return values of the instruction simulator functions. */
	NO_NODE_ADDED = 0, /**< No node was added. */
	NODE_ADDED = 1     /**< A node was added by the simulator in the schedule. */
/**
 * The type of an instruction simulator function.
 *
 * @param state  the x87 state
 * @param n      the node to be simulated
 *
 * @return NODE_ADDED if a node was added AFTER n in schedule,
 *         NO_NODE_ADDED otherwise
 */
typedef int (*sim_func)(x87_state *state, ir_node *n);

/**
 * A block state: Every block has a x87 state at the beginning and at the end.
 */
typedef struct _blk_state {
	x87_state *begin; /**< state at the begin or NULL if not assigned */
	x87_state *end;   /**< state at the end or NULL if not assigned */

/** Cast a pmap value pointer back into a blk_state pointer. */
#define PTR_TO_BLKSTATE(p) ((blk_state *)(p))

/** liveness bitset for vfp registers. */
typedef unsigned char vfp_liveness;
/**
 * The x87 simulator: holds all state needed for one simulation run over an irg.
 */
struct _x87_simulator {
	struct obstack obst;        /**< An obstack for fast allocating. */
	pmap *blk_states;           /**< Map blocks to states. */
	const arch_env_t *arch_env; /**< The architecture environment. */
	be_lv_t *lv;                /**< intrablock liveness. */
	vfp_liveness *live;         /**< Liveness information. */
	unsigned n_idx;             /**< The cached get_irg_last_idx() result. */
	waitq *worklist;            /**< Worklist of blocks that must be processed. */
	ia32_isa_t *isa;            /**< the ISA object */
/**
 * Returns the current stack depth.
 *
 * @param state  the x87 state
 *
 * @return the x87 stack depth
 */
static int x87_get_depth(const x87_state *state) {
} /* x87_get_depth */
160 * Return the virtual register index at st(pos).
162 * @param state the x87 state
163 * @param pos a stack position
165 * @return the vfp register index that produced the value at st(pos)
167 static int x87_get_st_reg(const x87_state *state, int pos) {
168 assert(pos < state->depth);
169 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
170 } /* x87_get_st_reg */
173 * Return the node at st(pos).
175 * @param state the x87 state
176 * @param pos a stack position
178 * @return the IR node that produced the value at st(pos)
180 static ir_node *x87_get_st_node(const x87_state *state, int pos) {
181 assert(pos < state->depth);
182 return state->st[MASK_TOS(state->tos + pos)].node;
183 } /* x87_get_st_node */
/**
 * Dump the stack for debugging.
 *
 * @param state  the x87 state
 */
static void x87_dump_stack(const x87_state *state) {
	/* print from the deepest slot up to the tos */
	for (i = state->depth - 1; i >= 0; --i) {
		DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
		    x87_get_st_node(state, i)));
	DB((dbg, LEVEL_2, "<-- TOS\n"));
} /* x87_dump_stack */
#endif /* DEBUG_libfirm */
/**
 * Set a virtual register to st(pos).
 *
 * @param state    the x87 state
 * @param reg_idx  the vfp register index that should be set
 * @param node     the IR node that produces the value of the vfp register
 * @param pos      the stack position where the new value should be entered
 */
static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos) {
	assert(0 < state->depth);
	state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
	state->st[MASK_TOS(state->tos + pos)].node = node;

	DB((dbg, LEVEL_2, "After SET_REG: "));
	DEBUG_ONLY(x87_dump_stack(state));
/**
 * Set the tos virtual register.
 *
 * @param state    the x87 state
 * @param reg_idx  the vfp register index that should be set
 * @param node     the IR node that produces the value of the vfp register
 */
static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node) {
	x87_set_st(state, reg_idx, node, 0);
/**
 * Swap st(0) with st(pos) in the simulated state (no instruction is emitted here).
 *
 * @param state  the x87 state
 * @param pos    the stack position to change the tos with
 */
static void x87_fxch(x87_state *state, int pos) {
	assert(pos < state->depth);

	entry = state->st[MASK_TOS(state->tos + pos)];
	state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
	state->st[MASK_TOS(state->tos)] = entry;

	DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
/**
 * Convert a virtual register to the stack index.
 *
 * @param state    the x87 state
 * @param reg_idx  the register vfp index
 *
 * @return the stack position where the register is stacked
 *         or -1 if the virtual register was not found
 */
static int x87_on_stack(const x87_state *state, int reg_idx) {
	int i, tos = state->tos;

	/* linear scan over the (at most N_x87_REGS deep) stack */
	for (i = 0; i < state->depth; ++i)
		if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
/**
 * Push a virtual Register onto the stack, double pushed allowed.
 *
 * @param state    the x87 state
 * @param reg_idx  the register vfp index
 * @param node     the node that produces the value of the vfp register
 */
static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node) {
	assert(state->depth < N_x87_REGS && "stack overrun");

	/* tos moves down in the circular buffer; new entry becomes st(0) */
	state->tos = MASK_TOS(state->tos - 1);
	state->st[state->tos].reg_idx = reg_idx;
	state->st[state->tos].node = node;

	DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
/**
 * Push a virtual Register onto the stack, double pushes are NOT allowed.
 *
 * @param state    the x87 state
 * @param reg_idx  the register vfp index
 * @param node     the node that produces the value of the vfp register
 */
static void x87_push(x87_state *state, int reg_idx, ir_node *node) {
	assert(x87_on_stack(state, reg_idx) == -1 && "double push");

	x87_push_dbl(state, reg_idx, node);
/**
 * Pop a virtual Register from the stack.
 *
 * @param state  the x87 state
 */
static void x87_pop(x87_state *state) {
	assert(state->depth > 0 && "stack underrun");

	/* tos moves up; the popped slot becomes free */
	state->tos = MASK_TOS(state->tos + 1);

	DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
/**
 * Returns the block state of a block, creating and registering an empty one on demand.
 *
 * @param sim    the x87 simulator handle
 * @param block  the current block
 *
 * @return the block state
 */
static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block) {
	pmap_entry *entry = pmap_find(sim->blk_states, block);

		/* not yet seen: allocate a fresh, unassigned state on the obstack */
		blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
		bl_state->begin = NULL;
		bl_state->end = NULL;

		pmap_insert(sim->blk_states, block, bl_state);

	return PTR_TO_BLKSTATE(entry->value);
} /* x87_get_bl_state */
/**
 * Creates a new x87 state on the simulator's obstack.
 *
 * @param sim  the x87 simulator handle
 *
 * @return a new x87 state
 */
static x87_state *x87_alloc_state(x87_simulator *sim) {
	x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));

} /* x87_alloc_state */
/**
 * Clone a x87 state.
 *
 * @param sim  the x87 simulator handle
 * @param src  the x87 state that will be cloned
 *
 * @return a cloned copy of the src state
 */
static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src) {
	x87_state *res = x87_alloc_state(sim);

	memcpy(res, src, sizeof(*res));
} /* x87_clone_state */
/**
 * Patch a virtual instruction into a x87 one and return
 * the node representing the result value.
 *
 * All float results are switched to mode_E, the mode the real
 * x87 registers have.
 *
 * @param n   the IR node to patch
 * @param op  the x87 opcode to patch in
 */
static ir_node *x87_patch_insn(ir_node *n, ir_op *op) {
	ir_mode *mode = get_irn_mode(n);

	if (mode == mode_T) {
		/* patch all Proj's */
		const ir_edge_t *edge;

		foreach_out_edge(n, edge) {
			ir_node *proj = get_edge_src_irn(edge);

			mode = get_irn_mode(proj);
			if (mode_is_float(mode)) {
				set_irn_mode(proj, mode_E);
	} else if (mode_is_float(mode))
		set_irn_mode(n, mode_E);
} /* x87_patch_insn */
/**
 * Returns the first Proj of a mode_T node having a given mode.
 *
 * @param n  the mode_T node
 * @param m  the desired mode of the Proj
 * @return The first Proj of mode @p m found or NULL.
 */
static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m) {
	const ir_edge_t *edge;

	assert(get_irn_mode(n) == mode_T && "Need mode_T node");

	foreach_out_edge(n, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		if (get_irn_mode(proj) == m)
} /* get_irn_Proj_for_mode */
/**
 * Wrap the arch_* function here so we can check for errors.
 * Asserts that the returned register really belongs to the vfp class.
 */
static INLINE const arch_register_t *x87_get_irn_register(x87_simulator *sim, const ir_node *irn) {
	const arch_register_t *res;

	res = arch_get_irn_register(sim->arch_env, irn);
	assert(res->reg_class->regs == ia32_vfp_regs);
} /* x87_get_irn_register */
/* -------------- x87 perm --------------- */

/**
 * Creates a fxch for shuffle.
 *
 * @param state  the x87 state
 * @param pos    parameter for fxch
 * @param block  the block were fxch is inserted
 *
 * Creates a new fxch node and reroute the user of the old node
 * to the fxch.
 *
 * @return the fxch node
 */
static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block) {
	ia32_x87_attr_t *attr;

	fxch = new_rd_ia32_fxch(NULL, get_irn_irg(block), block, mode_E);
	attr = get_ia32_x87_attr(fxch);
	attr->x87[0] = &ia32_st_regs[pos];
	attr->x87[2] = &ia32_st_regs[0];

	/* keep the simulated stack in sync with the emitted fxch */
	x87_fxch(state, pos);
} /* x87_fxch_shuffle */
/**
 * Calculate the necessary permutations to reach dst_state.
 *
 * These permutations are done with fxch instructions and placed
 * at the end of the block.
 *
 * Note that critical edges are removed here, so we need only
 * a shuffle if the current block has only one successor.
 *
 * @param sim        the simulator handle
 * @param block      the current block
 * @param state      the current x87 stack state, might be modified
 * @param dst_block  the destination block
 * @param dst_state  destination state
 */
static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
                              x87_state *state, ir_node *dst_block,
                              const x87_state *dst_state)
	int i, n_cycles, k, ri;
	unsigned cycles[4], all_mask;
	char cycle_idx[4][8];
	ir_node *fxch, *before, *after;

	assert(state->depth == dst_state->depth);

	/* Some mathematics here:
	   If we have a cycle of length n that includes the tos,
	   we need n-1 exchange operations.
	   We can always add the tos and restore it, so we need
	   n+1 exchange operations for a cycle not containing the tos.
	   So, the maximum of needed operations is for a cycle of 7
	   not including the tos == 8.
	   This is the same number of ops we would need for using stores,
	   so exchange is cheaper (we save the loads).
	   On the other hand, we might need an additional exchange
	   in the next block to bring one operand on top, so the
	   number of ops in the first case is identical.
	   Further, no more than 4 cycles can exists (4 x 2). */
	all_mask = (1 << (state->depth)) - 1;

	/* find all permutation cycles between state and dst_state */
	for (n_cycles = 0; all_mask; ++n_cycles) {
		int src_idx, dst_idx;

		/* find the first free slot */
		for (i = 0; i < state->depth; ++i) {
			if (all_mask & (1 << i)) {
				all_mask &= ~(1 << i);

				/* check if there are differences here */
				if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))

		/* no more cycles found */

		/* follow the cycle starting at slot i */
		cycles[n_cycles] = (1 << i);
		cycle_idx[n_cycles][k++] = i;
		for (src_idx = i; ; src_idx = dst_idx) {
			dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));

			if ((all_mask & (1 << dst_idx)) == 0)

			cycle_idx[n_cycles][k++] = dst_idx;
			cycles[n_cycles] |= (1 << dst_idx);
			all_mask &= ~(1 << dst_idx);
		cycle_idx[n_cycles][k] = -1;

	/* no permutation needed */

	/* Hmm: permutation needed */
	DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
	DEBUG_ONLY(x87_dump_stack(state));
	DB((dbg, LEVEL_2, " to\n"));
	DEBUG_ONLY(x87_dump_stack(dst_state));

	DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
	for (ri = 0; ri < n_cycles; ++ri) {
		DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
		for (k = 0; cycle_idx[ri][k] != -1; ++k)
			DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
		DB((dbg, LEVEL_2, "\n"));

	/*
	 * Find the place node must be insert.
	 * We have only one successor block, so the last instruction should
	 * be a jump.
	 */
	before = sched_last(block);
	assert(is_cfop(before));

	/* now do the permutations */
	for (ri = 0; ri < n_cycles; ++ri) {
		if ((cycles[ri] & 1) == 0) {
			/* this cycle does not include the tos */
			fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
				sched_add_after(after, fxch);
				sched_add_before(before, fxch);
		for (k = 1; cycle_idx[ri][k] != -1; ++k) {
			fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
				sched_add_after(after, fxch);
				sched_add_before(before, fxch);
		if ((cycles[ri] & 1) == 0) {
			/* this cycle does not include the tos */
			fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
			sched_add_after(after, fxch);
/**
 * Create a fxch node before another node.
 *
 * @param state  the x87 state
 * @param n      the node after the fxch
 * @param pos    exchange st(pos) with st(0)
 *
 * @return the fxch node
 */
static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
	ia32_x87_attr_t *attr;
	ir_graph *irg = get_irn_irg(n);
	ir_node *block = get_nodes_block(n);

	/* update the simulated stack first */
	x87_fxch(state, pos);

	fxch = new_rd_ia32_fxch(NULL, irg, block, mode_E);
	attr = get_ia32_x87_attr(fxch);
	attr->x87[0] = &ia32_st_regs[pos];
	attr->x87[2] = &ia32_st_regs[0];

	sched_add_before(n, fxch);
	DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
} /* x87_create_fxch */
/**
 * Create a fpush before node n.
 *
 * @param state   the x87 state
 * @param n       the node after the fpush
 * @param pos     push st(pos) on stack
 * @param op_idx  replace input op_idx of n with the fpush result
 */
static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx) {
	ir_node *fpush, *pred = get_irn_n(n, op_idx);
	ia32_x87_attr_t *attr;
	const arch_register_t *out = x87_get_irn_register(state->sim, pred);

	/* a push duplicates the value: double push is explicitly allowed here */
	x87_push_dbl(state, arch_register_get_index(out), pred);

	fpush = new_rd_ia32_fpush(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
	attr = get_ia32_x87_attr(fpush);
	attr->x87[0] = &ia32_st_regs[pos];
	attr->x87[2] = &ia32_st_regs[0];

	sched_add_before(n, fpush);

	DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
} /* x87_create_fpush */
/**
 * Create a fpop before node n.
 *
 * @param state  the x87 state
 * @param n      the node after the fpop
 * @param num    pop 1 or 2 values
 *
 * @return the fpop node
 */
static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
	ia32_x87_attr_t *attr;
	int cpu = state->sim->isa->opt_arch;

	/* Athlon uses ffreep instead of fpop here — NOTE(review): presumably
	   an arch-specific optimization; the else branch emits a plain fpop */
	if (ARCH_ATHLON(cpu))
		fpop = new_rd_ia32_ffreep(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
		fpop = new_rd_ia32_fpop(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
	attr = get_ia32_x87_attr(fpop);
	attr->x87[0] = &ia32_st_regs[0];
	attr->x87[1] = &ia32_st_regs[0];
	attr->x87[2] = &ia32_st_regs[0];

	sched_add_before(n, fpop);
	DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
} /* x87_create_fpop */
/**
 * Creates an fldz before node n
 *
 * @param state   the x87 state
 * @param n       the node after the fldz
 * @param regidx  the vfp register index recorded for the pushed value
 *
 * @return the fldz node
 */
static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx) {
	ir_graph *irg = get_irn_irg(n);
	ir_node *block = get_nodes_block(n);

	fldz = new_rd_ia32_fldz(NULL, irg, block, mode_E);

	sched_add_before(n, fldz);
	DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));

	/* the constant becomes a new stack entry */
	x87_push(state, regidx, fldz);
/* --------------------------------- liveness ------------------------------------------ */

/**
 * The liveness transfer function.
 * Updates a live set over a single step from a given node to its predecessor.
 * Everything defined at the node is removed from the set, the uses of the node get inserted.
 *
 * @param sim   The simulator handle.
 * @param irn   The node at which liveness should be computed.
 * @param live  The bitset of registers live before @p irn. This set gets modified by updating it to
 *              the registers live after irn.
 *
 * @return The live bitset.
 */
static vfp_liveness vfp_liveness_transfer(x87_simulator *sim, ir_node *irn, vfp_liveness live)
	const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
	const arch_env_t *arch_env = sim->arch_env;

	if (get_irn_mode(irn) == mode_T) {
		const ir_edge_t *edge;

		/* kill all vfp registers defined by the Proj's of this node */
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);

			if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
				const arch_register_t *reg = x87_get_irn_register(sim, proj);
				live &= ~(1 << arch_register_get_index(reg));

	/* kill the register defined by the node itself */
	if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
		const arch_register_t *reg = x87_get_irn_register(sim, irn);
		live &= ~(1 << arch_register_get_index(reg));

	/* all used vfp registers become live */
	for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
		ir_node *op = get_irn_n(irn, i);

		if (mode_is_float(get_irn_mode(op)) && arch_irn_consider_in_reg_alloc(arch_env, cls, op)) {
			const arch_register_t *reg = x87_get_irn_register(sim, op);
			live |= 1 << arch_register_get_index(reg);
} /* vfp_liveness_transfer */
/**
 * Put all live virtual registers at the end of a block into a bitset.
 *
 * @param sim    the simulator handle (liveness is taken from sim->lv)
 * @param block  the block
 *
 * @return The live bitset at the end of this block
 */
static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
	vfp_liveness live = 0;
	const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
	const arch_env_t *arch_env = sim->arch_env;
	const be_lv_t *lv = sim->lv;

	be_lv_foreach(lv, block, be_lv_state_end, i) {
		const arch_register_t *reg;
		const ir_node *node = be_lv_get_irn(lv, block, i);
		/* only vfp registers are of interest here */
		if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))

		reg = x87_get_irn_register(sim, node);
		live |= 1 << arch_register_get_index(reg);
} /* vfp_liveness_end_of_block */
/** get the register mask from an arch_register */
#define REGMASK(reg) (1 << (arch_register_get_index(reg)))

/**
 * Return a bitset of argument registers which are live at the end of a node.
 *
 * @param sim   the simulator handle
 * @param pos   the node
 * @param kill  kill mask for the output registers
 *
 * @return The live bitset.
 */
static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
	unsigned idx = get_irn_idx(pos);

	assert(idx < sim->n_idx);
	return sim->live[idx] & ~kill;
} /* vfp_live_args_after */
/**
 * Calculate the liveness for a whole block and cache it in sim->live,
 * indexed by node idx.
 *
 * @param sim    the simulator handle
 * @param block  the block
 */
static void update_liveness(x87_simulator *sim, ir_node *block) {
	vfp_liveness live = vfp_liveness_end_of_block(sim, block);

	/* now iterate through the block backward and cache the results */
	sched_foreach_reverse(block, irn) {
		/* stop at the first Phi: this produces the live-in */

		idx = get_irn_idx(irn);
		sim->live[idx] = live;

		live = vfp_liveness_transfer(sim, irn, live);
	/* the block's own slot caches the live-in set */
	idx = get_irn_idx(block);
	sim->live[idx] = live;
} /* update_liveness */
/**
 * Returns true if a register is live in a set.
 *
 * @param reg_idx  the vfp register index
 * @param live     a live bitset
 */
#define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))

/**
 * Dump liveness info.
 *
 * @param live  the live bitset
 */
static void vfp_dump_live(vfp_liveness live) {
	DB((dbg, LEVEL_2, "Live after: "));
	for (i = 0; i < 8; ++i) {
		if (live & (1 << i)) {
			DB((dbg, LEVEL_2, "vf%d ", i));
	DB((dbg, LEVEL_2, "\n"));
} /* vfp_dump_live */
#endif /* DEBUG_libfirm */

/* --------------------------------- simulators ---------------------------------------- */

/** Swap two int lvalues. */
#define XCHG(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
/**
 * Simulate a virtual binop.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param tmpl   the template containing the 4 possible x87 opcodes
 *
 * @return NO_NODE_ADDED
 */
static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl) {
	int op2_idx = 0, op1_idx;
	int out_idx, do_pop = 0;
	ia32_x87_attr_t *attr;
	ir_node *patched_insn;
	x87_simulator *sim = state->sim;
	ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
	ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
	const arch_register_t *op1_reg = x87_get_irn_register(sim, op1);
	const arch_register_t *op2_reg = x87_get_irn_register(sim, op2);
	const arch_register_t *out = x87_get_irn_register(sim, n);
	int reg_index_1 = arch_register_get_index(op1_reg);
	int reg_index_2 = arch_register_get_index(op2_reg);
	vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));

	DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
		arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
		arch_register_get_name(out)));
	DEBUG_ONLY(vfp_dump_live(live));
	DB((dbg, LEVEL_1, "Stack before: "));
	DEBUG_ONLY(x87_dump_stack(state));

	if(reg_index_1 == REG_VFP_UKNWN) {
	op1_idx = x87_on_stack(state, reg_index_1);
	assert(op1_idx >= 0);
	op1_live_after = is_vfp_live(arch_register_get_index(op1_reg), live);

	if (reg_index_2 != REG_VFP_NOREG) {
		if(reg_index_2 == REG_VFP_UKNWN) {
		/* second operand is a vfp register */
		op2_idx = x87_on_stack(state, reg_index_2);
		assert(op2_idx >= 0);
			= is_vfp_live(arch_register_get_index(op2_reg), live);

		if (op2_live_after) {
			/* Second operand is live. */

			if (op1_live_after) {
				/* Both operands are live: push the first one.
				   This works even for op1 == op2. */
				x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
				/* now do fxxx (tos=tos X op) */

				dst = tmpl->normal_op;
			/* Second live, first operand is dead here, bring it to tos. */
					x87_create_fxch(state, n, op1_idx);
				/* now do fxxx (tos=tos X op) */

				dst = tmpl->normal_op;
			/* Second operand is dead. */
			if (op1_live_after) {
				/* First operand is live: bring second to tos. */
					x87_create_fxch(state, n, op2_idx);
				/* now do fxxxr (tos = op X tos) */

				dst = tmpl->reverse_op;
				/* Both operands are dead here, pop them from the stack. */
					/* Both are identically and on tos, no pop needed. */
					/* here fxxx (tos = tos X tos) */
					dst = tmpl->normal_op;
						/* now do fxxxp (op = op X tos, pop) */
						dst = tmpl->normal_pop_op;
				} else if (op1_idx == 0) {
					assert(op1_idx != op2_idx);
					/* now do fxxxrp (op = tos X op, pop) */
					dst = tmpl->reverse_pop_op;
					/* Bring the second on top. */
					x87_create_fxch(state, n, op2_idx);
					if (op1_idx == op2_idx) {
						/* Both are identically and on tos now, no pop needed. */
						/* use fxxx (tos = tos X tos) */
						dst = tmpl->normal_op;
						/* op2 is on tos now */
						/* use fxxxp (op = op X tos, pop) */
						dst = tmpl->normal_pop_op;
		/* second operand is an address mode */
		if (op1_live_after) {
			/* first operand is live: push it here */
			x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
			/* use fxxx (tos = tos X mem) */
			dst = tmpl->normal_op;
			/* first operand is dead: bring it to tos */
				x87_create_fxch(state, n, op1_idx);
			/* use fxxxp (tos = tos X mem) */
			dst = tmpl->normal_op;

	patched_insn = x87_patch_insn(n, dst);
	x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);

	/* patch the operation */
	attr = get_ia32_x87_attr(n);
	attr->x87[0] = op1_reg = &ia32_st_regs[op1_idx];
	if (reg_index_2 != REG_VFP_NOREG) {
		attr->x87[1] = op2_reg = &ia32_st_regs[op2_idx];
	attr->x87[2] = out = &ia32_st_regs[out_idx];

	if (reg_index_2 != REG_VFP_NOREG) {
		DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
			arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
			arch_register_get_name(out)));
		DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
			arch_register_get_name(op1_reg),
			arch_register_get_name(out)));

	return NO_NODE_ADDED;
/**
 * Simulate a virtual Unop.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param op     the x87 opcode that will replace n's opcode
 *
 * @return NO_NODE_ADDED
 */
static int sim_unop(x87_state *state, ir_node *n, ir_op *op) {
	int op1_idx, out_idx;
	x87_simulator *sim = state->sim;
	const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, UNOP_IDX));
	const arch_register_t *out = x87_get_irn_register(sim, n);
	ia32_x87_attr_t *attr;
	unsigned live = vfp_live_args_after(sim, n, REGMASK(out));

	DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
	DEBUG_ONLY(vfp_dump_live(live));

	op1_idx = x87_on_stack(state, arch_register_get_index(op1));

	if (is_vfp_live(arch_register_get_index(op1), live)) {
		/* push the operand here */
		x87_create_fpush(state, n, op1_idx, UNOP_IDX);
		/* operand is dead, bring it to tos */
			x87_create_fxch(state, n, op1_idx);

	/* the unop result replaces the tos */
	x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));

	attr = get_ia32_x87_attr(n);
	attr->x87[0] = op1 = &ia32_st_regs[0];
	attr->x87[2] = out = &ia32_st_regs[0];
	DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));

	return NO_NODE_ADDED;
/**
 * Simulate a virtual Load instruction.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param op     the x87 opcode that will replace n's opcode
 *
 * @return NO_NODE_ADDED
 */
static int sim_load(x87_state *state, ir_node *n, ir_op *op) {
	const arch_register_t *out = x87_get_irn_register(state->sim, n);
	ia32_x87_attr_t *attr;

	DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
	/* a load always pushes a new value onto the x87 stack */
	x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
	assert(out == x87_get_irn_register(state->sim, n));
	attr = get_ia32_x87_attr(n);
	attr->x87[2] = out = &ia32_st_regs[0];
	DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));

	return NO_NODE_ADDED;
/**
 * Rewire all users of @p old_val to @p new_val iff they are scheduled after @p store.
 *
 * @param store    The store
 * @param old_val  The former value
 * @param new_val  The new value
 */
static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val) {
	const ir_edge_t *edge, *ne;

	/* safe iteration: set_irn_n() below modifies the edge list */
	foreach_out_edge_safe(old_val, edge, ne) {
		ir_node *user = get_edge_src_irn(edge);

		if (! user || user == store)

		/* if the user is scheduled after the store: rewire */
		if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
			/* find the input of the user pointing to the old value */
			for (i = get_irn_arity(user) - 1; i >= 0; i--) {
				if (get_irn_n(user, i) == old_val)
					set_irn_n(user, i, new_val);
} /* collect_and_rewire_users */
/**
 * Simulate a virtual Store.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param op     the x87 store opcode
 * @param op_p   the x87 store and pop opcode
 */
static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p) {
	x87_simulator *sim = state->sim;
	ir_node *val = get_irn_n(n, n_ia32_vfst_val);
	const arch_register_t *op2 = x87_get_irn_register(sim, val);
	unsigned live = vfp_live_args_after(sim, n, 0);
	int insn = NO_NODE_ADDED;
	ia32_x87_attr_t *attr;
	int op2_reg_idx, op2_idx, depth;
	int live_after_node;

	op2_reg_idx = arch_register_get_index(op2);
	if (op2_reg_idx == REG_VFP_UKNWN) {
		/* just take any value from stack */
		if(state->depth > 0) {
			DEBUG_ONLY(op2 = NULL);
			live_after_node = 1;
			/* produce a new value which we will consume immediately */
			x87_create_fldz(state, n, op2_reg_idx);
			live_after_node = 0;
			op2_idx = x87_on_stack(state, op2_reg_idx);
			assert(op2_idx >= 0);
		op2_idx = x87_on_stack(state, op2_reg_idx);
		live_after_node = is_vfp_live(arch_register_get_index(op2), live);
		DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
		assert(op2_idx >= 0);

	mode = get_ia32_ls_mode(n);
	depth = x87_get_depth(state);

	if (live_after_node) {
		/*
			Problem: fst doesn't support mode_E (spills), only fstp does
			- stack not full: push value and fstp
			- stack full: fstp value and load again
		*/
		if (mode == mode_E) {
			if (depth < N_x87_REGS) {
				/* ok, we have a free register: push + fstp */
				x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
				x87_patch_insn(n, op_p);
				ir_node *vfld, *mem, *block, *rproj, *mproj;

				/* stack full here: need fstp + load */
				x87_patch_insn(n, op_p);

				block = get_nodes_block(n);
				irg = get_irn_irg(n);
				vfld = new_rd_ia32_vfld(NULL, irg, block, get_irn_n(n, 0), get_irn_n(n, 1), new_rd_NoMem(irg), get_ia32_ls_mode(n));

				/* copy all attributes */
				set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
				if (is_ia32_use_frame(n))
					set_ia32_use_frame(vfld);
				set_ia32_op_type(vfld, ia32_am_Source);
				add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
				set_ia32_am_sc(vfld, get_ia32_am_sc(n));
				set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));

				rproj = new_r_Proj(irg, block, vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
				mproj = new_r_Proj(irg, block, vfld, mode_M, pn_ia32_vfld_M);
				mem = get_irn_Proj_for_mode(n, mode_M);

				assert(mem && "Store memory not found");

				arch_set_irn_register(sim->arch_env, rproj, op2);

				/* reroute all former users of the store memory to the load memory */
				edges_reroute(mem, mproj, irg);
				/* set the memory input of the load to the store memory */
				set_irn_n(vfld, n_ia32_vfld_mem, mem);

				sched_add_after(n, vfld);
				sched_add_after(vfld, rproj);

				/* rewire all users, scheduled after the store, to the loaded value */
				collect_and_rewire_users(n, val, rproj);
			/* we can only store the tos to memory */
				x87_create_fxch(state, n, op2_idx);

			/* mode != mode_E -> use normal fst */
			x87_patch_insn(n, op);
		/* we can only store the tos to memory */
			x87_create_fxch(state, n, op2_idx);

		x87_patch_insn(n, op_p);

	attr = get_ia32_x87_attr(n);
	attr->x87[1] = op2 = &ia32_st_regs[0];
	DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/*
 * Macro templates that stamp out the per-opcode simulator callbacks.
 * Each generated sim_<op>() forwards to the generic sim_binop/sim_load/
 * sim_unop/sim_store routine together with the real (patched) ir_op(s)
 * that replace the virtual one.
 * NOTE(review): the closing brace lines of these macro bodies are not
 * visible in this excerpt.
 */
/* Binary op: template carries normal, reverse and the two pop variants. */
1278 #define _GEN_BINOP(op, rev) \
1279 static int sim_##op(x87_state *state, ir_node *n) { \
1280 exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
1281 return sim_binop(state, n, &tmpl); \
/* Commutative binop: the reverse op is the op itself. */
1284 #define GEN_BINOP(op) _GEN_BINOP(op, op)
/* Non-commutative binop: reverse op is op##r (e.g. fsub vs. fsubr). */
1285 #define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
/* Load-like op simulated through sim_load() with a possibly different real op. */
1287 #define GEN_LOAD2(op, nop) \
1288 static int sim_##op(x87_state *state, ir_node *n) { \
1289 return sim_load(state, n, op_ia32_##nop); \
1292 #define GEN_LOAD(op) GEN_LOAD2(op, op)
/* Unary op simulated through sim_unop(). */
1294 #define GEN_UNOP(op) \
1295 static int sim_##op(x87_state *state, ir_node *n) { \
1296 return sim_unop(state, n, op_ia32_##op); \
/* Store-like op: needs both the plain and the popping real op. */
1299 #define GEN_STORE(op) \
1300 static int sim_##op(x87_state *state, ir_node *n) { \
1301 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
/**
1323 * Simulate a fCondJmp.
1325 * @param state the x87 state
1326 * @param n the node that should be simulated (and patched)
1328 * @return NO_NODE_ADDED
 */
1330 static int sim_fCmpJmp(x87_state *state, ir_node *n) {
1334 ia32_x87_attr_t *attr;
1336 x87_simulator *sim = state->sim;
1337 ir_node *op1_node = get_irn_n(n, n_ia32_vfCmpJmp_left);
1338 ir_node *op2_node = get_irn_n(n, n_ia32_vfCmpJmp_right);
1339 const arch_register_t *op1 = x87_get_irn_register(sim, op1_node);
1340 const arch_register_t *op2 = x87_get_irn_register(sim, op2_node);
1341 int reg_index_1 = arch_register_get_index(op1);
1342 int reg_index_2 = arch_register_get_index(op2);
1343 unsigned live = vfp_live_args_after(sim, n, 0);
1345 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1346 arch_register_get_name(op1), arch_register_get_name(op2)));
1347 DEBUG_ONLY(vfp_dump_live(live));
1348 DB((dbg, LEVEL_1, "Stack before: "));
1349 DEBUG_ONLY(x87_dump_stack(state));
/* the first operand must already live on the simulated fpu stack */
1351 op1_idx = x87_on_stack(state, reg_index_1);
1352 assert(op1_idx >= 0);
1354 /* BEWARE: check for comp a,a cases, they might happen */
1355 if (reg_index_2 != REG_VFP_NOREG) {
1356 /* second operand is a vfp register */
1357 op2_idx = x87_on_stack(state, reg_index_2);
1358 assert(op2_idx >= 0);
/* The fcom variant is chosen from the operands' liveness after n and
 * their current stack positions: dead operands must be popped, and the
 * compare always works against the top of stack (TOS), so fxch is
 * inserted to bring the right value up when needed. */
1360 if (is_vfp_live(arch_register_get_index(op2), live)) {
1361 /* second operand is live */
1363 if (is_vfp_live(arch_register_get_index(op1), live)) {
1364 /* both operands are live */
1367 /* res = tos X op */
1368 dst = op_ia32_fcomJmp;
1369 } else if (op2_idx == 0) {
1370 /* res = op X tos */
1371 dst = op_ia32_fcomrJmp;
1373 /* bring the first one to tos */
1374 x87_create_fxch(state, n, op1_idx);
1378 /* res = tos X op */
1379 dst = op_ia32_fcomJmp;
1382 /* second live, first operand is dead here, bring it to tos.
1383 This means further, op1_idx != op2_idx. */
1384 assert(op1_idx != op2_idx);
1386 x87_create_fxch(state, n, op1_idx);
1391 /* res = tos X op, pop */
1392 dst = op_ia32_fcompJmp;
1396 /* second operand is dead */
1397 if (is_vfp_live(arch_register_get_index(op1), live)) {
1398 /* first operand is live: bring second to tos.
1399 This means further, op1_idx != op2_idx. */
1400 assert(op1_idx != op2_idx);
1402 x87_create_fxch(state, n, op2_idx);
1407 /* res = op X tos, pop */
1408 dst = op_ia32_fcomrpJmp;
1411 /* both operands are dead here, check first for identity. */
1412 if (op1_idx == op2_idx) {
1413 /* identically, one pop needed */
1415 x87_create_fxch(state, n, op1_idx);
1419 /* res = tos X op, pop */
1420 dst = op_ia32_fcompJmp;
1423 /* different, move them to st and st(1) and pop both.
1424 The tricky part is to get one into st(1).*/
1425 else if (op2_idx == 1) {
1426 /* good, second operand is already in the right place, move the first */
1428 /* bring the first on top */
1429 x87_create_fxch(state, n, op1_idx);
1430 assert(op2_idx != 0);
1433 /* res = tos X op, pop, pop */
1434 dst = op_ia32_fcomppJmp;
1436 } else if (op1_idx == 1) {
1437 /* good, first operand is already in the right place, move the second */
1439 /* bring the first on top */
1440 x87_create_fxch(state, n, op2_idx);
1441 assert(op1_idx != 0);
/* res = op X tos, pop, pop (reversed double-pop compare) */
1444 dst = op_ia32_fcomrppJmp;
1447 /* if one is already the TOS, we need two fxch */
1449 /* first one is TOS, move to st(1) */
1450 x87_create_fxch(state, n, 1);
1451 assert(op2_idx != 1);
1453 x87_create_fxch(state, n, op2_idx);
1455 /* res = op X tos, pop, pop */
1456 dst = op_ia32_fcomrppJmp;
1458 } else if (op2_idx == 0) {
1459 /* second one is TOS, move to st(1) */
1460 x87_create_fxch(state, n, 1);
1461 assert(op1_idx != 1);
1463 x87_create_fxch(state, n, op1_idx);
1465 /* res = tos X op, pop, pop */
1466 dst = op_ia32_fcomppJmp;
1469 /* none of them is either TOS or st(1), 3 fxch needed */
1470 x87_create_fxch(state, n, op2_idx);
1471 assert(op1_idx != 0);
1472 x87_create_fxch(state, n, 1);
1474 x87_create_fxch(state, n, op1_idx);
1476 /* res = tos X op, pop, pop */
1477 dst = op_ia32_fcomppJmp;
1484 /* second operand is an address mode */
1485 if (is_vfp_live(arch_register_get_index(op1), live)) {
1486 /* first operand is live: bring it to TOS */
1488 x87_create_fxch(state, n, op1_idx);
/* memory compare, first operand stays on the stack */
1491 dst = op_ia32_fcomJmp;
1493 /* first operand is dead: bring it to tos */
1495 x87_create_fxch(state, n, op1_idx);
/* memory compare with pop of the dead first operand */
1498 dst = op_ia32_fcompJmp;
/* replace the virtual compare by the selected real fcom variant */
1503 x87_patch_insn(n, dst);
1504 assert(pop_cnt < 3);
1510 /* patch the operation */
1511 attr = get_ia32_x87_attr(n);
1512 op1 = &ia32_st_regs[op1_idx];
1515 op2 = &ia32_st_regs[op2_idx];
1518 attr->x87[2] = NULL;
1521 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1522 arch_register_get_name(op1), arch_register_get_name(op2)));
1524 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1525 arch_register_get_name(op1)));
1527 return NO_NODE_ADDED;
1528 } /* sim_fCondJmp */
/**
 * Simulate a be_Keep: pop any kept vfp value that is dead after the Keep
 * from the x87 stack, so the stack does not silently fill up.
 *
 * @param state the x87 state
 * @param node  the be_Keep node
 *
 * @return NODE_ADDED if an fpop was scheduled, NO_NODE_ADDED otherwise
 */
1531 int sim_Keep(x87_state *state, ir_node *node)
1534 const arch_register_t *op_reg;
1539 int node_added = NO_NODE_ADDED;
1541 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1543 arity = get_irn_arity(node);
1544 for(i = 0; i < arity; ++i) {
1545 op = get_irn_n(node, i);
1546 op_reg = arch_get_irn_register(state->sim->arch_env, op);
/* only virtual fp registers are of interest here */
1547 if(arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1550 reg_id = arch_register_get_index(op_reg);
1551 live = vfp_live_args_after(state->sim, node, 0);
1553 op_stack_idx = x87_on_stack(state, reg_id);
/* on the stack but dead afterwards -> pop it right after the Keep */
1554 if(op_stack_idx >= 0 && !is_vfp_live(reg_id, live)) {
1555 x87_create_fpop(state, sched_next(node), 1);
1556 node_added = NODE_ADDED;
1560 DB((dbg, LEVEL_1, "Stack after: "));
1561 DEBUG_ONLY(x87_dump_stack(state));
/**
 * Keep a float value artificially alive by attaching a be_Keep directly
 * after it in the schedule, so the simulator's bookkeeping stays valid
 * even when the node has lost all users.
 *
 * @param state the x87 state (used to reach the arch environment)
 * @param node  the float node that must stay alive
 */
1567 void keep_float_node_alive(x87_state *state, ir_node *node)
1573 const arch_register_class_t *cls;
1575 irg = get_irn_irg(node);
1576 block = get_nodes_block(node);
1577 cls = arch_get_irn_reg_class(state->sim->arch_env, node, -1);
/* the Keep consumes the node as its single input (array 'in' is
 * declared in a line not visible in this excerpt) */
1579 keep = be_new_Keep(cls, irg, block, 1, in);
1581 assert(sched_is_scheduled(node));
1582 sched_add_after(node, keep);
/**
1586 * Create a copy of a node. Recreate the node if it's a constant.
1588 * @param state the x87 state
1589 * @param n the node to be copied
1591 * @return the copy of n
 */
1593 static ir_node *create_Copy(x87_state *state, ir_node *n) {
1594 x87_simulator *sim = state->sim;
1595 ir_graph *irg = get_irn_irg(n);
1596 dbg_info *n_dbg = get_irn_dbg_info(n);
1597 ir_mode *mode = get_irn_mode(n);
1598 ir_node *block = get_nodes_block(n);
1599 ir_node *pred = get_irn_n(n, 0);
/* constructor used when the copied value is a reloadable fp constant */
1600 ir_node *(*cnstr)(dbg_info *, ir_graph *, ir_node *, ir_mode *) = NULL;
1602 const arch_register_t *out;
1603 const arch_register_t *op1;
1604 ia32_x87_attr_t *attr;
1606 /* Do not copy constants, recreate them. */
1607 switch (get_ia32_irn_opcode(pred)) {
1608 case iro_ia32_Unknown_VFP:
/* unknown values are materialized as 0.0 via fldz */
1610 cnstr = new_rd_ia32_fldz;
1613 cnstr = new_rd_ia32_fld1;
1615 case iro_ia32_fldpi:
1616 cnstr = new_rd_ia32_fldpi;
1618 case iro_ia32_fldl2e:
1619 cnstr = new_rd_ia32_fldl2e;
1621 case iro_ia32_fldl2t:
1622 cnstr = new_rd_ia32_fldl2t;
1624 case iro_ia32_fldlg2:
1625 cnstr = new_rd_ia32_fldlg2;
1627 case iro_ia32_fldln2:
1628 cnstr = new_rd_ia32_fldln2;
1634 out = x87_get_irn_register(sim, n);
1635 op1 = x87_get_irn_register(sim, pred);
1637 if (cnstr != NULL) {
1638 /* copy a constant: build a fresh load-constant node and push it */
1639 res = (*cnstr)(n_dbg, irg, block, mode);
1641 x87_push(state, arch_register_get_index(out), res);
1643 attr = get_ia32_x87_attr(res);
/* result lands in st(0) */
1644 attr->x87[2] = &ia32_st_regs[0];
/* non-constant: duplicate the stack slot with an fpushCopy (fld st(i)) */
1646 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1648 res = new_rd_ia32_fpushCopy(n_dbg, irg, block, pred, mode);
1650 x87_push(state, arch_register_get_index(out), res);
1652 attr = get_ia32_x87_attr(res);
1653 attr->x87[0] = &ia32_st_regs[op1_idx];
1654 attr->x87[2] = &ia32_st_regs[0];
1656 arch_set_irn_register(sim->arch_env, res, out);
/**
1662 * Simulate a be_Copy.
1664 * @param state the x87 state
1665 * @param n the node that should be simulated (and patched)
1667 * @return NO_NODE_ADDED
 */
1669 static int sim_Copy(x87_state *state, ir_node *n) {
1670 x87_simulator *sim = state->sim;
1672 const arch_register_t *out;
1673 const arch_register_t *op1;
1674 const arch_register_class_t *class;
1675 ir_node *node, *next;
1676 ia32_x87_attr_t *attr;
1677 int op1_idx, out_idx;
/* only copies of virtual fp registers concern the x87 simulator */
1680 class = arch_get_irn_reg_class(sim->arch_env, n, -1);
1681 if (class->regs != ia32_vfp_regs)
1684 pred = get_irn_n(n, 0);
1685 out = x87_get_irn_register(sim, n);
1686 op1 = x87_get_irn_register(sim, pred);
1687 live = vfp_live_args_after(sim, n, REGMASK(out));
1689 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1690 arch_register_get_name(op1), arch_register_get_name(out)));
1691 DEBUG_ONLY(vfp_dump_live(live));
1693 /* handle the infamous unknown value */
1694 if (arch_register_get_index(op1) == REG_VFP_UKNWN) {
1695 /* Operand is still live, a real copy. We need here an fpush that can
1696 hold a a register, so use the fpushCopy or recreate constants */
1697 node = create_Copy(state, n);
1699 assert(is_ia32_fldz(node));
1700 next = sched_next(n);
/* schedule the replacement where the Copy used to be */
1703 sched_add_before(next, node);
1705 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1706 arch_get_irn_register(sim->arch_env, node)->name));
1707 return NO_NODE_ADDED;
1710 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1712 if (is_vfp_live(arch_register_get_index(op1), live)) {
1713 ir_node *pred = get_irn_n(n, 0);
1715 /* Operand is still live, a real copy. We need here an fpush that can
1716 hold a a register, so use the fpushCopy or recreate constants */
1717 node = create_Copy(state, n);
1719 /* We have to make sure the old value doesn't go dead (which can happen
1720 * when we recreate constants). As the simulator expected that value in
1721 * the pred blocks. This is unfortunate as removing it would save us 1
1722 * instruction, but we would have to rerun all the simulation to get
1725 next = sched_next(n);
1728 sched_add_before(next, node);
1730 if(get_irn_n_edges(pred) == 0) {
1731 keep_float_node_alive(state, pred);
1734 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1735 arch_get_irn_register(sim->arch_env, node)->name));
/* operand is dead: either rename its stack slot, or kill 'out' first */
1737 out_idx = x87_on_stack(state, arch_register_get_index(out));
1739 if (out_idx >= 0 && out_idx != op1_idx) {
1740 /* Matze: out already on stack? how can this happen? */
1743 /* op1 must be killed and placed where out is */
1745 /* best case, simple remove and rename */
1746 x87_patch_insn(n, op_ia32_Pop);
1747 attr = get_ia32_x87_attr(n);
1748 attr->x87[0] = op1 = &ia32_st_regs[0];
/* after the pop every slot below shifts up by one */
1751 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1753 /* move op1 to tos, store and pop it */
1755 x87_create_fxch(state, n, op1_idx);
1758 x87_patch_insn(n, op_ia32_Pop);
1759 attr = get_ia32_x87_attr(n);
1760 attr->x87[0] = op1 = &ia32_st_regs[out_idx];
1763 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1765 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1767 /* just a virtual copy */
1768 x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
1769 /* don't remove the node to keep the verifier quiet :),
1770 the emitter won't emit any code for the node */
1773 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1774 exchange(n, get_unop_op(n));
1778 return NO_NODE_ADDED;
/**
1782 * Returns the result proj of the call
 *
 * Scans the out edges of the Call for the Proj carrying the first
 * call result (pn_be_Call_first_res).
 */
1784 static ir_node *get_call_result_proj(ir_node *call) {
1785 const ir_edge_t *edge;
1787 /* search the result proj */
1788 foreach_out_edge(call, edge) {
1789 ir_node *proj = get_edge_src_irn(edge);
1790 long pn = get_Proj_proj(proj);
1792 if (pn == pn_be_Call_first_res) {
1798 } /* get_call_result_proj */
/**
1801 * Simulate a be_Call.
1803 * @param state the x87 state
1804 * @param n the node that should be simulated
1805 * @param arch_env the architecture environment
1807 * @return NO_NODE_ADDED
 */
1809 static int sim_Call(x87_state *state, ir_node *n, const arch_env_t *arch_env)
1811 ir_type *call_tp = be_Call_get_type(n);
1815 const arch_register_t *reg;
1818 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1820 /* at the begin of a call the x87 state should be empty */
1821 assert(state->depth == 0 && "stack not empty before call");
/* nothing to do for calls without a result */
1823 if (get_method_n_ress(call_tp) <= 0)
1827 * If the called function returns a float, it is returned in st(0).
1828 * This even happens if the return value is NOT used.
1829 * Moreover, only one return result is supported.
1831 res_type = get_method_res_type(call_tp, 0);
1832 mode = get_type_mode(res_type);
1834 if (mode == NULL || !mode_is_float(mode))
1837 resproj = get_call_result_proj(n);
1838 assert(resproj != NULL);
/* model the ABI: the float result appears in st(0) after the call */
1840 reg = x87_get_irn_register(state->sim, resproj);
1841 x87_push(state, arch_register_get_index(reg), resproj);
1844 DB((dbg, LEVEL_1, "Stack after: "));
1845 DEBUG_ONLY(x87_dump_stack(state));
1847 return NO_NODE_ADDED;
/**
1851 * Simulate a be_Spill.
1853 * @param state the x87 state
1854 * @param n the node that should be simulated (and patched)
1856 * Should not happen, spills are lowered before x87 simulator see them.
 */
1858 static int sim_Spill(x87_state *state, ir_node *n) {
1859 assert(0 && "Spill not lowered");
/* unreachable in correct pipelines; fall back to a store simulation */
1860 return sim_fst(state, n);
/**
1864 * Simulate a be_Reload.
1866 * @param state the x87 state
1867 * @param n the node that should be simulated (and patched)
1869 * Should not happen, reloads are lowered before x87 simulator see them.
 */
1871 static int sim_Reload(x87_state *state, ir_node *n) {
1872 assert(0 && "Reload not lowered");
/* unreachable in correct pipelines; fall back to a load simulation */
1873 return sim_fld(state, n);
/**
1877 * Simulate a be_Return.
1879 * @param state the x87 state
1880 * @param n the node that should be simulated (and patched)
1882 * @return NO_NODE_ADDED
 */
1884 static int sim_Return(x87_state *state, ir_node *n) {
1885 int n_res = be_Return_get_n_rets(n);
1886 int i, n_float_res = 0;
1888 /* only floating point return values must resist on stack */
1889 for (i = 0; i < n_res; ++i) {
1890 ir_node *res = get_irn_n(n, be_pos_Return_val + i);
1892 if (mode_is_float(get_irn_mode(res)))
/* the simulated stack must contain exactly the float results */
1895 assert(x87_get_depth(state) == n_float_res);
1897 /* pop them virtually */
1898 for (i = n_float_res - 1; i >= 0; --i)
1901 return NO_NODE_ADDED;
/** Pairs an input register with the output register it is permuted to
 *  (struct tail not visible in this excerpt). */
1904 typedef struct _perm_data_t {
1905 const arch_register_t *in;
1906 const arch_register_t *out;
/**
1910 * Simulate a be_Perm.
1912 * @param state the x87 state
1913 * @param irn the node that should be simulated (and patched)
1915 * @return NO_NODE_ADDED
 */
1917 static int sim_Perm(x87_state *state, ir_node *irn) {
1919 x87_simulator *sim = state->sim;
1920 ir_node *pred = get_irn_n(irn, 0);
1922 const ir_edge_t *edge;
1924 /* handle only floating point Perms */
1925 if (! mode_is_float(get_irn_mode(pred)))
1926 return NO_NODE_ADDED;
1928 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1930 /* Perm is a pure virtual instruction on x87.
1931 All inputs must be on the FPU stack and are pairwise
1932 different from each other.
1933 So, all we need to do is to permutate the stack state. */
1934 n = get_irn_arity(irn);
1935 NEW_ARR_A(int, stack_pos, n);
1937 /* collect old stack positions */
1938 for (i = 0; i < n; ++i) {
1939 const arch_register_t *inreg = x87_get_irn_register(sim, get_irn_n(irn, i));
1940 int idx = x87_on_stack(state, arch_register_get_index(inreg));
1942 assert(idx >= 0 && "Perm argument not on x87 stack");
1946 /* now do the permutation */
1947 foreach_out_edge(irn, edge) {
1948 ir_node *proj = get_edge_src_irn(edge);
1949 const arch_register_t *out = x87_get_irn_register(sim, proj);
1950 long num = get_Proj_proj(proj);
1952 assert(0 <= num && num < n && "More Proj's than Perm inputs");
/* rebind the saved stack slot of input #num to the output register */
1953 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
1955 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1957 return NO_NODE_ADDED;
/**
 * Simulate a be_Barrier: materialize every Unknown_VFP input as a real
 * fldz so the value actually exists on the fpu stack.
 *
 * @param state the x87 state
 * @param node  the Barrier node
 *
 * @return NO_NODE_ADDED
 */
1960 static int sim_Barrier(x87_state *state, ir_node *node) {
1961 //const arch_env_t *arch_env = state->sim->arch_env;
1964 /* materialize unknown if needed */
1965 arity = get_irn_arity(node);
1966 for(i = 0; i < arity; ++i) {
1967 const arch_register_t *reg;
1970 ia32_x87_attr_t *attr;
1971 ir_node *in = get_irn_n(node, i);
1973 if(!is_ia32_Unknown_VFP(in))
1976 /* TODO: not completely correct... */
1977 reg = &ia32_vfp_regs[REG_VFP_UKNWN];
1980 block = get_nodes_block(node);
/* substitute the unknown by loading 0.0 onto the stack */
1981 zero = new_rd_ia32_fldz(NULL, current_ir_graph, block, mode_E);
1982 x87_push(state, arch_register_get_index(reg), zero);
1984 attr = get_ia32_x87_attr(zero);
1985 attr->x87[2] = &ia32_st_regs[0];
1987 sched_add_before(node, zero);
/* the Barrier now consumes the fldz instead of the Unknown */
1989 set_irn_n(node, i, zero);
1992 return NO_NODE_ADDED;
/**
1997 * Kill any dead registers at block start by popping them from the stack.
1999 * @param sim the simulator handle
2000 * @param block the current block
2001 * @param start_state the x87 state at the begin of the block
2003 * @return the x87 state after dead register killed
 */
2005 static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state) {
2006 x87_state *state = start_state;
2007 ir_node *first_insn = sched_first(block);
2008 ir_node *keep = NULL;
2009 unsigned live = vfp_live_args_after(sim, block, 0);
2011 int i, depth, num_pop;
/* build a bit mask of stack slots whose value is dead at block entry */
2014 depth = x87_get_depth(state);
2015 for (i = depth - 1; i >= 0; --i) {
2016 int reg = x87_get_st_reg(state, i);
2018 if (! is_vfp_live(reg, live))
2019 kill_mask |= (1 << i);
2023 /* create a new state, will be changed */
2024 state = x87_clone_state(sim, state);
2026 DB((dbg, LEVEL_1, "Killing deads:\n"));
2027 DEBUG_ONLY(vfp_dump_live(live));
2028 DEBUG_ONLY(x87_dump_stack(state));
2030 /* now kill registers */
2032 /* we can only kill from TOS, so bring them up */
2033 if (! (kill_mask & 1)) {
2034 /* search from behind, because we can to a double-pop */
2035 for (i = depth - 1; i >= 0; --i) {
2036 if (kill_mask & (1 << i)) {
2037 kill_mask &= ~(1 << i);
/* exchange a dead value into st(0) so it can be popped */
2044 x87_set_st(state, -1, keep, i);
2045 x87_create_fxch(state, first_insn, i);
2048 if ((kill_mask & 3) == 3) {
2049 /* we can do a double-pop */
2053 /* only a single pop */
/* pop the dead value(s) and shift the remaining kill mask */
2058 kill_mask >>= num_pop;
2059 keep = x87_create_fpop(state, first_insn, num_pop);
2064 } /* x87_kill_deads */
/**
2067 * If we have PhiEs with unknown operands then we have to make sure that some
2068 * value is actually put onto the stack.
 *
 * @param state      the x87 state at the end of pred_block
 * @param block      the block containing the Phis
 * @param pred_block the predecessor block feeding position @p pos
 * @param pos        the Phi predecessor position to fix
 */
2070 static void fix_unknown_phis(x87_state *state, ir_node *block,
2071 ir_node *pred_block, int pos)
2075 sched_foreach(block, node) {
2077 const arch_register_t *reg;
2078 ia32_x87_attr_t *attr;
2083 op = get_Phi_pred(node, pos);
2084 if(!is_ia32_Unknown_VFP(op))
2087 reg = arch_get_irn_register(state->sim->arch_env, node);
2089 /* create a zero at end of pred block */
2090 zero = new_rd_ia32_fldz(NULL, current_ir_graph, pred_block, mode_E);
2091 x87_push(state, arch_register_get_index(reg), zero);
2093 attr = get_ia32_x87_attr(zero);
2094 attr->x87[2] = &ia32_st_regs[0];
/* schedule the fldz before the block-terminating jump */
2096 assert(is_ia32_fldz(zero));
2097 sched_add_before(sched_last(pred_block), zero);
/* the Phi now receives a real value instead of the Unknown */
2099 set_Phi_pred(node, pos, zero);
/**
2104 * Run a simulation and fix all virtual instructions for a block.
2106 * @param sim the simulator handle
2107 * @param block the current block
 */
2109 static void x87_simulate_block(x87_simulator *sim, ir_node *block) {
2111 blk_state *bl_state = x87_get_bl_state(sim, block);
2112 x87_state *state = bl_state->begin;
2113 const ir_edge_t *edge;
2114 ir_node *start_block;
2116 assert(state != NULL);
2117 /* already processed? */
2118 if (bl_state->end != NULL)
2121 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2122 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2123 DEBUG_ONLY(x87_dump_stack(state));
2125 /* at block begin, kill all dead registers */
2126 state = x87_kill_deads(sim, block, state);
2127 /* create a new state, will be changed */
2128 state = x87_clone_state(sim, state);
2130 /* beware, n might change */
2131 for (n = sched_first(block); !sched_is_end(n); n = next) {
2134 ir_op *op = get_irn_op(n);
2136 next = sched_next(n);
/* nodes without a registered sim_func need no simulation */
2137 if (op->ops.generic == NULL)
2140 func = (sim_func)op->ops.generic;
/* dispatch to the opcode-specific simulator callback */
2143 node_inserted = (*func)(state, n);
2146 sim_func might have added an additional node after n,
2148 beware: n must not be changed by sim_func
2149 (i.e. removed from schedule) in this case
2151 if (node_inserted != NO_NODE_ADDED)
2152 next = sched_next(n);
2155 start_block = get_irg_start_block(get_irn_irg(block));
2157 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));
2159 /* check if the state must be shuffled */
2160 foreach_block_succ(block, edge) {
2161 ir_node *succ = get_edge_src_irn(edge);
2162 blk_state *succ_state;
2164 if (succ == start_block)
2167 succ_state = x87_get_bl_state(sim, succ);
2169 fix_unknown_phis(state, succ, block, get_edge_src_pos(edge));
2171 if (succ_state->begin == NULL) {
2172 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2173 DEBUG_ONLY(x87_dump_stack(state));
/* first visit: the successor inherits this end state */
2174 succ_state->begin = state;
2176 waitq_put(sim->worklist, succ);
2178 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2179 /* There is already a begin state for the successor, bad.
2180 Do the necessary permutations.
2181 Note that critical edges are removed, so this is always possible:
2182 If the successor has more than one possible input, then it must
2185 x87_shuffle(sim, block, state, succ, succ_state->begin);
2188 bl_state->end = state;
2189 } /* x87_simulate_block */
/**
2192 * Create a new x87 simulator.
2194 * @param sim a simulator handle, will be initialized
2195 * @param irg the current graph
2196 * @param arch_env the architecture environment
 */
2198 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg,
2199 const arch_env_t *arch_env)
2201 obstack_init(&sim->obst);
2202 sim->blk_states = pmap_create();
2203 sim->arch_env = arch_env;
2204 sim->n_idx = get_irg_last_idx(irg);
/* one liveness cache entry per node index, allocated on the obstack */
2205 sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);
2206 sim->isa = (ia32_isa_t *)arch_env->isa;
2208 DB((dbg, LEVEL_1, "--------------------------------\n"
2209 "x87 Simulator started for %+F\n", irg));
2211 /* set the generic function pointer of instruction we must simulate */
2212 clear_irp_opcodes_generic_func();
/* wire each opcode to its sim_* callback via the generic function slot;
 * most ASSOC invocations are not visible in this excerpt */
2214 #define ASSOC(op) (op_ ## op)->ops.generic = (op_func)(sim_##op)
2215 #define ASSOC_IA32(op) (op_ia32_v ## op)->ops.generic = (op_func)(sim_##op)
2216 #define ASSOC_BE(op) (op_be_ ## op)->ops.generic = (op_func)(sim_##op)
2230 ASSOC_IA32(fCmpJmp);
2242 } /* x87_init_simulator */
2245 * Destroy a x87 simulator.
2247 * @param sim the simulator handle
2249 static void x87_destroy_simulator(x87_simulator *sim) {
2250 pmap_destroy(sim->blk_states);
2251 obstack_free(&sim->obst, NULL);
2252 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2253 } /* x87_destroy_simulator */
2256 * Pre-block walker: calculate the liveness information for the block
2257 * and store it into the sim->live cache.
2259 static void update_liveness_walker(ir_node *block, void *data) {
2260 x87_simulator *sim = data;
2261 update_liveness(sim, block);
2262 } /* update_liveness_walker */
/**
2265 * Run a simulation and fix all virtual instructions for a graph.
2267 * @param env the architecture environment
2268 * @param irg the current graph
2270 * Needs a block-schedule.
 */
2272 void x87_simulate_graph(const arch_env_t *arch_env, be_irg_t *birg) {
2273 ir_node *block, *start_block;
2274 blk_state *bl_state;
2276 ir_graph *irg = be_get_birg_irg(birg);
2278 /* create the simulator */
2279 x87_init_simulator(&sim, irg, arch_env);
2281 start_block = get_irg_start_block(irg);
2282 bl_state = x87_get_bl_state(&sim, start_block);
2284 /* start with the empty state */
2285 bl_state->begin = empty;
/* worklist-driven traversal starting at the start block */
2288 sim.worklist = new_waitq();
2289 waitq_put(sim.worklist, start_block);
2291 be_assure_liveness(birg);
2292 sim.lv = be_get_birg_liveness(birg);
2293 // sim.lv = be_liveness(be_get_birg_irg(birg));
2294 be_liveness_assure_sets(sim.lv);
2296 /* Calculate the liveness for all nodes. We must precalculate this info,
2297 * because the simulator adds new nodes (possible before Phi nodes) which
2298 * would let a lazy calculation fail.
2299 * On the other hand we reduce the computation amount due to
2300 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2302 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
/* simulate blocks until the worklist drains (successors are enqueued
 * by x87_simulate_block) */
2306 block = waitq_get(sim.worklist);
2307 x87_simulate_block(&sim, block);
2308 } while (! waitq_empty(sim.worklist));
2311 del_waitq(sim.worklist);
2312 x87_destroy_simulator(&sim);
2313 } /* x87_simulate_graph */
2315 void ia32_init_x87(void) {
2316 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
2317 } /* ia32_init_x87 */