2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
36 #include "iredges_t.h"
47 #include "../belive_t.h"
48 #include "../besched_t.h"
49 #include "../benode_t.h"
50 #include "ia32_new_nodes.h"
51 #include "gen_ia32_new_nodes.h"
52 #include "gen_ia32_regalloc_if.h"
57 /* first and second binop index */
64 /* the store val index */
65 #define STORE_VAL_IDX 2
67 #define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
69 /** the debug handle */
70 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
72 /* Forward declaration. */
73 typedef struct _x87_simulator x87_simulator;
76 * An exchange template.
77 * Note that our virtual functions have the same inputs
78 * and attributes as the real ones, so we can simple exchange
80 * Further, x87 supports inverse instructions, so we can handle them.
82 typedef struct _exchange_tmpl {
83 ir_op *normal_op; /**< the normal one */
84 ir_op *reverse_op; /**< the reverse one if exists */
85 ir_op *normal_pop_op; /**< the normal one with tos pop */
86 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
90 * An entry on the simulated x87 stack.
92 typedef struct _st_entry {
93 int reg_idx; /**< the virtual register index of this stack value */
94 ir_node *node; /**< the node that produced this value */
100 typedef struct _x87_state {
101 st_entry st[N_x87_REGS]; /**< the register stack */
102 int depth; /**< the current stack depth */
103 int tos; /**< position of the tos */
104 x87_simulator *sim; /**< The simulator. */
107 /** An empty state, used for blocks without fp instructions. */
108 static x87_state _empty = { { {0, NULL}, }, 0, 0 };
109 static x87_state *empty = (x87_state *)&_empty;
112 NO_NODE_ADDED = 0, /**< No node was added. */
113 NODE_ADDED = 1 /**< A node was added by the simulator in the schedule. */
117 * The type of an instruction simulator function.
119 * @param state the x87 state
120 * @param n the node to be simulated
122 * @return NODE_ADDED if a node was added AFTER n in schedule,
125 typedef int (*sim_func)(x87_state *state, ir_node *n);
128 * A block state: Every block has a x87 state at the beginning and at the end.
130 typedef struct _blk_state {
131 x87_state *begin; /**< state at the begin or NULL if not assigned */
132 x87_state *end; /**< state at the end or NULL if not assigned */
135 #define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
137 /** liveness bitset for vfp registers. */
138 typedef unsigned char vfp_liveness;
143 struct _x87_simulator {
144 struct obstack obst; /**< An obstack for fast allocating. */
145 pmap *blk_states; /**< Map blocks to states. */
146 const arch_env_t *arch_env; /**< The architecture environment. */
147 be_lv_t *lv; /**< intrablock liveness. */
148 vfp_liveness *live; /**< Liveness information. */
149 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
150 waitq *worklist; /**< Worklist of blocks that must be processed. */
154 * Returns the current stack depth.
156 * @param state the x87 state
158 * @return the x87 stack depth
/* Accessor: report how many values are currently on the simulated x87 stack. */
160 static int x87_get_depth(const x87_state *state) {
/* NOTE(review): the body is missing from this excerpt (source numbering jumps 160 -> 162);
 * presumably it returns state->depth -- confirm against upstream. */
162 } /* x87_get_depth */
165 * Return the virtual register index at st(pos).
167 * @param state the x87 state
168 * @param pos a stack position
170 * @return the vfp register index that produced the value at st(pos)
172 static int x87_get_st_reg(const x87_state *state, int pos) {
173 assert(pos < state->depth);
174 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
175 } /* x87_get_st_reg */
178 * Return the node at st(pos).
180 * @param state the x87 state
181 * @param pos a stack position
183 * @return the IR node that produced the value at st(pos)
185 static ir_node *x87_get_st_node(const x87_state *state, int pos) {
186 assert(pos < state->depth);
187 return state->st[MASK_TOS(state->tos + pos)].node;
188 } /* x87_get_st_node */
192 * Dump the stack for debugging.
194 * @param state the x87 state
/* Debug helper: print the simulated stack from deepest slot down to the tos. */
196 static void x87_dump_stack(const x87_state *state) {
/* NOTE(review): the declaration of the loop variable (likely `int i;`) is missing
 * from this excerpt (numbering jumps 196 -> 199) -- confirm against upstream. */
199 for (i = state->depth - 1; i >= 0; --i) {
200 DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
201 x87_get_st_node(state, i)));
/* NOTE(review): closing brace of the for-loop is missing from this excerpt. */
203 DB((dbg, LEVEL_2, "<-- TOS\n"));
204 } /* x87_dump_stack */
205 #endif /* DEBUG_libfirm */
208 * Set a virtual register to st(pos).
210 * @param state the x87 state
211 * @param reg_idx the vfp register index that should be set
212 * @param node the IR node that produces the value of the vfp register
213 * @param pos the stack position where the new value should be entered
/* Overwrite the stack slot st(pos) with (reg_idx, node); no push/pop is performed. */
215 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos) {
216 assert(0 < state->depth);
/* Both fields of the addressed slot are rewritten; MASK_TOS wraps the ring index. */
217 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
218 state->st[MASK_TOS(state->tos + pos)].node = node;
220 DB((dbg, LEVEL_2, "After SET_REG: "));
221 DEBUG_ONLY(x87_dump_stack(state));
/* NOTE(review): closing brace missing from this excerpt. */
225 * Set the tos virtual register.
227 * @param state the x87 state
228 * @param reg_idx the vfp register index that should be set
229 * @param node the IR node that produces the value of the vfp register
/* Convenience wrapper: set the tos slot, i.e. x87_set_st() at position 0. */
231 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node) {
232 x87_set_st(state, reg_idx, node, 0);
/* NOTE(review): closing brace missing from this excerpt. */
236 * Swap st(0) with st(pos).
238 * @param state the x87 state
239 * @param pos the stack position to change the tos with
/* Simulate an fxch: swap the contents of st(0) and st(pos) in the state. */
241 static void x87_fxch(x87_state *state, int pos) {
243 assert(pos < state->depth);
/* NOTE(review): the declaration of `entry` (likely `st_entry entry;`) is missing
 * from this excerpt -- confirm against upstream. */
/* Classic three-step swap via the temporary `entry`. */
245 entry = state->st[MASK_TOS(state->tos + pos)];
246 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
247 state->st[MASK_TOS(state->tos)] = entry;
249 DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
253 * Convert a virtual register to the stack index.
255 * @param state the x87 state
256 * @param reg_idx the register vfp index
258 * @return the stack position where the register is stacked
259 * or -1 if the virtual register was not found
/* Linear search of the ring buffer for reg_idx; the doc above says it yields the
 * stack position or -1 if the register is not on the stack. */
261 static int x87_on_stack(const x87_state *state, int reg_idx) {
262 int i, tos = state->tos;
264 for (i = 0; i < state->depth; ++i)
265 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
/* NOTE(review): the two return statements (found: i / not found: -1) are missing
 * from this excerpt -- confirm against upstream. */
271 * Push a virtual register onto the stack; double pushes are allowed.
273 * @param state the x87 state
274 * @param reg_idx the register vfp index
275 * @param node the node that produces the value of the vfp register
/* Push (reg_idx, node) onto the simulated stack; the same register may already
 * be on the stack (double push allowed, unlike x87_push()). */
277 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node) {
278 assert(state->depth < N_x87_REGS && "stack overrun");
/* NOTE(review): the depth increment is missing from this excerpt (numbering
 * jumps 278 -> 281) -- confirm against upstream. */
/* tos moves downward (ring buffer), then the new tos slot is filled. */
281 state->tos = MASK_TOS(state->tos - 1);
282 state->st[state->tos].reg_idx = reg_idx;
283 state->st[state->tos].node = node;
285 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
289 * Push a virtual Register onto the stack, double pushes are NOT allowed.
291 * @param state the x87 state
292 * @param reg_idx the register vfp index
293 * @param node the node that produces the value of the vfp register
294 * (note: x87_push takes no dbl_push parameter; double pushes are handled by x87_push_dbl)
/* Push with a safety net: assert the register is not already on the stack,
 * then delegate to x87_push_dbl(). */
296 static void x87_push(x87_state *state, int reg_idx, ir_node *node) {
297 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
299 x87_push_dbl(state, reg_idx, node);
/* NOTE(review): closing brace missing from this excerpt. */
303 * Pop a virtual Register from the stack.
305 * @param state the x87 state
/* Simulate a pop: drop the tos value from the simulated stack. */
307 static void x87_pop(x87_state *state) {
308 assert(state->depth > 0 && "stack underrun");
/* NOTE(review): the depth decrement is missing from this excerpt (numbering
 * jumps 308 -> 311) -- confirm against upstream. */
/* tos moves upward in the ring buffer; the old slot becomes free. */
311 state->tos = MASK_TOS(state->tos + 1);
313 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
317 * Returns the block state of a block.
319 * @param sim the x87 simulator handle
320 * @param block the current block
322 * @return the block state
/* Get (or lazily create) the begin/end state pair stored for `block` in the
 * simulator's blk_states map. */
324 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block) {
325 pmap_entry *entry = pmap_find(sim->blk_states, block);
/* NOTE(review): the `if (! entry)` guard around the allocation path appears to be
 * missing from this excerpt -- confirm against upstream. */
/* First visit: allocate a fresh blk_state on the simulator obstack ... */
328 blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
329 bl_state->begin = NULL;
330 bl_state->end = NULL;
/* ... and cache it in the map for subsequent lookups. */
332 pmap_insert(sim->blk_states, block, bl_state);
336 return PTR_TO_BLKSTATE(entry->value);
337 } /* x87_get_bl_state */
340 * Creates a new x87 state.
342 * @param sim the x87 simulator handle
344 * @return a new x87 state
/* Allocate an uninitialized x87_state on the simulator obstack. */
346 static x87_state *x87_alloc_state(x87_simulator *sim) {
347 x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));
/* NOTE(review): lines between the allocation and the closing brace (likely
 * `res->sim = sim; return res;`) are missing from this excerpt -- confirm upstream. */
351 } /* x87_alloc_state */
356 * @param sim the x87 simulator handle
357 * @param src the x87 state that will be cloned
359 * @return a cloned copy of the src state
/* Allocate a new state and copy `src` into it wholesale (struct is POD-copyable). */
361 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src) {
362 x87_state *res = x87_alloc_state(sim);
364 memcpy(res, src, sizeof(*res));
/* NOTE(review): the `return res;` statement is missing from this excerpt. */
366 } /* x87_clone_state */
369 * Patch a virtual instruction into a x87 one and return
370 * the node representing the result value.
372 * @param n the IR node to patch
373 * @param op the x87 opcode to patch in
/* Replace n's (virtual) opcode by the concrete x87 opcode `op` and switch all
 * float results to mode_E, the x87-internal 80-bit mode. */
375 static ir_node *x87_patch_insn(ir_node *n, ir_op *op) {
376 ir_mode *mode = get_irn_mode(n);
/* NOTE(review): several lines are missing from this excerpt (numbering jumps),
 * including, presumably, the set_irn_op(n, op) call and the result bookkeeping
 * -- confirm against upstream before editing. */
381 if (mode == mode_T) {
382 /* patch all Proj's */
383 const ir_edge_t *edge;
385 foreach_out_edge(n, edge) {
386 ir_node *proj = get_edge_src_irn(edge);
388 mode = get_irn_mode(proj);
/* every float Proj gets the x87 register mode */
389 if (mode_is_float(mode)) {
391 set_irn_mode(proj, mode_E);
/* non-tuple float result: patch n's own mode */
395 } else if (mode_is_float(mode))
396 set_irn_mode(n, mode_E);
398 } /* x87_patch_insn */
401 * Returns the first Proj of a mode_T node having a given mode.
403 * @param n the mode_T node
404 * @param m the desired mode of the Proj
405 * @return The first Proj of mode @p m found or NULL.
/* Scan the out-edges of a mode_T node for the first Proj carrying mode m. */
407 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m) {
408 const ir_edge_t *edge;
410 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
412 foreach_out_edge(n, edge) {
413 ir_node *proj = get_edge_src_irn(edge);
414 if (get_irn_mode(proj) == m)
/* NOTE(review): the `return proj;` / trailing `return NULL;` lines are missing
 * from this excerpt -- confirm against upstream. */
419 } /* get_irn_Proj_for_mode */
422 * Wrap the arch_* function here so we can check for errors.
/* Checked register lookup: same as arch_get_irn_register() but asserts the
 * result belongs to the vfp register class. */
424 static INLINE const arch_register_t *x87_get_irn_register(x87_simulator *sim, const ir_node *irn) {
425 const arch_register_t *res;
427 res = arch_get_irn_register(sim->arch_env, irn);
428 assert(res->reg_class->regs == ia32_vfp_regs);
/* NOTE(review): the `return res;` statement is missing from this excerpt. */
430 } /* x87_get_irn_register */
432 /* -------------- x87 perm --------------- */
435 * Creates a fxch for shuffle.
437 * @param state the x87 state
438 * @param pos parameter for fxch
439 * @param block the block were fxch is inserted
441 * Creates a new fxch node and reroute the user of the old node
444 * @return the fxch node
/* Build a real fxch node for the end-of-block shuffle: exchange st(pos) with
 * st(0), both in the emitted attr and in the simulated state. */
446 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block) {
/* NOTE(review): the `ir_node *fxch;` declaration and some trailing lines (keep-alive /
 * return fxch) are missing from this excerpt -- confirm against upstream. */
448 ia32_x87_attr_t *attr;
450 fxch = new_rd_ia32_fxch(NULL, get_irn_irg(block), block, mode_E);
451 attr = get_ia32_x87_attr(fxch);
/* x87[0] = source slot, x87[2] = tos, matching the fxch semantics */
452 attr->x87[0] = &ia32_st_regs[pos];
453 attr->x87[2] = &ia32_st_regs[0];
/* mirror the exchange in the simulated stack */
457 x87_fxch(state, pos);
459 } /* x87_fxch_shuffle */
462 * Calculate the necessary permutations to reach dst_state.
464 * These permutations are done with fxch instructions and placed
465 * at the end of the block.
467 * Note that critical edges are removed here, so we need only
468 * a shuffle if the current block has only one successor.
470 * @param sim the simulator handle
471 * @param block the current block
472 * @param state the current x87 stack state, might be modified
473 * @param dst_block the destination block
474 * @param dst_state destination state
/* Permute the current stack state into dst_state by emitting fxch instructions
 * at the end of `block`. Differing slots are grouped into permutation cycles;
 * each cycle is realized by a chain of exchanges through the tos. */
478 static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block, x87_state *state, ir_node *dst_block, const x87_state *dst_state) {
479 int i, n_cycles, k, ri;
480 unsigned cycles[4], all_mask;
481 char cycle_idx[4][8];
482 ir_node *fxch, *before, *after;
484 assert(state->depth == dst_state->depth);
486 /* Some mathematics here:
487 If we have a cycle of length n that includes the tos,
488 we need n-1 exchange operations.
489 We can always add the tos and restore it, so we need
490 n+1 exchange operations for a cycle not containing the tos.
491 So, the maximum of needed operations is for a cycle of 7
492 not including the tos == 8.
493 This is the same number of ops we would need for using stores,
494 so exchange is cheaper (we save the loads).
495 On the other hand, we might need an additional exchange
496 in the next block to bring one operand on top, so the
497 number of ops in the first case is identical.
498 Further, no more than 4 cycles can exist (4 x 2).
/* NOTE(review): this excerpt is missing numerous lines throughout (the source
 * numbering is non-contiguous); control flow below is incomplete as shown --
 * confirm against upstream before any behavioral edit. */
/* one mask bit per occupied stack slot; cleared as slots are assigned to cycles */
500 all_mask = (1 << (state->depth)) - 1;
502 for (n_cycles = 0; all_mask; ++n_cycles) {
503 int src_idx, dst_idx;
505 /* find the first free slot */
506 for (i = 0; i < state->depth; ++i) {
507 if (all_mask & (1 << i)) {
508 all_mask &= ~(1 << i);
510 /* check if there are differences here */
511 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
517 /* no more cycles found */
/* start a new cycle at slot i and follow it through dst_state */
522 cycles[n_cycles] = (1 << i);
523 cycle_idx[n_cycles][k++] = i;
524 for (src_idx = i; ; src_idx = dst_idx) {
525 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
527 if ((all_mask & (1 << dst_idx)) == 0)
530 cycle_idx[n_cycles][k++] = dst_idx;
531 cycles[n_cycles] |= (1 << dst_idx);
532 all_mask &= ~(1 << dst_idx);
/* -1 terminates each cycle's index list */
534 cycle_idx[n_cycles][k] = -1;
538 /* no permutation needed */
542 /* Hmm: permutation needed */
543 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
544 DEBUG_ONLY(x87_dump_stack(state));
545 DB((dbg, LEVEL_2, " to\n"));
546 DEBUG_ONLY(x87_dump_stack(dst_state));
550 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
551 for (ri = 0; ri < n_cycles; ++ri) {
552 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
553 for (k = 0; cycle_idx[ri][k] != -1; ++k)
554 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
555 DB((dbg, LEVEL_2, "\n"));
562 * Find the place where the node must be inserted.
563 * We have only one successor block, so the last instruction should
/* fxchs are placed before the block-terminating control-flow op */
566 before = sched_last(block);
567 assert(is_cfop(before));
569 /* now do the permutations */
570 for (ri = 0; ri < n_cycles; ++ri) {
571 if ((cycles[ri] & 1) == 0) {
572 /* this cycle does not include the tos */
573 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
575 sched_add_after(after, fxch);
577 sched_add_before(before, fxch);
580 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
581 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
583 sched_add_after(after, fxch);
585 sched_add_before(before, fxch);
/* restore the tos if this cycle borrowed it */
588 if ((cycles[ri] & 1) == 0) {
589 /* this cycle does not include the tos */
590 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
591 sched_add_after(after, fxch);
598 * Create a fxch node before another node.
600 * @param state the x87 state
601 * @param n the node after the fxch
602 * @param pos exchange st(pos) with st(0)
603 * @param op_idx if >= 0, replace input op_idx of n with the fxch result
/* Insert an fxch node into the schedule directly before n, exchanging st(pos)
 * with st(0), and mirror the exchange in the simulated state. */
607 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos, int op_idx) {
/* NOTE(review): the `ir_node *fxch;` declaration and some lines (op_idx rewiring,
 * `return fxch;`) are missing from this excerpt -- confirm against upstream. */
609 ia32_x87_attr_t *attr;
610 ir_graph *irg = get_irn_irg(n);
611 ir_node *block = get_nodes_block(n);
/* update the simulation first ... */
613 x87_fxch(state, pos);
/* ... then build the real instruction */
615 fxch = new_rd_ia32_fxch(NULL, irg, block, mode_E);
616 attr = get_ia32_x87_attr(fxch);
617 attr->x87[0] = &ia32_st_regs[pos];
618 attr->x87[2] = &ia32_st_regs[0];
622 sched_add_before(n, fxch);
623 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
625 } /* x87_create_fxch */
628 * Create a fpush before node n.
630 * @param state the x87 state
631 * @param n the node after the fpush
632 * @param pos push st(pos) on stack
633 * @param op_idx replace input op_idx of n with the fpush result
/* Insert an fpush (fld st(pos)) before n, duplicating input op_idx's value on
 * the simulated stack (double push explicitly allowed via x87_push_dbl). */
635 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx) {
636 ir_node *fpush, *pred = get_irn_n(n, op_idx);
637 ia32_x87_attr_t *attr;
638 const arch_register_t *out = x87_get_irn_register(state->sim, pred);
/* simulate the duplicate of pred's register on the stack */
640 x87_push_dbl(state, arch_register_get_index(out), pred);
642 fpush = new_rd_ia32_fpush(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
643 attr = get_ia32_x87_attr(fpush);
644 attr->x87[0] = &ia32_st_regs[pos];
645 attr->x87[2] = &ia32_st_regs[0];
/* NOTE(review): lines between attr setup and scheduling (keep-alive / rewiring of
 * op_idx) are missing from this excerpt -- confirm against upstream. */
648 sched_add_before(n, fpush);
650 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
651 } /* x87_create_fpush */
654 * Create a fpop before node n.
656 * @param state the x87 state
657 * @param n the node after the fpop
658 * @param num pop 1 or 2 values
660 * @return the fpop node
/* Insert an fpop before n to discard stack values; `num` is documented above as
 * the number of values to pop (1 or 2). */
662 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
/* NOTE(review): the opening brace, `ir_node *fpop;` declaration, the x87_pop()
 * call(s) consuming `num`, and the `return fpop;` are missing from this excerpt
 * -- confirm against upstream. */
665 ia32_x87_attr_t *attr;
669 fpop = new_rd_ia32_fpop(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
670 attr = get_ia32_x87_attr(fpop);
/* fpop always works on the tos, hence all three attr slots name st(0) */
671 attr->x87[0] = &ia32_st_regs[0];
672 attr->x87[1] = &ia32_st_regs[0];
673 attr->x87[2] = &ia32_st_regs[0];
676 sched_add_before(n, fpop);
677 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
682 } /* x87_create_fpop */
685 * Creates an fldz before node n
687 * @param state the x87 state
688 * @param n the node after the fldz
690 * @return the fldz node
/* Insert an fldz before n, pushing a fresh 0.0 for vfp register `regidx` onto
 * the simulated stack. */
692 static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx) {
693 ir_graph *irg = get_irn_irg(n);
694 ir_node *block = get_nodes_block(n);
/* NOTE(review): the `ir_node *fldz;` declaration and the `return fldz;` are
 * missing from this excerpt -- confirm against upstream. */
697 fldz = new_rd_ia32_fldz(NULL, irg, block, mode_E);
699 sched_add_before(n, fldz);
700 DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));
/* strict push: regidx must not already be on the stack */
703 x87_push(state, regidx, fldz);
708 /* --------------------------------- liveness ------------------------------------------ */
711 * The liveness transfer function.
712 * Updates a live set over a single step from a given node to its predecessor.
713 * Everything defined at the node is removed from the set, the uses of the node get inserted.
715 * @param sim The simulator handle.
716 * @param irn The node at which liveness should be computed.
717 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
718 * the registers live after irn.
720 * @return The live bitset.
/* Backward liveness transfer over one node: clear the bit of the register the
 * node defines, then set the bits of all vfp registers it uses. */
722 static vfp_liveness vfp_liveness_transfer(x87_simulator *sim, ir_node *irn, vfp_liveness live)
/* NOTE(review): the opening brace, the `int i, n;` declaration and the final
 * `return live;` are missing from this excerpt -- confirm against upstream. */
725 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
726 const arch_env_t *arch_env = sim->arch_env;
/* definition kills the register */
728 if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
729 const arch_register_t *reg = x87_get_irn_register(sim, irn);
730 live &= ~(1 << arch_register_get_index(reg));
/* each float operand in the vfp class becomes live */
733 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
734 ir_node *op = get_irn_n(irn, i);
736 if (mode_is_float(get_irn_mode(op)) && arch_irn_consider_in_reg_alloc(arch_env, cls, op)) {
737 const arch_register_t *reg = x87_get_irn_register(sim, op);
738 live |= 1 << arch_register_get_index(reg);
742 } /* vfp_liveness_transfer */
745 * Put all live virtual registers at the end of a block into a bitset.
747 * @param sim the simulator handle
748 * @param lv the liveness information
749 * @param bl the block
751 * @return The live bitset at the end of this block
/* Collect the live-at-end vfp registers of `block` into a bitset using the
 * cached be_lv liveness information. */
753 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
/* NOTE(review): the opening brace, the `int i;` declaration, a `continue;` in the
 * filter branch and the final `return live;` are missing from this excerpt --
 * confirm against upstream. */
756 vfp_liveness live = 0;
757 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
758 const arch_env_t *arch_env = sim->arch_env;
759 const be_lv_t *lv = sim->lv;
761 be_lv_foreach(lv, block, be_lv_state_end, i) {
762 const arch_register_t *reg;
763 const ir_node *node = be_lv_get_irn(lv, block, i);
/* only vfp-class values participate */
764 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
767 reg = x87_get_irn_register(sim, node);
768 live |= 1 << arch_register_get_index(reg);
772 } /* vfp_liveness_end_of_block */
774 /** get the register mask from an arch_register */
775 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
778 * Return a bitset of argument registers which are live at the end of a node.
780 * @param sim the simulator handle
781 * @param pos the node
782 * @param kill kill mask for the output registers
784 * @return The live bitset.
/* Read the cached per-node liveness (filled by update_liveness) and mask out
 * the registers in `kill` (typically the node's own outputs). */
786 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
/* NOTE(review): the opening brace is missing from this excerpt. */
788 unsigned idx = get_irn_idx(pos);
790 assert(idx < sim->n_idx);
791 return sim->live[idx] & ~kill;
792 } /* vfp_live_args_after */
795 * Calculate the liveness for a whole block and cache it.
797 * @param sim the simulator handle
798 * @param lv the liveness handle
799 * @param block the block
/* Walk `block` backwards from its end state and cache, per node index, the set
 * of vfp registers live after that node in sim->live[]. */
801 static void update_liveness(x87_simulator *sim, ir_node *block) {
802 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
/* NOTE(review): the declarations of `irn`/`idx` and the Phi early-break body are
 * missing from this excerpt -- confirm against upstream. */
806 /* now iterate through the block backward and cache the results */
807 sched_foreach_reverse(block, irn) {
808 /* stop at the first Phi: this produces the live-in */
/* record liveness AFTER irn, then step the transfer function over irn */
812 idx = get_irn_idx(irn);
813 sim->live[idx] = live;
815 live = vfp_liveness_transfer(sim, irn, live);
/* the block's own index caches the live-in set */
817 idx = get_irn_idx(block);
818 sim->live[idx] = live;
819 } /* update_liveness */
822 * Returns true if a register is live in a set.
824 * @param reg_idx the vfp register index
825 * @param live a live bitset
827 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
831 * Dump liveness info.
833 * @param live the live bitset
/* Debug helper: print the names of all vfp registers set in the live bitset. */
835 static void vfp_dump_live(vfp_liveness live) {
/* NOTE(review): the `int i;` declaration is missing from this excerpt. */
838 DB((dbg, LEVEL_2, "Live after: "));
/* 8 == number of vfp registers representable in the bitset */
839 for (i = 0; i < 8; ++i) {
840 if (live & (1 << i)) {
841 DB((dbg, LEVEL_2, "vf%d ", i));
844 DB((dbg, LEVEL_2, "\n"));
845 } /* vfp_dump_live */
846 #endif /* DEBUG_libfirm */
848 /* --------------------------------- simulators ---------------------------------------- */
850 #define XCHG(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
853 * Simulate a virtual binop.
855 * @param state the x87 state
856 * @param n the node that should be simulated (and patched)
857 * @param tmpl the template containing the 4 possible x87 opcodes
859 * @return NO_NODE_ADDED
/* Simulate a virtual binary fp operation: decide, from operand liveness and
 * stack positions, which of the four x87 variants (normal / reverse, with or
 * without pop) to patch in, inserting fxch/fpush as needed. */
861 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl) {
/* NOTE(review): this excerpt is missing many lines (non-contiguous numbering):
 * the `ir_op *dst;` declaration, several op1_idx/op2_idx/out_idx adjustments
 * after fxch/fpush insertions, and the pop handling -- confirm against upstream
 * before any behavioral edit. */
862 int op2_idx = 0, op1_idx;
863 int out_idx, do_pop = 0;
864 ia32_x87_attr_t *attr;
865 ir_node *patched_insn;
867 x87_simulator *sim = state->sim;
868 const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, BINOP_IDX_1));
869 const arch_register_t *op2 = x87_get_irn_register(sim, get_irn_n(n, BINOP_IDX_2));
870 const arch_register_t *out = x87_get_irn_register(sim, n);
871 int reg_index_1 = arch_register_get_index(op1);
872 int reg_index_2 = arch_register_get_index(op2);
/* registers still needed after n (the output itself is excluded) */
873 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
875 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
876 arch_register_get_name(op1), arch_register_get_name(op2),
877 arch_register_get_name(out)));
878 DEBUG_ONLY(vfp_dump_live(live));
879 DB((dbg, LEVEL_1, "Stack before: "));
880 DEBUG_ONLY(x87_dump_stack(state));
882 op1_idx = x87_on_stack(state, reg_index_1);
883 assert(op1_idx >= 0);
885 if (reg_index_2 != REG_VFP_NOREG) {
886 /* second operand is a vfp register */
887 op2_idx = x87_on_stack(state, reg_index_2);
888 assert(op2_idx >= 0);
890 if (is_vfp_live(arch_register_get_index(op2), live)) {
891 /* Second operand is live. */
893 if (is_vfp_live(arch_register_get_index(op1), live)) {
894 /* Both operands are live: push the first one.
895 This works even for op1 == op2. */
896 x87_create_fpush(state, n, op1_idx, BINOP_IDX_2);
897 /* now do fxxx (tos=tos X op) */
901 dst = tmpl->normal_op;
903 /* Second live, first operand is dead here, bring it to tos. */
905 x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
910 /* now do fxxx (tos=tos X op) */
912 dst = tmpl->normal_op;
915 /* Second operand is dead. */
916 if (is_vfp_live(arch_register_get_index(op1), live)) {
917 /* First operand is live: bring second to tos. */
919 x87_create_fxch(state, n, op2_idx, BINOP_IDX_2);
924 /* now do fxxxr (tos = op X tos) */
926 dst = tmpl->reverse_op;
928 /* Both operands are dead here, pop them from the stack. */
931 /* Both are identical and on tos, no pop needed. */
932 /* here fxxx (tos = tos X tos) */
933 dst = tmpl->normal_op;
936 /* now do fxxxp (op = op X tos, pop) */
937 dst = tmpl->normal_pop_op;
941 } else if (op1_idx == 0) {
942 assert(op1_idx != op2_idx);
943 /* now do fxxxrp (op = tos X op, pop) */
944 dst = tmpl->reverse_pop_op;
948 /* Bring the second on top. */
949 x87_create_fxch(state, n, op2_idx, BINOP_IDX_2);
950 if (op1_idx == op2_idx) {
951 /* Both are identical and on tos now, no pop needed. */
954 /* use fxxx (tos = tos X tos) */
955 dst = tmpl->normal_op;
958 /* op2 is on tos now */
960 /* use fxxxp (op = op X tos, pop) */
961 dst = tmpl->normal_pop_op;
969 /* second operand is an address mode */
970 if (is_vfp_live(arch_register_get_index(op1), live)) {
971 /* first operand is live: push it here */
972 x87_create_fpush(state, n, op1_idx, BINOP_IDX_1);
974 /* use fxxx (tos = tos X mem) */
975 dst = tmpl->normal_op;
978 /* first operand is dead: bring it to tos */
980 x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
984 /* use fxxxp (tos = tos X mem) */
985 dst = tmpl->normal_op;
/* patch n into the chosen concrete opcode and record the result slot */
990 patched_insn = x87_patch_insn(n, dst);
991 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
996 /* patch the operation */
997 attr = get_ia32_x87_attr(n);
998 attr->x87[0] = op1 = &ia32_st_regs[op1_idx];
999 if (reg_index_2 != REG_VFP_NOREG) {
1000 attr->x87[1] = op2 = &ia32_st_regs[op2_idx];
1002 attr->x87[2] = out = &ia32_st_regs[out_idx];
1004 if (reg_index_2 != REG_VFP_NOREG) {
1005 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1006 arch_register_get_name(op1), arch_register_get_name(op2),
1007 arch_register_get_name(out)));
1009 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1010 arch_register_get_name(op1),
1011 arch_register_get_name(out)));
1014 return NO_NODE_ADDED;
1018 * Simulate a virtual Unop.
1020 * @param state the x87 state
1021 * @param n the node that should be simulated (and patched)
1022 * @param op the x87 opcode that will replace n's opcode
1024 * @return NO_NODE_ADDED
/* Simulate a virtual unary fp operation: bring the operand to the tos (pushing
 * a copy if it is still live, exchanging otherwise), then patch n to `op`. */
1026 static int sim_unop(x87_state *state, ir_node *n, ir_op *op) {
/* NOTE(review): several lines are missing from this excerpt (e.g. index updates
 * after fpush/fxch) -- confirm against upstream before behavioral edits. */
1027 int op1_idx, out_idx;
1028 x87_simulator *sim = state->sim;
1029 const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, UNOP_IDX));
1030 const arch_register_t *out = x87_get_irn_register(sim, n);
1031 ia32_x87_attr_t *attr;
1032 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1034 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1035 DEBUG_ONLY(vfp_dump_live(live));
1037 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1039 if (is_vfp_live(arch_register_get_index(op1), live)) {
1040 /* push the operand here */
1041 x87_create_fpush(state, n, op1_idx, UNOP_IDX);
1045 /* operand is dead, bring it to tos */
1047 x87_create_fxch(state, n, op1_idx, UNOP_IDX);
/* the result overwrites the tos */
1052 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1054 attr = get_ia32_x87_attr(n);
1055 attr->x87[0] = op1 = &ia32_st_regs[0];
1056 attr->x87[2] = out = &ia32_st_regs[0];
1057 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1059 return NO_NODE_ADDED;
1063 * Simulate a virtual Load instruction.
1065 * @param state the x87 state
1066 * @param n the node that should be simulated (and patched)
1067 * @param op the x87 opcode that will replace n's opcode
1069 * @return NO_NODE_ADDED
/* Simulate a virtual fp load: patch n to the concrete load opcode and push its
 * result register onto the simulated stack (result appears at st(0)). */
1071 static int sim_load(x87_state *state, ir_node *n, ir_op *op) {
1072 const arch_register_t *out = x87_get_irn_register(state->sim, n);
1073 ia32_x87_attr_t *attr;
1075 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
1076 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
/* patching must not change the assigned register */
1077 assert(out == x87_get_irn_register(state->sim, n));
1078 attr = get_ia32_x87_attr(n);
1079 attr->x87[2] = out = &ia32_st_regs[0];
1080 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1082 return NO_NODE_ADDED;
1086 * Rewire all users of @p old_val to @p new_val iff they are scheduled after @p store.
1088 * @param store The store
1089 * @param old_val The former value
1090 * @param new_val The new value
/* Redirect every user of old_val that is scheduled after `store` to use
 * new_val instead (used after an fstp+reload sequence replaces a value). */
1092 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val) {
/* NOTE(review): the `int i;` declaration and some closing braces are missing
 * from this excerpt -- confirm against upstream. */
1093 const ir_edge_t *edge, *ne;
/* safe iteration: set_irn_n below mutates the out-edge list */
1095 foreach_out_edge_safe(old_val, edge, ne) {
1096 ir_node *user = get_edge_src_irn(edge);
1098 if (! user || user == store)
1101 /* if the user is scheduled after the store: rewire */
1102 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1104 /* find the input of the user pointing to the old value */
1105 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1106 if (get_irn_n(user, i) == old_val)
1107 set_irn_n(user, i, new_val);
1111 } /* collect_and_rewire_users */
1114 * Simulate a virtual Store.
1116 * @param state the x87 state
1117 * @param n the node that should be simulated (and patched)
1118 * @param op the x87 store opcode
1119 * @param op_p the x87 store and pop opcode
/* Simulate a virtual fp store. The tricky case: the stored value stays live and
 * has mode_E, which only fstp (destructive) supports -- so either push a copy
 * first (stack not full) or fstp and reload the value with a new vfld. */
1121 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p) {
/* NOTE(review): many lines are missing from this excerpt (non-contiguous source
 * numbering): declarations of `mode`/`irg`, several else/closing braces, pop
 * bookkeeping and the final `return insn;` -- confirm against upstream before
 * any behavioral edit. */
1122 x87_simulator *sim = state->sim;
1123 ir_node *val = get_irn_n(n, STORE_VAL_IDX);
1124 const arch_register_t *op2 = x87_get_irn_register(sim, val);
1125 unsigned live = vfp_live_args_after(sim, n, 0);
1126 int insn = NO_NODE_ADDED;
1127 ia32_x87_attr_t *attr;
1128 int op2_reg_idx, op2_idx, depth;
1129 int live_after_node;
1132 op2_reg_idx = arch_register_get_index(op2);
1133 if (op2_reg_idx == REG_VFP_UKNWN) {
1134 /* just take any value from stack */
1135 if(state->depth > 0) {
1137 DEBUG_ONLY(op2 = NULL);
1138 live_after_node = 1;
1140 /* produce a new value which we will consume immediately */
1141 x87_create_fldz(state, n, op2_reg_idx);
1142 live_after_node = 0;
1143 op2_idx = x87_on_stack(state, op2_reg_idx);
1144 assert(op2_idx >= 0);
1147 op2_idx = x87_on_stack(state, op2_reg_idx);
1148 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1149 assert(op2_idx >= 0);
1150 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1153 mode = get_ia32_ls_mode(n);
1154 depth = x87_get_depth(state);
1156 if (live_after_node) {
1158 Problem: fst doesn't support mode_E (spills), only fstp does
1160 - stack not full: push value and fstp
1161 - stack full: fstp value and load again
1163 if (mode == mode_E) {
1164 if (depth < N_x87_REGS) {
1165 /* ok, we have a free register: push + fstp */
1166 x87_create_fpush(state, n, op2_idx, STORE_VAL_IDX);
1168 x87_patch_insn(n, op_p);
1170 ir_node *vfld, *mem, *block, *rproj, *mproj;
1173 /* stack full here: need fstp + load */
1175 x87_patch_insn(n, op_p);
1177 block = get_nodes_block(n);
1178 irg = get_irn_irg(n);
/* rebuild the value via a load from the same address the store wrote to */
1179 vfld = new_rd_ia32_vfld(NULL, irg, block, get_irn_n(n, 0), get_irn_n(n, 1), new_rd_NoMem(irg));
1181 /* copy all attributes */
1182 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1183 if (is_ia32_use_frame(n))
1184 set_ia32_use_frame(vfld);
1185 set_ia32_am_flavour(vfld, get_ia32_am_flavour(n));
1186 set_ia32_op_type(vfld, ia32_am_Source);
1187 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1188 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1189 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1191 rproj = new_r_Proj(irg, block, vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1192 mproj = new_r_Proj(irg, block, vfld, mode_M, pn_ia32_vfld_M);
1193 mem = get_irn_Proj_for_mode(n, mode_M);
1195 assert(mem && "Store memory not found");
/* the reloaded value keeps the original register assignment */
1197 arch_set_irn_register(sim->arch_env, rproj, op2);
1199 /* reroute all former users of the store memory to the load memory */
1200 edges_reroute(mem, mproj, irg);
1201 /* set the memory input of the load to the store memory */
1202 set_irn_n(vfld, 2, mem);
1204 sched_add_after(n, vfld);
1205 sched_add_after(vfld, rproj);
1207 /* rewire all users, scheduled after the store, to the loaded value */
1208 collect_and_rewire_users(n, val, rproj);
1213 /* we can only store the tos to memory */
1215 x87_create_fxch(state, n, op2_idx, STORE_VAL_IDX);
1217 /* mode != mode_E -> use normal fst */
1218 x87_patch_insn(n, op);
1221 /* we can only store the tos to memory */
1223 x87_create_fxch(state, n, op2_idx, STORE_VAL_IDX);
/* value is dead: use the popping variant unconditionally */
1226 x87_patch_insn(n, op_p);
1229 attr = get_ia32_x87_attr(n);
1230 attr->x87[1] = op2 = &ia32_st_regs[0];
1231 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
1237 * Simulate a virtual Phi.
1238 * Just for cosmetic reasons change the mode of Phi nodes to mode_E.
1240 * @param state the x87 state
1241 * @param n the node that should be simulated (and patched)
1242 * @param arch_env the architecture environment
1244 * @return NO_NODE_ADDED
1246 static int sim_Phi(x87_state *state, ir_node *n, const arch_env_t *arch_env) {
1247 ir_mode *mode = get_irn_mode(n);
/* Every value that lives on the x87 stack occupies a full 80-bit register,
 * so retype all floating point Phis to the widest mode (mode_E).  The x87
 * stack state itself is untouched and no node is added to the schedule. */
1249 if (mode_is_float(mode))
1250 set_irn_mode(n, mode_E);
1252 return NO_NODE_ADDED;
/* Generator macros: each one expands to a sim_<op> handler with the uniform
 * (x87_state *, ir_node *) signature that the simulator dispatch table
 * expects.  _GEN_BINOP builds the exchange_tmpl (normal, reverse, and the
 * tos-popping variants of both) and forwards to sim_binop.
 * NOTE(review): this listing elides the closing lines of the multi-line
 * macros; do not insert code between the backslash-continued lines. */
1255 #define _GEN_BINOP(op, rev) \
1256 static int sim_##op(x87_state *state, ir_node *n) { \
1257 exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
1258 return sim_binop(state, n, &tmpl); \
1261 #define GEN_BINOP(op) _GEN_BINOP(op, op)
1262 #define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
/* GEN_LOAD2/GEN_LOAD: generate a sim_<op> handler that delegates to
 * sim_load, patching the node to the real opcode op_ia32_<nop>. */
1264 #define GEN_LOAD2(op, nop) \
1265 static int sim_##op(x87_state *state, ir_node *n) { \
1266 return sim_load(state, n, op_ia32_##nop); \
1269 #define GEN_LOAD(op) GEN_LOAD2(op, op)
/* GEN_UNOP: generate a sim_<op> handler that delegates to sim_unop. */
1271 #define GEN_UNOP(op) \
1272 static int sim_##op(x87_state *state, ir_node *n) { \
1273 return sim_unop(state, n, op_ia32_##op); \
1276 #define GEN_STORE(op) \
1277 static int sim_##op(x87_state *state, ir_node *n) { \
1278 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
1303 * Simulate a fCondJmp.
1305 * @param state the x87 state
1306 * @param n the node that should be simulated (and patched)
1308 * @return NO_NODE_ADDED
1310 static int sim_fCondJmp(x87_state *state, ir_node *n) {
/* Strategy: locate both operands on the simulated x87 stack, insert fxch
 * nodes so the operands end up where a real fcom* instruction can reach
 * them, and pick the fcom variant (plain/reverse, with 0, 1 or 2 pops)
 * that additionally kills operands which are dead after the compare.
 * NOTE(review): this listing elides several lines (the declarations of
 * op1_idx/op2_idx/dst/pop_cnt, some else branches and closing braces). */
1314 ia32_x87_attr_t *attr;
1316 x87_simulator *sim = state->sim;
1317 const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, BINOP_IDX_1));
1318 const arch_register_t *op2 = x87_get_irn_register(sim, get_irn_n(n, BINOP_IDX_2));
1319 int reg_index_1 = arch_register_get_index(op1);
1320 int reg_index_2 = arch_register_get_index(op2);
1321 unsigned live = vfp_live_args_after(sim, n, 0);
1323 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1324 arch_register_get_name(op1), arch_register_get_name(op2)));
1325 DEBUG_ONLY(vfp_dump_live(live));
1326 DB((dbg, LEVEL_1, "Stack before: "));
1327 DEBUG_ONLY(x87_dump_stack(state));
/* the first operand must already sit somewhere on the simulated stack */
1329 op1_idx = x87_on_stack(state, reg_index_1);
1330 assert(op1_idx >= 0);
1332 /* BEWARE: check for comp a,a cases, they might happen */
1333 if (reg_index_2 != REG_VFP_NOREG) {
1334 /* second operand is a vfp register */
1335 op2_idx = x87_on_stack(state, reg_index_2);
1336 assert(op2_idx >= 0);
1338 if (is_vfp_live(arch_register_get_index(op2), live)) {
1339 /* second operand is live */
1341 if (is_vfp_live(arch_register_get_index(op1), live)) {
1342 /* both operands are live */
1345 /* res = tos X op */
1346 dst = op_ia32_fcomJmp;
1347 } else if (op2_idx == 0) {
1348 /* res = op X tos */
1349 dst = op_ia32_fcomrJmp;
1351 /* bring the first one to tos */
1352 x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
1356 /* res = tos X op */
1357 dst = op_ia32_fcomJmp;
1360 /* second live, first operand is dead here, bring it to tos.
1361 This means further, op1_idx != op2_idx. */
1362 assert(op1_idx != op2_idx);
1364 x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
1369 /* res = tos X op, pop */
1370 dst = op_ia32_fcompJmp;
1374 /* second operand is dead */
1375 if (is_vfp_live(arch_register_get_index(op1), live)) {
1376 /* first operand is live: bring second to tos.
1377 This means further, op1_idx != op2_idx. */
1378 assert(op1_idx != op2_idx);
1380 x87_create_fxch(state, n, op2_idx, BINOP_IDX_2);
1385 /* res = op X tos, pop */
1386 dst = op_ia32_fcomrpJmp;
1389 /* both operands are dead here, check first for identity. */
1390 if (op1_idx == op2_idx) {
1391 /* identically, one pop needed */
1393 x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
1397 /* res = tos X op, pop */
1398 dst = op_ia32_fcompJmp;
1401 /* different, move them to st and st(1) and pop both.
1402 The tricky part is to get one into st(1).*/
1403 else if (op2_idx == 1) {
1404 /* good, second operand is already in the right place, move the first */
1406 /* bring the first on top */
1407 x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
1408 assert(op2_idx != 0);
1411 /* res = tos X op, pop, pop */
1412 dst = op_ia32_fcomppJmp;
1414 } else if (op1_idx == 1) {
1415 /* good, first operand is already in the right place, move the second */
1417 /* bring the first on top */
1418 x87_create_fxch(state, n, op2_idx, BINOP_IDX_2);
1419 assert(op1_idx != 0);
1422 dst = op_ia32_fcomrppJmp;
1425 /* if one is already the TOS, we need two fxch */
1427 /* first one is TOS, move to st(1) */
1428 x87_create_fxch(state, n, 1, BINOP_IDX_1);
1429 assert(op2_idx != 1);
1431 x87_create_fxch(state, n, op2_idx, BINOP_IDX_2);
1433 /* res = op X tos, pop, pop */
1434 dst = op_ia32_fcomrppJmp;
1436 } else if (op2_idx == 0) {
1437 /* second one is TOS, move to st(1) */
1438 x87_create_fxch(state, n, 1, BINOP_IDX_2);
1439 assert(op1_idx != 1);
1441 x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
1443 /* res = tos X op, pop, pop */
1444 dst = op_ia32_fcomppJmp;
1447 /* none of them is either TOS or st(1), 3 fxch needed */
1448 x87_create_fxch(state, n, op2_idx, BINOP_IDX_2);
1449 assert(op1_idx != 0);
1450 x87_create_fxch(state, n, 1, BINOP_IDX_2);
1452 x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
1454 /* res = tos X op, pop, pop */
1455 dst = op_ia32_fcomppJmp;
1462 /* second operand is an address mode */
1463 if (is_vfp_live(arch_register_get_index(op1), live)) {
1464 /* first operand is live: bring it to TOS */
1466 x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
1469 dst = op_ia32_fcomJmp;
1471 /* first operand is dead: bring it to tos */
1473 x87_create_fxch(state, n, op1_idx, BINOP_IDX_1);
1476 dst = op_ia32_fcompJmp;
/* replace the virtual opcode by the chosen concrete fcom variant;
 * at most two values may be popped by the instruction */
1481 x87_patch_insn(n, dst);
1482 assert(pop_cnt < 3);
1488 /* patch the operation */
1489 attr = get_ia32_x87_attr(n);
1490 op1 = &ia32_st_regs[op1_idx];
1493 op2 = &ia32_st_regs[op2_idx];
1496 attr->x87[2] = NULL;
1499 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1500 arch_register_get_name(op1), arch_register_get_name(op2)));
1502 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1503 arch_register_get_name(op1)));
1505 return NO_NODE_ADDED;
1506 } /* sim_fCondJmp */
/**
 * Simulate a be_Keep: if the kept operand is a vfp register that is still on
 * the simulated stack but dead after the Keep, schedule an fpop behind the
 * node to remove it.  Non-vfp operands are ignored.
 * NOTE(review): declarations of op/reg_id/live/op_stack_idx and some braces
 * are elided in this listing.
 */
1509 int sim_Keep(x87_state *state, ir_node *node)
1512 const arch_register_t *op_reg;
1517 op = get_irn_n(node, 0);
1518 op_reg = arch_get_irn_register(state->sim->arch_env, op);
/* only the vfp register class lives on the x87 stack */
1519 if(arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1520 return NO_NODE_ADDED;
1522 reg_id = arch_register_get_index(op_reg);
1523 live = vfp_live_args_after(state->sim, node, 0);
1525 op_stack_idx = x87_on_stack(state, reg_id);
/* still on the stack but dead after the Keep: pop it after this node */
1526 if(op_stack_idx >= 0 && !is_vfp_live(reg_id, live)) {
1527 x87_create_fpop(state, sched_next(node), 1);
1531 return NO_NODE_ADDED;
/**
 * Keep a floating point node alive by attaching a be_Keep user to it and
 * scheduling that Keep directly after the node.
 * NOTE(review): the declarations of irg/block/keep and of the `in` array
 * passed to be_new_Keep are elided in this listing; presumably `in` holds
 * exactly { node } since the Keep is built with arity 1 — confirm upstream.
 */
1535 void keep_float_node_alive(x87_state *state, ir_node *node)
1541 const arch_register_class_t *cls;
1543 irg = get_irn_irg(node);
1544 block = get_nodes_block(node);
1545 cls = arch_get_irn_reg_class(state->sim->arch_env, node, -1);
1547 keep = be_new_Keep(cls, irg, block, 1, in);
/* the node must already be in the schedule so the Keep can follow it */
1549 assert(sched_is_scheduled(node));
1550 sched_add_after(node, keep);
1554 * Create a copy of a node. Recreate the node if it's a constant.
1556 * @param state the x87 state
1557 * @param n the node to be copied
1559 * @return the copy of n
1561 static ir_node *create_Copy(x87_state *state, ir_node *n) {
1562 x87_simulator *sim = state->sim;
1563 ir_graph *irg = get_irn_irg(n);
1564 dbg_info *n_dbg = get_irn_dbg_info(n);
1565 ir_mode *mode = get_irn_mode(n);
1566 ir_node *block = get_nodes_block(n);
1567 ir_node *pred = get_irn_n(n, 0);
/* constructor used to recreate a load-constant instead of copying it;
 * stays NULL if the predecessor is not one of the fld* constants */
1568 ir_node *(*cnstr)(dbg_info *, ir_graph *, ir_node *, ir_mode *) = NULL;
1570 const arch_register_t *out;
1571 const arch_register_t *op1;
1572 ia32_x87_attr_t *attr;
1574 /* Do not copy constants, recreate them. */
/* NOTE(review): several case labels / break statements of this switch are
 * elided in this listing (e.g. the fldz/fld1 labels). */
1575 switch (get_ia32_irn_opcode(pred)) {
1576 case iro_ia32_Unknown_VFP:
1578 cnstr = new_rd_ia32_fldz;
1581 cnstr = new_rd_ia32_fld1;
1583 case iro_ia32_fldpi:
1584 cnstr = new_rd_ia32_fldpi;
1586 case iro_ia32_fldl2e:
1587 cnstr = new_rd_ia32_fldl2e;
1589 case iro_ia32_fldl2t:
1590 cnstr = new_rd_ia32_fldl2t;
1592 case iro_ia32_fldlg2:
1593 cnstr = new_rd_ia32_fldlg2;
1595 case iro_ia32_fldln2:
1596 cnstr = new_rd_ia32_fldln2;
1602 out = x87_get_irn_register(sim, n);
1603 op1 = x87_get_irn_register(sim, pred);
1605 if (cnstr != NULL) {
1606 /* copy a constant */
1607 res = (*cnstr)(n_dbg, irg, block, mode);
/* the recreated constant pushes a new value: record it in the state and
 * mark st(0) as the written register in the x87 attribute */
1609 x87_push(state, arch_register_get_index(out), res);
1611 attr = get_ia32_x87_attr(res);
1612 attr->x87[2] = &ia32_st_regs[0];
1614 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
/* a real copy: fld st(op1_idx), i.e. push a duplicate of the operand */
1616 res = new_rd_ia32_fpushCopy(n_dbg, irg, block, pred, mode);
1618 x87_push(state, arch_register_get_index(out), res);
1620 attr = get_ia32_x87_attr(res);
1621 attr->x87[0] = &ia32_st_regs[op1_idx];
1622 attr->x87[2] = &ia32_st_regs[0];
1624 arch_set_irn_register(sim->arch_env, res, out);
1630 * Simulate a be_Copy.
1632 * @param state the x87 state
1633 * @param n the node that should be simulated (and patched)
1635 * @return NO_NODE_ADDED
/* NOTE(review): declarations of sim/pred/live and several braces and else
 * lines are elided in this listing. */
1637 static int sim_Copy(x87_state *state, ir_node *n) {
1640 const arch_register_t *out;
1641 const arch_register_t *op1;
1642 ir_node *node, *next;
1643 ia32_x87_attr_t *attr;
1644 int op1_idx, out_idx;
1647 ir_mode *mode = get_irn_mode(n);
/* only copies of floating point values concern the x87 simulator */
1649 if (!mode_is_float(mode))
1653 pred = get_irn_n(n, 0);
1654 out = x87_get_irn_register(sim, n);
1655 op1 = x87_get_irn_register(sim, pred);
1656 live = vfp_live_args_after(sim, n, REGMASK(out));
1658 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1659 arch_register_get_name(op1), arch_register_get_name(out)));
1660 DEBUG_ONLY(vfp_dump_live(live));
1662 /* handle the infamous unknown value */
1663 if (arch_register_get_index(op1) == REG_VFP_UKNWN) {
1664 /* Operand is still live, a real copy. We need here an fpush that can
1665 hold a a register, so use the fpushCopy or recreate constants */
1666 node = create_Copy(state, n);
/* the unknown value is materialized as an fldz by create_Copy */
1668 assert(is_ia32_fldz(node));
1669 next = sched_next(n);
1672 sched_add_before(next, node);
1674 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1675 arch_get_irn_register(sim->arch_env, node)->name));
1676 return NO_NODE_ADDED;
1679 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1681 if (is_vfp_live(arch_register_get_index(op1), live)) {
1682 ir_node *pred = get_irn_n(n, 0);
1684 /* Operand is still live, a real copy. We need here an fpush that can
1685 hold a a register, so use the fpushCopy or recreate constants */
1686 node = create_Copy(state, n);
1688 /* We have to make sure the old value doesn't go dead (which can happen
1689 * when we recreate constants). As the simulator expected that value in
1690 * the pred blocks. This is unfortunate as removing it would save us 1
1691 * instruction, but we would have to rerun all the simulation to get
1694 next = sched_next(n);
1697 sched_add_before(next, node);
/* the predecessor lost its last user: attach a Keep so it stays alive */
1699 if(get_irn_n_edges(pred) == 0) {
1700 keep_float_node_alive(state, pred);
1703 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1704 arch_get_irn_register(sim->arch_env, node)->name));
1706 out_idx = x87_on_stack(state, arch_register_get_index(out));
1708 if (out_idx >= 0 && out_idx != op1_idx) {
1709 /* Matze: out already on stack? how can this happen? */
1712 /* op1 must be killed and placed where out is */
1714 /* best case, simple remove and rename */
1715 x87_patch_insn(n, op_ia32_Pop);
1716 attr = get_ia32_x87_attr(n);
1717 attr->x87[0] = op1 = &ia32_st_regs[0];
1720 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1722 /* move op1 to tos, store and pop it */
1724 x87_create_fxch(state, n, op1_idx, 0);
1727 x87_patch_insn(n, op_ia32_Pop);
1728 attr = get_ia32_x87_attr(n);
1729 attr->x87[0] = op1 = &ia32_st_regs[out_idx];
1732 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1734 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1736 /* just a virtual copy */
/* operand dies here: simply rename its stack slot to the output register,
 * no machine instruction is needed */
1737 x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
1738 /* don't remove the node to keep the verifier quiet :),
1739 the emitter won't emit any code for the node */
1742 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1743 exchange(n, get_unop_op(n));
1747 return NO_NODE_ADDED;
1751 * Returns the result proj of the call, or NULL if the result is not used
/* Walks the out-edges twice: first to find the Proj carrying the first call
 * result, then to check whether that Proj has any user besides a be_Keep.
 * NOTE(review): the bodies storing resproj / returning it and the closing
 * braces are elided in this listing. */
1753 static ir_node *get_call_result_proj(ir_node *call) {
1754 const ir_edge_t *edge;
1755 ir_node *resproj = NULL;
1757 /* search the result proj */
1758 foreach_out_edge(call, edge) {
1759 ir_node *proj = get_edge_src_irn(edge);
1760 long pn = get_Proj_proj(proj);
1762 if (pn == pn_be_Call_first_res) {
1767 if (resproj == NULL) {
1771 /* the result proj is connected to a Keep and maybe other nodes */
1772 foreach_out_edge(resproj, edge) {
1773 ir_node *pred = get_edge_src_irn(edge);
/* any non-Keep user means the result is genuinely used */
1774 if (!be_is_Keep(pred)) {
1779 /* only be_Keep found, so result is not used */
1781 } /* get_call_result_proj */
1784 * Simulate a be_Call.
1786 * @param state the x87 state
1787 * @param n the node that should be simulated
1788 * @param arch_env the architecture environment
1790 * @return NO_NODE_ADDED
1792 static int sim_Call(x87_state *state, ir_node *n, const arch_env_t *arch_env) {
1793 ir_type *call_tp = be_Call_get_type(n);
1797 const arch_register_t *reg;
1799 /* at the begin of a call the x87 state should be empty */
1800 assert(state->depth == 0 && "stack not empty before call");
/* nothing to do for calls without results */
1802 if (get_method_n_ress(call_tp) <= 0)
1803 return NO_NODE_ADDED;
1806 * If the called function returns a float, it is returned in st(0).
1807 * This even happens if the return value is NOT used.
1808 * Moreover, only one return result is supported.
1810 res_type = get_method_res_type(call_tp, 0);
1811 mode = get_type_mode(res_type);
1813 if (mode == NULL || !mode_is_float(mode))
1814 return NO_NODE_ADDED;
1816 resproj = get_call_result_proj(n);
1817 if (resproj == NULL)
1818 return NO_NODE_ADDED;
/* model the callee leaving its float result in st(0): push the result
 * Proj's virtual register onto the simulated stack */
1820 reg = x87_get_irn_register(state->sim, resproj);
1821 x87_push(state, arch_register_get_index(reg), resproj);
1823 return NO_NODE_ADDED;
1827 * Simulate a be_Spill.
1829 * @param state the x87 state
1830 * @param n the node that should be simulated (and patched)
1832 * Should not happen, spills are lowered before x87 simulator see them.
/* Defensive handler: asserts in debug builds, and in release builds falls
 * back to treating the Spill like an fst (store). */
1834 static int sim_Spill(x87_state *state, ir_node *n) {
1835 assert(0 && "Spill not lowered");
1836 return sim_fst(state, n);
1840 * Simulate a be_Reload.
1842 * @param state the x87 state
1843 * @param n the node that should be simulated (and patched)
1845 * Should not happen, reloads are lowered before x87 simulator see them.
/* Defensive handler, mirror of sim_Spill: asserts in debug builds, falls
 * back to treating the Reload like an fld (load) otherwise. */
1847 static int sim_Reload(x87_state *state, ir_node *n) {
1848 assert(0 && "Reload not lowered");
1849 return sim_fld(state, n);
1853 * Simulate a be_Return.
1855 * @param state the x87 state
1856 * @param n the node that should be simulated (and patched)
1858 * @return NO_NODE_ADDED
1860 static int sim_Return(x87_state *state, ir_node *n) {
1861 int n_res = be_Return_get_n_rets(n);
1862 int i, n_float_res = 0;
1864 /* only floating point return values must resist on stack */
1865 for (i = 0; i < n_res; ++i) {
1866 ir_node *res = get_irn_n(n, be_pos_Return_val + i);
1868 if (mode_is_float(get_irn_mode(res)))
/* at the Return, exactly the float results may remain on the x87 stack */
1871 assert(x87_get_depth(state) == n_float_res);
1873 /* pop them virtually */
/* empty the simulated stack; the caller consumes the values in st(0).. so
 * no real pop instructions are required here */
1874 for (i = n_float_res - 1; i >= 0; --i)
1877 return NO_NODE_ADDED;
/* Pairing of registers for a Perm; NOTE(review): the closing of this struct
 * (and its typedef name) is elided in this listing. */
1880 typedef struct _perm_data_t {
1881 const arch_register_t *in; /**< register on the input side */
1882 const arch_register_t *out; /**< register on the output side */
1886 * Simulate a be_Perm.
1888 * @param state the x87 state
1889 * @param irn the node that should be simulated (and patched)
1891 * @return NO_NODE_ADDED
/* NOTE(review): declarations of i/n/stack_pos and the stack_pos[i] = idx
 * assignment are elided in this listing. */
1893 static int sim_Perm(x87_state *state, ir_node *irn) {
1895 x87_simulator *sim = state->sim;
1896 ir_node *pred = get_irn_n(irn, 0);
1898 const ir_edge_t *edge;
1900 /* handle only floating point Perms */
1901 if (! mode_is_float(get_irn_mode(pred)))
1902 return NO_NODE_ADDED;
1904 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1906 /* Perm is a pure virtual instruction on x87.
1907 All inputs must be on the FPU stack and are pairwise
1908 different from each other.
1909 So, all we need to do is to permutate the stack state. */
1910 n = get_irn_arity(irn);
1911 NEW_ARR_A(int, stack_pos, n);
1913 /* collect old stack positions */
1914 for (i = 0; i < n; ++i) {
1915 const arch_register_t *inreg = x87_get_irn_register(sim, get_irn_n(irn, i));
1916 int idx = x87_on_stack(state, arch_register_get_index(inreg));
1918 assert(idx >= 0 && "Perm argument not on x87 stack");
1922 /* now do the permutation */
/* rename each stack slot: the slot that held input num now yields the
 * register/node of the corresponding output Proj */
1923 foreach_out_edge(irn, edge) {
1924 ir_node *proj = get_edge_src_irn(edge);
1925 const arch_register_t *out = x87_get_irn_register(sim, proj);
1926 long num = get_Proj_proj(proj);
1928 assert(0 <= num && num < n && "More Proj's than Perm inputs");
1929 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
1931 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1933 return NO_NODE_ADDED;
1937 * Kill any dead registers at block start by popping them from the stack.
1939 * @param sim the simulator handle
1940 * @param block the current block
1941 * @param start_state the x87 state at the begin of the block
1943 * @return the x87 state after dead register killed
/* NOTE(review): declarations of kill_mask, the early-return for an empty
 * kill_mask, and parts of the pop loop are elided in this listing. */
1945 static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state) {
1946 x87_state *state = start_state;
1947 ir_node *first_insn = sched_first(block);
1948 ir_node *keep = NULL;
1949 unsigned live = vfp_live_args_after(sim, block, 0);
1951 int i, depth, num_pop;
/* build a bitmask of stack slots whose register is not live into the block */
1954 depth = x87_get_depth(state);
1955 for (i = depth - 1; i >= 0; --i) {
1956 int reg = x87_get_st_reg(state, i);
1958 if (! is_vfp_live(reg, live))
1959 kill_mask |= (1 << i);
1963 /* create a new state, will be changed */
1964 state = x87_clone_state(sim, state);
1966 DB((dbg, LEVEL_1, "Killing deads:\n"));
1967 DEBUG_ONLY(vfp_dump_live(live));
1968 DEBUG_ONLY(x87_dump_stack(state));
1970 /* now kill registers */
1972 /* we can only kill from TOS, so bring them up */
1973 if (! (kill_mask & 1)) {
1974 /* search from behind, because we can to a double-pop */
1975 for (i = depth - 1; i >= 0; --i) {
1976 if (kill_mask & (1 << i)) {
1977 kill_mask &= ~(1 << i);
/* mark the slot dead (-1) and fxch the dead value up to TOS */
1984 x87_set_st(state, -1, keep, i);
1985 x87_create_fxch(state, first_insn, i, -1);
1988 if ((kill_mask & 3) == 3) {
1989 /* we can do a double-pop */
1993 /* only a single pop */
1998 kill_mask >>= num_pop;
1999 keep = x87_create_fpop(state, first_insn, num_pop);
2004 } /* x87_kill_deads */
2007 * Run a simulation and fix all virtual instructions for a block.
2009 * @param sim the simulator handle
2010 * @param block the current block
/* NOTE(review): declarations of n/next/func/node_inserted and some early
 * returns/continues and braces are elided in this listing. */
2012 static void x87_simulate_block(x87_simulator *sim, ir_node *block) {
2014 blk_state *bl_state = x87_get_bl_state(sim, block);
2015 x87_state *state = bl_state->begin;
2016 const ir_edge_t *edge;
2017 ir_node *start_block;
2019 assert(state != NULL);
2020 /* already processed? */
2021 if (bl_state->end != NULL)
2024 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2025 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2026 DEBUG_ONLY(x87_dump_stack(state));
2028 /* at block begin, kill all dead registers */
2029 state = x87_kill_deads(sim, block, state);
2031 /* beware, n might change */
2032 for (n = sched_first(block); !sched_is_end(n); n = next) {
2035 ir_op *op = get_irn_op(n);
2037 next = sched_next(n);
/* only opcodes registered in x87_init_simulator carry a sim handler */
2038 if (op->ops.generic == NULL)
2041 func = (sim_func)op->ops.generic;
2043 /* have work to do */
2044 if (state == bl_state->begin) {
2045 /* create a new state, will be changed */
2046 state = x87_clone_state(sim, state);
2050 node_inserted = (*func)(state, n);
2053 sim_func might have added an additional node after n,
2055 beware: n must not be changed by sim_func
2056 (i.e. removed from schedule) in this case
2058 if (node_inserted != NO_NODE_ADDED)
2059 next = sched_next(n);
2062 start_block = get_irg_start_block(get_irn_irg(block));
2064 /* check if the state must be shuffled */
2065 foreach_block_succ(block, edge) {
2066 ir_node *succ = get_edge_src_irn(edge);
2067 blk_state *succ_state;
2069 if (succ == start_block)
2072 succ_state = x87_get_bl_state(sim, succ);
/* first predecessor reaching succ fixes its entry state; later ones must
 * shuffle their exit state to match */
2074 if (succ_state->begin == NULL) {
2075 succ_state->begin = state;
2076 waitq_put(sim->worklist, succ);
2078 /* There is already a begin state for the successor, bad.
2079 Do the necessary permutations.
2080 Note that critical edges are removed, so this is always possible:
2081 If the successor has more than one possible input, then it must
2084 x87_shuffle(sim, block, state, succ, succ_state->begin);
2087 bl_state->end = state;
2089 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));
2090 } /* x87_simulate_block */
2093 * Create a new x87 simulator.
2095 * @param sim a simulator handle, will be initialized
2096 * @param irg the current graph
2097 * @param arch_env the architecture environment
2099 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg,
2100 const arch_env_t *arch_env)
2102 obstack_init(&sim->obst);
2103 sim->blk_states = pmap_create();
2104 sim->arch_env = arch_env;
2105 sim->n_idx = get_irg_last_idx(irg);
/* per-node liveness cache, indexed by node index (filled later by
 * update_liveness_walker) */
2106 sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);
2108 DB((dbg, LEVEL_1, "--------------------------------\n"
2109 "x87 Simulator started for %+F\n", irg));
2111 /* set the generic function pointer of instruction we must simulate */
2112 clear_irp_opcodes_generic_func();
/* ASSOC* hook a sim_<op> handler into the generic function pointer of an
 * opcode; NOTE(review): most of the ASSOC table entries are elided in this
 * listing, only the fCondJmp registration is visible. */
2114 #define ASSOC(op) (op_ ## op)->ops.generic = (op_func)(sim_##op)
2115 #define ASSOC_IA32(op) (op_ia32_v ## op)->ops.generic = (op_func)(sim_##op)
2116 #define ASSOC_BE(op) (op_be_ ## op)->ops.generic = (op_func)(sim_##op)
2133 ASSOC_IA32(fCondJmp);
2145 } /* x87_init_simulator */
2148 * Destroy a x87 simulator.
2150 * @param sim the simulator handle
/* Frees the block-state map and the whole obstack (which also owns the
 * liveness cache allocated in x87_init_simulator). */
2152 static void x87_destroy_simulator(x87_simulator *sim) {
2153 pmap_destroy(sim->blk_states);
2154 obstack_free(&sim->obst, NULL);
2155 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2156 } /* x87_destroy_simulator */
2159 * Pre-block walker: calculate the liveness information for the block
2160 * and store it into the sim->live cache.
/* @param block the visited block
 * @param data the walker context, the x87_simulator handle */
2162 static void update_liveness_walker(ir_node *block, void *data) {
2163 x87_simulator *sim = data;
2164 update_liveness(sim, block);
2165 } /* update_liveness_walker */
2168 * Run a simulation and fix all virtual instructions for a graph.
2170 * @param arch_env the architecture environment
2171 * @param birg the backend graph to process
2173 * Needs a block-schedule.
/* NOTE(review): the declaration of the local `sim` x87_simulator and of the
 * `empty` start state, plus the do-loop head, are elided in this listing. */
2175 void x87_simulate_graph(const arch_env_t *arch_env, be_irg_t *birg) {
2176 ir_node *block, *start_block;
2177 blk_state *bl_state;
2179 ir_graph *irg = be_get_birg_irg(birg);
2181 /* create the simulator */
2182 x87_init_simulator(&sim, irg, arch_env);
2184 start_block = get_irg_start_block(irg);
2185 bl_state = x87_get_bl_state(&sim, start_block);
2187 /* start with the empty state */
2188 bl_state->begin = empty;
2191 sim.worklist = new_waitq();
2192 waitq_put(sim.worklist, start_block);
2194 be_assure_liveness(birg);
2195 sim.lv = be_get_birg_liveness(birg);
2196 // sim.lv = be_liveness(be_get_birg_irg(birg));
2197 be_liveness_assure_sets(sim.lv);
2199 /* Calculate the liveness for all nodes. We must precalculate this info,
2200 * because the simulator adds new nodes (possible before Phi nodes) which
2201 * would let a lazy calculation fail.
2202 * On the other hand we reduce the computation amount due to
2203 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2205 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
/* worklist-driven simulation: blocks are queued as soon as their entry
 * state is fixed by a processed predecessor (see x87_simulate_block) */
2209 block = waitq_get(sim.worklist);
2210 x87_simulate_block(&sim, block);
2211 } while (! waitq_empty(sim.worklist));
2214 del_waitq(sim.worklist);
2215 x87_destroy_simulator(&sim);
2216 } /* x87_simulate_graph */
/* One-time module initialization: register the debug handle used by the
 * DB()/DEBUG_ONLY() tracing throughout this file. */
2218 void ia32_init_x87(void) {
2219 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
2220 } /* ia32_init_x87 */