2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
36 #include "iredges_t.h"
47 #include "../belive_t.h"
48 #include "../besched_t.h"
49 #include "../benode_t.h"
50 #include "ia32_new_nodes.h"
51 #include "gen_ia32_new_nodes.h"
52 #include "gen_ia32_regalloc_if.h"
57 /* first and second binop index */
64 /* the store val index */
65 #define STORE_VAL_IDX 2
/* Wrap a stack offset into the ring buffer; only valid if N_x87_REGS is a
   power of two (the real x87 has 8 registers). */
67 #define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
69 /** the debug handle */
70 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
72 /* Forward declaration. */
73 typedef struct _x87_simulator x87_simulator;
76 * An exchange template.
77 * Note that our virtual functions have the same inputs
78 * and attributes as the real ones, so we can simple exchange
80 * Further, x87 supports inverse instructions, so we can handle them.
/* Template bundling the 4 concrete x87 opcodes a virtual binop may be
   patched into (plain/reverse, each with or without a tos pop). */
82 typedef struct _exchange_tmpl {
83 ir_op *normal_op; /**< the normal one */
84 ir_op *reverse_op; /**< the reverse one if exists */
85 ir_op *normal_pop_op; /**< the normal one with tos pop */
86 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
90 * An entry on the simulated x87 stack.
92 typedef struct _st_entry {
93 int reg_idx; /**< the virtual register index of this stack value */
94 ir_node *node; /**< the node that produced this value */
/* The simulated x87 register stack: a ring buffer of st_entry slots,
   addressed relative to tos via MASK_TOS(). */
100 typedef struct _x87_state {
101 st_entry st[N_x87_REGS]; /**< the register stack */
102 int depth; /**< the current stack depth */
103 int tos; /**< position of the tos */
104 x87_simulator *sim; /**< The simulator. */
107 /** An empty state, used for blocks without fp instructions. */
108 static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
109 static x87_state *empty = (x87_state *)&_empty;
/* Return values of the sim_* functions: tells the driver whether the
   simulator inserted an extra node after the current one. */
112 NO_NODE_ADDED = 0, /**< No node was added. */
113 NODE_ADDED = 1 /**< A node was added by the simulator in the schedule. */
117 * The type of an instruction simulator function.
119 * @param state the x87 state
120 * @param n the node to be simulated
122 * @return NODE_ADDED if a node was added AFTER n in schedule,
125 typedef int (*sim_func)(x87_state *state, ir_node *n);
128 * A block state: Every block has a x87 state at the beginning and at the end.
130 typedef struct _blk_state {
131 x87_state *begin; /**< state at the begin or NULL if not assigned */
132 x87_state *end; /**< state at the end or NULL if not assigned */
135 #define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
137 /** liveness bitset for vfp registers. */
138 typedef unsigned char vfp_liveness;
/* Global driver state for one simulation run over an irg. */
143 struct _x87_simulator {
144 struct obstack obst; /**< An obstack for fast allocating. */
145 pmap *blk_states; /**< Map blocks to states. */
146 const arch_env_t *arch_env; /**< The architecture environment. */
147 be_lv_t *lv; /**< intrablock liveness. */
148 vfp_liveness *live; /**< Liveness information. */
149 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
150 waitq *worklist; /**< Worklist of blocks that must be processed. */
154 * Returns the current stack depth.
156 * @param state the x87 state
158 * @return the x87 stack depth
160 static int x87_get_depth(const x87_state *state) {
162 } /* x87_get_depth */
165 * Return the virtual register index at st(pos).
167 * @param state the x87 state
168 * @param pos a stack position
170 * @return the vfp register index that produced the value at st(pos)
172 static int x87_get_st_reg(const x87_state *state, int pos) {
173 assert(pos < state->depth);
174 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
175 } /* x87_get_st_reg */
178 * Return the node at st(pos).
180 * @param state the x87 state
181 * @param pos a stack position
183 * @return the IR node that produced the value at st(pos)
185 static ir_node *x87_get_st_node(const x87_state *state, int pos) {
186 assert(pos < state->depth);
187 return state->st[MASK_TOS(state->tos + pos)].node;
188 } /* x87_get_st_node */
192 * Dump the stack for debugging.
194 * @param state the x87 state
196 static void x87_dump_stack(const x87_state *state) {
/* Print from the deepest slot down to st(0), so the tos appears last. */
199 for (i = state->depth - 1; i >= 0; --i) {
200 DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
201 x87_get_st_node(state, i)));
203 DB((dbg, LEVEL_2, "<-- TOS\n"));
204 } /* x87_dump_stack */
205 #endif /* DEBUG_libfirm */
208 * Set a virtual register to st(pos).
210 * @param state the x87 state
211 * @param reg_idx the vfp register index that should be set
212 * @param node the IR node that produces the value of the vfp register
213 * @param pos the stack position where the new value should be entered
/* Overwrite the slot at st(pos) in-place; does not change depth or tos. */
215 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos) {
216 assert(0 < state->depth);
217 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
218 state->st[MASK_TOS(state->tos + pos)].node = node;
220 DB((dbg, LEVEL_2, "After SET_REG: "));
221 DEBUG_ONLY(x87_dump_stack(state));
225 * Set the tos virtual register.
227 * @param state the x87 state
228 * @param reg_idx the vfp register index that should be set
229 * @param node the IR node that produces the value of the vfp register
231 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node) {
232 x87_set_st(state, reg_idx, node, 0);
236 * Swap st(0) with st(pos).
238 * @param state the x87 state
239 * @param pos the stack position to change the tos with
/* Models an fxch instruction on the simulated stack (no IR node created). */
241 static void x87_fxch(x87_state *state, int pos) {
243 assert(pos < state->depth);
245 entry = state->st[MASK_TOS(state->tos + pos)];
246 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
247 state->st[MASK_TOS(state->tos)] = entry;
249 DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
253 * Convert a virtual register to the stack index.
255 * @param state the x87 state
256 * @param reg_idx the register vfp index
258 * @return the stack position where the register is stacked
259 * or -1 if the virtual register was not found
/* Linear scan of the stack: depth is at most N_x87_REGS (8), so O(8). */
261 static int x87_on_stack(const x87_state *state, int reg_idx) {
262 int i, tos = state->tos;
264 for (i = 0; i < state->depth; ++i)
265 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
271 * Push a virtual Register onto the stack, double pushed allowed.
273 * @param state the x87 state
274 * @param reg_idx the register vfp index
275 * @param node the node that produces the value of the vfp register
/* tos moves down (ring-wise) by one; the new tos slot receives the value. */
277 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node) {
278 assert(state->depth < N_x87_REGS && "stack overrun");
281 state->tos = MASK_TOS(state->tos - 1);
282 state->st[state->tos].reg_idx = reg_idx;
283 state->st[state->tos].node = node;
285 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
289 * Push a virtual Register onto the stack, double pushes are NOT allowed.
291 * @param state the x87 state
292 * @param reg_idx the register vfp index
293 * @param node the node that produces the value of the vfp register
294 * @param dbl_push if != 0 double pushes are allowed
296 static void x87_push(x87_state *state, int reg_idx, ir_node *node) {
297 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
299 x87_push_dbl(state, reg_idx, node);
303 * Pop a virtual Register from the stack.
305 * @param state the x87 state
/* Drop st(0): tos moves up by one, the old slot content is left stale. */
307 static void x87_pop(x87_state *state) {
308 assert(state->depth > 0 && "stack underrun");
311 state->tos = MASK_TOS(state->tos + 1);
313 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
317 * Returns the block state of a block.
319 * @param sim the x87 simulator handle
320 * @param block the current block
322 * @return the block state
/* Lazily creates the blk_state on first request and caches it in the
   blk_states pmap; allocation comes from the simulator's obstack, so it is
   freed together with the whole simulation. */
324 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block) {
325 pmap_entry *entry = pmap_find(sim->blk_states, block);
328 blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
329 bl_state->begin = NULL;
330 bl_state->end = NULL;
332 pmap_insert(sim->blk_states, block, bl_state);
336 return PTR_TO_BLKSTATE(entry->value);
337 } /* x87_get_bl_state */
340 * Creates a new x87 state.
342 * @param sim the x87 simulator handle
344 * @return a new x87 state
/* Obstack-allocated; note the returned state is NOT zero-initialized here. */
346 static x87_state *x87_alloc_state(x87_simulator *sim) {
347 x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));
351 } /* x87_alloc_state */
356 * @param sim the x87 simulator handle
357 * @param src the x87 state that will be cloned
359 * @return a cloned copy of the src state
/* Shallow copy is sufficient: x87_state contains only plain values. */
361 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src) {
362 x87_state *res = x87_alloc_state(sim);
364 memcpy(res, src, sizeof(*res));
366 } /* x87_clone_state */
369 * Patch a virtual instruction into a x87 one and return
370 * the node representing the result value.
372 * @param n the IR node to patch
373 * @param op the x87 opcode to patch in
/* Besides swapping the opcode, all float results (the node itself or its
   float Projs for mode_T nodes) are retyped to mode_E, the 80-bit x87
   register format. */
375 static ir_node *x87_patch_insn(ir_node *n, ir_op *op) {
376 ir_mode *mode = get_irn_mode(n);
381 if (mode == mode_T) {
382 /* patch all Proj's */
383 const ir_edge_t *edge;
385 foreach_out_edge(n, edge) {
386 ir_node *proj = get_edge_src_irn(edge);
388 mode = get_irn_mode(proj);
389 if (mode_is_float(mode)) {
391 set_irn_mode(proj, mode_E);
395 } else if (mode_is_float(mode))
396 set_irn_mode(n, mode_E);
398 } /* x87_patch_insn */
401 * Returns the first Proj of a mode_T node having a given mode.
403 * @param n the mode_T node
404 * @param m the desired mode of the Proj
405 * @return The first Proj of mode @p m found or NULL.
407 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m) {
408 const ir_edge_t *edge;
410 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
412 foreach_out_edge(n, edge) {
413 ir_node *proj = get_edge_src_irn(edge);
414 if (get_irn_mode(proj) == m)
419 } /* get_irn_Proj_for_mode */
422 * Wrap the arch_* function here so we can check for errors.
/* Asserts the returned register really belongs to the vfp class, catching
   nodes that were wrongly routed into the x87 simulator. */
424 static INLINE const arch_register_t *x87_get_irn_register(x87_simulator *sim, const ir_node *irn) {
425 const arch_register_t *res;
427 res = arch_get_irn_register(sim->arch_env, irn);
428 assert(res->reg_class->regs == ia32_vfp_regs);
430 } /* x87_get_irn_register */
432 /* -------------- x87 perm --------------- */
435 * Creates a fxch for shuffle.
437 * @param state the x87 state
438 * @param pos parameter for fxch
439 * @param block the block were fxch is inserted
441 * Creates a new fxch node and reroute the user of the old node
444 * @return the fxch node
/* Unlike x87_create_fxch() this variant is used at block ends during state
   shuffling; the caller is responsible for scheduling the returned node. */
446 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block) {
448 ia32_x87_attr_t *attr;
450 fxch = new_rd_ia32_fxch(NULL, get_irn_irg(block), block, mode_E);
451 attr = get_ia32_x87_attr(fxch);
/* x87[0] = exchanged slot st(pos), x87[2] = st(0); slot 1 is unused here. */
452 attr->x87[0] = &ia32_st_regs[pos];
453 attr->x87[2] = &ia32_st_regs[0];
/* Mirror the exchange in the simulated stack state. */
457 x87_fxch(state, pos);
459 } /* x87_fxch_shuffle */
462 * Calculate the necessary permutations to reach dst_state.
464 * These permutations are done with fxch instructions and placed
465 * at the end of the block.
467 * Note that critical edges are removed here, so we need only
468 * a shuffle if the current block has only one successor.
470 * @param sim the simulator handle
471 * @param block the current block
472 * @param state the current x87 stack state, might be modified
473 * @param dst_block the destination block
474 * @param dst_state destination state
478 static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
479 x87_state *state, ir_node *dst_block,
480 const x87_state *dst_state)
482 int i, n_cycles, k, ri;
483 unsigned cycles[4], all_mask;
484 char cycle_idx[4][8];
485 ir_node *fxch, *before, *after;
489 assert(state->depth == dst_state->depth);
491 /* Some mathematics here:
492 If we have a cycle of length n that includes the tos,
493 we need n-1 exchange operations.
494 We can always add the tos and restore it, so we need
495 n+1 exchange operations for a cycle not containing the tos.
496 So, the maximum of needed operations is for a cycle of 7
497 not including the tos == 8.
498 This is the same number of ops we would need for using stores,
499 so exchange is cheaper (we save the loads).
500 On the other hand, we might need an additional exchange
501 in the next block to bring one operand on top, so the
502 number of ops in the first case is identical.
503 Further, no more than 4 cycles can exists (4 x 2).
/* all_mask has one bit per occupied stack slot; bits are cleared as slots
   are assigned to a permutation cycle. */
505 all_mask = (1 << (state->depth)) - 1;
507 for (n_cycles = 0; all_mask; ++n_cycles) {
508 int src_idx, dst_idx;
510 /* find the first free slot */
511 for (i = 0; i < state->depth; ++i) {
512 if (all_mask & (1 << i)) {
513 all_mask &= ~(1 << i);
515 /* check if there are differences here */
516 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
522 /* no more cycles found */
/* Trace one permutation cycle: follow where each register has to move in
   dst_state until the cycle closes; record slot indices in cycle_idx. */
527 cycles[n_cycles] = (1 << i);
528 cycle_idx[n_cycles][k++] = i;
529 for (src_idx = i; ; src_idx = dst_idx) {
530 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
532 if ((all_mask & (1 << dst_idx)) == 0)
535 cycle_idx[n_cycles][k++] = dst_idx;
536 cycles[n_cycles] |= (1 << dst_idx);
537 all_mask &= ~(1 << dst_idx);
/* -1 terminates the index list of this cycle. */
539 cycle_idx[n_cycles][k] = -1;
543 /* no permutation needed */
547 /* Hmm: permutation needed */
548 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
549 DEBUG_ONLY(x87_dump_stack(state));
550 DB((dbg, LEVEL_2, " to\n"));
551 DEBUG_ONLY(x87_dump_stack(dst_state));
555 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
556 for (ri = 0; ri < n_cycles; ++ri) {
557 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
558 for (k = 0; cycle_idx[ri][k] != -1; ++k)
559 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
560 DB((dbg, LEVEL_2, "\n"));
567 * Find the place node must be insert.
568 * We have only one successor block, so the last instruction should
571 before = sched_last(block);
572 assert(is_cfop(before));
574 /* now do the permutations */
575 for (ri = 0; ri < n_cycles; ++ri) {
576 if ((cycles[ri] & 1) == 0) {
/* Cycle does not touch st(0): bring its first member to tos first
   (extra fxch), rotate, then restore st(0) afterwards below. */
577 /* this cycle does not include the tos */
578 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
580 sched_add_after(after, fxch);
582 sched_add_before(before, fxch);
585 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
586 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
588 sched_add_after(after, fxch);
590 sched_add_before(before, fxch);
593 if ((cycles[ri] & 1) == 0) {
594 /* this cycle does not include the tos */
595 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
596 sched_add_after(after, fxch);
603 * Create a fxch node before another node.
605 * @param state the x87 state
606 * @param n the node after the fxch
607 * @param pos exchange st(pos) with st(0)
/* Emits a real ia32 fxch node scheduled immediately before n and mirrors
   the swap in the simulated stack state. */
611 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
614 ia32_x87_attr_t *attr;
615 ir_graph *irg = get_irn_irg(n);
616 ir_node *block = get_nodes_block(n);
/* Update the simulated stack first, then build the matching node. */
618 x87_fxch(state, pos);
620 fxch = new_rd_ia32_fxch(NULL, irg, block, mode_E);
621 attr = get_ia32_x87_attr(fxch);
622 attr->x87[0] = &ia32_st_regs[pos];
623 attr->x87[2] = &ia32_st_regs[0];
627 sched_add_before(n, fxch);
628 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
630 } /* x87_create_fxch */
633 * Create a fpush before node n.
635 * @param state the x87 state
636 * @param n the node after the fpush
637 * @param pos push st(pos) on stack
638 * @param op_idx replace input op_idx of n with the fpush result
/* Duplicates the value of input op_idx onto the x87 stack (fld st(pos)),
   so the original stays available after n consumes the copy. Uses
   x87_push_dbl because the register is then intentionally on the stack
   twice. */
640 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx) {
641 ir_node *fpush, *pred = get_irn_n(n, op_idx);
642 ia32_x87_attr_t *attr;
643 const arch_register_t *out = x87_get_irn_register(state->sim, pred);
645 x87_push_dbl(state, arch_register_get_index(out), pred);
647 fpush = new_rd_ia32_fpush(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
648 attr = get_ia32_x87_attr(fpush);
649 attr->x87[0] = &ia32_st_regs[pos];
650 attr->x87[2] = &ia32_st_regs[0];
653 sched_add_before(n, fpush);
655 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
656 } /* x87_create_fpush */
659 * Create a fpop before node n.
661 * @param state the x87 state
662 * @param n the node after the fpop
663 * @param num pop 1 or 2 values
665 * @return the fpop node
/* Discards the tos value(s) with explicit fpop nodes scheduled before n. */
667 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
670 ia32_x87_attr_t *attr;
674 fpop = new_rd_ia32_fpop(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
675 attr = get_ia32_x87_attr(fpop);
/* fpop always operates on st(0). */
676 attr->x87[0] = &ia32_st_regs[0];
677 attr->x87[1] = &ia32_st_regs[0];
678 attr->x87[2] = &ia32_st_regs[0];
681 sched_add_before(n, fpop);
682 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
687 } /* x87_create_fpop */
690 * Creates an fldz before node n
692 * @param state the x87 state
693 * @param n the node after the fldz
695 * @return the fldz node
/* Pushes the constant 0.0 onto the x87 stack under the given virtual
   register index (used e.g. when a store needs a value but none is live). */
697 static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx) {
698 ir_graph *irg = get_irn_irg(n);
699 ir_node *block = get_nodes_block(n);
702 fldz = new_rd_ia32_fldz(NULL, irg, block, mode_E);
704 sched_add_before(n, fldz);
705 DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));
/* Record the new tos value in the simulated state (no double push). */
708 x87_push(state, regidx, fldz);
713 /* --------------------------------- liveness ------------------------------------------ */
716 * The liveness transfer function.
717 * Updates a live set over a single step from a given node to its predecessor.
718 * Everything defined at the node is removed from the set, the uses of the node get inserted.
720 * @param sim The simulator handle.
721 * @param irn The node at which liveness should be computed.
722 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
723 * the registers live after irn.
725 * @return The live bitset.
/* Standard backward dataflow step: live = (live \ defs(irn)) u uses(irn),
   restricted to the vfp register class. */
727 static vfp_liveness vfp_liveness_transfer(x87_simulator *sim, ir_node *irn, vfp_liveness live)
730 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
731 const arch_env_t *arch_env = sim->arch_env;
/* Kill the register defined by irn (if it is a vfp value). */
733 if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
734 const arch_register_t *reg = x87_get_irn_register(sim, irn);
735 live &= ~(1 << arch_register_get_index(reg));
/* Gen: every float operand of irn becomes live. */
738 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
739 ir_node *op = get_irn_n(irn, i);
741 if (mode_is_float(get_irn_mode(op)) && arch_irn_consider_in_reg_alloc(arch_env, cls, op)) {
742 const arch_register_t *reg = x87_get_irn_register(sim, op);
743 live |= 1 << arch_register_get_index(reg);
747 } /* vfp_liveness_transfer */
750 * Put all live virtual registers at the end of a block into a bitset.
752 * @param sim the simulator handle
753 * @param lv the liveness information
754 * @param bl the block
756 * @return The live bitset at the end of this block
/* Converts the be_lv_t live-end set of the block into the compact
   vfp_liveness bitmask (one bit per vfp register index). */
758 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
761 vfp_liveness live = 0;
762 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
763 const arch_env_t *arch_env = sim->arch_env;
764 const be_lv_t *lv = sim->lv;
766 be_lv_foreach(lv, block, be_lv_state_end, i) {
767 const arch_register_t *reg;
768 const ir_node *node = be_lv_get_irn(lv, block, i);
/* Skip nodes that are not vfp-allocated values. */
769 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
772 reg = x87_get_irn_register(sim, node);
773 live |= 1 << arch_register_get_index(reg);
777 } /* vfp_liveness_end_of_block */
779 /** get the register mask from an arch_register */
780 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
783 * Return a bitset of argument registers which are live at the end of a node.
785 * @param sim the simulator handle
786 * @param pos the node
787 * @param kill kill mask for the output registers
789 * @return The live bitset.
/* Pure cache lookup: update_liveness() must have filled sim->live for this
   node's index beforehand. */
791 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
793 unsigned idx = get_irn_idx(pos);
795 assert(idx < sim->n_idx);
796 return sim->live[idx] & ~kill;
797 } /* vfp_live_args_after */
800 * Calculate the liveness for a whole block and cache it.
802 * @param sim the simulator handle
803 * @param lv the liveness handle
804 * @param block the block
/* Walks the schedule backwards from the block end, storing the live-after
   set of every instruction in sim->live (indexed by node idx); the block's
   own idx receives the live-in set. */
806 static void update_liveness(x87_simulator *sim, ir_node *block) {
807 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
811 /* now iterate through the block backward and cache the results */
812 sched_foreach_reverse(block, irn) {
813 /* stop at the first Phi: this produces the live-in */
/* Cache live-after of irn before applying the transfer function. */
817 idx = get_irn_idx(irn);
818 sim->live[idx] = live;
820 live = vfp_liveness_transfer(sim, irn, live);
822 idx = get_irn_idx(block);
823 sim->live[idx] = live;
824 } /* update_liveness */
827 * Returns true if a register is live in a set.
829 * @param reg_idx the vfp register index
830 * @param live a live bitset
832 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
836 * Dump liveness info.
838 * @param live the live bitset
/* Debug helper: prints the vfp registers set in the bitmask (8 registers). */
840 static void vfp_dump_live(vfp_liveness live) {
843 DB((dbg, LEVEL_2, "Live after: "));
844 for (i = 0; i < 8; ++i) {
845 if (live & (1 << i)) {
846 DB((dbg, LEVEL_2, "vf%d ", i));
849 DB((dbg, LEVEL_2, "\n"));
850 } /* vfp_dump_live */
851 #endif /* DEBUG_libfirm */
853 /* --------------------------------- simulators ---------------------------------------- */
/* Classic swap-two-ints macro; do/while(0) keeps it statement-safe. */
855 #define XCHG(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
858 * Simulate a virtual binop.
860 * @param state the x87 state
861 * @param n the node that should be simulated (and patched)
862 * @param tmpl the template containing the 4 possible x87 opcodes
864 * @return NO_NODE_ADDED
/* Core case analysis of the simulator: depending on which operands are
   still live after n and where they sit on the stack, the binop is patched
   into the normal, reverse, popping or reverse-popping x87 opcode, with
   fxch/fpush fix-ups inserted before n as needed. */
866 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl) {
867 int op2_idx = 0, op1_idx;
868 int out_idx, do_pop = 0;
869 ia32_x87_attr_t *attr;
870 ir_node *patched_insn;
872 x87_simulator *sim = state->sim;
873 const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, BINOP_IDX_1));
874 const arch_register_t *op2 = x87_get_irn_register(sim, get_irn_n(n, BINOP_IDX_2));
875 const arch_register_t *out = x87_get_irn_register(sim, n);
876 int reg_index_1 = arch_register_get_index(op1);
877 int reg_index_2 = arch_register_get_index(op2);
878 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
880 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
881 arch_register_get_name(op1), arch_register_get_name(op2),
882 arch_register_get_name(out)));
883 DEBUG_ONLY(vfp_dump_live(live));
884 DB((dbg, LEVEL_1, "Stack before: "));
885 DEBUG_ONLY(x87_dump_stack(state));
887 op1_idx = x87_on_stack(state, reg_index_1);
888 assert(op1_idx >= 0);
890 if (reg_index_2 != REG_VFP_NOREG) {
891 /* second operand is a vfp register */
892 op2_idx = x87_on_stack(state, reg_index_2);
893 assert(op2_idx >= 0);
895 if (is_vfp_live(arch_register_get_index(op2), live)) {
896 /* Second operand is live. */
898 if (is_vfp_live(arch_register_get_index(op1), live)) {
899 /* Both operands are live: push the first one.
900 This works even for op1 == op2. */
901 x87_create_fpush(state, n, op1_idx, BINOP_IDX_2);
902 /* now do fxxx (tos=tos X op) */
906 dst = tmpl->normal_op;
908 /* Second live, first operand is dead here, bring it to tos. */
910 x87_create_fxch(state, n, op1_idx);
915 /* now do fxxx (tos=tos X op) */
917 dst = tmpl->normal_op;
920 /* Second operand is dead. */
921 if (is_vfp_live(arch_register_get_index(op1), live)) {
922 /* First operand is live: bring second to tos. */
924 x87_create_fxch(state, n, op2_idx);
929 /* now do fxxxr (tos = op X tos) */
931 dst = tmpl->reverse_op;
933 /* Both operands are dead here, pop them from the stack. */
936 /* Both are identically and on tos, no pop needed. */
937 /* here fxxx (tos = tos X tos) */
938 dst = tmpl->normal_op;
941 /* now do fxxxp (op = op X tos, pop) */
942 dst = tmpl->normal_pop_op;
946 } else if (op1_idx == 0) {
947 assert(op1_idx != op2_idx);
948 /* now do fxxxrp (op = tos X op, pop) */
949 dst = tmpl->reverse_pop_op;
953 /* Bring the second on top. */
954 x87_create_fxch(state, n, op2_idx);
955 if (op1_idx == op2_idx) {
956 /* Both are identically and on tos now, no pop needed. */
959 /* use fxxx (tos = tos X tos) */
960 dst = tmpl->normal_op;
963 /* op2 is on tos now */
965 /* use fxxxp (op = op X tos, pop) */
966 dst = tmpl->normal_pop_op;
974 /* second operand is an address mode */
975 if (is_vfp_live(arch_register_get_index(op1), live)) {
976 /* first operand is live: push it here */
977 x87_create_fpush(state, n, op1_idx, BINOP_IDX_1);
979 /* use fxxx (tos = tos X mem) */
980 dst = tmpl->normal_op;
983 /* first operand is dead: bring it to tos */
985 x87_create_fxch(state, n, op1_idx);
989 /* use fxxxp (tos = tos X mem) */
990 dst = tmpl->normal_op;
/* Swap in the chosen x87 opcode and record the result slot. */
995 patched_insn = x87_patch_insn(n, dst);
996 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
1001 /* patch the operation */
1002 attr = get_ia32_x87_attr(n);
1003 attr->x87[0] = op1 = &ia32_st_regs[op1_idx];
1004 if (reg_index_2 != REG_VFP_NOREG) {
1005 attr->x87[1] = op2 = &ia32_st_regs[op2_idx];
1007 attr->x87[2] = out = &ia32_st_regs[out_idx];
1009 if (reg_index_2 != REG_VFP_NOREG) {
1010 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1011 arch_register_get_name(op1), arch_register_get_name(op2),
1012 arch_register_get_name(out)));
1014 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1015 arch_register_get_name(op1),
1016 arch_register_get_name(out)));
1019 return NO_NODE_ADDED;
1023 * Simulate a virtual Unop.
1025 * @param state the x87 state
1026 * @param n the node that should be simulated (and patched)
1027 * @param op the x87 opcode that will replace n's opcode
1029 * @return NO_NODE_ADDED
/* x87 unops operate on st(0): if the operand is still live afterwards it is
   duplicated with fpush, otherwise fxch'd to the top; the result replaces
   the tos. */
1031 static int sim_unop(x87_state *state, ir_node *n, ir_op *op) {
1032 int op1_idx, out_idx;
1033 x87_simulator *sim = state->sim;
1034 const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, UNOP_IDX));
1035 const arch_register_t *out = x87_get_irn_register(sim, n);
1036 ia32_x87_attr_t *attr;
1037 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1039 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1040 DEBUG_ONLY(vfp_dump_live(live));
1042 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1044 if (is_vfp_live(arch_register_get_index(op1), live)) {
1045 /* push the operand here */
1046 x87_create_fpush(state, n, op1_idx, UNOP_IDX);
1050 /* operand is dead, bring it to tos */
1052 x87_create_fxch(state, n, op1_idx);
1057 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1059 attr = get_ia32_x87_attr(n);
1060 attr->x87[0] = op1 = &ia32_st_regs[0];
1061 attr->x87[2] = out = &ia32_st_regs[0];
1062 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1064 return NO_NODE_ADDED;
1068 * Simulate a virtual Load instruction.
1070 * @param state the x87 state
1071 * @param n the node that should be simulated (and patched)
1072 * @param op the x87 opcode that will replace n's opcode
1074 * @return NO_NODE_ADDED
/* A load always pushes its result onto the x87 stack (result in st(0)). */
1076 static int sim_load(x87_state *state, ir_node *n, ir_op *op) {
1077 const arch_register_t *out = x87_get_irn_register(state->sim, n);
1078 ia32_x87_attr_t *attr;
1080 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
1081 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
/* x87_patch_insn must not change the assigned register. */
1082 assert(out == x87_get_irn_register(state->sim, n));
1083 attr = get_ia32_x87_attr(n);
1084 attr->x87[2] = out = &ia32_st_regs[0];
1085 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1087 return NO_NODE_ADDED;
1091 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1093 * @param store The store
1094 * @param old_val The former value
1095 * @param new_val The new value
/* Uses the safe edge iterator because set_irn_n() modifies the out edges
   while iterating. Users scheduled before the store keep the old value. */
1097 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val) {
1098 const ir_edge_t *edge, *ne;
1100 foreach_out_edge_safe(old_val, edge, ne) {
1101 ir_node *user = get_edge_src_irn(edge);
1103 if (! user || user == store)
1106 /* if the user is scheduled after the store: rewire */
1107 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1109 /* find the input of the user pointing to the old value */
1110 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1111 if (get_irn_n(user, i) == old_val)
1112 set_irn_n(user, i, new_val);
1116 } /* collect_and_rewire_users */
1119 * Simulate a virtual Store.
1121 * @param state the x87 state
1122 * @param n the node that should be simulated (and patched)
1123 * @param op the x87 store opcode
1124 * @param op_p the x87 store and pop opcode
/* The hard case is a mode_E store of a value that is still live: the x87
   has no non-popping 80-bit store, so either the value is duplicated first
   (fpush + fstp) or, with a full stack, stored with fstp and reloaded via a
   freshly built vfld whose result replaces the old value for all later
   users. */
1126 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p) {
1127 x87_simulator *sim = state->sim;
1128 ir_node *val = get_irn_n(n, STORE_VAL_IDX);
1129 const arch_register_t *op2 = x87_get_irn_register(sim, val);
1130 unsigned live = vfp_live_args_after(sim, n, 0);
1131 int insn = NO_NODE_ADDED;
1132 ia32_x87_attr_t *attr;
1133 int op2_reg_idx, op2_idx, depth;
1134 int live_after_node;
1137 op2_reg_idx = arch_register_get_index(op2);
1138 if (op2_reg_idx == REG_VFP_UKNWN) {
1139 /* just take any value from stack */
1140 if(state->depth > 0) {
1142 DEBUG_ONLY(op2 = NULL);
1143 live_after_node = 1;
1145 /* produce a new value which we will consume immediately */
1146 x87_create_fldz(state, n, op2_reg_idx);
1147 live_after_node = 0;
1148 op2_idx = x87_on_stack(state, op2_reg_idx);
1149 assert(op2_idx >= 0);
1152 op2_idx = x87_on_stack(state, op2_reg_idx);
1153 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1154 assert(op2_idx >= 0);
1155 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1158 mode = get_ia32_ls_mode(n);
1159 depth = x87_get_depth(state);
1161 if (live_after_node) {
1163 Problem: fst doesn't support mode_E (spills), only fstp does
1165 - stack not full: push value and fstp
1166 - stack full: fstp value and load again
1168 if (mode == mode_E) {
1169 if (depth < N_x87_REGS) {
1170 /* ok, we have a free register: push + fstp */
1171 x87_create_fpush(state, n, op2_idx, STORE_VAL_IDX);
1173 x87_patch_insn(n, op_p);
1175 ir_node *vfld, *mem, *block, *rproj, *mproj;
1178 /* stack full here: need fstp + load */
1180 x87_patch_insn(n, op_p);
1182 block = get_nodes_block(n);
1183 irg = get_irn_irg(n);
1184 vfld = new_rd_ia32_vfld(NULL, irg, block, get_irn_n(n, 0), get_irn_n(n, 1), new_rd_NoMem(irg));
1186 /* copy all attributes */
/* The reload must address exactly the memory the store wrote, so every
   addressing attribute of the store is mirrored onto the vfld. */
1187 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1188 if (is_ia32_use_frame(n))
1189 set_ia32_use_frame(vfld);
1190 set_ia32_am_flavour(vfld, get_ia32_am_flavour(n));
1191 set_ia32_op_type(vfld, ia32_am_Source);
1192 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1193 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1194 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1196 rproj = new_r_Proj(irg, block, vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1197 mproj = new_r_Proj(irg, block, vfld, mode_M, pn_ia32_vfld_M);
1198 mem = get_irn_Proj_for_mode(n, mode_M);
1200 assert(mem && "Store memory not found");
1202 arch_set_irn_register(sim->arch_env, rproj, op2);
1204 /* reroute all former users of the store memory to the load memory */
1205 edges_reroute(mem, mproj, irg);
1206 /* set the memory input of the load to the store memory */
1207 set_irn_n(vfld, 2, mem);
1209 sched_add_after(n, vfld);
1210 sched_add_after(vfld, rproj);
1212 /* rewire all users, scheduled after the store, to the loaded value */
1213 collect_and_rewire_users(n, val, rproj);
1218 /* we can only store the tos to memory */
1220 x87_create_fxch(state, n, op2_idx);
1222 /* mode != mode_E -> use normal fst */
1223 x87_patch_insn(n, op);
1226 /* we can only store the tos to memory */
1228 x87_create_fxch(state, n, op2_idx);
1231 x87_patch_insn(n, op_p);
1234 attr = get_ia32_x87_attr(n);
1235 attr->x87[1] = op2 = &ia32_st_regs[0];
1236 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/*
 * Generator macros: each expands to a thin sim_<op>() wrapper that forwards
 * a matched virtual ia32 node to the generic simulation routine
 * (sim_binop/sim_load/sim_unop/sim_store).  _GEN_BINOP builds the
 * exchange_tmpl with the normal, reverse and pop opcode variants;
 * GEN_BINOP is for ops that are their own reverse, GEN_BINOPR for ops
 * with a distinct reversed opcode (op##r).
 */
1241 #define _GEN_BINOP(op, rev) \
1242 static int sim_##op(x87_state *state, ir_node *n) { \
1243 exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
1244 return sim_binop(state, n, &tmpl); \
1247 #define GEN_BINOP(op) _GEN_BINOP(op, op)
1248 #define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
1250 #define GEN_LOAD2(op, nop) \
1251 static int sim_##op(x87_state *state, ir_node *n) { \
1252 return sim_load(state, n, op_ia32_##nop); \
1255 #define GEN_LOAD(op) GEN_LOAD2(op, op)
1257 #define GEN_UNOP(op) \
1258 static int sim_##op(x87_state *state, ir_node *n) { \
1259 return sim_unop(state, n, op_ia32_##op); \
1262 #define GEN_STORE(op) \
1263 static int sim_##op(x87_state *state, ir_node *n) { \
1264 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
1289 * Simulate a fCondJmp.
1291 * @param state the x87 state
1292 * @param n the node that should be simulated (and patched)
1294 * @return NO_NODE_ADDED
1296 static int sim_fCondJmp(x87_state *state, ir_node *n) {
/* Rewrites the virtual compare-and-jump into one of the concrete fcom
 * variants (fcom/fcomr with optional single or double pop), depending on
 * where the operands sit on the simulated fp stack and whether they are
 * still live after the compare.  fxch nodes are inserted as needed to
 * bring operands to st(0)/st(1). */
1300 ia32_x87_attr_t *attr;
1302 x87_simulator *sim = state->sim;
/* resolve both operand nodes to their virtual fp registers */
1303 const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, BINOP_IDX_1));
1304 const arch_register_t *op2 = x87_get_irn_register(sim, get_irn_n(n, BINOP_IDX_2));
1305 int reg_index_1 = arch_register_get_index(op1);
1306 int reg_index_2 = arch_register_get_index(op2);
/* liveness after n decides whether the compare may pop its operands */
1307 unsigned live = vfp_live_args_after(sim, n, 0);
1309 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1310 arch_register_get_name(op1), arch_register_get_name(op2)));
1311 DEBUG_ONLY(vfp_dump_live(live));
1312 DB((dbg, LEVEL_1, "Stack before: "));
1313 DEBUG_ONLY(x87_dump_stack(state));
/* the first operand must already reside on the simulated x87 stack */
1315 op1_idx = x87_on_stack(state, reg_index_1);
1316 assert(op1_idx >= 0);
1318 /* BEWARE: check for comp a,a cases, they might happen */
1319 if (reg_index_2 != REG_VFP_NOREG) {
1320 /* second operand is a vfp register */
1321 op2_idx = x87_on_stack(state, reg_index_2);
1322 assert(op2_idx >= 0);
1324 if (is_vfp_live(arch_register_get_index(op2), live)) {
1325 /* second operand is live */
1327 if (is_vfp_live(arch_register_get_index(op1), live)) {
1328 /* both operands are live */
1331 /* res = tos X op */
1332 dst = op_ia32_fcomJmp;
1333 } else if (op2_idx == 0) {
1334 /* res = op X tos */
1335 dst = op_ia32_fcomrJmp;
1337 /* bring the first one to tos */
1338 x87_create_fxch(state, n, op1_idx);
1342 /* res = tos X op */
1343 dst = op_ia32_fcomJmp;
1346 /* second live, first operand is dead here, bring it to tos.
1347 This means further, op1_idx != op2_idx. */
1348 assert(op1_idx != op2_idx);
1350 x87_create_fxch(state, n, op1_idx);
1355 /* res = tos X op, pop */
1356 dst = op_ia32_fcompJmp;
1360 /* second operand is dead */
1361 if (is_vfp_live(arch_register_get_index(op1), live)) {
1362 /* first operand is live: bring second to tos.
1363 This means further, op1_idx != op2_idx. */
1364 assert(op1_idx != op2_idx);
1366 x87_create_fxch(state, n, op2_idx);
1371 /* res = op X tos, pop */
1372 dst = op_ia32_fcomrpJmp;
1375 /* both operands are dead here, check first for identity. */
1376 if (op1_idx == op2_idx) {
1377 /* identically, one pop needed */
1379 x87_create_fxch(state, n, op1_idx);
1383 /* res = tos X op, pop */
1384 dst = op_ia32_fcompJmp;
1387 /* different, move them to st and st(1) and pop both.
1388 The tricky part is to get one into st(1).*/
1389 else if (op2_idx == 1) {
1390 /* good, second operand is already in the right place, move the first */
1392 /* bring the first on top */
1393 x87_create_fxch(state, n, op1_idx);
1394 assert(op2_idx != 0);
1397 /* res = tos X op, pop, pop */
1398 dst = op_ia32_fcomppJmp;
1400 } else if (op1_idx == 1) {
1401 /* good, first operand is already in the right place, move the second */
1403 /* bring the first on top */
1404 x87_create_fxch(state, n, op2_idx);
1405 assert(op1_idx != 0);
1408 dst = op_ia32_fcomrppJmp;
1411 /* if one is already the TOS, we need two fxch */
1413 /* first one is TOS, move to st(1) */
1414 x87_create_fxch(state, n, 1);
1415 assert(op2_idx != 1);
1417 x87_create_fxch(state, n, op2_idx);
1419 /* res = op X tos, pop, pop */
1420 dst = op_ia32_fcomrppJmp;
1422 } else if (op2_idx == 0) {
1423 /* second one is TOS, move to st(1) */
1424 x87_create_fxch(state, n, 1);
1425 assert(op1_idx != 1);
1427 x87_create_fxch(state, n, op1_idx);
1429 /* res = tos X op, pop, pop */
1430 dst = op_ia32_fcomppJmp;
1433 /* none of them is either TOS or st(1), 3 fxch needed */
1434 x87_create_fxch(state, n, op2_idx);
1435 assert(op1_idx != 0);
1436 x87_create_fxch(state, n, 1);
1438 x87_create_fxch(state, n, op1_idx);
1440 /* res = tos X op, pop, pop */
1441 dst = op_ia32_fcomppJmp;
1448 /* second operand is an address mode */
1449 if (is_vfp_live(arch_register_get_index(op1), live)) {
1450 /* first operand is live: bring it to TOS */
1452 x87_create_fxch(state, n, op1_idx);
1455 dst = op_ia32_fcomJmp;
1457 /* first operand is dead: bring it to tos */
1459 x87_create_fxch(state, n, op1_idx);
1462 dst = op_ia32_fcompJmp;
/* replace the virtual node by the chosen concrete compare opcode */
1467 x87_patch_insn(n, dst);
1468 assert(pop_cnt < 3);
1474 /* patch the operation */
1475 attr = get_ia32_x87_attr(n);
1476 op1 = &ia32_st_regs[op1_idx];
/* NOTE(review): op2 is presumably only rewritten on the register-operand
 * path, not the [AM] path below -- confirm against the full source */
1479 op2 = &ia32_st_regs[op2_idx];
1482 attr->x87[2] = NULL;
1485 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1486 arch_register_get_name(op1), arch_register_get_name(op2)));
1488 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1489 arch_register_get_name(op1)));
1491 return NO_NODE_ADDED;
1492 } /* sim_fCondJmp */
/**
 * Simulate a be_Keep: if the kept value is a vfp register that is dead
 * after the Keep but still on the simulated x87 stack, emit an fpop to
 * remove it.
 *
 * @param state the x87 state
 * @param node  the be_Keep node
 *
 * @return NO_NODE_ADDED
 */
1495 int sim_Keep(x87_state *state, ir_node *node)
1498 const arch_register_t *op_reg;
1503 op = get_irn_n(node, 0);
1504 op_reg = arch_get_irn_register(state->sim->arch_env, op);
/* only float (vfp class) values live on the x87 stack */
1505 if(arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1506 return NO_NODE_ADDED;
1508 reg_id = arch_register_get_index(op_reg);
1509 live = vfp_live_args_after(state->sim, node, 0);
1511 op_stack_idx = x87_on_stack(state, reg_id);
/* on stack but dead afterwards: pop it right after the Keep */
1512 if(op_stack_idx >= 0 && !is_vfp_live(reg_id, live)) {
1513 x87_create_fpop(state, sched_next(node), 1);
1517 return NO_NODE_ADDED;
/**
 * Keep a float node alive by attaching a be_Keep to it.  Used when the
 * simulator would otherwise leave a value without any user (see
 * sim_Copy).  The node must already be scheduled; the Keep is scheduled
 * directly after it.
 *
 * @param state the x87 state
 * @param node  the node to keep alive
 */
1521 void keep_float_node_alive(x87_state *state, ir_node *node)
1527 const arch_register_class_t *cls;
1529 irg = get_irn_irg(node);
1530 block = get_nodes_block(node);
1531 cls = arch_get_irn_reg_class(state->sim->arch_env, node, -1);
1533 keep = be_new_Keep(cls, irg, block, 1, in);
1535 assert(sched_is_scheduled(node));
1536 sched_add_after(node, keep);
1540 * Create a copy of a node. Recreate the node if it's a constant.
1542 * @param state the x87 state
1543 * @param n the node to be copied
1545 * @return the copy of n
1547 static ir_node *create_Copy(x87_state *state, ir_node *n) {
1548 x87_simulator *sim = state->sim;
1549 ir_graph *irg = get_irn_irg(n);
1550 dbg_info *n_dbg = get_irn_dbg_info(n);
1551 ir_mode *mode = get_irn_mode(n);
1552 ir_node *block = get_nodes_block(n);
1553 ir_node *pred = get_irn_n(n, 0);
/* constructor of the constant-load node to recreate, NULL for a real copy */
1554 ir_node *(*cnstr)(dbg_info *, ir_graph *, ir_node *, ir_mode *) = NULL;
1556 const arch_register_t *out;
1557 const arch_register_t *op1;
1558 ia32_x87_attr_t *attr;
1560 /* Do not copy constants, recreate them. */
1561 switch (get_ia32_irn_opcode(pred)) {
1562 case iro_ia32_Unknown_VFP:
/* Unknown values are loaded as zero (fldz) */
1564 cnstr = new_rd_ia32_fldz;
1567 cnstr = new_rd_ia32_fld1;
1569 case iro_ia32_fldpi:
1570 cnstr = new_rd_ia32_fldpi;
1572 case iro_ia32_fldl2e:
1573 cnstr = new_rd_ia32_fldl2e;
1575 case iro_ia32_fldl2t:
1576 cnstr = new_rd_ia32_fldl2t;
1578 case iro_ia32_fldlg2:
1579 cnstr = new_rd_ia32_fldlg2;
1581 case iro_ia32_fldln2:
1582 cnstr = new_rd_ia32_fldln2;
1588 out = x87_get_irn_register(sim, n);
1589 op1 = x87_get_irn_register(sim, pred);
1591 if (cnstr != NULL) {
1592 /* copy a constant: re-issue the constant load instead of copying */
1593 res = (*cnstr)(n_dbg, irg, block, mode);
1595 x87_push(state, arch_register_get_index(out), res);
/* x87[2] names the output: the new value lands in st(0) */
1597 attr = get_ia32_x87_attr(res);
1598 attr->x87[2] = &ia32_st_regs[0];
/* real copy: fld st(op1_idx), modeled by fpushCopy */
1600 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1602 res = new_rd_ia32_fpushCopy(n_dbg, irg, block, pred, mode);
1604 x87_push(state, arch_register_get_index(out), res);
1606 attr = get_ia32_x87_attr(res);
1607 attr->x87[0] = &ia32_st_regs[op1_idx];
1608 attr->x87[2] = &ia32_st_regs[0];
1610 arch_set_irn_register(sim->arch_env, res, out);
1616 * Simulate a be_Copy.
1618 * @param state the x87 state
1619 * @param n the node that should be simulated (and patched)
1621 * @return NO_NODE_ADDED
1623 static int sim_Copy(x87_state *state, ir_node *n) {
1626 const arch_register_t *out;
1627 const arch_register_t *op1;
1628 ir_node *node, *next;
1629 ia32_x87_attr_t *attr;
1630 int op1_idx, out_idx;
1633 ir_mode *mode = get_irn_mode(n);
/* only float copies touch the x87 stack */
1635 if (!mode_is_float(mode))
1639 pred = get_irn_n(n, 0);
1640 out = x87_get_irn_register(sim, n);
1641 op1 = x87_get_irn_register(sim, pred);
1642 live = vfp_live_args_after(sim, n, REGMASK(out));
1644 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1645 arch_register_get_name(op1), arch_register_get_name(out)));
1646 DEBUG_ONLY(vfp_dump_live(live));
1648 /* handle the infamous unknown value */
1649 if (arch_register_get_index(op1) == REG_VFP_UKNWN) {
1650 /* Operand is still live, a real copy. We need here an fpush that can
1651 hold a a register, so use the fpushCopy or recreate constants */
1652 node = create_Copy(state, n);
/* the unknown-value path must have produced an fldz (see create_Copy) */
1654 assert(is_ia32_fldz(node));
1655 next = sched_next(n);
1658 sched_add_before(next, node);
1660 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1661 arch_get_irn_register(sim->arch_env, node)->name));
1662 return NO_NODE_ADDED;
1665 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1667 if (is_vfp_live(arch_register_get_index(op1), live)) {
1668 ir_node *pred = get_irn_n(n, 0);
1670 /* Operand is still live, a real copy. We need here an fpush that can
1671 hold a a register, so use the fpushCopy or recreate constants */
1672 node = create_Copy(state, n);
1674 /* We have to make sure the old value doesn't go dead (which can happen
1675 * when we recreate constants). As the simulator expected that value in
1676 * the pred blocks. This is unfortunate as removing it would save us 1
1677 * instruction, but we would have to rerun all the simulation to get
1680 next = sched_next(n);
1683 sched_add_before(next, node);
/* recreated constants may leave pred without users: keep it alive */
1685 if(get_irn_n_edges(pred) == 0) {
1686 keep_float_node_alive(state, pred);
1689 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1690 arch_get_irn_register(sim->arch_env, node)->name));
/* operand is dead: the copy kills op1 and renames it to out */
1692 out_idx = x87_on_stack(state, arch_register_get_index(out));
1694 if (out_idx >= 0 && out_idx != op1_idx) {
1695 /* Matze: out already on stack? how can this happen? */
1698 /* op1 must be killed and placed where out is */
1700 /* best case, simple remove and rename */
1701 x87_patch_insn(n, op_ia32_Pop);
1702 attr = get_ia32_x87_attr(n);
1703 attr->x87[0] = op1 = &ia32_st_regs[0];
1706 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1708 /* move op1 to tos, store and pop it */
1710 x87_create_fxch(state, n, op1_idx);
1713 x87_patch_insn(n, op_ia32_Pop);
1714 attr = get_ia32_x87_attr(n);
1715 attr->x87[0] = op1 = &ia32_st_regs[out_idx];
1718 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1720 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1722 /* just a virtual copy */
1723 x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
1724 /* don't remove the node to keep the verifier quiet :),
1725 the emitter won't emit any code for the node */
1728 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1729 exchange(n, get_unop_op(n));
1733 return NO_NODE_ADDED;
1737 * Returns the result proj of the call, or NULL if the result is not used
1739 static ir_node *get_call_result_proj(ir_node *call) {
1740 const ir_edge_t *edge;
1741 ir_node *resproj = NULL;
1743 /* search the result proj */
1744 foreach_out_edge(call, edge) {
1745 ir_node *proj = get_edge_src_irn(edge);
1746 long pn = get_Proj_proj(proj);
/* pn_be_Call_first_res selects the first (and only supported) result */
1748 if (pn == pn_be_Call_first_res) {
1753 if (resproj == NULL) {
1757 /* the result proj is connected to a Keep and maybe other nodes */
1758 foreach_out_edge(resproj, edge) {
1759 ir_node *pred = get_edge_src_irn(edge);
/* any non-Keep user means the result is really used */
1760 if (!be_is_Keep(pred)) {
1765 /* only be_Keep found, so result is not used */
1767 } /* get_call_result_proj */
1770 * Simulate a be_Call.
1772 * @param state the x87 state
1773 * @param n the node that should be simulated
1774 * @param arch_env the architecture environment
1776 * @return NO_NODE_ADDED
1778 static int sim_Call(x87_state *state, ir_node *n, const arch_env_t *arch_env)
1780 ir_type *call_tp = be_Call_get_type(n);
1784 const arch_register_t *reg;
1787 /* at the begin of a call the x87 state should be empty */
1788 assert(state->depth == 0 && "stack not empty before call");
/* calls without results leave the simulated stack untouched */
1790 if (get_method_n_ress(call_tp) <= 0)
1791 return NO_NODE_ADDED;
1794 * If the called function returns a float, it is returned in st(0).
1795 * This even happens if the return value is NOT used.
1796 * Moreover, only one return result is supported.
1798 res_type = get_method_res_type(call_tp, 0);
1799 mode = get_type_mode(res_type);
1801 if (mode == NULL || !mode_is_float(mode))
1802 return NO_NODE_ADDED;
1804 resproj = get_call_result_proj(n);
1805 if (resproj == NULL)
1806 return NO_NODE_ADDED;
/* model the ABI: the float result appears on the x87 stack as st(0) */
1808 reg = x87_get_irn_register(state->sim, resproj);
1809 x87_push(state, arch_register_get_index(reg), resproj);
1811 return NO_NODE_ADDED;
1815 * Simulate a be_Spill.
1817 * @param state the x87 state
1818 * @param n the node that should be simulated (and patched)
1820 * Should not happen, spills are lowered before x87 simulator see them.
1822 static int sim_Spill(x87_state *state, ir_node *n) {
/* defensive: should be unreachable, spills were lowered to fst earlier */
1823 assert(0 && "Spill not lowered");
1824 return sim_fst(state, n);
1828 * Simulate a be_Reload.
1830 * @param state the x87 state
1831 * @param n the node that should be simulated (and patched)
1833 * Should not happen, reloads are lowered before x87 simulator see them.
1835 static int sim_Reload(x87_state *state, ir_node *n) {
/* defensive: should be unreachable, reloads were lowered to fld earlier */
1836 assert(0 && "Reload not lowered");
1837 return sim_fld(state, n);
1841 * Simulate a be_Return.
1843 * @param state the x87 state
1844 * @param n the node that should be simulated (and patched)
1846 * @return NO_NODE_ADDED
1848 static int sim_Return(x87_state *state, ir_node *n) {
1849 int n_res = be_Return_get_n_rets(n);
1850 int i, n_float_res = 0;
1852 /* only floating point return values must resist on stack */
1853 for (i = 0; i < n_res; ++i) {
1854 ir_node *res = get_irn_n(n, be_pos_Return_val + i);
1856 if (mode_is_float(get_irn_mode(res)))
/* the simulated stack must hold exactly the float results at a return */
1859 assert(x87_get_depth(state) == n_float_res);
1861 /* pop them virtually */
1862 for (i = n_float_res - 1; i >= 0; --i)
1865 return NO_NODE_ADDED;
1868 typedef struct _perm_data_t {
1869 const arch_register_t *in; /**< input register of a permutation pair */
1870 const arch_register_t *out; /**< output register of a permutation pair */
1874 * Simulate a be_Perm.
1876 * @param state the x87 state
1877 * @param irn the node that should be simulated (and patched)
1879 * @return NO_NODE_ADDED
1881 static int sim_Perm(x87_state *state, ir_node *irn) {
1883 x87_simulator *sim = state->sim;
1884 ir_node *pred = get_irn_n(irn, 0);
1886 const ir_edge_t *edge;
1888 /* handle only floating point Perms */
1889 if (! mode_is_float(get_irn_mode(pred)))
1890 return NO_NODE_ADDED;
1892 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1894 /* Perm is a pure virtual instruction on x87.
1895 All inputs must be on the FPU stack and are pairwise
1896 different from each other.
1897 So, all we need to do is to permutate the stack state. */
1898 n = get_irn_arity(irn);
/* alloca'd scratch array mapping input position -> stack slot */
1899 NEW_ARR_A(int, stack_pos, n);
1901 /* collect old stack positions */
1902 for (i = 0; i < n; ++i) {
1903 const arch_register_t *inreg = x87_get_irn_register(sim, get_irn_n(irn, i));
1904 int idx = x87_on_stack(state, arch_register_get_index(inreg));
1906 assert(idx >= 0 && "Perm argument not on x87 stack");
1910 /* now do the permutation */
1911 foreach_out_edge(irn, edge) {
1912 ir_node *proj = get_edge_src_irn(edge);
1913 const arch_register_t *out = x87_get_irn_register(sim, proj);
/* the Proj number selects which input slot this output takes over */
1914 long num = get_Proj_proj(proj);
1916 assert(0 <= num && num < n && "More Proj's than Perm inputs");
1917 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
1919 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1921 return NO_NODE_ADDED;
1925 * Kill any dead registers at block start by popping them from the stack.
1927 * @param sim the simulator handle
1928 * @param block the current block
1929 * @param start_state the x87 state at the begin of the block
1931 * @return the x87 state after dead register killed
1933 static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state) {
1934 x87_state *state = start_state;
1935 ir_node *first_insn = sched_first(block);
1936 ir_node *keep = NULL;
1937 unsigned live = vfp_live_args_after(sim, block, 0);
1939 int i, depth, num_pop;
/* build a bitmask of stack slots whose value is dead at block entry */
1942 depth = x87_get_depth(state);
1943 for (i = depth - 1; i >= 0; --i) {
1944 int reg = x87_get_st_reg(state, i);
1946 if (! is_vfp_live(reg, live))
1947 kill_mask |= (1 << i);
1951 /* create a new state, will be changed */
1952 state = x87_clone_state(sim, state);
1954 DB((dbg, LEVEL_1, "Killing deads:\n"));
1955 DEBUG_ONLY(vfp_dump_live(live));
1956 DEBUG_ONLY(x87_dump_stack(state));
1958 /* now kill registers */
1960 /* we can only kill from TOS, so bring them up */
1961 if (! (kill_mask & 1)) {
1962 /* search from behind, because we can to a double-pop */
1963 for (i = depth - 1; i >= 0; --i) {
1964 if (kill_mask & (1 << i)) {
1965 kill_mask &= ~(1 << i);
/* -1 marks the slot as holding a dead (to-be-popped) value */
1972 x87_set_st(state, -1, keep, i);
1973 x87_create_fxch(state, first_insn, i);
/* pop two dead values at once when both TOS slots are dead */
1976 if ((kill_mask & 3) == 3) {
1977 /* we can do a double-pop */
1981 /* only a single pop */
1986 kill_mask >>= num_pop;
1987 keep = x87_create_fpop(state, first_insn, num_pop);
1992 } /* x87_kill_deads */
1995 * Run a simulation and fix all virtual instructions for a block.
1997 * @param sim the simulator handle
1998 * @param block the current block
2000 static void x87_simulate_block(x87_simulator *sim, ir_node *block) {
2002 blk_state *bl_state = x87_get_bl_state(sim, block);
2003 x87_state *state = bl_state->begin;
2004 const ir_edge_t *edge;
2005 ir_node *start_block;
/* a begin state must have been set by a predecessor or the start block */
2007 assert(state != NULL);
2008 /* already processed? */
2009 if (bl_state->end != NULL)
2012 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2013 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2014 DEBUG_ONLY(x87_dump_stack(state));
2016 /* at block begin, kill all dead registers */
2017 state = x87_kill_deads(sim, block, state);
2019 /* beware, n might change */
2020 for (n = sched_first(block); !sched_is_end(n); n = next) {
2023 ir_op *op = get_irn_op(n);
2025 next = sched_next(n);
/* ops.generic holds the sim_* handler registered in x87_init_simulator;
 * nodes without a handler need no simulation */
2026 if (op->ops.generic == NULL)
2029 func = (sim_func)op->ops.generic;
2031 /* have work to do */
2032 if (state == bl_state->begin) {
2033 /* create a new state, will be changed */
2034 state = x87_clone_state(sim, state);
2038 node_inserted = (*func)(state, n);
2041 sim_func might have added an additional node after n,
2043 beware: n must not be changed by sim_func
2044 (i.e. removed from schedule) in this case
2046 if (node_inserted != NO_NODE_ADDED)
2047 next = sched_next(n);
2050 start_block = get_irg_start_block(get_irn_irg(block));
2052 /* check if the state must be shuffled */
2053 foreach_block_succ(block, edge) {
2054 ir_node *succ = get_edge_src_irn(edge);
2055 blk_state *succ_state;
2057 if (succ == start_block)
2060 succ_state = x87_get_bl_state(sim, succ);
2062 if (succ_state->begin == NULL) {
/* first visit: propagate our end state and enqueue the successor */
2063 succ_state->begin = state;
2064 waitq_put(sim->worklist, succ);
2066 /* There is already a begin state for the successor, bad.
2067 Do the necessary permutations.
2068 Note that critical edges are removed, so this is always possible:
2069 If the successor has more than one possible input, then it must
2072 x87_shuffle(sim, block, state, succ, succ_state->begin);
2075 bl_state->end = state;
2077 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));
2078 } /* x87_simulate_block */
2081 * Create a new x87 simulator.
2083 * @param sim a simulator handle, will be initialized
2084 * @param irg the current graph
2085 * @param arch_env the architecture environment
2087 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg,
2088 const arch_env_t *arch_env)
2090 obstack_init(&sim->obst);
2091 sim->blk_states = pmap_create();
2092 sim->arch_env = arch_env;
2093 sim->n_idx = get_irg_last_idx(irg);
/* liveness cache, indexed by node index (filled by update_liveness) */
2094 sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);
2096 DB((dbg, LEVEL_1, "--------------------------------\n"
2097 "x87 Simulator started for %+F\n", irg));
2099 /* set the generic function pointer of instruction we must simulate */
2100 clear_irp_opcodes_generic_func();
/* ASSOC* wire the sim_* handlers into op->ops.generic; the simulator
 * loop in x87_simulate_block dispatches through that pointer */
2102 #define ASSOC(op) (op_ ## op)->ops.generic = (op_func)(sim_##op)
2103 #define ASSOC_IA32(op) (op_ia32_v ## op)->ops.generic = (op_func)(sim_##op)
2104 #define ASSOC_BE(op) (op_be_ ## op)->ops.generic = (op_func)(sim_##op)
2121 ASSOC_IA32(fCondJmp);
2132 } /* x87_init_simulator */
2135 * Destroy a x87 simulator.
2137 * @param sim the simulator handle
2139 static void x87_destroy_simulator(x87_simulator *sim) {
2140 pmap_destroy(sim->blk_states);
2141 obstack_free(&sim->obst, NULL);
2142 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2143 } /* x87_destroy_simulator */
2146 * Pre-block walker: calculate the liveness information for the block
2147 * and store it into the sim->live cache.
2149 static void update_liveness_walker(ir_node *block, void *data) {
2150 x87_simulator *sim = data;
2151 update_liveness(sim, block);
2152 } /* update_liveness_walker */
2155 * Run a simulation and fix all virtual instructions for a graph.
2157 * @param env the architecture environment
2158 * @param irg the current graph
2160 * Needs a block-schedule.
2162 void x87_simulate_graph(const arch_env_t *arch_env, be_irg_t *birg) {
2163 ir_node *block, *start_block;
2164 blk_state *bl_state;
2166 ir_graph *irg = be_get_birg_irg(birg);
2168 /* create the simulator */
2169 x87_init_simulator(&sim, irg, arch_env);
2171 start_block = get_irg_start_block(irg);
2172 bl_state = x87_get_bl_state(&sim, start_block);
2174 /* start with the empty state */
2175 bl_state->begin = empty;
/* worklist of blocks whose begin state is known but which are unprocessed */
2178 sim.worklist = new_waitq();
2179 waitq_put(sim.worklist, start_block);
2181 be_assure_liveness(birg);
2182 sim.lv = be_get_birg_liveness(birg);
2183 // sim.lv = be_liveness(be_get_birg_irg(birg));
2184 be_liveness_assure_sets(sim.lv);
2186 /* Calculate the liveness for all nodes. We must precalculate this info,
2187 * because the simulator adds new nodes (possible before Phi nodes) which
2188 * would let a lazy calculation fail.
2189 * On the other hand we reduce the computation amount due to
2190 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2192 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
/* process blocks until every reachable block has been simulated */
2196 block = waitq_get(sim.worklist);
2197 x87_simulate_block(&sim, block);
2198 } while (! waitq_empty(sim.worklist));
2201 del_waitq(sim.worklist);
2202 x87_destroy_simulator(&sim);
2203 } /* x87_simulate_graph */
2205 void ia32_init_x87(void) {
2206 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
2207 } /* ia32_init_x87 */