/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       This file implements the x87 support and virtual to stack
 *              register translation for the ia32 backend.
 * @author      Michael Beck
 */
34 #include "iredges_t.h"
46 #include "../belive_t.h"
47 #include "../besched_t.h"
48 #include "../benode_t.h"
49 #include "bearch_ia32_t.h"
50 #include "ia32_new_nodes.h"
51 #include "gen_ia32_new_nodes.h"
52 #include "gen_ia32_regalloc_if.h"
54 #include "ia32_architecture.h"
61 #define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
63 /** the debug handle */
64 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
66 /* Forward declaration. */
67 typedef struct _x87_simulator x87_simulator;
70 * An exchange template.
71 * Note that our virtual functions have the same inputs
72 * and attributes as the real ones, so we can simple exchange
74 * Further, x87 supports inverse instructions, so we can handle them.
76 typedef struct _exchange_tmpl {
77 ir_op *normal_op; /**< the normal one */
78 ir_op *reverse_op; /**< the reverse one if exists */
79 ir_op *normal_pop_op; /**< the normal one with tos pop */
80 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
84 * An entry on the simulated x87 stack.
86 typedef struct _st_entry {
87 int reg_idx; /**< the virtual register index of this stack value */
88 ir_node *node; /**< the node that produced this value */
94 typedef struct _x87_state {
95 st_entry st[N_x87_REGS]; /**< the register stack */
96 int depth; /**< the current stack depth */
97 int tos; /**< position of the tos */
98 x87_simulator *sim; /**< The simulator. */
101 /** An empty state, used for blocks without fp instructions. */
102 static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
103 static x87_state *empty = (x87_state *)&_empty;
/** Result codes of a simulator function (see sim_func). */
enum {
	NO_NODE_ADDED = 0,  /**< No node was added. */
	NODE_ADDED    = 1   /**< A node was added by the simulator in the schedule. */
};
111 * The type of an instruction simulator function.
113 * @param state the x87 state
114 * @param n the node to be simulated
116 * @return NODE_ADDED if a node was added AFTER n in schedule,
119 typedef int (*sim_func)(x87_state *state, ir_node *n);
122 * A block state: Every block has a x87 state at the beginning and at the end.
124 typedef struct _blk_state {
125 x87_state *begin; /**< state at the begin or NULL if not assigned */
126 x87_state *end; /**< state at the end or NULL if not assigned */
129 #define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
/** liveness bitset for vfp registers (one bit per virtual fp register). */
typedef unsigned char vfp_liveness;
137 struct _x87_simulator {
138 struct obstack obst; /**< An obstack for fast allocating. */
139 pmap *blk_states; /**< Map blocks to states. */
140 be_lv_t *lv; /**< intrablock liveness. */
141 vfp_liveness *live; /**< Liveness information. */
142 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
143 waitq *worklist; /**< Worklist of blocks that must be processed. */
144 ia32_isa_t *isa; /**< the ISA object */
148 * Returns the current stack depth.
150 * @param state the x87 state
152 * @return the x87 stack depth
154 static int x87_get_depth(const x87_state *state)
157 } /* x87_get_depth */
160 * Return the virtual register index at st(pos).
162 * @param state the x87 state
163 * @param pos a stack position
165 * @return the vfp register index that produced the value at st(pos)
167 static int x87_get_st_reg(const x87_state *state, int pos)
169 assert(pos < state->depth);
170 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
171 } /* x87_get_st_reg */
175 * Return the node at st(pos).
177 * @param state the x87 state
178 * @param pos a stack position
180 * @return the IR node that produced the value at st(pos)
182 static ir_node *x87_get_st_node(const x87_state *state, int pos)
184 assert(pos < state->depth);
185 return state->st[MASK_TOS(state->tos + pos)].node;
186 } /* x87_get_st_node */
#ifdef DEBUG_libfirm
/**
 * Dump the stack for debugging.
 *
 * @param state  the x87 state
 */
static void x87_dump_stack(const x87_state *state)
{
	int i;

	/* print from the deepest entry up to the top of stack */
	for (i = state->depth - 1; i >= 0; --i) {
		DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
		    x87_get_st_node(state, i)));
	}
	DB((dbg, LEVEL_2, "<-- TOS\n"));
} /* x87_dump_stack */
#endif /* DEBUG_libfirm */
206 * Set a virtual register to st(pos).
208 * @param state the x87 state
209 * @param reg_idx the vfp register index that should be set
210 * @param node the IR node that produces the value of the vfp register
211 * @param pos the stack position where the new value should be entered
213 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
215 assert(0 < state->depth);
216 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
217 state->st[MASK_TOS(state->tos + pos)].node = node;
219 DB((dbg, LEVEL_2, "After SET_REG: "));
220 DEBUG_ONLY(x87_dump_stack(state));
224 * Set the tos virtual register.
226 * @param state the x87 state
227 * @param reg_idx the vfp register index that should be set
228 * @param node the IR node that produces the value of the vfp register
230 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node)
232 x87_set_st(state, reg_idx, node, 0);
236 * Swap st(0) with st(pos).
238 * @param state the x87 state
239 * @param pos the stack position to change the tos with
241 static void x87_fxch(x87_state *state, int pos)
244 assert(pos < state->depth);
246 entry = state->st[MASK_TOS(state->tos + pos)];
247 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
248 state->st[MASK_TOS(state->tos)] = entry;
250 DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
254 * Convert a virtual register to the stack index.
256 * @param state the x87 state
257 * @param reg_idx the register vfp index
259 * @return the stack position where the register is stacked
260 * or -1 if the virtual register was not found
262 static int x87_on_stack(const x87_state *state, int reg_idx)
264 int i, tos = state->tos;
266 for (i = 0; i < state->depth; ++i)
267 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
273 * Push a virtual Register onto the stack, double pushed allowed.
275 * @param state the x87 state
276 * @param reg_idx the register vfp index
277 * @param node the node that produces the value of the vfp register
279 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node)
281 assert(state->depth < N_x87_REGS && "stack overrun");
284 state->tos = MASK_TOS(state->tos - 1);
285 state->st[state->tos].reg_idx = reg_idx;
286 state->st[state->tos].node = node;
288 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
292 * Push a virtual Register onto the stack, double pushes are NOT allowed.
294 * @param state the x87 state
295 * @param reg_idx the register vfp index
296 * @param node the node that produces the value of the vfp register
297 * @param dbl_push if != 0 double pushes are allowed
299 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
301 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
303 x87_push_dbl(state, reg_idx, node);
307 * Pop a virtual Register from the stack.
309 * @param state the x87 state
311 static void x87_pop(x87_state *state)
313 assert(state->depth > 0 && "stack underrun");
316 state->tos = MASK_TOS(state->tos + 1);
318 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
322 * Empty the fpu stack
324 * @param state the x87 state
326 static void x87_emms(x87_state *state)
333 * Returns the block state of a block.
335 * @param sim the x87 simulator handle
336 * @param block the current block
338 * @return the block state
340 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
342 pmap_entry *entry = pmap_find(sim->blk_states, block);
345 blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
346 bl_state->begin = NULL;
347 bl_state->end = NULL;
349 pmap_insert(sim->blk_states, block, bl_state);
353 return PTR_TO_BLKSTATE(entry->value);
354 } /* x87_get_bl_state */
357 * Creates a new x87 state.
359 * @param sim the x87 simulator handle
361 * @return a new x87 state
363 static x87_state *x87_alloc_state(x87_simulator *sim)
365 x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));
369 } /* x87_alloc_state */
374 * @param sim the x87 simulator handle
375 * @param src the x87 state that will be cloned
377 * @return a cloned copy of the src state
379 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
381 x87_state *res = x87_alloc_state(sim);
385 } /* x87_clone_state */
/* NOTE(review): this extract is line-sampled — the opening brace, the
 * result-tracking statements (the declaration/assignment of the returned
 * node, the set_irn_op() call, inner braces) and the final return were
 * dropped.  All surviving tokens below are kept byte-identical, including
 * the fused original line numbers; consult the original file to restore. */
388 * Patch a virtual instruction into a x87 one and return
389 * the node representing the result value.
391 * @param n the IR node to patch
392 * @param op the x87 opcode to patch in
394 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
396 ir_mode *mode = get_irn_mode(n);
401 if (mode == mode_T) {
402 /* patch all Proj's */
403 const ir_edge_t *edge;
405 foreach_out_edge(n, edge) {
406 ir_node *proj = get_edge_src_irn(edge);
408 mode = get_irn_mode(proj);
409 if (mode_is_float(mode)) {
/* float Projs of the patched node are retyped to the x87 spill mode */
411 set_irn_mode(proj, mode_E);
415 } else if (mode_is_float(mode))
416 set_irn_mode(n, mode_E);
418 } /* x87_patch_insn */
421 * Returns the first Proj of a mode_T node having a given mode.
423 * @param n the mode_T node
424 * @param m the desired mode of the Proj
425 * @return The first Proj of mode @p m found or NULL.
427 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
429 const ir_edge_t *edge;
431 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
433 foreach_out_edge(n, edge) {
434 ir_node *proj = get_edge_src_irn(edge);
435 if (get_irn_mode(proj) == m)
440 } /* get_irn_Proj_for_mode */
443 * Wrap the arch_* function here so we can check for errors.
445 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
447 const arch_register_t *res = arch_get_irn_register(irn);
449 assert(res->reg_class->regs == ia32_vfp_regs);
451 } /* x87_get_irn_register */
453 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
456 const arch_register_t *res = arch_irn_get_register(irn, pos);
458 assert(res->reg_class->regs == ia32_vfp_regs);
462 /* -------------- x87 perm --------------- */
/* NOTE(review): line-sampled extract — the opening brace, the declaration
 * of fxch, and the statements between attribute setup and the final return
 * (presumably keep-alive/scheduling bookkeeping) were dropped.  Surviving
 * tokens kept byte-identical; consult the original file to restore. */
465 * Creates a fxch for shuffle.
467 * @param state the x87 state
468 * @param pos parameter for fxch
469 * @param block the block were fxch is inserted
471 * Creates a new fxch node and reroute the user of the old node
474 * @return the fxch node
476 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block)
479 ia32_x87_attr_t *attr;
481 fxch = new_bd_ia32_fxch(NULL, block);
482 attr = get_ia32_x87_attr(fxch);
483 attr->x87[0] = &ia32_st_regs[pos];
484 attr->x87[2] = &ia32_st_regs[0];
488 x87_fxch(state, pos);
490 } /* x87_fxch_shuffle */
/* NOTE(review): line-sampled extract of the permutation routine — opening
 * brace, several loop bodies/breaks, the early "no permutation needed"
 * return, the tos-handling branches and the final return were dropped.
 * Surviving tokens kept byte-identical; do not attempt to compile as-is. */
493 * Calculate the necessary permutations to reach dst_state.
495 * These permutations are done with fxch instructions and placed
496 * at the end of the block.
498 * Note that critical edges are removed here, so we need only
499 * a shuffle if the current block has only one successor.
501 * @param sim the simulator handle
502 * @param block the current block
503 * @param state the current x87 stack state, might be modified
504 * @param dst_block the destination block
505 * @param dst_state destination state
509 static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
510 x87_state *state, ir_node *dst_block,
511 const x87_state *dst_state)
513 int i, n_cycles, k, ri;
514 unsigned cycles[4], all_mask;
515 char cycle_idx[4][8];
516 ir_node *fxch, *before, *after;
520 assert(state->depth == dst_state->depth);
522 /* Some mathematics here:
523 If we have a cycle of length n that includes the tos,
524 we need n-1 exchange operations.
525 We can always add the tos and restore it, so we need
526 n+1 exchange operations for a cycle not containing the tos.
527 So, the maximum of needed operations is for a cycle of 7
528 not including the tos == 8.
529 This is the same number of ops we would need for using stores,
530 so exchange is cheaper (we save the loads).
531 On the other hand, we might need an additional exchange
532 in the next block to bring one operand on top, so the
533 number of ops in the first case is identical.
534 Further, no more than 4 cycles can exists (4 x 2).
536 all_mask = (1 << (state->depth)) - 1;
/* cycle detection: peel permutation cycles off all_mask one at a time */
538 for (n_cycles = 0; all_mask; ++n_cycles) {
539 int src_idx, dst_idx;
541 /* find the first free slot */
542 for (i = 0; i < state->depth; ++i) {
543 if (all_mask & (1 << i)) {
544 all_mask &= ~(1 << i);
546 /* check if there are differences here */
547 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
553 /* no more cycles found */
558 cycles[n_cycles] = (1 << i);
559 cycle_idx[n_cycles][k++] = i;
560 for (src_idx = i; ; src_idx = dst_idx) {
561 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
563 if ((all_mask & (1 << dst_idx)) == 0)
566 cycle_idx[n_cycles][k++] = dst_idx;
567 cycles[n_cycles] |= (1 << dst_idx);
568 all_mask &= ~(1 << dst_idx);
570 cycle_idx[n_cycles][k] = -1;
574 /* no permutation needed */
578 /* Hmm: permutation needed */
579 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
580 DEBUG_ONLY(x87_dump_stack(state));
581 DB((dbg, LEVEL_2, " to\n"));
582 DEBUG_ONLY(x87_dump_stack(dst_state));
586 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
587 for (ri = 0; ri < n_cycles; ++ri) {
588 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
589 for (k = 0; cycle_idx[ri][k] != -1; ++k)
590 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
591 DB((dbg, LEVEL_2, "\n"));
598 * Find the place node must be insert.
599 * We have only one successor block, so the last instruction should
602 before = sched_last(block);
603 assert(is_cfop(before));
605 /* now do the permutations */
606 for (ri = 0; ri < n_cycles; ++ri) {
607 if ((cycles[ri] & 1) == 0) {
608 /* this cycle does not include the tos */
609 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
611 sched_add_after(after, fxch);
613 sched_add_before(before, fxch);
616 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
617 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
619 sched_add_after(after, fxch);
621 sched_add_before(before, fxch);
/* NOTE(review): cycles not containing the tos need a closing fxch to
 * restore st(0) — the branch below appears to implement that. */
624 if ((cycles[ri] & 1) == 0) {
625 /* this cycle does not include the tos */
626 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
627 sched_add_after(after, fxch);
/* NOTE(review): line-sampled — opening brace, the fxch declaration, blank
 * separators, keep/return bookkeeping were dropped.  Tokens byte-identical. */
634 * Create a fxch node before another node.
636 * @param state the x87 state
637 * @param n the node after the fxch
638 * @param pos exchange st(pos) with st(0)
642 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
645 ia32_x87_attr_t *attr;
646 ir_node *block = get_nodes_block(n);
/* update the simulated stack first, then emit the matching instruction */
648 x87_fxch(state, pos);
650 fxch = new_bd_ia32_fxch(NULL, block);
651 attr = get_ia32_x87_attr(fxch);
652 attr->x87[0] = &ia32_st_regs[pos];
653 attr->x87[2] = &ia32_st_regs[0];
657 sched_add_before(n, fxch);
658 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
660 } /* x87_create_fxch */
/* NOTE(review): line-sampled — opening brace, blank separators and the
 * input-rewiring statements between attribute setup and scheduling were
 * dropped.  Tokens byte-identical. */
663 * Create a fpush before node n.
665 * @param state the x87 state
666 * @param n the node after the fpush
667 * @param pos push st(pos) on stack
668 * @param op_idx replace input op_idx of n with the fpush result
670 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx)
672 ir_node *fpush, *pred = get_irn_n(n, op_idx);
673 ia32_x87_attr_t *attr;
674 const arch_register_t *out = x87_get_irn_register(pred);
/* double-push is deliberate here: the same value ends up twice on stack */
676 x87_push_dbl(state, arch_register_get_index(out), pred);
678 fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
679 attr = get_ia32_x87_attr(fpush);
680 attr->x87[0] = &ia32_st_regs[pos];
681 attr->x87[2] = &ia32_st_regs[0];
684 sched_add_before(n, fpush);
686 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
687 } /* x87_create_fpush */
/* NOTE(review): line-sampled — opening brace, the x87_pop() call, the else
 * keyword of the ffreep/fpop choice, the num>1 looping logic and the final
 * return were dropped.  Tokens byte-identical. */
690 * Create a fpop before node n.
692 * @param state the x87 state
693 * @param n the node after the fpop
694 * @param num pop 1 or 2 values
696 * @return the fpop node
698 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
700 ir_node *fpop = NULL;
701 ia32_x87_attr_t *attr;
/* ffreep is preferred over fpop on CPUs where it is cheaper */
706 if (ia32_cg_config.use_ffreep)
707 fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
709 fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
710 attr = get_ia32_x87_attr(fpop);
711 attr->x87[0] = &ia32_st_regs[0];
712 attr->x87[1] = &ia32_st_regs[0];
713 attr->x87[2] = &ia32_st_regs[0];
716 sched_add_before(n, fpop);
717 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
722 } /* x87_create_fpop */
/* NOTE(review): line-sampled — opening brace, the fldz declaration, blank
 * separators and the final return were dropped.  Tokens byte-identical. */
725 * Creates an fldz before node n
727 * @param state the x87 state
728 * @param n the node after the fldz
730 * @return the fldz node
732 static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx)
734 ir_node *block = get_nodes_block(n);
737 fldz = new_bd_ia32_fldz(NULL, block, mode_E);
739 sched_add_before(n, fldz);
740 DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));
/* register the produced zero on the simulated stack */
743 x87_push(state, regidx, fldz);
748 /* --------------------------------- liveness ------------------------------------------ */
751 * The liveness transfer function.
752 * Updates a live set over a single step from a given node to its predecessor.
753 * Everything defined at the node is removed from the set, the uses of the node get inserted.
755 * @param irn The node at which liveness should be computed.
756 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
757 * the registers live after irn.
759 * @return The live bitset.
761 static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
764 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
766 if (get_irn_mode(irn) == mode_T) {
767 const ir_edge_t *edge;
769 foreach_out_edge(irn, edge) {
770 ir_node *proj = get_edge_src_irn(edge);
772 if (arch_irn_consider_in_reg_alloc(cls, proj)) {
773 const arch_register_t *reg = x87_get_irn_register(proj);
774 live &= ~(1 << arch_register_get_index(reg));
779 if (arch_irn_consider_in_reg_alloc(cls, irn)) {
780 const arch_register_t *reg = x87_get_irn_register(irn);
781 live &= ~(1 << arch_register_get_index(reg));
784 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
785 ir_node *op = get_irn_n(irn, i);
787 if (mode_is_float(get_irn_mode(op)) &&
788 arch_irn_consider_in_reg_alloc(cls, op)) {
789 const arch_register_t *reg = x87_get_irn_register(op);
790 live |= 1 << arch_register_get_index(reg);
794 } /* vfp_liveness_transfer */
797 * Put all live virtual registers at the end of a block into a bitset.
799 * @param sim the simulator handle
800 * @param lv the liveness information
801 * @param bl the block
803 * @return The live bitset at the end of this block
805 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
808 vfp_liveness live = 0;
809 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
810 const be_lv_t *lv = sim->lv;
812 be_lv_foreach(lv, block, be_lv_state_end, i) {
813 const arch_register_t *reg;
814 const ir_node *node = be_lv_get_irn(lv, block, i);
815 if (!arch_irn_consider_in_reg_alloc(cls, node))
818 reg = x87_get_irn_register(node);
819 live |= 1 << arch_register_get_index(reg);
823 } /* vfp_liveness_end_of_block */
825 /** get the register mask from an arch_register */
826 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
829 * Return a bitset of argument registers which are live at the end of a node.
831 * @param sim the simulator handle
832 * @param pos the node
833 * @param kill kill mask for the output registers
835 * @return The live bitset.
837 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
839 unsigned idx = get_irn_idx(pos);
841 assert(idx < sim->n_idx);
842 return sim->live[idx] & ~kill;
843 } /* vfp_live_args_after */
846 * Calculate the liveness for a whole block and cache it.
848 * @param sim the simulator handle
849 * @param lv the liveness handle
850 * @param block the block
852 static void update_liveness(x87_simulator *sim, ir_node *block)
854 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
858 /* now iterate through the block backward and cache the results */
859 sched_foreach_reverse(block, irn) {
860 /* stop at the first Phi: this produces the live-in */
864 idx = get_irn_idx(irn);
865 sim->live[idx] = live;
867 live = vfp_liveness_transfer(irn, live);
869 idx = get_irn_idx(block);
870 sim->live[idx] = live;
871 } /* update_liveness */
/**
 * Returns true if a register is live in a set.
 *
 * @param reg_idx  the vfp register index
 * @param live     a live bitset
 */
#define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
#ifdef DEBUG_libfirm
/**
 * Dump liveness info.
 *
 * @param live  the live bitset
 */
static void vfp_dump_live(vfp_liveness live)
{
	int i;

	DB((dbg, LEVEL_2, "Live after: "));
	for (i = 0; i < 8; ++i) {
		if (live & (1 << i)) {
			DB((dbg, LEVEL_2, "vf%d ", i));
		}
	}
	DB((dbg, LEVEL_2, "\n"));
} /* vfp_dump_live */
#endif /* DEBUG_libfirm */
/* --------------------------------- simulators ---------------------------------------- */

/** Exchange the values of two int lvalues. */
#define XCHG(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
/* NOTE(review): line-sampled extract of the binop simulator — opening brace,
 * several declarations (dst, permuted, out_idx assignments), many else/brace
 * lines and the REG_VFP_UKNWN handling bodies were dropped.  All surviving
 * tokens are kept byte-identical; consult the original file to restore. */
915 * Simulate a virtual binop.
917 * @param state the x87 state
918 * @param n the node that should be simulated (and patched)
919 * @param tmpl the template containing the 4 possible x87 opcodes
921 * @return NO_NODE_ADDED
923 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl)
925 int op2_idx = 0, op1_idx;
926 int out_idx, do_pop = 0;
927 ia32_x87_attr_t *attr;
929 ir_node *patched_insn;
931 x87_simulator *sim = state->sim;
932 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
933 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
934 const arch_register_t *op1_reg = x87_get_irn_register(op1);
935 const arch_register_t *op2_reg = x87_get_irn_register(op2);
936 const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
937 int reg_index_1 = arch_register_get_index(op1_reg);
938 int reg_index_2 = arch_register_get_index(op2_reg);
939 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
943 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
944 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
945 arch_register_get_name(out)));
946 DEBUG_ONLY(vfp_dump_live(live));
947 DB((dbg, LEVEL_1, "Stack before: "));
948 DEBUG_ONLY(x87_dump_stack(state));
950 if (reg_index_1 == REG_VFP_UKNWN) {
954 op1_idx = x87_on_stack(state, reg_index_1);
955 assert(op1_idx >= 0);
956 op1_live_after = is_vfp_live(arch_register_get_index(op1_reg), live);
959 attr = get_ia32_x87_attr(n);
960 permuted = attr->attr.data.ins_permuted;
962 if (reg_index_2 != REG_VFP_NOREG) {
965 if (reg_index_2 == REG_VFP_UKNWN) {
969 /* second operand is a vfp register */
970 op2_idx = x87_on_stack(state, reg_index_2);
971 assert(op2_idx >= 0);
973 = is_vfp_live(arch_register_get_index(op2_reg), live);
976 if (op2_live_after) {
977 /* Second operand is live. */
979 if (op1_live_after) {
980 /* Both operands are live: push the first one.
981 This works even for op1 == op2. */
982 x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
983 /* now do fxxx (tos=tos X op) */
987 dst = tmpl->normal_op;
989 /* Second live, first operand is dead here, bring it to tos. */
991 x87_create_fxch(state, n, op1_idx);
996 /* now do fxxx (tos=tos X op) */
998 dst = tmpl->normal_op;
1001 /* Second operand is dead. */
1002 if (op1_live_after) {
1003 /* First operand is live: bring second to tos. */
1005 x87_create_fxch(state, n, op2_idx);
1010 /* now do fxxxr (tos = op X tos) */
1012 dst = tmpl->reverse_op;
1014 /* Both operands are dead here, pop them from the stack. */
1017 /* Both are identically and on tos, no pop needed. */
1018 /* here fxxx (tos = tos X tos) */
1019 dst = tmpl->normal_op;
1022 /* now do fxxxp (op = op X tos, pop) */
1023 dst = tmpl->normal_pop_op;
1027 } else if (op1_idx == 0) {
1028 assert(op1_idx != op2_idx);
1029 /* now do fxxxrp (op = tos X op, pop) */
1030 dst = tmpl->reverse_pop_op;
1034 /* Bring the second on top. */
1035 x87_create_fxch(state, n, op2_idx);
1036 if (op1_idx == op2_idx) {
1037 /* Both are identically and on tos now, no pop needed. */
1040 /* use fxxx (tos = tos X tos) */
1041 dst = tmpl->normal_op;
1044 /* op2 is on tos now */
1046 /* use fxxxp (op = op X tos, pop) */
1047 dst = tmpl->normal_pop_op;
1055 /* second operand is an address mode */
1056 if (op1_live_after) {
1057 /* first operand is live: push it here */
1058 x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
1061 /* first operand is dead: bring it to tos */
1063 x87_create_fxch(state, n, op1_idx);
1068 /* use fxxx (tos = tos X mem) */
1069 dst = permuted ? tmpl->reverse_op : tmpl->normal_op;
1073 patched_insn = x87_patch_insn(n, dst);
1074 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
1079 /* patch the operation */
1080 attr->x87[0] = op1_reg = &ia32_st_regs[op1_idx];
1081 if (reg_index_2 != REG_VFP_NOREG) {
1082 attr->x87[1] = op2_reg = &ia32_st_regs[op2_idx];
1084 attr->x87[2] = out = &ia32_st_regs[out_idx];
1086 if (reg_index_2 != REG_VFP_NOREG) {
1087 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1088 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
1089 arch_register_get_name(out)));
1091 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1092 arch_register_get_name(op1_reg),
1093 arch_register_get_name(out)));
1096 return NO_NODE_ADDED;
/* NOTE(review): line-sampled — opening brace, the op1_idx reset statements
 * after fpush/fxch, several closing braces and blank lines were dropped.
 * Surviving tokens kept byte-identical. */
1100 * Simulate a virtual Unop.
1102 * @param state the x87 state
1103 * @param n the node that should be simulated (and patched)
1104 * @param op the x87 opcode that will replace n's opcode
1106 * @return NO_NODE_ADDED
1108 static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
1110 int op1_idx, out_idx;
1111 x87_simulator *sim = state->sim;
1112 const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, UNOP_IDX));
1113 const arch_register_t *out = x87_get_irn_register(n);
1114 ia32_x87_attr_t *attr;
1115 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1117 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1118 DEBUG_ONLY(vfp_dump_live(live));
1120 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1122 if (is_vfp_live(arch_register_get_index(op1), live)) {
1123 /* push the operand here */
1124 x87_create_fpush(state, n, op1_idx, UNOP_IDX);
1128 /* operand is dead, bring it to tos */
1130 x87_create_fxch(state, n, op1_idx);
1135 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1137 attr = get_ia32_x87_attr(n);
1138 attr->x87[0] = op1 = &ia32_st_regs[0];
1139 attr->x87[2] = out = &ia32_st_regs[0];
1140 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1142 return NO_NODE_ADDED;
1146 * Simulate a virtual Load instruction.
1148 * @param state the x87 state
1149 * @param n the node that should be simulated (and patched)
1150 * @param op the x87 opcode that will replace n's opcode
1152 * @return NO_NODE_ADDED
1154 static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
1156 const arch_register_t *out = x87_irn_get_register(n, res_pos);
1157 ia32_x87_attr_t *attr;
1159 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
1160 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1161 assert(out == x87_irn_get_register(n, res_pos));
1162 attr = get_ia32_x87_attr(n);
1163 attr->x87[2] = out = &ia32_st_regs[0];
1164 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1166 return NO_NODE_ADDED;
1170 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1172 * @param store The store
1173 * @param old_val The former value
1174 * @param new_val The new value
1176 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
1178 const ir_edge_t *edge, *ne;
1180 foreach_out_edge_safe(old_val, edge, ne) {
1181 ir_node *user = get_edge_src_irn(edge);
1183 if (! user || user == store)
1186 /* if the user is scheduled after the store: rewire */
1187 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1189 /* find the input of the user pointing to the old value */
1190 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1191 if (get_irn_n(user, i) == old_val)
1192 set_irn_n(user, i, new_val);
1196 } /* collect_and_rewire_users */
/* NOTE(review): line-sampled extract of the store simulator — opening brace,
 * several declarations, x87_pop() calls, else keywords, closing braces and
 * the final return were dropped.  Surviving tokens kept byte-identical;
 * consult the original file to restore. */
1199 * Simulate a virtual Store.
1201 * @param state the x87 state
1202 * @param n the node that should be simulated (and patched)
1203 * @param op the x87 store opcode
1204 * @param op_p the x87 store and pop opcode
1206 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p)
1208 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1209 const arch_register_t *op2 = x87_get_irn_register(val);
1210 unsigned live = vfp_live_args_after(state->sim, n, 0);
1211 int insn = NO_NODE_ADDED;
1212 ia32_x87_attr_t *attr;
1213 int op2_reg_idx, op2_idx, depth;
1214 int live_after_node;
1217 op2_reg_idx = arch_register_get_index(op2);
1218 if (op2_reg_idx == REG_VFP_UKNWN) {
1219 /* just take any value from stack */
1220 if (state->depth > 0) {
1222 DEBUG_ONLY(op2 = NULL);
1223 live_after_node = 1;
1225 /* produce a new value which we will consume immediately */
1226 x87_create_fldz(state, n, op2_reg_idx);
1227 live_after_node = 0;
1228 op2_idx = x87_on_stack(state, op2_reg_idx);
1229 assert(op2_idx >= 0);
1232 op2_idx = x87_on_stack(state, op2_reg_idx);
1233 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1234 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1235 assert(op2_idx >= 0);
1238 mode = get_ia32_ls_mode(n);
1239 depth = x87_get_depth(state);
1241 if (live_after_node) {
1243 Problem: fst doesn't support mode_E (spills), only fstp does
1245 - stack not full: push value and fstp
1246 - stack full: fstp value and load again
1247 Note that we cannot test on mode_E, because floats might be 96bit ...
1249 if (get_mode_size_bits(mode) > 64 || mode == mode_Ls) {
1250 if (depth < N_x87_REGS) {
1251 /* ok, we have a free register: push + fstp */
1252 x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
1254 x87_patch_insn(n, op_p);
1256 ir_node *vfld, *mem, *block, *rproj, *mproj;
1259 /* stack full here: need fstp + load */
1261 x87_patch_insn(n, op_p);
1263 block = get_nodes_block(n);
1264 vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), new_NoMem(), get_ia32_ls_mode(n));
1266 /* copy all attributes */
1267 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1268 if (is_ia32_use_frame(n))
1269 set_ia32_use_frame(vfld);
1270 set_ia32_op_type(vfld, ia32_AddrModeS);
1271 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1272 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1273 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1275 irg = get_irn_irg(n);
1276 rproj = new_r_Proj(irg, block, vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1277 mproj = new_r_Proj(irg, block, vfld, mode_M, pn_ia32_vfld_M);
1278 mem = get_irn_Proj_for_mode(n, mode_M);
1280 assert(mem && "Store memory not found");
1282 arch_set_irn_register(rproj, op2);
1284 /* reroute all former users of the store memory to the load memory */
1285 edges_reroute(mem, mproj, irg);
1286 /* set the memory input of the load to the store memory */
1287 set_irn_n(vfld, n_ia32_vfld_mem, mem);
1289 sched_add_after(n, vfld);
1290 sched_add_after(vfld, rproj);
1292 /* rewire all users, scheduled after the store, to the loaded value */
1293 collect_and_rewire_users(n, val, rproj);
1298 /* we can only store the tos to memory */
1300 x87_create_fxch(state, n, op2_idx);
1302 /* mode != mode_E -> use normal fst */
1303 x87_patch_insn(n, op);
1306 /* we can only store the tos to memory */
1308 x87_create_fxch(state, n, op2_idx);
1311 x87_patch_insn(n, op_p);
1314 attr = get_ia32_x87_attr(n);
1315 attr->x87[1] = op2 = &ia32_st_regs[0];
1316 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/*
 * Generator macros: each one expands to a tiny sim_<op>() wrapper that
 * forwards to the generic simulator routine for its instruction class
 * (binop / load / unop / store), passing the concrete ia32 opcode(s).
 * NOTE(review): the closing '}' line of each macro body is elided in this
 * fragment; no comments are placed inside the '\'-continued bodies.
 */
1321 #define _GEN_BINOP(op, rev) \
1322 static int sim_##op(x87_state *state, ir_node *n) { \
1323 exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
1324 return sim_binop(state, n, &tmpl); \
1327 #define GEN_BINOP(op) _GEN_BINOP(op, op)
1328 #define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
/* loads additionally need the result proj number of the virtual node */
1330 #define GEN_LOAD(op) \
1331 static int sim_##op(x87_state *state, ir_node *n) { \
1332 return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
1335 #define GEN_UNOP(op) \
1336 static int sim_##op(x87_state *state, ir_node *n) { \
1337 return sim_unop(state, n, op_ia32_##op); \
1340 #define GEN_STORE(op) \
1341 static int sim_##op(x87_state *state, ir_node *n) { \
1342 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
/* NOTE(review): several physical lines (braces, else keywords, the final
   return) are elided in this fragment; comments annotate only what is
   visible here. */
1364 * Simulate a virtual fisttp.
1366 * @param state the x87 state
1367 * @param n the node that should be simulated (and patched)
1369 static int sim_fisttp(x87_state *state, ir_node *n)
1371 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1372 const arch_register_t *op2 = x87_get_irn_register(val);
1373 int insn = NO_NODE_ADDED;
1374 ia32_x87_attr_t *attr;
1375 int op2_reg_idx, op2_idx, depth;
1377 op2_reg_idx = arch_register_get_index(op2);
/* the operand carries the "unknown" pseudo register: any stack value works */
1378 if (op2_reg_idx == REG_VFP_UKNWN) {
1379 /* just take any value from stack */
1380 if (state->depth > 0) {
1382 DEBUG_ONLY(op2 = NULL);
/* stack empty (presumably the elided else branch): materialize a value
   with fldz so fisttp has something to pop — TODO confirm branch shape */
1384 /* produce a new value which we will consume immediately */
1385 x87_create_fldz(state, n, op2_reg_idx);
1386 op2_idx = x87_on_stack(state, op2_reg_idx);
1387 assert(op2_idx >= 0);
/* regular case: locate the operand on the simulated x87 stack */
1390 op2_idx = x87_on_stack(state, op2_reg_idx);
1391 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1392 assert(op2_idx >= 0);
1395 depth = x87_get_depth(state);
1397 /* Note: although the value is still live here, it is destroyed because
1398 of the pop. The register allocator is aware of that and introduced a copy
1399 if the value must be alive. */
1401 /* we can only store the tos to memory */
/* presumably guarded by op2_idx != 0 on an elided line — TODO confirm */
1403 x87_create_fxch(state, n, op2_idx);
/* rewrite the virtual node into the real fisttp opcode */
1406 x87_patch_insn(n, op_ia32_fisttp);
1408 attr = get_ia32_x87_attr(n);
/* after the (possible) fxch the stored value is st(0) */
1409 attr->x87[1] = op2 = &ia32_st_regs[0];
1410 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/*
 * Simulate a virtual FtstFnstsw (compare st(0) against zero and store the
 * FPU status word). Brings the operand to tos, patches the node to the
 * real opcode and pops the value if it is dead afterwards.
 * NOTE(review): braces / some lines are elided in this fragment.
 */
1415 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1417 x87_simulator *sim = state->sim;
1418 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1419 ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1420 const arch_register_t *reg1 = x87_get_irn_register(op1_node);
1421 int reg_index_1 = arch_register_get_index(reg1);
1422 int op1_idx = x87_on_stack(state, reg_index_1);
1423 unsigned live = vfp_live_args_after(sim, n, 0);
1425 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, arch_register_get_name(reg1)));
1426 DEBUG_ONLY(vfp_dump_live(live));
1427 DB((dbg, LEVEL_1, "Stack before: "));
1428 DEBUG_ONLY(x87_dump_stack(state));
1429 assert(op1_idx >= 0);
/* ftst only tests st(0) — presumably guarded by op1_idx != 0 on an
   elided line; TODO confirm */
1432 /* bring the value to tos */
1433 x87_create_fxch(state, n, op1_idx);
1437 /* patch the operation */
1438 x87_patch_insn(n, op_ia32_FtstFnstsw);
1439 reg1 = &ia32_st_regs[op1_idx];
1440 attr->x87[0] = reg1;
1441 attr->x87[1] = NULL;
1442 attr->x87[2] = NULL;
/* operand dies here: schedule an fpop directly after this node */
1444 if (!is_vfp_live(reg_index_1, live)) {
1445 x87_create_fpop(state, sched_next(n), 1);
1449 return NO_NODE_ADDED;
/*
 * Simulate the virtual Fucom family (vFucomFnstsw / vFucomi).
 * Distinguishes all combinations of {first,second} operand x
 * {live,dead} x {on tos, elsewhere} and inserts the minimal number of
 * fxch operations, then patches the node to the real (possibly popping)
 * compare opcode. 'permuted' tracks whether operand order got swapped.
 * NOTE(review): many physical lines (else keywords, pop bookkeeping,
 * braces) are elided in this fragment; the case structure below is
 * therefore only partially visible.
 */
1453 * @param state the x87 state
1454 * @param n the node that should be simulated (and patched)
1456 static int sim_Fucom(x87_state *state, ir_node *n)
1460 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1462 x87_simulator *sim = state->sim;
1463 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1464 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1465 const arch_register_t *op1 = x87_get_irn_register(op1_node);
1466 const arch_register_t *op2 = x87_get_irn_register(op2_node);
1467 int reg_index_1 = arch_register_get_index(op1);
1468 int reg_index_2 = arch_register_get_index(op2);
1469 unsigned live = vfp_live_args_after(sim, n, 0);
1470 int permuted = attr->attr.data.ins_permuted;
1473 int node_added = NO_NODE_ADDED;
1475 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1476 arch_register_get_name(op1), arch_register_get_name(op2)));
1477 DEBUG_ONLY(vfp_dump_live(live));
1478 DB((dbg, LEVEL_1, "Stack before: "));
1479 DEBUG_ONLY(x87_dump_stack(state));
1481 op1_idx = x87_on_stack(state, reg_index_1);
1482 assert(op1_idx >= 0);
1484 /* BEWARE: check for comp a,a cases, they might happen */
1485 if (reg_index_2 != REG_VFP_NOREG) {
1486 /* second operand is a vfp register */
1487 op2_idx = x87_on_stack(state, reg_index_2);
1488 assert(op2_idx >= 0);
1490 if (is_vfp_live(reg_index_2, live)) {
1491 /* second operand is live */
1493 if (is_vfp_live(reg_index_1, live)) {
/* both live: no pop, at most one fxch to bring one operand to tos */
1494 /* both operands are live */
1497 /* res = tos X op */
1498 } else if (op2_idx == 0) {
1499 /* res = op X tos */
1500 permuted = !permuted;
1503 /* bring the first one to tos */
1504 x87_create_fxch(state, n, op1_idx);
1508 /* res = tos X op */
/* first dead, second live: compare with pop of the first operand */
1511 /* second live, first operand is dead here, bring it to tos.
1512 This means further, op1_idx != op2_idx. */
1513 assert(op1_idx != op2_idx);
1515 x87_create_fxch(state, n, op1_idx);
1520 /* res = tos X op, pop */
1524 /* second operand is dead */
1525 if (is_vfp_live(reg_index_1, live)) {
1526 /* first operand is live: bring second to tos.
1527 This means further, op1_idx != op2_idx. */
1528 assert(op1_idx != op2_idx);
1530 x87_create_fxch(state, n, op2_idx);
1535 /* res = op X tos, pop */
1537 permuted = !permuted;
1540 /* both operands are dead here, check first for identity. */
1541 if (op1_idx == op2_idx) {
1542 /* identically, one pop needed */
1544 x87_create_fxch(state, n, op1_idx);
1548 /* res = tos X op, pop */
/* both dead and distinct: arrange operands as st(0)/st(1), pop both */
1551 /* different, move them to st and st(1) and pop both.
1552 The tricky part is to get one into st(1).*/
1553 else if (op2_idx == 1) {
1554 /* good, second operand is already in the right place, move the first */
1556 /* bring the first on top */
1557 x87_create_fxch(state, n, op1_idx);
1558 assert(op2_idx != 0);
1561 /* res = tos X op, pop, pop */
1563 } else if (op1_idx == 1) {
1564 /* good, first operand is already in the right place, move the second */
1566 /* bring the first on top */
1567 x87_create_fxch(state, n, op2_idx);
1568 assert(op1_idx != 0);
1571 /* res = op X tos, pop, pop */
1572 permuted = !permuted;
1576 /* if one is already the TOS, we need two fxch */
1578 /* first one is TOS, move to st(1) */
1579 x87_create_fxch(state, n, 1);
1580 assert(op2_idx != 1);
1582 x87_create_fxch(state, n, op2_idx);
1584 /* res = op X tos, pop, pop */
1586 permuted = !permuted;
1588 } else if (op2_idx == 0) {
1589 /* second one is TOS, move to st(1) */
1590 x87_create_fxch(state, n, 1);
1591 assert(op1_idx != 1);
1593 x87_create_fxch(state, n, op1_idx);
1595 /* res = tos X op, pop, pop */
1598 /* none of them is either TOS or st(1), 3 fxch needed */
1599 x87_create_fxch(state, n, op2_idx);
1600 assert(op1_idx != 0);
1601 x87_create_fxch(state, n, 1);
1603 x87_create_fxch(state, n, op1_idx);
1605 /* res = tos X op, pop, pop */
1612 /* second operand is an address mode */
1613 if (is_vfp_live(reg_index_1, live)) {
1614 /* first operand is live: bring it to TOS */
1616 x87_create_fxch(state, n, op1_idx);
1620 /* first operand is dead: bring it to tos */
1622 x87_create_fxch(state, n, op1_idx);
/* select the concrete opcode according to node kind and pop count */
1629 /* patch the operation */
1630 if (is_ia32_vFucomFnstsw(n)) {
1634 case 0: dst = op_ia32_FucomFnstsw; break;
1635 case 1: dst = op_ia32_FucompFnstsw; break;
1636 case 2: dst = op_ia32_FucomppFnstsw; break;
1637 default: panic("invalid popcount in sim_Fucom");
1640 for (i = 0; i < pops; ++i) {
1643 } else if (is_ia32_vFucomi(n)) {
1645 case 0: dst = op_ia32_Fucomi; break;
1646 case 1: dst = op_ia32_Fucompi; x87_pop(state); break;
/* fucomi can only pop once: a second pop needs an extra fpop node */
1648 dst = op_ia32_Fucompi;
1650 x87_create_fpop(state, sched_next(n), 1);
1651 node_added = NODE_ADDED;
1653 default: panic("invalid popcount in sim_Fucom");
1656 panic("invalid operation %+F in sim_FucomFnstsw", n);
1659 x87_patch_insn(n, dst);
/* write back the final physical st registers into the x87 attributes */
1666 op1 = &ia32_st_regs[op1_idx];
1669 op2 = &ia32_st_regs[op2_idx];
1672 attr->x87[2] = NULL;
1673 attr->attr.data.ins_permuted = permuted;
1676 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1677 arch_register_get_name(op1), arch_register_get_name(op2)));
1679 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1680 arch_register_get_name(op1)));
/*
 * Simulate a be_Keep: for every kept vfp operand that is still on the
 * simulated stack but not live anymore, insert an fpop after the Keep.
 * NOTE(review): braces and the final return are elided in this fragment.
 */
1686 static int sim_Keep(x87_state *state, ir_node *node)
1689 const arch_register_t *op_reg;
1694 int node_added = NO_NODE_ADDED;
1696 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1698 arity = get_irn_arity(node);
1699 for (i = 0; i < arity; ++i) {
1700 op = get_irn_n(node, i);
1701 op_reg = arch_get_irn_register(op);
/* only x87 (vfp) operands are of interest here */
1702 if (arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1705 reg_id = arch_register_get_index(op_reg);
/* NOTE(review): this call does not depend on i and looks loop-invariant;
   could be hoisted — verify before changing */
1706 live = vfp_live_args_after(state->sim, node, 0);
1708 op_stack_idx = x87_on_stack(state, reg_id);
/* value still on stack but dead: pop it right after the Keep */
1709 if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live)) {
1710 x87_create_fpop(state, sched_next(node), 1);
1711 node_added = NODE_ADDED;
1715 DB((dbg, LEVEL_1, "Stack after: "));
1716 DEBUG_ONLY(x87_dump_stack(state));
/*
 * Attach a be_Keep directly after NODE so its result is not treated as
 * dead by later passes (used when a predecessor loses all users).
 * NOTE(review): the declarations/initialization of 'keep' and 'in' are on
 * lines elided from this fragment.
 */
1721 static void keep_float_node_alive(ir_node *node)
1723 ir_graph *irg = get_irn_irg(node);
1724 ir_node *block = get_nodes_block(node);
1725 const arch_register_class_t *cls = arch_get_irn_reg_class_out(node);
1730 keep = be_new_Keep(cls, irg, block, 1, in);
/* the Keep must sit right behind the node in the schedule */
1732 assert(sched_is_scheduled(node));
1733 sched_add_after(node, keep);
/* NOTE(review): break statements / braces of the switch are elided in
   this fragment; comments annotate only the visible lines. */
1737 * Create a copy of a node. Recreate the node if it's a constant.
1739 * @param state the x87 state
1740 * @param n the node to be copied
1742 * @return the copy of n
1744 static ir_node *create_Copy(x87_state *state, ir_node *n)
1746 dbg_info *n_dbg = get_irn_dbg_info(n);
1747 ir_mode *mode = get_irn_mode(n);
1748 ir_node *block = get_nodes_block(n);
1749 ir_node *pred = get_irn_n(n, 0);
/* constructor used when the predecessor is a reloadable fp constant */
1750 ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL;
1752 const arch_register_t *out;
1753 const arch_register_t *op1;
1754 ia32_x87_attr_t *attr;
1756 /* Do not copy constants, recreate them. */
1757 switch (get_ia32_irn_opcode(pred)) {
1758 case iro_ia32_Unknown_VFP:
1760 cnstr = new_bd_ia32_fldz;
1763 cnstr = new_bd_ia32_fld1;
1765 case iro_ia32_fldpi:
1766 cnstr = new_bd_ia32_fldpi;
1768 case iro_ia32_fldl2e:
1769 cnstr = new_bd_ia32_fldl2e;
1771 case iro_ia32_fldl2t:
1772 cnstr = new_bd_ia32_fldl2t;
1774 case iro_ia32_fldlg2:
1775 cnstr = new_bd_ia32_fldlg2;
1777 case iro_ia32_fldln2:
1778 cnstr = new_bd_ia32_fldln2;
1784 out = x87_get_irn_register(n);
1785 op1 = x87_get_irn_register(pred);
1787 if (cnstr != NULL) {
/* recreate the constant instead of copying; the new node is pushed */
1788 /* copy a constant */
1789 res = (*cnstr)(n_dbg, block, mode);
1791 x87_push(state, arch_register_get_index(out), res);
1793 attr = get_ia32_x87_attr(res);
1794 attr->x87[2] = &ia32_st_regs[0];
/* generic case: duplicate the stack slot with an fpushCopy */
1796 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1798 res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);
1800 x87_push(state, arch_register_get_index(out), res);
1802 attr = get_ia32_x87_attr(res);
1803 attr->x87[0] = &ia32_st_regs[op1_idx];
1804 attr->x87[2] = &ia32_st_regs[0];
1806 arch_set_irn_register(res, out);
/* NOTE(review): several lines (braces, else keywords, a return) are
   elided in this fragment; comments annotate only the visible code. */
1812 * Simulate a be_Copy.
1814 * @param state the x87 state
1815 * @param n the node that should be simulated (and patched)
1817 * @return NO_NODE_ADDED
1819 static int sim_Copy(x87_state *state, ir_node *n)
1822 const arch_register_t *out;
1823 const arch_register_t *op1;
1824 const arch_register_class_t *cls;
1825 ir_node *node, *next;
1826 ia32_x87_attr_t *attr;
1827 int op1_idx, out_idx;
/* only copies of vfp (x87) values are simulated here */
1830 cls = arch_get_irn_reg_class_out(n);
1831 if (cls->regs != ia32_vfp_regs)
1834 pred = get_irn_n(n, 0);
1835 out = x87_get_irn_register(n);
1836 op1 = x87_get_irn_register(pred);
1837 live = vfp_live_args_after(state->sim, n, REGMASK(out));
1839 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1840 arch_register_get_name(op1), arch_register_get_name(out)));
1841 DEBUG_ONLY(vfp_dump_live(live));
1843 /* handle the infamous unknown value */
1844 if (arch_register_get_index(op1) == REG_VFP_UKNWN) {
1845 /* Operand is still live, a real copy. We need here an fpush that can
1846 hold a a register, so use the fpushCopy or recreate constants */
1847 node = create_Copy(state, n);
/* for the unknown register create_Copy must have produced an fldz */
1849 assert(is_ia32_fldz(node));
1850 next = sched_next(n);
1853 sched_add_before(next, node);
1855 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1856 arch_get_irn_register(node)->name));
1857 return NO_NODE_ADDED;
1860 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1862 if (is_vfp_live(arch_register_get_index(op1), live)) {
1863 ir_node *pred = get_irn_n(n, 0);
1865 /* Operand is still live, a real copy. We need here an fpush that can
1866 hold a a register, so use the fpushCopy or recreate constants */
1867 node = create_Copy(state, n);
1869 /* We have to make sure the old value doesn't go dead (which can happen
1870 * when we recreate constants). As the simulator expected that value in
1871 * the pred blocks. This is unfortunate as removing it would save us 1
1872 * instruction, but we would have to rerun all the simulation to get
1875 next = sched_next(n);
1878 sched_add_before(next, node);
/* recreated constant may have orphaned the predecessor: keep it alive */
1880 if (get_irn_n_edges(pred) == 0) {
1881 keep_float_node_alive(pred);
1884 DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
/* operand dies: the copy degenerates to a rename or a pop */
1886 out_idx = x87_on_stack(state, arch_register_get_index(out));
1888 if (out_idx >= 0 && out_idx != op1_idx) {
1889 /* Matze: out already on stack? how can this happen? */
1892 /* op1 must be killed and placed where out is */
1894 /* best case, simple remove and rename */
1895 x87_patch_insn(n, op_ia32_Pop);
1896 attr = get_ia32_x87_attr(n);
1897 attr->x87[0] = op1 = &ia32_st_regs[0];
1900 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1902 /* move op1 to tos, store and pop it */
1904 x87_create_fxch(state, n, op1_idx);
1907 x87_patch_insn(n, op_ia32_Pop);
1908 attr = get_ia32_x87_attr(n);
1909 attr->x87[0] = op1 = &ia32_st_regs[out_idx];
1912 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1914 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1916 /* just a virtual copy */
1917 x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
1918 /* don't remove the node to keep the verifier quiet :),
1919 the emitter won't emit any code for the node */
1922 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
/* replace the Copy by its operand in the graph */
1923 exchange(n, get_unop_op(n));
1927 return NO_NODE_ADDED;
/* NOTE(review): the return statements of this function are on lines
   elided from this fragment. */
1931 * Returns the result proj of the call
1933 static ir_node *get_call_result_proj(ir_node *call)
1935 const ir_edge_t *edge;
1937 /* search the result proj */
1938 foreach_out_edge(call, edge) {
1939 ir_node *proj = get_edge_src_irn(edge);
1940 long pn = get_Proj_proj(proj);
/* pn_ia32_Call_vf0 is the x87 float result of an ia32 Call */
1942 if (pn == pn_ia32_Call_vf0) {
1948 } /* get_call_result_proj */
1951 * Simulate a ia32_Call.
1953 * @param state the x87 state
1954 * @param n the node that should be simulated
1956 * @return NO_NODE_ADDED
1958 static int sim_Call(x87_state *state, ir_node *n)
1960 ir_type *call_tp = get_ia32_call_attr_const(n)->call_tp;
1964 const arch_register_t *reg;
1966 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1968 /* at the begin of a call the x87 state should be empty */
1969 assert(state->depth == 0 && "stack not empty before call");
/* calls without results leave the stack empty (return on elided line) */
1971 if (get_method_n_ress(call_tp) <= 0)
1975 * If the called function returns a float, it is returned in st(0).
1976 * This even happens if the return value is NOT used.
1977 * Moreover, only one return result is supported.
1979 res_type = get_method_res_type(call_tp, 0);
1980 mode = get_type_mode(res_type);
1982 if (mode == NULL || !mode_is_float(mode))
/* float result: the call pushes st(0); mirror that in the simulation */
1985 resproj = get_call_result_proj(n);
1986 assert(resproj != NULL);
1988 reg = x87_get_irn_register(resproj);
1989 x87_push(state, arch_register_get_index(reg), resproj);
1992 DB((dbg, LEVEL_1, "Stack after: "));
1993 DEBUG_ONLY(x87_dump_stack(state));
1995 return NO_NODE_ADDED;
1999 * Simulate a be_Spill.
2001 * @param state the x87 state
2002 * @param n the node that should be simulated (and patched)
2004 * Should not happen, spills are lowered before x87 simulator see them.
2006 static int sim_Spill(x87_state *state, ir_node *n)
/* reaching this is a hard error; the tail call after panic() never runs
   and only silences missing-return warnings */
2008 panic("Spill not lowered");
2009 return sim_fst(state, n);
2013 * Simulate a be_Reload.
2015 * @param state the x87 state
2016 * @param n the node that should be simulated (and patched)
2018 * Should not happen, reloads are lowered before x87 simulator see them.
2020 static int sim_Reload(x87_state *state, ir_node *n)
/* reaching this is a hard error; the tail call after panic() never runs
   and only silences missing-return warnings */
2022 panic("Reload not lowered");
2023 return sim_fld(state, n);
2027 * Simulate a be_Return.
2029 * @param state the x87 state
2030 * @param n the node that should be simulated (and patched)
2032 * @return NO_NODE_ADDED
2034 static int sim_Return(x87_state *state, ir_node *n)
2036 int n_res = be_Return_get_n_rets(n);
2037 int i, n_float_res = 0;
2039 /* only floating point return values must reside on stack */
2040 for (i = 0; i < n_res; ++i) {
2041 ir_node *res = get_irn_n(n, be_pos_Return_val + i);
/* increment of n_float_res is on an elided line after this test */
2043 if (mode_is_float(get_irn_mode(res)))
/* at a return, exactly the float results may remain on the x87 stack */
2046 assert(x87_get_depth(state) == n_float_res);
2048 /* pop them virtually */
2049 for (i = n_float_res - 1; i >= 0; --i)
2052 return NO_NODE_ADDED;
/* Input/output register pair used when simulating a be_Perm.
   NOTE(review): the closing '} perm_data_t;' line is elided here. */
2055 typedef struct _perm_data_t {
2056 const arch_register_t *in;
2057 const arch_register_t *out;
2061 * Simulate a be_Perm.
2063 * @param state the x87 state
2064 * @param irn the node that should be simulated (and patched)
2066 * @return NO_NODE_ADDED
2068 static int sim_Perm(x87_state *state, ir_node *irn)
2071 ir_node *pred = get_irn_n(irn, 0);
2073 const ir_edge_t *edge;
2075 /* handle only floating point Perms */
2076 if (! mode_is_float(get_irn_mode(pred)))
2077 return NO_NODE_ADDED;
2079 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
2081 /* Perm is a pure virtual instruction on x87.
2082 All inputs must be on the FPU stack and are pairwise
2083 different from each other.
2084 So, all we need to do is to permutate the stack state. */
2085 n = get_irn_arity(irn);
/* alloca-style temporary array for the old stack positions */
2086 NEW_ARR_A(int, stack_pos, n);
2088 /* collect old stack positions */
2089 for (i = 0; i < n; ++i) {
2090 const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
2091 int idx = x87_on_stack(state, arch_register_get_index(inreg));
2093 assert(idx >= 0 && "Perm argument not on x87 stack");
2097 /* now do the permutation */
2098 foreach_out_edge(irn, edge) {
2099 ir_node *proj = get_edge_src_irn(edge);
2100 const arch_register_t *out = x87_get_irn_register(proj);
2101 long num = get_Proj_proj(proj);
2103 assert(0 <= num && num < n && "More Proj's than Perm inputs");
/* rebind slot stack_pos[num] to the output register of this Proj */
2104 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
2106 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
2108 return NO_NODE_ADDED;
/*
 * Simulate a be_Barrier: replace every Unknown_VFP input by a freshly
 * materialized fldz so a real value exists on the x87 stack.
 */
2111 static int sim_Barrier(x87_state *state, ir_node *node)
2115 /* materialize unknown if needed */
2116 arity = get_irn_arity(node);
2117 for (i = 0; i < arity; ++i) {
2118 const arch_register_t *reg;
2121 ia32_x87_attr_t *attr;
2122 ir_node *in = get_irn_n(node, i);
2124 if (!is_ia32_Unknown_VFP(in))
2127 /* TODO: not completely correct... */
2128 reg = &ia32_vfp_regs[REG_VFP_UKNWN];
2131 block = get_nodes_block(node);
2132 zero = new_bd_ia32_fldz(NULL, block, mode_E);
2133 x87_push(state, arch_register_get_index(reg), zero);
2135 attr = get_ia32_x87_attr(zero);
2136 attr->x87[2] = &ia32_st_regs[0];
/* the fldz must execute before the barrier; rewire the input to it */
2138 sched_add_before(node, zero);
2140 set_irn_n(node, i, zero);
2143 return NO_NODE_ADDED;
/* NOTE(review): several lines (braces, else branches, early return) are
   elided in this fragment; comments annotate only the visible code. */
2148 * Kill any dead registers at block start by popping them from the stack.
2150 * @param sim the simulator handle
2151 * @param block the current block
2152 * @param start_state the x87 state at the begin of the block
2154 * @return the x87 state after dead register killed
2156 static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state)
2158 x87_state *state = start_state;
2159 ir_node *first_insn = sched_first(block);
2160 ir_node *keep = NULL;
2161 unsigned live = vfp_live_args_after(sim, block, 0);
2163 int i, depth, num_pop;
/* build a bit mask of stack slots holding dead values */
2166 depth = x87_get_depth(state);
2167 for (i = depth - 1; i >= 0; --i) {
2168 int reg = x87_get_st_reg(state, i);
2170 if (! is_vfp_live(reg, live))
2171 kill_mask |= (1 << i);
2175 /* create a new state, will be changed */
2176 state = x87_clone_state(sim, state);
2178 DB((dbg, LEVEL_1, "Killing deads:\n"));
2179 DEBUG_ONLY(vfp_dump_live(live));
2180 DEBUG_ONLY(x87_dump_stack(state));
2182 if (kill_mask != 0 && live == 0) {
2183 /* special case: kill all registers */
2184 if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
2185 if (ia32_cg_config.use_femms) {
2186 /* use FEMMS on AMD processors to clear all */
2187 keep = new_bd_ia32_femms(NULL, block);
2189 /* use EMMS to clear all */
2190 keep = new_bd_ia32_emms(NULL, block);
2192 sched_add_before(first_insn, keep);
2198 /* now kill registers */
/* loop head iterating while kill_mask != 0 is on an elided line */
2200 /* we can only kill from TOS, so bring them up */
2201 if (! (kill_mask & 1)) {
2202 /* search from behind, because we can to a double-pop */
2203 for (i = depth - 1; i >= 0; --i) {
2204 if (kill_mask & (1 << i)) {
2205 kill_mask &= ~(1 << i);
/* mark slot i as dead and swap it to tos so it can be popped */
2212 x87_set_st(state, -1, keep, i);
2213 x87_create_fxch(state, first_insn, i);
2216 if ((kill_mask & 3) == 3) {
2217 /* we can do a double-pop */
2221 /* only a single pop */
2226 kill_mask >>= num_pop;
2227 keep = x87_create_fpop(state, first_insn, num_pop);
2232 } /* x87_kill_deads */
2235 * If we have PhiEs with unknown operands then we have to make sure that some
2236 * value is actually put onto the stack.
2238 static void fix_unknown_phis(x87_state *state, ir_node *block,
2239 ir_node *pred_block, int pos)
2243 sched_foreach(block, node) {
2245 const arch_register_t *reg;
2246 ia32_x87_attr_t *attr;
/* Phi filtering (is_Phi / mode check) is on lines elided here */
2251 op = get_Phi_pred(node, pos);
2252 if (!is_ia32_Unknown_VFP(op))
2255 reg = arch_get_irn_register(node);
2257 /* create a zero at end of pred block */
2258 zero = new_bd_ia32_fldz(NULL, pred_block, mode_E);
2259 x87_push(state, arch_register_get_index(reg), zero);
2261 attr = get_ia32_x87_attr(zero);
2262 attr->x87[2] = &ia32_st_regs[0];
2264 assert(is_ia32_fldz(zero));
/* schedule before the branch terminating the predecessor block */
2265 sched_add_before(sched_last(pred_block), zero);
2267 set_Phi_pred(node, pos, zero);
/* NOTE(review): some lines (braces, a 'continue'/early return) are elided
   in this fragment; comments annotate only the visible code. */
2272 * Run a simulation and fix all virtual instructions for a block.
2274 * @param sim the simulator handle
2275 * @param block the current block
2277 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
2280 blk_state *bl_state = x87_get_bl_state(sim, block);
2281 x87_state *state = bl_state->begin;
2282 const ir_edge_t *edge;
2283 ir_node *start_block;
2285 assert(state != NULL);
2286 /* already processed? */
2287 if (bl_state->end != NULL)
2290 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2291 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2292 DEBUG_ONLY(x87_dump_stack(state));
2294 /* at block begin, kill all dead registers */
2295 state = x87_kill_deads(sim, block, state);
2296 /* create a new state, will be changed */
2297 state = x87_clone_state(sim, state);
2299 /* beware, n might change */
2300 for (n = sched_first(block); !sched_is_end(n); n = next) {
2303 ir_op *op = get_irn_op(n);
2305 next = sched_next(n);
/* nodes without a registered sim callback need no simulation */
2306 if (op->ops.generic == NULL)
2309 func = (sim_func)op->ops.generic;
2312 node_inserted = (*func)(state, n);
2315 sim_func might have added an additional node after n,
2317 beware: n must not be changed by sim_func
2318 (i.e. removed from schedule) in this case
/* re-read the successor if the callback inserted a node after n */
2320 if (node_inserted != NO_NODE_ADDED)
2321 next = sched_next(n);
2324 start_block = get_irg_start_block(get_irn_irg(block));
2326 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));
2328 /* check if the state must be shuffled */
2329 foreach_block_succ(block, edge) {
2330 ir_node *succ = get_edge_src_irn(edge);
2331 blk_state *succ_state;
/* the start block is never a real successor to simulate into */
2333 if (succ == start_block)
2336 succ_state = x87_get_bl_state(sim, succ);
2338 fix_unknown_phis(state, succ, block, get_edge_src_pos(edge));
2340 if (succ_state->begin == NULL) {
2341 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2342 DEBUG_ONLY(x87_dump_stack(state));
2343 succ_state->begin = state;
2345 waitq_put(sim->worklist, succ);
2347 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2348 /* There is already a begin state for the successor, bad.
2349 Do the necessary permutations.
2350 Note that critical edges are removed, so this is always possible:
2351 If the successor has more than one possible input, then it must
2354 x87_shuffle(sim, block, state, succ, succ_state->begin);
2357 bl_state->end = state;
2358 } /* x87_simulate_block */
/* Install FUNC as the simulator callback for opcode OP via the generic
   function pointer; asserts no callback was registered before. */
2360 static void register_sim(ir_op *op, sim_func func)
2362 assert(op->ops.generic == NULL);
2363 op->ops.generic = (op_func) func;
2367 * Create a new x87 simulator.
2369 * @param sim a simulator handle, will be initialized
2370 * @param irg the current graph
2372 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
2374 obstack_init(&sim->obst);
2375 sim->blk_states = pmap_create();
/* liveness cache is indexed by node index, hence sized by last index */
2376 sim->n_idx = get_irg_last_idx(irg);
2377 sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);
2379 DB((dbg, LEVEL_1, "--------------------------------\n"
2380 "x87 Simulator started for %+F\n", irg));
2382 /* set the generic function pointer of instruction we must simulate */
2383 clear_irp_opcodes_generic_func();
2385 register_sim(op_ia32_Call, sim_Call);
2386 register_sim(op_ia32_vfld, sim_fld);
2387 register_sim(op_ia32_vfild, sim_fild);
2388 register_sim(op_ia32_vfld1, sim_fld1);
2389 register_sim(op_ia32_vfldz, sim_fldz);
2390 register_sim(op_ia32_vfadd, sim_fadd);
2391 register_sim(op_ia32_vfsub, sim_fsub);
2392 register_sim(op_ia32_vfmul, sim_fmul);
2393 register_sim(op_ia32_vfdiv, sim_fdiv);
2394 register_sim(op_ia32_vfprem, sim_fprem);
2395 register_sim(op_ia32_vfabs, sim_fabs);
2396 register_sim(op_ia32_vfchs, sim_fchs);
2397 register_sim(op_ia32_vfist, sim_fist);
2398 register_sim(op_ia32_vfisttp, sim_fisttp);
2399 register_sim(op_ia32_vfst, sim_fst);
2400 register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
2401 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
2402 register_sim(op_ia32_vFucomi, sim_Fucom);
/* backend (be_*) pseudo instructions also need simulation */
2403 register_sim(op_be_Copy, sim_Copy);
2404 register_sim(op_be_Spill, sim_Spill);
2405 register_sim(op_be_Reload, sim_Reload);
2406 register_sim(op_be_Return, sim_Return);
2407 register_sim(op_be_Perm, sim_Perm);
2408 register_sim(op_be_Keep, sim_Keep);
2409 register_sim(op_be_Barrier, sim_Barrier);
2410 } /* x87_init_simulator */
2413 * Destroy a x87 simulator.
2415 * @param sim the simulator handle
2417 static void x87_destroy_simulator(x87_simulator *sim)
2419 pmap_destroy(sim->blk_states);
/* sim->live was obstack-allocated, so freeing the obstack releases it too */
2420 obstack_free(&sim->obst, NULL);
2421 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2422 } /* x87_destroy_simulator */
2425 * Pre-block walker: calculate the liveness information for the block
2426 * and store it into the sim->live cache.
2428 static void update_liveness_walker(ir_node *block, void *data)
/* 'data' is the x87_simulator handle passed through the block walker */
2430 x87_simulator *sim = data;
2431 update_liveness(sim, block);
2432 } /* update_liveness_walker */
/*
 * Public entry point: run the x87 simulation over the whole graph,
 * starting from the start block with an empty stack and processing
 * blocks from a worklist until it is drained.
 */
2434 void x87_simulate_graph(be_irg_t *birg)
2436 /* TODO improve code quality (less executed fxch) by using execfreqs */
2438 ir_node *block, *start_block;
2439 blk_state *bl_state;
2441 ir_graph *irg = be_get_birg_irg(birg);
2443 /* create the simulator */
2444 x87_init_simulator(&sim, irg);
2446 start_block = get_irg_start_block(irg);
2447 bl_state = x87_get_bl_state(&sim, start_block);
2449 /* start with the empty state */
2450 bl_state->begin = empty;
2453 sim.worklist = new_waitq();
2454 waitq_put(sim.worklist, start_block);
2456 be_assure_liveness(birg);
2457 sim.lv = be_get_birg_liveness(birg);
2458 // sim.lv = be_liveness(be_get_birg_irg(birg));
2459 be_liveness_assure_sets(sim.lv);
2461 /* Calculate the liveness for all nodes. We must precalculate this info,
2462 * because the simulator adds new nodes (possible before Phi nodes) which
2463 * would let a lazy calculation fail.
2464 * On the other hand we reduce the computation amount due to
2465 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2467 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
/* drain the worklist; successors are re-queued by x87_simulate_block */
2471 block = waitq_get(sim.worklist);
2472 x87_simulate_block(&sim, block);
2473 } while (! waitq_empty(sim.worklist));
2476 del_waitq(sim.worklist);
2477 x87_destroy_simulator(&sim);
2478 } /* x87_simulate_graph */
/* One-time module initialization: register the debug handle used by the
   DB()/DEBUG_ONLY() tracing in this file. */
2480 void ia32_init_x87(void)
2482 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
2483 } /* ia32_init_x87 */