2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
34 #include "iredges_t.h"
46 #include "../belive_t.h"
47 #include "../besched_t.h"
48 #include "../benode_t.h"
49 #include "bearch_ia32_t.h"
50 #include "ia32_new_nodes.h"
51 #include "gen_ia32_new_nodes.h"
52 #include "gen_ia32_regalloc_if.h"
54 #include "ia32_architecture.h"
/* Wrap a stack-slot index into the x87 register window.
 * Masking with (N_x87_REGS - 1) implements "mod N_x87_REGS" and is only
 * correct because N_x87_REGS is a power of two. */
61 #define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
63 /** the debug handle */
64 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
66 /* Forward declaration. */
67 typedef struct _x87_simulator x87_simulator;
70 * An exchange template.
71 * Note that our virtual functions have the same inputs
72 * and attributes as the real ones, so we can simply exchange
74 * Further, x87 supports inverse instructions, so we can handle them.
76 typedef struct _exchange_tmpl {
77 ir_op *normal_op; /**< the normal one */
78 ir_op *reverse_op; /**< the reverse one if exists */
79 ir_op *normal_pop_op; /**< the normal one with tos pop */
80 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
84 * An entry on the simulated x87 stack.
86 typedef struct _st_entry {
87 int reg_idx; /**< the virtual register index of this stack value */
88 ir_node *node; /**< the node that produced this value */
94 typedef struct _x87_state {
95 st_entry st[N_x87_REGS]; /**< the register stack */
96 int depth; /**< the current stack depth */
97 int tos; /**< position of the tos */
98 x87_simulator *sim; /**< The simulator. */
101 /** An empty state, used for blocks without fp instructions. */
/* Shared read-only template state: depth 0, tos 0, no simulator attached. */
102 static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
103 static x87_state *empty = (x87_state *)&_empty;
106 NO_NODE_ADDED = 0, /**< No node was added. */
107 NODE_ADDED = 1 /**< A node was added by the simulator in the schedule. */
111 * The type of an instruction simulator function.
113 * @param state the x87 state
114 * @param n the node to be simulated
116 * @return NODE_ADDED if a node was added AFTER n in schedule,
119 typedef int (*sim_func)(x87_state *state, ir_node *n);
122 * A block state: Every block has a x87 state at the beginning and at the end.
124 typedef struct _blk_state {
125 x87_state *begin; /**< state at the begin or NULL if not assigned */
126 x87_state *end; /**< state at the end or NULL if not assigned */
129 #define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
131 /** liveness bitset for vfp registers. */
/* One bit per virtual fp register; 8 bits cover vf0..vf7 (see vfp_dump_live). */
132 typedef unsigned char vfp_liveness;
137 struct _x87_simulator {
138 struct obstack obst; /**< An obstack for fast allocating. */
139 pmap *blk_states; /**< Map blocks to states. */
140 be_lv_t *lv; /**< intrablock liveness. */
141 vfp_liveness *live; /**< Liveness information, indexed by node idx (filled by update_liveness). */
142 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
143 waitq *worklist; /**< Worklist of blocks that must be processed. */
144 ia32_isa_t *isa; /**< the ISA object */
148 * Returns the current stack depth.
150 * @param state the x87 state
152 * @return the x87 stack depth
 * Pure accessor: the state is not modified.
154 static int x87_get_depth(const x87_state *state)
157 } /* x87_get_depth */
160 * Return the virtual register index at st(pos).
162 * @param state the x87 state
163 * @param pos a stack position
165 * @return the vfp register index that produced the value at st(pos)
167 static int x87_get_st_reg(const x87_state *state, int pos)
169 assert(pos < state->depth);
/* tos + pos is wrapped circularly into the st[] array */
170 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
171 } /* x87_get_st_reg */
175 * Return the node at st(pos).
177 * @param state the x87 state
178 * @param pos a stack position
180 * @return the IR node that produced the value at st(pos)
182 static ir_node *x87_get_st_node(const x87_state *state, int pos)
184 assert(pos < state->depth);
/* same circular addressing as x87_get_st_reg() */
185 return state->st[MASK_TOS(state->tos + pos)].node;
186 } /* x87_get_st_node */
189 * Dump the stack for debugging.
191 * @param state the x87 state
 * Prints entries from the deepest slot down to st(0), marking the tos.
193 static void x87_dump_stack(const x87_state *state)
197 for (i = state->depth - 1; i >= 0; --i) {
198 DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
199 x87_get_st_node(state, i)));
201 DB((dbg, LEVEL_2, "<-- TOS\n"));
202 } /* x87_dump_stack */
203 #endif /* DEBUG_libfirm */
206 * Set a virtual register to st(pos).
208 * @param state the x87 state
209 * @param reg_idx the vfp register index that should be set
210 * @param node the IR node that produces the value of the vfp register
211 * @param pos the stack position where the new value should be entered
213 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
215 assert(0 < state->depth);
/* overwrite the slot in place; the stack depth is unchanged */
216 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
217 state->st[MASK_TOS(state->tos + pos)].node = node;
219 DB((dbg, LEVEL_2, "After SET_REG: "));
220 DEBUG_ONLY(x87_dump_stack(state));
224 * Set the tos virtual register.
226 * @param state the x87 state
227 * @param reg_idx the vfp register index that should be set
228 * @param node the IR node that produces the value of the vfp register
 * Convenience wrapper for x87_set_st() at position 0 (the tos).
230 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node)
232 x87_set_st(state, reg_idx, node, 0);
236 * Swap st(0) with st(pos).
238 * @param state the x87 state
239 * @param pos the stack position to change the tos with
241 static void x87_fxch(x87_state *state, int pos)
244 assert(pos < state->depth);
/* three-way swap of the two st_entry slots via a temporary */
246 entry = state->st[MASK_TOS(state->tos + pos)];
247 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
248 state->st[MASK_TOS(state->tos)] = entry;
250 DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
254 * Convert a virtual register to the stack index.
256 * @param state the x87 state
257 * @param reg_idx the register vfp index
259 * @return the stack position where the register is stacked
260 * or -1 if the virtual register was not found
262 static int x87_on_stack(const x87_state *state, int reg_idx)
264 int i, tos = state->tos;
/* linear scan over the (at most N_x87_REGS) occupied slots */
266 for (i = 0; i < state->depth; ++i)
267 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
273 * Push a virtual Register onto the stack, double pushed allowed.
275 * @param state the x87 state
276 * @param reg_idx the register vfp index
277 * @param node the node that produces the value of the vfp register
279 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node)
281 assert(state->depth < N_x87_REGS && "stack overrun");
/* the stack grows downwards: decrement tos, then fill the new slot */
284 state->tos = MASK_TOS(state->tos - 1);
285 state->st[state->tos].reg_idx = reg_idx;
286 state->st[state->tos].node = node;
288 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
292 * Push a virtual Register onto the stack, double pushes are NOT allowed.
294 * @param state the x87 state
295 * @param reg_idx the register vfp index
296 * @param node the node that produces the value of the vfp register
299 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
301 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
303 x87_push_dbl(state, reg_idx, node);
307 * Pop a virtual Register from the stack.
309 * @param state the x87 state
311 static void x87_pop(x87_state *state)
313 assert(state->depth > 0 && "stack underrun");
/* shrink the stack: tos moves upwards; the vacated slot becomes stale */
316 state->tos = MASK_TOS(state->tos + 1);
318 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
322 * Empty the fpu stack
324 * @param state the x87 state
 * NOTE(review): the implementation is not fully visible in this excerpt --
 * verify that it resets depth and tos.
326 static void x87_emms(x87_state *state)
333 * Returns the block state of a block.
335 * @param sim the x87 simulator handle
336 * @param block the current block
338 * @return the block state
340 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
342 pmap_entry *entry = pmap_find(sim->blk_states, block);
/* not yet in the map: allocate a fresh, unassigned state on the obstack */
345 blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
346 bl_state->begin = NULL;
347 bl_state->end = NULL;
349 pmap_insert(sim->blk_states, block, bl_state);
353 return PTR_TO_BLKSTATE(entry->value);
354 } /* x87_get_bl_state */
357 * Creates a new x87 state.
359 * @param sim the x87 simulator handle
361 * @return a new x87 state
 * Allocated on the simulator obstack; released together with the obstack.
363 static x87_state *x87_alloc_state(x87_simulator *sim)
365 x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));
369 } /* x87_alloc_state */
374 * @param sim the x87 simulator handle
375 * @param src the x87 state that will be cloned
377 * @return a cloned copy of the src state
379 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
381 x87_state *res = x87_alloc_state(sim);
/* shallow copy is sufficient: st[] holds plain values and node pointers */
383 memcpy(res, src, sizeof(*res));
385 } /* x87_clone_state */
388 * Patch a virtual instruction into a x87 one and return
389 * the node representing the result value.
391 * @param n the IR node to patch
392 * @param op the x87 opcode to patch in
394 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
396 ir_mode *mode = get_irn_mode(n);
401 if (mode == mode_T) {
402 /* patch all Proj's */
403 const ir_edge_t *edge;
405 foreach_out_edge(n, edge) {
406 ir_node *proj = get_edge_src_irn(edge);
408 mode = get_irn_mode(proj);
409 if (mode_is_float(mode)) {
/* float results are retargeted to mode_E */
411 set_irn_mode(proj, mode_E);
415 } else if (mode_is_float(mode))
416 set_irn_mode(n, mode_E);
418 } /* x87_patch_insn */
421 * Returns the first Proj of a mode_T node having a given mode.
423 * @param n the mode_T node
424 * @param m the desired mode of the Proj
425 * @return The first Proj of mode @p m found or NULL.
427 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
429 const ir_edge_t *edge;
431 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
433 foreach_out_edge(n, edge) {
434 ir_node *proj = get_edge_src_irn(edge);
/* first match wins */
435 if (get_irn_mode(proj) == m)
440 } /* get_irn_Proj_for_mode */
443 * Wrap the arch_* function here so we can check for errors.
445 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
447 const arch_register_t *res = arch_get_irn_register(irn);
449 assert(res->reg_class->regs == ia32_vfp_regs);
451 } /* x87_get_irn_register */
/** Same as x87_get_irn_register() but queries a specific output position. */
453 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
456 const arch_register_t *res = arch_irn_get_register(irn, pos);
458 assert(res->reg_class->regs == ia32_vfp_regs);
462 /* -------------- x87 perm --------------- */
465 * Creates a fxch for shuffle.
467 * @param state the x87 state
468 * @param pos parameter for fxch
469 * @param block the block where fxch is inserted
471 * Creates a new fxch node and reroute the user of the old node
474 * @return the fxch node
476 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block)
479 ia32_x87_attr_t *attr;
481 fxch = new_rd_ia32_fxch(NULL, get_irn_irg(block), block);
482 attr = get_ia32_x87_attr(fxch);
/* fxch exchanges st(pos) with st(0) */
483 attr->x87[0] = &ia32_st_regs[pos];
484 attr->x87[2] = &ia32_st_regs[0];
488 x87_fxch(state, pos);
490 } /* x87_fxch_shuffle */
493 * Calculate the necessary permutations to reach dst_state.
495 * These permutations are done with fxch instructions and placed
496 * at the end of the block.
498 * Note that critical edges are removed here, so we need only
499 * a shuffle if the current block has only one successor.
501 * @param sim the simulator handle
502 * @param block the current block
503 * @param state the current x87 stack state, might be modified
504 * @param dst_block the destination block
505 * @param dst_state destination state
509 static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
510 x87_state *state, ir_node *dst_block,
511 const x87_state *dst_state)
513 int i, n_cycles, k, ri;
514 unsigned cycles[4], all_mask;
515 char cycle_idx[4][8];
516 ir_node *fxch, *before, *after;
520 assert(state->depth == dst_state->depth);
522 /* Some mathematics here:
523 If we have a cycle of length n that includes the tos,
524 we need n-1 exchange operations.
525 We can always add the tos and restore it, so we need
526 n+1 exchange operations for a cycle not containing the tos.
527 So, the maximum of needed operations is for a cycle of 7
528 not including the tos == 8.
529 This is the same number of ops we would need for using stores,
530 so exchange is cheaper (we save the loads).
531 On the other hand, we might need an additional exchange
532 in the next block to bring one operand on top, so the
533 number of ops in the first case is identical.
534 Further, no more than 4 cycles can exists (4 x 2).
536 all_mask = (1 << (state->depth)) - 1;
538 for (n_cycles = 0; all_mask; ++n_cycles) {
539 int src_idx, dst_idx;
541 /* find the first free slot */
542 for (i = 0; i < state->depth; ++i) {
543 if (all_mask & (1 << i)) {
544 all_mask &= ~(1 << i);
546 /* check if there are differences here */
547 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
553 /* no more cycles found */
/* a difference found at slot i: collect the whole permutation cycle */
558 cycles[n_cycles] = (1 << i);
559 cycle_idx[n_cycles][k++] = i;
560 for (src_idx = i; ; src_idx = dst_idx) {
561 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
563 if ((all_mask & (1 << dst_idx)) == 0)
566 cycle_idx[n_cycles][k++] = dst_idx;
567 cycles[n_cycles] |= (1 << dst_idx);
568 all_mask &= ~(1 << dst_idx);
570 cycle_idx[n_cycles][k] = -1;
574 /* no permutation needed */
578 /* Hmm: permutation needed */
579 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
580 DEBUG_ONLY(x87_dump_stack(state));
581 DB((dbg, LEVEL_2, " to\n"));
582 DEBUG_ONLY(x87_dump_stack(dst_state));
586 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
587 for (ri = 0; ri < n_cycles; ++ri) {
588 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
589 for (k = 0; cycle_idx[ri][k] != -1; ++k)
590 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
591 DB((dbg, LEVEL_2, "\n"));
598 * Find the place node must be insert.
599 * We have only one successor block, so the last instruction should
602 before = sched_last(block);
603 assert(is_cfop(before));
605 /* now do the permutations */
606 for (ri = 0; ri < n_cycles; ++ri) {
607 if ((cycles[ri] & 1) == 0) {
608 /* this cycle does not include the tos */
609 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
611 sched_add_after(after, fxch);
613 sched_add_before(before, fxch);
616 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
617 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
619 sched_add_after(after, fxch);
621 sched_add_before(before, fxch);
/* restore the tos that was temporarily drawn into the cycle */
624 if ((cycles[ri] & 1) == 0) {
625 /* this cycle does not include the tos */
626 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
627 sched_add_after(after, fxch);
634 * Create a fxch node before another node.
636 * @param state the x87 state
637 * @param n the node after the fxch
638 * @param pos exchange st(pos) with st(0)
642 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
645 ia32_x87_attr_t *attr;
646 ir_graph *irg = get_irn_irg(n);
647 ir_node *block = get_nodes_block(n);
/* update the simulated stack first, then materialize the instruction */
649 x87_fxch(state, pos);
651 fxch = new_rd_ia32_fxch(NULL, irg, block);
652 attr = get_ia32_x87_attr(fxch);
653 attr->x87[0] = &ia32_st_regs[pos];
654 attr->x87[2] = &ia32_st_regs[0];
658 sched_add_before(n, fxch);
659 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
661 } /* x87_create_fxch */
664 * Create a fpush before node n.
666 * @param state the x87 state
667 * @param n the node after the fpush
668 * @param pos push st(pos) on stack
669 * @param op_idx replace input op_idx of n with the fpush result
671 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx)
673 ir_node *fpush, *pred = get_irn_n(n, op_idx);
674 ia32_x87_attr_t *attr;
675 const arch_register_t *out = x87_get_irn_register(pred);
/* double push is intended here: the same value then lives twice on the stack */
677 x87_push_dbl(state, arch_register_get_index(out), pred);
679 fpush = new_rd_ia32_fpush(NULL, get_irn_irg(n), get_nodes_block(n));
680 attr = get_ia32_x87_attr(fpush);
681 attr->x87[0] = &ia32_st_regs[pos];
682 attr->x87[2] = &ia32_st_regs[0];
685 sched_add_before(n, fpush);
687 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
688 } /* x87_create_fpush */
691 * Create a fpop before node n.
693 * @param state the x87 state
694 * @param n the node after the fpop
695 * @param num pop 1 or 2 values
697 * @return the fpop node
699 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
701 ir_node *fpop = NULL;
702 ia32_x87_attr_t *attr;
/* emit ffreep instead of fpop when the target configuration asks for it */
707 if (ia32_cg_config.use_ffreep)
708 fpop = new_rd_ia32_ffreep(NULL, get_irn_irg(n), get_nodes_block(n));
710 fpop = new_rd_ia32_fpop(NULL, get_irn_irg(n), get_nodes_block(n));
711 attr = get_ia32_x87_attr(fpop);
712 attr->x87[0] = &ia32_st_regs[0];
713 attr->x87[1] = &ia32_st_regs[0];
714 attr->x87[2] = &ia32_st_regs[0];
717 sched_add_before(n, fpop);
718 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
723 } /* x87_create_fpop */
726 * Creates an fldz before node n
728 * @param state the x87 state
729 * @param n the node after the fldz
 * @param regidx the vfp register index recorded for the pushed value
731 * @return the fldz node
733 static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx)
735 ir_graph *irg = get_irn_irg(n);
736 ir_node *block = get_nodes_block(n);
739 fldz = new_rd_ia32_fldz(NULL, irg, block, mode_E);
741 sched_add_before(n, fldz);
742 DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));
/* record the freshly loaded value under regidx on the simulated stack */
745 x87_push(state, regidx, fldz);
750 /* --------------------------------- liveness ------------------------------------------ */
753 * The liveness transfer function.
754 * Updates a live set over a single step from a given node to its predecessor.
755 * Everything defined at the node is removed from the set, the uses of the node get inserted.
757 * @param irn The node at which liveness should be computed.
758 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
759 * the registers live after irn.
761 * @return The live bitset.
763 static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
766 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
768 if (get_irn_mode(irn) == mode_T) {
769 const ir_edge_t *edge;
771 foreach_out_edge(irn, edge) {
772 ir_node *proj = get_edge_src_irn(edge);
774 if (arch_irn_consider_in_reg_alloc(cls, proj)) {
775 const arch_register_t *reg = x87_get_irn_register(proj);
/* definition: kill the register from the live set */
776 live &= ~(1 << arch_register_get_index(reg));
781 if (arch_irn_consider_in_reg_alloc(cls, irn)) {
782 const arch_register_t *reg = x87_get_irn_register(irn);
783 live &= ~(1 << arch_register_get_index(reg));
786 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
787 ir_node *op = get_irn_n(irn, i);
789 if (mode_is_float(get_irn_mode(op)) &&
790 arch_irn_consider_in_reg_alloc(cls, op)) {
791 const arch_register_t *reg = x87_get_irn_register(op);
/* use: the operand's register becomes live */
792 live |= 1 << arch_register_get_index(reg);
796 } /* vfp_liveness_transfer */
799 * Put all live virtual registers at the end of a block into a bitset.
801 * @param sim the simulator handle (provides the liveness information)
803 * @param block the block
805 * @return The live bitset at the end of this block
807 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
810 vfp_liveness live = 0;
811 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
812 const be_lv_t *lv = sim->lv;
814 be_lv_foreach(lv, block, be_lv_state_end, i) {
815 const arch_register_t *reg;
816 const ir_node *node = be_lv_get_irn(lv, block, i);
/* only registers of the vfp class participate in the bitset */
817 if (!arch_irn_consider_in_reg_alloc(cls, node))
820 reg = x87_get_irn_register(node);
821 live |= 1 << arch_register_get_index(reg);
825 } /* vfp_liveness_end_of_block */
827 /** get the register mask from an arch_register */
/* single-bit mask matching the vfp_liveness bitset layout */
828 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
831 * Return a bitset of argument registers which are live at the end of a node.
833 * @param sim the simulator handle
834 * @param pos the node
835 * @param kill kill mask for the output registers
837 * @return The live bitset.
839 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
841 unsigned idx = get_irn_idx(pos);
843 assert(idx < sim->n_idx);
/* cached per-node liveness minus the registers the node itself kills */
844 return sim->live[idx] & ~kill;
845 } /* vfp_live_args_after */
848 * Calculate the liveness for a whole block and cache it.
850 * @param sim the simulator handle
852 * @param block the block
854 static void update_liveness(x87_simulator *sim, ir_node *block)
856 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
860 /* now iterate through the block backward and cache the results */
861 sched_foreach_reverse(block, irn) {
862 /* stop at the first Phi: this produces the live-in */
866 idx = get_irn_idx(irn);
/* sim->live[idx] stores the set live AFTER irn (cached before the transfer step) */
867 sim->live[idx] = live;
869 live = vfp_liveness_transfer(irn, live);
871 idx = get_irn_idx(block);
872 sim->live[idx] = live;
873 } /* update_liveness */
876 * Returns true if a register is live in a set.
878 * @param reg_idx the vfp register index
879 * @param live a live bitset
 * Evaluates to the selected bit itself (nonzero when live), not to 0/1.
881 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
885 * Dump liveness info.
887 * @param live the live bitset
 * One bit per vf register, vf0..vf7.
889 static void vfp_dump_live(vfp_liveness live)
893 DB((dbg, LEVEL_2, "Live after: "));
894 for (i = 0; i < 8; ++i) {
895 if (live & (1 << i)) {
896 DB((dbg, LEVEL_2, "vf%d ", i));
899 DB((dbg, LEVEL_2, "\n"));
900 } /* vfp_dump_live */
901 #endif /* DEBUG_libfirm */
/* --------------------------------- simulators ---------------------------------------- */

/** Exchange two int lvalues in place (statement-safe via do/while(0)). */
#define XCHG(a, b)               \
	do {                         \
		int xchg_tmp_ = (a);     \
		(a) = (b);               \
		(b) = xchg_tmp_;         \
	} while (0)
917 * Simulate a virtual binop.
919 * @param state the x87 state
920 * @param n the node that should be simulated (and patched)
921 * @param tmpl the template containing the 4 possible x87 opcodes
923 * @return NO_NODE_ADDED
925 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl)
927 int op2_idx = 0, op1_idx;
928 int out_idx, do_pop = 0;
929 ia32_x87_attr_t *attr;
931 ir_node *patched_insn;
933 x87_simulator *sim = state->sim;
934 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
935 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
936 const arch_register_t *op1_reg = x87_get_irn_register(op1);
937 const arch_register_t *op2_reg = x87_get_irn_register(op2);
938 const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
939 int reg_index_1 = arch_register_get_index(op1_reg);
940 int reg_index_2 = arch_register_get_index(op2_reg);
941 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
945 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
946 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
947 arch_register_get_name(out)));
948 DEBUG_ONLY(vfp_dump_live(live));
949 DB((dbg, LEVEL_1, "Stack before: "));
950 DEBUG_ONLY(x87_dump_stack(state));
952 if (reg_index_1 == REG_VFP_UKNWN) {
956 op1_idx = x87_on_stack(state, reg_index_1);
957 assert(op1_idx >= 0);
958 op1_live_after = is_vfp_live(arch_register_get_index(op1_reg), live);
961 attr = get_ia32_x87_attr(n);
962 permuted = attr->attr.data.ins_permuted;
/* two cases: second operand is a vfp register, or an address mode */
964 if (reg_index_2 != REG_VFP_NOREG) {
967 if (reg_index_2 == REG_VFP_UKNWN) {
971 /* second operand is a vfp register */
972 op2_idx = x87_on_stack(state, reg_index_2);
973 assert(op2_idx >= 0);
975 = is_vfp_live(arch_register_get_index(op2_reg), live);
978 if (op2_live_after) {
979 /* Second operand is live. */
981 if (op1_live_after) {
982 /* Both operands are live: push the first one.
983 This works even for op1 == op2. */
984 x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
985 /* now do fxxx (tos=tos X op) */
989 dst = tmpl->normal_op;
991 /* Second live, first operand is dead here, bring it to tos. */
993 x87_create_fxch(state, n, op1_idx);
998 /* now do fxxx (tos=tos X op) */
1000 dst = tmpl->normal_op;
1003 /* Second operand is dead. */
1004 if (op1_live_after) {
1005 /* First operand is live: bring second to tos. */
1007 x87_create_fxch(state, n, op2_idx);
1012 /* now do fxxxr (tos = op X tos) */
1014 dst = tmpl->reverse_op;
1016 /* Both operands are dead here, pop them from the stack. */
1019 /* Both are identically and on tos, no pop needed. */
1020 /* here fxxx (tos = tos X tos) */
1021 dst = tmpl->normal_op;
1024 /* now do fxxxp (op = op X tos, pop) */
1025 dst = tmpl->normal_pop_op;
1029 } else if (op1_idx == 0) {
1030 assert(op1_idx != op2_idx);
1031 /* now do fxxxrp (op = tos X op, pop) */
1032 dst = tmpl->reverse_pop_op;
1036 /* Bring the second on top. */
1037 x87_create_fxch(state, n, op2_idx);
1038 if (op1_idx == op2_idx) {
1039 /* Both are identically and on tos now, no pop needed. */
1042 /* use fxxx (tos = tos X tos) */
1043 dst = tmpl->normal_op;
1046 /* op2 is on tos now */
1048 /* use fxxxp (op = op X tos, pop) */
1049 dst = tmpl->normal_pop_op;
1057 /* second operand is an address mode */
1058 if (op1_live_after) {
1059 /* first operand is live: push it here */
1060 x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
1063 /* first operand is dead: bring it to tos */
1065 x87_create_fxch(state, n, op1_idx);
1070 /* use fxxx (tos = tos X mem) */
1071 dst = permuted ? tmpl->reverse_op : tmpl->normal_op;
/* patch the opcode, then record the result on the simulated stack */
1075 patched_insn = x87_patch_insn(n, dst);
1076 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
1081 /* patch the operation */
1082 attr->x87[0] = op1_reg = &ia32_st_regs[op1_idx];
1083 if (reg_index_2 != REG_VFP_NOREG) {
1084 attr->x87[1] = op2_reg = &ia32_st_regs[op2_idx];
1086 attr->x87[2] = out = &ia32_st_regs[out_idx];
1088 if (reg_index_2 != REG_VFP_NOREG) {
1089 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1090 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
1091 arch_register_get_name(out)));
1093 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1094 arch_register_get_name(op1_reg),
1095 arch_register_get_name(out)));
1098 return NO_NODE_ADDED;
1102 * Simulate a virtual Unop.
1104 * @param state the x87 state
1105 * @param n the node that should be simulated (and patched)
1106 * @param op the x87 opcode that will replace n's opcode
1108 * @return NO_NODE_ADDED
1110 static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
1112 int op1_idx, out_idx;
1113 x87_simulator *sim = state->sim;
1114 const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, UNOP_IDX));
1115 const arch_register_t *out = x87_get_irn_register(n);
1116 ia32_x87_attr_t *attr;
1117 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1119 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1120 DEBUG_ONLY(vfp_dump_live(live));
1122 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1124 if (is_vfp_live(arch_register_get_index(op1), live)) {
1125 /* push the operand here */
1126 x87_create_fpush(state, n, op1_idx, UNOP_IDX);
1130 /* operand is dead, bring it to tos */
1132 x87_create_fxch(state, n, op1_idx);
/* the unop operates on the tos; its result replaces the tos entry */
1137 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1139 attr = get_ia32_x87_attr(n);
1140 attr->x87[0] = op1 = &ia32_st_regs[0];
1141 attr->x87[2] = out = &ia32_st_regs[0];
1142 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1144 return NO_NODE_ADDED;
1148 * Simulate a virtual Load instruction.
1150 * @param state the x87 state
1151 * @param n the node that should be simulated (and patched)
1152 * @param op the x87 opcode that will replace n's opcode
 * @param res_pos the proj number of the load result
1154 * @return NO_NODE_ADDED
1156 static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
1158 const arch_register_t *out = x87_irn_get_register(n, res_pos);
1159 ia32_x87_attr_t *attr;
1161 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
/* a load always pushes its result onto the x87 stack */
1162 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1163 assert(out == x87_irn_get_register(n, res_pos));
1164 attr = get_ia32_x87_attr(n);
1165 attr->x87[2] = out = &ia32_st_regs[0];
1166 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1168 return NO_NODE_ADDED;
1172 * Rewire all users of @p old_val to @p new_val iff they are scheduled after @p store.
1174 * @param store The store
1175 * @param old_val The former value
1176 * @param new_val The new value
1178 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
1180 const ir_edge_t *edge, *ne;
/* the safe edge iterator is required: set_irn_n() below modifies the edges */
1182 foreach_out_edge_safe(old_val, edge, ne) {
1183 ir_node *user = get_edge_src_irn(edge);
1185 if (! user || user == store)
1188 /* if the user is scheduled after the store: rewire */
1189 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1191 /* find the input of the user pointing to the old value */
1192 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1193 if (get_irn_n(user, i) == old_val)
1194 set_irn_n(user, i, new_val);
1198 } /* collect_and_rewire_users */
1201 * Simulate a virtual Store.
1203 * @param state the x87 state
1204 * @param n the node that should be simulated (and patched)
1205 * @param op the x87 store opcode
1206 * @param op_p the x87 store and pop opcode
 * @return NO_NODE_ADDED or NODE_ADDED (a reload may be scheduled after n)
1208 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p)
1210 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1211 const arch_register_t *op2 = x87_get_irn_register(val);
1212 unsigned live = vfp_live_args_after(state->sim, n, 0);
1213 int insn = NO_NODE_ADDED;
1214 ia32_x87_attr_t *attr;
1215 int op2_reg_idx, op2_idx, depth;
1216 int live_after_node;
1219 op2_reg_idx = arch_register_get_index(op2);
1220 if (op2_reg_idx == REG_VFP_UKNWN) {
1221 /* just take any value from stack */
1222 if (state->depth > 0) {
1224 DEBUG_ONLY(op2 = NULL);
1225 live_after_node = 1;
1227 /* produce a new value which we will consume immediately */
1228 x87_create_fldz(state, n, op2_reg_idx);
1229 live_after_node = 0;
1230 op2_idx = x87_on_stack(state, op2_reg_idx);
1231 assert(op2_idx >= 0);
1234 op2_idx = x87_on_stack(state, op2_reg_idx);
1235 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1236 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1237 assert(op2_idx >= 0);
1240 mode = get_ia32_ls_mode(n);
1241 depth = x87_get_depth(state);
1243 if (live_after_node) {
1245 Problem: fst doesn't support mode_E (spills), only fstp does
1247 - stack not full: push value and fstp
1248 - stack full: fstp value and load again
1249 Note that we cannot test on mode_E, because floats might be 96bit ...
1251 if (get_mode_size_bits(mode) > 64 || mode == mode_Ls) {
1252 if (depth < N_x87_REGS) {
1253 /* ok, we have a free register: push + fstp */
1254 x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
1256 x87_patch_insn(n, op_p);
1258 ir_node *vfld, *mem, *block, *rproj, *mproj;
1261 /* stack full here: need fstp + load */
1263 x87_patch_insn(n, op_p);
1265 block = get_nodes_block(n);
1266 irg = get_irn_irg(n);
1267 vfld = new_rd_ia32_vfld(NULL, irg, block, get_irn_n(n, 0), get_irn_n(n, 1), new_NoMem(), get_ia32_ls_mode(n));
1269 /* copy all attributes */
1270 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n))
/*
 * Generator macros for the trivial simulator callbacks.
 *
 * _GEN_BINOP(op, rev)  defines sim_<op>() forwarding to sim_binop() with an
 *                      exchange template that holds the normal, reverse,
 *                      pop and reverse-pop opcodes.
 * GEN_BINOP(op)        binop whose reverse form is the operation itself
 *                      (commutative case).
 * GEN_BINOPR(op)       binop with a distinct reversed opcode op##r.
 * GEN_LOAD(op)         defines sim_<op>() forwarding to sim_load().
 * GEN_UNOP(op)         defines sim_<op>() forwarding to sim_unop().
 * GEN_STORE(op)        defines sim_<op>() forwarding to sim_store() with the
 *                      normal and the pop variant of the store opcode.
 *
 * NOTE(review): this extract is missing the closing lines of the
 * function-defining macros (original lines 1327/1335/1340/1345) --
 * do not edit these without the complete file.
 */
1323 #define _GEN_BINOP(op, rev) \
1324 static int sim_##op(x87_state *state, ir_node *n) { \
1325 exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
1326 return sim_binop(state, n, &tmpl); \
1329 #define GEN_BINOP(op) _GEN_BINOP(op, op)
1330 #define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
1332 #define GEN_LOAD(op) \
1333 static int sim_##op(x87_state *state, ir_node *n) { \
1334 return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
1337 #define GEN_UNOP(op) \
1338 static int sim_##op(x87_state *state, ir_node *n) { \
1339 return sim_unop(state, n, op_ia32_##op); \
1342 #define GEN_STORE(op) \
1343 static int sim_##op(x87_state *state, ir_node *n) { \
1344 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
/**
1366 * Simulate a virtual fisttp.
1368 * @param state the x87 state
1369 * @param n the node that should be simulated (and patched)
 *
 * @return presumably 'insn' (NO_NODE_ADDED) -- the return statement lies
 *         outside this extract, TODO confirm against the full file
 */
1371 static int sim_fisttp(x87_state *state, ir_node *n)
1373 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1374 const arch_register_t *op2 = x87_get_irn_register(val);
1375 int insn = NO_NODE_ADDED;
1376 ia32_x87_attr_t *attr;
1377 int op2_reg_idx, op2_idx, depth;
1379 op2_reg_idx = arch_register_get_index(op2);
/* the value to store may be the "unknown" pseudo register: then any
 * stack value will do, or a fldz is created so there is something
 * consumable on the stack */
1380 if (op2_reg_idx == REG_VFP_UKNWN) {
1381 /* just take any value from stack */
1382 if (state->depth > 0) {
1384 DEBUG_ONLY(op2 = NULL);
1386 /* produce a new value which we will consume immediately */
1387 x87_create_fldz(state, n, op2_reg_idx);
1388 op2_idx = x87_on_stack(state, op2_reg_idx);
1389 assert(op2_idx >= 0);
1392 op2_idx = x87_on_stack(state, op2_reg_idx);
1393 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1394 assert(op2_idx >= 0);
1397 depth = x87_get_depth(state);
1399 /* Note: although the value is still live here, it is destroyed because
1400 of the pop. The register allocator is aware of that and introduced a copy
1401 if the value must be alive. */
1403 /* we can only store the tos to memory */
/* bring the operand to st(0) first; fisttp then stores and pops it */
1405 x87_create_fxch(state, n, op2_idx);
1408 x87_patch_insn(n, op_ia32_fisttp);
1410 attr = get_ia32_x87_attr(n);
/* after the fxch the operand lives in st(0) */
1411 attr->x87[1] = op2 = &ia32_st_regs[0];
1412 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/**
 * Simulate a virtual FtstFnstsw (test st(0) and store the status word).
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 */
1417 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1419 x87_simulator *sim = state->sim;
1420 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1421 ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1422 const arch_register_t *reg1 = x87_get_irn_register(op1_node);
1423 int reg_index_1 = arch_register_get_index(reg1);
1424 int op1_idx = x87_on_stack(state, reg_index_1);
1425 unsigned live = vfp_live_args_after(sim, n, 0);
1427 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, arch_register_get_name(reg1)));
1428 DEBUG_ONLY(vfp_dump_live(live));
1429 DB((dbg, LEVEL_1, "Stack before: "));
1430 DEBUG_ONLY(x87_dump_stack(state));
1431 assert(op1_idx >= 0);
/* ftst only tests st(0), so the operand must be moved to the top */
1434 /* bring the value to tos */
1435 x87_create_fxch(state, n, op1_idx);
/* NOTE(review): op1_idx is presumably reset to 0 after the fxch in the
 * lines missing from this extract (orig. 1436-1438) -- confirm before
 * relying on the ia32_st_regs[op1_idx] access below */
1439 /* patch the operation */
1440 x87_patch_insn(n, op_ia32_FtstFnstsw);
1441 reg1 = &ia32_st_regs[op1_idx];
1442 attr->x87[0] = reg1;
1443 attr->x87[1] = NULL;
1444 attr->x87[2] = NULL;
/* if the tested value is dead afterwards, free its stack slot */
1446 if (!is_vfp_live(reg_index_1, live)) {
1447 x87_create_fpop(state, sched_next(n), 1);
1451 return NO_NODE_ADDED;
/**
 * Simulate a virtual Fucom/FucomFnstsw/Fucomi (floating point compare).
 *
 * Chooses fxch/pop sequences so that the two compare operands end up in
 * st(0)/st(i) as the real instruction requires, pops operands that are
 * dead after the compare, and patches the node to the matching real
 * opcode (plain / pop / double-pop variant).
 *
1455 * @param state the x87 state
1456 * @param n the node that should be simulated (and patched)
 *
 * @return node_added (NODE_ADDED when an extra fpop was scheduled)
 */
1458 static int sim_Fucom(x87_state *state, ir_node *n)
1462 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1464 x87_simulator *sim = state->sim;
1465 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1466 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1467 const arch_register_t *op1 = x87_get_irn_register(op1_node);
1468 const arch_register_t *op2 = x87_get_irn_register(op2_node);
1469 int reg_index_1 = arch_register_get_index(op1);
1470 int reg_index_2 = arch_register_get_index(op2);
1471 unsigned live = vfp_live_args_after(sim, n, 0);
/* 'permuted' tracks whether operand order got swapped by our fxch
 * games; the flag is written back into the attributes at the end */
1472 int permuted = attr->attr.data.ins_permuted;
1475 int node_added = NO_NODE_ADDED;
1477 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1478 arch_register_get_name(op1), arch_register_get_name(op2)));
1479 DEBUG_ONLY(vfp_dump_live(live));
1480 DB((dbg, LEVEL_1, "Stack before: "));
1481 DEBUG_ONLY(x87_dump_stack(state));
1483 op1_idx = x87_on_stack(state, reg_index_1);
1484 assert(op1_idx >= 0);
1486 /* BEWARE: check for comp a,a cases, they might happen */
1487 if (reg_index_2 != REG_VFP_NOREG) {
1488 /* second operand is a vfp register */
1489 op2_idx = x87_on_stack(state, reg_index_2);
1490 assert(op2_idx >= 0);
1492 if (is_vfp_live(reg_index_2, live)) {
1493 /* second operand is live */
1495 if (is_vfp_live(reg_index_1, live)) {
/* both live: no pops allowed, only bring one operand to tos */
1496 /* both operands are live */
1499 /* res = tos X op */
1500 } else if (op2_idx == 0) {
1501 /* res = op X tos */
1502 permuted = !permuted;
1505 /* bring the first one to tos */
1506 x87_create_fxch(state, n, op1_idx);
1510 /* res = tos X op */
1513 /* second live, first operand is dead here, bring it to tos.
1514 This means further, op1_idx != op2_idx. */
1515 assert(op1_idx != op2_idx);
1517 x87_create_fxch(state, n, op1_idx);
1522 /* res = tos X op, pop */
1526 /* second operand is dead */
1527 if (is_vfp_live(reg_index_1, live)) {
1528 /* first operand is live: bring second to tos.
1529 This means further, op1_idx != op2_idx. */
1530 assert(op1_idx != op2_idx);
1532 x87_create_fxch(state, n, op2_idx);
1537 /* res = op X tos, pop */
1539 permuted = !permuted;
1542 /* both operands are dead here, check first for identity. */
1543 if (op1_idx == op2_idx) {
1544 /* identically, one pop needed */
1546 x87_create_fxch(state, n, op1_idx);
1550 /* res = tos X op, pop */
1553 /* different, move them to st and st(1) and pop both.
1554 The tricky part is to get one into st(1).*/
1555 else if (op2_idx == 1) {
1556 /* good, second operand is already in the right place, move the first */
1558 /* bring the first on top */
1559 x87_create_fxch(state, n, op1_idx);
1560 assert(op2_idx != 0);
1563 /* res = tos X op, pop, pop */
1565 } else if (op1_idx == 1) {
1566 /* good, first operand is already in the right place, move the second */
1568 /* bring the first on top */
1569 x87_create_fxch(state, n, op2_idx);
1570 assert(op1_idx != 0);
1573 /* res = op X tos, pop, pop */
1574 permuted = !permuted;
1578 /* if one is already the TOS, we need two fxch */
1580 /* first one is TOS, move to st(1) */
1581 x87_create_fxch(state, n, 1);
1582 assert(op2_idx != 1);
1584 x87_create_fxch(state, n, op2_idx);
1586 /* res = op X tos, pop, pop */
1588 permuted = !permuted;
1590 } else if (op2_idx == 0) {
1591 /* second one is TOS, move to st(1) */
1592 x87_create_fxch(state, n, 1);
1593 assert(op1_idx != 1);
1595 x87_create_fxch(state, n, op1_idx);
1597 /* res = tos X op, pop, pop */
1600 /* none of them is either TOS or st(1), 3 fxch needed */
1601 x87_create_fxch(state, n, op2_idx);
1602 assert(op1_idx != 0);
1603 x87_create_fxch(state, n, 1);
1605 x87_create_fxch(state, n, op1_idx);
1607 /* res = tos X op, pop, pop */
1614 /* second operand is an address mode */
1615 if (is_vfp_live(reg_index_1, live)) {
1616 /* first operand is live: bring it to TOS */
1618 x87_create_fxch(state, n, op1_idx);
1622 /* first operand is dead: bring it to tos */
1624 x87_create_fxch(state, n, op1_idx);
/* choose the real opcode according to how many operands must be
 * popped ('pops' is computed in lines missing from this extract) */
1631 /* patch the operation */
1632 if (is_ia32_vFucomFnstsw(n)) {
1636 case 0: dst = op_ia32_FucomFnstsw; break;
1637 case 1: dst = op_ia32_FucompFnstsw; break;
1638 case 2: dst = op_ia32_FucomppFnstsw; break;
1639 default: panic("invalid popcount in sim_Fucom");
1642 for (i = 0; i < pops; ++i) {
1645 } else if (is_ia32_vFucomi(n)) {
1647 case 0: dst = op_ia32_Fucomi; break;
1648 case 1: dst = op_ia32_Fucompi; x87_pop(state); break;
/* fucomi has no double-pop form: use the pop form plus an extra fpop */
1650 dst = op_ia32_Fucompi;
1652 x87_create_fpop(state, sched_next(n), 1);
1653 node_added = NODE_ADDED;
1655 default: panic("invalid popcount in sim_Fucom");
1658 panic("invalid operation %+F in sim_FucomFnstsw", n);
1661 x87_patch_insn(n, dst);
/* write the final st(i) assignments into the x87 attributes */
1668 op1 = &ia32_st_regs[op1_idx];
1671 op2 = &ia32_st_regs[op2_idx];
1674 attr->x87[2] = NULL;
1675 attr->attr.data.ins_permuted = permuted;
1678 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1679 arch_register_get_name(op1), arch_register_get_name(op2)));
1681 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1682 arch_register_get_name(op1)));
/**
 * Simulate a be_Keep: pop every kept vfp value that is dead after the Keep.
 *
 * @param state  the x87 state
 * @param node   the Keep node
 *
 * @return presumably node_added (NODE_ADDED when an fpop was inserted);
 *         the return statement is outside this extract -- TODO confirm
 */
1688 static int sim_Keep(x87_state *state, ir_node *node)
1691 const arch_register_t *op_reg;
1696 int node_added = NO_NODE_ADDED;
1698 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1700 arity = get_irn_arity(node);
1701 for (i = 0; i < arity; ++i) {
1702 op = get_irn_n(node, i);
1703 op_reg = arch_get_irn_register(op);
/* only vfp operands are of interest here */
1704 if (arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1707 reg_id = arch_register_get_index(op_reg);
1708 live = vfp_live_args_after(state->sim, node, 0);
/* a value that is on the stack but not live anymore must be popped */
1710 op_stack_idx = x87_on_stack(state, reg_id);
1711 if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live)) {
1712 x87_create_fpop(state, sched_next(node), 1);
1713 node_added = NODE_ADDED;
1717 DB((dbg, LEVEL_1, "Stack after: "));
1718 DEBUG_ONLY(x87_dump_stack(state));
/**
 * Keep a float node artificially alive by attaching a be_Keep to it
 * (used when a value would otherwise lose its last user).
 *
 * @param node  the node to keep alive; must already be scheduled
 */
1723 static void keep_float_node_alive(ir_node *node)
1725 ir_graph *irg = get_irn_irg(node);
1726 ir_node *block = get_nodes_block(node);
1727 const arch_register_class_t *cls = arch_get_irn_reg_class_out(node);
/* 'keep' and the 'in' array are declared/filled in lines missing from
 * this extract (orig. 1728-1731) */
1732 keep = be_new_Keep(cls, irg, block, 1, in);
1734 assert(sched_is_scheduled(node));
1735 sched_add_after(node, keep);
/**
1739 * Create a copy of a node. Recreate the node if it's a constant.
1741 * @param state the x87 state
1742 * @param n the node to be copied
1744 * @return the copy of n
 *
 * The returned node is pushed onto the simulated stack and gets the
 * output register of @p n assigned.
 */
1746 static ir_node *create_Copy(x87_state *state, ir_node *n)
1748 ir_graph *irg = get_irn_irg(n);
1749 dbg_info *n_dbg = get_irn_dbg_info(n);
1750 ir_mode *mode = get_irn_mode(n);
1751 ir_node *block = get_nodes_block(n);
1752 ir_node *pred = get_irn_n(n, 0);
/* constructor for the constant-recreation case; stays NULL for a
 * genuine copy (fpushCopy) */
1753 ir_node *(*cnstr)(dbg_info *, ir_graph *, ir_node *, ir_mode *) = NULL;
1755 const arch_register_t *out;
1756 const arch_register_t *op1;
1757 ia32_x87_attr_t *attr;
1759 /* Do not copy constants, recreate them. */
1760 switch (get_ia32_irn_opcode(pred)) {
1761 case iro_ia32_Unknown_VFP:
1763 cnstr = new_rd_ia32_fldz;
1766 cnstr = new_rd_ia32_fld1;
1768 case iro_ia32_fldpi:
1769 cnstr = new_rd_ia32_fldpi;
1771 case iro_ia32_fldl2e:
1772 cnstr = new_rd_ia32_fldl2e;
1774 case iro_ia32_fldl2t:
1775 cnstr = new_rd_ia32_fldl2t;
1777 case iro_ia32_fldlg2:
1778 cnstr = new_rd_ia32_fldlg2;
1780 case iro_ia32_fldln2:
1781 cnstr = new_rd_ia32_fldln2;
1787 out = x87_get_irn_register(n);
1788 op1 = x87_get_irn_register(pred);
1790 if (cnstr != NULL) {
1791 /* copy a constant */
1792 res = (*cnstr)(n_dbg, irg, block, mode);
1794 x87_push(state, arch_register_get_index(out), res);
1796 attr = get_ia32_x87_attr(res);
1797 attr->x87[2] = &ia32_st_regs[0];
1799 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
/* a real copy: fpushCopy duplicates st(op1_idx) onto the tos */
1801 res = new_rd_ia32_fpushCopy(n_dbg, irg, block, pred, mode);
1803 x87_push(state, arch_register_get_index(out), res);
1805 attr = get_ia32_x87_attr(res);
1806 attr->x87[0] = &ia32_st_regs[op1_idx];
1807 attr->x87[2] = &ia32_st_regs[0];
1809 arch_set_irn_register(res, out);
/**
1815 * Simulate a be_Copy.
1817 * @param state the x87 state
1818 * @param n the node that should be simulated (and patched)
1820 * @return NO_NODE_ADDED
 *
 * If the source value is still live after the Copy, a real duplication
 * (fpushCopy / constant recreation) is emitted; otherwise the Copy is
 * turned into a pure rename (or a pop) on the simulated stack.
 */
1822 static int sim_Copy(x87_state *state, ir_node *n)
1825 const arch_register_t *out;
1826 const arch_register_t *op1;
1827 const arch_register_class_t *cls;
1828 ir_node *node, *next;
1829 ia32_x87_attr_t *attr;
1830 int op1_idx, out_idx;
/* only vfp copies are simulated here, others pass through untouched */
1833 cls = arch_get_irn_reg_class_out(n);
1834 if (cls->regs != ia32_vfp_regs)
1837 pred = get_irn_n(n, 0);
1838 out = x87_get_irn_register(n);
1839 op1 = x87_get_irn_register(pred);
1840 live = vfp_live_args_after(state->sim, n, REGMASK(out));
1842 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1843 arch_register_get_name(op1), arch_register_get_name(out)));
1844 DEBUG_ONLY(vfp_dump_live(live));
1846 /* handle the infamous unknown value */
1847 if (arch_register_get_index(op1) == REG_VFP_UKNWN) {
1848 /* Operand is still live, a real copy. We need here an fpush that can
1849 hold a a register, so use the fpushCopy or recreate constants */
1850 node = create_Copy(state, n);
1852 assert(is_ia32_fldz(node));
1853 next = sched_next(n);
1856 sched_add_before(next, node);
1858 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1859 arch_get_irn_register(node)->name));
1860 return NO_NODE_ADDED;
1863 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1865 if (is_vfp_live(arch_register_get_index(op1), live)) {
1866 ir_node *pred = get_irn_n(n, 0);
1868 /* Operand is still live, a real copy. We need here an fpush that can
1869 hold a a register, so use the fpushCopy or recreate constants */
1870 node = create_Copy(state, n);
1872 /* We have to make sure the old value doesn't go dead (which can happen
1873 * when we recreate constants). As the simulator expected that value in
1874 * the pred blocks. This is unfortunate as removing it would save us 1
1875 * instruction, but we would have to rerun all the simulation to get
1878 next = sched_next(n);
1881 sched_add_before(next, node);
1883 if (get_irn_n_edges(pred) == 0) {
1884 keep_float_node_alive(pred);
1887 DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
1889 out_idx = x87_on_stack(state, arch_register_get_index(out));
1891 if (out_idx >= 0 && out_idx != op1_idx) {
1892 /* Matze: out already on stack? how can this happen? */
1895 /* op1 must be killed and placed where out is */
1897 /* best case, simple remove and rename */
/* the Copy degenerates to a Pop of st(0) */
1898 x87_patch_insn(n, op_ia32_Pop);
1899 attr = get_ia32_x87_attr(n);
1900 attr->x87[0] = op1 = &ia32_st_regs[0];
1903 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1905 /* move op1 to tos, store and pop it */
1907 x87_create_fxch(state, n, op1_idx);
1910 x87_patch_insn(n, op_ia32_Pop);
1911 attr = get_ia32_x87_attr(n);
1912 attr->x87[0] = op1 = &ia32_st_regs[out_idx];
1915 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1917 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1919 /* just a virtual copy */
1920 x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
1921 /* don't remove the node to keep the verifier quiet :),
1922 the emitter won't emit any code for the node */
1925 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1926 exchange(n, get_unop_op(n));
1930 return NO_NODE_ADDED;
/**
1934 * Returns the result proj of the call
 *
 * @param call  an ia32 Call node
 * @return the Proj carrying the vf0 (x87) result, presumably NULL when no
 *         such Proj exists -- the fallthrough return is outside this extract
 */
1936 static ir_node *get_call_result_proj(ir_node *call)
1938 const ir_edge_t *edge;
1940 /* search the result proj */
1941 foreach_out_edge(call, edge) {
1942 ir_node *proj = get_edge_src_irn(edge);
1943 long pn = get_Proj_proj(proj);
1945 if (pn == pn_ia32_Call_vf0) {
1951 } /* get_call_result_proj */
/**
1954 * Simulate a ia32_Call.
1956 * @param state the x87 state
1957 * @param n the node that should be simulated
1959 * @return NO_NODE_ADDED
 *
 * The x87 stack must be empty at a call; if the callee returns a float,
 * that result appears in st(0) and is pushed onto the simulated stack.
 */
1961 static int sim_Call(x87_state *state, ir_node *n)
1963 ir_type *call_tp = get_ia32_call_attr_const(n)->call_tp;
1967 const arch_register_t *reg;
1969 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1971 /* at the begin of a call the x87 state should be empty */
1972 assert(state->depth == 0 && "stack not empty before call");
1974 if (get_method_n_ress(call_tp) <= 0)
1978 * If the called function returns a float, it is returned in st(0).
1979 * This even happens if the return value is NOT used.
1980 * Moreover, only one return result is supported.
1982 res_type = get_method_res_type(call_tp, 0);
1983 mode = get_type_mode(res_type);
/* non-float results live in integer registers, nothing to do then */
1985 if (mode == NULL || !mode_is_float(mode))
1988 resproj = get_call_result_proj(n);
1989 assert(resproj != NULL);
1991 reg = x87_get_irn_register(resproj);
1992 x87_push(state, arch_register_get_index(reg), resproj);
1995 DB((dbg, LEVEL_1, "Stack after: "));
1996 DEBUG_ONLY(x87_dump_stack(state));
1998 return NO_NODE_ADDED;
/**
2002 * Simulate a be_Spill.
2004 * @param state the x87 state
2005 * @param n the node that should be simulated (and patched)
2007 * Should not happen, spills are lowered before x87 simulator see them.
 */
2009 static int sim_Spill(x87_state *state, ir_node *n)
2011 panic("Spill not lowered");
/* never reached: panic() presumably does not return; the call keeps the
 * compiler happy about the non-void return */
2012 return sim_fst(state, n);
/**
2016 * Simulate a be_Reload.
2018 * @param state the x87 state
2019 * @param n the node that should be simulated (and patched)
2021 * Should not happen, reloads are lowered before x87 simulator see them.
 */
2023 static int sim_Reload(x87_state *state, ir_node *n)
2025 panic("Reload not lowered");
/* never reached: panic() presumably does not return; the call keeps the
 * compiler happy about the non-void return */
2026 return sim_fld(state, n);
/**
2030 * Simulate a be_Return.
2032 * @param state the x87 state
2033 * @param n the node that should be simulated (and patched)
2035 * @return NO_NODE_ADDED
 *
 * At a Return exactly the float results may remain on the x87 stack;
 * they are popped from the simulated state (the hardware pops them via
 * the calling convention).
 */
2037 static int sim_Return(x87_state *state, ir_node *n)
2039 int n_res = be_Return_get_n_rets(n);
2040 int i, n_float_res = 0;
2042 /* only floating point return values must reside on stack */
2043 for (i = 0; i < n_res; ++i) {
2044 ir_node *res = get_irn_n(n, be_pos_Return_val + i);
2046 if (mode_is_float(get_irn_mode(res)))
2049 assert(x87_get_depth(state) == n_float_res);
2051 /* pop them virtually */
/* loop body (presumably x87_pop(state)) lies in lines missing from
 * this extract -- TODO confirm */
2052 for (i = n_float_res - 1; i >= 0; --i)
2055 return NO_NODE_ADDED;
/** Pairing of input and output register of a Perm (helper record). */
2058 typedef struct _perm_data_t {
2059 const arch_register_t *in;   /* register the value enters the Perm in */
2060 const arch_register_t *out;  /* register the value leaves the Perm in */
/**
2064 * Simulate a be_Perm.
2066 * @param state the x87 state
2067 * @param irn the node that should be simulated (and patched)
2069 * @return NO_NODE_ADDED
 *
 * A Perm is purely virtual on the x87: no instruction is emitted, only
 * the simulated stack positions are re-assigned to the Projs.
 */
2071 static int sim_Perm(x87_state *state, ir_node *irn)
2074 ir_node *pred = get_irn_n(irn, 0);
2076 const ir_edge_t *edge;
2078 /* handle only floating point Perms */
2079 if (! mode_is_float(get_irn_mode(pred)))
2080 return NO_NODE_ADDED;
2082 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
2084 /* Perm is a pure virtual instruction on x87.
2085 All inputs must be on the FPU stack and are pairwise
2086 different from each other.
2087 So, all we need to do is to permutate the stack state. */
2088 n = get_irn_arity(irn);
2089 NEW_ARR_A(int, stack_pos, n);
2091 /* collect old stack positions */
2092 for (i = 0; i < n; ++i) {
2093 const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
2094 int idx = x87_on_stack(state, arch_register_get_index(inreg));
2096 assert(idx >= 0 && "Perm argument not on x87 stack");
2100 /* now do the permutation */
2101 foreach_out_edge(irn, edge) {
2102 ir_node *proj = get_edge_src_irn(edge);
2103 const arch_register_t *out = x87_get_irn_register(proj);
2104 long num = get_Proj_proj(proj);
2106 assert(0 <= num && num < n && "More Proj's than Perm inputs");
/* Proj number selects the old stack slot recorded above */
2107 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
2109 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
2111 return NO_NODE_ADDED;
/**
 * Simulate a be_Barrier: materialize Unknown_VFP operands as fldz so
 * that every barrier input corresponds to a real stack value.
 *
 * @param state  the x87 state
 * @param node   the Barrier node
 *
 * @return NO_NODE_ADDED
 */
2114 static int sim_Barrier(x87_state *state, ir_node *node)
2118 /* materialize unknown if needed */
2119 arity = get_irn_arity(node);
2120 for (i = 0; i < arity; ++i) {
2121 const arch_register_t *reg;
2124 ia32_x87_attr_t *attr;
2125 ir_node *in = get_irn_n(node, i);
2127 if (!is_ia32_Unknown_VFP(in))
2130 /* TODO: not completely correct... */
2131 reg = &ia32_vfp_regs[REG_VFP_UKNWN];
2134 block = get_nodes_block(node);
/* push a zero constant in place of the unknown value */
2135 zero = new_rd_ia32_fldz(NULL, current_ir_graph, block, mode_E);
2136 x87_push(state, arch_register_get_index(reg), zero);
2138 attr = get_ia32_x87_attr(zero);
2139 attr->x87[2] = &ia32_st_regs[0];
2141 sched_add_before(node, zero);
2143 set_irn_n(node, i, zero);
2146 return NO_NODE_ADDED;
/**
2151 * Kill any dead registers at block start by popping them from the stack.
2153 * @param sim the simulator handle
2154 * @param block the current block
2155 * @param start_state the x87 state at the begin of the block
2157 * @return the x87 state after dead register killed
 */
2159 static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state)
2161 x87_state *state = start_state;
2162 ir_node *first_insn = sched_first(block);
2163 ir_node *keep = NULL;
2164 unsigned live = vfp_live_args_after(sim, block, 0);
2166 int i, depth, num_pop;
/* build a bitmask of stack slots holding dead values */
2169 depth = x87_get_depth(state);
2170 for (i = depth - 1; i >= 0; --i) {
2171 int reg = x87_get_st_reg(state, i);
2173 if (! is_vfp_live(reg, live))
2174 kill_mask |= (1 << i);
2178 /* create a new state, will be changed */
2179 state = x87_clone_state(sim, state);
2181 DB((dbg, LEVEL_1, "Killing deads:\n"));
2182 DEBUG_ONLY(vfp_dump_live(live));
2183 DEBUG_ONLY(x87_dump_stack(state));
2185 if (kill_mask != 0 && live == 0) {
2186 /* special case: kill all registers */
2187 if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
2188 if (ia32_cg_config.use_femms) {
2189 /* use FEMMS on AMD processors to clear all */
2190 keep = new_rd_ia32_femms(NULL, get_irn_irg(block), block);
2192 /* use EMMS to clear all */
2193 keep = new_rd_ia32_emms(NULL, get_irn_irg(block), block);
2195 sched_add_before(first_insn, keep);
2201 /* now kill registers */
2203 /* we can only kill from TOS, so bring them up */
2204 if (! (kill_mask & 1)) {
2205 /* search from behind, because we can to a double-pop */
2206 for (i = depth - 1; i >= 0; --i) {
2207 if (kill_mask & (1 << i)) {
2208 kill_mask &= ~(1 << i);
/* mark this slot dead and exchange it up to the TOS so it
 * can be popped */
2215 x87_set_st(state, -1, keep, i);
2216 x87_create_fxch(state, first_insn, i);
2219 if ((kill_mask & 3) == 3) {
2220 /* we can do a double-pop */
2224 /* only a single pop */
/* 'num_pop' is set in the branches above (lines missing from this
 * extract) -- presumably 2 resp. 1; TODO confirm */
2229 kill_mask >>= num_pop;
2230 keep = x87_create_fpop(state, first_insn, num_pop);
2235 } /* x87_kill_deads */
/**
2238 * If we have PhiEs with unknown operands then we have to make sure that some
2239 * value is actually put onto the stack.
 *
 * @param state       the x87 state at the end of @p pred_block
 * @param block       the block containing the Phis
 * @param pred_block  the predecessor block whose outgoing edge is fixed
 * @param pos         the Phi predecessor position for that edge
 */
2241 static void fix_unknown_phis(x87_state *state, ir_node *block,
2242 ir_node *pred_block, int pos)
2246 sched_foreach(block, node) {
2248 const arch_register_t *reg;
2249 ia32_x87_attr_t *attr;
2254 op = get_Phi_pred(node, pos);
2255 if (!is_ia32_Unknown_VFP(op))
2258 reg = arch_get_irn_register(node);
2260 /* create a zero at end of pred block */
2261 zero = new_rd_ia32_fldz(NULL, current_ir_graph, pred_block, mode_E);
2262 x87_push(state, arch_register_get_index(reg), zero);
2264 attr = get_ia32_x87_attr(zero);
2265 attr->x87[2] = &ia32_st_regs[0];
2267 assert(is_ia32_fldz(zero));
/* schedule the fldz before the block-terminating jump */
2268 sched_add_before(sched_last(pred_block), zero);
2270 set_Phi_pred(node, pos, zero);
/**
2275 * Run a simulation and fix all virtual instructions for a block.
2277 * @param sim the simulator handle
2278 * @param block the current block
 *
 * Walks the schedule, dispatches each node to its sim_* callback (stored
 * in op->ops.generic), then propagates the resulting state to all
 * successor blocks, shuffling the stack where a successor already has a
 * begin state.
 */
2280 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
2283 blk_state *bl_state = x87_get_bl_state(sim, block);
2284 x87_state *state = bl_state->begin;
2285 const ir_edge_t *edge;
2286 ir_node *start_block;
2288 assert(state != NULL);
2289 /* already processed? */
2290 if (bl_state->end != NULL)
2293 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2294 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2295 DEBUG_ONLY(x87_dump_stack(state));
2297 /* at block begin, kill all dead registers */
2298 state = x87_kill_deads(sim, block, state);
2299 /* create a new state, will be changed */
2300 state = x87_clone_state(sim, state);
2302 /* beware, n might change */
2303 for (n = sched_first(block); !sched_is_end(n); n = next) {
2306 ir_op *op = get_irn_op(n);
2308 next = sched_next(n);
/* nodes without a registered simulator callback need no patching */
2309 if (op->ops.generic == NULL)
2312 func = (sim_func)op->ops.generic;
2315 node_inserted = (*func)(state, n);
2318 sim_func might have added an additional node after n,
2320 beware: n must not be changed by sim_func
2321 (i.e. removed from schedule) in this case
2323 if (node_inserted != NO_NODE_ADDED)
2324 next = sched_next(n);
2327 start_block = get_irg_start_block(get_irn_irg(block));
2329 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));
2331 /* check if the state must be shuffled */
2332 foreach_block_succ(block, edge) {
2333 ir_node *succ = get_edge_src_irn(edge);
2334 blk_state *succ_state;
/* the start block is no real successor, skip it */
2336 if (succ == start_block)
2339 succ_state = x87_get_bl_state(sim, succ);
2341 fix_unknown_phis(state, succ, block, get_edge_src_pos(edge));
2343 if (succ_state->begin == NULL) {
2344 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2345 DEBUG_ONLY(x87_dump_stack(state));
2346 succ_state->begin = state;
2348 waitq_put(sim->worklist, succ);
2350 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2351 /* There is already a begin state for the successor, bad.
2352 Do the necessary permutations.
2353 Note that critical edges are removed, so this is always possible:
2354 If the successor has more than one possible input, then it must
2357 x87_shuffle(sim, block, state, succ, succ_state->begin);
2360 bl_state->end = state;
2361 } /* x87_simulate_block */
/**
 * Register the simulator callback @p func for opcode @p op.
 * Each opcode may only be registered once (asserted).
 */
2363 static void register_sim(ir_op *op, sim_func func)
2365 assert(op->ops.generic == NULL);
2366 op->ops.generic = (op_func) func;
/**
2370 * Create a new x87 simulator.
2372 * @param sim a simulator handle, will be initialized
2373 * @param irg the current graph
 *
 * Allocates the per-node liveness cache and wires every virtual x87 and
 * backend opcode to its sim_* callback via op->ops.generic.
 */
2375 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
2377 obstack_init(&sim->obst);
2378 sim->blk_states = pmap_create();
2379 sim->n_idx = get_irg_last_idx(irg);
2380 sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);
2382 DB((dbg, LEVEL_1, "--------------------------------\n"
2383 "x87 Simulator started for %+F\n", irg));
2385 /* set the generic function pointer of instruction we must simulate */
2386 clear_irp_opcodes_generic_func();
2388 register_sim(op_ia32_Call, sim_Call);
2389 register_sim(op_ia32_vfld, sim_fld);
2390 register_sim(op_ia32_vfild, sim_fild);
2391 register_sim(op_ia32_vfld1, sim_fld1);
2392 register_sim(op_ia32_vfldz, sim_fldz);
2393 register_sim(op_ia32_vfadd, sim_fadd);
2394 register_sim(op_ia32_vfsub, sim_fsub);
2395 register_sim(op_ia32_vfmul, sim_fmul);
2396 register_sim(op_ia32_vfdiv, sim_fdiv);
2397 register_sim(op_ia32_vfprem, sim_fprem);
2398 register_sim(op_ia32_vfabs, sim_fabs);
2399 register_sim(op_ia32_vfchs, sim_fchs);
2400 register_sim(op_ia32_vfist, sim_fist);
2401 register_sim(op_ia32_vfisttp, sim_fisttp);
2402 register_sim(op_ia32_vfst, sim_fst);
2403 register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
2404 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
2405 register_sim(op_ia32_vFucomi, sim_Fucom);
2406 register_sim(op_be_Copy, sim_Copy);
2407 register_sim(op_be_Spill, sim_Spill);
2408 register_sim(op_be_Reload, sim_Reload);
2409 register_sim(op_be_Return, sim_Return);
2410 register_sim(op_be_Perm, sim_Perm);
2411 register_sim(op_be_Keep, sim_Keep);
2412 register_sim(op_be_Barrier, sim_Barrier);
2413 } /* x87_init_simulator */
/**
2416 * Destroy a x87 simulator.
2418 * @param sim the simulator handle
 *
 * Frees the block-state map and everything allocated on the obstack
 * (including the liveness cache).
 */
2420 static void x87_destroy_simulator(x87_simulator *sim)
2422 pmap_destroy(sim->blk_states);
2423 obstack_free(&sim->obst, NULL);
2424 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2425 } /* x87_destroy_simulator */
/**
2428 * Pre-block walker: calculate the liveness information for the block
2429 * and store it into the sim->live cache.
 *
 * @param block  the visited block
 * @param data   the x87_simulator handle (walker environment)
 */
2431 static void update_liveness_walker(ir_node *block, void *data)
2433 x87_simulator *sim = data;
2434 update_liveness(sim, block);
2435 } /* update_liveness_walker */
/**
 * Run the x87 simulator on a whole graph: replace all virtual vfp
 * instructions by real x87 ones with explicit st(i) operands.
 *
 * @param birg  the backend graph to process
 *
 * Processes blocks from a worklist starting at the start block; each
 * block is simulated once and its end state seeds the successors.
 */
2437 void x87_simulate_graph(be_irg_t *birg)
2439 /* TODO improve code quality (less executed fxch) by using execfreqs */
2441 ir_node *block, *start_block;
2442 blk_state *bl_state;
2444 ir_graph *irg = be_get_birg_irg(birg);
2446 /* create the simulator */
2447 x87_init_simulator(&sim, irg);
2449 start_block = get_irg_start_block(irg);
2450 bl_state = x87_get_bl_state(&sim, start_block);
2452 /* start with the empty state */
/* NOTE(review): 'sim' and 'empty' are declared in lines missing from
 * this extract (orig. 2440/2443-ish) -- presumably a local x87_simulator
 * and an empty x87_state; confirm against the full file */
2453 bl_state->begin = empty;
2456 sim.worklist = new_waitq();
2457 waitq_put(sim.worklist, start_block);
2459 be_assure_liveness(birg);
2460 sim.lv = be_get_birg_liveness(birg);
2461 // sim.lv = be_liveness(be_get_birg_irg(birg));
2462 be_liveness_assure_sets(sim.lv);
2464 /* Calculate the liveness for all nodes. We must precalculate this info,
2465 * because the simulator adds new nodes (possible before Phi nodes) which
2466 * would let a lazy calculation fail.
2467 * On the other hand we reduce the computation amount due to
2468 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2470 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
/* drain the worklist: simulate until no unprocessed block remains */
2474 block = waitq_get(sim.worklist);
2475 x87_simulate_block(&sim, block);
2476 } while (! waitq_empty(sim.worklist));
2479 del_waitq(sim.worklist);
2480 x87_destroy_simulator(&sim);
2481 } /* x87_simulate_graph */
/** Initialize the x87 module: register its debug channel. */
2483 void ia32_init_x87(void)
2485 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
2486 } /* ia32_init_x87 */