2 * Copyright (C) 1995-2010 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
34 #include "iredges_t.h"
46 #include "../belive_t.h"
47 #include "../besched.h"
48 #include "../benode.h"
49 #include "bearch_ia32_t.h"
50 #include "ia32_new_nodes.h"
51 #include "gen_ia32_new_nodes.h"
52 #include "gen_ia32_regalloc_if.h"
54 #include "ia32_architecture.h"
/* Wrap a stack index into [0, N_ia32_st_REGS); the mask form assumes
 * N_ia32_st_REGS is a power of two. Used everywhere a logical stack
 * position (tos + pos) must be reduced to a physical slot index. */
56 #define MASK_TOS(x) ((x) & (N_ia32_st_REGS - 1))
58 /** the debug handle */
59 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
61 /* Forward declaration. */
62 typedef struct x87_simulator x87_simulator;
65 * An exchange template.
66 * Note that our virtual functions have the same inputs
67 * and attributes as the real ones, so we can simple exchange
69 * Further, x87 supports inverse instructions, so we can handle them.
/* Bundles the four concrete x87 opcodes a virtual binop may be patched
 * into, depending on operand order and whether the result pops the tos. */
71 typedef struct exchange_tmpl {
72 ir_op *normal_op; /**< the normal one */
73 ir_op *reverse_op; /**< the reverse one if exists */
74 ir_op *normal_pop_op; /**< the normal one with tos pop */
75 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
79 * An entry on the simulated x87 stack.
81 typedef struct st_entry {
82 int reg_idx; /**< the virtual register index of this stack value */
83 ir_node *node; /**< the node that produced this value */
/* The simulated x87 register stack: a circular buffer of st_entry slots.
 * tos indexes the current top-of-stack slot; depth counts valid entries. */
89 typedef struct x87_state {
90 st_entry st[N_ia32_st_REGS]; /**< the register stack */
91 int depth; /**< the current stack depth */
92 int tos; /**< position of the tos */
93 x87_simulator *sim; /**< The simulator. */
96 /** An empty state, used for blocks without fp instructions. */
97 static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
98 static x87_state *empty = (x87_state *)&_empty;
101 * Return values of the instruction simulator functions.
104 NO_NODE_ADDED = 0, /**< No node that needs simulation was added. */
105 NODE_ADDED = 1 /**< A node that must be simulated was added by the simulator
106 in the schedule AFTER the current node. */
110 * The type of an instruction simulator function.
112 * @param state the x87 state
113 * @param n the node to be simulated
115 * @return NODE_ADDED if a node was added AFTER n in schedule that MUST be
117 * NO_NODE_ADDED otherwise
119 typedef int (*sim_func)(x87_state *state, ir_node *n);
122 * A block state: Every block has a x87 state at the beginning and at the end.
124 typedef struct blk_state {
125 x87_state *begin; /**< state at the begin or NULL if not assigned */
126 x87_state *end; /**< state at the end or NULL if not assigned */
129 #define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
131 /** liveness bitset for vfp registers. */
/* One bit per vfp register; unsigned char limits this to 8 registers,
 * matching the 8-slot x87 stack. */
132 typedef unsigned char vfp_liveness;
/* Per-irg simulator context shared by all simulator functions. */
137 struct x87_simulator {
138 struct obstack obst; /**< An obstack for fast allocating. */
139 pmap *blk_states; /**< Map blocks to states. */
140 be_lv_t *lv; /**< intrablock liveness. */
141 vfp_liveness *live; /**< Liveness information. */
142 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
143 waitq *worklist; /**< Worklist of blocks that must be processed. */
144 ia32_isa_t *isa; /**< the ISA object */
148 * Returns the current stack depth.
150 * @param state the x87 state
152 * @return the x87 stack depth
154 static int x87_get_depth(const x87_state *state)
157 } /* x87_get_depth */
160 * Return the virtual register index at st(pos).
162 * @param state the x87 state
163 * @param pos a stack position
165 * @return the vfp register index that produced the value at st(pos)
167 static int x87_get_st_reg(const x87_state *state, int pos)
169 assert(pos < state->depth);
/* st(pos) lives at slot (tos + pos) modulo the stack size. */
170 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
171 } /* x87_get_st_reg */
175 * Return the node at st(pos).
177 * @param state the x87 state
178 * @param pos a stack position
180 * @return the IR node that produced the value at st(pos)
182 static ir_node *x87_get_st_node(const x87_state *state, int pos)
184 assert(pos < state->depth);
185 return state->st[MASK_TOS(state->tos + pos)].node;
186 } /* x87_get_st_node */
189 * Dump the stack for debugging.
191 * @param state the x87 state
193 static void x87_dump_stack(const x87_state *state)
/* Print from the deepest entry down to st(0), so the tos appears last. */
197 for (i = state->depth - 1; i >= 0; --i) {
198 DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
199 x87_get_st_node(state, i)));
201 DB((dbg, LEVEL_2, "<-- TOS\n"));
202 } /* x87_dump_stack */
203 #endif /* DEBUG_libfirm */
206 * Set a virtual register to st(pos).
208 * @param state the x87 state
209 * @param reg_idx the vfp register index that should be set
210 * @param node the IR node that produces the value of the vfp register
211 * @param pos the stack position where the new value should be entered
213 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
215 assert(0 < state->depth);
216 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
217 state->st[MASK_TOS(state->tos + pos)].node = node;
219 DB((dbg, LEVEL_2, "After SET_REG: "));
220 DEBUG_ONLY(x87_dump_stack(state));
224 * Set the tos virtual register.
226 * @param state the x87 state
227 * @param reg_idx the vfp register index that should be set
228 * @param node the IR node that produces the value of the vfp register
230 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node)
/* Convenience wrapper: tos is simply st(0). */
232 x87_set_st(state, reg_idx, node, 0);
236 * Swap st(0) with st(pos).
238 * @param state the x87 state
239 * @param pos the stack position to change the tos with
241 static void x87_fxch(x87_state *state, int pos)
244 assert(pos < state->depth);
/* Three-assignment swap of the two slots through a temporary entry. */
246 entry = state->st[MASK_TOS(state->tos + pos)];
247 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
248 state->st[MASK_TOS(state->tos)] = entry;
250 DB((dbg, LEVEL_2, "After FXCH: "));
251 DEBUG_ONLY(x87_dump_stack(state));
255 * Convert a virtual register to the stack index.
257 * @param state the x87 state
258 * @param reg_idx the register vfp index
260 * @return the stack position where the register is stacked
261 * or -1 if the virtual register was not found
263 static int x87_on_stack(const x87_state *state, int reg_idx)
265 int i, tos = state->tos;
/* Linear scan over the valid entries; depth is at most N_ia32_st_REGS (8),
 * so this is cheap. */
267 for (i = 0; i < state->depth; ++i)
268 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
274 * Push a virtual Register onto the stack, double pushed allowed.
276 * @param state the x87 state
277 * @param reg_idx the register vfp index
278 * @param node the node that produces the value of the vfp register
280 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node)
282 assert(state->depth < N_ia32_st_REGS && "stack overrun");
/* The stack grows downwards: decrementing tos (mod stack size) opens a
 * new st(0) slot which is then filled. */
285 state->tos = MASK_TOS(state->tos - 1);
286 state->st[state->tos].reg_idx = reg_idx;
287 state->st[state->tos].node = node;
289 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
293 * Push a virtual Register onto the stack, double pushes are NOT allowed.
295 * @param state the x87 state
296 * @param reg_idx the register vfp index
297 * @param node the node that produces the value of the vfp register
298 * (note: there is no dbl_push parameter — a double push is rejected by the assertion below)
300 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
302 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
304 x87_push_dbl(state, reg_idx, node);
308 * Pop a virtual Register from the stack.
310 * @param state the x87 state
312 static void x87_pop(x87_state *state)
314 assert(state->depth > 0 && "stack underrun");
/* Popping just advances tos; the vacated slot's contents become stale
 * and are ignored because depth shrinks accordingly. */
317 state->tos = MASK_TOS(state->tos + 1);
319 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
323 * Empty the fpu stack
325 * @param state the x87 state
327 static void x87_emms(x87_state *state)
334 * Returns the block state of a block.
336 * @param sim the x87 simulator handle
337 * @param block the current block
339 * @return the block state
341 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
343 pmap_entry *entry = pmap_find(sim->blk_states, block);
/* Lazily create an unassigned (begin/end == NULL) state on first lookup
 * and cache it in the block map; obstack allocation, freed with the
 * simulator's obstack. */
346 blk_state *bl_state = OALLOC(&sim->obst, blk_state);
347 bl_state->begin = NULL;
348 bl_state->end = NULL;
350 pmap_insert(sim->blk_states, block, bl_state);
354 return PTR_TO_BLKSTATE(entry->value);
355 } /* x87_get_bl_state */
358 * Creates a new x87 state.
360 * @param sim the x87 simulator handle
362 * @return a new x87 state
364 static x87_state *x87_alloc_state(x87_simulator *sim)
366 x87_state *res = OALLOC(&sim->obst, x87_state);
370 } /* x87_alloc_state */
/* Clone an existing x87 state into freshly allocated storage. */
375 * @param sim the x87 simulator handle
376 * @param src the x87 state that will be cloned
378 * @return a cloned copy of the src state
380 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
382 x87_state *res = x87_alloc_state(sim);
386 } /* x87_clone_state */
389 * Patch a virtual instruction into a x87 one and return
390 * the node representing the result value.
392 * @param n the IR node to patch
393 * @param op the x87 opcode to patch in
395 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
397 ir_mode *mode = get_irn_mode(n);
402 if (mode == mode_T) {
403 /* patch all Proj's */
404 const ir_edge_t *edge;
/* For tuple results, rewrite the mode of every float Proj to the
 * concrete st register-class mode. */
406 foreach_out_edge(n, edge) {
407 ir_node *proj = get_edge_src_irn(edge);
409 mode = get_irn_mode(proj);
410 if (mode_is_float(mode)) {
412 set_irn_mode(proj, ia32_reg_classes[CLASS_ia32_st].mode);
/* Non-tuple float result: patch the node's own mode directly. */
416 } else if (mode_is_float(mode))
417 set_irn_mode(n, ia32_reg_classes[CLASS_ia32_st].mode);
419 } /* x87_patch_insn */
422 * Returns the first Proj of a mode_T node having a given mode.
424 * @param n the mode_T node
425 * @param m the desired mode of the Proj
426 * @return The first Proj of mode @p m found or NULL.
428 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
430 const ir_edge_t *edge;
432 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
434 foreach_out_edge(n, edge) {
435 ir_node *proj = get_edge_src_irn(edge);
436 if (get_irn_mode(proj) == m)
441 } /* get_irn_Proj_for_mode */
444 * Wrap the arch_* function here so we can check for errors.
/* Assert-only wrapper: the returned register must belong to the vfp class. */
446 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
448 const arch_register_t *res = arch_get_irn_register(irn);
450 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
452 } /* x87_get_irn_register */
/* Same as above, but for a specific output position of the node. */
454 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
457 const arch_register_t *res = arch_irn_get_register(irn, pos);
459 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
461 } /* x87_irn_get_register */
/* Map a stack index to the physical st(index) register descriptor. */
463 static inline const arch_register_t *get_st_reg(int index)
465 return &ia32_registers[REG_ST0 + index];
468 /* -------------- x87 perm --------------- */
471 * Creates a fxch for shuffle.
473 * @param state the x87 state
474 * @param pos parameter for fxch
475 * @param block the block were fxch is inserted
477 * Creates a new fxch node and reroute the user of the old node
480 * @return the fxch node
482 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block)
485 ia32_x87_attr_t *attr;
487 fxch = new_bd_ia32_fxch(NULL, block);
488 attr = get_ia32_x87_attr(fxch);
/* Record the two exchanged registers in the x87 attribute:
 * x87[0] = st(pos), x87[2] = st(0). */
489 attr->x87[0] = get_st_reg(pos);
490 attr->x87[2] = get_st_reg(0);
/* Mirror the exchange in the simulated state. */
494 x87_fxch(state, pos);
496 } /* x87_fxch_shuffle */
499 * Calculate the necessary permutations to reach dst_state.
501 * These permutations are done with fxch instructions and placed
502 * at the end of the block.
504 * Note that critical edges are removed here, so we need only
505 * a shuffle if the current block has only one successor.
507 * @param sim the simulator handle
508 * @param block the current block
509 * @param state the current x87 stack state, might be modified
510 * @param dst_block the destination block
511 * @param dst_state destination state
515 static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
516 x87_state *state, ir_node *dst_block,
517 const x87_state *dst_state)
519 int i, n_cycles, k, ri;
520 unsigned cycles[4], all_mask;
521 char cycle_idx[4][8];
522 ir_node *fxch, *before, *after;
526 assert(state->depth == dst_state->depth);
528 /* Some mathematics here:
529 If we have a cycle of length n that includes the tos,
530 we need n-1 exchange operations.
531 We can always add the tos and restore it, so we need
532 n+1 exchange operations for a cycle not containing the tos.
533 So, the maximum of needed operations is for a cycle of 7
534 not including the tos == 8.
535 This is the same number of ops we would need for using stores,
536 so exchange is cheaper (we save the loads).
537 On the other hand, we might need an additional exchange
538 in the next block to bring one operand on top, so the
539 number of ops in the first case is identical.
540 Further, no more than 4 cycles can exists (4 x 2).
/* all_mask has one bit per occupied stack slot; bits are cleared as
 * slots are assigned to a permutation cycle. */
542 all_mask = (1 << (state->depth)) - 1;
544 for (n_cycles = 0; all_mask; ++n_cycles) {
545 int src_idx, dst_idx;
547 /* find the first free slot */
548 for (i = 0; i < state->depth; ++i) {
549 if (all_mask & (1 << i)) {
550 all_mask &= ~(1 << i);
552 /* check if there are differences here */
553 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
559 /* no more cycles found */
/* Follow the permutation from slot i, recording each visited slot
 * until the cycle closes (its bit is already cleared). */
564 cycles[n_cycles] = (1 << i);
565 cycle_idx[n_cycles][k++] = i;
566 for (src_idx = i; ; src_idx = dst_idx) {
567 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
569 if ((all_mask & (1 << dst_idx)) == 0)
572 cycle_idx[n_cycles][k++] = dst_idx;
573 cycles[n_cycles] |= (1 << dst_idx);
574 all_mask &= ~(1 << dst_idx);
/* -1 terminates each cycle's index list. */
576 cycle_idx[n_cycles][k] = -1;
580 /* no permutation needed */
584 /* Hmm: permutation needed */
585 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
586 DEBUG_ONLY(x87_dump_stack(state));
587 DB((dbg, LEVEL_2, " to\n"));
588 DEBUG_ONLY(x87_dump_stack(dst_state));
592 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
593 for (ri = 0; ri < n_cycles; ++ri) {
594 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
595 for (k = 0; cycle_idx[ri][k] != -1; ++k)
596 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
597 DB((dbg, LEVEL_2, "\n"));
604 * Find the place node must be insert.
605 * We have only one successor block, so the last instruction should
608 before = sched_last(block);
609 assert(is_cfop(before));
611 /* now do the permutations */
612 for (ri = 0; ri < n_cycles; ++ri) {
/* bit 0 set means the cycle contains st(0); otherwise the tos must be
 * rotated into the cycle first (extra fxch) and restored afterwards. */
613 if ((cycles[ri] & 1) == 0) {
614 /* this cycle does not include the tos */
615 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
617 sched_add_after(after, fxch);
619 sched_add_before(before, fxch);
622 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
623 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
625 sched_add_after(after, fxch);
627 sched_add_before(before, fxch);
630 if ((cycles[ri] & 1) == 0) {
631 /* this cycle does not include the tos */
632 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
633 sched_add_after(after, fxch);
640 * Create a fxch node before another node.
642 * @param state the x87 state
643 * @param n the node after the fxch
644 * @param pos exchange st(pos) with st(0)
648 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
651 ia32_x87_attr_t *attr;
652 ir_node *block = get_nodes_block(n);
/* Update the simulated state first, then emit the matching fxch node. */
654 x87_fxch(state, pos);
656 fxch = new_bd_ia32_fxch(NULL, block);
657 attr = get_ia32_x87_attr(fxch);
658 attr->x87[0] = get_st_reg(pos);
659 attr->x87[2] = get_st_reg(0);
/* Schedule the exchange immediately before the consumer n. */
663 sched_add_before(n, fxch);
664 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
666 } /* x87_create_fxch */
669 * Create a fpush before node n.
671 * @param state the x87 state
672 * @param n the node after the fpush
673 * @param pos push st(pos) on stack
674 * @param op_idx replace input op_idx of n with the fpush result
676 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx)
678 ir_node *fpush, *pred = get_irn_n(n, op_idx);
679 ia32_x87_attr_t *attr;
680 const arch_register_t *out = x87_get_irn_register(pred);
/* A push duplicates the predecessor's value, so a "double push" of the
 * same vfp register onto the simulated stack is intended here. */
682 x87_push_dbl(state, arch_register_get_index(out), pred);
684 fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
685 attr = get_ia32_x87_attr(fpush);
686 attr->x87[0] = get_st_reg(pos);
687 attr->x87[2] = get_st_reg(0);
690 sched_add_before(n, fpush);
692 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
693 } /* x87_create_fpush */
696 * Create a fpop before node n.
698 * @param state the x87 state
699 * @param n the node after the fpop
700 * @param num pop 1 or 2 values
702 * @return the fpop node
704 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
706 ir_node *fpop = NULL;
707 ia32_x87_attr_t *attr;
/* ffreep (if the target supports it) is preferred over fpop; both free
 * st(0) — all three attribute slots therefore name st(0). */
712 if (ia32_cg_config.use_ffreep)
713 fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
715 fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
716 attr = get_ia32_x87_attr(fpop);
717 attr->x87[0] = get_st_reg(0);
718 attr->x87[1] = get_st_reg(0);
719 attr->x87[2] = get_st_reg(0);
722 sched_add_before(n, fpop);
723 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
726 } /* x87_create_fpop */
728 /* --------------------------------- liveness ------------------------------------------ */
731 * The liveness transfer function.
732 * Updates a live set over a single step from a given node to its predecessor.
733 * Everything defined at the node is removed from the set, the uses of the node get inserted.
735 * @param irn The node at which liveness should be computed.
736 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
737 * the registers live after irn.
739 * @return The live bitset.
741 static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
744 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
/* Definitions of a tuple node appear on its Projs; clear the bit for
 * every vfp register defined there. */
746 if (get_irn_mode(irn) == mode_T) {
747 const ir_edge_t *edge;
749 foreach_out_edge(irn, edge) {
750 ir_node *proj = get_edge_src_irn(edge);
752 if (arch_irn_consider_in_reg_alloc(cls, proj)) {
753 const arch_register_t *reg = x87_get_irn_register(proj);
754 live &= ~(1 << arch_register_get_index(reg));
757 } else if (arch_irn_consider_in_reg_alloc(cls, irn)) {
758 const arch_register_t *reg = x87_get_irn_register(irn);
759 live &= ~(1 << arch_register_get_index(reg));
/* Uses: every float operand in the vfp class becomes live. */
762 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
763 ir_node *op = get_irn_n(irn, i);
765 if (mode_is_float(get_irn_mode(op)) &&
766 arch_irn_consider_in_reg_alloc(cls, op)) {
767 const arch_register_t *reg = x87_get_irn_register(op);
768 live |= 1 << arch_register_get_index(reg);
772 } /* vfp_liveness_transfer */
775 * Put all live virtual registers at the end of a block into a bitset.
777 * @param sim the simulator handle
778 * (note: the liveness information is taken from sim->lv, not a separate parameter)
779 * @param block the block
781 * @return The live bitset at the end of this block
783 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
786 vfp_liveness live = 0;
787 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
788 const be_lv_t *lv = sim->lv;
790 be_lv_foreach(lv, block, be_lv_state_end, i) {
791 const arch_register_t *reg;
792 const ir_node *node = be_lv_get_irn(lv, block, i);
793 if (!arch_irn_consider_in_reg_alloc(cls, node))
796 reg = x87_get_irn_register(node);
797 live |= 1 << arch_register_get_index(reg);
801 } /* vfp_liveness_end_of_block */
803 /** get the register mask from an arch_register */
804 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
807 * Return a bitset of argument registers which are live at the end of a node.
809 * @param sim the simulator handle
810 * @param pos the node
811 * @param kill kill mask for the output registers
813 * @return The live bitset.
815 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
817 unsigned idx = get_irn_idx(pos);
/* sim->live is indexed by node index and was filled by update_liveness(). */
819 assert(idx < sim->n_idx);
820 return sim->live[idx] & ~kill;
821 } /* vfp_live_args_after */
824 * Calculate the liveness for a whole block and cache it.
826 * @param sim the simulator handle
828 * @param block the block
830 static void update_liveness(x87_simulator *sim, ir_node *block)
/* Start from the live-out set and walk the schedule backwards, caching
 * the live-after set for every node. */
832 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
836 /* now iterate through the block backward and cache the results */
837 sched_foreach_reverse(block, irn) {
838 /* stop at the first Phi: this produces the live-in */
842 idx = get_irn_idx(irn);
843 sim->live[idx] = live;
845 live = vfp_liveness_transfer(irn, live);
/* The block's own index caches the block's live-in set. */
847 idx = get_irn_idx(block);
848 sim->live[idx] = live;
849 } /* update_liveness */
852 * Returns true if a register is live in a set.
854 * @param reg_idx the vfp register index
855 * @param live a live bitset
857 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
861 * Dump liveness info.
863 * @param live the live bitset
865 static void vfp_dump_live(vfp_liveness live)
869 DB((dbg, LEVEL_2, "Live after: "));
870 for (i = 0; i < 8; ++i) {
871 if (live & (1 << i)) {
872 DB((dbg, LEVEL_2, "vf%d ", i));
875 DB((dbg, LEVEL_2, "\n"));
876 } /* vfp_dump_live */
877 #endif /* DEBUG_libfirm */
879 /* --------------------------------- simulators ---------------------------------------- */
882 * Simulate a virtual binop.
884 * @param state the x87 state
885 * @param n the node that should be simulated (and patched)
886 * @param tmpl the template containing the 4 possible x87 opcodes
888 * @return NO_NODE_ADDED
890 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl)
892 int op2_idx = 0, op1_idx;
893 int out_idx, do_pop = 0;
894 ia32_x87_attr_t *attr;
896 ir_node *patched_insn;
898 x87_simulator *sim = state->sim;
899 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
900 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
901 const arch_register_t *op1_reg = x87_get_irn_register(op1);
902 const arch_register_t *op2_reg = x87_get_irn_register(op2);
903 const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
904 int reg_index_1 = arch_register_get_index(op1_reg);
905 int reg_index_2 = arch_register_get_index(op2_reg);
/* Liveness after n, with the result register masked out: decides whether
 * operands may be destroyed (dead) or must be preserved (live). */
906 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
910 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
911 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
912 arch_register_get_name(out)));
913 DEBUG_ONLY(vfp_dump_live(live));
914 DB((dbg, LEVEL_1, "Stack before: "));
915 DEBUG_ONLY(x87_dump_stack(state));
917 op1_idx = x87_on_stack(state, reg_index_1);
918 assert(op1_idx >= 0);
919 op1_live_after = is_vfp_live(reg_index_1, live);
921 attr = get_ia32_x87_attr(n);
922 permuted = attr->attr.data.ins_permuted;
/* REG_VFP_VFP_NOREG marks an address-mode (memory) second operand. */
924 if (reg_index_2 != REG_VFP_VFP_NOREG) {
927 /* second operand is a vfp register */
928 op2_idx = x87_on_stack(state, reg_index_2);
929 assert(op2_idx >= 0);
930 op2_live_after = is_vfp_live(reg_index_2, live);
/* Case split on operand liveness selects among the four template
 * opcodes (normal / reverse, with / without tos pop). */
932 if (op2_live_after) {
933 /* Second operand is live. */
935 if (op1_live_after) {
936 /* Both operands are live: push the first one.
937 This works even for op1 == op2. */
938 x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
939 /* now do fxxx (tos=tos X op) */
943 dst = tmpl->normal_op;
945 /* Second live, first operand is dead here, bring it to tos. */
947 x87_create_fxch(state, n, op1_idx);
952 /* now do fxxx (tos=tos X op) */
954 dst = tmpl->normal_op;
957 /* Second operand is dead. */
958 if (op1_live_after) {
959 /* First operand is live: bring second to tos. */
961 x87_create_fxch(state, n, op2_idx);
966 /* now do fxxxr (tos = op X tos) */
968 dst = tmpl->reverse_op;
970 /* Both operands are dead here, pop them from the stack. */
973 /* Both are identically and on tos, no pop needed. */
974 /* here fxxx (tos = tos X tos) */
975 dst = tmpl->normal_op;
978 /* now do fxxxp (op = op X tos, pop) */
979 dst = tmpl->normal_pop_op;
983 } else if (op1_idx == 0) {
984 assert(op1_idx != op2_idx);
985 /* now do fxxxrp (op = tos X op, pop) */
986 dst = tmpl->reverse_pop_op;
990 /* Bring the second on top. */
991 x87_create_fxch(state, n, op2_idx);
992 if (op1_idx == op2_idx) {
993 /* Both are identically and on tos now, no pop needed. */
996 /* use fxxx (tos = tos X tos) */
997 dst = tmpl->normal_op;
1000 /* op2 is on tos now */
1002 /* use fxxxp (op = op X tos, pop) */
1003 dst = tmpl->normal_pop_op;
1011 /* second operand is an address mode */
1012 if (op1_live_after) {
1013 /* first operand is live: push it here */
1014 x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
1017 /* first operand is dead: bring it to tos */
1019 x87_create_fxch(state, n, op1_idx);
1024 /* use fxxx (tos = tos X mem) */
1025 dst = permuted ? tmpl->reverse_op : tmpl->normal_op;
/* Patch the virtual node to the chosen x87 opcode and record the
 * result in the simulated stack slot out_idx. */
1029 patched_insn = x87_patch_insn(n, dst);
1030 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
1035 /* patch the operation */
1036 attr->x87[0] = op1_reg = get_st_reg(op1_idx);
1037 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1038 attr->x87[1] = op2_reg = get_st_reg(op2_idx);
1040 attr->x87[2] = out = get_st_reg(out_idx);
1042 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1043 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1044 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
1045 arch_register_get_name(out)));
1047 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1048 arch_register_get_name(op1_reg),
1049 arch_register_get_name(out)));
1052 return NO_NODE_ADDED;
1056 * Simulate a virtual Unop.
1058 * @param state the x87 state
1059 * @param n the node that should be simulated (and patched)
1060 * @param op the x87 opcode that will replace n's opcode
1062 * @return NO_NODE_ADDED
1064 static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
1067 x87_simulator *sim = state->sim;
1068 const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, 0));
1069 const arch_register_t *out = x87_get_irn_register(n);
1070 ia32_x87_attr_t *attr;
1071 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1073 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1074 DEBUG_ONLY(vfp_dump_live(live));
1076 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
/* Live operand must be preserved: duplicate it; a dead operand is
 * simply exchanged to the tos and consumed in place. */
1078 if (is_vfp_live(arch_register_get_index(op1), live)) {
1079 /* push the operand here */
1080 x87_create_fpush(state, n, op1_idx, 0);
1084 /* operand is dead, bring it to tos */
1086 x87_create_fxch(state, n, op1_idx);
/* x87 unops operate on st(0): both the operand and the result are st(0). */
1091 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1092 attr = get_ia32_x87_attr(n);
1093 attr->x87[0] = op1 = get_st_reg(0);
1094 attr->x87[2] = out = get_st_reg(0);
1095 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1097 return NO_NODE_ADDED;
1101 * Simulate a virtual Load instruction.
1103 * @param state the x87 state
1104 * @param n the node that should be simulated (and patched)
1105 * @param op the x87 opcode that will replace n's opcode
1107 * @return NO_NODE_ADDED
1109 static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
1111 const arch_register_t *out = x87_irn_get_register(n, res_pos);
1112 ia32_x87_attr_t *attr;
1114 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
/* A load always pushes its result onto the x87 stack (st(0)). */
1115 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1116 assert(out == x87_irn_get_register(n, res_pos));
1117 attr = get_ia32_x87_attr(n);
1118 attr->x87[2] = out = get_st_reg(0);
1119 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1121 return NO_NODE_ADDED;
1125 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1127 * @param store The store
1128 * @param old_val The former value
1129 * @param new_val The new value
1131 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
1133 const ir_edge_t *edge, *ne;
/* _safe variant is required: set_irn_n() mutates the edge list we walk. */
1135 foreach_out_edge_safe(old_val, edge, ne) {
1136 ir_node *user = get_edge_src_irn(edge);
1138 if (! user || user == store)
1141 /* if the user is scheduled after the store: rewire */
1142 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1144 /* find the input of the user pointing to the old value */
1145 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1146 if (get_irn_n(user, i) == old_val)
1147 set_irn_n(user, i, new_val);
1151 } /* collect_and_rewire_users */
1154 * Simulate a virtual Store.
1156 * @param state the x87 state
1157 * @param n the node that should be simulated (and patched)
1158 * @param op the x87 store opcode
1159 * @param op_p the x87 store and pop opcode
1161 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p)
1163 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1164 const arch_register_t *op2 = x87_get_irn_register(val);
1165 unsigned live = vfp_live_args_after(state->sim, n, 0);
1166 int insn = NO_NODE_ADDED;
1167 ia32_x87_attr_t *attr;
1168 int op2_reg_idx, op2_idx, depth;
1169 int live_after_node;
1172 op2_reg_idx = arch_register_get_index(op2);
1173 op2_idx = x87_on_stack(state, op2_reg_idx);
1174 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1175 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1176 assert(op2_idx >= 0);
1178 mode = get_ia32_ls_mode(n);
1179 depth = x87_get_depth(state);
1181 if (live_after_node) {
1183 Problem: fst doesn't support 96bit modes (spills), only fstp does
1184 fist doesn't support 64bit mode, only fistp
1186 - stack not full: push value and fstp
1187 - stack full: fstp value and load again
1188 Note that we cannot test on mode_E, because floats might be 96bit ...
/* Wide stores (>64 bit float, >32 bit int) only exist in popping form,
 * so the value must either be duplicated first or reloaded afterwards. */
1190 if (get_mode_size_bits(mode) > 64 || (mode_is_int(mode) && get_mode_size_bits(mode) > 32)) {
1191 if (depth < N_ia32_st_REGS) {
1192 /* ok, we have a free register: push + fstp */
1193 x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
1195 x87_patch_insn(n, op_p);
1197 ir_node *vfld, *mem, *block, *rproj, *mproj;
1198 ir_graph *irg = get_irn_irg(n);
1199 ir_node *nomem = get_irg_no_mem(irg);
1201 /* stack full here: need fstp + load */
1203 x87_patch_insn(n, op_p);
1205 block = get_nodes_block(n);
/* Build a reload (vfld) from the same address as the store, copying
 * all the store's addressing attributes. */
1206 vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), nomem, get_ia32_ls_mode(n));
1208 /* copy all attributes */
1209 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1210 if (is_ia32_use_frame(n))
1211 set_ia32_use_frame(vfld);
1212 set_ia32_op_type(vfld, ia32_AddrModeS);
1213 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1214 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1215 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1217 rproj = new_r_Proj(vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1218 mproj = new_r_Proj(vfld, mode_M, pn_ia32_vfld_M);
1219 mem = get_irn_Proj_for_mode(n, mode_M);
1221 assert(mem && "Store memory not found");
1223 arch_set_irn_register(rproj, op2);
1225 /* reroute all former users of the store memory to the load memory */
1226 edges_reroute(mem, mproj);
1227 /* set the memory input of the load to the store memory */
1228 set_irn_n(vfld, n_ia32_vfld_mem, mem);
1230 sched_add_after(n, vfld);
1231 sched_add_after(vfld, rproj);
1233 /* rewire all users, scheduled after the store, to the loaded value */
1234 collect_and_rewire_users(n, val, rproj);
1239 /* we can only store the tos to memory */
1241 x87_create_fxch(state, n, op2_idx);
1243 /* mode size 64 or smaller -> use normal fst */
1244 x87_patch_insn(n, op);
1247 /* we can only store the tos to memory */
1249 x87_create_fxch(state, n, op2_idx);
/* Value is dead after the store: use the popping form to free st(0). */
1252 x87_patch_insn(n, op_p);
1255 attr = get_ia32_x87_attr(n);
1256 attr->x87[1] = op2 = get_st_reg(0);
1257 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/* Generator macros: instantiate sim_* wrappers for each opcode family.
 * _GEN_BINOP supplies the 4-entry exchange template to sim_binop. */
1262 #define _GEN_BINOP(op, rev) \
1263 static int sim_##op(x87_state *state, ir_node *n) { \
1264 exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
1265 return sim_binop(state, n, &tmpl); \
1268 #define GEN_BINOP(op) _GEN_BINOP(op, op)
1269 #define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
1271 #define GEN_LOAD(op) \
1272 static int sim_##op(x87_state *state, ir_node *n) { \
1273 return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
1276 #define GEN_UNOP(op) \
1277 static int sim_##op(x87_state *state, ir_node *n) { \
1278 return sim_unop(state, n, op_ia32_##op); \
1281 #define GEN_STORE(op) \
1282 static int sim_##op(x87_state *state, ir_node *n) { \
1283 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
1305 * Simulate a virtual fisttp.
1307 * @param state the x87 state
1308 * @param n the node that should be simulated (and patched)
1310 * @return NO_NODE_ADDED
1312 static int sim_fisttp(x87_state *state, ir_node *n)
1314 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1315 const arch_register_t *op2 = x87_get_irn_register(val);
1316 ia32_x87_attr_t *attr;
1317 int op2_reg_idx, op2_idx;
1319 op2_reg_idx = arch_register_get_index(op2);
1320 op2_idx = x87_on_stack(state, op2_reg_idx);
1321 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1322 assert(op2_idx >= 0);
1324 /* Note: although the value is still live here, it is destroyed because
1325 of the pop. The register allocator is aware of that and introduced a copy
1326 if the value must be alive. */
1328 /* we can only store the tos to memory */
1330 x87_create_fxch(state, n, op2_idx);
1333 x87_patch_insn(n, op_ia32_fisttp);
1335 attr = get_ia32_x87_attr(n);
1336 attr->x87[1] = op2 = get_st_reg(0);
1337 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
1339 return NO_NODE_ADDED;
1343 * Simulate a virtual FtstFnstsw.
1345 * @param state the x87 state
1346 * @param n the node that should be simulated (and patched)
1348 * @return NO_NODE_ADDED
1350 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1352 x87_simulator *sim = state->sim;
1353 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1354 ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1355 const arch_register_t *reg1 = x87_get_irn_register(op1_node);
1356 int reg_index_1 = arch_register_get_index(reg1);
1357 int op1_idx = x87_on_stack(state, reg_index_1);
1358 unsigned live = vfp_live_args_after(sim, n, 0);
1360 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, arch_register_get_name(reg1)));
1361 DEBUG_ONLY(vfp_dump_live(live));
1362 DB((dbg, LEVEL_1, "Stack before: "));
1363 DEBUG_ONLY(x87_dump_stack(state));
1364 assert(op1_idx >= 0);
1367 /* bring the value to tos */
1368 x87_create_fxch(state, n, op1_idx);
1372 /* patch the operation */
1373 x87_patch_insn(n, op_ia32_FtstFnstsw);
1374 reg1 = get_st_reg(op1_idx);
1375 attr->x87[0] = reg1;
1376 attr->x87[1] = NULL;
1377 attr->x87[2] = NULL;
1379 if (!is_vfp_live(reg_index_1, live))
1380 x87_create_fpop(state, sched_next(n), 1);
1382 return NO_NODE_ADDED;
1383 } /* sim_FtstFnstsw */
1388 * @param state the x87 state
1389 * @param n the node that should be simulated (and patched)
1391 * @return NO_NODE_ADDED
1393 static int sim_Fucom(x87_state *state, ir_node *n)
1397 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1399 x87_simulator *sim = state->sim;
1400 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1401 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1402 const arch_register_t *op1 = x87_get_irn_register(op1_node);
1403 const arch_register_t *op2 = x87_get_irn_register(op2_node);
1404 int reg_index_1 = arch_register_get_index(op1);
1405 int reg_index_2 = arch_register_get_index(op2);
1406 unsigned live = vfp_live_args_after(sim, n, 0);
1407 bool permuted = attr->attr.data.ins_permuted;
1411 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1412 arch_register_get_name(op1), arch_register_get_name(op2)));
1413 DEBUG_ONLY(vfp_dump_live(live));
1414 DB((dbg, LEVEL_1, "Stack before: "));
1415 DEBUG_ONLY(x87_dump_stack(state));
1417 op1_idx = x87_on_stack(state, reg_index_1);
1418 assert(op1_idx >= 0);
1420 /* BEWARE: check for comp a,a cases, they might happen */
1421 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1422 /* second operand is a vfp register */
1423 op2_idx = x87_on_stack(state, reg_index_2);
1424 assert(op2_idx >= 0);
1426 if (is_vfp_live(reg_index_2, live)) {
1427 /* second operand is live */
1429 if (is_vfp_live(reg_index_1, live)) {
1430 /* both operands are live */
1433 /* res = tos X op */
1434 } else if (op2_idx == 0) {
1435 /* res = op X tos */
1436 permuted = !permuted;
1439 /* bring the first one to tos */
1440 x87_create_fxch(state, n, op1_idx);
1441 if (op1_idx == op2_idx) {
1443 } else if (op2_idx == 0) {
1447 /* res = tos X op */
1450 /* second live, first operand is dead here, bring it to tos.
1451 This means further, op1_idx != op2_idx. */
1452 assert(op1_idx != op2_idx);
1454 x87_create_fxch(state, n, op1_idx);
1459 /* res = tos X op, pop */
1463 /* second operand is dead */
1464 if (is_vfp_live(reg_index_1, live)) {
1465 /* first operand is live: bring second to tos.
1466 This means further, op1_idx != op2_idx. */
1467 assert(op1_idx != op2_idx);
1469 x87_create_fxch(state, n, op2_idx);
1474 /* res = op X tos, pop */
1476 permuted = !permuted;
1479 /* both operands are dead here, check first for identity. */
1480 if (op1_idx == op2_idx) {
1481 /* identically, one pop needed */
1483 x87_create_fxch(state, n, op1_idx);
1487 /* res = tos X op, pop */
1490 /* different, move them to st and st(1) and pop both.
1491 The tricky part is to get one into st(1).*/
1492 else if (op2_idx == 1) {
1493 /* good, second operand is already in the right place, move the first */
1495 /* bring the first on top */
1496 x87_create_fxch(state, n, op1_idx);
1497 assert(op2_idx != 0);
1500 /* res = tos X op, pop, pop */
1502 } else if (op1_idx == 1) {
1503 /* good, first operand is already in the right place, move the second */
1505 /* bring the first on top */
1506 x87_create_fxch(state, n, op2_idx);
1507 assert(op1_idx != 0);
1510 /* res = op X tos, pop, pop */
1511 permuted = !permuted;
1515 /* if one is already the TOS, we need two fxch */
1517 /* first one is TOS, move to st(1) */
1518 x87_create_fxch(state, n, 1);
1519 assert(op2_idx != 1);
1521 x87_create_fxch(state, n, op2_idx);
1523 /* res = op X tos, pop, pop */
1525 permuted = !permuted;
1527 } else if (op2_idx == 0) {
1528 /* second one is TOS, move to st(1) */
1529 x87_create_fxch(state, n, 1);
1530 assert(op1_idx != 1);
1532 x87_create_fxch(state, n, op1_idx);
1534 /* res = tos X op, pop, pop */
1537 /* none of them is either TOS or st(1), 3 fxch needed */
1538 x87_create_fxch(state, n, op2_idx);
1539 assert(op1_idx != 0);
1540 x87_create_fxch(state, n, 1);
1542 x87_create_fxch(state, n, op1_idx);
1544 /* res = tos X op, pop, pop */
1551 /* second operand is an address mode */
1552 if (is_vfp_live(reg_index_1, live)) {
1553 /* first operand is live: bring it to TOS */
1555 x87_create_fxch(state, n, op1_idx);
1559 /* first operand is dead: bring it to tos */
1561 x87_create_fxch(state, n, op1_idx);
1568 /* patch the operation */
1569 if (is_ia32_vFucomFnstsw(n)) {
1573 case 0: dst = op_ia32_FucomFnstsw; break;
1574 case 1: dst = op_ia32_FucompFnstsw; break;
1575 case 2: dst = op_ia32_FucomppFnstsw; break;
1576 default: panic("invalid popcount in sim_Fucom");
1579 for (i = 0; i < pops; ++i) {
1582 } else if (is_ia32_vFucomi(n)) {
1584 case 0: dst = op_ia32_Fucomi; break;
1585 case 1: dst = op_ia32_Fucompi; x87_pop(state); break;
1587 dst = op_ia32_Fucompi;
1589 x87_create_fpop(state, sched_next(n), 1);
1591 default: panic("invalid popcount in sim_Fucom");
1594 panic("invalid operation %+F in sim_FucomFnstsw", n);
1597 x87_patch_insn(n, dst);
1604 op1 = get_st_reg(op1_idx);
1607 op2 = get_st_reg(op2_idx);
1610 attr->x87[2] = NULL;
1611 attr->attr.data.ins_permuted = permuted;
1614 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1615 arch_register_get_name(op1), arch_register_get_name(op2)));
1617 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1618 arch_register_get_name(op1)));
1621 return NO_NODE_ADDED;
1627 * @param state the x87 state
1628 * @param n the node that should be simulated (and patched)
1630 * @return NO_NODE_ADDED
1632 static int sim_Keep(x87_state *state, ir_node *node)
1635 const arch_register_t *op_reg;
1641 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1643 arity = get_irn_arity(node);
1644 for (i = 0; i < arity; ++i) {
1645 op = get_irn_n(node, i);
1646 op_reg = arch_get_irn_register(op);
1647 if (arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1650 reg_id = arch_register_get_index(op_reg);
1651 live = vfp_live_args_after(state->sim, node, 0);
1653 op_stack_idx = x87_on_stack(state, reg_id);
1654 if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live))
1655 x87_create_fpop(state, sched_next(node), 1);
1658 DB((dbg, LEVEL_1, "Stack after: "));
1659 DEBUG_ONLY(x87_dump_stack(state));
1661 return NO_NODE_ADDED;
1665 * Keep the given node alive by adding a be_Keep.
1667 * @param node the node to kept alive
1669 static void keep_float_node_alive(ir_node *node)
1671 ir_node *block = get_nodes_block(node);
1672 ir_node *keep = be_new_Keep(block, 1, &node);
1674 assert(sched_is_scheduled(node));
1675 sched_add_after(node, keep);
1679 * Create a copy of a node. Recreate the node if it's a constant.
1681 * @param state the x87 state
1682 * @param n the node to be copied
1684 * @return the copy of n
1686 static ir_node *create_Copy(x87_state *state, ir_node *n)
1688 dbg_info *n_dbg = get_irn_dbg_info(n);
1689 ir_mode *mode = get_irn_mode(n);
1690 ir_node *block = get_nodes_block(n);
1691 ir_node *pred = get_irn_n(n, 0);
1692 ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL;
1694 const arch_register_t *out;
1695 const arch_register_t *op1;
1696 ia32_x87_attr_t *attr;
1698 /* Do not copy constants, recreate them. */
1699 switch (get_ia32_irn_opcode(pred)) {
1701 cnstr = new_bd_ia32_fldz;
1704 cnstr = new_bd_ia32_fld1;
1706 case iro_ia32_fldpi:
1707 cnstr = new_bd_ia32_fldpi;
1709 case iro_ia32_fldl2e:
1710 cnstr = new_bd_ia32_fldl2e;
1712 case iro_ia32_fldl2t:
1713 cnstr = new_bd_ia32_fldl2t;
1715 case iro_ia32_fldlg2:
1716 cnstr = new_bd_ia32_fldlg2;
1718 case iro_ia32_fldln2:
1719 cnstr = new_bd_ia32_fldln2;
1725 out = x87_get_irn_register(n);
1726 op1 = x87_get_irn_register(pred);
1728 if (cnstr != NULL) {
1729 /* copy a constant */
1730 res = (*cnstr)(n_dbg, block, mode);
1732 x87_push(state, arch_register_get_index(out), res);
1734 attr = get_ia32_x87_attr(res);
1735 attr->x87[2] = get_st_reg(0);
1737 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1739 res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);
1741 x87_push(state, arch_register_get_index(out), res);
1743 attr = get_ia32_x87_attr(res);
1744 attr->x87[0] = get_st_reg(op1_idx);
1745 attr->x87[2] = get_st_reg(0);
1747 arch_set_irn_register(res, out);
1753 * Simulate a be_Copy.
1755 * @param state the x87 state
1756 * @param n the node that should be simulated (and patched)
1758 * @return NO_NODE_ADDED
1760 static int sim_Copy(x87_state *state, ir_node *n)
1763 const arch_register_t *out;
1764 const arch_register_t *op1;
1765 const arch_register_class_t *cls;
1766 ir_node *node, *next;
1767 int op1_idx, out_idx;
1770 cls = arch_get_irn_reg_class_out(n);
1771 if (cls != &ia32_reg_classes[CLASS_ia32_vfp])
1774 pred = get_irn_n(n, 0);
1775 out = x87_get_irn_register(n);
1776 op1 = x87_get_irn_register(pred);
1777 live = vfp_live_args_after(state->sim, n, REGMASK(out));
1779 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1780 arch_register_get_name(op1), arch_register_get_name(out)));
1781 DEBUG_ONLY(vfp_dump_live(live));
1783 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1785 if (is_vfp_live(arch_register_get_index(op1), live)) {
1786 /* Operand is still live, a real copy. We need here an fpush that can
1787 hold a a register, so use the fpushCopy or recreate constants */
1788 node = create_Copy(state, n);
1790 /* We have to make sure the old value doesn't go dead (which can happen
1791 * when we recreate constants). As the simulator expected that value in
1792 * the pred blocks. This is unfortunate as removing it would save us 1
1793 * instruction, but we would have to rerun all the simulation to get
1796 next = sched_next(n);
1799 sched_add_before(next, node);
1801 if (get_irn_n_edges(pred) == 0) {
1802 keep_float_node_alive(pred);
1805 DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
1807 out_idx = x87_on_stack(state, arch_register_get_index(out));
1809 if (out_idx >= 0 && out_idx != op1_idx) {
1810 /* Matze: out already on stack? how can this happen? */
1811 panic("invalid stack state in x87 simulator");
1814 /* op1 must be killed and placed where out is */
1816 ia32_x87_attr_t *attr;
1817 /* best case, simple remove and rename */
1818 x87_patch_insn(n, op_ia32_Pop);
1819 attr = get_ia32_x87_attr(n);
1820 attr->x87[0] = op1 = get_st_reg(0);
1823 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1825 ia32_x87_attr_t *attr;
1826 /* move op1 to tos, store and pop it */
1828 x87_create_fxch(state, n, op1_idx);
1831 x87_patch_insn(n, op_ia32_Pop);
1832 attr = get_ia32_x87_attr(n);
1833 attr->x87[0] = op1 = get_st_reg(out_idx);
1836 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1838 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1841 /* just a virtual copy */
1842 x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
1843 /* don't remove the node to keep the verifier quiet :),
1844 the emitter won't emit any code for the node */
1847 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1848 exchange(n, get_unop_op(n));
1852 return NO_NODE_ADDED;
1856 * Returns the vf0 result Proj of a Call.
1858 * @para call the Call node
1860 static ir_node *get_call_result_proj(ir_node *call)
1862 const ir_edge_t *edge;
1864 /* search the result proj */
1865 foreach_out_edge(call, edge) {
1866 ir_node *proj = get_edge_src_irn(edge);
1867 long pn = get_Proj_proj(proj);
1869 if (pn == pn_ia32_Call_vf0)
1874 } /* get_call_result_proj */
1877 * Simulate a ia32_Call.
1879 * @param state the x87 state
1880 * @param n the node that should be simulated (and patched)
1882 * @return NO_NODE_ADDED
1884 static int sim_Call(x87_state *state, ir_node *n)
1886 ir_type *call_tp = get_ia32_call_attr_const(n)->call_tp;
1890 const arch_register_t *reg;
1892 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1894 /* at the begin of a call the x87 state should be empty */
1895 assert(state->depth == 0 && "stack not empty before call");
1897 if (get_method_n_ress(call_tp) <= 0)
1901 * If the called function returns a float, it is returned in st(0).
1902 * This even happens if the return value is NOT used.
1903 * Moreover, only one return result is supported.
1905 res_type = get_method_res_type(call_tp, 0);
1906 mode = get_type_mode(res_type);
1908 if (mode == NULL || !mode_is_float(mode))
1911 resproj = get_call_result_proj(n);
1912 assert(resproj != NULL);
1914 reg = x87_get_irn_register(resproj);
1915 x87_push(state, arch_register_get_index(reg), resproj);
1918 DB((dbg, LEVEL_1, "Stack after: "));
1919 DEBUG_ONLY(x87_dump_stack(state));
1921 return NO_NODE_ADDED;
1925 * Simulate a be_Return.
1927 * @param state the x87 state
1928 * @param n the node that should be simulated (and patched)
1930 * @return NO_NODE_ADDED
1932 static int sim_Return(x87_state *state, ir_node *n)
1934 int n_res = be_Return_get_n_rets(n);
1935 int i, n_float_res = 0;
1937 /* only floating point return values must reside on stack */
1938 for (i = 0; i < n_res; ++i) {
1939 ir_node *res = get_irn_n(n, n_be_Return_val + i);
1941 if (mode_is_float(get_irn_mode(res)))
1944 assert(x87_get_depth(state) == n_float_res);
1946 /* pop them virtually */
1947 for (i = n_float_res - 1; i >= 0; --i)
1950 return NO_NODE_ADDED;
1953 typedef struct perm_data_t {
1954 const arch_register_t *in;
1955 const arch_register_t *out;
1959 * Simulate a be_Perm.
1961 * @param state the x87 state
1962 * @param irn the node that should be simulated (and patched)
1964 * @return NO_NODE_ADDED
1966 static int sim_Perm(x87_state *state, ir_node *irn)
1969 ir_node *pred = get_irn_n(irn, 0);
1971 const ir_edge_t *edge;
1973 /* handle only floating point Perms */
1974 if (! mode_is_float(get_irn_mode(pred)))
1975 return NO_NODE_ADDED;
1977 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1979 /* Perm is a pure virtual instruction on x87.
1980 All inputs must be on the FPU stack and are pairwise
1981 different from each other.
1982 So, all we need to do is to permutate the stack state. */
1983 n = get_irn_arity(irn);
1984 NEW_ARR_A(int, stack_pos, n);
1986 /* collect old stack positions */
1987 for (i = 0; i < n; ++i) {
1988 const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
1989 int idx = x87_on_stack(state, arch_register_get_index(inreg));
1991 assert(idx >= 0 && "Perm argument not on x87 stack");
1995 /* now do the permutation */
1996 foreach_out_edge(irn, edge) {
1997 ir_node *proj = get_edge_src_irn(edge);
1998 const arch_register_t *out = x87_get_irn_register(proj);
1999 long num = get_Proj_proj(proj);
2001 assert(0 <= num && num < n && "More Proj's than Perm inputs");
2002 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
2004 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
2006 return NO_NODE_ADDED;
2010 * Kill any dead registers at block start by popping them from the stack.
2012 * @param sim the simulator handle
2013 * @param block the current block
2014 * @param start_state the x87 state at the begin of the block
2016 * @return the x87 state after dead register killed
2018 static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state)
2020 x87_state *state = start_state;
2021 ir_node *first_insn = sched_first(block);
2022 ir_node *keep = NULL;
2023 unsigned live = vfp_live_args_after(sim, block, 0);
2025 int i, depth, num_pop;
2028 depth = x87_get_depth(state);
2029 for (i = depth - 1; i >= 0; --i) {
2030 int reg = x87_get_st_reg(state, i);
2032 if (! is_vfp_live(reg, live))
2033 kill_mask |= (1 << i);
2037 /* create a new state, will be changed */
2038 state = x87_clone_state(sim, state);
2040 DB((dbg, LEVEL_1, "Killing deads:\n"));
2041 DEBUG_ONLY(vfp_dump_live(live));
2042 DEBUG_ONLY(x87_dump_stack(state));
2044 if (kill_mask != 0 && live == 0) {
2045 /* special case: kill all registers */
2046 if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
2047 if (ia32_cg_config.use_femms) {
2048 /* use FEMMS on AMD processors to clear all */
2049 keep = new_bd_ia32_femms(NULL, block);
2051 /* use EMMS to clear all */
2052 keep = new_bd_ia32_emms(NULL, block);
2054 sched_add_before(first_insn, keep);
2060 /* now kill registers */
2062 /* we can only kill from TOS, so bring them up */
2063 if (! (kill_mask & 1)) {
2064 /* search from behind, because we can to a double-pop */
2065 for (i = depth - 1; i >= 0; --i) {
2066 if (kill_mask & (1 << i)) {
2067 kill_mask &= ~(1 << i);
2074 x87_set_st(state, -1, keep, i);
2075 x87_create_fxch(state, first_insn, i);
2078 if ((kill_mask & 3) == 3) {
2079 /* we can do a double-pop */
2083 /* only a single pop */
2088 kill_mask >>= num_pop;
2089 keep = x87_create_fpop(state, first_insn, num_pop);
2094 } /* x87_kill_deads */
2097 * Run a simulation and fix all virtual instructions for a block.
2099 * @param sim the simulator handle
2100 * @param block the current block
2102 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
2105 blk_state *bl_state = x87_get_bl_state(sim, block);
2106 x87_state *state = bl_state->begin;
2107 const ir_edge_t *edge;
2108 ir_node *start_block;
2110 assert(state != NULL);
2111 /* already processed? */
2112 if (bl_state->end != NULL)
2115 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2116 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2117 DEBUG_ONLY(x87_dump_stack(state));
2119 /* at block begin, kill all dead registers */
2120 state = x87_kill_deads(sim, block, state);
2121 /* create a new state, will be changed */
2122 state = x87_clone_state(sim, state);
2124 /* beware, n might change */
2125 for (n = sched_first(block); !sched_is_end(n); n = next) {
2128 ir_op *op = get_irn_op(n);
2131 * get the next node to be simulated here.
2132 * n might be completely removed from the schedule-
2134 next = sched_next(n);
2135 if (op->ops.generic != NULL) {
2136 func = (sim_func)op->ops.generic;
2139 node_inserted = (*func)(state, n);
2142 * sim_func might have added an additional node after n,
2143 * so update next node
2144 * beware: n must not be changed by sim_func
2145 * (i.e. removed from schedule) in this case
2147 if (node_inserted != NO_NODE_ADDED)
2148 next = sched_next(n);
2152 start_block = get_irg_start_block(get_irn_irg(block));
2154 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));
2156 /* check if the state must be shuffled */
2157 foreach_block_succ(block, edge) {
2158 ir_node *succ = get_edge_src_irn(edge);
2159 blk_state *succ_state;
2161 if (succ == start_block)
2164 succ_state = x87_get_bl_state(sim, succ);
2166 if (succ_state->begin == NULL) {
2167 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2168 DEBUG_ONLY(x87_dump_stack(state));
2169 succ_state->begin = state;
2171 waitq_put(sim->worklist, succ);
2173 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2174 /* There is already a begin state for the successor, bad.
2175 Do the necessary permutations.
2176 Note that critical edges are removed, so this is always possible:
2177 If the successor has more than one possible input, then it must
2180 x87_shuffle(sim, block, state, succ, succ_state->begin);
2183 bl_state->end = state;
2184 } /* x87_simulate_block */
2187 * Register a simulator function.
2189 * @param op the opcode to simulate
2190 * @param func the simulator function for the opcode
2192 static void register_sim(ir_op *op, sim_func func)
2194 assert(op->ops.generic == NULL);
2195 op->ops.generic = (op_func) func;
2196 } /* register_sim */
2199 * Create a new x87 simulator.
2201 * @param sim a simulator handle, will be initialized
2202 * @param irg the current graph
2204 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
2206 obstack_init(&sim->obst);
2207 sim->blk_states = pmap_create();
2208 sim->n_idx = get_irg_last_idx(irg);
2209 sim->live = OALLOCN(&sim->obst, vfp_liveness, sim->n_idx);
2211 DB((dbg, LEVEL_1, "--------------------------------\n"
2212 "x87 Simulator started for %+F\n", irg));
2214 /* set the generic function pointer of instruction we must simulate */
2215 clear_irp_opcodes_generic_func();
2217 register_sim(op_ia32_Call, sim_Call);
2218 register_sim(op_ia32_vfld, sim_fld);
2219 register_sim(op_ia32_vfild, sim_fild);
2220 register_sim(op_ia32_vfld1, sim_fld1);
2221 register_sim(op_ia32_vfldz, sim_fldz);
2222 register_sim(op_ia32_vfadd, sim_fadd);
2223 register_sim(op_ia32_vfsub, sim_fsub);
2224 register_sim(op_ia32_vfmul, sim_fmul);
2225 register_sim(op_ia32_vfdiv, sim_fdiv);
2226 register_sim(op_ia32_vfprem, sim_fprem);
2227 register_sim(op_ia32_vfabs, sim_fabs);
2228 register_sim(op_ia32_vfchs, sim_fchs);
2229 register_sim(op_ia32_vfist, sim_fist);
2230 register_sim(op_ia32_vfisttp, sim_fisttp);
2231 register_sim(op_ia32_vfst, sim_fst);
2232 register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
2233 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
2234 register_sim(op_ia32_vFucomi, sim_Fucom);
2235 register_sim(op_be_Copy, sim_Copy);
2236 register_sim(op_be_Return, sim_Return);
2237 register_sim(op_be_Perm, sim_Perm);
2238 register_sim(op_be_Keep, sim_Keep);
2239 } /* x87_init_simulator */
2242 * Destroy a x87 simulator.
2244 * @param sim the simulator handle
2246 static void x87_destroy_simulator(x87_simulator *sim)
2248 pmap_destroy(sim->blk_states);
2249 obstack_free(&sim->obst, NULL);
2250 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2251 } /* x87_destroy_simulator */
2254 * Pre-block walker: calculate the liveness information for the block
2255 * and store it into the sim->live cache.
2257 static void update_liveness_walker(ir_node *block, void *data)
2259 x87_simulator *sim = (x87_simulator*)data;
2260 update_liveness(sim, block);
2261 } /* update_liveness_walker */
2264 * Run a simulation and fix all virtual instructions for a graph.
2265 * Replaces all virtual floating point instructions and registers
2268 void ia32_x87_simulate_graph(ir_graph *irg)
2270 /* TODO improve code quality (less executed fxch) by using execfreqs */
2272 ir_node *block, *start_block;
2273 blk_state *bl_state;
2276 /* create the simulator */
2277 x87_init_simulator(&sim, irg);
2279 start_block = get_irg_start_block(irg);
2280 bl_state = x87_get_bl_state(&sim, start_block);
2282 /* start with the empty state */
2283 bl_state->begin = empty;
2286 sim.worklist = new_waitq();
2287 waitq_put(sim.worklist, start_block);
2289 be_assure_liveness(irg);
2290 sim.lv = be_get_irg_liveness(irg);
2291 be_liveness_assure_sets(sim.lv);
2293 /* Calculate the liveness for all nodes. We must precalculate this info,
2294 * because the simulator adds new nodes (possible before Phi nodes) which
2295 * would let a lazy calculation fail.
2296 * On the other hand we reduce the computation amount due to
2297 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2299 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
2303 block = (ir_node*)waitq_get(sim.worklist);
2304 x87_simulate_block(&sim, block);
2305 } while (! waitq_empty(sim.worklist));
2308 del_waitq(sim.worklist);
2309 x87_destroy_simulator(&sim);
2310 } /* ia32_x87_simulate_graph */
2312 /* Initializes the x87 simulator. */
2313 void ia32_init_x87(void)
2315 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
2316 } /* ia32_init_x87 */