2 * Copyright (C) 1995-2010 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
33 #include "iredges_t.h"
48 #include "bearch_ia32_t.h"
49 #include "ia32_new_nodes.h"
50 #include "gen_ia32_new_nodes.h"
51 #include "gen_ia32_regalloc_if.h"
53 #include "ia32_architecture.h"
55 #define MASK_TOS(x) ((x) & (N_ia32_st_REGS - 1))
57 /** the debug handle */
58 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
60 /* Forward declaration. */
61 typedef struct x87_simulator x87_simulator;
64 * An exchange template.
65 * Note that our virtual functions have the same inputs
66 * and attributes as the real ones, so we can simple exchange
68 * Further, x87 supports inverse instructions, so we can handle them.
70 typedef struct exchange_tmpl {
71 ir_op *normal_op; /**< the normal one */
72 ir_op *reverse_op; /**< the reverse one if exists */
73 ir_op *normal_pop_op; /**< the normal one with tos pop */
74 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
78 * An entry on the simulated x87 stack.
80 typedef struct st_entry {
81 int reg_idx; /**< the virtual register index of this stack value */
82 ir_node *node; /**< the node that produced this value */
88 typedef struct x87_state {
89 st_entry st[N_ia32_st_REGS]; /**< the register stack */
90 int depth; /**< the current stack depth */
91 int tos; /**< position of the tos */
92 x87_simulator *sim; /**< The simulator. */
95 /** An empty state, used for blocks without fp instructions. */
96 static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
97 static x87_state *empty = (x87_state *)&_empty;
100 * Return values of the instruction simulator functions.
103 NO_NODE_ADDED = 0, /**< No node that needs simulation was added. */
104 NODE_ADDED = 1 /**< A node that must be simulated was added by the simulator
105 in the schedule AFTER the current node. */
109 * The type of an instruction simulator function.
111 * @param state the x87 state
112 * @param n the node to be simulated
114 * @return NODE_ADDED if a node was added AFTER n in schedule that MUST be
116 * NO_NODE_ADDED otherwise
118 typedef int (*sim_func)(x87_state *state, ir_node *n);
121 * A block state: Every block has a x87 state at the beginning and at the end.
123 typedef struct blk_state {
124 x87_state *begin; /**< state at the begin or NULL if not assigned */
125 x87_state *end; /**< state at the end or NULL if not assigned */
128 /** liveness bitset for vfp registers. */
129 typedef unsigned char vfp_liveness;
134 struct x87_simulator {
135 struct obstack obst; /**< An obstack for fast allocating. */
136 pmap *blk_states; /**< Map blocks to states. */
137 be_lv_t *lv; /**< intrablock liveness. */
138 vfp_liveness *live; /**< Liveness information. */
139 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
140 waitq *worklist; /**< Worklist of blocks that must be processed. */
141 ia32_isa_t *isa; /**< the ISA object */
145 * Returns the current stack depth.
147 * @param state the x87 state
149 * @return the x87 stack depth
151 static int x87_get_depth(const x87_state *state)
157 * Return the virtual register index at st(pos).
159 * @param state the x87 state
160 * @param pos a stack position
162 * @return the vfp register index that produced the value at st(pos)
164 static int x87_get_st_reg(const x87_state *state, int pos)
166 assert(pos < state->depth);
167 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
172 * Return the node at st(pos).
174 * @param state the x87 state
175 * @param pos a stack position
177 * @return the IR node that produced the value at st(pos)
179 static ir_node *x87_get_st_node(const x87_state *state, int pos)
181 assert(pos < state->depth);
182 return state->st[MASK_TOS(state->tos + pos)].node;
#ifdef DEBUG_libfirm
/**
 * Dump the stack for debugging.
 *
 * @param state  the x87 state
 */
static void x87_dump_stack(const x87_state *state)
{
	int i;

	/* print from the deepest entry up to the top of stack */
	for (i = state->depth - 1; i >= 0; --i) {
		DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
		    x87_get_st_node(state, i)));
	}
	DB((dbg, LEVEL_2, "<-- TOS\n"));
}
#endif /* DEBUG_libfirm */
203 * Set a virtual register to st(pos).
205 * @param state the x87 state
206 * @param reg_idx the vfp register index that should be set
207 * @param node the IR node that produces the value of the vfp register
208 * @param pos the stack position where the new value should be entered
210 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
212 assert(0 < state->depth);
213 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
214 state->st[MASK_TOS(state->tos + pos)].node = node;
216 DB((dbg, LEVEL_2, "After SET_REG: "));
217 DEBUG_ONLY(x87_dump_stack(state);)
221 * Set the tos virtual register.
223 * @param state the x87 state
224 * @param reg_idx the vfp register index that should be set
225 * @param node the IR node that produces the value of the vfp register
227 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node)
229 x87_set_st(state, reg_idx, node, 0);
233 * Swap st(0) with st(pos).
235 * @param state the x87 state
236 * @param pos the stack position to change the tos with
238 static void x87_fxch(x87_state *state, int pos)
241 assert(pos < state->depth);
243 entry = state->st[MASK_TOS(state->tos + pos)];
244 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
245 state->st[MASK_TOS(state->tos)] = entry;
247 DB((dbg, LEVEL_2, "After FXCH: "));
248 DEBUG_ONLY(x87_dump_stack(state);)
252 * Convert a virtual register to the stack index.
254 * @param state the x87 state
255 * @param reg_idx the register vfp index
257 * @return the stack position where the register is stacked
258 * or -1 if the virtual register was not found
260 static int x87_on_stack(const x87_state *state, int reg_idx)
262 int i, tos = state->tos;
264 for (i = 0; i < state->depth; ++i)
265 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
271 * Push a virtual Register onto the stack, double pushed allowed.
273 * @param state the x87 state
274 * @param reg_idx the register vfp index
275 * @param node the node that produces the value of the vfp register
277 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node)
279 assert(state->depth < N_ia32_st_REGS && "stack overrun");
282 state->tos = MASK_TOS(state->tos - 1);
283 state->st[state->tos].reg_idx = reg_idx;
284 state->st[state->tos].node = node;
286 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state);)
290 * Push a virtual Register onto the stack, double pushes are NOT allowed.
292 * @param state the x87 state
293 * @param reg_idx the register vfp index
294 * @param node the node that produces the value of the vfp register
295 * @param dbl_push if != 0 double pushes are allowed
297 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
299 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
301 x87_push_dbl(state, reg_idx, node);
305 * Pop a virtual Register from the stack.
307 * @param state the x87 state
309 static void x87_pop(x87_state *state)
311 assert(state->depth > 0 && "stack underrun");
314 state->tos = MASK_TOS(state->tos + 1);
316 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state);)
320 * Empty the fpu stack
322 * @param state the x87 state
324 static void x87_emms(x87_state *state)
331 * Returns the block state of a block.
333 * @param sim the x87 simulator handle
334 * @param block the current block
336 * @return the block state
338 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
340 blk_state *res = pmap_get(sim->blk_states, block);
343 res = OALLOC(&sim->obst, blk_state);
347 pmap_insert(sim->blk_states, block, res);
354 * Creates a new x87 state.
356 * @param sim the x87 simulator handle
358 * @return a new x87 state
360 static x87_state *x87_alloc_state(x87_simulator *sim)
362 x87_state *res = OALLOC(&sim->obst, x87_state);
371 * @param sim the x87 simulator handle
372 * @param src the x87 state that will be cloned
374 * @return a cloned copy of the src state
376 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
378 x87_state *res = x87_alloc_state(sim);
385 * Patch a virtual instruction into a x87 one and return
386 * the node representing the result value.
388 * @param n the IR node to patch
389 * @param op the x87 opcode to patch in
391 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
393 ir_mode *mode = get_irn_mode(n);
398 if (mode == mode_T) {
399 /* patch all Proj's */
400 const ir_edge_t *edge;
402 foreach_out_edge(n, edge) {
403 ir_node *proj = get_edge_src_irn(edge);
405 mode = get_irn_mode(proj);
406 if (mode_is_float(mode)) {
408 set_irn_mode(proj, ia32_reg_classes[CLASS_ia32_st].mode);
412 } else if (mode_is_float(mode))
413 set_irn_mode(n, ia32_reg_classes[CLASS_ia32_st].mode);
418 * Returns the first Proj of a mode_T node having a given mode.
420 * @param n the mode_T node
421 * @param m the desired mode of the Proj
422 * @return The first Proj of mode @p m found or NULL.
424 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
426 const ir_edge_t *edge;
428 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
430 foreach_out_edge(n, edge) {
431 ir_node *proj = get_edge_src_irn(edge);
432 if (get_irn_mode(proj) == m)
440 * Wrap the arch_* function here so we can check for errors.
442 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
444 const arch_register_t *res = arch_get_irn_register(irn);
446 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
450 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
453 const arch_register_t *res = arch_get_irn_register_out(irn, pos);
455 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
459 static inline const arch_register_t *get_st_reg(int index)
461 return &ia32_registers[REG_ST0 + index];
464 /* -------------- x87 perm --------------- */
467 * Creates a fxch for shuffle.
469 * @param state the x87 state
470 * @param pos parameter for fxch
471 * @param block the block were fxch is inserted
473 * Creates a new fxch node and reroute the user of the old node
476 * @return the fxch node
478 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block)
481 ia32_x87_attr_t *attr;
483 fxch = new_bd_ia32_fxch(NULL, block);
484 attr = get_ia32_x87_attr(fxch);
485 attr->x87[0] = get_st_reg(pos);
486 attr->x87[2] = get_st_reg(0);
490 x87_fxch(state, pos);
/* NOTE(review): the listing below has gaps in its embedded line numbering —
   statements (braces, breaks, assignments of `after`, returns) appear to be
   elided by the extraction. Code kept verbatim; comments only added. */
495 * Calculate the necessary permutations to reach dst_state.
497 * These permutations are done with fxch instructions and placed
498 * at the end of the block.
500 * Note that critical edges are removed here, so we need only
501 * a shuffle if the current block has only one successor.
503 * @param sim the simulator handle
504 * @param block the current block
505 * @param state the current x87 stack state, might be modified
506 * @param dst_block the destination block
507 * @param dst_state destination state
511 static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
512 x87_state *state, ir_node *dst_block,
513 const x87_state *dst_state)
515 int i, n_cycles, k, ri;
516 unsigned cycles[4], all_mask;
517 char cycle_idx[4][8];
518 ir_node *fxch, *before, *after;
522 assert(state->depth == dst_state->depth);
524 /* Some mathematics here:
525 If we have a cycle of length n that includes the tos,
526 we need n-1 exchange operations.
527 We can always add the tos and restore it, so we need
528 n+1 exchange operations for a cycle not containing the tos.
529 So, the maximum of needed operations is for a cycle of 7
530 not including the tos == 8.
531 This is the same number of ops we would need for using stores,
532 so exchange is cheaper (we save the loads).
533 On the other hand, we might need an additional exchange
534 in the next block to bring one operand on top, so the
535 number of ops in the first case is identical.
536 Further, no more than 4 cycles can exists (4 x 2).
538 all_mask = (1 << (state->depth)) - 1;
/* decompose the permutation between state and dst_state into cycles */
540 for (n_cycles = 0; all_mask; ++n_cycles) {
541 int src_idx, dst_idx;
543 /* find the first free slot */
544 for (i = 0; i < state->depth; ++i) {
545 if (all_mask & (1 << i)) {
546 all_mask &= ~(1 << i);
548 /* check if there are differences here */
549 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
555 /* no more cycles found */
560 cycles[n_cycles] = (1 << i);
561 cycle_idx[n_cycles][k++] = i;
562 for (src_idx = i; ; src_idx = dst_idx) {
563 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
565 if ((all_mask & (1 << dst_idx)) == 0)
568 cycle_idx[n_cycles][k++] = dst_idx;
569 cycles[n_cycles] |= (1 << dst_idx);
570 all_mask &= ~(1 << dst_idx);
/* terminate the index list of this cycle */
572 cycle_idx[n_cycles][k] = -1;
576 /* no permutation needed */
580 /* Hmm: permutation needed */
581 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
582 DEBUG_ONLY(x87_dump_stack(state);)
583 DB((dbg, LEVEL_2, " to\n"));
584 DEBUG_ONLY(x87_dump_stack(dst_state);)
588 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
589 for (ri = 0; ri < n_cycles; ++ri) {
590 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
591 for (k = 0; cycle_idx[ri][k] != -1; ++k)
592 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
593 DB((dbg, LEVEL_2, "\n"));
600 * Find the place node must be insert.
601 * We have only one successor block, so the last instruction should
604 before = sched_last(block);
605 assert(is_cfop(before));
607 /* now do the permutations */
608 for (ri = 0; ri < n_cycles; ++ri) {
609 if ((cycles[ri] & 1) == 0) {
610 /* this cycle does not include the tos */
611 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
613 sched_add_after(after, fxch);
615 sched_add_before(before, fxch);
618 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
619 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
621 sched_add_after(after, fxch);
623 sched_add_before(before, fxch);
/* a cycle without the tos needs a final fxch to restore st(0) */
626 if ((cycles[ri] & 1) == 0) {
627 /* this cycle does not include the tos */
628 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
629 sched_add_after(after, fxch);
636 * Create a fxch node before another node.
638 * @param state the x87 state
639 * @param n the node after the fxch
640 * @param pos exchange st(pos) with st(0)
644 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
647 ia32_x87_attr_t *attr;
648 ir_node *block = get_nodes_block(n);
650 x87_fxch(state, pos);
652 fxch = new_bd_ia32_fxch(NULL, block);
653 attr = get_ia32_x87_attr(fxch);
654 attr->x87[0] = get_st_reg(pos);
655 attr->x87[2] = get_st_reg(0);
659 sched_add_before(n, fxch);
660 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
665 * Create a fpush before node n.
667 * @param state the x87 state
668 * @param n the node after the fpush
669 * @param pos push st(pos) on stack
670 * @param op_idx replace input op_idx of n with the fpush result
672 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx)
674 ir_node *fpush, *pred = get_irn_n(n, op_idx);
675 ia32_x87_attr_t *attr;
676 const arch_register_t *out = x87_get_irn_register(pred);
678 x87_push_dbl(state, arch_register_get_index(out), pred);
680 fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
681 attr = get_ia32_x87_attr(fpush);
682 attr->x87[0] = get_st_reg(pos);
683 attr->x87[2] = get_st_reg(0);
686 sched_add_before(n, fpush);
688 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
692 * Create a fpop before node n.
694 * @param state the x87 state
695 * @param n the node after the fpop
696 * @param num pop 1 or 2 values
698 * @return the fpop node
700 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
702 ir_node *fpop = NULL;
703 ia32_x87_attr_t *attr;
708 if (ia32_cg_config.use_ffreep)
709 fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
711 fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
712 attr = get_ia32_x87_attr(fpop);
713 attr->x87[0] = get_st_reg(0);
714 attr->x87[1] = get_st_reg(0);
715 attr->x87[2] = get_st_reg(0);
718 sched_add_before(n, fpop);
719 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
724 /* --------------------------------- liveness ------------------------------------------ */
727 * The liveness transfer function.
728 * Updates a live set over a single step from a given node to its predecessor.
729 * Everything defined at the node is removed from the set, the uses of the node get inserted.
731 * @param irn The node at which liveness should be computed.
732 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
733 * the registers live after irn.
735 * @return The live bitset.
737 static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
740 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
742 if (get_irn_mode(irn) == mode_T) {
743 const ir_edge_t *edge;
745 foreach_out_edge(irn, edge) {
746 ir_node *proj = get_edge_src_irn(edge);
748 if (arch_irn_consider_in_reg_alloc(cls, proj)) {
749 const arch_register_t *reg = x87_get_irn_register(proj);
750 live &= ~(1 << arch_register_get_index(reg));
753 } else if (arch_irn_consider_in_reg_alloc(cls, irn)) {
754 const arch_register_t *reg = x87_get_irn_register(irn);
755 live &= ~(1 << arch_register_get_index(reg));
758 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
759 ir_node *op = get_irn_n(irn, i);
761 if (mode_is_float(get_irn_mode(op)) &&
762 arch_irn_consider_in_reg_alloc(cls, op)) {
763 const arch_register_t *reg = x87_get_irn_register(op);
764 live |= 1 << arch_register_get_index(reg);
771 * Put all live virtual registers at the end of a block into a bitset.
773 * @param sim the simulator handle
774 * @param lv the liveness information
775 * @param bl the block
777 * @return The live bitset at the end of this block
779 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
782 vfp_liveness live = 0;
783 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
784 const be_lv_t *lv = sim->lv;
786 be_lv_foreach(lv, block, be_lv_state_end, i) {
787 const arch_register_t *reg;
788 const ir_node *node = be_lv_get_irn(lv, block, i);
789 if (!arch_irn_consider_in_reg_alloc(cls, node))
792 reg = x87_get_irn_register(node);
793 live |= 1 << arch_register_get_index(reg);
/** get the register mask from an arch_register */
#define REGMASK(reg) (1 << (arch_register_get_index(reg)))
803 * Return a bitset of argument registers which are live at the end of a node.
805 * @param sim the simulator handle
806 * @param pos the node
807 * @param kill kill mask for the output registers
809 * @return The live bitset.
811 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
813 unsigned idx = get_irn_idx(pos);
815 assert(idx < sim->n_idx);
816 return sim->live[idx] & ~kill;
820 * Calculate the liveness for a whole block and cache it.
822 * @param sim the simulator handle
823 * @param lv the liveness handle
824 * @param block the block
826 static void update_liveness(x87_simulator *sim, ir_node *block)
828 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
832 /* now iterate through the block backward and cache the results */
833 sched_foreach_reverse(block, irn) {
834 /* stop at the first Phi: this produces the live-in */
838 idx = get_irn_idx(irn);
839 sim->live[idx] = live;
841 live = vfp_liveness_transfer(irn, live);
843 idx = get_irn_idx(block);
844 sim->live[idx] = live;
/**
 * Returns true if a register is live in a set.
 *
 * @param reg_idx  the vfp register index
 * @param live     a live bitset
 */
#define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
#ifdef DEBUG_libfirm
/**
 * Dump liveness info.
 *
 * @param live  the live bitset
 */
static void vfp_dump_live(vfp_liveness live)
{
	int i;

	DB((dbg, LEVEL_2, "Live after: "));
	for (i = 0; i < 8; ++i) {
		if (live & (1 << i)) {
			DB((dbg, LEVEL_2, "vf%d ", i));
		}
	}
	DB((dbg, LEVEL_2, "\n"));
}
#endif /* DEBUG_libfirm */
875 /* --------------------------------- simulators ---------------------------------------- */
/* NOTE(review): the listing below has gaps in its embedded line numbering —
   braces, else-branches, out_idx/do_pop assignments and the pop handling
   appear elided by the extraction. Code kept verbatim; comments only added. */
878 * Simulate a virtual binop.
880 * @param state the x87 state
881 * @param n the node that should be simulated (and patched)
882 * @param tmpl the template containing the 4 possible x87 opcodes
884 * @return NO_NODE_ADDED
886 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl)
888 int op2_idx = 0, op1_idx;
889 int out_idx, do_pop = 0;
890 ia32_x87_attr_t *attr;
892 ir_node *patched_insn;
894 x87_simulator *sim = state->sim;
895 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
896 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
897 const arch_register_t *op1_reg = x87_get_irn_register(op1);
898 const arch_register_t *op2_reg = x87_get_irn_register(op2);
899 const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
900 int reg_index_1 = arch_register_get_index(op1_reg);
901 int reg_index_2 = arch_register_get_index(op2_reg);
902 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
906 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
907 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
908 arch_register_get_name(out)));
909 DEBUG_ONLY(vfp_dump_live(live);)
910 DB((dbg, LEVEL_1, "Stack before: "));
911 DEBUG_ONLY(x87_dump_stack(state);)
913 op1_idx = x87_on_stack(state, reg_index_1);
914 assert(op1_idx >= 0);
915 op1_live_after = is_vfp_live(reg_index_1, live);
917 attr = get_ia32_x87_attr(n);
918 permuted = attr->attr.data.ins_permuted;
/* case split: second operand in a vfp register vs. address mode */
920 if (reg_index_2 != REG_VFP_VFP_NOREG) {
923 /* second operand is a vfp register */
924 op2_idx = x87_on_stack(state, reg_index_2);
925 assert(op2_idx >= 0);
926 op2_live_after = is_vfp_live(reg_index_2, live);
928 if (op2_live_after) {
929 /* Second operand is live. */
931 if (op1_live_after) {
932 /* Both operands are live: push the first one.
933 This works even for op1 == op2. */
934 x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
935 /* now do fxxx (tos=tos X op) */
939 dst = tmpl->normal_op;
941 /* Second live, first operand is dead here, bring it to tos. */
943 x87_create_fxch(state, n, op1_idx);
948 /* now do fxxx (tos=tos X op) */
950 dst = tmpl->normal_op;
953 /* Second operand is dead. */
954 if (op1_live_after) {
955 /* First operand is live: bring second to tos. */
957 x87_create_fxch(state, n, op2_idx);
962 /* now do fxxxr (tos = op X tos) */
964 dst = tmpl->reverse_op;
966 /* Both operands are dead here, pop them from the stack. */
969 /* Both are identically and on tos, no pop needed. */
970 /* here fxxx (tos = tos X tos) */
971 dst = tmpl->normal_op;
974 /* now do fxxxp (op = op X tos, pop) */
975 dst = tmpl->normal_pop_op;
979 } else if (op1_idx == 0) {
980 assert(op1_idx != op2_idx);
981 /* now do fxxxrp (op = tos X op, pop) */
982 dst = tmpl->reverse_pop_op;
986 /* Bring the second on top. */
987 x87_create_fxch(state, n, op2_idx);
988 if (op1_idx == op2_idx) {
989 /* Both are identically and on tos now, no pop needed. */
992 /* use fxxx (tos = tos X tos) */
993 dst = tmpl->normal_op;
996 /* op2 is on tos now */
998 /* use fxxxp (op = op X tos, pop) */
999 dst = tmpl->normal_pop_op;
1007 /* second operand is an address mode */
1008 if (op1_live_after) {
1009 /* first operand is live: push it here */
1010 x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
1013 /* first operand is dead: bring it to tos */
1015 x87_create_fxch(state, n, op1_idx);
1020 /* use fxxx (tos = tos X mem) */
1021 dst = permuted ? tmpl->reverse_op : tmpl->normal_op;
/* replace the virtual op by the selected x87 op and record the result */
1025 patched_insn = x87_patch_insn(n, dst);
1026 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
1031 /* patch the operation */
1032 attr->x87[0] = op1_reg = get_st_reg(op1_idx);
1033 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1034 attr->x87[1] = op2_reg = get_st_reg(op2_idx);
1036 attr->x87[2] = out = get_st_reg(out_idx);
1038 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1039 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1040 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
1041 arch_register_get_name(out)));
1043 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1044 arch_register_get_name(op1_reg),
1045 arch_register_get_name(out)));
1048 return NO_NODE_ADDED;
/* NOTE(review): gaps in the embedded numbering — the else-branch structure
   around the push/fxch decision and some assignments appear elided.
   Code kept verbatim; comments only added. */
1052 * Simulate a virtual Unop.
1054 * @param state the x87 state
1055 * @param n the node that should be simulated (and patched)
1056 * @param op the x87 opcode that will replace n's opcode
1058 * @return NO_NODE_ADDED
1060 static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
1063 x87_simulator *sim = state->sim;
1064 const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, 0));
1065 const arch_register_t *out = x87_get_irn_register(n);
1066 ia32_x87_attr_t *attr;
1067 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1069 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1070 DEBUG_ONLY(vfp_dump_live(live);)
1072 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1074 if (is_vfp_live(arch_register_get_index(op1), live)) {
1075 /* push the operand here */
1076 x87_create_fpush(state, n, op1_idx, 0);
1080 /* operand is dead, bring it to tos */
1082 x87_create_fxch(state, n, op1_idx);
/* the unop now operates on tos and writes its result to tos */
1087 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1088 attr = get_ia32_x87_attr(n);
1089 attr->x87[0] = op1 = get_st_reg(0);
1090 attr->x87[2] = out = get_st_reg(0);
1091 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1093 return NO_NODE_ADDED;
1097 * Simulate a virtual Load instruction.
1099 * @param state the x87 state
1100 * @param n the node that should be simulated (and patched)
1101 * @param op the x87 opcode that will replace n's opcode
1103 * @return NO_NODE_ADDED
1105 static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
1107 const arch_register_t *out = x87_irn_get_register(n, res_pos);
1108 ia32_x87_attr_t *attr;
1110 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
1111 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1112 assert(out == x87_irn_get_register(n, res_pos));
1113 attr = get_ia32_x87_attr(n);
1114 attr->x87[2] = out = get_st_reg(0);
1115 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1117 return NO_NODE_ADDED;
1121 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1123 * @param store The store
1124 * @param old_val The former value
1125 * @param new_val The new value
1127 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
1129 const ir_edge_t *edge, *ne;
1131 foreach_out_edge_safe(old_val, edge, ne) {
1132 ir_node *user = get_edge_src_irn(edge);
1134 if (! user || user == store)
1137 /* if the user is scheduled after the store: rewire */
1138 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1140 /* find the input of the user pointing to the old value */
1141 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1142 if (get_irn_n(user, i) == old_val)
1143 set_irn_n(user, i, new_val);
/* NOTE(review): gaps in the embedded numbering — braces, else branches,
   pop handling and the return statement appear elided by the extraction.
   Code kept verbatim; comments only added. */
1150 * Simulate a virtual Store.
1152 * @param state the x87 state
1153 * @param n the node that should be simulated (and patched)
1154 * @param op the x87 store opcode
1155 * @param op_p the x87 store and pop opcode
1157 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p)
1159 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1160 const arch_register_t *op2 = x87_get_irn_register(val);
1161 unsigned live = vfp_live_args_after(state->sim, n, 0);
1162 int insn = NO_NODE_ADDED;
1163 ia32_x87_attr_t *attr;
1164 int op2_reg_idx, op2_idx, depth;
1165 int live_after_node;
1168 op2_reg_idx = arch_register_get_index(op2);
1169 op2_idx = x87_on_stack(state, op2_reg_idx);
1170 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1171 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1172 assert(op2_idx >= 0);
1174 mode = get_ia32_ls_mode(n);
1175 depth = x87_get_depth(state);
1177 if (live_after_node) {
1179 Problem: fst doesn't support 96bit modes (spills), only fstp does
1180 fist doesn't support 64bit mode, only fistp
1182 - stack not full: push value and fstp
1183 - stack full: fstp value and load again
1184 Note that we cannot test on mode_E, because floats might be 96bit ...
1186 if (get_mode_size_bits(mode) > 64 || (mode_is_int(mode) && get_mode_size_bits(mode) > 32)) {
1187 if (depth < N_ia32_st_REGS) {
1188 /* ok, we have a free register: push + fstp */
1189 x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
1191 x87_patch_insn(n, op_p);
1193 ir_node *vfld, *mem, *block, *rproj, *mproj;
1194 ir_graph *irg = get_irn_irg(n);
1195 ir_node *nomem = get_irg_no_mem(irg);
1197 /* stack full here: need fstp + load */
1199 x87_patch_insn(n, op_p);
1201 block = get_nodes_block(n);
1202 vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), nomem, get_ia32_ls_mode(n))
1204 /* copy all attributes */
1205 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1206 if (is_ia32_use_frame(n))
1207 set_ia32_use_frame(vfld);
1208 set_ia32_op_type(vfld, ia32_AddrModeS);
1209 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1210 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1211 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1213 rproj = new_r_Proj(vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1214 mproj = new_r_Proj(vfld, mode_M, pn_ia32_vfld_M);
1215 mem = get_irn_Proj_for_mode(n, mode_M);
1217 assert(mem && "Store memory not found");
1219 arch_set_irn_register(rproj, op2);
1221 /* reroute all former users of the store memory to the load memory */
1222 edges_reroute(mem, mproj);
1223 /* set the memory input of the load to the store memory */
1224 set_irn_n(vfld, n_ia32_vfld_mem, mem);
1226 sched_add_after(n, vfld);
1227 sched_add_after(vfld, rproj);
1229 /* rewire all users, scheduled after the store, to the loaded value */
1230 collect_and_rewire_users(n, val, rproj);
1235 /* we can only store the tos to memory */
1237 x87_create_fxch(state, n, op2_idx);
1239 /* mode size 64 or smaller -> use normal fst */
1240 x87_patch_insn(n, op);
1243 /* we can only store the tos to memory */
1245 x87_create_fxch(state, n, op2_idx);
1248 x87_patch_insn(n, op_p);
1251 attr = get_ia32_x87_attr(n);
1252 attr->x87[1] = op2 = get_st_reg(0);
1253 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/* Generator macros: create the trivial sim_<op> wrappers that forward
   to the generic simulators with the matching x87 opcodes. */

#define _GEN_BINOP(op, rev) \
static int sim_##op(x87_state *state, ir_node *n) { \
	exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
	return sim_binop(state, n, &tmpl); \
}

#define GEN_BINOP(op)   _GEN_BINOP(op, op)
#define GEN_BINOPR(op)  _GEN_BINOP(op, op##r)

#define GEN_LOAD(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
}

#define GEN_UNOP(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_unop(state, n, op_ia32_##op); \
}

#define GEN_STORE(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
}
1301 * Simulate a virtual fisttp.
1303 * @param state the x87 state
1304 * @param n the node that should be simulated (and patched)
1306 * @return NO_NODE_ADDED
1308 static int sim_fisttp(x87_state *state, ir_node *n)
1310 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1311 const arch_register_t *op2 = x87_get_irn_register(val);
1312 ia32_x87_attr_t *attr;
1313 int op2_reg_idx, op2_idx;
/* locate the stored value on the simulated x87 stack */
1315 op2_reg_idx = arch_register_get_index(op2);
1316 op2_idx = x87_on_stack(state, op2_reg_idx);
1317 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1318 assert(op2_idx >= 0);
1320 /* Note: although the value is still live here, it is destroyed because
1321 of the pop. The register allocator is aware of that and introduced a copy
1322 if the value must be alive. */
1324 /* we can only store the tos to memory */
/* NOTE(review): a guard around the fxch is likely elided in this excerpt. */
1326 x87_create_fxch(state, n, op2_idx);
/* replace the virtual node by the real (popping) fisttp */
1329 x87_patch_insn(n, op_ia32_fisttp);
1331 attr = get_ia32_x87_attr(n);
1332 attr->x87[1] = op2 = get_st_reg(0);
1333 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
1335 return NO_NODE_ADDED;
1339 * Simulate a virtual FtstFnstsw.
1341 * @param state the x87 state
1342 * @param n the node that should be simulated (and patched)
1344 * @return NO_NODE_ADDED
1346 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1348 x87_simulator *sim = state->sim;
1349 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1350 ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1351 const arch_register_t *reg1 = x87_get_irn_register(op1_node);
1352 int reg_index_1 = arch_register_get_index(reg1);
1353 int op1_idx = x87_on_stack(state, reg_index_1);
1354 unsigned live = vfp_live_args_after(sim, n, 0);
1356 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, arch_register_get_name(reg1)));
1357 DEBUG_ONLY(vfp_dump_live(live);)
1358 DB((dbg, LEVEL_1, "Stack before: "));
1359 DEBUG_ONLY(x87_dump_stack(state);)
1360 assert(op1_idx >= 0);
1363 /* bring the value to tos */
1364 x87_create_fxch(state, n, op1_idx);
1368 /* patch the operation */
1369 x87_patch_insn(n, op_ia32_FtstFnstsw);
/* NOTE(review): op1_idx is presumably updated to 0 on a line elided here */
1370 reg1 = get_st_reg(op1_idx);
1371 attr->x87[0] = reg1;
1372 attr->x87[1] = NULL;
1373 attr->x87[2] = NULL;
/* the tested value is dead afterwards: pop it off the simulated stack */
1375 if (!is_vfp_live(reg_index_1, live))
1376 x87_create_fpop(state, sched_next(n), 1);
1378 return NO_NODE_ADDED;
1384 * @param state the x87 state
1385 * @param n the node that should be simulated (and patched)
1387 * @return NO_NODE_ADDED
/*
 * NOTE(review): simulates a virtual Fucom/Fucomi compare. Many branch bodies,
 * closing braces and pop-count bookkeeping lines are elided in this excerpt;
 * the case analysis below chooses fxch sequences to bring the operands to
 * st(0)/st(1) and decides how many pops are needed for dead operands.
 */
1389 static int sim_Fucom(x87_state *state, ir_node *n)
1393 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1395 x87_simulator *sim = state->sim;
1396 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1397 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1398 const arch_register_t *op1 = x87_get_irn_register(op1_node);
1399 const arch_register_t *op2 = x87_get_irn_register(op2_node);
1400 int reg_index_1 = arch_register_get_index(op1);
1401 int reg_index_2 = arch_register_get_index(op2);
1402 unsigned live = vfp_live_args_after(sim, n, 0);
1403 bool permuted = attr->attr.data.ins_permuted;
1407 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1408 arch_register_get_name(op1), arch_register_get_name(op2)));
1409 DEBUG_ONLY(vfp_dump_live(live);)
1410 DB((dbg, LEVEL_1, "Stack before: "));
1411 DEBUG_ONLY(x87_dump_stack(state);)
1413 op1_idx = x87_on_stack(state, reg_index_1);
1414 assert(op1_idx >= 0);
1416 /* BEWARE: check for comp a,a cases, they might happen */
1417 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1418 /* second operand is a vfp register */
1419 op2_idx = x87_on_stack(state, reg_index_2);
1420 assert(op2_idx >= 0);
1422 if (is_vfp_live(reg_index_2, live)) {
1423 /* second operand is live */
1425 if (is_vfp_live(reg_index_1, live)) {
1426 /* both operands are live */
1429 /* res = tos X op */
1430 } else if (op2_idx == 0) {
1431 /* res = op X tos */
1432 permuted = !permuted;
1435 /* bring the first one to tos */
1436 x87_create_fxch(state, n, op1_idx);
1437 if (op1_idx == op2_idx) {
1439 } else if (op2_idx == 0) {
1443 /* res = tos X op */
1446 /* second live, first operand is dead here, bring it to tos.
1447 This means further, op1_idx != op2_idx. */
1448 assert(op1_idx != op2_idx);
1450 x87_create_fxch(state, n, op1_idx);
1455 /* res = tos X op, pop */
1459 /* second operand is dead */
1460 if (is_vfp_live(reg_index_1, live)) {
1461 /* first operand is live: bring second to tos.
1462 This means further, op1_idx != op2_idx. */
1463 assert(op1_idx != op2_idx);
1465 x87_create_fxch(state, n, op2_idx);
1470 /* res = op X tos, pop */
1472 permuted = !permuted;
1475 /* both operands are dead here, check first for identity. */
1476 if (op1_idx == op2_idx) {
1477 /* identically, one pop needed */
1479 x87_create_fxch(state, n, op1_idx);
1483 /* res = tos X op, pop */
1486 /* different, move them to st and st(1) and pop both.
1487 The tricky part is to get one into st(1).*/
1488 else if (op2_idx == 1) {
1489 /* good, second operand is already in the right place, move the first */
1491 /* bring the first on top */
1492 x87_create_fxch(state, n, op1_idx);
1493 assert(op2_idx != 0);
1496 /* res = tos X op, pop, pop */
1498 } else if (op1_idx == 1) {
1499 /* good, first operand is already in the right place, move the second */
1501 /* bring the first on top */
1502 x87_create_fxch(state, n, op2_idx);
1503 assert(op1_idx != 0);
1506 /* res = op X tos, pop, pop */
1507 permuted = !permuted;
1511 /* if one is already the TOS, we need two fxch */
1513 /* first one is TOS, move to st(1) */
1514 x87_create_fxch(state, n, 1);
1515 assert(op2_idx != 1);
1517 x87_create_fxch(state, n, op2_idx);
1519 /* res = op X tos, pop, pop */
1521 permuted = !permuted;
1523 } else if (op2_idx == 0) {
1524 /* second one is TOS, move to st(1) */
1525 x87_create_fxch(state, n, 1);
1526 assert(op1_idx != 1);
1528 x87_create_fxch(state, n, op1_idx);
1530 /* res = tos X op, pop, pop */
1533 /* none of them is either TOS or st(1), 3 fxch needed */
1534 x87_create_fxch(state, n, op2_idx);
1535 assert(op1_idx != 0);
1536 x87_create_fxch(state, n, 1);
1538 x87_create_fxch(state, n, op1_idx);
1540 /* res = tos X op, pop, pop */
1547 /* second operand is an address mode */
1548 if (is_vfp_live(reg_index_1, live)) {
1549 /* first operand is live: bring it to TOS */
1551 x87_create_fxch(state, n, op1_idx);
1555 /* first operand is dead: bring it to tos */
1557 x87_create_fxch(state, n, op1_idx);
1564 /* patch the operation */
/* select the real opcode based on how many operands must be popped */
1565 if (is_ia32_vFucomFnstsw(n)) {
1569 case 0: dst = op_ia32_FucomFnstsw; break;
1570 case 1: dst = op_ia32_FucompFnstsw; break;
1571 case 2: dst = op_ia32_FucomppFnstsw; break;
1572 default: panic("invalid popcount in sim_Fucom");
1575 for (i = 0; i < pops; ++i) {
1578 } else if (is_ia32_vFucomi(n)) {
1580 case 0: dst = op_ia32_Fucomi; break;
1581 case 1: dst = op_ia32_Fucompi; x87_pop(state); break;
/* fucomi can pop at most once; a second pop needs an extra fpop */
1583 dst = op_ia32_Fucompi;
1585 x87_create_fpop(state, sched_next(n), 1);
1587 default: panic("invalid popcount in sim_Fucom");
1590 panic("invalid operation %+F in sim_FucomFnstsw", n);
1593 x87_patch_insn(n, dst);
1600 op1 = get_st_reg(op1_idx);
1603 op2 = get_st_reg(op2_idx);
1606 attr->x87[2] = NULL;
/* record whether operand order ended up swapped; presumably used later to
 * adjust the condition evaluation — confirm against the emitter */
1607 attr->attr.data.ins_permuted = permuted;
1610 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1611 arch_register_get_name(op1), arch_register_get_name(op2)));
1613 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1614 arch_register_get_name(op1)));
1617 return NO_NODE_ADDED;
1623 * @param state the x87 state
1624 * @param n the node that should be simulated (and patched)
1626 * @return NO_NODE_ADDED
/* NOTE(review): simulates a be_Keep — pops kept-but-dead vfp values. */
1628 static int sim_Keep(x87_state *state, ir_node *node)
1631 const arch_register_t *op_reg;
1637 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1639 arity = get_irn_arity(node);
1640 for (i = 0; i < arity; ++i) {
1641 op = get_irn_n(node, i);
1642 op_reg = arch_get_irn_register(op);
/* only vfp values live on the x87 stack; other classes are skipped */
1643 if (arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1646 reg_id = arch_register_get_index(op_reg);
1647 live = vfp_live_args_after(state->sim, node, 0);
1649 op_stack_idx = x87_on_stack(state, reg_id);
/* value is on the stack but dead -> pop it right after the Keep */
1650 if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live))
1651 x87_create_fpop(state, sched_next(node), 1);
1654 DB((dbg, LEVEL_1, "Stack after: "));
1655 DEBUG_ONLY(x87_dump_stack(state);)
1657 return NO_NODE_ADDED;
1661 * Keep the given node alive by adding a be_Keep.
1663 * @param node the node to keep alive
1665 static void keep_float_node_alive(ir_node *node)
1667 ir_node *block = get_nodes_block(node);
1668 ir_node *keep = be_new_Keep(block, 1, &node);
/* the Keep must appear in the schedule directly after the kept node */
1670 assert(sched_is_scheduled(node));
1671 sched_add_after(node, keep);
1675 * Create a copy of a node. Recreate the node if it's a constant.
1677 * @param state the x87 state
1678 * @param n the node to be copied
1680 * @return the copy of n
1682 static ir_node *create_Copy(x87_state *state, ir_node *n)
1684 dbg_info *n_dbg = get_irn_dbg_info(n);
1685 ir_mode *mode = get_irn_mode(n);
1686 ir_node *block = get_nodes_block(n);
1687 ir_node *pred = get_irn_n(n, 0);
1688 ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL;
1690 const arch_register_t *out;
1691 const arch_register_t *op1;
1692 ia32_x87_attr_t *attr;
1694 /* Do not copy constants, recreate them. */
/* NOTE(review): some case labels/breaks of this switch are elided here;
 * each x87 load-constant opcode maps to its node constructor. */
1695 switch (get_ia32_irn_opcode(pred)) {
1697 cnstr = new_bd_ia32_fldz;
1700 cnstr = new_bd_ia32_fld1;
1702 case iro_ia32_fldpi:
1703 cnstr = new_bd_ia32_fldpi;
1705 case iro_ia32_fldl2e:
1706 cnstr = new_bd_ia32_fldl2e;
1708 case iro_ia32_fldl2t:
1709 cnstr = new_bd_ia32_fldl2t;
1711 case iro_ia32_fldlg2:
1712 cnstr = new_bd_ia32_fldlg2;
1714 case iro_ia32_fldln2:
1715 cnstr = new_bd_ia32_fldln2;
1721 out = x87_get_irn_register(n);
1722 op1 = x87_get_irn_register(pred);
1724 if (cnstr != NULL) {
1725 /* copy a constant */
1726 res = (*cnstr)(n_dbg, block, mode);
/* the recreated constant pushes a new value onto the simulated stack */
1728 x87_push(state, arch_register_get_index(out), res);
1730 attr = get_ia32_x87_attr(res);
1731 attr->x87[2] = get_st_reg(0);
/* not a constant: push a copy of the operand's current stack slot */
1733 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1735 res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);
1737 x87_push(state, arch_register_get_index(out), res);
1739 attr = get_ia32_x87_attr(res);
1740 attr->x87[0] = get_st_reg(op1_idx);
1741 attr->x87[2] = get_st_reg(0);
1743 arch_set_irn_register(res, out);
1749 * Simulate a be_Copy.
1751 * @param state the x87 state
1752 * @param n the node that should be simulated (and patched)
1754 * @return NO_NODE_ADDED
1756 static int sim_Copy(x87_state *state, ir_node *n)
1759 const arch_register_t *out;
1760 const arch_register_t *op1;
1761 const arch_register_class_t *cls;
1762 ir_node *node, *next;
1763 int op1_idx, out_idx;
/* non-vfp copies do not touch the x87 stack */
1766 cls = arch_get_irn_reg_class(n);
1767 if (cls != &ia32_reg_classes[CLASS_ia32_vfp])
1770 pred = get_irn_n(n, 0);
1771 out = x87_get_irn_register(n);
1772 op1 = x87_get_irn_register(pred);
1773 live = vfp_live_args_after(state->sim, n, REGMASK(out));
1775 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1776 arch_register_get_name(op1), arch_register_get_name(out)));
1777 DEBUG_ONLY(vfp_dump_live(live);)
1779 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1781 if (is_vfp_live(arch_register_get_index(op1), live)) {
1782 /* Operand is still live, a real copy. We need here an fpush that can
1783 hold a register, so use the fpushCopy or recreate constants */
1784 node = create_Copy(state, n);
1786 /* We have to make sure the old value doesn't go dead (which can happen
1787 * when we recreate constants). As the simulator expected that value in
1788 * the pred blocks. This is unfortunate as removing it would save us 1
1789 * instruction, but we would have to rerun all the simulation to get
1792 next = sched_next(n);
1795 sched_add_before(next, node);
/* recreated constant left pred user-less: keep it alive for the verifier */
1797 if (get_irn_n_edges(pred) == 0) {
1798 keep_float_node_alive(pred);
1801 DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
1803 out_idx = x87_on_stack(state, arch_register_get_index(out));
1805 if (out_idx >= 0 && out_idx != op1_idx) {
1806 /* Matze: out already on stack? how can this happen? */
1807 panic("invalid stack state in x87 simulator");
1810 /* op1 must be killed and placed where out is */
1812 ia32_x87_attr_t *attr;
1813 /* best case, simple remove and rename */
1814 x87_patch_insn(n, op_ia32_Pop);
1815 attr = get_ia32_x87_attr(n);
1816 attr->x87[0] = op1 = get_st_reg(0);
1819 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1821 ia32_x87_attr_t *attr;
1822 /* move op1 to tos, store and pop it */
1824 x87_create_fxch(state, n, op1_idx);
1827 x87_patch_insn(n, op_ia32_Pop);
1828 attr = get_ia32_x87_attr(n);
1829 attr->x87[0] = op1 = get_st_reg(out_idx);
1832 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1834 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1837 /* just a virtual copy */
1838 x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
1839 /* don't remove the node to keep the verifier quiet :),
1840 the emitter won't emit any code for the node */
1843 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)))
1844 exchange(n, get_unop_op(n));
1848 return NO_NODE_ADDED;
1852 * Returns the vf0 result Proj of a Call.
1854 * @param call the Call node
1856 static ir_node *get_call_result_proj(ir_node *call)
1858 const ir_edge_t *edge;
1860 /* search the result proj */
1861 foreach_out_edge(call, edge) {
1862 ir_node *proj = get_edge_src_irn(edge);
1863 long pn = get_Proj_proj(proj);
/* the vf0 Proj carries the x87 floating point return value */
1865 if (pn == pn_ia32_Call_vf0)
1873 * Simulate a ia32_Call.
1875 * @param state the x87 state
1876 * @param n the node that should be simulated (and patched)
1878 * @return NO_NODE_ADDED
1880 static int sim_Call(x87_state *state, ir_node *n)
1882 ir_type *call_tp = get_ia32_call_attr_const(n)->call_tp;
1886 const arch_register_t *reg;
1888 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1890 /* at the begin of a call the x87 state should be empty */
1891 assert(state->depth == 0 && "stack not empty before call");
/* no result -> nothing to push onto the simulated stack */
1893 if (get_method_n_ress(call_tp) <= 0)
1897 * If the called function returns a float, it is returned in st(0).
1898 * This even happens if the return value is NOT used.
1899 * Moreover, only one return result is supported.
1901 res_type = get_method_res_type(call_tp, 0);
1902 mode = get_type_mode(res_type);
1904 if (mode == NULL || !mode_is_float(mode))
1907 resproj = get_call_result_proj(n);
1908 assert(resproj != NULL);
/* model the ABI: the float result appears in st(0) after the call */
1910 reg = x87_get_irn_register(resproj);
1911 x87_push(state, arch_register_get_index(reg), resproj);
1914 DB((dbg, LEVEL_1, "Stack after: "));
1915 DEBUG_ONLY(x87_dump_stack(state);)
1917 return NO_NODE_ADDED;
1921 * Simulate a be_Return.
1923 * @param state the x87 state
1924 * @param n the node that should be simulated (and patched)
1926 * @return NO_NODE_ADDED
1928 static int sim_Return(x87_state *state, ir_node *n)
1930 int n_res = be_Return_get_n_rets(n);
1931 int i, n_float_res = 0;
1933 /* only floating point return values must reside on stack */
1934 for (i = 0; i < n_res; ++i) {
1935 ir_node *res = get_irn_n(n, n_be_Return_val + i);
1937 if (mode_is_float(get_irn_mode(res)))
/* the stack must contain exactly the float return values at this point */
1940 assert(x87_get_depth(state) == n_float_res);
1942 /* pop them virtually */
1943 for (i = n_float_res - 1; i >= 0; --i)
1946 return NO_NODE_ADDED;
/** Input/output register pair of a Perm. */
1949 typedef struct perm_data_t {
1950 const arch_register_t *in; /**< the input register */
1951 const arch_register_t *out; /**< the output register */
1955 * Simulate a be_Perm.
1957 * @param state the x87 state
1958 * @param irn the node that should be simulated (and patched)
1960 * @return NO_NODE_ADDED
1962 static int sim_Perm(x87_state *state, ir_node *irn)
1965 ir_node *pred = get_irn_n(irn, 0);
1967 const ir_edge_t *edge;
1969 /* handle only floating point Perms */
1970 if (! mode_is_float(get_irn_mode(pred)))
1971 return NO_NODE_ADDED;
1973 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1975 /* Perm is a pure virtual instruction on x87.
1976 All inputs must be on the FPU stack and are pairwise
1977 different from each other.
1978 So, all we need to do is to permutate the stack state. */
1979 n = get_irn_arity(irn);
1980 NEW_ARR_A(int, stack_pos, n);
1982 /* collect old stack positions */
1983 for (i = 0; i < n; ++i) {
1984 const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
1985 int idx = x87_on_stack(state, arch_register_get_index(inreg));
1987 assert(idx >= 0 && "Perm argument not on x87 stack");
1991 /* now do the permutation */
/* each Proj's number selects which input slot its output register takes */
1992 foreach_out_edge(irn, edge) {
1993 ir_node *proj = get_edge_src_irn(edge);
1994 const arch_register_t *out = x87_get_irn_register(proj);
1995 long num = get_Proj_proj(proj);
1997 assert(0 <= num && num < n && "More Proj's than Perm inputs");
1998 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
2000 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
2002 return NO_NODE_ADDED;
2006 * Kill any dead registers at block start by popping them from the stack.
2008 * @param sim the simulator handle
2009 * @param block the current block
2010 * @param start_state the x87 state at the begin of the block
2012 * @return the x87 state after dead register killed
2014 static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state)
2016 x87_state *state = start_state;
2017 ir_node *first_insn = sched_first(block);
2018 ir_node *keep = NULL;
2019 unsigned live = vfp_live_args_after(sim, block, 0);
2021 int i, depth, num_pop;
/* build a bitmask of dead stack slots (bit i = stack position i is dead) */
2024 depth = x87_get_depth(state);
2025 for (i = depth - 1; i >= 0; --i) {
2026 int reg = x87_get_st_reg(state, i);
2028 if (! is_vfp_live(reg, live))
2029 kill_mask |= (1 << i);
2033 /* create a new state, will be changed */
2034 state = x87_clone_state(sim, state);
2036 DB((dbg, LEVEL_1, "Killing deads:\n"));
2037 DEBUG_ONLY(vfp_dump_live(live);)
2038 DEBUG_ONLY(x87_dump_stack(state);)
2040 if (kill_mask != 0 && live == 0) {
2041 /* special case: kill all registers */
2042 if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
2043 if (ia32_cg_config.use_femms) {
2044 /* use FEMMS on AMD processors to clear all */
2045 keep = new_bd_ia32_femms(NULL, block);
2047 /* use EMMS to clear all */
2048 keep = new_bd_ia32_emms(NULL, block);
2050 sched_add_before(first_insn, keep);
2056 /* now kill registers */
2058 /* we can only kill from TOS, so bring them up */
2059 if (! (kill_mask & 1)) {
2060 /* search from behind, because we can do a double-pop */
2061 for (i = depth - 1; i >= 0; --i) {
2062 if (kill_mask & (1 << i)) {
2063 kill_mask &= ~(1 << i);
/* exchange a dead slot up to the TOS so it can be popped */
2070 x87_set_st(state, -1, keep, i);
2071 x87_create_fxch(state, first_insn, i);
2074 if ((kill_mask & 3) == 3) {
2075 /* we can do a double-pop */
2079 /* only a single pop */
2084 kill_mask >>= num_pop;
2085 keep = x87_create_fpop(state, first_insn, num_pop);
2093 * Run a simulation and fix all virtual instructions for a block.
2095 * @param sim the simulator handle
2096 * @param block the current block
2098 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
2101 blk_state *bl_state = x87_get_bl_state(sim, block);
2102 x87_state *state = bl_state->begin;
2103 const ir_edge_t *edge;
2104 ir_node *start_block;
/* a block is only simulated once its begin state is known */
2106 assert(state != NULL);
2107 /* already processed? */
2108 if (bl_state->end != NULL)
2111 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2112 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2113 DEBUG_ONLY(x87_dump_stack(state);)
2115 /* at block begin, kill all dead registers */
2116 state = x87_kill_deads(sim, block, state);
2117 /* create a new state, will be changed */
2118 state = x87_clone_state(sim, state);
2120 /* beware, n might change */
2121 for (n = sched_first(block); !sched_is_end(n); n = next) {
2124 ir_op *op = get_irn_op(n);
2127 * get the next node to be simulated here.
2128 * n might be completely removed from the schedule.
2130 next = sched_next(n);
/* opcodes with a registered sim_func get simulated and patched */
2131 if (op->ops.generic != NULL) {
2132 func = (sim_func)op->ops.generic;
2135 node_inserted = (*func)(state, n);
2138 * sim_func might have added an additional node after n,
2139 * so update next node
2140 * beware: n must not be changed by sim_func
2141 * (i.e. removed from schedule) in this case
2143 if (node_inserted != NO_NODE_ADDED)
2144 next = sched_next(n);
2148 start_block = get_irg_start_block(get_irn_irg(block));
2150 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state);)
2152 /* check if the state must be shuffled */
2153 foreach_block_succ(block, edge) {
2154 ir_node *succ = get_edge_src_irn(edge);
2155 blk_state *succ_state;
2157 if (succ == start_block)
2160 succ_state = x87_get_bl_state(sim, succ);
2162 if (succ_state->begin == NULL) {
2163 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2164 DEBUG_ONLY(x87_dump_stack(state);)
2165 succ_state->begin = state;
2167 waitq_put(sim->worklist, succ);
2169 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2170 /* There is already a begin state for the successor, bad.
2171 Do the necessary permutations.
2172 Note that critical edges are removed, so this is always possible:
2173 If the successor has more than one possible input, then it must
2176 x87_shuffle(sim, block, state, succ, succ_state->begin);
2179 bl_state->end = state;
2183 * Register a simulator function.
2185 * @param op the opcode to simulate
2186 * @param func the simulator function for the opcode
2188 static void register_sim(ir_op *op, sim_func func)
/* each opcode may have at most one simulator function */
2190 assert(op->ops.generic == NULL);
2191 op->ops.generic = (op_func) func;
2195 * Create a new x87 simulator.
2197 * @param sim a simulator handle, will be initialized
2198 * @param irg the current graph
2200 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
2202 obstack_init(&sim->obst);
2203 sim->blk_states = pmap_create();
2204 sim->n_idx = get_irg_last_idx(irg);
/* liveness cache: one entry per node index in the graph */
2205 sim->live = OALLOCN(&sim->obst, vfp_liveness, sim->n_idx);
2207 DB((dbg, LEVEL_1, "--------------------------------\n"
2208 "x87 Simulator started for %+F\n", irg));
2210 /* set the generic function pointer of instruction we must simulate */
2211 ir_clear_opcodes_generic_func();
2213 register_sim(op_ia32_Call, sim_Call);
2214 register_sim(op_ia32_vfld, sim_fld);
2215 register_sim(op_ia32_vfild, sim_fild);
2216 register_sim(op_ia32_vfld1, sim_fld1);
2217 register_sim(op_ia32_vfldz, sim_fldz);
2218 register_sim(op_ia32_vfadd, sim_fadd);
2219 register_sim(op_ia32_vfsub, sim_fsub);
2220 register_sim(op_ia32_vfmul, sim_fmul);
2221 register_sim(op_ia32_vfdiv, sim_fdiv);
2222 register_sim(op_ia32_vfprem, sim_fprem);
2223 register_sim(op_ia32_vfabs, sim_fabs);
2224 register_sim(op_ia32_vfchs, sim_fchs);
2225 register_sim(op_ia32_vfist, sim_fist);
2226 register_sim(op_ia32_vfisttp, sim_fisttp);
2227 register_sim(op_ia32_vfst, sim_fst);
2228 register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
2229 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
2230 register_sim(op_ia32_vFucomi, sim_Fucom);
2231 register_sim(op_be_Copy, sim_Copy);
2232 register_sim(op_be_Return, sim_Return);
2233 register_sim(op_be_Perm, sim_Perm);
2234 register_sim(op_be_Keep, sim_Keep);
2238 * Destroy a x87 simulator.
2240 * @param sim the simulator handle
2242 static void x87_destroy_simulator(x87_simulator *sim)
/* block states and the liveness cache live on the obstack / pmap */
2244 pmap_destroy(sim->blk_states);
2245 obstack_free(&sim->obst, NULL);
2246 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2250 * Pre-block walker: calculate the liveness information for the block
2251 * and store it into the sim->live cache.
2253 static void update_liveness_walker(ir_node *block, void *data)
2255 x87_simulator *sim = (x87_simulator*)data;
2256 update_liveness(sim, block);
2260 * Run a simulation and fix all virtual instructions for a graph.
2261 * Replaces all virtual floating point instructions and registers
2264 void ia32_x87_simulate_graph(ir_graph *irg)
2266 /* TODO improve code quality (less executed fxch) by using execfreqs */
2268 ir_node *block, *start_block;
2269 blk_state *bl_state;
2272 /* create the simulator */
2273 x87_init_simulator(&sim, irg);
2275 start_block = get_irg_start_block(irg);
2276 bl_state = x87_get_bl_state(&sim, start_block);
2278 /* start with the empty state */
2279 bl_state->begin = empty;
/* worklist-driven traversal: blocks are simulated once their begin state
 * is known (see x87_simulate_block) */
2282 sim.worklist = new_waitq();
2283 waitq_put(sim.worklist, start_block);
2285 be_assure_live_sets(irg);
2286 sim.lv = be_get_irg_liveness(irg);
2288 /* Calculate the liveness for all nodes. We must precalculate this info,
2289 * because the simulator adds new nodes (possible before Phi nodes) which
2290 * would let a lazy calculation fail.
2291 * On the other hand we reduce the computation amount due to
2292 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2294 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
2298 block = (ir_node*)waitq_get(sim.worklist);
2299 x87_simulate_block(&sim, block);
2300 } while (! waitq_empty(sim.worklist));
2303 del_waitq(sim.worklist);
2304 x87_destroy_simulator(&sim);
2307 /* Initializes the x87 simulator. */
2308 void ia32_init_x87(void)
2310 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");