2 * Copyright (C) 1995-2010 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
33 #include "iredges_t.h"
48 #include "bearch_ia32_t.h"
49 #include "ia32_new_nodes.h"
50 #include "gen_ia32_new_nodes.h"
51 #include "gen_ia32_regalloc_if.h"
53 #include "ia32_architecture.h"
/** The debug handle used by all DB()/DEBUG_ONLY() tracing in this file. */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* Forward declaration; the full definition of struct x87_simulator
 * appears further down in this file. */
typedef struct x87_simulator x87_simulator;
62 * An exchange template.
63 * Note that our virtual functions have the same inputs
64 * and attributes as the real ones, so we can simple exchange
66 * Further, x87 supports inverse instructions, so we can handle them.
68 typedef struct exchange_tmpl {
69 ir_op *normal_op; /**< the normal one */
70 ir_op *reverse_op; /**< the reverse one if exists */
71 ir_op *normal_pop_op; /**< the normal one with tos pop */
72 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
76 * An entry on the simulated x87 stack.
78 typedef struct st_entry {
79 int reg_idx; /**< the virtual register index of this stack value */
80 ir_node *node; /**< the node that produced this value */
86 typedef struct x87_state {
87 st_entry st[N_ia32_st_REGS]; /**< the register stack */
88 int depth; /**< the current stack depth */
89 x87_simulator *sim; /**< The simulator. */
/** An empty state, shared as the state for all blocks that contain no
 *  fp instructions (avoids allocating a fresh zero state per block). */
static x87_state _empty = { { {0, NULL}, }, 0, NULL };
static x87_state *empty = (x87_state *)&_empty;
/**
 * Return values of the instruction simulator functions.
 */
enum {
    NO_NODE_ADDED = 0, /**< No node that needs simulation was added. */
    NODE_ADDED    = 1  /**< A node that must be simulated was added by the
                            simulator in the schedule AFTER the current
                            node. */
};
/**
 * The type of an instruction simulator function.
 *
 * @param state  the x87 state
 * @param n      the node to be simulated
 *
 * @return NODE_ADDED    if a node was added AFTER n in the schedule that
 *                       MUST be simulated as well
 *         NO_NODE_ADDED otherwise
 */
typedef int (*sim_func)(x87_state *state, ir_node *n);
118 * A block state: Every block has a x87 state at the beginning and at the end.
120 typedef struct blk_state {
121 x87_state *begin; /**< state at the begin or NULL if not assigned */
122 x87_state *end; /**< state at the end or NULL if not assigned */
/** Liveness bitset for vfp registers: one bit per virtual fp register. */
typedef unsigned char vfp_liveness;
131 struct x87_simulator {
132 struct obstack obst; /**< An obstack for fast allocating. */
133 pmap *blk_states; /**< Map blocks to states. */
134 be_lv_t *lv; /**< intrablock liveness. */
135 vfp_liveness *live; /**< Liveness information. */
136 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
137 waitq *worklist; /**< Worklist of blocks that must be processed. */
141 * Returns the current stack depth.
143 * @param state the x87 state
145 * @return the x87 stack depth
147 static int x87_get_depth(const x87_state *state)
152 static st_entry *x87_get_entry(x87_state *const state, int const pos)
154 assert(0 <= pos && pos < state->depth);
155 return &state->st[N_ia32_st_REGS - state->depth + pos];
159 * Return the virtual register index at st(pos).
161 * @param state the x87 state
162 * @param pos a stack position
164 * @return the vfp register index that produced the value at st(pos)
166 static int x87_get_st_reg(const x87_state *state, int pos)
168 return x87_get_entry((x87_state*)state, pos)->reg_idx;
173 * Dump the stack for debugging.
175 * @param state the x87 state
177 static void x87_dump_stack(const x87_state *state)
179 for (int i = state->depth; i-- != 0;) {
180 st_entry const *const entry = x87_get_entry((x87_state*)state, i);
181 DB((dbg, LEVEL_2, "vf%d(%+F) ", entry->reg_idx, entry->node));
183 DB((dbg, LEVEL_2, "<-- TOS\n"));
185 #endif /* DEBUG_libfirm */
188 * Set a virtual register to st(pos).
190 * @param state the x87 state
191 * @param reg_idx the vfp register index that should be set
192 * @param node the IR node that produces the value of the vfp register
193 * @param pos the stack position where the new value should be entered
195 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
197 st_entry *const entry = x87_get_entry(state, pos);
198 entry->reg_idx = reg_idx;
201 DB((dbg, LEVEL_2, "After SET_REG: "));
202 DEBUG_ONLY(x87_dump_stack(state);)
206 * Set the tos virtual register.
208 * @param state the x87 state
209 * @param reg_idx the vfp register index that should be set
210 * @param node the IR node that produces the value of the vfp register
212 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node)
214 x87_set_st(state, reg_idx, node, 0);
218 * Swap st(0) with st(pos).
220 * @param state the x87 state
221 * @param pos the stack position to change the tos with
223 static void x87_fxch(x87_state *state, int pos)
225 st_entry *const a = x87_get_entry(state, pos);
226 st_entry *const b = x87_get_entry(state, 0);
227 st_entry const t = *a;
231 DB((dbg, LEVEL_2, "After FXCH: "));
232 DEBUG_ONLY(x87_dump_stack(state);)
236 * Convert a virtual register to the stack index.
238 * @param state the x87 state
239 * @param reg_idx the register vfp index
241 * @return the stack position where the register is stacked
242 * or -1 if the virtual register was not found
244 static int x87_on_stack(const x87_state *state, int reg_idx)
246 for (int i = 0; i < state->depth; ++i) {
247 if (x87_get_st_reg(state, i) == reg_idx)
254 * Push a virtual Register onto the stack, double pushed allowed.
256 * @param state the x87 state
257 * @param reg_idx the register vfp index
258 * @param node the node that produces the value of the vfp register
260 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node)
262 assert(state->depth < N_ia32_st_REGS && "stack overrun");
265 st_entry *const entry = x87_get_entry(state, 0);
266 entry->reg_idx = reg_idx;
269 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state);)
273 * Push a virtual Register onto the stack, double pushes are NOT allowed.
275 * @param state the x87 state
276 * @param reg_idx the register vfp index
277 * @param node the node that produces the value of the vfp register
279 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
281 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
283 x87_push_dbl(state, reg_idx, node);
287 * Pop a virtual Register from the stack.
289 * @param state the x87 state
291 static void x87_pop(x87_state *state)
293 assert(state->depth > 0 && "stack underrun");
297 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state);)
301 * Empty the fpu stack
303 * @param state the x87 state
305 static void x87_emms(x87_state *state)
311 * Returns the block state of a block.
313 * @param sim the x87 simulator handle
314 * @param block the current block
316 * @return the block state
318 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
320 blk_state *res = pmap_get(blk_state, sim->blk_states, block);
323 res = OALLOC(&sim->obst, blk_state);
327 pmap_insert(sim->blk_states, block, res);
334 * Creates a new x87 state.
336 * @param sim the x87 simulator handle
338 * @return a new x87 state
340 static x87_state *x87_alloc_state(x87_simulator *sim)
342 x87_state *res = OALLOC(&sim->obst, x87_state);
351 * @param sim the x87 simulator handle
352 * @param src the x87 state that will be cloned
354 * @return a cloned copy of the src state
356 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
358 x87_state *res = x87_alloc_state(sim);
/**
 * Patch a virtual instruction into a x87 one and return
 * the node representing the result value.
 *
 * @param n   the IR node to patch
 * @param op  the x87 opcode to patch in
 */
static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
/* NOTE(review): several lines of this function (opening brace, the actual
 * opcode replacement, the tracking of the result node and the final return)
 * are elided in this excerpt — confirm against the full file. */
ir_mode *mode = get_irn_mode(n);

if (mode == mode_T) {
    /* Tuple result: patch every float Proj to the concrete x87
     * register-class mode. */
    foreach_out_edge(n, edge) {
        ir_node *proj = get_edge_src_irn(edge);
        mode = get_irn_mode(proj);
        if (mode_is_float(mode)) {
            set_irn_mode(proj, ia32_reg_classes[CLASS_ia32_st].mode);
} else if (mode_is_float(mode))
    /* single float result: patch the node's own mode */
    set_irn_mode(n, ia32_reg_classes[CLASS_ia32_st].mode);
396 * Returns the first Proj of a mode_T node having a given mode.
398 * @param n the mode_T node
399 * @param m the desired mode of the Proj
400 * @return The first Proj of mode @p m found or NULL.
402 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
404 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
406 foreach_out_edge(n, edge) {
407 ir_node *proj = get_edge_src_irn(edge);
408 if (get_irn_mode(proj) == m)
416 * Wrap the arch_* function here so we can check for errors.
418 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
420 const arch_register_t *res = arch_get_irn_register(irn);
422 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
426 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
429 const arch_register_t *res = arch_get_irn_register_out(irn, pos);
431 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
435 static inline const arch_register_t *get_st_reg(int index)
437 return &ia32_registers[REG_ST0 + index];
440 /* -------------- x87 perm --------------- */
/**
 * Creates a fxch for shuffle.
 *
 * @param state  the x87 state
 * @param pos    parameter for fxch
 * @param block  the block where fxch is inserted
 *
 * Creates a new fxch node and reroutes the user of the old node
 * to the fxch.
 *
 * @return the fxch node
 */
static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block)
/* NOTE(review): the declaration of fxch, some statements between attribute
 * setup and the state update, and the return statement are elided in this
 * excerpt. */
ia32_x87_attr_t *attr;

fxch = new_bd_ia32_fxch(NULL, block);
attr = get_ia32_x87_attr(fxch);
/* record the exchanged slots in the x87 attribute: st(pos) <-> st(0) */
attr->x87[0] = get_st_reg(pos);
attr->x87[2] = get_st_reg(0);

/* mirror the exchange in the simulated stack */
x87_fxch(state, pos);
/**
 * Calculate the necessary permutations to reach dst_state.
 *
 * These permutations are done with fxch instructions and placed
 * at the end of the block.
 *
 * Note that critical edges are removed here, so we need only
 * a shuffle if the current block has only one successor.
 *
 * @param block      the current block
 * @param state      the current x87 stack state, might be modified
 * @param dst_state  destination state
 */
static x87_state *x87_shuffle(ir_node *block, x87_state *state, const x87_state *dst_state)
/* NOTE(review): numerous lines (braces, break statements, the early return
 * for the no-permutation case, the final return) are elided in this
 * excerpt — the visible statements below show only the skeleton. */
int i, n_cycles, k, ri;
unsigned cycles[4], all_mask;
char cycle_idx[4][8];
ir_node *fxch, *before, *after;

assert(state->depth == dst_state->depth);

/* Some mathematics here:
 * If we have a cycle of length n that includes the tos,
 * we need n-1 exchange operations.
 * We can always add the tos and restore it, so we need
 * n+1 exchange operations for a cycle not containing the tos.
 * So, the maximum of needed operations is for a cycle of 7
 * not including the tos == 8.
 * This is the same number of ops we would need for using stores,
 * so exchange is cheaper (we save the loads).
 * On the other hand, we might need an additional exchange
 * in the next block to bring one operand on top, so the
 * number of ops in the first case is identical.
 * Further, no more than 4 cycles can exist (4 x 2). */
all_mask = (1 << (state->depth)) - 1;

/* detect all permutation cycles between state and dst_state */
for (n_cycles = 0; all_mask; ++n_cycles) {
    int src_idx, dst_idx;

    /* find the first free slot */
    for (i = 0; i < state->depth; ++i) {
        if (all_mask & (1 << i)) {
            all_mask &= ~(1 << i);

            /* check if there are differences here */
            if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))

    /* no more cycles found */

    /* follow the cycle starting at slot i, collecting its members */
    cycles[n_cycles] = (1 << i);
    cycle_idx[n_cycles][k++] = i;
    for (src_idx = i; ; src_idx = dst_idx) {
        dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));

        if ((all_mask & (1 << dst_idx)) == 0)

        cycle_idx[n_cycles][k++] = dst_idx;
        cycles[n_cycles] |= (1 << dst_idx);
        all_mask &= ~(1 << dst_idx);
    }
    cycle_idx[n_cycles][k] = -1;   /* -1 terminates each cycle list */
}

/* no permutation needed */

/* Hmm: permutation needed */
DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
DEBUG_ONLY(x87_dump_stack(state);)
DB((dbg, LEVEL_2, "                  to\n"));
DEBUG_ONLY(x87_dump_stack(dst_state);)

DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
for (ri = 0; ri < n_cycles; ++ri) {
    DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
    for (k = 0; cycle_idx[ri][k] != -1; ++k)
        DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
    DB((dbg, LEVEL_2, "\n"));
}

/*
 * Find the place the nodes must be inserted.
 * We have only one successor block, so the last instruction should
 * be a jump.
 */
before = sched_last(block);
assert(is_cfop(before));

/* now do the permutations */
for (ri = 0; ri < n_cycles; ++ri) {
    if ((cycles[ri] & 1) == 0) {
        /* this cycle does not include the tos */
        fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
        sched_add_after(after, fxch);
        sched_add_before(before, fxch);
    }
    for (k = 1; cycle_idx[ri][k] != -1; ++k) {
        fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
        sched_add_after(after, fxch);
        sched_add_before(before, fxch);
    }
    if ((cycles[ri] & 1) == 0) {
        /* this cycle does not include the tos: restore the tos */
        fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
        sched_add_after(after, fxch);
    }
}
/**
 * Create a fxch node before another node.
 *
 * @param state  the x87 state
 * @param n      the node after the fxch
 * @param pos    exchange st(pos) with st(0)
 *
 * @return the new fxch node (NOTE(review): the declaration of fxch, the
 *         keep handling and the return statement are elided in this
 *         excerpt).
 */
static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
ia32_x87_attr_t *attr;
ir_node *block = get_nodes_block(n);

/* update the simulated stack first ... */
x87_fxch(state, pos);

/* ... then emit the real fxch instruction */
fxch = new_bd_ia32_fxch(NULL, block);
attr = get_ia32_x87_attr(fxch);
attr->x87[0] = get_st_reg(pos);
attr->x87[2] = get_st_reg(0);

/* schedule directly before the consumer */
sched_add_before(n, fxch);
DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
/**
 * Create a fpush before node n.
 *
 * @param state   the x87 state
 * @param n       the node after the fpush
 * @param pos     push st(pos) on stack
 * @param op_idx  replace input op_idx of n with the fpush result
 */
static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx)
/* NOTE(review): braces and the statement rerouting n's input op_idx to the
 * fpush result are elided in this excerpt. */
ir_node *fpush, *pred = get_irn_n(n, op_idx);
ia32_x87_attr_t *attr;
const arch_register_t *out = x87_get_irn_register(pred);

/* duplicate pred's value on the simulated stack (double push allowed) */
x87_push_dbl(state, arch_register_get_index(out), pred);

fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
attr = get_ia32_x87_attr(fpush);
attr->x87[0] = get_st_reg(pos);
attr->x87[2] = get_st_reg(0);

sched_add_before(n, fpush);

DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
/**
 * Create a fpop before node n.
 *
 * @param state  the x87 state
 * @param n      the node after the fpop
 * @param num    pop 1 or 2 values
 *
 * @return the fpop node (NOTE(review): the x87_pop() state update, the loop
 *         over num and the return statement are elided in this excerpt).
 */
static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
ir_node *fpop = NULL;
ia32_x87_attr_t *attr;

/* ffreep frees and pops in one instruction when available */
if (ia32_cg_config.use_ffreep)
    fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
    fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
attr = get_ia32_x87_attr(fpop);
attr->x87[0] = get_st_reg(0);
attr->x87[1] = get_st_reg(0);
attr->x87[2] = get_st_reg(0);

sched_add_before(n, fpop);
DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
694 /* --------------------------------- liveness ------------------------------------------ */
/**
 * The liveness transfer function.
 * Updates a live set over a single step from a given node to its predecessor.
 * Everything defined at the node is removed from the set, the uses of the
 * node get inserted.
 *
 * @param irn   The node at which liveness should be computed.
 * @param live  The bitset of registers live before @p irn. This set gets
 *              modified by updating it to the registers live after irn.
 *
 * @return The live bitset.
 */
static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
/* NOTE(review): the declarations of i/n, closing braces and the return
 * statement are elided in this excerpt. */
const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];

if (get_irn_mode(irn) == mode_T) {
    /* tuple: clear the definition bit of every vfp Proj */
    foreach_out_edge(irn, edge) {
        ir_node *proj = get_edge_src_irn(edge);

        if (arch_irn_consider_in_reg_alloc(cls, proj)) {
            const arch_register_t *reg = x87_get_irn_register(proj);
            live &= ~(1 << arch_register_get_index(reg));
} else if (arch_irn_consider_in_reg_alloc(cls, irn)) {
    /* single result: clear its definition bit */
    const arch_register_t *reg = x87_get_irn_register(irn);
    live &= ~(1 << arch_register_get_index(reg));

/* every float operand used by irn becomes live */
for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
    ir_node *op = get_irn_n(irn, i);

    if (mode_is_float(get_irn_mode(op)) &&
            arch_irn_consider_in_reg_alloc(cls, op)) {
        const arch_register_t *reg = x87_get_irn_register(op);
        live |= 1 << arch_register_get_index(reg);
739 * Put all live virtual registers at the end of a block into a bitset.
741 * @param sim the simulator handle
742 * @param bl the block
744 * @return The live bitset at the end of this block
746 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
748 vfp_liveness live = 0;
749 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
750 const be_lv_t *lv = sim->lv;
752 be_lv_foreach(lv, block, be_lv_state_end, node) {
753 const arch_register_t *reg;
754 if (!arch_irn_consider_in_reg_alloc(cls, node))
757 reg = x87_get_irn_register(node);
758 live |= 1 << arch_register_get_index(reg);
/** Get the one-bit register mask for an arch_register (bit index equals the
 *  register's index). */
#define REGMASK(reg) (1 << (arch_register_get_index(reg)))
768 * Return a bitset of argument registers which are live at the end of a node.
770 * @param sim the simulator handle
771 * @param pos the node
772 * @param kill kill mask for the output registers
774 * @return The live bitset.
776 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
778 unsigned idx = get_irn_idx(pos);
780 assert(idx < sim->n_idx);
781 return sim->live[idx] & ~kill;
785 * Calculate the liveness for a whole block and cache it.
787 * @param sim the simulator handle
788 * @param block the block
790 static void update_liveness(x87_simulator *sim, ir_node *block)
792 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
795 /* now iterate through the block backward and cache the results */
796 sched_foreach_reverse(block, irn) {
797 /* stop at the first Phi: this produces the live-in */
801 idx = get_irn_idx(irn);
802 sim->live[idx] = live;
804 live = vfp_liveness_transfer(irn, live);
806 idx = get_irn_idx(block);
807 sim->live[idx] = live;
/**
 * Returns true if a register is live in a set.
 *
 * @param reg_idx  the vfp register index
 * @param live     a live bitset
 */
#define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
/**
 * Dump liveness info (debug builds only; the matching #ifdef DEBUG_libfirm
 * opener lies before this excerpt).
 *
 * @param live  the live bitset
 */
static void vfp_dump_live(vfp_liveness live)
/* NOTE(review): braces and the declaration of i are elided in this
 * excerpt. */
DB((dbg, LEVEL_2, "Live after: "));
for (i = 0; i < 8; ++i) {
    if (live & (1 << i)) {
        DB((dbg, LEVEL_2, "vf%d ", i));
DB((dbg, LEVEL_2, "\n"));
#endif /* DEBUG_libfirm */
838 /* --------------------------------- simulators ---------------------------------------- */
/**
 * Simulate a virtual binop.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param tmpl   the template containing the 4 possible x87 opcodes
 *
 * @return NO_NODE_ADDED
 */
static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl)
/* NOTE(review): this excerpt elides many structural lines of sim_binop
 * (braces, else branches, out_idx/do_pop assignments, the declarations of
 * dst/permuted/op1_live_after/op2_live_after and the pop handling after
 * patching).  The visible statements below show the opcode-selection
 * skeleton only — consult the full file before changing control flow. */
int op2_idx = 0, op1_idx;
int out_idx, do_pop = 0;
ia32_x87_attr_t *attr;
ir_node *patched_insn;
x87_simulator *sim = state->sim;
ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
const arch_register_t *op1_reg = x87_get_irn_register(op1);
const arch_register_t *op2_reg = x87_get_irn_register(op2);
const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
int reg_index_1 = arch_register_get_index(op1_reg);
int reg_index_2 = arch_register_get_index(op2_reg);
vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));

DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
    arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
    arch_register_get_name(out)));
DEBUG_ONLY(vfp_dump_live(live);)
DB((dbg, LEVEL_1, "Stack before: "));
DEBUG_ONLY(x87_dump_stack(state);)

op1_idx = x87_on_stack(state, reg_index_1);
assert(op1_idx >= 0);
op1_live_after = is_vfp_live(reg_index_1, live);

attr     = get_ia32_x87_attr(n);
permuted = attr->attr.data.ins_permuted;

if (reg_index_2 != REG_VFP_VFP_NOREG) {
    /* second operand is a vfp register */
    op2_idx = x87_on_stack(state, reg_index_2);
    assert(op2_idx >= 0);
    op2_live_after = is_vfp_live(reg_index_2, live);

    if (op2_live_after) {
        /* Second operand is live. */

        if (op1_live_after) {
            /* Both operands are live: push the first one.
               This works even for op1 == op2. */
            x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
            /* now do fxxx (tos=tos X op) */
            dst = tmpl->normal_op;
            /* Second live, first operand is dead here, bring it to tos. */
            x87_create_fxch(state, n, op1_idx);
            /* now do fxxx (tos=tos X op) */
            dst = tmpl->normal_op;
        /* Second operand is dead. */
        if (op1_live_after) {
            /* First operand is live: bring second to tos. */
            x87_create_fxch(state, n, op2_idx);
            /* now do fxxxr (tos = op X tos) */
            dst = tmpl->reverse_op;
            /* Both operands are dead here, pop them from the stack. */
                /* Both are identical and on tos, no pop needed. */
                /* here fxxx (tos = tos X tos) */
                dst = tmpl->normal_op;
                /* now do fxxxp (op = op X tos, pop) */
                dst = tmpl->normal_pop_op;
        } else if (op1_idx == 0) {
            assert(op1_idx != op2_idx);
            /* now do fxxxrp (op = tos X op, pop) */
            dst = tmpl->reverse_pop_op;
            /* Bring the second on top. */
            x87_create_fxch(state, n, op2_idx);
            if (op1_idx == op2_idx) {
                /* Both are identical and on tos now, no pop needed. */
                /* use fxxx (tos = tos X tos) */
                dst = tmpl->normal_op;
                /* op2 is on tos now */
                /* use fxxxp (op = op X tos, pop) */
                dst = tmpl->normal_pop_op;
    /* second operand is an address mode */
    if (op1_live_after) {
        /* first operand is live: push it here */
        x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
        /* first operand is dead: bring it to tos */
        x87_create_fxch(state, n, op1_idx);

    /* use fxxx (tos = tos X mem) */
    dst = permuted ? tmpl->reverse_op : tmpl->normal_op;

patched_insn = x87_patch_insn(n, dst);
x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);

/* patch the operation */
attr->x87[0] = op1_reg = get_st_reg(op1_idx);
if (reg_index_2 != REG_VFP_VFP_NOREG) {
    attr->x87[1] = op2_reg = get_st_reg(op2_idx);
attr->x87[2] = out = get_st_reg(out_idx);

if (reg_index_2 != REG_VFP_VFP_NOREG) {
    DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
        arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
        arch_register_get_name(out)));
    DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
        arch_register_get_name(op1_reg),
        arch_register_get_name(out)));

return NO_NODE_ADDED;
/**
 * Simulate a virtual Unop.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param op     the x87 opcode that will replace n's opcode
 *
 * @return NO_NODE_ADDED
 */
static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
/* NOTE(review): braces and the else branch between fpush and fxch are
 * elided in this excerpt. */
x87_simulator *sim = state->sim;
const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, 0));
const arch_register_t *out = x87_get_irn_register(n);
ia32_x87_attr_t *attr;
unsigned live = vfp_live_args_after(sim, n, REGMASK(out));

DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
DEBUG_ONLY(vfp_dump_live(live);)

int op1_idx = x87_on_stack(state, arch_register_get_index(op1));

if (is_vfp_live(arch_register_get_index(op1), live)) {
    /* operand still live after n: keep it by pushing a copy */
    x87_create_fpush(state, n, op1_idx, 0);
    /* operand is dead, bring it to tos */
    x87_create_fxch(state, n, op1_idx);

/* the result replaces the tos */
x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
attr = get_ia32_x87_attr(n);
attr->x87[0] = op1 = get_st_reg(0);
attr->x87[2] = out = get_st_reg(0);
DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));

return NO_NODE_ADDED;
/**
 * Simulate a virtual Load instruction.
 *
 * @param state    the x87 state
 * @param n        the node that should be simulated (and patched)
 * @param op       the x87 opcode that will replace n's opcode
 * @param res_pos  the Proj number of the load result
 *
 * @return NO_NODE_ADDED
 */
static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
/* NOTE(review): braces are elided in this excerpt. */
const arch_register_t *out = x87_irn_get_register(n, res_pos);
ia32_x87_attr_t *attr;

DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
/* a load pushes its result onto the x87 stack */
x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
assert(out == x87_irn_get_register(n, res_pos));
attr = get_ia32_x87_attr(n);
attr->x87[2] = out = get_st_reg(0);
DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));

return NO_NODE_ADDED;
1081 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1083 * @param store The store
1084 * @param old_val The former value
1085 * @param new_val The new value
1087 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
1089 foreach_out_edge_safe(old_val, edge) {
1090 ir_node *user = get_edge_src_irn(edge);
1092 if (! user || user == store)
1095 /* if the user is scheduled after the store: rewire */
1096 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1098 /* find the input of the user pointing to the old value */
1099 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1100 if (get_irn_n(user, i) == old_val)
1101 set_irn_n(user, i, new_val);
/**
 * Simulate a virtual Store.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param op     the x87 store opcode
 * @param op_p   the x87 store and pop opcode
 */
static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p)
/* NOTE(review): many structural lines are elided in this excerpt (braces,
 * the declaration of mode, the `if (op2_idx != 0)` guards before the fxch
 * calls, the else chains, and the trailing pop/return handling).  The
 * visible statements below show the main decision skeleton only. */
ir_node *val = get_irn_n(n, n_ia32_vfst_val);
const arch_register_t *op2 = x87_get_irn_register(val);
unsigned live = vfp_live_args_after(state->sim, n, 0);
int insn = NO_NODE_ADDED;
ia32_x87_attr_t *attr;
int op2_reg_idx, op2_idx, depth;
int live_after_node;

op2_reg_idx = arch_register_get_index(op2);
op2_idx = x87_on_stack(state, op2_reg_idx);
live_after_node = is_vfp_live(arch_register_get_index(op2), live);
DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
assert(op2_idx >= 0);

mode  = get_ia32_ls_mode(n);
depth = x87_get_depth(state);

if (live_after_node) {
    /*
     * Problem: fst doesn't support 96bit modes (spills), only fstp does
     *          fist doesn't support 64bit mode, only fistp
     * Solution:
     *   - stack not full: push value and fstp
     *   - stack full: fstp value and load again
     * Note that we cannot test on mode_E, because floats might be 96bit ...
     */
    if (get_mode_size_bits(mode) > 64 || (mode_is_int(mode) && get_mode_size_bits(mode) > 32)) {
        if (depth < N_ia32_st_REGS) {
            /* ok, we have a free register: push + fstp */
            x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
            x87_patch_insn(n, op_p);
            ir_node *vfld, *mem, *block, *rproj, *mproj;
            ir_graph *irg   = get_irn_irg(n);
            ir_node  *nomem = get_irg_no_mem(irg);

            /* stack full here: need fstp + load */
            x87_patch_insn(n, op_p);

            block = get_nodes_block(n);
            vfld  = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), nomem, get_ia32_ls_mode(n));

            /* copy all attributes */
            set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
            if (is_ia32_use_frame(n))
                set_ia32_use_frame(vfld);
            set_ia32_op_type(vfld, ia32_AddrModeS);
            add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
            set_ia32_am_sc(vfld, get_ia32_am_sc(n));
            set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));

            rproj = new_r_Proj(vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
            mproj = new_r_Proj(vfld, mode_M, pn_ia32_vfld_M);
            mem   = get_irn_Proj_for_mode(n, mode_M);

            assert(mem && "Store memory not found");

            arch_set_irn_register(rproj, op2);

            /* reroute all former users of the store memory to the load memory */
            edges_reroute(mem, mproj);
            /* set the memory input of the load to the store memory */
            set_irn_n(vfld, n_ia32_vfld_mem, mem);

            sched_add_after(n, vfld);
            sched_add_after(vfld, rproj);

            /* rewire all users, scheduled after the store, to the loaded value */
            collect_and_rewire_users(n, val, rproj);
        /* we can only store the tos to memory */
            x87_create_fxch(state, n, op2_idx);

        /* mode size 64 or smaller -> use normal fst */
        x87_patch_insn(n, op);
    /* value is dead after the store: store + pop */
    /* we can only store the tos to memory */
        x87_create_fxch(state, n, op2_idx);

    x87_patch_insn(n, op_p);

attr = get_ia32_x87_attr(n);
attr->x87[1] = op2 = get_st_reg(0);
DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/* Macro templates that stamp out the per-opcode simulator functions.
 * NOTE(review): the closing line of each macro body is elided in this
 * excerpt. */
#define _GEN_BINOP(op, rev) \
static int sim_##op(x87_state *state, ir_node *n) { \
exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
return sim_binop(state, n, &tmpl); \
/* binop whose reverse form is itself / binop with a distinct fxxxr form */
#define GEN_BINOP(op) _GEN_BINOP(op, op)
#define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
/* loads push their result; the Proj number names the v<op> result */
#define GEN_LOAD(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
/* unops operate on the tos in place */
#define GEN_UNOP(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
return sim_unop(state, n, op_ia32_##op); \
/* stores come in normal and popping (op##p) variants */
#define GEN_STORE(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
/**
 * Simulate a virtual fisttp.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 */
static int sim_fisttp(x87_state *state, ir_node *n)
/* NOTE(review): braces, the guard around the fxch call and the x87 pop of
 * the simulated stack are elided in this excerpt. */
ir_node *val = get_irn_n(n, n_ia32_vfst_val);
const arch_register_t *op2 = x87_get_irn_register(val);
ia32_x87_attr_t *attr;
int op2_reg_idx, op2_idx;

op2_reg_idx = arch_register_get_index(op2);
op2_idx = x87_on_stack(state, op2_reg_idx);
DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
assert(op2_idx >= 0);

/* Note: although the value is still live here, it is destroyed because
   of the pop. The register allocator is aware of that and introduced a copy
   if the value must be alive. */

/* we can only store the tos to memory */
    x87_create_fxch(state, n, op2_idx);

x87_patch_insn(n, op_ia32_fisttp);

attr = get_ia32_x87_attr(n);
attr->x87[1] = op2 = get_st_reg(0);
DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));

return NO_NODE_ADDED;
1297 * Simulate a virtual FtstFnstsw.
1299 * @param state the x87 state
1300 * @param n the node that should be simulated (and patched)
1302 * @return NO_NODE_ADDED
/* Simulate a virtual FtstFnstsw: bring the operand to st(0), patch the node
 * to the real FtstFnstsw, and append an fpop if the operand is dead after
 * the test.  Returns NO_NODE_ADDED. */
1304 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1306 x87_simulator *sim = state->sim;
1307 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1308 ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1309 const arch_register_t *reg1 = x87_get_irn_register(op1_node);
1310 int reg_index_1 = arch_register_get_index(reg1);
1311 int op1_idx = x87_on_stack(state, reg_index_1);
1312 unsigned live = vfp_live_args_after(sim, n, 0); /* liveness after n decides the fpop below */
1314 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, arch_register_get_name(reg1)));
1315 DEBUG_ONLY(vfp_dump_live(live);)
1316 DB((dbg, LEVEL_1, "Stack before: "));
1317 DEBUG_ONLY(x87_dump_stack(state);)
1318 assert(op1_idx >= 0);
1321 /* bring the value to tos */
1322 x87_create_fxch(state, n, op1_idx);
1326 /* patch the operation */
1327 x87_patch_insn(n, op_ia32_FtstFnstsw);
1328 reg1 = get_st_reg(op1_idx);
1329 attr->x87[0] = reg1;
1330 attr->x87[1] = NULL;
1331 attr->x87[2] = NULL;
1333 if (!is_vfp_live(reg_index_1, live))
1334 x87_create_fpop(state, sched_next(n), 1); /* operand dead: pop it right after the test */
1336 return NO_NODE_ADDED;
1342 * @param state the x87 state
1343 * @param n the node that should be simulated (and patched)
1345 * @return NO_NODE_ADDED
/* Simulate a virtual Fucom-family compare (vFucomFnstsw / vFucomi).
 * Case analysis over (operand liveness, stack positions, operand identity)
 * selects the fxch sequence that brings the operands to st(0)/st(i), tracks
 * whether the comparison direction got swapped (ins_permuted), and patches
 * the node to the real opcode variant that pops 0, 1 or 2 dead operands.
 * NOTE(review): this excerpt is missing many physical lines (dropped braces,
 * the switch headers before the `case` labels, pop bookkeeping) — the
 * visible control flow is incomplete; consult the full file before editing. */
1347 static int sim_Fucom(x87_state *state, ir_node *n)
1351 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1353 x87_simulator *sim = state->sim;
1354 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1355 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1356 const arch_register_t *op1 = x87_get_irn_register(op1_node);
1357 const arch_register_t *op2 = x87_get_irn_register(op2_node);
1358 int reg_index_1 = arch_register_get_index(op1);
1359 int reg_index_2 = arch_register_get_index(op2);
1360 unsigned live = vfp_live_args_after(sim, n, 0);
1361 bool permuted = attr->attr.data.ins_permuted;
1365 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1366 arch_register_get_name(op1), arch_register_get_name(op2)));
1367 DEBUG_ONLY(vfp_dump_live(live);)
1368 DB((dbg, LEVEL_1, "Stack before: "));
1369 DEBUG_ONLY(x87_dump_stack(state);)
1371 op1_idx = x87_on_stack(state, reg_index_1);
1372 assert(op1_idx >= 0);
1374 /* BEWARE: check for comp a,a cases, they might happen */
1375 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1376 /* second operand is a vfp register */
1377 op2_idx = x87_on_stack(state, reg_index_2);
1378 assert(op2_idx >= 0);
1380 if (is_vfp_live(reg_index_2, live)) {
1381 /* second operand is live */
1383 if (is_vfp_live(reg_index_1, live)) {
1384 /* both operands are live */
1387 /* res = tos X op */
1388 } else if (op2_idx == 0) {
1389 /* res = op X tos */
1390 permuted = !permuted; /* operand order swapped: flip the permuted flag */
1393 /* bring the first one to tos */
1394 x87_create_fxch(state, n, op1_idx);
1395 if (op1_idx == op2_idx) {
1397 } else if (op2_idx == 0) {
1401 /* res = tos X op */
1404 /* second live, first operand is dead here, bring it to tos.
1405 This means further, op1_idx != op2_idx. */
1406 assert(op1_idx != op2_idx);
1408 x87_create_fxch(state, n, op1_idx);
1413 /* res = tos X op, pop */
1417 /* second operand is dead */
1418 if (is_vfp_live(reg_index_1, live)) {
1419 /* first operand is live: bring second to tos.
1420 This means further, op1_idx != op2_idx. */
1421 assert(op1_idx != op2_idx);
1423 x87_create_fxch(state, n, op2_idx);
1428 /* res = op X tos, pop */
1430 permuted = !permuted;
1433 /* both operands are dead here, check first for identity. */
1434 if (op1_idx == op2_idx) {
1435 /* identically, one pop needed */
1437 x87_create_fxch(state, n, op1_idx);
1441 /* res = tos X op, pop */
1444 /* different, move them to st and st(1) and pop both.
1445 The tricky part is to get one into st(1).*/
1446 else if (op2_idx == 1) {
1447 /* good, second operand is already in the right place, move the first */
1449 /* bring the first on top */
1450 x87_create_fxch(state, n, op1_idx);
1451 assert(op2_idx != 0);
1454 /* res = tos X op, pop, pop */
1456 } else if (op1_idx == 1) {
1457 /* good, first operand is already in the right place, move the second */
1459 /* bring the first on top */
1460 x87_create_fxch(state, n, op2_idx);
1461 assert(op1_idx != 0);
1464 /* res = op X tos, pop, pop */
1465 permuted = !permuted;
1469 /* if one is already the TOS, we need two fxch */
1471 /* first one is TOS, move to st(1) */
1472 x87_create_fxch(state, n, 1);
1473 assert(op2_idx != 1);
1475 x87_create_fxch(state, n, op2_idx);
1477 /* res = op X tos, pop, pop */
1479 permuted = !permuted;
1481 } else if (op2_idx == 0) {
1482 /* second one is TOS, move to st(1) */
1483 x87_create_fxch(state, n, 1);
1484 assert(op1_idx != 1);
1486 x87_create_fxch(state, n, op1_idx);
1488 /* res = tos X op, pop, pop */
1491 /* none of them is either TOS or st(1), 3 fxch needed */
1492 x87_create_fxch(state, n, op2_idx);
1493 assert(op1_idx != 0);
1494 x87_create_fxch(state, n, 1);
1496 x87_create_fxch(state, n, op1_idx);
1498 /* res = tos X op, pop, pop */
1505 /* second operand is an address mode */
1506 if (is_vfp_live(reg_index_1, live)) {
1507 /* first operand is live: bring it to TOS */
1509 x87_create_fxch(state, n, op1_idx);
1513 /* first operand is dead: bring it to tos */
1515 x87_create_fxch(state, n, op1_idx);
1522 /* patch the operation */
1523 if (is_ia32_vFucomFnstsw(n)) {
1527 case 0: dst = op_ia32_FucomFnstsw; break;
1528 case 1: dst = op_ia32_FucompFnstsw; break;
1529 case 2: dst = op_ia32_FucomppFnstsw; break;
1530 default: panic("invalid popcount");
1533 for (i = 0; i < pops; ++i) {
1536 } else if (is_ia32_vFucomi(n)) {
1538 case 0: dst = op_ia32_Fucomi; break;
1539 case 1: dst = op_ia32_Fucompi; x87_pop(state); break;
1541 dst = op_ia32_Fucompi;
1543 x87_create_fpop(state, sched_next(n), 1); /* fucompi pops only once: 2nd pop is a separate fpop */
1545 default: panic("invalid popcount");
1548 panic("invalid operation %+F", n);
1551 x87_patch_insn(n, dst);
1558 op1 = get_st_reg(op1_idx);
1561 op2 = get_st_reg(op2_idx);
1564 attr->x87[2] = NULL;
1565 attr->attr.data.ins_permuted = permuted;
1568 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1569 arch_register_get_name(op1), arch_register_get_name(op2)));
1571 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1572 arch_register_get_name(op1)));
1575 return NO_NODE_ADDED;
1581 * @param state the x87 state
1582 * @param n the node that should be simulated (and patched)
1584 * @return NO_NODE_ADDED
/* Simulate a be_Keep: for each kept vfp operand that is still on the x87
 * stack but no longer live, schedule an fpop after the Keep so the stack
 * does not accumulate dead values.  Returns NO_NODE_ADDED. */
1586 static int sim_Keep(x87_state *state, ir_node *node)
1589 const arch_register_t *op_reg;
1595 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1597 arity = get_irn_arity(node);
1598 for (i = 0; i < arity; ++i) {
1599 op = get_irn_n(node, i);
1600 op_reg = arch_get_irn_register(op);
1601 if (arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp]) /* only x87 values matter here */
1604 reg_id = arch_register_get_index(op_reg);
1605 live = vfp_live_args_after(state->sim, node, 0);
1607 op_stack_idx = x87_on_stack(state, reg_id);
1608 if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live))
1609 x87_create_fpop(state, sched_next(node), 1); /* dead but still stacked: pop it */
1612 DB((dbg, LEVEL_1, "Stack after: "));
1613 DEBUG_ONLY(x87_dump_stack(state);)
1615 return NO_NODE_ADDED;
1619 * Keep the given node alive by adding a be_Keep.
1621 * @param node the node to kept alive
/* Keep @p node alive by appending a be_Keep user right after it in the
 * schedule; @p node must already be scheduled. */
1623 static void keep_float_node_alive(ir_node *node)
1625 ir_node *block = get_nodes_block(node);
1626 ir_node *keep = be_new_Keep(block, 1, &node);
1628 assert(sched_is_scheduled(node));
1629 sched_add_after(node, keep);
1633 * Create a copy of a node. Recreate the node if it's a constant.
1635 * @param state the x87 state
1636 * @param n the node to be copied
1638 * @return the copy of n
/* Create a copy of @p n on the x87 stack.  If the copied value comes from a
 * constant-load (fldz, fld1, fldpi, ...) the constant is simply re-created
 * with a fresh load; otherwise an fpushCopy duplicates the stack slot.
 * Either way the result is pushed onto the simulated stack and given the
 * output register of @p n.  Returns the new node (not yet scheduled). */
1640 static ir_node *create_Copy(x87_state *state, ir_node *n)
1642 dbg_info *n_dbg = get_irn_dbg_info(n);
1643 ir_mode *mode = get_irn_mode(n);
1644 ir_node *block = get_nodes_block(n);
1645 ir_node *pred = get_irn_n(n, 0);
1646 ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL; /* set iff pred is a recreatable constant */
1648 const arch_register_t *out;
1649 const arch_register_t *op1;
1650 ia32_x87_attr_t *attr;
1652 /* Do not copy constants, recreate them. */
1653 switch (get_ia32_irn_opcode(pred)) {
1655 cnstr = new_bd_ia32_fldz;
1658 cnstr = new_bd_ia32_fld1;
1660 case iro_ia32_fldpi:
1661 cnstr = new_bd_ia32_fldpi;
1663 case iro_ia32_fldl2e:
1664 cnstr = new_bd_ia32_fldl2e;
1666 case iro_ia32_fldl2t:
1667 cnstr = new_bd_ia32_fldl2t;
1669 case iro_ia32_fldlg2:
1670 cnstr = new_bd_ia32_fldlg2;
1672 case iro_ia32_fldln2:
1673 cnstr = new_bd_ia32_fldln2;
1679 out = x87_get_irn_register(n);
1680 op1 = x87_get_irn_register(pred);
1682 if (cnstr != NULL) {
1683 /* copy a constant */
1684 res = (*cnstr)(n_dbg, block, mode);
1686 x87_push(state, arch_register_get_index(out), res);
1688 attr = get_ia32_x87_attr(res);
1689 attr->x87[2] = get_st_reg(0); /* result lands in st(0) */
1691 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1693 res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);
1695 x87_push(state, arch_register_get_index(out), res);
1697 attr = get_ia32_x87_attr(res);
1698 attr->x87[0] = get_st_reg(op1_idx);
1699 attr->x87[2] = get_st_reg(0);
1701 arch_set_irn_register(res, out);
1707 * Simulate a be_Copy.
1709 * @param state the x87 state
1710 * @param n the node that should be simulated (and patched)
1712 * @return NO_NODE_ADDED
/* Simulate a be_Copy of a vfp value.  If the source is still live a real
 * duplicate is made via create_Copy(); if it is dead the copy degenerates
 * into a rename of the stack slot, possibly via Pop/fxch when the output
 * position must be vacated.  Non-vfp copies are ignored.
 * NOTE(review): lines are missing from this excerpt (e.g. the branch
 * structure around the Pop cases); see the full file before editing. */
1714 static int sim_Copy(x87_state *state, ir_node *n)
1717 const arch_register_t *out;
1718 const arch_register_t *op1;
1719 const arch_register_class_t *cls;
1720 ir_node *node, *next;
1721 int op1_idx, out_idx;
1724 cls = arch_get_irn_reg_class(n);
1725 if (cls != &ia32_reg_classes[CLASS_ia32_vfp]) /* only x87 copies are simulated */
1728 pred = be_get_Copy_op(n);
1729 out = x87_get_irn_register(n);
1730 op1 = x87_get_irn_register(pred);
1731 live = vfp_live_args_after(state->sim, n, REGMASK(out));
1733 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1734 arch_register_get_name(op1), arch_register_get_name(out)));
1735 DEBUG_ONLY(vfp_dump_live(live);)
1737 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1739 if (is_vfp_live(arch_register_get_index(op1), live)) {
1740 /* Operand is still live, a real copy. We need here an fpush that can
1741 hold a a register, so use the fpushCopy or recreate constants */
1742 node = create_Copy(state, n);
1744 /* We have to make sure the old value doesn't go dead (which can happen
1745 * when we recreate constants). As the simulator expected that value in
1746 * the pred blocks. This is unfortunate as removing it would save us 1
1747 * instruction, but we would have to rerun all the simulation to get
1750 next = sched_next(n);
1753 sched_add_before(next, node);
1755 if (get_irn_n_edges(pred) == 0) {
1756 keep_float_node_alive(pred); /* recreated constant left pred unused: keep it */
1759 DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
1761 out_idx = x87_on_stack(state, arch_register_get_index(out));
1763 if (out_idx >= 0 && out_idx != op1_idx) {
1764 /* Matze: out already on stack? how can this happen? */
1765 panic("invalid stack state");
1768 /* op1 must be killed and placed where out is */
1770 ia32_x87_attr_t *attr;
1771 /* best case, simple remove and rename */
1772 x87_patch_insn(n, op_ia32_Pop);
1773 attr = get_ia32_x87_attr(n);
1774 attr->x87[0] = op1 = get_st_reg(0);
1777 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1779 ia32_x87_attr_t *attr;
1780 /* move op1 to tos, store and pop it */
1782 x87_create_fxch(state, n, op1_idx);
1785 x87_patch_insn(n, op_ia32_Pop);
1786 attr = get_ia32_x87_attr(n);
1787 attr->x87[0] = op1 = get_st_reg(out_idx);
1790 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1792 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1795 /* just a virtual copy */
1796 x87_set_st(state, arch_register_get_index(out), pred, op1_idx);
1797 /* don't remove the node to keep the verifier quiet :),
1798 the emitter won't emit any code for the node */
1801 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1806 return NO_NODE_ADDED;
1810 * Returns the vf0 result Proj of a Call.
1812 * @para call the Call node
/* Return the Proj of @p call that carries the vf0 (x87 st(0)) result,
 * found by scanning the Call's out edges. */
1814 static ir_node *get_call_result_proj(ir_node *call)
1816 /* search the result proj */
1817 foreach_out_edge(call, edge) {
1818 ir_node *proj = get_edge_src_irn(edge);
1819 long pn = get_Proj_proj(proj);
1821 if (pn == pn_ia32_Call_vf0)
/* Simulate an ia32 Asm node: inline assembly with x87 register constraints
 * is not supported, so panic if any input or output requires the vfp class.
 * Returns NO_NODE_ADDED. */
1828 static int sim_Asm(x87_state *const state, ir_node *const n)
1832 for (size_t i = get_irn_arity(n); i-- != 0;) {
1833 arch_register_req_t const *const req = arch_get_irn_register_req_in(n, i);
1834 if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1835 panic("cannot handle %+F with x87 constraints", n);
1838 for (size_t i = arch_get_irn_n_outs(n); i-- != 0;) {
1839 arch_register_req_t const *const req = arch_get_irn_register_req_out(n, i);
1840 if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1841 panic("cannot handle %+F with x87 constraints", n);
1844 return NO_NODE_ADDED;
1848 * Simulate a ia32_Call.
1850 * @param state the x87 state
1851 * @param n the node that should be simulated (and patched)
1853 * @return NO_NODE_ADDED
/* Simulate an ia32_Call.  The x87 stack must be empty at the call; if the
 * callee returns a float it arrives in st(0), so push the result Proj onto
 * the simulated stack.  Returns NO_NODE_ADDED. */
1855 static int sim_Call(x87_state *state, ir_node *n)
1857 ir_type *call_tp = get_ia32_call_attr_const(n)->call_tp;
1861 const arch_register_t *reg;
1863 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1865 /* at the begin of a call the x87 state should be empty */
1866 assert(state->depth == 0 && "stack not empty before call");
1868 if (get_method_n_ress(call_tp) <= 0) /* no return value: nothing to push */
1872 * If the called function returns a float, it is returned in st(0).
1873 * This even happens if the return value is NOT used.
1874 * Moreover, only one return result is supported.
1876 res_type = get_method_res_type(call_tp, 0);
1877 mode = get_type_mode(res_type);
1879 if (mode == NULL || !mode_is_float(mode)) /* non-float result stays off the x87 stack */
1882 resproj = get_call_result_proj(n);
1883 assert(resproj != NULL);
1885 reg = x87_get_irn_register(resproj);
1886 x87_push(state, arch_register_get_index(reg), resproj);
1889 DB((dbg, LEVEL_1, "Stack after: "));
1890 DEBUG_ONLY(x87_dump_stack(state);)
1892 return NO_NODE_ADDED;
1896 * Simulate a be_Return.
1898 * @param state the x87 state
1899 * @param n the node that should be simulated (and patched)
1901 * @return NO_NODE_ADDED
/* Simulate a be_Return: assert (in debug builds) that the simulated stack
 * depth equals the number of float return values, then pop them virtually.
 * Returns NO_NODE_ADDED. */
1903 static int sim_Return(x87_state *state, ir_node *n)
1905 #ifdef DEBUG_libfirm
1906 /* only floating point return values must reside on stack */
1907 int n_float_res = 0;
1908 int const n_res = be_Return_get_n_rets(n);
1909 for (int i = 0; i < n_res; ++i) {
1910 ir_node *const res = get_irn_n(n, n_be_Return_val + i);
1911 if (mode_is_float(get_irn_mode(res)))
1914 assert(x87_get_depth(state) == n_float_res);
1917 /* pop them virtually */
1919 return NO_NODE_ADDED;
1923 * Simulate a be_Perm.
1925 * @param state the x87 state
1926 * @param irn the node that should be simulated (and patched)
1928 * @return NO_NODE_ADDED
/* Simulate a be_Perm of vfp values.  On x87 a Perm needs no code at all:
 * all inputs are on the FPU stack and pairwise distinct, so only the
 * simulated stack state is permuted (each result Proj takes over the stack
 * slot of the corresponding input).  Returns NO_NODE_ADDED. */
1930 static int sim_Perm(x87_state *state, ir_node *irn)
1933 ir_node *pred = get_irn_n(irn, 0);
1936 /* handle only floating point Perms */
1937 if (! mode_is_float(get_irn_mode(pred)))
1938 return NO_NODE_ADDED;
1940 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1942 /* Perm is a pure virtual instruction on x87.
1943 All inputs must be on the FPU stack and are pairwise
1944 different from each other.
1945 So, all we need to do is to permutate the stack state. */
1946 n = get_irn_arity(irn);
1947 NEW_ARR_A(int, stack_pos, n); /* alloca'd scratch array of old positions */
1949 /* collect old stack positions */
1950 for (i = 0; i < n; ++i) {
1951 const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
1952 int idx = x87_on_stack(state, arch_register_get_index(inreg));
1954 assert(idx >= 0 && "Perm argument not on x87 stack");
1958 /* now do the permutation */
1959 foreach_out_edge(irn, edge) {
1960 ir_node *proj = get_edge_src_irn(edge);
1961 const arch_register_t *out = x87_get_irn_register(proj);
1962 long num = get_Proj_proj(proj);
1964 assert(0 <= num && num < n && "More Proj's than Perm inputs");
1965 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
1967 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1969 return NO_NODE_ADDED;
1973 * Kill any dead registers at block start by popping them from the stack.
1975 * @param sim the simulator handle
1976 * @param block the current block
1977 * @param state the x87 state at the begin of the block
/* Kill registers that are dead at block entry by popping them before the
 * first instruction.  Builds a bitmask of dead stack slots; if everything
 * is dead and FEMMS/EMMS is available, one instruction clears the whole
 * stack, otherwise dead values are fxch'd to the top and popped (using
 * double-pops where two adjacent dead slots allow it).
 * NOTE(review): some lines of the pop loop are missing from this excerpt. */
1979 static void x87_kill_deads(x87_simulator *const sim, ir_node *const block, x87_state *const state)
1981 ir_node *first_insn = sched_first(block);
1982 ir_node *keep = NULL;
1983 unsigned live = vfp_live_args_after(sim, block, 0);
1985 int i, depth, num_pop;
1988 depth = x87_get_depth(state);
1989 for (i = depth - 1; i >= 0; --i) {
1990 int reg = x87_get_st_reg(state, i);
1992 if (! is_vfp_live(reg, live))
1993 kill_mask |= (1 << i); /* bit i set = stack slot i is dead */
1997 DB((dbg, LEVEL_1, "Killing deads:\n"));
1998 DEBUG_ONLY(vfp_dump_live(live);)
1999 DEBUG_ONLY(x87_dump_stack(state);)
2001 if (kill_mask != 0 && live == 0) {
2002 /* special case: kill all registers */
2003 if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
2004 if (ia32_cg_config.use_femms) {
2005 /* use FEMMS on AMD processors to clear all */
2006 keep = new_bd_ia32_femms(NULL, block);
2008 /* use EMMS to clear all */
2009 keep = new_bd_ia32_emms(NULL, block);
2011 sched_add_before(first_insn, keep);
2017 /* now kill registers */
2019 /* we can only kill from TOS, so bring them up */
2020 if (! (kill_mask & 1)) {
2021 /* search from behind, because we can to a double-pop */
2022 for (i = depth - 1; i >= 0; --i) {
2023 if (kill_mask & (1 << i)) {
2024 kill_mask &= ~(1 << i);
2031 x87_set_st(state, -1, keep, i);
2032 x87_create_fxch(state, first_insn, i);
2035 if ((kill_mask & 3) == 3) {
2036 /* we can do a double-pop */
2040 /* only a single pop */
2045 kill_mask >>= num_pop;
2046 keep = x87_create_fpop(state, first_insn, num_pop);
2053 * Run a simulation and fix all virtual instructions for a block.
2055 * @param sim the simulator handle
2056 * @param block the current block
/* Run the simulation over one block: clone the incoming stack state, pop
 * dead registers at block entry, then walk the schedule invoking the per-op
 * sim_func installed in op->ops.generic.  At the end, propagate the final
 * state to all successors — either as their initial state (and enqueue them
 * on the worklist) or by shuffling to match an already fixed begin state. */
2058 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
2061 blk_state *bl_state = x87_get_bl_state(sim, block);
2062 x87_state *state = bl_state->begin;
2063 ir_node *start_block;
2065 assert(state != NULL);
2066 /* already processed? */
2067 if (bl_state->end != NULL)
2070 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2071 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2072 DEBUG_ONLY(x87_dump_stack(state);)
2074 /* create a new state, will be changed */
2075 state = x87_clone_state(sim, state);
2076 /* at block begin, kill all dead registers */
2077 x87_kill_deads(sim, block, state);
2079 /* beware, n might change */
2080 for (n = sched_first(block); !sched_is_end(n); n = next) {
2083 ir_op *op = get_irn_op(n);
2086 * get the next node to be simulated here.
2087 * n might be completely removed from the schedule-
2089 next = sched_next(n);
2090 if (op->ops.generic != NULL) {
2091 func = (sim_func)op->ops.generic; /* installed by register_sim() */
2094 node_inserted = (*func)(state, n);
2097 * sim_func might have added an additional node after n,
2098 * so update next node
2099 * beware: n must not be changed by sim_func
2100 * (i.e. removed from schedule) in this case
2102 if (node_inserted != NO_NODE_ADDED)
2103 next = sched_next(n);
2107 start_block = get_irg_start_block(get_irn_irg(block));
2109 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state);)
2111 /* check if the state must be shuffled */
2112 foreach_block_succ(block, edge) {
2113 ir_node *succ = get_edge_src_irn(edge);
2114 blk_state *succ_state;
2116 if (succ == start_block) /* never propagate into the start block */
2119 succ_state = x87_get_bl_state(sim, succ);
2121 if (succ_state->begin == NULL) {
2122 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2123 DEBUG_ONLY(x87_dump_stack(state);)
2124 succ_state->begin = state;
2126 waitq_put(sim->worklist, succ);
2128 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2129 /* There is already a begin state for the successor, bad.
2130 Do the necessary permutations.
2131 Note that critical edges are removed, so this is always possible:
2132 If the successor has more than one possible input, then it must
2135 x87_shuffle(block, state, succ_state->begin);
2138 bl_state->end = state;
2142 * Register a simulator function.
2144 * @param op the opcode to simulate
2145 * @param func the simulator function for the opcode
/* Install @p func as the simulator callback for opcode @p op (stored in the
 * generic function pointer; must not already be set). */
2147 static void register_sim(ir_op *op, sim_func func)
2149 assert(op->ops.generic == NULL);
2150 op->ops.generic = (op_func) func;
2154 * Create a new x87 simulator.
2156 * @param sim a simulator handle, will be initialized
2157 * @param irg the current graph
/* Initialize a simulator handle for @p irg: set up the obstack, the block
 * state map and the liveness cache, then register the sim_* callback for
 * every virtual opcode that must be rewritten to a real x87 instruction. */
2159 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
2161 obstack_init(&sim->obst);
2162 sim->blk_states = pmap_create();
2163 sim->n_idx = get_irg_last_idx(irg);
2164 sim->live = OALLOCN(&sim->obst, vfp_liveness, sim->n_idx); /* one liveness entry per node index */
2166 DB((dbg, LEVEL_1, "--------------------------------\n"
2167 "x87 Simulator started for %+F\n", irg));
2169 /* set the generic function pointer of instruction we must simulate */
2170 ir_clear_opcodes_generic_func();
2172 register_sim(op_ia32_Asm, sim_Asm);
2173 register_sim(op_ia32_Call, sim_Call);
2174 register_sim(op_ia32_vfld, sim_fld);
2175 register_sim(op_ia32_vfild, sim_fild);
2176 register_sim(op_ia32_vfld1, sim_fld1);
2177 register_sim(op_ia32_vfldz, sim_fldz);
2178 register_sim(op_ia32_vfadd, sim_fadd);
2179 register_sim(op_ia32_vfsub, sim_fsub);
2180 register_sim(op_ia32_vfmul, sim_fmul);
2181 register_sim(op_ia32_vfdiv, sim_fdiv);
2182 register_sim(op_ia32_vfprem, sim_fprem);
2183 register_sim(op_ia32_vfabs, sim_fabs);
2184 register_sim(op_ia32_vfchs, sim_fchs);
2185 register_sim(op_ia32_vfist, sim_fist);
2186 register_sim(op_ia32_vfisttp, sim_fisttp);
2187 register_sim(op_ia32_vfst, sim_fst);
2188 register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
2189 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
2190 register_sim(op_ia32_vFucomi, sim_Fucom);
2191 register_sim(op_be_Copy, sim_Copy);
2192 register_sim(op_be_Return, sim_Return);
2193 register_sim(op_be_Perm, sim_Perm);
2194 register_sim(op_be_Keep, sim_Keep);
2198 * Destroy a x87 simulator.
2200 * @param sim the simulator handle
/* Release all resources of a simulator handle (block state map and the
 * obstack holding states and liveness data). */
2202 static void x87_destroy_simulator(x87_simulator *sim)
2204 pmap_destroy(sim->blk_states);
2205 obstack_free(&sim->obst, NULL);
2206 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2210 * Pre-block walker: calculate the liveness information for the block
2211 * and store it into the sim->live cache.
/* Block-walker callback: precompute liveness for @p block into the
 * simulator's cache (@p data is the x87_simulator handle). */
2213 static void update_liveness_walker(ir_node *block, void *data)
2215 x87_simulator *sim = (x87_simulator*)data;
2216 update_liveness(sim, block);
2220 * Run a simulation and fix all virtual instructions for a graph.
2221 * Replaces all virtual floating point instructions and registers
/* Entry point: run the x87 simulation over the whole graph.  Seeds the
 * start block with an empty stack state, precomputes liveness for every
 * block (the simulator inserts nodes, which would break lazy computation),
 * then processes blocks from a worklist until all reachable blocks have a
 * fixed end state.  Replaces all virtual fp instructions/registers. */
2224 void ia32_x87_simulate_graph(ir_graph *irg)
2226 /* TODO improve code quality (less executed fxch) by using execfreqs */
2228 ir_node *block, *start_block;
2229 blk_state *bl_state;
2232 /* create the simulator */
2233 x87_init_simulator(&sim, irg);
2235 start_block = get_irg_start_block(irg);
2236 bl_state = x87_get_bl_state(&sim, start_block);
2238 /* start with the empty state */
2239 bl_state->begin = empty;
2242 sim.worklist = new_waitq();
2243 waitq_put(sim.worklist, start_block);
2245 be_assure_live_sets(irg);
2246 sim.lv = be_get_irg_liveness(irg);
2248 /* Calculate the liveness for all nodes. We must precalculate this info,
2249 * because the simulator adds new nodes (possible before Phi nodes) which
2250 * would let a lazy calculation fail.
2251 * On the other hand we reduce the computation amount due to
2252 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2254 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
2258 block = (ir_node*)waitq_get(sim.worklist);
2259 x87_simulate_block(&sim, block);
2260 } while (! waitq_empty(sim.worklist));
2263 del_waitq(sim.worklist);
2264 x87_destroy_simulator(&sim);
2267 /* Initializes the x87 simulator. */
2268 void ia32_init_x87(void)
2270 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");