/*
 * Copyright (C) 1995-2010 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       This file implements the x87 support and virtual to stack
 *              register translation for the ia32 backend.
 * @author      Michael Beck
 */
33 #include "iredges_t.h"
48 #include "bearch_ia32_t.h"
49 #include "ia32_new_nodes.h"
50 #include "gen_ia32_new_nodes.h"
51 #include "gen_ia32_regalloc_if.h"
53 #include "ia32_architecture.h"
55 /** the debug handle */
56 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Forward declaration of the simulator handle (defined below). */
typedef struct x87_simulator x87_simulator;
62 * An exchange template.
63 * Note that our virtual functions have the same inputs
64 * and attributes as the real ones, so we can simple exchange
66 * Further, x87 supports inverse instructions, so we can handle them.
68 typedef struct exchange_tmpl {
69 ir_op *normal_op; /**< the normal one */
70 ir_op *reverse_op; /**< the reverse one if exists */
71 ir_op *normal_pop_op; /**< the normal one with tos pop */
72 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
76 * An entry on the simulated x87 stack.
78 typedef struct st_entry {
79 int reg_idx; /**< the virtual register index of this stack value */
80 ir_node *node; /**< the node that produced this value */
86 typedef struct x87_state {
87 st_entry st[N_ia32_st_REGS]; /**< the register stack */
88 int depth; /**< the current stack depth */
89 x87_simulator *sim; /**< The simulator. */
92 /** An empty state, used for blocks without fp instructions. */
93 static x87_state empty = { { {0, NULL}, }, 0, NULL };
/**
 * Return values of the instruction simulator functions.
 */
enum {
	NO_NODE_ADDED = 0, /**< No node that needs simulation was added. */
	NODE_ADDED    = 1  /**< A node that must be simulated was added by the simulator
	                        in the schedule AFTER the current node. */
};
105 * The type of an instruction simulator function.
107 * @param state the x87 state
108 * @param n the node to be simulated
110 * @return NODE_ADDED if a node was added AFTER n in schedule that MUST be
112 * NO_NODE_ADDED otherwise
114 typedef int (*sim_func)(x87_state *state, ir_node *n);
117 * A block state: Every block has a x87 state at the beginning and at the end.
119 typedef struct blk_state {
120 x87_state *begin; /**< state at the begin or NULL if not assigned */
121 x87_state *end; /**< state at the end or NULL if not assigned */
/** Liveness bitset for vfp registers (one bit per virtual fp register). */
typedef unsigned char vfp_liveness;
130 struct x87_simulator {
131 struct obstack obst; /**< An obstack for fast allocating. */
132 pmap *blk_states; /**< Map blocks to states. */
133 be_lv_t *lv; /**< intrablock liveness. */
134 vfp_liveness *live; /**< Liveness information. */
135 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
136 waitq *worklist; /**< Worklist of blocks that must be processed. */
140 * Returns the current stack depth.
142 * @param state the x87 state
144 * @return the x87 stack depth
146 static int x87_get_depth(const x87_state *state)
151 static st_entry *x87_get_entry(x87_state *const state, int const pos)
153 assert(0 <= pos && pos < state->depth);
154 return &state->st[N_ia32_st_REGS - state->depth + pos];
158 * Return the virtual register index at st(pos).
160 * @param state the x87 state
161 * @param pos a stack position
163 * @return the vfp register index that produced the value at st(pos)
165 static int x87_get_st_reg(const x87_state *state, int pos)
167 return x87_get_entry((x87_state*)state, pos)->reg_idx;
#ifdef DEBUG_libfirm
/**
 * Dump the stack for debugging.
 *
 * @param state  the x87 state
 */
static void x87_dump_stack(const x87_state *state)
{
	for (int i = state->depth; i-- != 0;) {
		st_entry const *const entry = x87_get_entry((x87_state*)state, i);
		DB((dbg, LEVEL_2, "vf%d(%+F) ", entry->reg_idx, entry->node));
	}
	DB((dbg, LEVEL_2, "<-- TOS\n"));
}
#endif /* DEBUG_libfirm */
187 * Set a virtual register to st(pos).
189 * @param state the x87 state
190 * @param reg_idx the vfp register index that should be set
191 * @param node the IR node that produces the value of the vfp register
192 * @param pos the stack position where the new value should be entered
194 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
196 st_entry *const entry = x87_get_entry(state, pos);
197 entry->reg_idx = reg_idx;
200 DB((dbg, LEVEL_2, "After SET_REG: "));
201 DEBUG_ONLY(x87_dump_stack(state);)
205 * Set the tos virtual register.
207 * @param state the x87 state
208 * @param reg_idx the vfp register index that should be set
209 * @param node the IR node that produces the value of the vfp register
211 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node)
213 x87_set_st(state, reg_idx, node, 0);
217 * Swap st(0) with st(pos).
219 * @param state the x87 state
220 * @param pos the stack position to change the tos with
222 static void x87_fxch(x87_state *state, int pos)
224 st_entry *const a = x87_get_entry(state, pos);
225 st_entry *const b = x87_get_entry(state, 0);
226 st_entry const t = *a;
230 DB((dbg, LEVEL_2, "After FXCH: "));
231 DEBUG_ONLY(x87_dump_stack(state);)
235 * Convert a virtual register to the stack index.
237 * @param state the x87 state
238 * @param reg_idx the register vfp index
240 * @return the stack position where the register is stacked
241 * or -1 if the virtual register was not found
243 static int x87_on_stack(const x87_state *state, int reg_idx)
245 for (int i = 0; i < state->depth; ++i) {
246 if (x87_get_st_reg(state, i) == reg_idx)
253 * Push a virtual Register onto the stack, double pushed allowed.
255 * @param state the x87 state
256 * @param reg_idx the register vfp index
257 * @param node the node that produces the value of the vfp register
259 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node)
261 assert(state->depth < N_ia32_st_REGS && "stack overrun");
264 st_entry *const entry = x87_get_entry(state, 0);
265 entry->reg_idx = reg_idx;
268 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state);)
272 * Push a virtual Register onto the stack, double pushes are NOT allowed.
274 * @param state the x87 state
275 * @param reg_idx the register vfp index
276 * @param node the node that produces the value of the vfp register
278 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
280 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
282 x87_push_dbl(state, reg_idx, node);
286 * Pop a virtual Register from the stack.
288 * @param state the x87 state
290 static void x87_pop(x87_state *state)
292 assert(state->depth > 0 && "stack underrun");
296 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state);)
300 * Empty the fpu stack
302 * @param state the x87 state
304 static void x87_emms(x87_state *state)
310 * Returns the block state of a block.
312 * @param sim the x87 simulator handle
313 * @param block the current block
315 * @return the block state
317 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
319 blk_state *res = pmap_get(blk_state, sim->blk_states, block);
322 res = OALLOC(&sim->obst, blk_state);
326 pmap_insert(sim->blk_states, block, res);
333 * Creates a new x87 state.
335 * @param sim the x87 simulator handle
337 * @return a new x87 state
339 static x87_state *x87_alloc_state(x87_simulator *sim)
341 x87_state *res = OALLOC(&sim->obst, x87_state);
350 * @param sim the x87 simulator handle
351 * @param src the x87 state that will be cloned
353 * @return a cloned copy of the src state
355 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
357 x87_state *res = x87_alloc_state(sim);
364 * Patch a virtual instruction into a x87 one and return
365 * the node representing the result value.
367 * @param n the IR node to patch
368 * @param op the x87 opcode to patch in
370 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
372 ir_mode *mode = get_irn_mode(n);
377 if (mode == mode_T) {
378 /* patch all Proj's */
379 foreach_out_edge(n, edge) {
380 ir_node *proj = get_edge_src_irn(edge);
382 mode = get_irn_mode(proj);
383 if (mode_is_float(mode)) {
385 set_irn_mode(proj, ia32_reg_classes[CLASS_ia32_st].mode);
389 } else if (mode_is_float(mode))
390 set_irn_mode(n, ia32_reg_classes[CLASS_ia32_st].mode);
395 * Returns the first Proj of a mode_T node having a given mode.
397 * @param n the mode_T node
398 * @param m the desired mode of the Proj
399 * @return The first Proj of mode @p m found or NULL.
401 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
403 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
405 foreach_out_edge(n, edge) {
406 ir_node *proj = get_edge_src_irn(edge);
407 if (get_irn_mode(proj) == m)
415 * Wrap the arch_* function here so we can check for errors.
417 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
419 const arch_register_t *res = arch_get_irn_register(irn);
421 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
425 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
428 const arch_register_t *res = arch_get_irn_register_out(irn, pos);
430 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
434 static inline const arch_register_t *get_st_reg(int index)
436 return &ia32_registers[REG_ST0 + index];
439 /* -------------- x87 perm --------------- */
442 * Creates a fxch for shuffle.
444 * @param state the x87 state
445 * @param pos parameter for fxch
446 * @param block the block were fxch is inserted
448 * Creates a new fxch node and reroute the user of the old node
451 * @return the fxch node
453 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block)
456 ia32_x87_attr_t *attr;
458 fxch = new_bd_ia32_fxch(NULL, block);
459 attr = get_ia32_x87_attr(fxch);
460 attr->x87[0] = get_st_reg(pos);
461 attr->x87[2] = get_st_reg(0);
465 x87_fxch(state, pos);
470 * Calculate the necessary permutations to reach dst_state.
472 * These permutations are done with fxch instructions and placed
473 * at the end of the block.
475 * Note that critical edges are removed here, so we need only
476 * a shuffle if the current block has only one successor.
478 * @param block the current block
479 * @param state the current x87 stack state, might be modified
480 * @param dst_state destination state
484 static x87_state *x87_shuffle(ir_node *block, x87_state *state, const x87_state *dst_state)
486 int i, n_cycles, k, ri;
487 unsigned cycles[4], all_mask;
488 char cycle_idx[4][8];
489 ir_node *fxch, *before, *after;
491 assert(state->depth == dst_state->depth);
493 /* Some mathematics here:
494 If we have a cycle of length n that includes the tos,
495 we need n-1 exchange operations.
496 We can always add the tos and restore it, so we need
497 n+1 exchange operations for a cycle not containing the tos.
498 So, the maximum of needed operations is for a cycle of 7
499 not including the tos == 8.
500 This is the same number of ops we would need for using stores,
501 so exchange is cheaper (we save the loads).
502 On the other hand, we might need an additional exchange
503 in the next block to bring one operand on top, so the
504 number of ops in the first case is identical.
505 Further, no more than 4 cycles can exists (4 x 2).
507 all_mask = (1 << (state->depth)) - 1;
509 for (n_cycles = 0; all_mask; ++n_cycles) {
510 int src_idx, dst_idx;
512 /* find the first free slot */
513 for (i = 0; i < state->depth; ++i) {
514 if (all_mask & (1 << i)) {
515 all_mask &= ~(1 << i);
517 /* check if there are differences here */
518 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
524 /* no more cycles found */
529 cycles[n_cycles] = (1 << i);
530 cycle_idx[n_cycles][k++] = i;
531 for (src_idx = i; ; src_idx = dst_idx) {
532 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
534 if ((all_mask & (1 << dst_idx)) == 0)
537 cycle_idx[n_cycles][k++] = dst_idx;
538 cycles[n_cycles] |= (1 << dst_idx);
539 all_mask &= ~(1 << dst_idx);
541 cycle_idx[n_cycles][k] = -1;
545 /* no permutation needed */
549 /* Hmm: permutation needed */
550 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
551 DEBUG_ONLY(x87_dump_stack(state);)
552 DB((dbg, LEVEL_2, " to\n"));
553 DEBUG_ONLY(x87_dump_stack(dst_state);)
557 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
558 for (ri = 0; ri < n_cycles; ++ri) {
559 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
560 for (k = 0; cycle_idx[ri][k] != -1; ++k)
561 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
562 DB((dbg, LEVEL_2, "\n"));
569 * Find the place node must be insert.
570 * We have only one successor block, so the last instruction should
573 before = sched_last(block);
574 assert(is_cfop(before));
576 /* now do the permutations */
577 for (ri = 0; ri < n_cycles; ++ri) {
578 if ((cycles[ri] & 1) == 0) {
579 /* this cycle does not include the tos */
580 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
582 sched_add_after(after, fxch);
584 sched_add_before(before, fxch);
587 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
588 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
590 sched_add_after(after, fxch);
592 sched_add_before(before, fxch);
595 if ((cycles[ri] & 1) == 0) {
596 /* this cycle does not include the tos */
597 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
598 sched_add_after(after, fxch);
605 * Create a fxch node before another node.
607 * @param state the x87 state
608 * @param n the node after the fxch
609 * @param pos exchange st(pos) with st(0)
613 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
616 ia32_x87_attr_t *attr;
617 ir_node *block = get_nodes_block(n);
619 x87_fxch(state, pos);
621 fxch = new_bd_ia32_fxch(NULL, block);
622 attr = get_ia32_x87_attr(fxch);
623 attr->x87[0] = get_st_reg(pos);
624 attr->x87[2] = get_st_reg(0);
628 sched_add_before(n, fxch);
629 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
634 * Create a fpush before node n.
636 * @param state the x87 state
637 * @param n the node after the fpush
638 * @param pos push st(pos) on stack
639 * @param op_idx replace input op_idx of n with the fpush result
641 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx)
643 ir_node *fpush, *pred = get_irn_n(n, op_idx);
644 ia32_x87_attr_t *attr;
645 const arch_register_t *out = x87_get_irn_register(pred);
647 x87_push_dbl(state, arch_register_get_index(out), pred);
649 fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
650 attr = get_ia32_x87_attr(fpush);
651 attr->x87[0] = get_st_reg(pos);
652 attr->x87[2] = get_st_reg(0);
655 sched_add_before(n, fpush);
657 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
661 * Create a fpop before node n.
663 * @param state the x87 state
664 * @param n the node after the fpop
665 * @param num pop 1 or 2 values
667 * @return the fpop node
669 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
671 ir_node *fpop = NULL;
672 ia32_x87_attr_t *attr;
677 if (ia32_cg_config.use_ffreep)
678 fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
680 fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
681 attr = get_ia32_x87_attr(fpop);
682 attr->x87[0] = get_st_reg(0);
683 attr->x87[1] = get_st_reg(0);
684 attr->x87[2] = get_st_reg(0);
687 sched_add_before(n, fpop);
688 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
693 /* --------------------------------- liveness ------------------------------------------ */
696 * The liveness transfer function.
697 * Updates a live set over a single step from a given node to its predecessor.
698 * Everything defined at the node is removed from the set, the uses of the node get inserted.
700 * @param irn The node at which liveness should be computed.
701 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
702 * the registers live after irn.
704 * @return The live bitset.
706 static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
709 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
711 if (get_irn_mode(irn) == mode_T) {
712 foreach_out_edge(irn, edge) {
713 ir_node *proj = get_edge_src_irn(edge);
715 if (arch_irn_consider_in_reg_alloc(cls, proj)) {
716 const arch_register_t *reg = x87_get_irn_register(proj);
717 live &= ~(1 << arch_register_get_index(reg));
720 } else if (arch_irn_consider_in_reg_alloc(cls, irn)) {
721 const arch_register_t *reg = x87_get_irn_register(irn);
722 live &= ~(1 << arch_register_get_index(reg));
725 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
726 ir_node *op = get_irn_n(irn, i);
728 if (mode_is_float(get_irn_mode(op)) &&
729 arch_irn_consider_in_reg_alloc(cls, op)) {
730 const arch_register_t *reg = x87_get_irn_register(op);
731 live |= 1 << arch_register_get_index(reg);
738 * Put all live virtual registers at the end of a block into a bitset.
740 * @param sim the simulator handle
741 * @param bl the block
743 * @return The live bitset at the end of this block
745 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
747 vfp_liveness live = 0;
748 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
749 const be_lv_t *lv = sim->lv;
751 be_lv_foreach(lv, block, be_lv_state_end, node) {
752 const arch_register_t *reg;
753 if (!arch_irn_consider_in_reg_alloc(cls, node))
756 reg = x87_get_irn_register(node);
757 live |= 1 << arch_register_get_index(reg);
/** get the register mask from an arch_register */
#define REGMASK(reg) (1 << (arch_register_get_index(reg)))
767 * Return a bitset of argument registers which are live at the end of a node.
769 * @param sim the simulator handle
770 * @param pos the node
771 * @param kill kill mask for the output registers
773 * @return The live bitset.
775 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
777 unsigned idx = get_irn_idx(pos);
779 assert(idx < sim->n_idx);
780 return sim->live[idx] & ~kill;
784 * Calculate the liveness for a whole block and cache it.
786 * @param sim the simulator handle
787 * @param block the block
789 static void update_liveness(x87_simulator *sim, ir_node *block)
791 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
794 /* now iterate through the block backward and cache the results */
795 sched_foreach_reverse(block, irn) {
796 /* stop at the first Phi: this produces the live-in */
800 idx = get_irn_idx(irn);
801 sim->live[idx] = live;
803 live = vfp_liveness_transfer(irn, live);
805 idx = get_irn_idx(block);
806 sim->live[idx] = live;
/**
 * Returns true if a register is live in a set.
 *
 * @param reg_idx  the vfp register index
 * @param live     a live bitset
 */
#define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
#ifdef DEBUG_libfirm
/**
 * Dump liveness info.
 *
 * @param live  the live bitset
 */
static void vfp_dump_live(vfp_liveness live)
{
	int i;

	DB((dbg, LEVEL_2, "Live after: "));
	for (i = 0; i < 8; ++i) {
		if (live & (1 << i)) {
			DB((dbg, LEVEL_2, "vf%d ", i));
		}
	}
	DB((dbg, LEVEL_2, "\n"));
}
#endif /* DEBUG_libfirm */
837 /* --------------------------------- simulators ---------------------------------------- */
840 * Simulate a virtual binop.
842 * @param state the x87 state
843 * @param n the node that should be simulated (and patched)
844 * @param tmpl the template containing the 4 possible x87 opcodes
846 * @return NO_NODE_ADDED
848 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl)
850 int op2_idx = 0, op1_idx;
851 int out_idx, do_pop = 0;
852 ia32_x87_attr_t *attr;
854 ir_node *patched_insn;
856 x87_simulator *sim = state->sim;
857 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
858 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
859 const arch_register_t *op1_reg = x87_get_irn_register(op1);
860 const arch_register_t *op2_reg = x87_get_irn_register(op2);
861 const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
862 int reg_index_1 = arch_register_get_index(op1_reg);
863 int reg_index_2 = arch_register_get_index(op2_reg);
864 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
868 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
869 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
870 arch_register_get_name(out)));
871 DEBUG_ONLY(vfp_dump_live(live);)
872 DB((dbg, LEVEL_1, "Stack before: "));
873 DEBUG_ONLY(x87_dump_stack(state);)
875 op1_idx = x87_on_stack(state, reg_index_1);
876 assert(op1_idx >= 0);
877 op1_live_after = is_vfp_live(reg_index_1, live);
879 attr = get_ia32_x87_attr(n);
880 permuted = attr->attr.data.ins_permuted;
882 if (reg_index_2 != REG_VFP_VFP_NOREG) {
885 /* second operand is a vfp register */
886 op2_idx = x87_on_stack(state, reg_index_2);
887 assert(op2_idx >= 0);
888 op2_live_after = is_vfp_live(reg_index_2, live);
890 if (op2_live_after) {
891 /* Second operand is live. */
893 if (op1_live_after) {
894 /* Both operands are live: push the first one.
895 This works even for op1 == op2. */
896 x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
897 /* now do fxxx (tos=tos X op) */
901 dst = tmpl->normal_op;
903 /* Second live, first operand is dead here, bring it to tos. */
905 x87_create_fxch(state, n, op1_idx);
910 /* now do fxxx (tos=tos X op) */
912 dst = tmpl->normal_op;
915 /* Second operand is dead. */
916 if (op1_live_after) {
917 /* First operand is live: bring second to tos. */
919 x87_create_fxch(state, n, op2_idx);
924 /* now do fxxxr (tos = op X tos) */
926 dst = tmpl->reverse_op;
928 /* Both operands are dead here, pop them from the stack. */
931 /* Both are identically and on tos, no pop needed. */
932 /* here fxxx (tos = tos X tos) */
933 dst = tmpl->normal_op;
936 /* now do fxxxp (op = op X tos, pop) */
937 dst = tmpl->normal_pop_op;
941 } else if (op1_idx == 0) {
942 assert(op1_idx != op2_idx);
943 /* now do fxxxrp (op = tos X op, pop) */
944 dst = tmpl->reverse_pop_op;
948 /* Bring the second on top. */
949 x87_create_fxch(state, n, op2_idx);
950 if (op1_idx == op2_idx) {
951 /* Both are identically and on tos now, no pop needed. */
954 /* use fxxx (tos = tos X tos) */
955 dst = tmpl->normal_op;
958 /* op2 is on tos now */
960 /* use fxxxp (op = op X tos, pop) */
961 dst = tmpl->normal_pop_op;
969 /* second operand is an address mode */
970 if (op1_live_after) {
971 /* first operand is live: push it here */
972 x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
975 /* first operand is dead: bring it to tos */
977 x87_create_fxch(state, n, op1_idx);
982 /* use fxxx (tos = tos X mem) */
983 dst = permuted ? tmpl->reverse_op : tmpl->normal_op;
987 patched_insn = x87_patch_insn(n, dst);
988 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
993 /* patch the operation */
994 attr->x87[0] = op1_reg = get_st_reg(op1_idx);
995 if (reg_index_2 != REG_VFP_VFP_NOREG) {
996 attr->x87[1] = op2_reg = get_st_reg(op2_idx);
998 attr->x87[2] = out = get_st_reg(out_idx);
1000 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1001 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1002 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
1003 arch_register_get_name(out)));
1005 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1006 arch_register_get_name(op1_reg),
1007 arch_register_get_name(out)));
1010 return NO_NODE_ADDED;
1014 * Simulate a virtual Unop.
1016 * @param state the x87 state
1017 * @param n the node that should be simulated (and patched)
1018 * @param op the x87 opcode that will replace n's opcode
1020 * @return NO_NODE_ADDED
1022 static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
1024 x87_simulator *sim = state->sim;
1025 const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, 0));
1026 const arch_register_t *out = x87_get_irn_register(n);
1027 ia32_x87_attr_t *attr;
1028 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1030 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1031 DEBUG_ONLY(vfp_dump_live(live);)
1033 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1035 if (is_vfp_live(arch_register_get_index(op1), live)) {
1036 /* push the operand here */
1037 x87_create_fpush(state, n, op1_idx, 0);
1040 /* operand is dead, bring it to tos */
1042 x87_create_fxch(state, n, op1_idx);
1046 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1047 attr = get_ia32_x87_attr(n);
1048 attr->x87[0] = op1 = get_st_reg(0);
1049 attr->x87[2] = out = get_st_reg(0);
1050 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1052 return NO_NODE_ADDED;
1056 * Simulate a virtual Load instruction.
1058 * @param state the x87 state
1059 * @param n the node that should be simulated (and patched)
1060 * @param op the x87 opcode that will replace n's opcode
1062 * @return NO_NODE_ADDED
1064 static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
1066 const arch_register_t *out = x87_irn_get_register(n, res_pos);
1067 ia32_x87_attr_t *attr;
1069 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
1070 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1071 assert(out == x87_irn_get_register(n, res_pos));
1072 attr = get_ia32_x87_attr(n);
1073 attr->x87[2] = out = get_st_reg(0);
1074 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1076 return NO_NODE_ADDED;
1080 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1082 * @param store The store
1083 * @param old_val The former value
1084 * @param new_val The new value
1086 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
1088 foreach_out_edge_safe(old_val, edge) {
1089 ir_node *user = get_edge_src_irn(edge);
1091 if (! user || user == store)
1094 /* if the user is scheduled after the store: rewire */
1095 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1097 /* find the input of the user pointing to the old value */
1098 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1099 if (get_irn_n(user, i) == old_val)
1100 set_irn_n(user, i, new_val);
1107 * Simulate a virtual Store.
1109 * @param state the x87 state
1110 * @param n the node that should be simulated (and patched)
1111 * @param op the x87 store opcode
1112 * @param op_p the x87 store and pop opcode
1114 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p)
1116 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1117 const arch_register_t *op2 = x87_get_irn_register(val);
1118 unsigned live = vfp_live_args_after(state->sim, n, 0);
1119 int insn = NO_NODE_ADDED;
1120 ia32_x87_attr_t *attr;
1121 int op2_reg_idx, op2_idx, depth;
1122 int live_after_node;
1125 op2_reg_idx = arch_register_get_index(op2);
1126 op2_idx = x87_on_stack(state, op2_reg_idx);
1127 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1128 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1129 assert(op2_idx >= 0);
1131 mode = get_ia32_ls_mode(n);
1132 depth = x87_get_depth(state);
1134 if (live_after_node) {
1136 Problem: fst doesn't support 96bit modes (spills), only fstp does
1137 fist doesn't support 64bit mode, only fistp
1139 - stack not full: push value and fstp
1140 - stack full: fstp value and load again
1141 Note that we cannot test on mode_E, because floats might be 96bit ...
1143 if (get_mode_size_bits(mode) > 64 || (mode_is_int(mode) && get_mode_size_bits(mode) > 32)) {
1144 if (depth < N_ia32_st_REGS) {
1145 /* ok, we have a free register: push + fstp */
1146 x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
1148 x87_patch_insn(n, op_p);
1150 ir_node *vfld, *mem, *block, *rproj, *mproj;
1151 ir_graph *irg = get_irn_irg(n);
1152 ir_node *nomem = get_irg_no_mem(irg);
1154 /* stack full here: need fstp + load */
1156 x87_patch_insn(n, op_p);
1158 block = get_nodes_block(n);
1159 vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), nomem, get_ia32_ls_mode(n));
1161 /* copy all attributes */
1162 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1163 if (is_ia32_use_frame(n))
1164 set_ia32_use_frame(vfld);
1165 set_ia32_op_type(vfld, ia32_AddrModeS);
1166 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1167 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1168 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1170 rproj = new_r_Proj(vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1171 mproj = new_r_Proj(vfld, mode_M, pn_ia32_vfld_M);
1172 mem = get_irn_Proj_for_mode(n, mode_M);
1174 assert(mem && "Store memory not found");
1176 arch_set_irn_register(rproj, op2);
1178 /* reroute all former users of the store memory to the load memory */
1179 edges_reroute(mem, mproj);
1180 /* set the memory input of the load to the store memory */
1181 set_irn_n(vfld, n_ia32_vfld_mem, mem);
1183 sched_add_after(n, vfld);
1184 sched_add_after(vfld, rproj);
1186 /* rewire all users, scheduled after the store, to the loaded value */
1187 collect_and_rewire_users(n, val, rproj);
1192 /* we can only store the tos to memory */
1194 x87_create_fxch(state, n, op2_idx);
1196 /* mode size 64 or smaller -> use normal fst */
1197 x87_patch_insn(n, op);
1200 /* we can only store the tos to memory */
1202 x87_create_fxch(state, n, op2_idx);
1205 x87_patch_insn(n, op_p);
1208 attr = get_ia32_x87_attr(n);
1209 attr->x87[1] = op2 = get_st_reg(0);
1210 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/* Generators for the trivial per-opcode simulator wrappers. */
#define _GEN_BINOP(op, rev) \
static int sim_##op(x87_state *state, ir_node *n) { \
	exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
	return sim_binop(state, n, &tmpl); \
}

#define GEN_BINOP(op)  _GEN_BINOP(op, op)
#define GEN_BINOPR(op) _GEN_BINOP(op, op##r)

#define GEN_LOAD(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
}

#define GEN_UNOP(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_unop(state, n, op_ia32_##op); \
}

#define GEN_STORE(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
}
1258 * Simulate a virtual fisttp.
1260 * @param state the x87 state
1261 * @param n the node that should be simulated (and patched)
1263 * @return NO_NODE_ADDED
1265 static int sim_fisttp(x87_state *state, ir_node *n)
1267 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1268 const arch_register_t *op2 = x87_get_irn_register(val);
1269 ia32_x87_attr_t *attr;
1270 int op2_reg_idx, op2_idx;
1272 op2_reg_idx = arch_register_get_index(op2);
1273 op2_idx = x87_on_stack(state, op2_reg_idx);
1274 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1275 assert(op2_idx >= 0);
1277 /* Note: although the value is still live here, it is destroyed because
1278 of the pop. The register allocator is aware of that and introduced a copy
1279 if the value must be alive. */
1281 /* we can only store the tos to memory */
1283 x87_create_fxch(state, n, op2_idx);
1286 x87_patch_insn(n, op_ia32_fisttp);
1288 attr = get_ia32_x87_attr(n);
1289 attr->x87[1] = op2 = get_st_reg(0);
1290 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
1292 return NO_NODE_ADDED;
1296 * Simulate a virtual FtstFnstsw.
1298 * @param state the x87 state
1299 * @param n the node that should be simulated (and patched)
1301 * @return NO_NODE_ADDED
/* ftst tests st(0) only, so the operand is brought to the TOS first; if it
 * dies at this node, an fpop is scheduled right after the test.
 * NOTE(review): listing has gaps (original line numbers jump) — code kept
 * byte-identical. */
1303 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1305 x87_simulator *sim = state->sim;
1306 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1307 ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1308 const arch_register_t *reg1 = x87_get_irn_register(op1_node);
1309 int reg_index_1 = arch_register_get_index(reg1);
1310 int op1_idx = x87_on_stack(state, reg_index_1);
1311 unsigned live = vfp_live_args_after(sim, n, 0);
1313 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, arch_register_get_name(reg1)));
1314 DEBUG_ONLY(vfp_dump_live(live);)
1315 DB((dbg, LEVEL_1, "Stack before: "));
1316 DEBUG_ONLY(x87_dump_stack(state);)
1317 assert(op1_idx >= 0);
1320 /* bring the value to tos */
1321 x87_create_fxch(state, n, op1_idx);
1325 /* patch the operation */
1326 x87_patch_insn(n, op_ia32_FtstFnstsw);
/* NOTE(review): reg1 is re-read via get_st_reg(op1_idx) here even though the
 * fxch above moved it — presumably op1_idx was updated on a line missing
 * from this listing; verify against the full source. */
1327 reg1 = get_st_reg(op1_idx);
1328 attr->x87[0] = reg1;
1329 attr->x87[1] = NULL;
1330 attr->x87[2] = NULL;
/* operand dead after the test: pop it from the fp stack */
1332 if (!is_vfp_live(reg_index_1, live))
1333 x87_create_fpop(state, sched_next(n), 1);
1335 return NO_NODE_ADDED;
1341 * @param state the x87 state
1342 * @param n the node that should be simulated (and patched)
1344 * @return NO_NODE_ADDED
/* Simulate a virtual Fucom (unordered fp compare: vFucomFnstsw / vFucomi).
 * The big case analysis picks the cheapest fxch sequence to place the two
 * operands so that the real fucom* variant (plain / pop / double-pop) can be
 * used, popping operands that are dead after the compare. 'permuted' tracks
 * whether the operand order ended up swapped relative to the virtual node.
 * NOTE(review): listing has gaps — many interior lines (declarations of
 * op1_idx/op2_idx/pops/dst, index updates after fxch, closing braces) are
 * missing from view; code kept byte-identical. */
1346 static int sim_Fucom(x87_state *state, ir_node *n)
1350 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1352 x87_simulator *sim = state->sim;
1353 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1354 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1355 const arch_register_t *op1 = x87_get_irn_register(op1_node);
1356 const arch_register_t *op2 = x87_get_irn_register(op2_node);
1357 int reg_index_1 = arch_register_get_index(op1);
1358 int reg_index_2 = arch_register_get_index(op2);
1359 unsigned live = vfp_live_args_after(sim, n, 0);
1360 bool permuted = attr->attr.data.ins_permuted;
1364 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1365 arch_register_get_name(op1), arch_register_get_name(op2)));
1366 DEBUG_ONLY(vfp_dump_live(live);)
1367 DB((dbg, LEVEL_1, "Stack before: "));
1368 DEBUG_ONLY(x87_dump_stack(state);)
1370 op1_idx = x87_on_stack(state, reg_index_1);
1371 assert(op1_idx >= 0);
1373 /* BEWARE: check for comp a,a cases, they might happen */
1374 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1375 /* second operand is a vfp register */
1376 op2_idx = x87_on_stack(state, reg_index_2);
1377 assert(op2_idx >= 0);
1379 if (is_vfp_live(reg_index_2, live)) {
1380 /* second operand is live */
1382 if (is_vfp_live(reg_index_1, live)) {
1383 /* both operands are live */
1386 /* res = tos X op */
1387 } else if (op2_idx == 0) {
1388 /* res = op X tos */
1389 permuted = !permuted;
1392 /* bring the first one to tos */
1393 x87_create_fxch(state, n, op1_idx);
1394 if (op1_idx == op2_idx) {
1396 } else if (op2_idx == 0) {
1400 /* res = tos X op */
1403 /* second live, first operand is dead here, bring it to tos.
1404 This means further, op1_idx != op2_idx. */
1405 assert(op1_idx != op2_idx);
1407 x87_create_fxch(state, n, op1_idx);
1412 /* res = tos X op, pop */
1416 /* second operand is dead */
1417 if (is_vfp_live(reg_index_1, live)) {
1418 /* first operand is live: bring second to tos.
1419 This means further, op1_idx != op2_idx. */
1420 assert(op1_idx != op2_idx);
1422 x87_create_fxch(state, n, op2_idx);
1427 /* res = op X tos, pop */
1429 permuted = !permuted;
1432 /* both operands are dead here, check first for identity. */
1433 if (op1_idx == op2_idx) {
1434 /* identically, one pop needed */
1436 x87_create_fxch(state, n, op1_idx);
1440 /* res = tos X op, pop */
1443 /* different, move them to st and st(1) and pop both.
1444 The tricky part is to get one into st(1).*/
1445 else if (op2_idx == 1) {
1446 /* good, second operand is already in the right place, move the first */
1448 /* bring the first on top */
1449 x87_create_fxch(state, n, op1_idx);
1450 assert(op2_idx != 0);
1453 /* res = tos X op, pop, pop */
1455 } else if (op1_idx == 1) {
1456 /* good, first operand is already in the right place, move the second */
1458 /* bring the first on top */
1459 x87_create_fxch(state, n, op2_idx);
1460 assert(op1_idx != 0);
1463 /* res = op X tos, pop, pop */
1464 permuted = !permuted;
1468 /* if one is already the TOS, we need two fxch */
1470 /* first one is TOS, move to st(1) */
1471 x87_create_fxch(state, n, 1);
1472 assert(op2_idx != 1);
1474 x87_create_fxch(state, n, op2_idx);
1476 /* res = op X tos, pop, pop */
1478 permuted = !permuted;
1480 } else if (op2_idx == 0) {
1481 /* second one is TOS, move to st(1) */
1482 x87_create_fxch(state, n, 1);
1483 assert(op1_idx != 1);
1485 x87_create_fxch(state, n, op1_idx);
1487 /* res = tos X op, pop, pop */
1490 /* none of them is either TOS or st(1), 3 fxch needed */
1491 x87_create_fxch(state, n, op2_idx);
1492 assert(op1_idx != 0);
1493 x87_create_fxch(state, n, 1);
1495 x87_create_fxch(state, n, op1_idx);
1497 /* res = tos X op, pop, pop */
1504 /* second operand is an address mode */
1505 if (is_vfp_live(reg_index_1, live)) {
1506 /* first operand is live: bring it to TOS */
1508 x87_create_fxch(state, n, op1_idx);
1512 /* first operand is dead: bring it to tos */
1514 x87_create_fxch(state, n, op1_idx);
/* select the real opcode based on how many pops the chosen layout needs */
1521 /* patch the operation */
1522 if (is_ia32_vFucomFnstsw(n)) {
1526 case 0: dst = op_ia32_FucomFnstsw; break;
1527 case 1: dst = op_ia32_FucompFnstsw; break;
1528 case 2: dst = op_ia32_FucomppFnstsw; break;
1529 default: panic("invalid popcount");
1532 for (i = 0; i < pops; ++i) {
1535 } else if (is_ia32_vFucomi(n)) {
/* fucomi has no double-pop form: the second pop becomes an explicit fpop */
1537 case 0: dst = op_ia32_Fucomi; break;
1538 case 1: dst = op_ia32_Fucompi; x87_pop(state); break;
1540 dst = op_ia32_Fucompi;
1542 x87_create_fpop(state, sched_next(n), 1);
1544 default: panic("invalid popcount");
1547 panic("invalid operation %+F", n);
1550 x87_patch_insn(n, dst);
/* rewrite the x87 attribute with the final physical st registers */
1557 op1 = get_st_reg(op1_idx);
1560 op2 = get_st_reg(op2_idx);
1563 attr->x87[2] = NULL;
1564 attr->attr.data.ins_permuted = permuted;
1567 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1568 arch_register_get_name(op1), arch_register_get_name(op2)));
1570 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1571 arch_register_get_name(op1)));
1574 return NO_NODE_ADDED;
1580 * @param state the x87 state
1581 * @param n the node that should be simulated (and patched)
1583 * @return NO_NODE_ADDED
/* Simulate a be_Keep: for every kept vfp operand that is on the fp stack but
 * no longer live after the Keep, schedule an fpop so the value does not rot
 * on the x87 stack. Non-vfp operands are skipped.
 * NOTE(review): listing has gaps (loop body braces and some declarations are
 * missing from view); code kept byte-identical. */
1585 static int sim_Keep(x87_state *state, ir_node *node)
1588 const arch_register_t *op_reg;
1594 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1596 arity = get_irn_arity(node);
1597 for (i = 0; i < arity; ++i) {
1598 op = get_irn_n(node, i);
1599 op_reg = arch_get_irn_register(op);
/* only vfp (virtual fp) registers are handled by the x87 simulator */
1600 if (arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1603 reg_id = arch_register_get_index(op_reg);
1604 live = vfp_live_args_after(state->sim, node, 0);
/* on stack and dead: pop it right after the Keep */
1606 op_stack_idx = x87_on_stack(state, reg_id);
1607 if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live))
1608 x87_create_fpop(state, sched_next(node), 1);
1611 DB((dbg, LEVEL_1, "Stack after: "));
1612 DEBUG_ONLY(x87_dump_stack(state);)
1614 return NO_NODE_ADDED;
1618 * Keep the given node alive by adding a be_Keep.
1620 * @param node the node to kept alive
/* Creates a one-input be_Keep in the node's block and schedules it directly
 * after the node, so the value cannot be considered dead by later passes.
 * NOTE(review): listing has gaps (function braces not visible); code kept
 * byte-identical. */
1622 static void keep_float_node_alive(ir_node *node)
1624 ir_node *block = get_nodes_block(node);
1625 ir_node *keep = be_new_Keep(block, 1, &node);
1627 assert(sched_is_scheduled(node));
1628 sched_add_after(node, keep);
1632 * Create a copy of a node. Recreate the node if it's a constant.
1634 * @param state the x87 state
1635 * @param n the node to be copied
1637 * @return the copy of n
/* Constants (fldz/fld1/fldpi/...) are cheaper to re-materialize with a fresh
 * fld* than to duplicate on the fp stack; everything else becomes an
 * fpushCopy of the predecessor. In both cases the result is pushed onto the
 * simulated stack and its output register is set.
 * NOTE(review): listing has gaps — the switch's case labels/breaks and
 * several braces are missing from view; code kept byte-identical. */
1639 static ir_node *create_Copy(x87_state *state, ir_node *n)
1641 dbg_info *n_dbg = get_irn_dbg_info(n);
1642 ir_mode *mode = get_irn_mode(n);
1643 ir_node *block = get_nodes_block(n);
1644 ir_node *pred = get_irn_n(n, 0);
/* constructor for the constant-reload case, NULL if pred is no constant */
1645 ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL;
1647 const arch_register_t *out;
1648 const arch_register_t *op1;
1649 ia32_x87_attr_t *attr;
1651 /* Do not copy constants, recreate them. */
1652 switch (get_ia32_irn_opcode(pred)) {
1654 cnstr = new_bd_ia32_fldz;
1657 cnstr = new_bd_ia32_fld1;
1659 case iro_ia32_fldpi:
1660 cnstr = new_bd_ia32_fldpi;
1662 case iro_ia32_fldl2e:
1663 cnstr = new_bd_ia32_fldl2e;
1665 case iro_ia32_fldl2t:
1666 cnstr = new_bd_ia32_fldl2t;
1668 case iro_ia32_fldlg2:
1669 cnstr = new_bd_ia32_fldlg2;
1671 case iro_ia32_fldln2:
1672 cnstr = new_bd_ia32_fldln2;
1678 out = x87_get_irn_register(n);
1679 op1 = x87_get_irn_register(pred);
1681 if (cnstr != NULL) {
1682 /* copy a constant */
1683 res = (*cnstr)(n_dbg, block, mode);
1685 x87_push(state, arch_register_get_index(out), res);
1687 attr = get_ia32_x87_attr(res);
1688 attr->x87[2] = get_st_reg(0);
/* generic case: push a copy of pred's current stack slot */
1690 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1692 res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);
1694 x87_push(state, arch_register_get_index(out), res);
1696 attr = get_ia32_x87_attr(res);
1697 attr->x87[0] = get_st_reg(op1_idx);
1698 attr->x87[2] = get_st_reg(0);
1700 arch_set_irn_register(res, out);
1706 * Simulate a be_Copy.
1708 * @param state the x87 state
1709 * @param n the node that should be simulated (and patched)
1711 * @return NO_NODE_ADDED
/* A be_Copy of a vfp value has three outcomes:
 *  - source still live: a real duplicate via create_Copy (fpushCopy / fld*);
 *  - source dead: the Copy degenerates into a rename; it is patched into a
 *    Pop (with an fxch first if the value is not at the TOS);
 *  - otherwise it is a purely virtual copy — only the simulated stack slot
 *    is relabelled, no code is emitted.
 * Non-vfp copies are ignored. NOTE(review): listing has gaps (braces and
 * several statements missing from view); code kept byte-identical. */
1713 static int sim_Copy(x87_state *state, ir_node *n)
1716 const arch_register_t *out;
1717 const arch_register_t *op1;
1718 const arch_register_class_t *cls;
1719 ir_node *node, *next;
1720 int op1_idx, out_idx;
1723 cls = arch_get_irn_reg_class(n);
1724 if (cls != &ia32_reg_classes[CLASS_ia32_vfp])
1727 pred = be_get_Copy_op(n);
1728 out = x87_get_irn_register(n);
1729 op1 = x87_get_irn_register(pred);
1730 live = vfp_live_args_after(state->sim, n, REGMASK(out));
1732 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1733 arch_register_get_name(op1), arch_register_get_name(out)));
1734 DEBUG_ONLY(vfp_dump_live(live);)
1736 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1738 if (is_vfp_live(arch_register_get_index(op1), live)) {
1739 /* Operand is still live, a real copy. We need here an fpush that can
1740 hold a a register, so use the fpushCopy or recreate constants */
1741 node = create_Copy(state, n);
1743 /* We have to make sure the old value doesn't go dead (which can happen
1744 * when we recreate constants). As the simulator expected that value in
1745 * the pred blocks. This is unfortunate as removing it would save us 1
1746 * instruction, but we would have to rerun all the simulation to get
1749 next = sched_next(n);
1752 sched_add_before(next, node);
/* pred lost its last user (e.g. a recreated constant): keep it alive */
1754 if (get_irn_n_edges(pred) == 0) {
1755 keep_float_node_alive(pred);
1758 DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
1760 out_idx = x87_on_stack(state, arch_register_get_index(out));
1762 if (out_idx >= 0 && out_idx != op1_idx) {
1763 /* Matze: out already on stack? how can this happen? */
1764 panic("invalid stack state");
1767 /* op1 must be killed and placed where out is */
1769 ia32_x87_attr_t *attr;
1770 /* best case, simple remove and rename */
1771 x87_patch_insn(n, op_ia32_Pop);
1772 attr = get_ia32_x87_attr(n);
1773 attr->x87[0] = op1 = get_st_reg(0);
1776 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1778 ia32_x87_attr_t *attr;
1779 /* move op1 to tos, store and pop it */
1781 x87_create_fxch(state, n, op1_idx);
1784 x87_patch_insn(n, op_ia32_Pop);
1785 attr = get_ia32_x87_attr(n);
1786 attr->x87[0] = op1 = get_st_reg(out_idx);
1789 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1791 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1794 /* just a virtual copy */
1795 x87_set_st(state, arch_register_get_index(out), pred, op1_idx);
1796 /* don't remove the node to keep the verifier quiet :),
1797 the emitter won't emit any code for the node */
1800 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1805 return NO_NODE_ADDED;
1809 * Returns the vf0 result Proj of a Call.
1811 * @para call the Call node
/* Scans the Call's out edges for the Proj with number pn_ia32_Call_vf0,
 * i.e. the floating point return value in st(0).
 * NOTE(review): listing has gaps — the return statements and braces are not
 * visible here; code kept byte-identical. */
1813 static ir_node *get_call_result_proj(ir_node *call)
1815 /* search the result proj */
1816 foreach_out_edge(call, edge) {
1817 ir_node *proj = get_edge_src_irn(edge);
1818 long pn = get_Proj_proj(proj);
1820 if (pn == pn_ia32_Call_vf0)
/* Simulate an ia32 Asm node: inline assembly with x87 register constraints
 * is not supported, so panic if any input or output requires the vfp class.
 * @return NO_NODE_ADDED
 * NOTE(review): listing has gaps (some braces not visible); code kept
 * byte-identical. */
1827 static int sim_Asm(x87_state *const state, ir_node *const n)
1831 for (size_t i = get_irn_arity(n); i-- != 0;) {
1832 arch_register_req_t const *const req = arch_get_irn_register_req_in(n, i);
1833 if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1834 panic("cannot handle %+F with x87 constraints", n);
1837 for (size_t i = arch_get_irn_n_outs(n); i-- != 0;) {
1838 arch_register_req_t const *const req = arch_get_irn_register_req_out(n, i);
1839 if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1840 panic("cannot handle %+F with x87 constraints", n);
1843 return NO_NODE_ADDED;
1847 * Simulate a ia32_Call.
1849 * @param state the x87 state
1850 * @param n the node that should be simulated (and patched)
1852 * @return NO_NODE_ADDED
/* The x87 stack must be empty across a call; if the callee returns a float,
 * the return value appears in st(0), so the result Proj is pushed onto the
 * simulated stack — even when the result is unused.
 * NOTE(review): listing has gaps (declarations of mode/res_type/resproj and
 * early returns are partly missing from view); code kept byte-identical. */
1854 static int sim_Call(x87_state *state, ir_node *n)
1856 ir_type *call_tp = get_ia32_call_attr_const(n)->call_tp;
1860 const arch_register_t *reg;
1862 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1864 /* at the begin of a call the x87 state should be empty */
1865 assert(state->depth == 0 && "stack not empty before call");
1867 if (get_method_n_ress(call_tp) <= 0)
1871 * If the called function returns a float, it is returned in st(0).
1872 * This even happens if the return value is NOT used.
1873 * Moreover, only one return result is supported.
1875 res_type = get_method_res_type(call_tp, 0);
1876 mode = get_type_mode(res_type);
/* non-float results do not touch the x87 stack */
1878 if (mode == NULL || !mode_is_float(mode))
1881 resproj = get_call_result_proj(n);
1882 assert(resproj != NULL);
1884 reg = x87_get_irn_register(resproj);
1885 x87_push(state, arch_register_get_index(reg), resproj);
1888 DB((dbg, LEVEL_1, "Stack after: "));
1889 DEBUG_ONLY(x87_dump_stack(state);)
1891 return NO_NODE_ADDED;
1895 * Simulate a be_Return.
1897 * @param state the x87 state
1898 * @param n the node that should be simulated (and patched)
1900 * @return NO_NODE_ADDED
/* At a Return only the float return values may remain on the x87 stack
 * (checked in debug builds); they are then popped from the simulated state.
 * NOTE(review): listing has gaps (the n_float_res increment and the virtual
 * pop loop are missing from view); code kept byte-identical. */
1902 static int sim_Return(x87_state *state, ir_node *n)
1904 #ifdef DEBUG_libfirm
1905 /* only floating point return values must reside on stack */
1906 int n_float_res = 0;
1907 int const n_res = be_Return_get_n_rets(n);
1908 for (int i = 0; i < n_res; ++i) {
1909 ir_node *const res = get_irn_n(n, n_be_Return_val + i);
1910 if (mode_is_float(get_irn_mode(res)))
1913 assert(x87_get_depth(state) == n_float_res);
1916 /* pop them virtually */
1918 return NO_NODE_ADDED;
1922 * Simulate a be_Perm.
1924 * @param state the x87 state
1925 * @param irn the node that should be simulated (and patched)
1927 * @return NO_NODE_ADDED
/* A Perm of fp values needs no machine code on x87: the inputs are already
 * on the fp stack and pairwise distinct, so it suffices to relabel the
 * simulated stack slots according to the Perm's Projs.
 * NOTE(review): listing has gaps (stack_pos assignment and braces missing
 * from view); code kept byte-identical. */
1929 static int sim_Perm(x87_state *state, ir_node *irn)
1932 ir_node *pred = get_irn_n(irn, 0);
1935 /* handle only floating point Perms */
1936 if (! mode_is_float(get_irn_mode(pred)))
1937 return NO_NODE_ADDED;
1939 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1941 /* Perm is a pure virtual instruction on x87.
1942 All inputs must be on the FPU stack and are pairwise
1943 different from each other.
1944 So, all we need to do is to permutate the stack state. */
1945 n = get_irn_arity(irn);
1946 NEW_ARR_A(int, stack_pos, n);
1948 /* collect old stack positions */
1949 for (i = 0; i < n; ++i) {
1950 const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
1951 int idx = x87_on_stack(state, arch_register_get_index(inreg));
1953 assert(idx >= 0 && "Perm argument not on x87 stack");
1957 /* now do the permutation */
1958 foreach_out_edge(irn, edge) {
1959 ir_node *proj = get_edge_src_irn(edge);
1960 const arch_register_t *out = x87_get_irn_register(proj);
1961 long num = get_Proj_proj(proj);
1963 assert(0 <= num && num < n && "More Proj's than Perm inputs");
1964 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
1966 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1968 return NO_NODE_ADDED;
1972 * Kill any dead registers at block start by popping them from the stack.
1974 * @param sim the simulator handle
1975 * @param block the current block
1976 * @param state the x87 state at the begin of the block
/* Builds a bitmask of stack slots whose vfp register is dead at block entry,
 * then pops them before the first scheduled instruction. Two fast paths:
 * if everything is dead, a single femms/emms clears the whole fp state; and
 * adjacent dead TOS slots are combined into double-pops where possible.
 * NOTE(review): listing has gaps (kill_mask declaration, loop braces and the
 * num_pop assignments are missing from view); code kept byte-identical. */
1978 static void x87_kill_deads(x87_simulator *const sim, ir_node *const block, x87_state *const state)
1980 ir_node *first_insn = sched_first(block);
1981 ir_node *keep = NULL;
1982 unsigned live = vfp_live_args_after(sim, block, 0);
1984 int i, depth, num_pop;
/* mark every stack slot whose register is not live-in */
1987 depth = x87_get_depth(state);
1988 for (i = depth - 1; i >= 0; --i) {
1989 int reg = x87_get_st_reg(state, i);
1991 if (! is_vfp_live(reg, live))
1992 kill_mask |= (1 << i);
1996 DB((dbg, LEVEL_1, "Killing deads:\n"));
1997 DEBUG_ONLY(vfp_dump_live(live);)
1998 DEBUG_ONLY(x87_dump_stack(state);)
2000 if (kill_mask != 0 && live == 0) {
2001 /* special case: kill all registers */
2002 if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
2003 if (ia32_cg_config.use_femms) {
2004 /* use FEMMS on AMD processors to clear all */
2005 keep = new_bd_ia32_femms(NULL, block);
2007 /* use EMMS to clear all */
2008 keep = new_bd_ia32_emms(NULL, block);
2010 sched_add_before(first_insn, keep);
2016 /* now kill registers */
2018 /* we can only kill from TOS, so bring them up */
2019 if (! (kill_mask & 1)) {
2020 /* search from behind, because we can to a double-pop */
2021 for (i = depth - 1; i >= 0; --i) {
2022 if (kill_mask & (1 << i)) {
2023 kill_mask &= ~(1 << i);
2030 x87_set_st(state, -1, keep, i);
2031 x87_create_fxch(state, first_insn, i);
2034 if ((kill_mask & 3) == 3) {
2035 /* we can do a double-pop */
2039 /* only a single pop */
2044 kill_mask >>= num_pop;
2045 keep = x87_create_fpop(state, first_insn, num_pop);
2052 * Run a simulation and fix all virtual instructions for a block.
2054 * @param sim the simulator handle
2055 * @param block the current block
/* Per-block driver: clones the incoming state, kills dead registers at block
 * entry, then walks the schedule calling the per-opcode simulator functions
 * registered in op->ops.generic. Finally propagates the end state to all
 * successors, either seeding their begin state (and enqueueing them) or
 * shuffling the stack to match an already-fixed begin state.
 * NOTE(review): listing has gaps (declarations of n/next/func/node_inserted
 * and several braces are missing from view); code kept byte-identical. */
2057 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
2060 blk_state *bl_state = x87_get_bl_state(sim, block);
2061 x87_state *state = bl_state->begin;
2062 ir_node *start_block;
2064 assert(state != NULL);
2065 /* already processed? */
2066 if (bl_state->end != NULL)
2069 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2070 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2071 DEBUG_ONLY(x87_dump_stack(state);)
2073 /* create a new state, will be changed */
2074 state = x87_clone_state(sim, state);
2075 /* at block begin, kill all dead registers */
2076 x87_kill_deads(sim, block, state);
2078 /* beware, n might change */
2079 for (n = sched_first(block); !sched_is_end(n); n = next) {
2082 ir_op *op = get_irn_op(n);
2085 * get the next node to be simulated here.
2086 * n might be completely removed from the schedule-
2088 next = sched_next(n);
/* dispatch to the simulator function registered for this opcode */
2089 if (op->ops.generic != NULL) {
2090 func = (sim_func)op->ops.generic;
2093 node_inserted = (*func)(state, n);
2096 * sim_func might have added an additional node after n,
2097 * so update next node
2098 * beware: n must not be changed by sim_func
2099 * (i.e. removed from schedule) in this case
2101 if (node_inserted != NO_NODE_ADDED)
2102 next = sched_next(n);
2106 start_block = get_irg_start_block(get_irn_irg(block));
2108 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state);)
2110 /* check if the state must be shuffled */
2111 foreach_block_succ(block, edge) {
2112 ir_node *succ = get_edge_src_irn(edge);
2113 blk_state *succ_state;
2115 if (succ == start_block)
2118 succ_state = x87_get_bl_state(sim, succ);
2120 if (succ_state->begin == NULL) {
2121 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2122 DEBUG_ONLY(x87_dump_stack(state);)
2123 succ_state->begin = state;
2125 waitq_put(sim->worklist, succ);
2127 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2128 /* There is already a begin state for the successor, bad.
2129 Do the necessary permutations.
2130 Note that critical edges are removed, so this is always possible:
2131 If the successor has more than one possible input, then it must
2134 x87_shuffle(block, state, succ_state->begin);
2137 bl_state->end = state;
2141 * Register a simulator function.
2143 * @param op the opcode to simulate
2144 * @param func the simulator function for the opcode
/* Stores func in the opcode's generic function slot; the dispatch loop in
 * x87_simulate_block reads it back. Asserts the slot was not already taken.
 * NOTE(review): function braces are not visible in this gappy listing. */
2146 static void register_sim(ir_op *op, sim_func func)
2148 assert(op->ops.generic == NULL);
2149 op->ops.generic = (op_func) func;
2153 * Create a new x87 simulator.
2155 * @param sim a simulator handle, will be initialized
2156 * @param irg the current graph
/* Sets up the simulator's obstack, block-state map and liveness cache, then
 * clears all generic opcode slots and registers one simulator function per
 * virtual fp opcode (and for the be_* nodes that may carry fp values).
 * NOTE(review): listing has gaps (closing brace not visible); code kept
 * byte-identical. */
2158 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
2160 obstack_init(&sim->obst);
2161 sim->blk_states = pmap_create();
2162 sim->n_idx = get_irg_last_idx(irg);
2163 sim->live = OALLOCN(&sim->obst, vfp_liveness, sim->n_idx);
2165 DB((dbg, LEVEL_1, "--------------------------------\n"
2166 "x87 Simulator started for %+F\n", irg));
2168 /* set the generic function pointer of instruction we must simulate */
2169 ir_clear_opcodes_generic_func();
2171 register_sim(op_ia32_Asm, sim_Asm);
2172 register_sim(op_ia32_Call, sim_Call);
2173 register_sim(op_ia32_vfld, sim_fld);
2174 register_sim(op_ia32_vfild, sim_fild);
2175 register_sim(op_ia32_vfld1, sim_fld1);
2176 register_sim(op_ia32_vfldz, sim_fldz);
2177 register_sim(op_ia32_vfadd, sim_fadd);
2178 register_sim(op_ia32_vfsub, sim_fsub);
2179 register_sim(op_ia32_vfmul, sim_fmul);
2180 register_sim(op_ia32_vfdiv, sim_fdiv);
2181 register_sim(op_ia32_vfprem, sim_fprem);
2182 register_sim(op_ia32_vfabs, sim_fabs);
2183 register_sim(op_ia32_vfchs, sim_fchs);
2184 register_sim(op_ia32_vfist, sim_fist);
2185 register_sim(op_ia32_vfisttp, sim_fisttp);
2186 register_sim(op_ia32_vfst, sim_fst);
2187 register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
2188 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
2189 register_sim(op_ia32_vFucomi, sim_Fucom);
2190 register_sim(op_be_Copy, sim_Copy);
2191 register_sim(op_be_Return, sim_Return);
2192 register_sim(op_be_Perm, sim_Perm);
2193 register_sim(op_be_Keep, sim_Keep);
2197 * Destroy a x87 simulator.
2199 * @param sim the simulator handle
/* Frees the block-state map and the obstack created in x87_init_simulator
 * (the liveness cache lives on the obstack, so it is freed with it).
 * NOTE(review): function braces not visible in this gappy listing. */
2201 static void x87_destroy_simulator(x87_simulator *sim)
2203 pmap_destroy(sim->blk_states);
2204 obstack_free(&sim->obst, NULL);
2205 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2209 * Pre-block walker: calculate the liveness information for the block
2210 * and store it into the sim->live cache.
/* Thin adapter so update_liveness() can be used as an irg block-walker
 * callback; data is the x87_simulator handle.
 * NOTE(review): function braces not visible in this gappy listing. */
2212 static void update_liveness_walker(ir_node *block, void *data)
2214 x87_simulator *sim = (x87_simulator*)data;
2215 update_liveness(sim, block);
2219 * Run a simulation and fix all virtual instructions for a graph.
2220 * Replaces all virtual floating point instructions and registers
/* Public entry point: initializes the simulator, seeds the start block with
 * an empty fp-stack state, precomputes liveness for all blocks (new nodes
 * added during simulation would break lazy computation), then processes the
 * worklist of blocks until fixpoint and tears the simulator down.
 * NOTE(review): listing has gaps (declarations of sim/empty and some braces
 * are missing from view); code kept byte-identical. */
2223 void ia32_x87_simulate_graph(ir_graph *irg)
2225 /* TODO improve code quality (less executed fxch) by using execfreqs */
2227 ir_node *block, *start_block;
2228 blk_state *bl_state;
2231 /* create the simulator */
2232 x87_init_simulator(&sim, irg);
2234 start_block = get_irg_start_block(irg);
2235 bl_state = x87_get_bl_state(&sim, start_block);
2237 /* start with the empty state */
2239 bl_state->begin = &empty;
2241 sim.worklist = new_waitq();
2242 waitq_put(sim.worklist, start_block);
2244 be_assure_live_sets(irg);
2245 sim.lv = be_get_irg_liveness(irg);
2247 /* Calculate the liveness for all nodes. We must precalculate this info,
2248 * because the simulator adds new nodes (possible before Phi nodes) which
2249 * would let a lazy calculation fail.
2250 * On the other hand we reduce the computation amount due to
2251 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2253 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
/* drain the worklist: each block may enqueue its successors */
2257 block = (ir_node*)waitq_get(sim.worklist);
2258 x87_simulate_block(&sim, block);
2259 } while (! waitq_empty(sim.worklist));
2262 del_waitq(sim.worklist);
2263 x87_destroy_simulator(&sim);
2266 /* Initializes the x87 simulator. */
2267 void ia32_init_x87(void)
2269 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");