2 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
33 #include "iredges_t.h"
48 #include "bearch_ia32_t.h"
49 #include "ia32_new_nodes.h"
50 #include "gen_ia32_new_nodes.h"
51 #include "gen_ia32_regalloc_if.h"
53 #include "ia32_architecture.h"
55 #define MASK_TOS(x) ((x) & (N_ia32_st_REGS - 1))
57 /** the debug handle */
58 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
60 /* Forward declaration. */
61 typedef struct x87_simulator x87_simulator;
64 * An exchange template.
65 * Note that our virtual functions have the same inputs
66 * and attributes as the real ones, so we can simple exchange
68 * Further, x87 supports inverse instructions, so we can handle them.
70 typedef struct exchange_tmpl {
71 ir_op *normal_op; /**< the normal one */
72 ir_op *reverse_op; /**< the reverse one if exists */
73 ir_op *normal_pop_op; /**< the normal one with tos pop */
74 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
78 * An entry on the simulated x87 stack.
80 typedef struct st_entry {
81 int reg_idx; /**< the virtual register index of this stack value */
82 ir_node *node; /**< the node that produced this value */
88 typedef struct x87_state {
89 st_entry st[N_ia32_st_REGS]; /**< the register stack */
90 int depth; /**< the current stack depth */
91 int tos; /**< position of the tos */
92 x87_simulator *sim; /**< The simulator. */
95 /** An empty state, used for blocks without fp instructions. */
96 static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
97 static x87_state *empty = (x87_state *)&_empty;
/**
 * Return values of the instruction simulator functions.
 */
enum {
	NO_NODE_ADDED = 0, /**< No node that needs simulation was added. */
	NODE_ADDED    = 1  /**< A node that must be simulated was added by the simulator
	                        in the schedule AFTER the current node. */
};
109 * The type of an instruction simulator function.
111 * @param state the x87 state
112 * @param n the node to be simulated
114 * @return NODE_ADDED if a node was added AFTER n in schedule that MUST be
116 * NO_NODE_ADDED otherwise
118 typedef int (*sim_func)(x87_state *state, ir_node *n);
121 * A block state: Every block has a x87 state at the beginning and at the end.
123 typedef struct blk_state {
124 x87_state *begin; /**< state at the begin or NULL if not assigned */
125 x87_state *end; /**< state at the end or NULL if not assigned */
/** liveness bitset for vfp registers (one bit per virtual fp register). */
typedef unsigned char vfp_liveness;
134 struct x87_simulator {
135 struct obstack obst; /**< An obstack for fast allocating. */
136 pmap *blk_states; /**< Map blocks to states. */
137 be_lv_t *lv; /**< intrablock liveness. */
138 vfp_liveness *live; /**< Liveness information. */
139 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
140 waitq *worklist; /**< Worklist of blocks that must be processed. */
144 * Returns the current stack depth.
146 * @param state the x87 state
148 * @return the x87 stack depth
150 static int x87_get_depth(const x87_state *state)
155 static st_entry *x87_get_entry(x87_state *const state, int const pos)
157 assert(0 <= pos && pos < state->depth);
158 return &state->st[MASK_TOS(state->tos + pos)];
162 * Return the virtual register index at st(pos).
164 * @param state the x87 state
165 * @param pos a stack position
167 * @return the vfp register index that produced the value at st(pos)
169 static int x87_get_st_reg(const x87_state *state, int pos)
171 return x87_get_entry((x87_state*)state, pos)->reg_idx;
#ifdef DEBUG_libfirm
/**
 * Dump the stack for debugging.
 *
 * @param state  the x87 state
 */
static void x87_dump_stack(const x87_state *state)
{
	/* print from the bottom of the stack up to the tos */
	for (int i = state->depth; i-- != 0;) {
		st_entry const *const entry = x87_get_entry((x87_state*)state, i);
		DB((dbg, LEVEL_2, "vf%d(%+F) ", entry->reg_idx, entry->node));
	}
	DB((dbg, LEVEL_2, "<-- TOS\n"));
}
#endif /* DEBUG_libfirm */
191 * Set a virtual register to st(pos).
193 * @param state the x87 state
194 * @param reg_idx the vfp register index that should be set
195 * @param node the IR node that produces the value of the vfp register
196 * @param pos the stack position where the new value should be entered
198 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
200 st_entry *const entry = x87_get_entry(state, pos);
201 entry->reg_idx = reg_idx;
204 DB((dbg, LEVEL_2, "After SET_REG: "));
205 DEBUG_ONLY(x87_dump_stack(state);)
209 * Set the tos virtual register.
211 * @param state the x87 state
212 * @param reg_idx the vfp register index that should be set
213 * @param node the IR node that produces the value of the vfp register
215 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node)
217 x87_set_st(state, reg_idx, node, 0);
221 * Swap st(0) with st(pos).
223 * @param state the x87 state
224 * @param pos the stack position to change the tos with
226 static void x87_fxch(x87_state *state, int pos)
228 st_entry *const a = x87_get_entry(state, pos);
229 st_entry *const b = x87_get_entry(state, 0);
230 st_entry const t = *a;
234 DB((dbg, LEVEL_2, "After FXCH: "));
235 DEBUG_ONLY(x87_dump_stack(state);)
239 * Convert a virtual register to the stack index.
241 * @param state the x87 state
242 * @param reg_idx the register vfp index
244 * @return the stack position where the register is stacked
245 * or -1 if the virtual register was not found
247 static int x87_on_stack(const x87_state *state, int reg_idx)
249 for (int i = 0; i < state->depth; ++i) {
250 if (x87_get_st_reg(state, i) == reg_idx)
257 * Push a virtual Register onto the stack, double pushed allowed.
259 * @param state the x87 state
260 * @param reg_idx the register vfp index
261 * @param node the node that produces the value of the vfp register
263 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node)
265 assert(state->depth < N_ia32_st_REGS && "stack overrun");
268 state->tos = MASK_TOS(state->tos - 1);
269 st_entry *const entry = x87_get_entry(state, 0);
270 entry->reg_idx = reg_idx;
273 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state);)
277 * Push a virtual Register onto the stack, double pushes are NOT allowed.
279 * @param state the x87 state
280 * @param reg_idx the register vfp index
281 * @param node the node that produces the value of the vfp register
283 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
285 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
287 x87_push_dbl(state, reg_idx, node);
291 * Pop a virtual Register from the stack.
293 * @param state the x87 state
295 static void x87_pop(x87_state *state)
297 assert(state->depth > 0 && "stack underrun");
300 state->tos = MASK_TOS(state->tos + 1);
302 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state);)
306 * Empty the fpu stack
308 * @param state the x87 state
310 static void x87_emms(x87_state *state)
317 * Returns the block state of a block.
319 * @param sim the x87 simulator handle
320 * @param block the current block
322 * @return the block state
324 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
326 blk_state *res = pmap_get(blk_state, sim->blk_states, block);
329 res = OALLOC(&sim->obst, blk_state);
333 pmap_insert(sim->blk_states, block, res);
340 * Creates a new x87 state.
342 * @param sim the x87 simulator handle
344 * @return a new x87 state
346 static x87_state *x87_alloc_state(x87_simulator *sim)
348 x87_state *res = OALLOC(&sim->obst, x87_state);
357 * @param sim the x87 simulator handle
358 * @param src the x87 state that will be cloned
360 * @return a cloned copy of the src state
362 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
364 x87_state *res = x87_alloc_state(sim);
371 * Patch a virtual instruction into a x87 one and return
372 * the node representing the result value.
374 * @param n the IR node to patch
375 * @param op the x87 opcode to patch in
377 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
379 ir_mode *mode = get_irn_mode(n);
384 if (mode == mode_T) {
385 /* patch all Proj's */
386 foreach_out_edge(n, edge) {
387 ir_node *proj = get_edge_src_irn(edge);
389 mode = get_irn_mode(proj);
390 if (mode_is_float(mode)) {
392 set_irn_mode(proj, ia32_reg_classes[CLASS_ia32_st].mode);
396 } else if (mode_is_float(mode))
397 set_irn_mode(n, ia32_reg_classes[CLASS_ia32_st].mode);
402 * Returns the first Proj of a mode_T node having a given mode.
404 * @param n the mode_T node
405 * @param m the desired mode of the Proj
406 * @return The first Proj of mode @p m found or NULL.
408 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
410 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
412 foreach_out_edge(n, edge) {
413 ir_node *proj = get_edge_src_irn(edge);
414 if (get_irn_mode(proj) == m)
422 * Wrap the arch_* function here so we can check for errors.
424 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
426 const arch_register_t *res = arch_get_irn_register(irn);
428 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
432 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
435 const arch_register_t *res = arch_get_irn_register_out(irn, pos);
437 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
441 static inline const arch_register_t *get_st_reg(int index)
443 return &ia32_registers[REG_ST0 + index];
446 /* -------------- x87 perm --------------- */
449 * Creates a fxch for shuffle.
451 * @param state the x87 state
452 * @param pos parameter for fxch
453 * @param block the block were fxch is inserted
455 * Creates a new fxch node and reroute the user of the old node
458 * @return the fxch node
460 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block)
463 ia32_x87_attr_t *attr;
465 fxch = new_bd_ia32_fxch(NULL, block);
466 attr = get_ia32_x87_attr(fxch);
467 attr->x87[0] = get_st_reg(pos);
468 attr->x87[2] = get_st_reg(0);
472 x87_fxch(state, pos);
477 * Calculate the necessary permutations to reach dst_state.
479 * These permutations are done with fxch instructions and placed
480 * at the end of the block.
482 * Note that critical edges are removed here, so we need only
483 * a shuffle if the current block has only one successor.
485 * @param block the current block
486 * @param state the current x87 stack state, might be modified
487 * @param dst_state destination state
/*
 * NOTE(review): this region of the listing is a garbled extraction — every
 * line carries a fused original line number and several lines (braces,
 * loop bodies, the final return) are missing.  The code is left byte-
 * identical here; restore the full function from version control before
 * editing.  Visible intent: compute the fxch permutation sequence (cycle
 * decomposition over the stack slots) that transforms `state` into
 * `dst_state` and schedule the fxch nodes at the block end.
 */
491 static x87_state *x87_shuffle(ir_node *block, x87_state *state, const x87_state *dst_state)
493 int i, n_cycles, k, ri;
494 unsigned cycles[4], all_mask;
495 char cycle_idx[4][8];
496 ir_node *fxch, *before, *after;
498 assert(state->depth == dst_state->depth);
500 /* Some mathematics here:
501 If we have a cycle of length n that includes the tos,
502 we need n-1 exchange operations.
503 We can always add the tos and restore it, so we need
504 n+1 exchange operations for a cycle not containing the tos.
505 So, the maximum of needed operations is for a cycle of 7
506 not including the tos == 8.
507 This is the same number of ops we would need for using stores,
508 so exchange is cheaper (we save the loads).
509 On the other hand, we might need an additional exchange
510 in the next block to bring one operand on top, so the
511 number of ops in the first case is identical.
512 Further, no more than 4 cycles can exists (4 x 2).
514 all_mask = (1 << (state->depth)) - 1;
516 for (n_cycles = 0; all_mask; ++n_cycles) {
517 int src_idx, dst_idx;
519 /* find the first free slot */
520 for (i = 0; i < state->depth; ++i) {
521 if (all_mask & (1 << i)) {
522 all_mask &= ~(1 << i);
524 /* check if there are differences here */
525 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
531 /* no more cycles found */
536 cycles[n_cycles] = (1 << i);
537 cycle_idx[n_cycles][k++] = i;
538 for (src_idx = i; ; src_idx = dst_idx) {
539 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
541 if ((all_mask & (1 << dst_idx)) == 0)
544 cycle_idx[n_cycles][k++] = dst_idx;
545 cycles[n_cycles] |= (1 << dst_idx);
546 all_mask &= ~(1 << dst_idx);
548 cycle_idx[n_cycles][k] = -1;
552 /* no permutation needed */
556 /* Hmm: permutation needed */
557 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
558 DEBUG_ONLY(x87_dump_stack(state);)
559 DB((dbg, LEVEL_2, " to\n"));
560 DEBUG_ONLY(x87_dump_stack(dst_state);)
564 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
565 for (ri = 0; ri < n_cycles; ++ri) {
566 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
567 for (k = 0; cycle_idx[ri][k] != -1; ++k)
568 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
569 DB((dbg, LEVEL_2, "\n"));
576 * Find the place node must be insert.
577 * We have only one successor block, so the last instruction should
580 before = sched_last(block);
581 assert(is_cfop(before));
583 /* now do the permutations */
584 for (ri = 0; ri < n_cycles; ++ri) {
585 if ((cycles[ri] & 1) == 0) {
586 /* this cycle does not include the tos */
587 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
589 sched_add_after(after, fxch);
591 sched_add_before(before, fxch);
594 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
595 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
597 sched_add_after(after, fxch);
599 sched_add_before(before, fxch);
602 if ((cycles[ri] & 1) == 0) {
603 /* this cycle does not include the tos */
604 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
605 sched_add_after(after, fxch);
612 * Create a fxch node before another node.
614 * @param state the x87 state
615 * @param n the node after the fxch
616 * @param pos exchange st(pos) with st(0)
620 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
623 ia32_x87_attr_t *attr;
624 ir_node *block = get_nodes_block(n);
626 x87_fxch(state, pos);
628 fxch = new_bd_ia32_fxch(NULL, block);
629 attr = get_ia32_x87_attr(fxch);
630 attr->x87[0] = get_st_reg(pos);
631 attr->x87[2] = get_st_reg(0);
635 sched_add_before(n, fxch);
636 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
641 * Create a fpush before node n.
643 * @param state the x87 state
644 * @param n the node after the fpush
645 * @param pos push st(pos) on stack
646 * @param op_idx replace input op_idx of n with the fpush result
648 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx)
650 ir_node *fpush, *pred = get_irn_n(n, op_idx);
651 ia32_x87_attr_t *attr;
652 const arch_register_t *out = x87_get_irn_register(pred);
654 x87_push_dbl(state, arch_register_get_index(out), pred);
656 fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
657 attr = get_ia32_x87_attr(fpush);
658 attr->x87[0] = get_st_reg(pos);
659 attr->x87[2] = get_st_reg(0);
662 sched_add_before(n, fpush);
664 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
668 * Create a fpop before node n.
670 * @param state the x87 state
671 * @param n the node after the fpop
672 * @param num pop 1 or 2 values
674 * @return the fpop node
676 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
678 ir_node *fpop = NULL;
679 ia32_x87_attr_t *attr;
684 if (ia32_cg_config.use_ffreep)
685 fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
687 fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
688 attr = get_ia32_x87_attr(fpop);
689 attr->x87[0] = get_st_reg(0);
690 attr->x87[1] = get_st_reg(0);
691 attr->x87[2] = get_st_reg(0);
694 sched_add_before(n, fpop);
695 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
700 /* --------------------------------- liveness ------------------------------------------ */
703 * The liveness transfer function.
704 * Updates a live set over a single step from a given node to its predecessor.
705 * Everything defined at the node is removed from the set, the uses of the node get inserted.
707 * @param irn The node at which liveness should be computed.
708 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
709 * the registers live after irn.
711 * @return The live bitset.
713 static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
716 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
718 if (get_irn_mode(irn) == mode_T) {
719 foreach_out_edge(irn, edge) {
720 ir_node *proj = get_edge_src_irn(edge);
722 if (arch_irn_consider_in_reg_alloc(cls, proj)) {
723 const arch_register_t *reg = x87_get_irn_register(proj);
724 live &= ~(1 << arch_register_get_index(reg));
727 } else if (arch_irn_consider_in_reg_alloc(cls, irn)) {
728 const arch_register_t *reg = x87_get_irn_register(irn);
729 live &= ~(1 << arch_register_get_index(reg));
732 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
733 ir_node *op = get_irn_n(irn, i);
735 if (mode_is_float(get_irn_mode(op)) &&
736 arch_irn_consider_in_reg_alloc(cls, op)) {
737 const arch_register_t *reg = x87_get_irn_register(op);
738 live |= 1 << arch_register_get_index(reg);
745 * Put all live virtual registers at the end of a block into a bitset.
747 * @param sim the simulator handle
748 * @param bl the block
750 * @return The live bitset at the end of this block
752 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
754 vfp_liveness live = 0;
755 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
756 const be_lv_t *lv = sim->lv;
758 be_lv_foreach(lv, block, be_lv_state_end, node) {
759 const arch_register_t *reg;
760 if (!arch_irn_consider_in_reg_alloc(cls, node))
763 reg = x87_get_irn_register(node);
764 live |= 1 << arch_register_get_index(reg);
/** get the register mask from an arch_register */
#define REGMASK(reg) (1 << (arch_register_get_index(reg)))
774 * Return a bitset of argument registers which are live at the end of a node.
776 * @param sim the simulator handle
777 * @param pos the node
778 * @param kill kill mask for the output registers
780 * @return The live bitset.
782 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
784 unsigned idx = get_irn_idx(pos);
786 assert(idx < sim->n_idx);
787 return sim->live[idx] & ~kill;
791 * Calculate the liveness for a whole block and cache it.
793 * @param sim the simulator handle
794 * @param block the block
796 static void update_liveness(x87_simulator *sim, ir_node *block)
798 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
801 /* now iterate through the block backward and cache the results */
802 sched_foreach_reverse(block, irn) {
803 /* stop at the first Phi: this produces the live-in */
807 idx = get_irn_idx(irn);
808 sim->live[idx] = live;
810 live = vfp_liveness_transfer(irn, live);
812 idx = get_irn_idx(block);
813 sim->live[idx] = live;
/**
 * Returns true if a register is live in a set.
 *
 * @param reg_idx  the vfp register index
 * @param live     a live bitset
 */
#define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
#ifdef DEBUG_libfirm
/**
 * Dump liveness info.
 *
 * @param live  the live bitset
 */
static void vfp_dump_live(vfp_liveness live)
{
	int i;

	DB((dbg, LEVEL_2, "Live after: "));
	for (i = 0; i < 8; ++i) {
		if (live & (1 << i)) {
			DB((dbg, LEVEL_2, "vf%d ", i));
		}
	}
	DB((dbg, LEVEL_2, "\n"));
}
#endif /* DEBUG_libfirm */
844 /* --------------------------------- simulators ---------------------------------------- */
847 * Simulate a virtual binop.
849 * @param state the x87 state
850 * @param n the node that should be simulated (and patched)
851 * @param tmpl the template containing the 4 possible x87 opcodes
853 * @return NO_NODE_ADDED
/*
 * NOTE(review): garbled extraction — fused line numbers, and the lines that
 * assign op1_idx/op2_idx/out_idx/do_pop between the visible branches are
 * missing, as are most closing braces.  Left byte-identical; restore from
 * version control before editing.  Visible intent: choose between the
 * normal/reverse/pop variants of an x87 binop depending on which operands
 * are live after the node and where they sit on the simulated stack, then
 * patch the node and its x87 attributes.
 */
855 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl)
857 int op2_idx = 0, op1_idx;
858 int out_idx, do_pop = 0;
859 ia32_x87_attr_t *attr;
861 ir_node *patched_insn;
863 x87_simulator *sim = state->sim;
864 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
865 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
866 const arch_register_t *op1_reg = x87_get_irn_register(op1);
867 const arch_register_t *op2_reg = x87_get_irn_register(op2);
868 const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
869 int reg_index_1 = arch_register_get_index(op1_reg);
870 int reg_index_2 = arch_register_get_index(op2_reg);
871 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
875 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
876 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
877 arch_register_get_name(out)));
878 DEBUG_ONLY(vfp_dump_live(live);)
879 DB((dbg, LEVEL_1, "Stack before: "));
880 DEBUG_ONLY(x87_dump_stack(state);)
882 op1_idx = x87_on_stack(state, reg_index_1);
883 assert(op1_idx >= 0);
884 op1_live_after = is_vfp_live(reg_index_1, live);
886 attr = get_ia32_x87_attr(n);
887 permuted = attr->attr.data.ins_permuted;
889 if (reg_index_2 != REG_VFP_VFP_NOREG) {
892 /* second operand is a vfp register */
893 op2_idx = x87_on_stack(state, reg_index_2);
894 assert(op2_idx >= 0);
895 op2_live_after = is_vfp_live(reg_index_2, live);
897 if (op2_live_after) {
898 /* Second operand is live. */
900 if (op1_live_after) {
901 /* Both operands are live: push the first one.
902 This works even for op1 == op2. */
903 x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
904 /* now do fxxx (tos=tos X op) */
908 dst = tmpl->normal_op;
910 /* Second live, first operand is dead here, bring it to tos. */
912 x87_create_fxch(state, n, op1_idx);
917 /* now do fxxx (tos=tos X op) */
919 dst = tmpl->normal_op;
922 /* Second operand is dead. */
923 if (op1_live_after) {
924 /* First operand is live: bring second to tos. */
926 x87_create_fxch(state, n, op2_idx);
931 /* now do fxxxr (tos = op X tos) */
933 dst = tmpl->reverse_op;
935 /* Both operands are dead here, pop them from the stack. */
938 /* Both are identically and on tos, no pop needed. */
939 /* here fxxx (tos = tos X tos) */
940 dst = tmpl->normal_op;
943 /* now do fxxxp (op = op X tos, pop) */
944 dst = tmpl->normal_pop_op;
948 } else if (op1_idx == 0) {
949 assert(op1_idx != op2_idx);
950 /* now do fxxxrp (op = tos X op, pop) */
951 dst = tmpl->reverse_pop_op;
955 /* Bring the second on top. */
956 x87_create_fxch(state, n, op2_idx);
957 if (op1_idx == op2_idx) {
958 /* Both are identically and on tos now, no pop needed. */
961 /* use fxxx (tos = tos X tos) */
962 dst = tmpl->normal_op;
965 /* op2 is on tos now */
967 /* use fxxxp (op = op X tos, pop) */
968 dst = tmpl->normal_pop_op;
976 /* second operand is an address mode */
977 if (op1_live_after) {
978 /* first operand is live: push it here */
979 x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
982 /* first operand is dead: bring it to tos */
984 x87_create_fxch(state, n, op1_idx);
989 /* use fxxx (tos = tos X mem) */
990 dst = permuted ? tmpl->reverse_op : tmpl->normal_op;
994 patched_insn = x87_patch_insn(n, dst);
995 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
1000 /* patch the operation */
1001 attr->x87[0] = op1_reg = get_st_reg(op1_idx);
1002 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1003 attr->x87[1] = op2_reg = get_st_reg(op2_idx);
1005 attr->x87[2] = out = get_st_reg(out_idx);
1007 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1008 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1009 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
1010 arch_register_get_name(out)));
1012 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1013 arch_register_get_name(op1_reg),
1014 arch_register_get_name(out)));
1017 return NO_NODE_ADDED;
1021 * Simulate a virtual Unop.
1023 * @param state the x87 state
1024 * @param n the node that should be simulated (and patched)
1025 * @param op the x87 opcode that will replace n's opcode
1027 * @return NO_NODE_ADDED
1029 static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
1031 x87_simulator *sim = state->sim;
1032 const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, 0));
1033 const arch_register_t *out = x87_get_irn_register(n);
1034 ia32_x87_attr_t *attr;
1035 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1037 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1038 DEBUG_ONLY(vfp_dump_live(live);)
1040 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1042 if (is_vfp_live(arch_register_get_index(op1), live)) {
1043 /* push the operand here */
1044 x87_create_fpush(state, n, op1_idx, 0);
1047 /* operand is dead, bring it to tos */
1049 x87_create_fxch(state, n, op1_idx);
1053 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1054 attr = get_ia32_x87_attr(n);
1055 attr->x87[0] = op1 = get_st_reg(0);
1056 attr->x87[2] = out = get_st_reg(0);
1057 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1059 return NO_NODE_ADDED;
1063 * Simulate a virtual Load instruction.
1065 * @param state the x87 state
1066 * @param n the node that should be simulated (and patched)
1067 * @param op the x87 opcode that will replace n's opcode
1069 * @return NO_NODE_ADDED
1071 static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
1073 const arch_register_t *out = x87_irn_get_register(n, res_pos);
1074 ia32_x87_attr_t *attr;
1076 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
1077 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1078 assert(out == x87_irn_get_register(n, res_pos));
1079 attr = get_ia32_x87_attr(n);
1080 attr->x87[2] = out = get_st_reg(0);
1081 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1083 return NO_NODE_ADDED;
1087 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1089 * @param store The store
1090 * @param old_val The former value
1091 * @param new_val The new value
1093 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
1095 foreach_out_edge_safe(old_val, edge) {
1096 ir_node *user = get_edge_src_irn(edge);
1098 if (! user || user == store)
1101 /* if the user is scheduled after the store: rewire */
1102 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1104 /* find the input of the user pointing to the old value */
1105 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1106 if (get_irn_n(user, i) == old_val)
1107 set_irn_n(user, i, new_val);
1114 * Simulate a virtual Store.
1116 * @param state the x87 state
1117 * @param n the node that should be simulated (and patched)
1118 * @param op the x87 store opcode
1119 * @param op_p the x87 store and pop opcode
/*
 * NOTE(review): garbled extraction — fused line numbers and missing lines
 * (closing braces, else keywords, the x87_pop calls after the fstp cases,
 * the final return).  Left byte-identical; restore from version control
 * before editing.  Visible intent: patch a virtual fp store into fst/fstp;
 * when the value stays live and the mode forces a popping store, either
 * push+fstp (free stack slot) or fstp+reload via a fresh vfld whose Projs
 * replace the stored value for later users.
 */
1121 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p)
1123 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1124 const arch_register_t *op2 = x87_get_irn_register(val);
1125 unsigned live = vfp_live_args_after(state->sim, n, 0);
1126 int insn = NO_NODE_ADDED;
1127 ia32_x87_attr_t *attr;
1128 int op2_reg_idx, op2_idx, depth;
1129 int live_after_node;
1132 op2_reg_idx = arch_register_get_index(op2);
1133 op2_idx = x87_on_stack(state, op2_reg_idx);
1134 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1135 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1136 assert(op2_idx >= 0);
1138 mode = get_ia32_ls_mode(n);
1139 depth = x87_get_depth(state);
1141 if (live_after_node) {
1143 Problem: fst doesn't support 96bit modes (spills), only fstp does
1144 fist doesn't support 64bit mode, only fistp
1146 - stack not full: push value and fstp
1147 - stack full: fstp value and load again
1148 Note that we cannot test on mode_E, because floats might be 96bit ...
1150 if (get_mode_size_bits(mode) > 64 || (mode_is_int(mode) && get_mode_size_bits(mode) > 32)) {
1151 if (depth < N_ia32_st_REGS) {
1152 /* ok, we have a free register: push + fstp */
1153 x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
1155 x87_patch_insn(n, op_p);
1157 ir_node *vfld, *mem, *block, *rproj, *mproj;
1158 ir_graph *irg = get_irn_irg(n);
1159 ir_node *nomem = get_irg_no_mem(irg);
1161 /* stack full here: need fstp + load */
1163 x87_patch_insn(n, op_p);
1165 block = get_nodes_block(n);
1166 vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), nomem, get_ia32_ls_mode(n));
1168 /* copy all attributes */
1169 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1170 if (is_ia32_use_frame(n))
1171 set_ia32_use_frame(vfld);
1172 set_ia32_op_type(vfld, ia32_AddrModeS);
1173 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1174 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1175 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1177 rproj = new_r_Proj(vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1178 mproj = new_r_Proj(vfld, mode_M, pn_ia32_vfld_M);
1179 mem = get_irn_Proj_for_mode(n, mode_M);
1181 assert(mem && "Store memory not found");
1183 arch_set_irn_register(rproj, op2);
1185 /* reroute all former users of the store memory to the load memory */
1186 edges_reroute(mem, mproj);
1187 /* set the memory input of the load to the store memory */
1188 set_irn_n(vfld, n_ia32_vfld_mem, mem);
1190 sched_add_after(n, vfld);
1191 sched_add_after(vfld, rproj);
1193 /* rewire all users, scheduled after the store, to the loaded value */
1194 collect_and_rewire_users(n, val, rproj);
1199 /* we can only store the tos to memory */
1201 x87_create_fxch(state, n, op2_idx);
1203 /* mode size 64 or smaller -> use normal fst */
1204 x87_patch_insn(n, op);
1207 /* we can only store the tos to memory */
1209 x87_create_fxch(state, n, op2_idx);
1212 x87_patch_insn(n, op_p);
1215 attr = get_ia32_x87_attr(n);
1216 attr->x87[1] = op2 = get_st_reg(0);
1217 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/* Generator macros: stamp out one sim_<op> wrapper per virtual opcode. */
#define _GEN_BINOP(op, rev) \
static int sim_##op(x87_state *state, ir_node *n) { \
	exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
	return sim_binop(state, n, &tmpl); \
}

#define GEN_BINOP(op)   _GEN_BINOP(op, op)
#define GEN_BINOPR(op)  _GEN_BINOP(op, op##r)

#define GEN_LOAD(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
}

#define GEN_UNOP(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_unop(state, n, op_ia32_##op); \
}

#define GEN_STORE(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
}
1265 * Simulate a virtual fisttp.
1267 * @param state the x87 state
1268 * @param n the node that should be simulated (and patched)
1270 * @return NO_NODE_ADDED
/*
 * Simulate a virtual fisttp: bring the value to the top of the x87 stack,
 * patch the node to the real ia32 fisttp opcode and record st(0) as the
 * second x87 register in the node's attributes.
 * NOTE(review): several lines (braces, the conditional around the fxch and
 * the pop bookkeeping) are elided in this excerpt.
 */
1272 static int sim_fisttp(x87_state *state, ir_node *n)
1274 ir_node               *val = get_irn_n(n, n_ia32_vfst_val);
1275 const arch_register_t *op2 = x87_get_irn_register(val);
1276 ia32_x87_attr_t *attr;
1277 int op2_reg_idx, op2_idx;
1279 op2_reg_idx = arch_register_get_index(op2);
1280 op2_idx     = x87_on_stack(state, op2_reg_idx);
1281 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
	/* the value must already live somewhere on the simulated stack */
1282 assert(op2_idx >= 0);
1284 /* Note: although the value is still live here, it is destroyed because
1285    of the pop. The register allocator is aware of that and introduced a copy
1286    if the value must be alive. */
1288 /* we can only store the tos to memory */
1290 	x87_create_fxch(state, n, op2_idx);
	/* replace the virtual opcode by the real fisttp */
1293 x87_patch_insn(n, op_ia32_fisttp);
1295 attr = get_ia32_x87_attr(n);
1296 attr->x87[1] = op2 = get_st_reg(0);
1297 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
1299 return NO_NODE_ADDED;
1303 * Simulate a virtual FtstFnstsw.
1305 * @param state the x87 state
1306 * @param n the node that should be simulated (and patched)
1308 * @return NO_NODE_ADDED
/*
 * Simulate a virtual FtstFnstsw: bring the tested operand to the top of
 * the stack, patch the node to the real FtstFnstsw opcode and pop the
 * value afterwards if it is dead.
 * NOTE(review): braces and the conditional guarding the fxch are elided
 * in this excerpt.
 */
1310 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1312 x87_simulator         *sim         = state->sim;
1313 ia32_x87_attr_t       *attr        = get_ia32_x87_attr(n);
1314 ir_node               *op1_node    = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1315 const arch_register_t *reg1        = x87_get_irn_register(op1_node);
1316 int                    reg_index_1 = arch_register_get_index(reg1);
1317 int                    op1_idx     = x87_on_stack(state, reg_index_1);
1318 unsigned               live        = vfp_live_args_after(sim, n, 0);
1320 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, arch_register_get_name(reg1)));
1321 DEBUG_ONLY(vfp_dump_live(live);)
1322 DB((dbg, LEVEL_1, "Stack before: "));
1323 DEBUG_ONLY(x87_dump_stack(state);)
	/* operand must be on the simulated x87 stack */
1324 assert(op1_idx >= 0);
1327 /* bring the value to tos */
1328 x87_create_fxch(state, n, op1_idx);
1332 /* patch the operation */
1333 x87_patch_insn(n, op_ia32_FtstFnstsw);
1334 reg1          = get_st_reg(op1_idx);
1335 attr->x87[0]  = reg1;
1336 attr->x87[1]  = NULL;
1337 attr->x87[2]  = NULL;
	/* value dead after the test: schedule an fpop right behind the node */
1339 if (!is_vfp_live(reg_index_1, live))
1340 	x87_create_fpop(state, sched_next(n), 1);
1342 return NO_NODE_ADDED;
1348 * @param state the x87 state
1349 * @param n the node that should be simulated (and patched)
1351 * @return NO_NODE_ADDED
/*
 * Simulate a virtual Fucom-style compare (vFucomFnstsw / vFucomi).
 *
 * Performs a case analysis on the liveness of both operands and their
 * positions on the simulated x87 stack, inserting fxch nodes so the
 * operands end up in positions a real fucom/fucomp/fucompp (or
 * fucomi/fucompi) can consume, then patches the node to the concrete
 * opcode selected by the number of pops needed. When the operand order
 * had to be swapped, ins_permuted is toggled so the condition is fixed
 * up later.
 *
 * NOTE(review): a large number of lines (braces, else-branches, the
 * pops/dst declarations and the switch headers) are elided in this
 * excerpt; the comments below describe only what the visible code shows.
 */
1353 static int sim_Fucom(x87_state *state, ir_node *n)
1357 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1359 x87_simulator         *sim         = state->sim;
1360 ir_node               *op1_node    = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1361 ir_node               *op2_node    = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1362 const arch_register_t *op1         = x87_get_irn_register(op1_node);
1363 const arch_register_t *op2         = x87_get_irn_register(op2_node);
1364 int                    reg_index_1 = arch_register_get_index(op1);
1365 int                    reg_index_2 = arch_register_get_index(op2);
1366 unsigned               live        = vfp_live_args_after(sim, n, 0);
1367 bool                   permuted    = attr->attr.data.ins_permuted;
1371 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1372     arch_register_get_name(op1), arch_register_get_name(op2)));
1373 DEBUG_ONLY(vfp_dump_live(live);)
1374 DB((dbg, LEVEL_1, "Stack before: "));
1375 DEBUG_ONLY(x87_dump_stack(state);)
1377 op1_idx = x87_on_stack(state, reg_index_1);
1378 assert(op1_idx >= 0);
1380 /* BEWARE: check for comp a,a cases, they might happen */
1381 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1382 /* second operand is a vfp register */
1383 op2_idx = x87_on_stack(state, reg_index_2);
1384 assert(op2_idx >= 0);
1386 if (is_vfp_live(reg_index_2, live)) {
1387 /* second operand is live */
1389 if (is_vfp_live(reg_index_1, live)) {
1390 /* both operands are live */
1393 /* res = tos X op */
1394 } else if (op2_idx == 0) {
1395 /* res = op X tos */
1396 permuted = !permuted;
1399 /* bring the first one to tos */
1400 x87_create_fxch(state, n, op1_idx);
1401 if (op1_idx == op2_idx) {
1403 } else if (op2_idx == 0) {
1407 /* res = tos X op */
1410 /* second live, first operand is dead here, bring it to tos.
1411    This means further, op1_idx != op2_idx. */
1412 assert(op1_idx != op2_idx);
1414 x87_create_fxch(state, n, op1_idx);
1419 /* res = tos X op, pop */
1423 /* second operand is dead */
1424 if (is_vfp_live(reg_index_1, live)) {
1425 /* first operand is live: bring second to tos.
1426    This means further, op1_idx != op2_idx. */
1427 assert(op1_idx != op2_idx);
1429 x87_create_fxch(state, n, op2_idx);
1434 /* res = op X tos, pop */
1436 permuted = !permuted;
1439 /* both operands are dead here, check first for identity. */
1440 if (op1_idx == op2_idx) {
1441 /* identically, one pop needed */
1443 x87_create_fxch(state, n, op1_idx);
1447 /* res = tos X op, pop */
1450 /* different, move them to st and st(1) and pop both.
1451    The tricky part is to get one into st(1).*/
1452 else if (op2_idx == 1) {
1453 /* good, second operand is already in the right place, move the first */
1455 /* bring the first on top */
1456 x87_create_fxch(state, n, op1_idx);
1457 assert(op2_idx != 0);
1460 /* res = tos X op, pop, pop */
1462 } else if (op1_idx == 1) {
1463 /* good, first operand is already in the right place, move the second */
1465 /* bring the first on top */
1466 x87_create_fxch(state, n, op2_idx);
1467 assert(op1_idx != 0);
1470 /* res = op X tos, pop, pop */
1471 permuted = !permuted;
1475 /* if one is already the TOS, we need two fxch */
1477 /* first one is TOS, move to st(1) */
1478 x87_create_fxch(state, n, 1);
1479 assert(op2_idx != 1);
1481 x87_create_fxch(state, n, op2_idx);
1483 /* res = op X tos, pop, pop */
1485 permuted = !permuted;
1487 } else if (op2_idx == 0) {
1488 /* second one is TOS, move to st(1) */
1489 x87_create_fxch(state, n, 1);
1490 assert(op1_idx != 1);
1492 x87_create_fxch(state, n, op1_idx);
1494 /* res = tos X op, pop, pop */
1497 /* none of them is either TOS or st(1), 3 fxch needed */
1498 x87_create_fxch(state, n, op2_idx);
1499 assert(op1_idx != 0);
1500 x87_create_fxch(state, n, 1);
1502 x87_create_fxch(state, n, op1_idx);
1504 /* res = tos X op, pop, pop */
1511 /* second operand is an address mode */
1512 if (is_vfp_live(reg_index_1, live)) {
1513 /* first operand is live: bring it to TOS */
1515 x87_create_fxch(state, n, op1_idx);
1519 /* first operand is dead: bring it to tos */
1521 x87_create_fxch(state, n, op1_idx);
1528 /* patch the operation */
	/* select the concrete opcode from the required pop count */
1529 if (is_ia32_vFucomFnstsw(n)) {
1533 case 0: dst = op_ia32_FucomFnstsw;   break;
1534 case 1: dst = op_ia32_FucompFnstsw;  break;
1535 case 2: dst = op_ia32_FucomppFnstsw; break;
1536 default: panic("invalid popcount");
1539 for (i = 0; i < pops; ++i) {
1542 } else if (is_ia32_vFucomi(n)) {
1544 case 0: dst = op_ia32_Fucomi;                  break;
1545 case 1: dst = op_ia32_Fucompi; x87_pop(state); break;
	/* fucompi only pops once; the second pop becomes an explicit fpop */
1547 dst = op_ia32_Fucompi;
1549 x87_create_fpop(state, sched_next(n), 1);
1551 default: panic("invalid popcount");
1554 panic("invalid operation %+F", n);
1557 x87_patch_insn(n, dst);
1564 op1 = get_st_reg(op1_idx);
1567 op2 = get_st_reg(op2_idx);
1570 attr->x87[2] = NULL;
	/* remember whether the operand order was swapped */
1571 attr->attr.data.ins_permuted = permuted;
1574 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1575     arch_register_get_name(op1), arch_register_get_name(op2)));
1577 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1578     arch_register_get_name(op1)));
1581 return NO_NODE_ADDED;
1587 * @param state the x87 state
1588 * @param n the node that should be simulated (and patched)
1590 * @return NO_NODE_ADDED
/*
 * Simulate a be_Keep: for every kept operand of the vfp register class
 * that is still on the simulated stack but no longer live, insert an
 * fpop right behind the Keep so the x87 stack does not accumulate dead
 * values.
 * NOTE(review): braces, several declarations and the continue-branch are
 * elided in this excerpt.
 */
1592 static int sim_Keep(x87_state *state, ir_node *node)
1595 const arch_register_t *op_reg;
1601 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1603 arity = get_irn_arity(node);
1604 for (i = 0; i < arity; ++i) {
1605 	op      = get_irn_n(node, i);
1606 	op_reg  = arch_get_irn_register(op);
	/* only x87 (vfp) operands are of interest here */
1607 	if (arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1610 	reg_id = arch_register_get_index(op_reg);
1611 	live   = vfp_live_args_after(state->sim, node, 0);
1613 	op_stack_idx = x87_on_stack(state, reg_id);
	/* on the stack but dead: pop it after the Keep */
1614 	if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live))
1615 		x87_create_fpop(state, sched_next(node), 1);
1618 DB((dbg, LEVEL_1, "Stack after: "));
1619 DEBUG_ONLY(x87_dump_stack(state);)
1621 return NO_NODE_ADDED;
1625 * Keep the given node alive by adding a be_Keep.
1627  * @param node  the node to be kept alive
/*
 * Keep the given (scheduled) node alive by creating a be_Keep in its
 * block and scheduling that Keep directly after it.
 * NOTE(review): the function's braces are elided in this excerpt.
 */
1629 static void keep_float_node_alive(ir_node *node)
1631 ir_node *block = get_nodes_block(node);
1632 ir_node *keep  = be_new_Keep(block, 1, &node);
1634 assert(sched_is_scheduled(node));
1635 sched_add_after(node, keep);
1639 * Create a copy of a node. Recreate the node if it's a constant.
1641 * @param state the x87 state
1642 * @param n the node to be copied
1644 * @return the copy of n
/*
 * Create a copy of node n on the x87 stack.
 *
 * Constants (fldz, fld1, fldpi, ...) are not copied but recreated by
 * calling the matching constructor again; everything else is duplicated
 * with an fpushCopy referencing its current stack slot. In both cases
 * the new value is pushed onto the simulated stack and its output
 * register is taken over from n.
 *
 * NOTE(review): braces, the remaining case labels/breaks of the switch
 * and the return statement are elided in this excerpt.
 */
1646 static ir_node *create_Copy(x87_state *state, ir_node *n)
1648 dbg_info *n_dbg = get_irn_dbg_info(n);
1649 ir_mode *mode = get_irn_mode(n);
1650 ir_node *block = get_nodes_block(n);
1651 ir_node *pred = get_irn_n(n, 0);
	/* constructor used to recreate a constant instead of copying it */
1652 ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL;
1654 const arch_register_t *out;
1655 const arch_register_t *op1;
1656 ia32_x87_attr_t *attr;
1658 /* Do not copy constants, recreate them. */
1659 switch (get_ia32_irn_opcode(pred)) {
1661 	cnstr = new_bd_ia32_fldz;
1664 	cnstr = new_bd_ia32_fld1;
1666 case iro_ia32_fldpi:
1667 	cnstr = new_bd_ia32_fldpi;
1669 case iro_ia32_fldl2e:
1670 	cnstr = new_bd_ia32_fldl2e;
1672 case iro_ia32_fldl2t:
1673 	cnstr = new_bd_ia32_fldl2t;
1675 case iro_ia32_fldlg2:
1676 	cnstr = new_bd_ia32_fldlg2;
1678 case iro_ia32_fldln2:
1679 	cnstr = new_bd_ia32_fldln2;
1685 out = x87_get_irn_register(n);
1686 op1 = x87_get_irn_register(pred);
1688 if (cnstr != NULL) {
1689 /* copy a constant */
1690 	res = (*cnstr)(n_dbg, block, mode);
1692 	x87_push(state, arch_register_get_index(out), res);
1694 	attr = get_ia32_x87_attr(res);
1695 	attr->x87[2] = get_st_reg(0);
	/* a real value: duplicate its current stack slot via fpushCopy */
1697 	int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1699 	res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);
1701 	x87_push(state, arch_register_get_index(out), res);
1703 	attr = get_ia32_x87_attr(res);
1704 	attr->x87[0] = get_st_reg(op1_idx);
1705 	attr->x87[2] = get_st_reg(0);
1707 arch_set_irn_register(res, out);
1713 * Simulate a be_Copy.
1715 * @param state the x87 state
1716 * @param n the node that should be simulated (and patched)
1718 * @return NO_NODE_ADDED
/*
 * Simulate a be_Copy.
 *
 * Non-vfp copies are ignored. If the copied operand is still live, a
 * real duplicate is created (create_Copy) and scheduled; otherwise the
 * copy is either a pure rename of the simulated stack slot or — when the
 * destination already has a stack position — the operand is moved there
 * via fxch and the node is turned into a Pop.
 *
 * NOTE(review): braces, several declarations, early returns and
 * else-keywords are elided in this excerpt.
 */
1720 static int sim_Copy(x87_state *state, ir_node *n)
1723 const arch_register_t *out;
1724 const arch_register_t *op1;
1725 const arch_register_class_t *cls;
1726 ir_node *node, *next;
1727 int op1_idx, out_idx;
	/* only x87 (vfp) copies need simulation */
1730 cls = arch_get_irn_reg_class(n);
1731 if (cls != &ia32_reg_classes[CLASS_ia32_vfp])
1734 pred = be_get_Copy_op(n);
1735 out  = x87_get_irn_register(n);
1736 op1  = x87_get_irn_register(pred);
1737 live = vfp_live_args_after(state->sim, n, REGMASK(out));
1739 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1740     arch_register_get_name(op1), arch_register_get_name(out)));
1741 DEBUG_ONLY(vfp_dump_live(live);)
1743 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1745 if (is_vfp_live(arch_register_get_index(op1), live)) {
1746 /* Operand is still live, a real copy. We need here an fpush that can
1747    hold a register, so use the fpushCopy or recreate constants */
1748 	node = create_Copy(state, n);
1750 /* We have to make sure the old value doesn't go dead (which can happen
1751  * when we recreate constants). As the simulator expected that value in
1752  * the pred blocks. This is unfortunate as removing it would save us 1
1753  * instruction, but we would have to rerun all the simulation to get
1756 	next = sched_next(n);
1759 	sched_add_before(next, node);
	/* recreated constant may leave pred unused: keep it alive */
1761 	if (get_irn_n_edges(pred) == 0) {
1762 		keep_float_node_alive(pred);
1765 	DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
1767 	out_idx = x87_on_stack(state, arch_register_get_index(out));
1769 	if (out_idx >= 0 && out_idx != op1_idx) {
1770 /* Matze: out already on stack? how can this happen? */
1771 		panic("invalid stack state");
1774 /* op1 must be killed and placed where out is */
1776 		ia32_x87_attr_t *attr;
1777 /* best case, simple remove and rename */
1778 		x87_patch_insn(n, op_ia32_Pop);
1779 		attr = get_ia32_x87_attr(n);
1780 		attr->x87[0] = op1 = get_st_reg(0);
1783 		x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1785 		ia32_x87_attr_t *attr;
1786 /* move op1 to tos, store and pop it */
1788 			x87_create_fxch(state, n, op1_idx);
1791 		x87_patch_insn(n, op_ia32_Pop);
1792 		attr = get_ia32_x87_attr(n);
1793 		attr->x87[0] = op1 = get_st_reg(out_idx);
1796 		x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1798 	DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1801 /* just a virtual copy */
1802 	x87_set_st(state, arch_register_get_index(out), pred, op1_idx);
1803 /* don't remove the node to keep the verifier quiet :),
1804    the emitter won't emit any code for the node */
1807 	DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1812 return NO_NODE_ADDED;
1816 * Returns the vf0 result Proj of a Call.
1818  * @param call  the Call node
/*
 * Return the vf0 result Proj of a Call by scanning its out edges for the
 * Proj with number pn_ia32_Call_vf0.
 * NOTE(review): braces and the return statements are elided in this
 * excerpt.
 */
1820 static ir_node *get_call_result_proj(ir_node *call)
1822 /* search the result proj */
1823 foreach_out_edge(call, edge) {
1824 	ir_node *proj = get_edge_src_irn(edge);
1825 	long pn = get_Proj_proj(proj);
1827 	if (pn == pn_ia32_Call_vf0)
/*
 * Simulate an Asm node: the x87 simulator cannot handle inline assembly
 * with x87 (vfp) register constraints, so panic if any input or output
 * requirement is of that class; otherwise nothing to do.
 * NOTE(review): braces are elided in this excerpt.
 */
1834 static int sim_Asm(x87_state *const state, ir_node *const n)
	/* reject vfp-constrained inputs */
1838 for (size_t i = get_irn_arity(n); i-- != 0;) {
1839 	arch_register_req_t const *const req = arch_get_irn_register_req_in(n, i);
1840 	if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1841 		panic("cannot handle %+F with x87 constraints", n);
	/* reject vfp-constrained outputs */
1844 for (size_t i = arch_get_irn_n_outs(n); i-- != 0;) {
1845 	arch_register_req_t const *const req = arch_get_irn_register_req_out(n, i);
1846 	if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1847 		panic("cannot handle %+F with x87 constraints", n);
1850 return NO_NODE_ADDED;
1854 * Simulate a ia32_Call.
1856 * @param state the x87 state
1857 * @param n the node that should be simulated (and patched)
1859 * @return NO_NODE_ADDED
/*
 * Simulate an ia32 Call: the x87 stack must be empty at the call. If the
 * callee returns a float, the result appears in st(0), so the result
 * Proj is pushed onto the simulated stack.
 * NOTE(review): braces, several declarations and the early returns are
 * elided in this excerpt.
 */
1861 static int sim_Call(x87_state *state, ir_node *n)
1863 ir_type *call_tp = get_ia32_call_attr_const(n)->call_tp;
1867 const arch_register_t *reg;
1869 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1871 /* at the begin of a call the x87 state should be empty */
1872 assert(state->depth == 0 && "stack not empty before call");
1874 if (get_method_n_ress(call_tp) <= 0)
1878  * If the called function returns a float, it is returned in st(0).
1879  * This even happens if the return value is NOT used.
1880  * Moreover, only one return result is supported.
1882 res_type = get_method_res_type(call_tp, 0);
1883 mode     = get_type_mode(res_type);
	/* non-float results don't touch the x87 stack */
1885 if (mode == NULL || !mode_is_float(mode))
1888 resproj = get_call_result_proj(n);
1889 assert(resproj != NULL);
1891 reg = x87_get_irn_register(resproj);
1892 x87_push(state, arch_register_get_index(reg), resproj);
1895 DB((dbg, LEVEL_1, "Stack after: "));
1896 DEBUG_ONLY(x87_dump_stack(state);)
1898 return NO_NODE_ADDED;
1902 * Simulate a be_Return.
1904 * @param state the x87 state
1905 * @param n the node that should be simulated (and patched)
1907 * @return NO_NODE_ADDED
/*
 * Simulate a be_Return: in debug builds, check that the simulated stack
 * depth matches the number of floating-point return values, then pop the
 * remaining entries virtually (no real pop instructions are needed, the
 * values are consumed by the return).
 * NOTE(review): braces, the counter increment and the pop loop are
 * elided in this excerpt.
 */
1909 static int sim_Return(x87_state *state, ir_node *n)
1911 #ifdef DEBUG_libfirm
1912 /* only floating point return values must reside on stack */
1913 int n_float_res = 0;
1914 int const n_res = be_Return_get_n_rets(n);
1915 for (int i = 0; i < n_res; ++i) {
1916 	ir_node *const res = get_irn_n(n, n_be_Return_val + i);
1917 	if (mode_is_float(get_irn_mode(res)))
1920 assert(x87_get_depth(state) == n_float_res);
1923 /* pop them virtually */
1925 return NO_NODE_ADDED;
1929 * Simulate a be_Perm.
1931 * @param state the x87 state
1932 * @param irn the node that should be simulated (and patched)
1934 * @return NO_NODE_ADDED
/*
 * Simulate a be_Perm on vfp registers. On x87 a Perm is purely virtual:
 * all inputs already live on the FPU stack and are pairwise different,
 * so only the simulated stack state is permuted — no instructions are
 * emitted.
 * NOTE(review): braces and several declarations are elided in this
 * excerpt.
 */
1936 static int sim_Perm(x87_state *state, ir_node *irn)
1939 ir_node *pred = get_irn_n(irn, 0);
1942 /* handle only floating point Perms */
1943 if (! mode_is_float(get_irn_mode(pred)))
1944 	return NO_NODE_ADDED;
1946 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1948 /* Perm is a pure virtual instruction on x87.
1949    All inputs must be on the FPU stack and are pairwise
1950    different from each other.
1951    So, all we need to do is to permutate the stack state. */
1952 n = get_irn_arity(irn);
	/* stack_pos is allocated on the current obstack frame (alloca-like) */
1953 NEW_ARR_A(int, stack_pos, n);
1955 /* collect old stack positions */
1956 for (i = 0; i < n; ++i) {
1957 	const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
1958 	int idx = x87_on_stack(state, arch_register_get_index(inreg));
1960 	assert(idx >= 0 && "Perm argument not on x87 stack");
1964 /* now do the permutation */
1965 foreach_out_edge(irn, edge) {
1966 	ir_node               *proj = get_edge_src_irn(edge);
1967 	const arch_register_t *out  = x87_get_irn_register(proj);
1968 	long                  num   = get_Proj_proj(proj);
1970 	assert(0 <= num && num < n && "More Proj's than Perm inputs");
1971 	x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
1973 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1975 return NO_NODE_ADDED;
1979 * Kill any dead registers at block start by popping them from the stack.
1981 * @param sim the simulator handle
1982 * @param block the current block
1983 * @param state the x87 state at the begin of the block
/*
 * Kill dead registers at block start by popping them from the simulated
 * stack: build a mask of dead slots, use FEMMS/EMMS as a fast path when
 * everything is dead (and the CPU supports it), otherwise bring dead
 * values to the top with fxch and pop them singly or in pairs.
 * NOTE(review): braces, several declarations, loop bodies and break
 * statements are elided in this excerpt.
 */
1985 static void x87_kill_deads(x87_simulator *const sim, ir_node *const block, x87_state *const state)
1987 ir_node *first_insn = sched_first(block);
1988 ir_node *keep = NULL;
1989 unsigned live = vfp_live_args_after(sim, block, 0);
1991 int i, depth, num_pop;
	/* mark every stack slot whose register is not live-in */
1994 depth = x87_get_depth(state);
1995 for (i = depth - 1; i >= 0; --i) {
1996 	int reg = x87_get_st_reg(state, i);
1998 	if (! is_vfp_live(reg, live))
1999 		kill_mask |= (1 << i);
2003 DB((dbg, LEVEL_1, "Killing deads:\n"));
2004 DEBUG_ONLY(vfp_dump_live(live);)
2005 DEBUG_ONLY(x87_dump_stack(state);)
2007 if (kill_mask != 0 && live == 0) {
2008 /* special case: kill all registers */
2009 	if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
2010 		if (ia32_cg_config.use_femms) {
2011 /* use FEMMS on AMD processors to clear all */
2012 			keep = new_bd_ia32_femms(NULL, block);
2014 /* use EMMS to clear all */
2015 			keep = new_bd_ia32_emms(NULL, block);
2017 		sched_add_before(first_insn, keep);
2023 /* now kill registers */
2025 /* we can only kill from TOS, so bring them up */
2026 	if (! (kill_mask & 1)) {
2027 /* search from behind, because we can do a double-pop */
2028 		for (i = depth - 1; i >= 0; --i) {
2029 			if (kill_mask & (1 << i)) {
2030 				kill_mask &= ~(1 << i);
2037 		x87_set_st(state, -1, keep, i);
2038 		x87_create_fxch(state, first_insn, i);
2041 	if ((kill_mask & 3) == 3) {
2042 /* we can do a double-pop */
2046 /* only a single pop */
2051 	kill_mask >>= num_pop;
2052 	keep = x87_create_fpop(state, first_insn, num_pop);
2059 * Run a simulation and fix all virtual instructions for a block.
2061 * @param sim the simulator handle
2062 * @param block the current block
/*
 * Run the simulation over one block: clone the incoming state, kill dead
 * registers at the block entry, dispatch every scheduled node to its
 * registered sim_func, and propagate the resulting end state to all
 * successors (enqueueing unvisited ones, shuffling the stack for already
 * visited ones).
 * NOTE(review): braces, several declarations and early returns are
 * elided in this excerpt.
 */
2064 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
2067 blk_state *bl_state = x87_get_bl_state(sim, block);
2068 x87_state *state = bl_state->begin;
2069 ir_node *start_block;
2071 assert(state != NULL);
2072 /* already processed? */
2073 if (bl_state->end != NULL)
2076 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2077 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2078 DEBUG_ONLY(x87_dump_stack(state);)
2080 /* create a new state, will be changed */
2081 state = x87_clone_state(sim, state);
2082 /* at block begin, kill all dead registers */
2083 x87_kill_deads(sim, block, state);
2085 /* beware, n might change */
2086 for (n = sched_first(block); !sched_is_end(n); n = next) {
2089 	ir_op *op = get_irn_op(n);
2092  * get the next node to be simulated here.
2093  * n might be completely removed from the schedule-
2095 	next = sched_next(n);
	/* dispatch to the handler registered via register_sim(), if any */
2096 	if (op->ops.generic != NULL) {
2097 		func = (sim_func)op->ops.generic;
2100 		node_inserted = (*func)(state, n);
2103  * sim_func might have added an additional node after n,
2104  * so update next node
2105  * beware: n must not be changed by sim_func
2106  * (i.e. removed from schedule) in this case
2108 		if (node_inserted != NO_NODE_ADDED)
2109 			next = sched_next(n);
2113 start_block = get_irg_start_block(get_irn_irg(block));
2115 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state);)
2117 /* check if the state must be shuffled */
2118 foreach_block_succ(block, edge) {
2119 	ir_node *succ = get_edge_src_irn(edge);
2120 	blk_state *succ_state;
2122 	if (succ == start_block)
2125 	succ_state = x87_get_bl_state(sim, succ);
2127 	if (succ_state->begin == NULL) {
2128 		DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2129 		DEBUG_ONLY(x87_dump_stack(state);)
2130 		succ_state->begin = state;
2132 		waitq_put(sim->worklist, succ);
2134 		DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2135 /* There is already a begin state for the successor, bad.
2136    Do the necessary permutations.
2137    Note that critical edges are removed, so this is always possible:
2138    If the successor has more than one possible input, then it must
2141 		x87_shuffle(block, state, succ_state->begin);
2144 bl_state->end = state;
2148 * Register a simulator function.
2150 * @param op the opcode to simulate
2151 * @param func the simulator function for the opcode
/*
 * Register a simulator function for an opcode by storing it in the op's
 * generic function pointer; each opcode may be registered only once.
 * NOTE(review): the function's braces are elided in this excerpt.
 */
2153 static void register_sim(ir_op *op, sim_func func)
2155 assert(op->ops.generic == NULL);
2156 op->ops.generic = (op_func) func;
2160 * Create a new x87 simulator.
2162 * @param sim a simulator handle, will be initialized
2163 * @param irg the current graph
/*
 * Initialize a simulator handle for the given graph: set up the obstack,
 * the per-block state map and the liveness cache, then register the
 * sim_* handler for every virtual x87 opcode and the relevant backend
 * nodes (Copy, Return, Perm, Keep).
 * NOTE(review): braces and part of the register_sim calls may be elided
 * in this excerpt.
 */
2165 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
2167 obstack_init(&sim->obst);
2168 sim->blk_states = pmap_create();
2169 sim->n_idx      = get_irg_last_idx(irg);
2170 sim->live       = OALLOCN(&sim->obst, vfp_liveness, sim->n_idx);
2172 DB((dbg, LEVEL_1, "--------------------------------\n"
2173     "x87 Simulator started for %+F\n", irg));
2175 /* set the generic function pointer of instruction we must simulate */
2176 ir_clear_opcodes_generic_func();
2178 register_sim(op_ia32_Asm,          sim_Asm);
2179 register_sim(op_ia32_Call,         sim_Call);
2180 register_sim(op_ia32_vfld,         sim_fld);
2181 register_sim(op_ia32_vfild,        sim_fild);
2182 register_sim(op_ia32_vfld1,        sim_fld1);
2183 register_sim(op_ia32_vfldz,        sim_fldz);
2184 register_sim(op_ia32_vfadd,        sim_fadd);
2185 register_sim(op_ia32_vfsub,        sim_fsub);
2186 register_sim(op_ia32_vfmul,        sim_fmul);
2187 register_sim(op_ia32_vfdiv,        sim_fdiv);
2188 register_sim(op_ia32_vfprem,       sim_fprem);
2189 register_sim(op_ia32_vfabs,        sim_fabs);
2190 register_sim(op_ia32_vfchs,        sim_fchs);
2191 register_sim(op_ia32_vfist,        sim_fist);
2192 register_sim(op_ia32_vfisttp,      sim_fisttp);
2193 register_sim(op_ia32_vfst,         sim_fst);
2194 register_sim(op_ia32_vFtstFnstsw,  sim_FtstFnstsw);
2195 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
2196 register_sim(op_ia32_vFucomi,      sim_Fucom);
2197 register_sim(op_be_Copy,           sim_Copy);
2198 register_sim(op_be_Return,         sim_Return);
2199 register_sim(op_be_Perm,           sim_Perm);
2200 register_sim(op_be_Keep,           sim_Keep);
2204 * Destroy a x87 simulator.
2206 * @param sim the simulator handle
/*
 * Destroy a simulator handle: release the block-state map and the
 * obstack allocated in x87_init_simulator.
 * NOTE(review): the function's braces are elided in this excerpt.
 */
2208 static void x87_destroy_simulator(x87_simulator *sim)
2210 pmap_destroy(sim->blk_states);
2211 obstack_free(&sim->obst, NULL);
2212 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2216 * Pre-block walker: calculate the liveness information for the block
2217 * and store it into the sim->live cache.
/*
 * Pre-block walker callback: compute and cache the liveness information
 * for the block in the simulator handle passed via data.
 * NOTE(review): the function's braces are elided in this excerpt.
 */
2219 static void update_liveness_walker(ir_node *block, void *data)
2221 x87_simulator *sim = (x87_simulator*)data;
2222 update_liveness(sim, block);
2226 * Run a simulation and fix all virtual instructions for a graph.
2227 * Replaces all virtual floating point instructions and registers
/*
 * Entry point: replace all virtual x87 instructions/registers in a graph.
 * Initializes a simulator, precomputes liveness for every block (new
 * nodes inserted during simulation would break lazy computation), then
 * processes blocks from a worklist seeded with the start block until the
 * simulated stack states have converged.
 * NOTE(review): braces, several declarations and the worklist loop
 * header are elided in this excerpt.
 */
2230 void ia32_x87_simulate_graph(ir_graph *irg)
2232 /* TODO improve code quality (less executed fxch) by using execfreqs */
2234 ir_node *block, *start_block;
2235 blk_state *bl_state;
2238 /* create the simulator */
2239 x87_init_simulator(&sim, irg);
2241 start_block = get_irg_start_block(irg);
2242 bl_state    = x87_get_bl_state(&sim, start_block);
2244 /* start with the empty state */
2245 bl_state->begin = empty;
2248 sim.worklist = new_waitq();
2249 waitq_put(sim.worklist, start_block);
2251 be_assure_live_sets(irg);
2252 sim.lv = be_get_irg_liveness(irg);
2254 /* Calculate the liveness for all nodes. We must precalculate this info,
2255  * because the simulator adds new nodes (possible before Phi nodes) which
2256  * would let a lazy calculation fail.
2257  * On the other hand we reduce the computation amount due to
2258  * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2260 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
	/* drain the worklist; successors are enqueued by x87_simulate_block */
2264 	block = (ir_node*)waitq_get(sim.worklist);
2265 	x87_simulate_block(&sim, block);
2266 } while (! waitq_empty(sim.worklist));
2269 del_waitq(sim.worklist);
2270 x87_destroy_simulator(&sim);
2273 /* Initializes the x87 simulator. */
2274 void ia32_init_x87(void)
2276 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");