2 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
33 #include "iredges_t.h"
48 #include "bearch_ia32_t.h"
49 #include "ia32_new_nodes.h"
50 #include "gen_ia32_new_nodes.h"
51 #include "gen_ia32_regalloc_if.h"
53 #include "ia32_architecture.h"
55 #define MASK_TOS(x) ((x) & (N_ia32_st_REGS - 1))
57 /** the debug handle */
58 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
60 /* Forward declaration. */
61 typedef struct x87_simulator x87_simulator;
64 * An exchange template.
65 * Note that our virtual functions have the same inputs
66 * and attributes as the real ones, so we can simple exchange
68 * Further, x87 supports inverse instructions, so we can handle them.
70 typedef struct exchange_tmpl {
71 ir_op *normal_op; /**< the normal one */
72 ir_op *reverse_op; /**< the reverse one if exists */
73 ir_op *normal_pop_op; /**< the normal one with tos pop */
74 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
78 * An entry on the simulated x87 stack.
80 typedef struct st_entry {
81 int reg_idx; /**< the virtual register index of this stack value */
82 ir_node *node; /**< the node that produced this value */
88 typedef struct x87_state {
89 st_entry st[N_ia32_st_REGS]; /**< the register stack */
90 int depth; /**< the current stack depth */
91 int tos; /**< position of the tos */
92 x87_simulator *sim; /**< The simulator. */
95 /** An empty state, used for blocks without fp instructions. */
96 static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
97 static x87_state *empty = (x87_state *)&_empty;
100 * Return values of the instruction simulator functions.
103 NO_NODE_ADDED = 0, /**< No node that needs simulation was added. */
104 NODE_ADDED = 1 /**< A node that must be simulated was added by the simulator
105 in the schedule AFTER the current node. */
109 * The type of an instruction simulator function.
111 * @param state the x87 state
112 * @param n the node to be simulated
114 * @return NODE_ADDED if a node was added AFTER n in schedule that MUST be
116 * NO_NODE_ADDED otherwise
118 typedef int (*sim_func)(x87_state *state, ir_node *n);
121 * A block state: Every block has a x87 state at the beginning and at the end.
123 typedef struct blk_state {
124 x87_state *begin; /**< state at the begin or NULL if not assigned */
125 x87_state *end; /**< state at the end or NULL if not assigned */
128 /** liveness bitset for vfp registers. */
129 typedef unsigned char vfp_liveness;
134 struct x87_simulator {
135 struct obstack obst; /**< An obstack for fast allocating. */
136 pmap *blk_states; /**< Map blocks to states. */
137 be_lv_t *lv; /**< intrablock liveness. */
138 vfp_liveness *live; /**< Liveness information. */
139 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
140 waitq *worklist; /**< Worklist of blocks that must be processed. */
144 * Returns the current stack depth.
146 * @param state the x87 state
148 * @return the x87 stack depth
150 static int x87_get_depth(const x87_state *state)
155 static st_entry *x87_get_entry(x87_state *const state, int const pos)
157 assert(0 <= pos && pos < state->depth);
158 return &state->st[MASK_TOS(state->tos + pos)];
162 * Return the virtual register index at st(pos).
164 * @param state the x87 state
165 * @param pos a stack position
167 * @return the vfp register index that produced the value at st(pos)
169 static int x87_get_st_reg(const x87_state *state, int pos)
171 return x87_get_entry((x87_state*)state, pos)->reg_idx;
176 * Return the node at st(pos).
178 * @param state the x87 state
179 * @param pos a stack position
181 * @return the IR node that produced the value at st(pos)
183 static ir_node *x87_get_st_node(const x87_state *state, int pos)
185 return x87_get_entry((x87_state*)state, pos)->node;
189 * Dump the stack for debugging.
191 * @param state the x87 state
193 static void x87_dump_stack(const x87_state *state)
197 for (i = state->depth - 1; i >= 0; --i) {
198 DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
199 x87_get_st_node(state, i)));
201 DB((dbg, LEVEL_2, "<-- TOS\n"));
203 #endif /* DEBUG_libfirm */
206 * Set a virtual register to st(pos).
208 * @param state the x87 state
209 * @param reg_idx the vfp register index that should be set
210 * @param node the IR node that produces the value of the vfp register
211 * @param pos the stack position where the new value should be entered
213 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
215 st_entry *const entry = x87_get_entry(state, pos);
216 entry->reg_idx = reg_idx;
219 DB((dbg, LEVEL_2, "After SET_REG: "));
220 DEBUG_ONLY(x87_dump_stack(state);)
224 * Set the tos virtual register.
226 * @param state the x87 state
227 * @param reg_idx the vfp register index that should be set
228 * @param node the IR node that produces the value of the vfp register
230 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node)
232 x87_set_st(state, reg_idx, node, 0);
236 * Swap st(0) with st(pos).
238 * @param state the x87 state
239 * @param pos the stack position to change the tos with
241 static void x87_fxch(x87_state *state, int pos)
243 st_entry *const a = x87_get_entry(state, pos);
244 st_entry *const b = x87_get_entry(state, 0);
245 st_entry const t = *a;
249 DB((dbg, LEVEL_2, "After FXCH: "));
250 DEBUG_ONLY(x87_dump_stack(state);)
254 * Convert a virtual register to the stack index.
256 * @param state the x87 state
257 * @param reg_idx the register vfp index
259 * @return the stack position where the register is stacked
260 * or -1 if the virtual register was not found
262 static int x87_on_stack(const x87_state *state, int reg_idx)
264 for (int i = 0; i < state->depth; ++i) {
265 if (x87_get_st_reg(state, i) == reg_idx)
272 * Push a virtual Register onto the stack, double pushed allowed.
274 * @param state the x87 state
275 * @param reg_idx the register vfp index
276 * @param node the node that produces the value of the vfp register
278 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node)
280 assert(state->depth < N_ia32_st_REGS && "stack overrun");
283 state->tos = MASK_TOS(state->tos - 1);
284 st_entry *const entry = x87_get_entry(state, 0);
285 entry->reg_idx = reg_idx;
288 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state);)
292 * Push a virtual Register onto the stack, double pushes are NOT allowed.
294 * @param state the x87 state
295 * @param reg_idx the register vfp index
296 * @param node the node that produces the value of the vfp register
298 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
300 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
302 x87_push_dbl(state, reg_idx, node);
306 * Pop a virtual Register from the stack.
308 * @param state the x87 state
310 static void x87_pop(x87_state *state)
312 assert(state->depth > 0 && "stack underrun");
315 state->tos = MASK_TOS(state->tos + 1);
317 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state);)
321 * Empty the fpu stack
323 * @param state the x87 state
325 static void x87_emms(x87_state *state)
332 * Returns the block state of a block.
334 * @param sim the x87 simulator handle
335 * @param block the current block
337 * @return the block state
339 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
341 blk_state *res = pmap_get(blk_state, sim->blk_states, block);
344 res = OALLOC(&sim->obst, blk_state);
348 pmap_insert(sim->blk_states, block, res);
355 * Creates a new x87 state.
357 * @param sim the x87 simulator handle
359 * @return a new x87 state
361 static x87_state *x87_alloc_state(x87_simulator *sim)
363 x87_state *res = OALLOC(&sim->obst, x87_state);
372 * @param sim the x87 simulator handle
373 * @param src the x87 state that will be cloned
375 * @return a cloned copy of the src state
377 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
379 x87_state *res = x87_alloc_state(sim);
386 * Patch a virtual instruction into a x87 one and return
387 * the node representing the result value.
389 * @param n the IR node to patch
390 * @param op the x87 opcode to patch in
392 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
394 ir_mode *mode = get_irn_mode(n);
399 if (mode == mode_T) {
400 /* patch all Proj's */
401 foreach_out_edge(n, edge) {
402 ir_node *proj = get_edge_src_irn(edge);
404 mode = get_irn_mode(proj);
405 if (mode_is_float(mode)) {
407 set_irn_mode(proj, ia32_reg_classes[CLASS_ia32_st].mode);
411 } else if (mode_is_float(mode))
412 set_irn_mode(n, ia32_reg_classes[CLASS_ia32_st].mode);
417 * Returns the first Proj of a mode_T node having a given mode.
419 * @param n the mode_T node
420 * @param m the desired mode of the Proj
421 * @return The first Proj of mode @p m found or NULL.
423 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
425 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
427 foreach_out_edge(n, edge) {
428 ir_node *proj = get_edge_src_irn(edge);
429 if (get_irn_mode(proj) == m)
437 * Wrap the arch_* function here so we can check for errors.
439 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
441 const arch_register_t *res = arch_get_irn_register(irn);
443 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
447 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
450 const arch_register_t *res = arch_get_irn_register_out(irn, pos);
452 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
456 static inline const arch_register_t *get_st_reg(int index)
458 return &ia32_registers[REG_ST0 + index];
461 /* -------------- x87 perm --------------- */
464 * Creates a fxch for shuffle.
466 * @param state the x87 state
467 * @param pos parameter for fxch
468 * @param block the block were fxch is inserted
470 * Creates a new fxch node and reroute the user of the old node
473 * @return the fxch node
475 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block)
478 ia32_x87_attr_t *attr;
480 fxch = new_bd_ia32_fxch(NULL, block);
481 attr = get_ia32_x87_attr(fxch);
482 attr->x87[0] = get_st_reg(pos);
483 attr->x87[2] = get_st_reg(0);
487 x87_fxch(state, pos);
492 * Calculate the necessary permutations to reach dst_state.
494 * These permutations are done with fxch instructions and placed
495 * at the end of the block.
497 * Note that critical edges are removed here, so we need only
498 * a shuffle if the current block has only one successor.
500 * @param block the current block
501 * @param state the current x87 stack state, might be modified
502 * @param dst_state destination state
506 static x87_state *x87_shuffle(ir_node *block, x87_state *state, const x87_state *dst_state)
508 int i, n_cycles, k, ri;
509 unsigned cycles[4], all_mask;
510 char cycle_idx[4][8];
511 ir_node *fxch, *before, *after;
513 assert(state->depth == dst_state->depth);
515 /* Some mathematics here:
516 If we have a cycle of length n that includes the tos,
517 we need n-1 exchange operations.
518 We can always add the tos and restore it, so we need
519 n+1 exchange operations for a cycle not containing the tos.
520 So, the maximum of needed operations is for a cycle of 7
521 not including the tos == 8.
522 This is the same number of ops we would need for using stores,
523 so exchange is cheaper (we save the loads).
524 On the other hand, we might need an additional exchange
525 in the next block to bring one operand on top, so the
526 number of ops in the first case is identical.
527 Further, no more than 4 cycles can exists (4 x 2).
529 all_mask = (1 << (state->depth)) - 1;
531 for (n_cycles = 0; all_mask; ++n_cycles) {
532 int src_idx, dst_idx;
534 /* find the first free slot */
535 for (i = 0; i < state->depth; ++i) {
536 if (all_mask & (1 << i)) {
537 all_mask &= ~(1 << i);
539 /* check if there are differences here */
540 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
546 /* no more cycles found */
551 cycles[n_cycles] = (1 << i);
552 cycle_idx[n_cycles][k++] = i;
553 for (src_idx = i; ; src_idx = dst_idx) {
554 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
556 if ((all_mask & (1 << dst_idx)) == 0)
559 cycle_idx[n_cycles][k++] = dst_idx;
560 cycles[n_cycles] |= (1 << dst_idx);
561 all_mask &= ~(1 << dst_idx);
563 cycle_idx[n_cycles][k] = -1;
567 /* no permutation needed */
571 /* Hmm: permutation needed */
572 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
573 DEBUG_ONLY(x87_dump_stack(state);)
574 DB((dbg, LEVEL_2, " to\n"));
575 DEBUG_ONLY(x87_dump_stack(dst_state);)
579 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
580 for (ri = 0; ri < n_cycles; ++ri) {
581 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
582 for (k = 0; cycle_idx[ri][k] != -1; ++k)
583 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
584 DB((dbg, LEVEL_2, "\n"));
591 * Find the place node must be insert.
592 * We have only one successor block, so the last instruction should
595 before = sched_last(block);
596 assert(is_cfop(before));
598 /* now do the permutations */
599 for (ri = 0; ri < n_cycles; ++ri) {
600 if ((cycles[ri] & 1) == 0) {
601 /* this cycle does not include the tos */
602 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
604 sched_add_after(after, fxch);
606 sched_add_before(before, fxch);
609 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
610 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
612 sched_add_after(after, fxch);
614 sched_add_before(before, fxch);
617 if ((cycles[ri] & 1) == 0) {
618 /* this cycle does not include the tos */
619 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
620 sched_add_after(after, fxch);
627 * Create a fxch node before another node.
629 * @param state the x87 state
630 * @param n the node after the fxch
631 * @param pos exchange st(pos) with st(0)
635 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
638 ia32_x87_attr_t *attr;
639 ir_node *block = get_nodes_block(n);
641 x87_fxch(state, pos);
643 fxch = new_bd_ia32_fxch(NULL, block);
644 attr = get_ia32_x87_attr(fxch);
645 attr->x87[0] = get_st_reg(pos);
646 attr->x87[2] = get_st_reg(0);
650 sched_add_before(n, fxch);
651 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
656 * Create a fpush before node n.
658 * @param state the x87 state
659 * @param n the node after the fpush
660 * @param pos push st(pos) on stack
661 * @param op_idx replace input op_idx of n with the fpush result
663 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx)
665 ir_node *fpush, *pred = get_irn_n(n, op_idx);
666 ia32_x87_attr_t *attr;
667 const arch_register_t *out = x87_get_irn_register(pred);
669 x87_push_dbl(state, arch_register_get_index(out), pred);
671 fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
672 attr = get_ia32_x87_attr(fpush);
673 attr->x87[0] = get_st_reg(pos);
674 attr->x87[2] = get_st_reg(0);
677 sched_add_before(n, fpush);
679 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
683 * Create a fpop before node n.
685 * @param state the x87 state
686 * @param n the node after the fpop
687 * @param num pop 1 or 2 values
689 * @return the fpop node
691 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
693 ir_node *fpop = NULL;
694 ia32_x87_attr_t *attr;
699 if (ia32_cg_config.use_ffreep)
700 fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
702 fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
703 attr = get_ia32_x87_attr(fpop);
704 attr->x87[0] = get_st_reg(0);
705 attr->x87[1] = get_st_reg(0);
706 attr->x87[2] = get_st_reg(0);
709 sched_add_before(n, fpop);
710 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
715 /* --------------------------------- liveness ------------------------------------------ */
718 * The liveness transfer function.
719 * Updates a live set over a single step from a given node to its predecessor.
720 * Everything defined at the node is removed from the set, the uses of the node get inserted.
722 * @param irn The node at which liveness should be computed.
723 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
724 * the registers live after irn.
726 * @return The live bitset.
728 static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
731 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
733 if (get_irn_mode(irn) == mode_T) {
734 foreach_out_edge(irn, edge) {
735 ir_node *proj = get_edge_src_irn(edge);
737 if (arch_irn_consider_in_reg_alloc(cls, proj)) {
738 const arch_register_t *reg = x87_get_irn_register(proj);
739 live &= ~(1 << arch_register_get_index(reg));
742 } else if (arch_irn_consider_in_reg_alloc(cls, irn)) {
743 const arch_register_t *reg = x87_get_irn_register(irn);
744 live &= ~(1 << arch_register_get_index(reg));
747 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
748 ir_node *op = get_irn_n(irn, i);
750 if (mode_is_float(get_irn_mode(op)) &&
751 arch_irn_consider_in_reg_alloc(cls, op)) {
752 const arch_register_t *reg = x87_get_irn_register(op);
753 live |= 1 << arch_register_get_index(reg);
760 * Put all live virtual registers at the end of a block into a bitset.
762 * @param sim the simulator handle
763 * @param bl the block
765 * @return The live bitset at the end of this block
767 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
769 vfp_liveness live = 0;
770 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
771 const be_lv_t *lv = sim->lv;
773 be_lv_foreach(lv, block, be_lv_state_end, node) {
774 const arch_register_t *reg;
775 if (!arch_irn_consider_in_reg_alloc(cls, node))
778 reg = x87_get_irn_register(node);
779 live |= 1 << arch_register_get_index(reg);
785 /** get the register mask from an arch_register */
786 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
789 * Return a bitset of argument registers which are live at the end of a node.
791 * @param sim the simulator handle
792 * @param pos the node
793 * @param kill kill mask for the output registers
795 * @return The live bitset.
797 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
799 unsigned idx = get_irn_idx(pos);
801 assert(idx < sim->n_idx);
802 return sim->live[idx] & ~kill;
806 * Calculate the liveness for a whole block and cache it.
808 * @param sim the simulator handle
809 * @param block the block
811 static void update_liveness(x87_simulator *sim, ir_node *block)
813 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
816 /* now iterate through the block backward and cache the results */
817 sched_foreach_reverse(block, irn) {
818 /* stop at the first Phi: this produces the live-in */
822 idx = get_irn_idx(irn);
823 sim->live[idx] = live;
825 live = vfp_liveness_transfer(irn, live);
827 idx = get_irn_idx(block);
828 sim->live[idx] = live;
832 * Returns true if a register is live in a set.
834 * @param reg_idx the vfp register index
835 * @param live a live bitset
837 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
841 * Dump liveness info.
843 * @param live the live bitset
845 static void vfp_dump_live(vfp_liveness live)
849 DB((dbg, LEVEL_2, "Live after: "));
850 for (i = 0; i < 8; ++i) {
851 if (live & (1 << i)) {
852 DB((dbg, LEVEL_2, "vf%d ", i));
855 DB((dbg, LEVEL_2, "\n"));
857 #endif /* DEBUG_libfirm */
859 /* --------------------------------- simulators ---------------------------------------- */
862 * Simulate a virtual binop.
864 * @param state the x87 state
865 * @param n the node that should be simulated (and patched)
866 * @param tmpl the template containing the 4 possible x87 opcodes
868 * @return NO_NODE_ADDED
870 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl)
872 int op2_idx = 0, op1_idx;
873 int out_idx, do_pop = 0;
874 ia32_x87_attr_t *attr;
876 ir_node *patched_insn;
878 x87_simulator *sim = state->sim;
879 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
880 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
881 const arch_register_t *op1_reg = x87_get_irn_register(op1);
882 const arch_register_t *op2_reg = x87_get_irn_register(op2);
883 const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
884 int reg_index_1 = arch_register_get_index(op1_reg);
885 int reg_index_2 = arch_register_get_index(op2_reg);
886 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
890 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
891 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
892 arch_register_get_name(out)));
893 DEBUG_ONLY(vfp_dump_live(live);)
894 DB((dbg, LEVEL_1, "Stack before: "));
895 DEBUG_ONLY(x87_dump_stack(state);)
897 op1_idx = x87_on_stack(state, reg_index_1);
898 assert(op1_idx >= 0);
899 op1_live_after = is_vfp_live(reg_index_1, live);
901 attr = get_ia32_x87_attr(n);
902 permuted = attr->attr.data.ins_permuted;
904 if (reg_index_2 != REG_VFP_VFP_NOREG) {
907 /* second operand is a vfp register */
908 op2_idx = x87_on_stack(state, reg_index_2);
909 assert(op2_idx >= 0);
910 op2_live_after = is_vfp_live(reg_index_2, live);
912 if (op2_live_after) {
913 /* Second operand is live. */
915 if (op1_live_after) {
916 /* Both operands are live: push the first one.
917 This works even for op1 == op2. */
918 x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
919 /* now do fxxx (tos=tos X op) */
923 dst = tmpl->normal_op;
925 /* Second live, first operand is dead here, bring it to tos. */
927 x87_create_fxch(state, n, op1_idx);
932 /* now do fxxx (tos=tos X op) */
934 dst = tmpl->normal_op;
937 /* Second operand is dead. */
938 if (op1_live_after) {
939 /* First operand is live: bring second to tos. */
941 x87_create_fxch(state, n, op2_idx);
946 /* now do fxxxr (tos = op X tos) */
948 dst = tmpl->reverse_op;
950 /* Both operands are dead here, pop them from the stack. */
953 /* Both are identically and on tos, no pop needed. */
954 /* here fxxx (tos = tos X tos) */
955 dst = tmpl->normal_op;
958 /* now do fxxxp (op = op X tos, pop) */
959 dst = tmpl->normal_pop_op;
963 } else if (op1_idx == 0) {
964 assert(op1_idx != op2_idx);
965 /* now do fxxxrp (op = tos X op, pop) */
966 dst = tmpl->reverse_pop_op;
970 /* Bring the second on top. */
971 x87_create_fxch(state, n, op2_idx);
972 if (op1_idx == op2_idx) {
973 /* Both are identically and on tos now, no pop needed. */
976 /* use fxxx (tos = tos X tos) */
977 dst = tmpl->normal_op;
980 /* op2 is on tos now */
982 /* use fxxxp (op = op X tos, pop) */
983 dst = tmpl->normal_pop_op;
991 /* second operand is an address mode */
992 if (op1_live_after) {
993 /* first operand is live: push it here */
994 x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
997 /* first operand is dead: bring it to tos */
999 x87_create_fxch(state, n, op1_idx);
1004 /* use fxxx (tos = tos X mem) */
1005 dst = permuted ? tmpl->reverse_op : tmpl->normal_op;
1009 patched_insn = x87_patch_insn(n, dst);
1010 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
1015 /* patch the operation */
1016 attr->x87[0] = op1_reg = get_st_reg(op1_idx);
1017 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1018 attr->x87[1] = op2_reg = get_st_reg(op2_idx);
1020 attr->x87[2] = out = get_st_reg(out_idx);
1022 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1023 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1024 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
1025 arch_register_get_name(out)));
1027 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1028 arch_register_get_name(op1_reg),
1029 arch_register_get_name(out)));
1032 return NO_NODE_ADDED;
1036 * Simulate a virtual Unop.
1038 * @param state the x87 state
1039 * @param n the node that should be simulated (and patched)
1040 * @param op the x87 opcode that will replace n's opcode
1042 * @return NO_NODE_ADDED
1044 static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
1046 x87_simulator *sim = state->sim;
1047 const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, 0));
1048 const arch_register_t *out = x87_get_irn_register(n);
1049 ia32_x87_attr_t *attr;
1050 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1052 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1053 DEBUG_ONLY(vfp_dump_live(live);)
1055 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1057 if (is_vfp_live(arch_register_get_index(op1), live)) {
1058 /* push the operand here */
1059 x87_create_fpush(state, n, op1_idx, 0);
1062 /* operand is dead, bring it to tos */
1064 x87_create_fxch(state, n, op1_idx);
1068 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1069 attr = get_ia32_x87_attr(n);
1070 attr->x87[0] = op1 = get_st_reg(0);
1071 attr->x87[2] = out = get_st_reg(0);
1072 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1074 return NO_NODE_ADDED;
1078 * Simulate a virtual Load instruction.
1080 * @param state the x87 state
1081 * @param n the node that should be simulated (and patched)
1082 * @param op the x87 opcode that will replace n's opcode
1084 * @return NO_NODE_ADDED
1086 static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
1088 const arch_register_t *out = x87_irn_get_register(n, res_pos);
1089 ia32_x87_attr_t *attr;
1091 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
1092 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1093 assert(out == x87_irn_get_register(n, res_pos));
1094 attr = get_ia32_x87_attr(n);
1095 attr->x87[2] = out = get_st_reg(0);
1096 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1098 return NO_NODE_ADDED;
1102 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1104 * @param store The store
1105 * @param old_val The former value
1106 * @param new_val The new value
1108 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
1110 foreach_out_edge_safe(old_val, edge) {
1111 ir_node *user = get_edge_src_irn(edge);
1113 if (! user || user == store)
1116 /* if the user is scheduled after the store: rewire */
1117 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1119 /* find the input of the user pointing to the old value */
1120 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1121 if (get_irn_n(user, i) == old_val)
1122 set_irn_n(user, i, new_val);
1129 * Simulate a virtual Store.
1131 * @param state the x87 state
1132 * @param n the node that should be simulated (and patched)
1133 * @param op the x87 store opcode
1134 * @param op_p the x87 store and pop opcode
1136 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p)
1138 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1139 const arch_register_t *op2 = x87_get_irn_register(val);
1140 unsigned live = vfp_live_args_after(state->sim, n, 0);
1141 int insn = NO_NODE_ADDED;
1142 ia32_x87_attr_t *attr;
1143 int op2_reg_idx, op2_idx, depth;
1144 int live_after_node;
1147 op2_reg_idx = arch_register_get_index(op2);
1148 op2_idx = x87_on_stack(state, op2_reg_idx);
1149 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1150 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1151 assert(op2_idx >= 0);
1153 mode = get_ia32_ls_mode(n);
1154 depth = x87_get_depth(state);
1156 if (live_after_node) {
1158 Problem: fst doesn't support 96bit modes (spills), only fstp does
1159 fist doesn't support 64bit mode, only fistp
1161 - stack not full: push value and fstp
1162 - stack full: fstp value and load again
1163 Note that we cannot test on mode_E, because floats might be 96bit ...
1165 if (get_mode_size_bits(mode) > 64 || (mode_is_int(mode) && get_mode_size_bits(mode) > 32)) {
1166 if (depth < N_ia32_st_REGS) {
1167 /* ok, we have a free register: push + fstp */
1168 x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
1170 x87_patch_insn(n, op_p);
1172 ir_node *vfld, *mem, *block, *rproj, *mproj;
1173 ir_graph *irg = get_irn_irg(n);
1174 ir_node *nomem = get_irg_no_mem(irg);
1176 /* stack full here: need fstp + load */
1178 x87_patch_insn(n, op_p);
1180 block = get_nodes_block(n);
1181 vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), nomem, get_ia32_ls_mode(n));
1183 /* copy all attributes */
1184 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1185 if (is_ia32_use_frame(n))
1186 set_ia32_use_frame(vfld);
1187 set_ia32_op_type(vfld, ia32_AddrModeS);
1188 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1189 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1190 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1192 rproj = new_r_Proj(vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1193 mproj = new_r_Proj(vfld, mode_M, pn_ia32_vfld_M);
1194 mem = get_irn_Proj_for_mode(n, mode_M);
1196 assert(mem && "Store memory not found");
1198 arch_set_irn_register(rproj, op2);
1200 /* reroute all former users of the store memory to the load memory */
1201 edges_reroute(mem, mproj);
1202 /* set the memory input of the load to the store memory */
1203 set_irn_n(vfld, n_ia32_vfld_mem, mem);
1205 sched_add_after(n, vfld);
1206 sched_add_after(vfld, rproj);
1208 /* rewire all users, scheduled after the store, to the loaded value */
1209 collect_and_rewire_users(n, val, rproj);
1214 /* we can only store the tos to memory */
1216 x87_create_fxch(state, n, op2_idx);
1218 /* mode size 64 or smaller -> use normal fst */
1219 x87_patch_insn(n, op);
1222 /* we can only store the tos to memory */
1224 x87_create_fxch(state, n, op2_idx);
1227 x87_patch_insn(n, op_p);
1230 attr = get_ia32_x87_attr(n);
1231 attr->x87[1] = op2 = get_st_reg(0);
1232 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
1237 #define _GEN_BINOP(op, rev) \
1238 static int sim_##op(x87_state *state, ir_node *n) { \
1239 exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
1240 return sim_binop(state, n, &tmpl); \
1243 #define GEN_BINOP(op) _GEN_BINOP(op, op)
1244 #define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
1246 #define GEN_LOAD(op) \
1247 static int sim_##op(x87_state *state, ir_node *n) { \
1248 return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
1251 #define GEN_UNOP(op) \
1252 static int sim_##op(x87_state *state, ir_node *n) { \
1253 return sim_unop(state, n, op_ia32_##op); \
1256 #define GEN_STORE(op) \
1257 static int sim_##op(x87_state *state, ir_node *n) { \
1258 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
1280  * Simulate a virtual fisttp.
1282  * @param state the x87 state
1283  * @param n the node that should be simulated (and patched)
1285  * @return NO_NODE_ADDED
1287 static int sim_fisttp(x87_state *state, ir_node *n)
1289 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1290 const arch_register_t *op2 = x87_get_irn_register(val);
1291 ia32_x87_attr_t *attr;
1292 int op2_reg_idx, op2_idx;
/* locate the value to be stored on the simulated fpu stack */
1294 op2_reg_idx = arch_register_get_index(op2);
1295 op2_idx = x87_on_stack(state, op2_reg_idx);
1296 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1297 assert(op2_idx >= 0);
1299 /* Note: although the value is still live here, it is destroyed because
1300 of the pop. The register allocator is aware of that and introduced a copy
1301 if the value must be alive. */
1303 /* we can only store the tos to memory */
/* NOTE(review): the fxch is presumably guarded by op2_idx != 0 in the full
 * source (only exchange when the value is not already tos) — the guard is
 * elided in this excerpt; confirm against the complete file. */
1305 x87_create_fxch(state, n, op2_idx);
/* rewrite the virtual node into the real ia32 fisttp (store int + pop) */
1308 x87_patch_insn(n, op_ia32_fisttp);
1310 attr = get_ia32_x87_attr(n);
/* fisttp always operates on st(0) */
1311 attr->x87[1] = op2 = get_st_reg(0);
1312 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
1314 return NO_NODE_ADDED;
1318  * Simulate a virtual FtstFnstsw.
1320  * @param state the x87 state
1321  * @param n the node that should be simulated (and patched)
1323  * @return NO_NODE_ADDED
1325 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1327 x87_simulator *sim = state->sim;
1328 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1329 ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1330 const arch_register_t *reg1 = x87_get_irn_register(op1_node);
1331 int reg_index_1 = arch_register_get_index(reg1);
1332 int op1_idx = x87_on_stack(state, reg_index_1);
1333 unsigned live = vfp_live_args_after(sim, n, 0);
1335 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, arch_register_get_name(reg1)));
1336 DEBUG_ONLY(vfp_dump_live(live);)
1337 DB((dbg, LEVEL_1, "Stack before: "));
1338 DEBUG_ONLY(x87_dump_stack(state);)
1339 assert(op1_idx >= 0);
1342 /* bring the value to tos */
/* NOTE(review): presumably guarded by op1_idx != 0 in the full source;
 * the guard line is elided in this excerpt. */
1343 x87_create_fxch(state, n, op1_idx);
1347 /* patch the operation */
1348 x87_patch_insn(n, op_ia32_FtstFnstsw);
/* ftst implicitly tests st(0); record the register in the attributes */
1349 reg1 = get_st_reg(op1_idx);
1350 attr->x87[0] = reg1;
1351 attr->x87[1] = NULL;
1352 attr->x87[2] = NULL;
/* if the tested value dies here, pop it right after the test */
1354 if (!is_vfp_live(reg_index_1, live))
1355 x87_create_fpop(state, sched_next(n), 1);
1357 return NO_NODE_ADDED;
1363  * @param state the x87 state
1364  * @param n the node that should be simulated (and patched)
1366  * @return NO_NODE_ADDED
1368 static int sim_Fucom(x87_state *state, ir_node *n)
1372 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1374 x87_simulator *sim = state->sim;
1375 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1376 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1377 const arch_register_t *op1 = x87_get_irn_register(op1_node);
1378 const arch_register_t *op2 = x87_get_irn_register(op2_node);
1379 int reg_index_1 = arch_register_get_index(op1);
1380 int reg_index_2 = arch_register_get_index(op2);
1381 unsigned live = vfp_live_args_after(sim, n, 0);
1382 bool permuted = attr->attr.data.ins_permuted;
1386 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1387 arch_register_get_name(op1), arch_register_get_name(op2)));
1388 DEBUG_ONLY(vfp_dump_live(live);)
1389 DB((dbg, LEVEL_1, "Stack before: "));
1390 DEBUG_ONLY(x87_dump_stack(state);)
1392 op1_idx = x87_on_stack(state, reg_index_1);
1393 assert(op1_idx >= 0);
1395 /* BEWARE: check for comp a,a cases, they might happen */
1396 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1397 /* second operand is a vfp register */
1398 op2_idx = x87_on_stack(state, reg_index_2);
1399 assert(op2_idx >= 0);
/* Case analysis: fucom-family compares st(0) against another stack slot,
 * so one operand must be brought to tos with fxch.  When the operand order
 * ends up swapped, `permuted` is toggled so the condition is fixed up later.
 * Dead operands are popped via the fucomp/fucompp variants or explicit
 * fpops (pop bookkeeping lines are elided in this excerpt). */
1401 if (is_vfp_live(reg_index_2, live)) {
1402 /* second operand is live */
1404 if (is_vfp_live(reg_index_1, live)) {
1405 /* both operands are live */
1408 /* res = tos X op */
1409 } else if (op2_idx == 0) {
1410 /* res = op X tos */
1411 permuted = !permuted;
1414 /* bring the first one to tos */
1415 x87_create_fxch(state, n, op1_idx);
1416 if (op1_idx == op2_idx) {
1418 } else if (op2_idx == 0) {
1422 /* res = tos X op */
1425 /* second live, first operand is dead here, bring it to tos.
1426 This means further, op1_idx != op2_idx. */
1427 assert(op1_idx != op2_idx);
1429 x87_create_fxch(state, n, op1_idx);
1434 /* res = tos X op, pop */
1438 /* second operand is dead */
1439 if (is_vfp_live(reg_index_1, live)) {
1440 /* first operand is live: bring second to tos.
1441 This means further, op1_idx != op2_idx. */
1442 assert(op1_idx != op2_idx);
1444 x87_create_fxch(state, n, op2_idx);
1449 /* res = op X tos, pop */
1451 permuted = !permuted;
1454 /* both operands are dead here, check first for identity. */
1455 if (op1_idx == op2_idx) {
1456 /* identically, one pop needed */
1458 x87_create_fxch(state, n, op1_idx);
1462 /* res = tos X op, pop */
1465 /* different, move them to st and st(1) and pop both.
1466 The tricky part is to get one into st(1).*/
1467 else if (op2_idx == 1) {
1468 /* good, second operand is already in the right place, move the first */
1470 /* bring the first on top */
1471 x87_create_fxch(state, n, op1_idx);
1472 assert(op2_idx != 0);
1475 /* res = tos X op, pop, pop */
1477 } else if (op1_idx == 1) {
1478 /* good, first operand is already in the right place, move the second */
1480 /* bring the first on top */
1481 x87_create_fxch(state, n, op2_idx);
1482 assert(op1_idx != 0);
1485 /* res = op X tos, pop, pop */
1486 permuted = !permuted;
1490 /* if one is already the TOS, we need two fxch */
1492 /* first one is TOS, move to st(1) */
1493 x87_create_fxch(state, n, 1);
1494 assert(op2_idx != 1);
1496 x87_create_fxch(state, n, op2_idx);
1498 /* res = op X tos, pop, pop */
1500 permuted = !permuted;
1502 } else if (op2_idx == 0) {
1503 /* second one is TOS, move to st(1) */
1504 x87_create_fxch(state, n, 1);
1505 assert(op1_idx != 1);
1507 x87_create_fxch(state, n, op1_idx);
1509 /* res = tos X op, pop, pop */
1512 /* none of them is either TOS or st(1), 3 fxch needed */
1513 x87_create_fxch(state, n, op2_idx);
1514 assert(op1_idx != 0);
1515 x87_create_fxch(state, n, 1);
1517 x87_create_fxch(state, n, op1_idx);
1519 /* res = tos X op, pop, pop */
1526 /* second operand is an address mode */
1527 if (is_vfp_live(reg_index_1, live)) {
1528 /* first operand is live: bring it to TOS */
1530 x87_create_fxch(state, n, op1_idx);
1534 /* first operand is dead: bring it to tos */
1536 x87_create_fxch(state, n, op1_idx);
/* patch the virtual node into the real opcode chosen by the number of
 * pops accumulated above (fucom / fucomp / fucompp, or fucomi variants) */
1543 /* patch the operation */
1544 if (is_ia32_vFucomFnstsw(n)) {
1548 case 0: dst = op_ia32_FucomFnstsw; break;
1549 case 1: dst = op_ia32_FucompFnstsw; break;
1550 case 2: dst = op_ia32_FucomppFnstsw; break;
1551 default: panic("invalid popcount");
1554 for (i = 0; i < pops; ++i) {
1557 } else if (is_ia32_vFucomi(n)) {
1559 case 0: dst = op_ia32_Fucomi; break;
1560 case 1: dst = op_ia32_Fucompi; x87_pop(state); break;
/* fucomi has no double-pop form: use fucompi plus one extra fpop */
1562 dst = op_ia32_Fucompi;
1564 x87_create_fpop(state, sched_next(n), 1);
1566 default: panic("invalid popcount");
1569 panic("invalid operation %+F", n);
1572 x87_patch_insn(n, dst);
/* record the final physical st() registers in the x87 attributes */
1579 op1 = get_st_reg(op1_idx);
1582 op2 = get_st_reg(op2_idx);
1585 attr->x87[2] = NULL;
1586 attr->attr.data.ins_permuted = permuted;
1589 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1590 arch_register_get_name(op1), arch_register_get_name(op2)));
1592 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1593 arch_register_get_name(op1)));
1596 return NO_NODE_ADDED;
1602  * @param state the x87 state
1603  * @param n the node that should be simulated (and patched)
1605  * @return NO_NODE_ADDED
1607 static int sim_Keep(x87_state *state, ir_node *node)
1610 const arch_register_t *op_reg;
1616 DB((dbg, LEVEL_1, ">>> %+F\n", node));
/* walk over all kept inputs and pop every fp value that is dead after
 * the Keep but still sits on the simulated stack */
1618 arity = get_irn_arity(node);
1619 for (i = 0; i < arity; ++i) {
1620 op = get_irn_n(node, i);
1621 op_reg = arch_get_irn_register(op);
/* only x87 (vfp) registers are of interest here */
1622 if (arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1625 reg_id = arch_register_get_index(op_reg);
1626 live = vfp_live_args_after(state->sim, node, 0);
1628 op_stack_idx = x87_on_stack(state, reg_id);
/* on stack but dead afterwards -> emit an fpop behind the Keep */
1629 if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live))
1630 x87_create_fpop(state, sched_next(node), 1);
1633 DB((dbg, LEVEL_1, "Stack after: "));
1634 DEBUG_ONLY(x87_dump_stack(state);)
1636 return NO_NODE_ADDED;
1640 * Keep the given node alive by adding a be_Keep.
1642 * @param node the node to kept alive
1644 static void keep_float_node_alive(ir_node *node)
1646 ir_node *block = get_nodes_block(node);
1647 ir_node *keep = be_new_Keep(block, 1, &node);
1649 assert(sched_is_scheduled(node));
1650 sched_add_after(node, keep);
1654  * Create a copy of a node. Recreate the node if it's a constant.
1656  * @param state the x87 state
1657  * @param n the node to be copied
1659  * @return the copy of n
1661 static ir_node *create_Copy(x87_state *state, ir_node *n)
1663 dbg_info *n_dbg = get_irn_dbg_info(n);
1664 ir_mode *mode = get_irn_mode(n);
1665 ir_node *block = get_nodes_block(n);
1666 ir_node *pred = get_irn_n(n, 0);
/* constructor for a recreated fp-constant load, NULL if pred is no constant */
1667 ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL;
1669 const arch_register_t *out;
1670 const arch_register_t *op1;
1671 ia32_x87_attr_t *attr;
1673 /* Do not copy constants, recreate them. */
/* NOTE(review): the matching `case iro_ia32_fldz:` / `case iro_ia32_fld1:`
 * labels and `break;` lines are elided in this excerpt. */
1674 switch (get_ia32_irn_opcode(pred)) {
1676 cnstr = new_bd_ia32_fldz;
1679 cnstr = new_bd_ia32_fld1;
1681 case iro_ia32_fldpi:
1682 cnstr = new_bd_ia32_fldpi;
1684 case iro_ia32_fldl2e:
1685 cnstr = new_bd_ia32_fldl2e;
1687 case iro_ia32_fldl2t:
1688 cnstr = new_bd_ia32_fldl2t;
1690 case iro_ia32_fldlg2:
1691 cnstr = new_bd_ia32_fldlg2;
1693 case iro_ia32_fldln2:
1694 cnstr = new_bd_ia32_fldln2;
1700 out = x87_get_irn_register(n);
1701 op1 = x87_get_irn_register(pred);
1703 if (cnstr != NULL) {
1704 /* copy a constant: reload it instead of copying the stack slot */
1705 res = (*cnstr)(n_dbg, block, mode);
1707 x87_push(state, arch_register_get_index(out), res);
1709 attr = get_ia32_x87_attr(res);
/* the reloaded constant becomes the new tos */
1710 attr->x87[2] = get_st_reg(0);
/* non-constant: emit an fpushCopy (fld st(op1_idx)) of the operand */
1712 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1714 res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);
1716 x87_push(state, arch_register_get_index(out), res);
1718 attr = get_ia32_x87_attr(res);
/* source slot (after the push) and destination tos */
1719 attr->x87[0] = get_st_reg(op1_idx);
1720 attr->x87[2] = get_st_reg(0);
1722 arch_set_irn_register(res, out);
1728  * Simulate a be_Copy.
1730  * @param state the x87 state
1731  * @param n the node that should be simulated (and patched)
1733  * @return NO_NODE_ADDED
1735 static int sim_Copy(x87_state *state, ir_node *n)
1738 const arch_register_t *out;
1739 const arch_register_t *op1;
1740 const arch_register_class_t *cls;
1741 ir_node *node, *next;
1742 int op1_idx, out_idx;
/* non-fp copies are none of our business */
1745 cls = arch_get_irn_reg_class(n);
1746 if (cls != &ia32_reg_classes[CLASS_ia32_vfp])
1749 pred = be_get_Copy_op(n);
1750 out = x87_get_irn_register(n);
1751 op1 = x87_get_irn_register(pred);
1752 live = vfp_live_args_after(state->sim, n, REGMASK(out));
1754 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1755 arch_register_get_name(op1), arch_register_get_name(out)));
1756 DEBUG_ONLY(vfp_dump_live(live);)
1758 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1760 if (is_vfp_live(arch_register_get_index(op1), live)) {
1761 /* Operand is still live, a real copy. We need here an fpush that can
1762 hold a a register, so use the fpushCopy or recreate constants */
1763 node = create_Copy(state, n);
1765 /* We have to make sure the old value doesn't go dead (which can happen
1766 * when we recreate constants). As the simulator expected that value in
1767 * the pred blocks. This is unfortunate as removing it would save us 1
1768 * instruction, but we would have to rerun all the simulation to get
1771 next = sched_next(n);
/* schedule the replacement where the Copy used to be */
1774 sched_add_before(next, node);
/* if recreating a constant left pred without users, keep it alive */
1776 if (get_irn_n_edges(pred) == 0) {
1777 keep_float_node_alive(pred);
1780 DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
/* operand is dead: the copy degenerates to a rename or a pop */
1782 out_idx = x87_on_stack(state, arch_register_get_index(out));
1784 if (out_idx >= 0 && out_idx != op1_idx) {
1785 /* Matze: out already on stack? how can this happen? */
1786 panic("invalid stack state");
1789 /* op1 must be killed and placed where out is */
/* NOTE(review): the guard distinguishing the op1_idx == 0 fast path from
 * the fxch path below is elided in this excerpt. */
1791 ia32_x87_attr_t *attr;
1792 /* best case, simple remove and rename */
1793 x87_patch_insn(n, op_ia32_Pop);
1794 attr = get_ia32_x87_attr(n);
1795 attr->x87[0] = op1 = get_st_reg(0);
1798 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1800 ia32_x87_attr_t *attr;
1801 /* move op1 to tos, store and pop it */
1803 x87_create_fxch(state, n, op1_idx);
1806 x87_patch_insn(n, op_ia32_Pop);
1807 attr = get_ia32_x87_attr(n);
1808 attr->x87[0] = op1 = get_st_reg(out_idx);
1811 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1813 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1816 /* just a virtual copy: rename the stack slot, emit nothing */
1817 x87_set_st(state, arch_register_get_index(out), pred, op1_idx);
1818 /* don't remove the node to keep the verifier quiet :),
1819 the emitter won't emit any code for the node */
1822 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1827 return NO_NODE_ADDED;
1831  * Returns the vf0 result Proj of a Call.
1833  * @param call the Call node
1835 static ir_node *get_call_result_proj(ir_node *call)
1837 /* search the result proj */
1838 foreach_out_edge(call, edge) {
1839 ir_node *proj = get_edge_src_irn(edge);
1840 long pn = get_Proj_proj(proj);
/* return the proj carrying the fp result; NOTE(review): the `return proj;`
 * and trailing `return NULL;` lines are elided in this excerpt. */
1842 if (pn == pn_ia32_Call_vf0)
1849 static int sim_Asm(x87_state *const state, ir_node *const n)
1853 for (size_t i = get_irn_arity(n); i-- != 0;) {
1854 arch_register_req_t const *const req = arch_get_irn_register_req_in(n, i);
1855 if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1856 panic("cannot handle %+F with x87 constraints", n);
1859 for (size_t i = arch_get_irn_n_outs(n); i-- != 0;) {
1860 arch_register_req_t const *const req = arch_get_irn_register_req_out(n, i);
1861 if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1862 panic("cannot handle %+F with x87 constraints", n);
1865 return NO_NODE_ADDED;
1869  * Simulate a ia32_Call.
1871  * @param state the x87 state
1872  * @param n the node that should be simulated (and patched)
1874  * @return NO_NODE_ADDED
1876 static int sim_Call(x87_state *state, ir_node *n)
1878 ir_type *call_tp = get_ia32_call_attr_const(n)->call_tp;
1882 const arch_register_t *reg;
1884 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1886 /* at the begin of a call the x87 state should be empty */
1887 assert(state->depth == 0 && "stack not empty before call");
/* no result -> nothing lands on the fpu stack */
1889 if (get_method_n_ress(call_tp) <= 0)
1893 * If the called function returns a float, it is returned in st(0).
1894 * This even happens if the return value is NOT used.
1895 * Moreover, only one return result is supported.
1897 res_type = get_method_res_type(call_tp, 0);
1898 mode = get_type_mode(res_type);
/* only float results occupy st(0) */
1900 if (mode == NULL || !mode_is_float(mode))
1903 resproj = get_call_result_proj(n);
1904 assert(resproj != NULL);
/* model the ABI: the result appears as new tos after the call */
1906 reg = x87_get_irn_register(resproj);
1907 x87_push(state, arch_register_get_index(reg), resproj);
1910 DB((dbg, LEVEL_1, "Stack after: "));
1911 DEBUG_ONLY(x87_dump_stack(state);)
1913 return NO_NODE_ADDED;
1917  * Simulate a be_Return.
1919  * @param state the x87 state
1920  * @param n the node that should be simulated (and patched)
1922  * @return NO_NODE_ADDED
1924 static int sim_Return(x87_state *state, ir_node *n)
1926 #ifdef DEBUG_libfirm
1927 /* only floating point return values must reside on stack */
/* count the float results and check they match the simulated stack depth */
1928 int n_float_res = 0;
1929 int const n_res = be_Return_get_n_rets(n);
1930 for (int i = 0; i < n_res; ++i) {
1931 ir_node *const res = get_irn_n(n, n_be_Return_val + i);
1932 if (mode_is_float(get_irn_mode(res)))
1935 assert(x87_get_depth(state) == n_float_res);
1938 /* pop them virtually */
/* NOTE(review): the loop/statement actually emptying the simulated stack
 * is elided in this excerpt — the return values are consumed by the ABI,
 * so only the bookkeeping state is cleared, no fpop instructions. */
1940 return NO_NODE_ADDED;
1944  * Simulate a be_Perm.
1946  * @param state the x87 state
1947  * @param irn the node that should be simulated (and patched)
1949  * @return NO_NODE_ADDED
1951 static int sim_Perm(x87_state *state, ir_node *irn)
1954 ir_node *pred = get_irn_n(irn, 0);
1957 /* handle only floating point Perms */
1958 if (! mode_is_float(get_irn_mode(pred)))
1959 return NO_NODE_ADDED;
1961 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1963 /* Perm is a pure virtual instruction on x87.
1964 All inputs must be on the FPU stack and are pairwise
1965 different from each other.
1966 So, all we need to do is to permutate the stack state. */
1967 n = get_irn_arity(irn);
1968 NEW_ARR_A(int, stack_pos, n);
1970 /* collect old stack positions */
1971 for (i = 0; i < n; ++i) {
1972 const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
1973 int idx = x87_on_stack(state, arch_register_get_index(inreg));
1975 assert(idx >= 0 && "Perm argument not on x87 stack");
1979 /* now do the permutation */
/* rebind each Proj's output register to its input's old stack slot;
 * no instructions are emitted */
1980 foreach_out_edge(irn, edge) {
1981 ir_node *proj = get_edge_src_irn(edge);
1982 const arch_register_t *out = x87_get_irn_register(proj);
1983 long num = get_Proj_proj(proj);
1985 assert(0 <= num && num < n && "More Proj's than Perm inputs");
1986 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
1988 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1990 return NO_NODE_ADDED;
1994  * Kill any dead registers at block start by popping them from the stack.
1996  * @param sim the simulator handle
1997  * @param block the current block
1998  * @param state the x87 state at the begin of the block
2000 static void x87_kill_deads(x87_simulator *const sim, ir_node *const block, x87_state *const state)
2002 ir_node *first_insn = sched_first(block);
2003 ir_node *keep = NULL;
2004 unsigned live = vfp_live_args_after(sim, block, 0);
2006 int i, depth, num_pop;
/* build a bitmask of stack slots holding values dead at block entry */
2009 depth = x87_get_depth(state);
2010 for (i = depth - 1; i >= 0; --i) {
2011 int reg = x87_get_st_reg(state, i);
2013 if (! is_vfp_live(reg, live))
2014 kill_mask |= (1 << i);
2018 DB((dbg, LEVEL_1, "Killing deads:\n"));
2019 DEBUG_ONLY(vfp_dump_live(live);)
2020 DEBUG_ONLY(x87_dump_stack(state);)
2022 if (kill_mask != 0 && live == 0) {
2023 /* special case: kill all registers */
2024 if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
2025 if (ia32_cg_config.use_femms) {
2026 /* use FEMMS on AMD processors to clear all */
2027 keep = new_bd_ia32_femms(NULL, block);
2029 /* use EMMS to clear all */
2030 keep = new_bd_ia32_emms(NULL, block);
2032 sched_add_before(first_insn, keep);
2038 /* now kill registers */
/* pop dead values one or two at a time; fxch brings a dead value to
 * tos first when the current tos is live */
2040 /* we can only kill from TOS, so bring them up */
2041 if (! (kill_mask & 1)) {
2042 /* search from behind, because we can to a double-pop */
2043 for (i = depth - 1; i >= 0; --i) {
2044 if (kill_mask & (1 << i)) {
2045 kill_mask &= ~(1 << i);
2052 x87_set_st(state, -1, keep, i);
2053 x87_create_fxch(state, first_insn, i);
2056 if ((kill_mask & 3) == 3) {
2057 /* we can do a double-pop */
2061 /* only a single pop */
/* shift the processed slots out of the mask and emit the pop(s) */
2066 kill_mask >>= num_pop;
2067 keep = x87_create_fpop(state, first_insn, num_pop);
2074  * Run a simulation and fix all virtual instructions for a block.
2076  * @param sim the simulator handle
2077  * @param block the current block
2079 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
2082 blk_state *bl_state = x87_get_bl_state(sim, block);
2083 x87_state *state = bl_state->begin;
2084 ir_node *start_block;
2086 assert(state != NULL);
2087 /* already processed? */
2088 if (bl_state->end != NULL)
2091 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2092 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2093 DEBUG_ONLY(x87_dump_stack(state);)
2095 /* create a new state, will be changed */
2096 state = x87_clone_state(sim, state);
2097 /* at block begin, kill all dead registers */
2098 x87_kill_deads(sim, block, state);
2100 /* beware, n might change */
2101 for (n = sched_first(block); !sched_is_end(n); n = next) {
2104 ir_op *op = get_irn_op(n);
2107 * get the next node to be simulated here.
2108 * n might be completely removed from the schedule-
2110 next = sched_next(n);
/* dispatch to the opcode's simulator function if one was registered */
2111 if (op->ops.generic != NULL) {
2112 func = (sim_func)op->ops.generic;
2115 node_inserted = (*func)(state, n);
2118 * sim_func might have added an additional node after n,
2119 * so update next node
2120 * beware: n must not be changed by sim_func
2121 * (i.e. removed from schedule) in this case
2123 if (node_inserted != NO_NODE_ADDED)
2124 next = sched_next(n);
2128 start_block = get_irg_start_block(get_irn_irg(block));
2130 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state);)
2132 /* check if the state must be shuffled */
/* propagate the end state to all successors; first visitor fixes the
 * successor's begin state, later visitors must shuffle to match it */
2133 foreach_block_succ(block, edge) {
2134 ir_node *succ = get_edge_src_irn(edge);
2135 blk_state *succ_state;
2137 if (succ == start_block)
2140 succ_state = x87_get_bl_state(sim, succ);
2142 if (succ_state->begin == NULL) {
2143 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2144 DEBUG_ONLY(x87_dump_stack(state);)
2145 succ_state->begin = state;
2147 waitq_put(sim->worklist, succ);
2149 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2150 /* There is already a begin state for the successor, bad.
2151 Do the necessary permutations.
2152 Note that critical edges are removed, so this is always possible:
2153 If the successor has more than one possible input, then it must
2156 x87_shuffle(block, state, succ_state->begin);
2159 bl_state->end = state;
2163  * Register a simulator function.
2165  * @param op the opcode to simulate
2166  * @param func the simulator function for the opcode
2168 static void register_sim(ir_op *op, sim_func func)
/* each opcode may have at most one simulator; stored in the generic slot */
2170 assert(op->ops.generic == NULL);
2171 op->ops.generic = (op_func) func;
2175  * Create a new x87 simulator.
2177  * @param sim a simulator handle, will be initialized
2178  * @param irg the current graph
2180 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
2182 obstack_init(&sim->obst);
2183 sim->blk_states = pmap_create();
/* liveness cache is indexed by node index, so size it accordingly */
2184 sim->n_idx = get_irg_last_idx(irg);
2185 sim->live = OALLOCN(&sim->obst, vfp_liveness, sim->n_idx);
2187 DB((dbg, LEVEL_1, "--------------------------------\n"
2188 "x87 Simulator started for %+F\n", irg));
2190 /* set the generic function pointer of instruction we must simulate */
2191 ir_clear_opcodes_generic_func();
2193 register_sim(op_ia32_Asm, sim_Asm);
2194 register_sim(op_ia32_Call, sim_Call);
2195 register_sim(op_ia32_vfld, sim_fld);
2196 register_sim(op_ia32_vfild, sim_fild);
2197 register_sim(op_ia32_vfld1, sim_fld1);
2198 register_sim(op_ia32_vfldz, sim_fldz);
2199 register_sim(op_ia32_vfadd, sim_fadd);
2200 register_sim(op_ia32_vfsub, sim_fsub);
2201 register_sim(op_ia32_vfmul, sim_fmul);
2202 register_sim(op_ia32_vfdiv, sim_fdiv);
2203 register_sim(op_ia32_vfprem, sim_fprem);
2204 register_sim(op_ia32_vfabs, sim_fabs);
2205 register_sim(op_ia32_vfchs, sim_fchs);
2206 register_sim(op_ia32_vfist, sim_fist);
2207 register_sim(op_ia32_vfisttp, sim_fisttp);
2208 register_sim(op_ia32_vfst, sim_fst);
2209 register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
2210 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
2211 register_sim(op_ia32_vFucomi, sim_Fucom);
2212 register_sim(op_be_Copy, sim_Copy);
2213 register_sim(op_be_Return, sim_Return);
2214 register_sim(op_be_Perm, sim_Perm);
2215 register_sim(op_be_Keep, sim_Keep);
2219  * Destroy an x87 simulator, releasing all memory it owns.
2221  * @param sim the simulator handle
2223 static void x87_destroy_simulator(x87_simulator *sim)
/* block states live on the obstack; freeing the obstack releases them */
2225 pmap_destroy(sim->blk_states);
2226 obstack_free(&sim->obst, NULL);
2227 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2231 * Pre-block walker: calculate the liveness information for the block
2232 * and store it into the sim->live cache.
2234 static void update_liveness_walker(ir_node *block, void *data)
2236 x87_simulator *sim = (x87_simulator*)data;
2237 update_liveness(sim, block);
2241  * Run a simulation and fix all virtual instructions for a graph.
2242  * Replaces all virtual floating point instructions and registers
2245 void ia32_x87_simulate_graph(ir_graph *irg)
2247 /* TODO improve code quality (less executed fxch) by using execfreqs */
2249 ir_node *block, *start_block;
2250 blk_state *bl_state;
2253 /* create the simulator */
2254 x87_init_simulator(&sim, irg);
2256 start_block = get_irg_start_block(irg);
2257 bl_state = x87_get_bl_state(&sim, start_block);
2259 /* start with the empty state */
2260 bl_state->begin = empty;
/* worklist-driven traversal starting at the start block */
2263 sim.worklist = new_waitq();
2264 waitq_put(sim.worklist, start_block);
2266 be_assure_live_sets(irg);
2267 sim.lv = be_get_irg_liveness(irg);
2269 /* Calculate the liveness for all nodes. We must precalculate this info,
2270 * because the simulator adds new nodes (possible before Phi nodes) which
2271 * would let a lazy calculation fail.
2272 * On the other hand we reduce the computation amount due to
2273 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2275 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
/* simulate until every reachable block has a fixed begin/end state */
2279 block = (ir_node*)waitq_get(sim.worklist);
2280 x87_simulate_block(&sim, block);
2281 } while (! waitq_empty(sim.worklist));
/* cleanup */
2284 del_waitq(sim.worklist);
2285 x87_destroy_simulator(&sim);
2288 /* Initializes the x87 simulator. */
2289 void ia32_init_x87(void)
2291 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");