2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
34 #include "iredges_t.h"
46 #include "../belive_t.h"
47 #include "../besched.h"
48 #include "../benode.h"
49 #include "bearch_ia32_t.h"
50 #include "ia32_new_nodes.h"
51 #include "gen_ia32_new_nodes.h"
52 #include "gen_ia32_regalloc_if.h"
54 #include "ia32_architecture.h"
61 #define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
63 /** the debug handle */
64 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
66 /* Forward declaration. */
67 typedef struct _x87_simulator x87_simulator;
70 * An exchange template.
71 * Note that our virtual functions have the same inputs
72 * and attributes as the real ones, so we can simply exchange
74 * Further, x87 supports inverse instructions, so we can handle them.
76 typedef struct _exchange_tmpl {
77 ir_op *normal_op; /**< the normal one */
78 ir_op *reverse_op; /**< the reverse one if exists */
79 ir_op *normal_pop_op; /**< the normal one with tos pop */
80 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
84 * An entry on the simulated x87 stack.
86 typedef struct _st_entry {
87 int reg_idx; /**< the virtual register index of this stack value */
88 ir_node *node; /**< the node that produced this value */
94 typedef struct _x87_state {
95 st_entry st[N_x87_REGS]; /**< the register stack */
96 int depth; /**< the current stack depth */
97 int tos; /**< position of the tos */
98 x87_simulator *sim; /**< The simulator. */
101 /** An empty state, used for blocks without fp instructions. */
102 static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
103 static x87_state *empty = (x87_state *)&_empty;
106 * Return values of the instruction simulator functions.
109 NO_NODE_ADDED = 0, /**< No node that needs simulation was added. */
110 NODE_ADDED = 1 /**< A node that must be simulated was added by the simulator
111 in the schedule AFTER the current node. */
115 * The type of an instruction simulator function.
117 * @param state the x87 state
118 * @param n the node to be simulated
120 * @return NODE_ADDED if a node was added AFTER n in schedule that MUST be
122 * NO_NODE_ADDED otherwise
124 typedef int (*sim_func)(x87_state *state, ir_node *n);
127 * A block state: Every block has a x87 state at the beginning and at the end.
129 typedef struct _blk_state {
130 x87_state *begin; /**< state at the begin or NULL if not assigned */
131 x87_state *end; /**< state at the end or NULL if not assigned */
134 #define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
136 /** liveness bitset for vfp registers. */
137 typedef unsigned char vfp_liveness;
142 struct _x87_simulator {
143 struct obstack obst; /**< An obstack for fast allocating. */
144 pmap *blk_states; /**< Map blocks to states. */
145 be_lv_t *lv; /**< intrablock liveness. */
146 vfp_liveness *live; /**< Liveness information. */
147 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
148 waitq *worklist; /**< Worklist of blocks that must be processed. */
149 ia32_isa_t *isa; /**< the ISA object */
153 * Returns the current stack depth.
155 * @param state the x87 state
157 * @return the x87 stack depth
159 static int x87_get_depth(const x87_state *state)
162 } /* x87_get_depth */
165 * Return the virtual register index at st(pos).
167 * @param state the x87 state
168 * @param pos a stack position
170 * @return the vfp register index that produced the value at st(pos)
172 static int x87_get_st_reg(const x87_state *state, int pos)
174 assert(pos < state->depth);
175 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
176 } /* x87_get_st_reg */
180 * Return the node at st(pos).
182 * @param state the x87 state
183 * @param pos a stack position
185 * @return the IR node that produced the value at st(pos)
187 static ir_node *x87_get_st_node(const x87_state *state, int pos)
189 assert(pos < state->depth);
190 return state->st[MASK_TOS(state->tos + pos)].node;
191 } /* x87_get_st_node */
194 * Dump the stack for debugging.
196 * @param state the x87 state
198 static void x87_dump_stack(const x87_state *state)
202 for (i = state->depth - 1; i >= 0; --i) {
203 DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
204 x87_get_st_node(state, i)));
206 DB((dbg, LEVEL_2, "<-- TOS\n"));
207 } /* x87_dump_stack */
208 #endif /* DEBUG_libfirm */
211 * Set a virtual register to st(pos).
213 * @param state the x87 state
214 * @param reg_idx the vfp register index that should be set
215 * @param node the IR node that produces the value of the vfp register
216 * @param pos the stack position where the new value should be entered
218 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
220 assert(0 < state->depth);
221 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
222 state->st[MASK_TOS(state->tos + pos)].node = node;
224 DB((dbg, LEVEL_2, "After SET_REG: "));
225 DEBUG_ONLY(x87_dump_stack(state));
229 * Set the tos virtual register.
231 * @param state the x87 state
232 * @param reg_idx the vfp register index that should be set
233 * @param node the IR node that produces the value of the vfp register
235 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node)
237 x87_set_st(state, reg_idx, node, 0);
241 * Swap st(0) with st(pos).
243 * @param state the x87 state
244 * @param pos the stack position to change the tos with
246 static void x87_fxch(x87_state *state, int pos)
249 assert(pos < state->depth);
251 entry = state->st[MASK_TOS(state->tos + pos)];
252 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
253 state->st[MASK_TOS(state->tos)] = entry;
255 DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
259 * Convert a virtual register to the stack index.
261 * @param state the x87 state
262 * @param reg_idx the register vfp index
264 * @return the stack position where the register is stacked
265 * or -1 if the virtual register was not found
267 static int x87_on_stack(const x87_state *state, int reg_idx)
269 int i, tos = state->tos;
271 for (i = 0; i < state->depth; ++i)
272 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
278 * Push a virtual Register onto the stack, double pushes allowed.
280 * @param state the x87 state
281 * @param reg_idx the register vfp index
282 * @param node the node that produces the value of the vfp register
284 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node)
286 assert(state->depth < N_x87_REGS && "stack overrun");
289 state->tos = MASK_TOS(state->tos - 1);
290 state->st[state->tos].reg_idx = reg_idx;
291 state->st[state->tos].node = node;
293 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
297 * Push a virtual Register onto the stack, double pushes are NOT allowed.
299 * @param state the x87 state
300 * @param reg_idx the register vfp index
301 * @param node the node that produces the value of the vfp register
302 * Double pushes are rejected by an assertion; use x87_push_dbl() to allow them.
304 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
306 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
308 x87_push_dbl(state, reg_idx, node);
312 * Pop a virtual Register from the stack.
314 * @param state the x87 state
316 static void x87_pop(x87_state *state)
318 assert(state->depth > 0 && "stack underrun");
321 state->tos = MASK_TOS(state->tos + 1);
323 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
327 * Empty the fpu stack
329 * @param state the x87 state
331 static void x87_emms(x87_state *state)
338 * Returns the block state of a block.
340 * @param sim the x87 simulator handle
341 * @param block the current block
343 * @return the block state
345 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
347 pmap_entry *entry = pmap_find(sim->blk_states, block);
350 blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
351 bl_state->begin = NULL;
352 bl_state->end = NULL;
354 pmap_insert(sim->blk_states, block, bl_state);
358 return PTR_TO_BLKSTATE(entry->value);
359 } /* x87_get_bl_state */
362 * Creates a new x87 state.
364 * @param sim the x87 simulator handle
366 * @return a new x87 state
368 static x87_state *x87_alloc_state(x87_simulator *sim)
370 x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));
374 } /* x87_alloc_state */
379 * @param sim the x87 simulator handle
380 * @param src the x87 state that will be cloned
382 * @return a cloned copy of the src state
384 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
386 x87_state *res = x87_alloc_state(sim);
390 } /* x87_clone_state */
393 * Patch a virtual instruction into a x87 one and return
394 * the node representing the result value.
396 * @param n the IR node to patch
397 * @param op the x87 opcode to patch in
399 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
401 ir_mode *mode = get_irn_mode(n);
406 if (mode == mode_T) {
407 /* patch all Proj's */
408 const ir_edge_t *edge;
410 foreach_out_edge(n, edge) {
411 ir_node *proj = get_edge_src_irn(edge);
413 mode = get_irn_mode(proj);
414 if (mode_is_float(mode)) {
416 set_irn_mode(proj, ia32_reg_classes[CLASS_ia32_st].mode);
420 } else if (mode_is_float(mode))
421 set_irn_mode(n, ia32_reg_classes[CLASS_ia32_st].mode);
423 } /* x87_patch_insn */
426 * Returns the first Proj of a mode_T node having a given mode.
428 * @param n the mode_T node
429 * @param m the desired mode of the Proj
430 * @return The first Proj of mode @p m found or NULL.
432 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
434 const ir_edge_t *edge;
436 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
438 foreach_out_edge(n, edge) {
439 ir_node *proj = get_edge_src_irn(edge);
440 if (get_irn_mode(proj) == m)
445 } /* get_irn_Proj_for_mode */
448 * Wrap the arch_* function here so we can check for errors.
450 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
452 const arch_register_t *res = arch_get_irn_register(irn);
454 assert(res->reg_class->regs == ia32_vfp_regs);
456 } /* x87_get_irn_register */
458 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
461 const arch_register_t *res = arch_irn_get_register(irn, pos);
463 assert(res->reg_class->regs == ia32_vfp_regs);
465 } /* x87_irn_get_register */
467 /* -------------- x87 perm --------------- */
470 * Creates a fxch for shuffle.
472 * @param state the x87 state
473 * @param pos parameter for fxch
474 * @param block the block were fxch is inserted
476 * Creates a new fxch node and reroute the user of the old node
479 * @return the fxch node
481 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block)
484 ia32_x87_attr_t *attr;
486 fxch = new_bd_ia32_fxch(NULL, block);
487 attr = get_ia32_x87_attr(fxch);
488 attr->x87[0] = &ia32_st_regs[pos];
489 attr->x87[2] = &ia32_st_regs[0];
493 x87_fxch(state, pos);
495 } /* x87_fxch_shuffle */
498 * Calculate the necessary permutations to reach dst_state.
500 * These permutations are done with fxch instructions and placed
501 * at the end of the block.
503 * Note that critical edges are removed here, so we need only
504 * a shuffle if the current block has only one successor.
506 * @param sim the simulator handle
507 * @param block the current block
508 * @param state the current x87 stack state, might be modified
509 * @param dst_block the destination block
510 * @param dst_state destination state
514 static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
515 x87_state *state, ir_node *dst_block,
516 const x87_state *dst_state)
/* NOTE(review): this excerpt is a lossy extraction -- the numeric prefixes are
 * original line numbers and several lines (braces, else-branches, the
 * 'return state;' tail) are missing.  Code left byte-identical; only
 * comments were added or fixed. */
518 int i, n_cycles, k, ri;
519 unsigned cycles[4], all_mask;
520 char cycle_idx[4][8];
521 ir_node *fxch, *before, *after;
525 assert(state->depth == dst_state->depth);
527 /* Some mathematics here:
528 If we have a cycle of length n that includes the tos,
529 we need n-1 exchange operations.
530 We can always add the tos and restore it, so we need
531 n+1 exchange operations for a cycle not containing the tos.
532 So, the maximum of needed operations is for a cycle of 7
533 not including the tos == 8.
534 This is the same number of ops we would need for using stores,
535 so exchange is cheaper (we save the loads).
536 On the other hand, we might need an additional exchange
537 in the next block to bring one operand on top, so the
538 number of ops in the first case is identical.
539 Further, no more than 4 cycles can exist (4 x 2).
/* one bit per stack slot that still needs to be examined */
541 all_mask = (1 << (state->depth)) - 1;
/* decompose the permutation (current -> destination order) into cycles */
543 for (n_cycles = 0; all_mask; ++n_cycles) {
544 int src_idx, dst_idx;
546 /* find the first free slot */
547 for (i = 0; i < state->depth; ++i) {
548 if (all_mask & (1 << i)) {
549 all_mask &= ~(1 << i);
551 /* check if there are differences here */
552 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
558 /* no more cycles found */
/* record slot i as the start of a new cycle */
563 cycles[n_cycles] = (1 << i);
564 cycle_idx[n_cycles][k++] = i;
/* follow the cycle: where must the value at src_idx go? */
565 for (src_idx = i; ; src_idx = dst_idx) {
566 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
568 if ((all_mask & (1 << dst_idx)) == 0)
571 cycle_idx[n_cycles][k++] = dst_idx;
572 cycles[n_cycles] |= (1 << dst_idx);
573 all_mask &= ~(1 << dst_idx);
/* -1 terminates the index list of this cycle */
575 cycle_idx[n_cycles][k] = -1;
579 /* no permutation needed */
583 /* Hmm: permutation needed */
584 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
585 DEBUG_ONLY(x87_dump_stack(state));
586 DB((dbg, LEVEL_2, " to\n"));
587 DEBUG_ONLY(x87_dump_stack(dst_state));
591 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
592 for (ri = 0; ri < n_cycles; ++ri) {
593 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
594 for (k = 0; cycle_idx[ri][k] != -1; ++k)
595 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
596 DB((dbg, LEVEL_2, "\n"));
603 * Find the place where the node must be inserted.
604 * We have only one successor block, so the last instruction should
/* fxch's go before the final control-flow op of the block */
607 before = sched_last(block);
608 assert(is_cfop(before));
610 /* now do the permutations */
611 for (ri = 0; ri < n_cycles; ++ri) {
612 if ((cycles[ri] & 1) == 0) {
613 /* this cycle does not include the tos */
/* bring a cycle member to the tos first, then rotate the rest */
614 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
616 sched_add_after(after, fxch);
618 sched_add_before(before, fxch);
621 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
622 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
624 sched_add_after(after, fxch);
626 sched_add_before(before, fxch);
629 if ((cycles[ri] & 1) == 0) {
630 /* this cycle does not include the tos */
/* finally restore the original tos value */
631 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
632 sched_add_after(after, fxch);
639 * Create a fxch node before another node.
641 * @param state the x87 state
642 * @param n the node after the fxch
643 * @param pos exchange st(pos) with st(0)
647 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
650 ia32_x87_attr_t *attr;
651 ir_node *block = get_nodes_block(n);
653 x87_fxch(state, pos);
655 fxch = new_bd_ia32_fxch(NULL, block);
656 attr = get_ia32_x87_attr(fxch);
657 attr->x87[0] = &ia32_st_regs[pos];
658 attr->x87[2] = &ia32_st_regs[0];
662 sched_add_before(n, fxch);
663 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
665 } /* x87_create_fxch */
668 * Create a fpush before node n.
670 * @param state the x87 state
671 * @param n the node after the fpush
672 * @param pos push st(pos) on stack
673 * @param op_idx replace input op_idx of n with the fpush result
675 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx)
677 ir_node *fpush, *pred = get_irn_n(n, op_idx);
678 ia32_x87_attr_t *attr;
679 const arch_register_t *out = x87_get_irn_register(pred);
681 x87_push_dbl(state, arch_register_get_index(out), pred);
683 fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
684 attr = get_ia32_x87_attr(fpush);
685 attr->x87[0] = &ia32_st_regs[pos];
686 attr->x87[2] = &ia32_st_regs[0];
689 sched_add_before(n, fpush);
691 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
692 } /* x87_create_fpush */
695 * Create a fpop before node n.
697 * @param state the x87 state
698 * @param n the node after the fpop
699 * @param num pop 1 or 2 values
701 * @return the fpop node
703 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
705 ir_node *fpop = NULL;
706 ia32_x87_attr_t *attr;
711 if (ia32_cg_config.use_ffreep)
712 fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
714 fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
715 attr = get_ia32_x87_attr(fpop);
716 attr->x87[0] = &ia32_st_regs[0];
717 attr->x87[1] = &ia32_st_regs[0];
718 attr->x87[2] = &ia32_st_regs[0];
721 sched_add_before(n, fpop);
722 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
727 } /* x87_create_fpop */
729 /* --------------------------------- liveness ------------------------------------------ */
732 * The liveness transfer function.
733 * Updates a live set over a single step from a given node to its predecessor.
734 * Everything defined at the node is removed from the set, the uses of the node get inserted.
736 * @param irn The node at which liveness should be computed.
737 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
738 * the registers live after irn.
740 * @return The live bitset.
742 static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
745 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
747 if (get_irn_mode(irn) == mode_T) {
748 const ir_edge_t *edge;
750 foreach_out_edge(irn, edge) {
751 ir_node *proj = get_edge_src_irn(edge);
753 if (arch_irn_consider_in_reg_alloc(cls, proj)) {
754 const arch_register_t *reg = x87_get_irn_register(proj);
755 live &= ~(1 << arch_register_get_index(reg));
760 if (arch_irn_consider_in_reg_alloc(cls, irn)) {
761 const arch_register_t *reg = x87_get_irn_register(irn);
762 live &= ~(1 << arch_register_get_index(reg));
765 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
766 ir_node *op = get_irn_n(irn, i);
768 if (mode_is_float(get_irn_mode(op)) &&
769 arch_irn_consider_in_reg_alloc(cls, op)) {
770 const arch_register_t *reg = x87_get_irn_register(op);
771 live |= 1 << arch_register_get_index(reg);
775 } /* vfp_liveness_transfer */
778 * Put all live virtual registers at the end of a block into a bitset.
780 * @param sim the simulator handle
781 * (the liveness information is taken from the simulator handle)
782 * @param bl the block
784 * @return The live bitset at the end of this block
786 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
789 vfp_liveness live = 0;
790 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
791 const be_lv_t *lv = sim->lv;
793 be_lv_foreach(lv, block, be_lv_state_end, i) {
794 const arch_register_t *reg;
795 const ir_node *node = be_lv_get_irn(lv, block, i);
796 if (!arch_irn_consider_in_reg_alloc(cls, node))
799 reg = x87_get_irn_register(node);
800 live |= 1 << arch_register_get_index(reg);
804 } /* vfp_liveness_end_of_block */
806 /** get the register mask from an arch_register */
807 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
810 * Return a bitset of argument registers which are live at the end of a node.
812 * @param sim the simulator handle
813 * @param pos the node
814 * @param kill kill mask for the output registers
816 * @return The live bitset.
818 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
820 unsigned idx = get_irn_idx(pos);
822 assert(idx < sim->n_idx);
823 return sim->live[idx] & ~kill;
824 } /* vfp_live_args_after */
827 * Calculate the liveness for a whole block and cache it.
829 * @param sim the simulator handle
830 * @param lv the liveness handle
831 * @param block the block
833 static void update_liveness(x87_simulator *sim, ir_node *block)
835 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
839 /* now iterate through the block backward and cache the results */
840 sched_foreach_reverse(block, irn) {
841 /* stop at the first Phi: this produces the live-in */
845 idx = get_irn_idx(irn);
846 sim->live[idx] = live;
848 live = vfp_liveness_transfer(irn, live);
850 idx = get_irn_idx(block);
851 sim->live[idx] = live;
852 } /* update_liveness */
855 * Returns true if a register is live in a set.
857 * @param reg_idx the vfp register index
858 * @param live a live bitset
860 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
864 * Dump liveness info.
866 * @param live the live bitset
868 static void vfp_dump_live(vfp_liveness live)
872 DB((dbg, LEVEL_2, "Live after: "));
873 for (i = 0; i < 8; ++i) {
874 if (live & (1 << i)) {
875 DB((dbg, LEVEL_2, "vf%d ", i));
878 DB((dbg, LEVEL_2, "\n"));
879 } /* vfp_dump_live */
880 #endif /* DEBUG_libfirm */
882 /* --------------------------------- simulators ---------------------------------------- */
885 * Simulate a virtual binop.
887 * @param state the x87 state
888 * @param n the node that should be simulated (and patched)
889 * @param tmpl the template containing the 4 possible x87 opcodes
891 * @return NO_NODE_ADDED
893 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl)
/* NOTE(review): this excerpt is a lossy extraction -- the numeric prefixes are
 * original line numbers and several lines (braces, else keywords, the
 * op1_idx/op2_idx/out_idx/do_pop updates and declarations of dst, permuted,
 * op1_live_after, op2_live_after) are missing.  Code left byte-identical;
 * only comments were added or fixed. */
895 int op2_idx = 0, op1_idx;
896 int out_idx, do_pop = 0;
897 ia32_x87_attr_t *attr;
899 ir_node *patched_insn;
901 x87_simulator *sim = state->sim;
902 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
903 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
904 const arch_register_t *op1_reg = x87_get_irn_register(op1);
905 const arch_register_t *op2_reg = x87_get_irn_register(op2);
906 const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
907 int reg_index_1 = arch_register_get_index(op1_reg);
908 int reg_index_2 = arch_register_get_index(op2_reg);
909 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
913 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
914 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
915 arch_register_get_name(out)));
916 DEBUG_ONLY(vfp_dump_live(live));
917 DB((dbg, LEVEL_1, "Stack before: "));
918 DEBUG_ONLY(x87_dump_stack(state));
920 op1_idx = x87_on_stack(state, reg_index_1);
921 assert(op1_idx >= 0);
/* does the first operand's value survive this instruction? */
922 op1_live_after = is_vfp_live(arch_register_get_index(op1_reg), live);
924 attr = get_ia32_x87_attr(n);
925 permuted = attr->attr.data.ins_permuted;
/* REG_VFP_NOREG marks an address-mode (memory) second operand */
927 if (reg_index_2 != REG_VFP_NOREG) {
930 /* second operand is a vfp register */
931 op2_idx = x87_on_stack(state, reg_index_2);
932 assert(op2_idx >= 0);
933 op2_live_after = is_vfp_live(arch_register_get_index(op2_reg), live);
935 if (op2_live_after) {
936 /* Second operand is live. */
938 if (op1_live_after) {
939 /* Both operands are live: push the first one.
940 This works even for op1 == op2. */
941 x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
942 /* now do fxxx (tos=tos X op) */
946 dst = tmpl->normal_op;
948 /* Second live, first operand is dead here, bring it to tos. */
950 x87_create_fxch(state, n, op1_idx);
955 /* now do fxxx (tos=tos X op) */
957 dst = tmpl->normal_op;
960 /* Second operand is dead. */
961 if (op1_live_after) {
962 /* First operand is live: bring second to tos. */
964 x87_create_fxch(state, n, op2_idx);
969 /* now do fxxxr (tos = op X tos) */
971 dst = tmpl->reverse_op;
973 /* Both operands are dead here, pop them from the stack. */
976 /* Both are identical and on tos, no pop needed. */
977 /* here fxxx (tos = tos X tos) */
978 dst = tmpl->normal_op;
981 /* now do fxxxp (op = op X tos, pop) */
982 dst = tmpl->normal_pop_op;
986 } else if (op1_idx == 0) {
987 assert(op1_idx != op2_idx);
988 /* now do fxxxrp (op = tos X op, pop) */
989 dst = tmpl->reverse_pop_op;
993 /* Bring the second on top. */
994 x87_create_fxch(state, n, op2_idx);
995 if (op1_idx == op2_idx) {
996 /* Both are identical and on tos now, no pop needed. */
999 /* use fxxx (tos = tos X tos) */
1000 dst = tmpl->normal_op;
1003 /* op2 is on tos now */
1005 /* use fxxxp (op = op X tos, pop) */
1006 dst = tmpl->normal_pop_op;
1014 /* second operand is an address mode */
1015 if (op1_live_after) {
1016 /* first operand is live: push it here */
1017 x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
1020 /* first operand is dead: bring it to tos */
1022 x87_create_fxch(state, n, op1_idx);
1027 /* use fxxx (tos = tos X mem) */
/* ins_permuted swaps the operand order, hence the reverse opcode */
1028 dst = permuted ? tmpl->reverse_op : tmpl->normal_op;
/* rewrite the virtual opcode into the selected concrete x87 opcode */
1032 patched_insn = x87_patch_insn(n, dst);
1033 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
1038 /* patch the operation */
1039 attr->x87[0] = op1_reg = &ia32_st_regs[op1_idx];
1040 if (reg_index_2 != REG_VFP_NOREG) {
1041 attr->x87[1] = op2_reg = &ia32_st_regs[op2_idx];
1043 attr->x87[2] = out = &ia32_st_regs[out_idx];
1045 if (reg_index_2 != REG_VFP_NOREG) {
1046 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1047 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
1048 arch_register_get_name(out)));
1050 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1051 arch_register_get_name(op1_reg),
1052 arch_register_get_name(out)));
1055 return NO_NODE_ADDED;
1059 * Simulate a virtual Unop.
1061 * @param state the x87 state
1062 * @param n the node that should be simulated (and patched)
1063 * @param op the x87 opcode that will replace n's opcode
1065 * @return NO_NODE_ADDED
1067 static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
1069 int op1_idx, out_idx;
1070 x87_simulator *sim = state->sim;
1071 const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, UNOP_IDX));
1072 const arch_register_t *out = x87_get_irn_register(n);
1073 ia32_x87_attr_t *attr;
1074 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1076 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1077 DEBUG_ONLY(vfp_dump_live(live));
1079 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1081 if (is_vfp_live(arch_register_get_index(op1), live)) {
1082 /* push the operand here */
1083 x87_create_fpush(state, n, op1_idx, UNOP_IDX);
1087 /* operand is dead, bring it to tos */
1089 x87_create_fxch(state, n, op1_idx);
1094 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1096 attr = get_ia32_x87_attr(n);
1097 attr->x87[0] = op1 = &ia32_st_regs[0];
1098 attr->x87[2] = out = &ia32_st_regs[0];
1099 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1101 return NO_NODE_ADDED;
1105 * Simulate a virtual Load instruction.
1107 * @param state the x87 state
1108 * @param n the node that should be simulated (and patched)
1109 * @param op the x87 opcode that will replace n's opcode
1111 * @return NO_NODE_ADDED
1113 static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
1115 const arch_register_t *out = x87_irn_get_register(n, res_pos);
1116 ia32_x87_attr_t *attr;
1118 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
1119 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1120 assert(out == x87_irn_get_register(n, res_pos));
1121 attr = get_ia32_x87_attr(n);
1122 attr->x87[2] = out = &ia32_st_regs[0];
1123 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1125 return NO_NODE_ADDED;
1129 * Rewire all users of @p old_val to @p new_val iff they are scheduled after @p store.
1131 * @param store The store
1132 * @param old_val The former value
1133 * @param new_val The new value
1135 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
1137 const ir_edge_t *edge, *ne;
1139 foreach_out_edge_safe(old_val, edge, ne) {
1140 ir_node *user = get_edge_src_irn(edge);
1142 if (! user || user == store)
1145 /* if the user is scheduled after the store: rewire */
1146 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1148 /* find the input of the user pointing to the old value */
1149 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1150 if (get_irn_n(user, i) == old_val)
1151 set_irn_n(user, i, new_val);
1155 } /* collect_and_rewire_users */
1158 * Simulate a virtual Store.
1160 * @param state the x87 state
1161 * @param n the node that should be simulated (and patched)
1162 * @param op the x87 store opcode
1163 * @param op_p the x87 store and pop opcode
1165 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p)
/* NOTE(review): this excerpt is a lossy extraction -- the numeric prefixes
 * are original line numbers and several lines (braces, else keywords, the
 * declarations of mode/irg, fxch conditions, the final 'return insn;') are
 * missing.  Code left byte-identical; only comments added. */
1167 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1168 const arch_register_t *op2 = x87_get_irn_register(val);
1169 unsigned live = vfp_live_args_after(state->sim, n, 0);
1170 int insn = NO_NODE_ADDED;
1171 ia32_x87_attr_t *attr;
1172 int op2_reg_idx, op2_idx, depth;
1173 int live_after_node;
1176 op2_reg_idx = arch_register_get_index(op2);
1177 op2_idx = x87_on_stack(state, op2_reg_idx);
/* does the stored value survive this instruction? */
1178 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1179 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1180 assert(op2_idx >= 0);
1182 mode = get_ia32_ls_mode(n);
1183 depth = x87_get_depth(state);
1185 if (live_after_node) {
1187 Problem: fst doesn't support 96bit modes (spills), only fstp does
1188 fist doesn't support 64bit mode, only fistp
1190 - stack not full: push value and fstp
1191 - stack full: fstp value and load again
1192 Note that we cannot test on mode_E, because floats might be 96bit ...
/* wide stores must use the popping form, so preserve the value explicitly */
1194 if (get_mode_size_bits(mode) > 64 || (mode_is_int(mode) && get_mode_size_bits(mode) > 32)) {
1195 if (depth < N_x87_REGS) {
1196 /* ok, we have a free register: push + fstp */
1197 x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
1199 x87_patch_insn(n, op_p);
1201 ir_node *vfld, *mem, *block, *rproj, *mproj;
1204 /* stack full here: need fstp + load */
1206 x87_patch_insn(n, op_p);
/* re-materialize the popped value by loading it back from the stored slot */
1208 block = get_nodes_block(n);
1209 vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), new_NoMem(), get_ia32_ls_mode(n));
1211 /* copy all attributes */
1212 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1213 if (is_ia32_use_frame(n))
1214 set_ia32_use_frame(vfld);
1215 set_ia32_op_type(vfld, ia32_AddrModeS);
1216 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1217 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1218 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1220 rproj = new_r_Proj(vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1221 mproj = new_r_Proj(vfld, mode_M, pn_ia32_vfld_M);
1222 mem = get_irn_Proj_for_mode(n, mode_M);
1224 assert(mem && "Store memory not found");
/* the reload carries the same vfp register as the stored value */
1226 arch_set_irn_register(rproj, op2);
1228 /* reroute all former users of the store memory to the load memory */
1229 irg = get_irn_irg(n);
1230 edges_reroute(mem, mproj, irg);
1231 /* set the memory input of the load to the store memory */
1232 set_irn_n(vfld, n_ia32_vfld_mem, mem);
1234 sched_add_after(n, vfld);
1235 sched_add_after(vfld, rproj);
1237 /* rewire all users, scheduled after the store, to the loaded value */
1238 collect_and_rewire_users(n, val, rproj);
1243 /* we can only store the tos to memory */
1245 x87_create_fxch(state, n, op2_idx);
1247 /* mode size 64 or smaller -> use normal fst */
1248 x87_patch_insn(n, op);
1251 /* we can only store the tos to memory */
1253 x87_create_fxch(state, n, op2_idx);
/* value is dead after the store: use the popping form */
1256 x87_patch_insn(n, op_p);
1259 attr = get_ia32_x87_attr(n);
1260 attr->x87[1] = op2 = &ia32_st_regs[0];
1261 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/*
 * Macro templates generating the trivial sim_<op>() wrapper functions that
 * merely delegate to the generic binop/load/unop/store simulators.
 * NOTE(review): the closing braces of the generated function bodies are not
 * visible in this excerpt (each body ends on a line-continuation) -- confirm
 * against the complete file.
 */
#define _GEN_BINOP(op, rev) \
static int sim_##op(x87_state *state, ir_node *n) { \
	exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
	return sim_binop(state, n, &tmpl); \
#define GEN_BINOP(op) _GEN_BINOP(op, op)
#define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
#define GEN_LOAD(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
#define GEN_UNOP(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_unop(state, n, op_ia32_##op); \
#define GEN_STORE(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
/**
 * Simulate a virtual fisttp (truncating float-to-int store that pops st(0)).
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 *
 * NOTE(review): this excerpt is missing several original lines (the opening
 * brace, an "if (op2_idx != 0)" guard before the fxch, and the x87_pop()
 * bookkeeping that balances the fisttp pop) -- confirm against the full file.
 */
static int sim_fisttp(x87_state *state, ir_node *n)
	ir_node *val = get_irn_n(n, n_ia32_vfst_val);
	const arch_register_t *op2 = x87_get_irn_register(val);
	ia32_x87_attr_t *attr;
	int op2_reg_idx, op2_idx, depth;

	op2_reg_idx = arch_register_get_index(op2);
	op2_idx = x87_on_stack(state, op2_reg_idx);
	DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
	assert(op2_idx >= 0);

	/* depth is read here; its consumer is not visible in this excerpt */
	depth = x87_get_depth(state);

	/* Note: although the value is still live here, it is destroyed because
	   of the pop. The register allocator is aware of that and introduced a copy
	   if the value must be alive. */

	/* we can only store the tos to memory: bring the value to tos */
	x87_create_fxch(state, n, op2_idx);

	/* patch the virtual node into the real fisttp */
	x87_patch_insn(n, op_ia32_fisttp);

	attr = get_ia32_x87_attr(n);
	/* fisttp always operates on st(0) */
	attr->x87[1] = op2 = &ia32_st_regs[0];
	DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));

	return NO_NODE_ADDED;
/**
 * Simulate a virtual FtstFnstsw (compare st(0) against 0.0 and store the
 * FPU status word).
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 *
 * NOTE(review): the opening brace and an "if (op1_idx != 0)" guard (with
 * the subsequent "op1_idx = 0;" update) appear to be missing from this
 * excerpt -- confirm against the full file.
 */
static int sim_FtstFnstsw(x87_state *state, ir_node *n)
	x87_simulator *sim = state->sim;
	ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
	ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
	const arch_register_t *reg1 = x87_get_irn_register(op1_node);
	int reg_index_1 = arch_register_get_index(reg1);
	int op1_idx = x87_on_stack(state, reg_index_1);
	unsigned live = vfp_live_args_after(sim, n, 0);

	DB((dbg, LEVEL_1, ">>> %+F %s\n", n, arch_register_get_name(reg1)));
	DEBUG_ONLY(vfp_dump_live(live));
	DB((dbg, LEVEL_1, "Stack before: "));
	DEBUG_ONLY(x87_dump_stack(state));
	assert(op1_idx >= 0);

	/* ftst only tests st(0): bring the value to tos */
	x87_create_fxch(state, n, op1_idx);

	/* patch the operation */
	x87_patch_insn(n, op_ia32_FtstFnstsw);
	reg1 = &ia32_st_regs[op1_idx];
	attr->x87[0] = reg1;
	attr->x87[1] = NULL;
	attr->x87[2] = NULL;

	/* pop the operand if it is dead after this instruction */
	if (!is_vfp_live(reg_index_1, live))
		x87_create_fpop(state, sched_next(n), 1);

	return NO_NODE_ADDED;
} /* sim_FtstFnstsw */
/**
 * Simulate a virtual Fucom (vFucomFnstsw / vFucomi floating point compare).
 * Rearranges the stack so the operands end up in st(0)/st(i) as the real
 * instruction requires, then patches to the fitting (possibly popping)
 * compare opcode.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 *
 * NOTE(review): this excerpt is missing many original lines: declarations of
 * op1_idx/op2_idx/pops/dst/i, the "pops" accounting, several else-branches,
 * closing braces, x87_pop() calls and switch headers. The comments below
 * describe only the visible structure -- confirm against the full file.
 */
static int sim_Fucom(x87_state *state, ir_node *n)
	ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
	x87_simulator *sim = state->sim;
	ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
	ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
	const arch_register_t *op1 = x87_get_irn_register(op1_node);
	const arch_register_t *op2 = x87_get_irn_register(op2_node);
	int reg_index_1 = arch_register_get_index(op1);
	int reg_index_2 = arch_register_get_index(op2);
	unsigned live = vfp_live_args_after(sim, n, 0);
	bool permuted = attr->attr.data.ins_permuted;

	DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
	arch_register_get_name(op1), arch_register_get_name(op2)));
	DEBUG_ONLY(vfp_dump_live(live));
	DB((dbg, LEVEL_1, "Stack before: "));
	DEBUG_ONLY(x87_dump_stack(state));

	op1_idx = x87_on_stack(state, reg_index_1);
	assert(op1_idx >= 0);

	/* BEWARE: check for comp a,a cases, they might happen */
	if (reg_index_2 != REG_VFP_NOREG) {
		/* second operand is a vfp register */
		op2_idx = x87_on_stack(state, reg_index_2);
		assert(op2_idx >= 0);

		if (is_vfp_live(reg_index_2, live)) {
			/* second operand is live */
			if (is_vfp_live(reg_index_1, live)) {
				/* both operands are live */
				/* res = tos X op */
			} else if (op2_idx == 0) {
				/* res = op X tos */
				permuted = !permuted;
				/* bring the first one to tos */
				x87_create_fxch(state, n, op1_idx);
				if (op1_idx == op2_idx) {
				} else if (op2_idx == 0) {
					/* res = tos X op */
				/* second live, first operand is dead here, bring it to tos.
				   This means further, op1_idx != op2_idx. */
				assert(op1_idx != op2_idx);
				x87_create_fxch(state, n, op1_idx);
				/* res = tos X op, pop */
			/* second operand is dead */
			if (is_vfp_live(reg_index_1, live)) {
				/* first operand is live: bring second to tos.
				   This means further, op1_idx != op2_idx. */
				assert(op1_idx != op2_idx);
				x87_create_fxch(state, n, op2_idx);
				/* res = op X tos, pop */
				permuted = !permuted;
			/* both operands are dead here, check first for identity. */
			if (op1_idx == op2_idx) {
				/* identically, one pop needed */
				x87_create_fxch(state, n, op1_idx);
				/* res = tos X op, pop */
			/* different, move them to st and st(1) and pop both.
			   The tricky part is to get one into st(1).*/
			else if (op2_idx == 1) {
				/* good, second operand is already in the right place, move the first */
				/* bring the first on top */
				x87_create_fxch(state, n, op1_idx);
				assert(op2_idx != 0);
				/* res = tos X op, pop, pop */
			} else if (op1_idx == 1) {
				/* good, first operand is already in the right place, move the second */
				/* bring the first on top */
				x87_create_fxch(state, n, op2_idx);
				assert(op1_idx != 0);
				/* res = op X tos, pop, pop */
				permuted = !permuted;
			/* if one is already the TOS, we need two fxch */
				/* first one is TOS, move to st(1) */
				x87_create_fxch(state, n, 1);
				assert(op2_idx != 1);
				x87_create_fxch(state, n, op2_idx);
				/* res = op X tos, pop, pop */
				permuted = !permuted;
			} else if (op2_idx == 0) {
				/* second one is TOS, move to st(1) */
				x87_create_fxch(state, n, 1);
				assert(op1_idx != 1);
				x87_create_fxch(state, n, op1_idx);
				/* res = tos X op, pop, pop */
				/* none of them is either TOS or st(1), 3 fxch needed */
				x87_create_fxch(state, n, op2_idx);
				assert(op1_idx != 0);
				x87_create_fxch(state, n, 1);
				x87_create_fxch(state, n, op1_idx);
				/* res = tos X op, pop, pop */
		/* second operand is an address mode */
		if (is_vfp_live(reg_index_1, live)) {
			/* first operand is live: bring it to TOS */
			x87_create_fxch(state, n, op1_idx);
			/* first operand is dead: bring it to tos */
			x87_create_fxch(state, n, op1_idx);

	/* patch the operation: choose the popping variant matching "pops" */
	if (is_ia32_vFucomFnstsw(n)) {
		case 0: dst = op_ia32_FucomFnstsw; break;
		case 1: dst = op_ia32_FucompFnstsw; break;
		case 2: dst = op_ia32_FucomppFnstsw; break;
		default: panic("invalid popcount in sim_Fucom");
		for (i = 0; i < pops; ++i) {
	} else if (is_ia32_vFucomi(n)) {
		case 0: dst = op_ia32_Fucomi; break;
		case 1: dst = op_ia32_Fucompi; x87_pop(state); break;
			dst = op_ia32_Fucompi;
			x87_create_fpop(state, sched_next(n), 1);
		default: panic("invalid popcount in sim_Fucom");
		panic("invalid operation %+F in sim_FucomFnstsw", n);

	x87_patch_insn(n, dst);
	op1 = &ia32_st_regs[op1_idx];
	op2 = &ia32_st_regs[op2_idx];
	attr->x87[2] = NULL;
	attr->attr.data.ins_permuted = permuted;

	DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
	arch_register_get_name(op1), arch_register_get_name(op2)));
	DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
	arch_register_get_name(op1)));

	return NO_NODE_ADDED;
/**
 * Simulate a be_Keep: pop every kept vfp value that is still on the x87
 * stack but dead after the Keep.
 *
 * @param state  the x87 state
 * @param node   the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 *
 * NOTE(review): declarations of op/reg_id/live/op_stack_idx/i/arity, the
 * "continue;" after the register-class check and some closing braces are
 * not visible in this excerpt -- confirm against the full file.
 */
static int sim_Keep(x87_state *state, ir_node *node)
	const arch_register_t *op_reg;

	DB((dbg, LEVEL_1, ">>> %+F\n", node));

	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		op = get_irn_n(node, i);
		op_reg = arch_get_irn_register(op);
		/* only vfp registers are tracked on the simulated x87 stack */
		if (arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])

		reg_id = arch_register_get_index(op_reg);
		live = vfp_live_args_after(state->sim, node, 0);

		/* still on the stack and dead -> emit an fpop behind the Keep */
		op_stack_idx = x87_on_stack(state, reg_id);
		if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live))
			x87_create_fpop(state, sched_next(node), 1);

	DB((dbg, LEVEL_1, "Stack after: "));
	DEBUG_ONLY(x87_dump_stack(state));

	return NO_NODE_ADDED;
/**
 * Keep the given node alive by adding a be_Keep node right after it in the
 * schedule, preventing it from becoming dead code.
 *
 * @param node  the node to be kept alive
 */
static void keep_float_node_alive(ir_node *node)
	ir_node *block = get_nodes_block(node);
	ir_node *keep = be_new_Keep(block, 1, &node);

	assert(sched_is_scheduled(node));
	sched_add_after(node, keep);
/**
 * Create a copy of a node. Constants are not copied but recreated, because
 * reloading them (fldz/fld1/...) is cheaper than a real stack copy.
 *
 * @param state  the x87 state
 * @param n      the node to be copied
 *
 * @return the copy of n
 *
 * NOTE(review): several "case"/"break" lines of the switch, the declaration
 * of "res", the else-branch separator of the cnstr check and the final
 * "return res;" are missing from this excerpt -- confirm against the full
 * file.
 */
static ir_node *create_Copy(x87_state *state, ir_node *n)
	dbg_info *n_dbg = get_irn_dbg_info(n);
	ir_mode *mode = get_irn_mode(n);
	ir_node *block = get_nodes_block(n);
	ir_node *pred = get_irn_n(n, 0);
	ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL;
	const arch_register_t *out;
	const arch_register_t *op1;
	ia32_x87_attr_t *attr;

	/* Do not copy constants, recreate them. */
	switch (get_ia32_irn_opcode(pred)) {
		cnstr = new_bd_ia32_fldz;
		cnstr = new_bd_ia32_fld1;
	case iro_ia32_fldpi:
		cnstr = new_bd_ia32_fldpi;
	case iro_ia32_fldl2e:
		cnstr = new_bd_ia32_fldl2e;
	case iro_ia32_fldl2t:
		cnstr = new_bd_ia32_fldl2t;
	case iro_ia32_fldlg2:
		cnstr = new_bd_ia32_fldlg2;
	case iro_ia32_fldln2:
		cnstr = new_bd_ia32_fldln2;

	out = x87_get_irn_register(n);
	op1 = x87_get_irn_register(pred);

	if (cnstr != NULL) {
		/* copy a constant: recreate it and push the result */
		res = (*cnstr)(n_dbg, block, mode);

		x87_push(state, arch_register_get_index(out), res);

		attr = get_ia32_x87_attr(res);
		attr->x87[2] = &ia32_st_regs[0];
		int op1_idx = x87_on_stack(state, arch_register_get_index(op1));

		/* non-constant: use an fpushCopy of the operand */
		res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);

		x87_push(state, arch_register_get_index(out), res);

		attr = get_ia32_x87_attr(res);
		attr->x87[0] = &ia32_st_regs[op1_idx];
		attr->x87[2] = &ia32_st_regs[0];
	arch_set_irn_register(res, out);
/**
 * Simulate a be_Copy. If the source is still live a real (fpush) copy is
 * created; otherwise the copy degenerates to a rename of the stack slot or
 * a pop, depending on where the output register sits.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 *
 * NOTE(review): declarations of pred/live, the early "return" after the
 * register-class check, the "if (out_idx == 0)"-style condition lines and
 * several closing braces are missing from this excerpt -- confirm against
 * the full file.
 */
static int sim_Copy(x87_state *state, ir_node *n)
	const arch_register_t *out;
	const arch_register_t *op1;
	const arch_register_class_t *cls;
	ir_node *node, *next;
	int op1_idx, out_idx;

	/* only vfp copies are simulated on the x87 stack */
	cls = arch_get_irn_reg_class_out(n);
	if (cls->regs != ia32_vfp_regs)

	pred = get_irn_n(n, 0);
	out = x87_get_irn_register(n);
	op1 = x87_get_irn_register(pred);
	live = vfp_live_args_after(state->sim, n, REGMASK(out));

	DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
	arch_register_get_name(op1), arch_register_get_name(out)));
	DEBUG_ONLY(vfp_dump_live(live));

	op1_idx = x87_on_stack(state, arch_register_get_index(op1));

	if (is_vfp_live(arch_register_get_index(op1), live)) {
		ir_node *pred = get_irn_n(n, 0);

		/* Operand is still live, a real copy. We need here an fpush that can
		   hold a register, so use the fpushCopy or recreate constants */
		node = create_Copy(state, n);

		/* We have to make sure the old value doesn't go dead (which can happen
		 * when we recreate constants). As the simulator expected that value in
		 * the pred blocks. This is unfortunate as removing it would save us 1
		 * instruction, but we would have to rerun all the simulation to get
		 */
		next = sched_next(n);
		sched_add_before(next, node);

		/* recreated constant: the old value may have lost all users */
		if (get_irn_n_edges(pred) == 0) {
			keep_float_node_alive(pred);

		DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
		out_idx = x87_on_stack(state, arch_register_get_index(out));

		if (out_idx >= 0 && out_idx != op1_idx) {
			/* Matze: out already on stack? how can this happen? */
			panic("invalid stack state in x87 simulator");

			/* op1 must be killed and placed where out is */
				ia32_x87_attr_t *attr;
				/* best case, simple remove and rename */
				x87_patch_insn(n, op_ia32_Pop);
				attr = get_ia32_x87_attr(n);
				attr->x87[0] = op1 = &ia32_st_regs[0];
				x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
				ia32_x87_attr_t *attr;
				/* move op1 to tos, store and pop it */
				x87_create_fxch(state, n, op1_idx);
				x87_patch_insn(n, op_ia32_Pop);
				attr = get_ia32_x87_attr(n);
				attr->x87[0] = op1 = &ia32_st_regs[out_idx];
				x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
			DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
			/* just a virtual copy */
			x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
			/* don't remove the node to keep the verifier quiet :),
			   the emitter won't emit any code for the node */
			DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
			exchange(n, get_unop_op(n));

	return NO_NODE_ADDED;
/**
 * Returns the vf0 result Proj of a Call, i.e. the Proj delivering the
 * floating point return value in st(0).
 *
 * @param call  the Call node
 *
 * NOTE(review): the "return proj;" inside the loop and the trailing
 * "return NULL;" are not visible in this excerpt -- confirm against the
 * full file.
 */
static ir_node *get_call_result_proj(ir_node *call)
	const ir_edge_t *edge;

	/* search the result proj */
	foreach_out_edge(call, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		long pn = get_Proj_proj(proj);

		if (pn == pn_ia32_Call_vf0)
} /* get_call_result_proj */
/**
 * Simulate a ia32_Call: if the callee returns a float, that value appears
 * in st(0), so push it onto the simulated stack.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 *
 * NOTE(review): declarations of res_type/mode/resproj and the early
 * "return"s after the two checks are missing from this excerpt -- confirm
 * against the full file.
 */
static int sim_Call(x87_state *state, ir_node *n)
	ir_type *call_tp = get_ia32_call_attr_const(n)->call_tp;
	const arch_register_t *reg;

	DB((dbg, LEVEL_1, ">>> %+F\n", n));

	/* at the begin of a call the x87 state should be empty */
	assert(state->depth == 0 && "stack not empty before call");

	if (get_method_n_ress(call_tp) <= 0)

	/*
	 * If the called function returns a float, it is returned in st(0).
	 * This even happens if the return value is NOT used.
	 * Moreover, only one return result is supported.
	 */
	res_type = get_method_res_type(call_tp, 0);
	mode = get_type_mode(res_type);

	if (mode == NULL || !mode_is_float(mode))

	resproj = get_call_result_proj(n);
	assert(resproj != NULL);

	reg = x87_get_irn_register(resproj);
	/* model the return value appearing on top of the x87 stack */
	x87_push(state, arch_register_get_index(reg), resproj);

	DB((dbg, LEVEL_1, "Stack after: "));
	DEBUG_ONLY(x87_dump_stack(state));

	return NO_NODE_ADDED;
/**
 * Simulate a be_Return: count the floating point results and virtually pop
 * them from the simulated stack (the hardware leaves them in st registers
 * for the caller).
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 *
 * NOTE(review): the "n_float_res++;" counting statement, a closing brace
 * and the "x87_pop(state);" loop body are missing from this excerpt --
 * confirm against the full file.
 */
static int sim_Return(x87_state *state, ir_node *n)
	int n_res = be_Return_get_n_rets(n);
	int i, n_float_res = 0;

	/* only floating point return values must reside on stack */
	for (i = 0; i < n_res; ++i) {
		ir_node *res = get_irn_n(n, be_pos_Return_val + i);

		if (mode_is_float(get_irn_mode(res)))
	assert(x87_get_depth(state) == n_float_res);

	/* pop them virtually */
	for (i = n_float_res - 1; i >= 0; --i)

	return NO_NODE_ADDED;
/** Pairs an input register with the output register it must be permuted
 *  into. (NOTE(review): the closing "} perm_data_t;" line is not visible
 *  in this excerpt.) */
typedef struct _perm_data_t {
	const arch_register_t *in;   /**< the input register */
	const arch_register_t *out;  /**< the output register */
/**
 * Simulate a be_Perm: purely virtual on x87, only the simulated stack
 * state is permuted, no code is emitted.
 *
 * @param state  the x87 state
 * @param irn    the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 *
 * NOTE(review): declarations of i/n/stack_pos, the "stack_pos[i] = idx;"
 * assignment and some closing braces are missing from this excerpt --
 * confirm against the full file.
 */
static int sim_Perm(x87_state *state, ir_node *irn)
	ir_node *pred = get_irn_n(irn, 0);
	const ir_edge_t *edge;

	/* handle only floating point Perms */
	if (! mode_is_float(get_irn_mode(pred)))
		return NO_NODE_ADDED;

	DB((dbg, LEVEL_1, ">>> %+F\n", irn));

	/* Perm is a pure virtual instruction on x87.
	   All inputs must be on the FPU stack and are pairwise
	   different from each other.
	   So, all we need to do is to permutate the stack state. */
	n = get_irn_arity(irn);
	NEW_ARR_A(int, stack_pos, n);

	/* collect old stack positions */
	for (i = 0; i < n; ++i) {
		const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
		int idx = x87_on_stack(state, arch_register_get_index(inreg));

		assert(idx >= 0 && "Perm argument not on x87 stack");

	/* now do the permutation */
	foreach_out_edge(irn, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		const arch_register_t *out = x87_get_irn_register(proj);
		long num = get_Proj_proj(proj);

		assert(0 <= num && num < n && "More Proj's than Perm inputs");
		x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
	DB((dbg, LEVEL_1, "<<< %+F\n", irn));

	return NO_NODE_ADDED;
/**
 * Kill any dead registers at block start by popping them from the stack.
 *
 * @param sim          the simulator handle
 * @param block        the current block
 * @param start_state  the x87 state at the begin of the block
 *
 * @return the x87 state after dead registers were killed
 *
 * NOTE(review): the declaration/initialization of kill_mask, the early
 * return when nothing dies, the femms/emms state-reset path and several
 * braces/loop bodies are missing from this excerpt -- confirm against the
 * full file.
 */
static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state)
	x87_state *state = start_state;
	ir_node *first_insn = sched_first(block);
	ir_node *keep = NULL;
	unsigned live = vfp_live_args_after(sim, block, 0);
	int i, depth, num_pop;

	/* mark every stack slot whose register is dead at block begin */
	depth = x87_get_depth(state);
	for (i = depth - 1; i >= 0; --i) {
		int reg = x87_get_st_reg(state, i);

		if (! is_vfp_live(reg, live))
			kill_mask |= (1 << i);

	/* create a new state, will be changed */
	state = x87_clone_state(sim, state);

	DB((dbg, LEVEL_1, "Killing deads:\n"));
	DEBUG_ONLY(vfp_dump_live(live));
	DEBUG_ONLY(x87_dump_stack(state));

	if (kill_mask != 0 && live == 0) {
		/* special case: kill all registers */
		if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
			if (ia32_cg_config.use_femms) {
				/* use FEMMS on AMD processors to clear all */
				keep = new_bd_ia32_femms(NULL, block);
				/* use EMMS to clear all */
				keep = new_bd_ia32_emms(NULL, block);
			sched_add_before(first_insn, keep);

	/* now kill registers */
		/* we can only kill from TOS, so bring them up */
		if (! (kill_mask & 1)) {
			/* search from behind, because we can do a double-pop */
			for (i = depth - 1; i >= 0; --i) {
				if (kill_mask & (1 << i)) {
					kill_mask &= ~(1 << i);

			x87_set_st(state, -1, keep, i);
			x87_create_fxch(state, first_insn, i);

		if ((kill_mask & 3) == 3) {
			/* we can do a double-pop */
			/* only a single pop */

		kill_mask >>= num_pop;
		keep = x87_create_fpop(state, first_insn, num_pop);
} /* x87_kill_deads */
/**
 * Run the simulation and fix all virtual instructions for a block.
 *
 * @param sim    the simulator handle
 * @param block  the current block
 *
 * NOTE(review): declarations of n/next/func/node_inserted, the "return"
 * after the already-processed check, the "continue" for the start-block
 * successor and a few closing braces are missing from this excerpt --
 * confirm against the full file.
 */
static void x87_simulate_block(x87_simulator *sim, ir_node *block)
	blk_state *bl_state = x87_get_bl_state(sim, block);
	x87_state *state = bl_state->begin;
	const ir_edge_t *edge;
	ir_node *start_block;

	assert(state != NULL);
	/* already processed? */
	if (bl_state->end != NULL)

	DB((dbg, LEVEL_1, "Simulate %+F\n", block));
	DB((dbg, LEVEL_2, "State at Block begin:\n "));
	DEBUG_ONLY(x87_dump_stack(state));

	/* at block begin, kill all dead registers */
	state = x87_kill_deads(sim, block, state);
	/* create a new state, will be changed */
	state = x87_clone_state(sim, state);

	/* beware, n might change */
	for (n = sched_first(block); !sched_is_end(n); n = next) {
		ir_op *op = get_irn_op(n);

		/*
		 * get the next node to be simulated here.
		 * n might be completely removed from the schedule-
		 */
		next = sched_next(n);
		/* only nodes with a registered simulator callback are patched */
		if (op->ops.generic != NULL) {
			func = (sim_func)op->ops.generic;

			node_inserted = (*func)(state, n);

			/*
			 * sim_func might have added an additional node after n,
			 * so update next node
			 * beware: n must not be changed by sim_func
			 * (i.e. removed from schedule) in this case
			 */
			if (node_inserted != NO_NODE_ADDED)
				next = sched_next(n);

	start_block = get_irg_start_block(get_irn_irg(block));

	DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));

	/* check if the state must be shuffled */
	foreach_block_succ(block, edge) {
		ir_node *succ = get_edge_src_irn(edge);
		blk_state *succ_state;

		if (succ == start_block)

		succ_state = x87_get_bl_state(sim, succ);

		if (succ_state->begin == NULL) {
			DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
			DEBUG_ONLY(x87_dump_stack(state));
			succ_state->begin = state;

			waitq_put(sim->worklist, succ);
			DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
			/* There is already a begin state for the successor, bad.
			   Do the necessary permutations.
			   Note that critical edges are removed, so this is always possible:
			   If the successor has more than one possible input, then it must
			 */
			x87_shuffle(sim, block, state, succ, succ_state->begin);
	bl_state->end = state;
} /* x87_simulate_block */
/**
 * Register a simulator function for an opcode.
 *
 * @param op    the opcode to simulate
 * @param func  the simulator function for the opcode
 */
static void register_sim(ir_op *op, sim_func func)
	assert(op->ops.generic == NULL);
	/* the opcode's generic function-pointer slot carries the callback */
	op->ops.generic = (op_func) func;
} /* register_sim */
/**
 * Create a new x87 simulator: set up its obstack, block-state map and
 * liveness cache, and register the simulator callbacks for all virtual
 * floating point opcodes.
 *
 * @param sim  a simulator handle, will be initialized
 * @param irg  the current graph
 */
static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
	obstack_init(&sim->obst);
	sim->blk_states = pmap_create();
	sim->n_idx = get_irg_last_idx(irg);
	/* one cached liveness word per node index */
	sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);

	DB((dbg, LEVEL_1, "--------------------------------\n"
	"x87 Simulator started for %+F\n", irg));

	/* set the generic function pointer of instruction we must simulate */
	clear_irp_opcodes_generic_func();

	register_sim(op_ia32_Call, sim_Call);
	register_sim(op_ia32_vfld, sim_fld);
	register_sim(op_ia32_vfild, sim_fild);
	register_sim(op_ia32_vfld1, sim_fld1);
	register_sim(op_ia32_vfldz, sim_fldz);
	register_sim(op_ia32_vfadd, sim_fadd);
	register_sim(op_ia32_vfsub, sim_fsub);
	register_sim(op_ia32_vfmul, sim_fmul);
	register_sim(op_ia32_vfdiv, sim_fdiv);
	register_sim(op_ia32_vfprem, sim_fprem);
	register_sim(op_ia32_vfabs, sim_fabs);
	register_sim(op_ia32_vfchs, sim_fchs);
	register_sim(op_ia32_vfist, sim_fist);
	register_sim(op_ia32_vfisttp, sim_fisttp);
	register_sim(op_ia32_vfst, sim_fst);
	register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
	register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
	register_sim(op_ia32_vFucomi, sim_Fucom);
	register_sim(op_be_Copy, sim_Copy);
	register_sim(op_be_Return, sim_Return);
	register_sim(op_be_Perm, sim_Perm);
	register_sim(op_be_Keep, sim_Keep);
} /* x87_init_simulator */
/**
 * Destroy an x87 simulator and free all its resources.
 *
 * @param sim  the simulator handle
 */
static void x87_destroy_simulator(x87_simulator *sim)
	pmap_destroy(sim->blk_states);
	/* frees the block states and the liveness cache in one go */
	obstack_free(&sim->obst, NULL);
	DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
} /* x87_destroy_simulator */
2262 * Pre-block walker: calculate the liveness information for the block
2263 * and store it into the sim->live cache.
2265 static void update_liveness_walker(ir_node *block, void *data)
2267 x87_simulator *sim = data;
2268 update_liveness(sim, block);
2269 } /* update_liveness_walker */
/**
 * Run a simulation and fix all virtual instructions for a graph.
 * Replaces all virtual floating point instructions and registers
 * by real ones.
 *
 * @param birg  the backend graph to simulate
 *
 * NOTE(review): the declarations of the local simulator handle "sim" and
 * of "empty" (the initial empty stack state), as well as the "do {" that
 * opens the worklist loop, are missing from this excerpt -- confirm
 * against the full file.
 */
void x87_simulate_graph(be_irg_t *birg)
	/* TODO improve code quality (less executed fxch) by using execfreqs */

	ir_node *block, *start_block;
	blk_state *bl_state;
	ir_graph *irg = be_get_birg_irg(birg);

	/* create the simulator */
	x87_init_simulator(&sim, irg);

	start_block = get_irg_start_block(irg);
	bl_state = x87_get_bl_state(&sim, start_block);

	/* start with the empty state */
	bl_state->begin = empty;

	sim.worklist = new_waitq();
	waitq_put(sim.worklist, start_block);

	be_assure_liveness(birg);
	sim.lv = be_get_birg_liveness(birg);
	be_liveness_assure_sets(sim.lv);

	/* Calculate the liveness for all nodes. We must precalculate this info,
	 * because the simulator adds new nodes (possible before Phi nodes) which
	 * would let a lazy calculation fail.
	 * On the other hand we reduce the computation amount due to
	 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
	 */
	irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);

		/* process blocks from the worklist until a fixpoint is reached */
		block = waitq_get(sim.worklist);
		x87_simulate_block(&sim, block);
	} while (! waitq_empty(sim.worklist));

	del_waitq(sim.worklist);
	x87_destroy_simulator(&sim);
} /* x87_simulate_graph */
/* Initializes the x87 simulator: registers its firm debug module. */
void ia32_init_x87(void)
	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
} /* ia32_init_x87 */