2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
36 #include "iredges_t.h"
47 #include "../belive_t.h"
48 #include "../besched_t.h"
49 #include "../benode_t.h"
50 #include "bearch_ia32_t.h"
51 #include "ia32_new_nodes.h"
52 #include "gen_ia32_new_nodes.h"
53 #include "gen_ia32_regalloc_if.h"
61 #define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
63 /** the debug handle */
64 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
66 /* Forward declaration. */
67 typedef struct _x87_simulator x87_simulator;
70 * An exchange template.
71 * Note that our virtual functions have the same inputs
72 * and attributes as the real ones, so we can simple exchange
74 * Further, x87 supports inverse instructions, so we can handle them.
76 typedef struct _exchange_tmpl {
77 ir_op *normal_op; /**< the normal one */
78 ir_op *reverse_op; /**< the reverse one if exists */
79 ir_op *normal_pop_op; /**< the normal one with tos pop */
80 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
84 * An entry on the simulated x87 stack.
86 typedef struct _st_entry {
87 int reg_idx; /**< the virtual register index of this stack value */
88 ir_node *node; /**< the node that produced this value */
94 typedef struct _x87_state {
95 st_entry st[N_x87_REGS]; /**< the register stack */
96 int depth; /**< the current stack depth */
97 int tos; /**< position of the tos */
98 x87_simulator *sim; /**< The simulator. */
101 /** An empty state, used for blocks without fp instructions. */
102 static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
103 static x87_state *empty = (x87_state *)&_empty;
106 NO_NODE_ADDED = 0, /**< No node was added. */
107 NODE_ADDED = 1 /**< A node was added by the simulator in the schedule. */
111 * The type of an instruction simulator function.
113 * @param state the x87 state
114 * @param n the node to be simulated
116 * @return NODE_ADDED if a node was added AFTER n in schedule,
119 typedef int (*sim_func)(x87_state *state, ir_node *n);
122 * A block state: Every block has a x87 state at the beginning and at the end.
124 typedef struct _blk_state {
125 x87_state *begin; /**< state at the begin or NULL if not assigned */
126 x87_state *end; /**< state at the end or NULL if not assigned */
129 #define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
131 /** liveness bitset for vfp registers. */
132 typedef unsigned char vfp_liveness;
137 struct _x87_simulator {
138 struct obstack obst; /**< An obstack for fast allocating. */
139 pmap *blk_states; /**< Map blocks to states. */
140 const arch_env_t *arch_env; /**< The architecture environment. */
141 be_lv_t *lv; /**< intrablock liveness. */
142 vfp_liveness *live; /**< Liveness information. */
143 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
144 waitq *worklist; /**< Worklist of blocks that must be processed. */
145 ia32_isa_t *isa; /**< the ISA object */
149 * Returns the current stack depth.
151 * @param state the x87 state
153 * @return the x87 stack depth
155 static int x87_get_depth(const x87_state *state) {
	/* NOTE(review): body line elided in this listing — presumably returns state->depth; confirm against full source */
157 } /* x87_get_depth */
160 * Return the virtual register index at st(pos).
162 * @param state the x87 state
163 * @param pos a stack position
165 * @return the vfp register index that produced the value at st(pos)
167 static int x87_get_st_reg(const x87_state *state, int pos) {
168 assert(pos < state->depth);
169 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
170 } /* x87_get_st_reg */
173 * Return the node at st(pos).
175 * @param state the x87 state
176 * @param pos a stack position
178 * @return the IR node that produced the value at st(pos)
180 static ir_node *x87_get_st_node(const x87_state *state, int pos) {
181 assert(pos < state->depth);
182 return state->st[MASK_TOS(state->tos + pos)].node;
183 } /* x87_get_st_node */
187 * Dump the stack for debugging.
189 * @param state the x87 state
191 static void x87_dump_stack(const x87_state *state) {
	/* NOTE(review): loop-index declaration elided in this listing */
	/* print from the deepest slot down to st(0), then mark the top of stack */
194 for (i = state->depth - 1; i >= 0; --i) {
195 DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
196 x87_get_st_node(state, i)));
198 DB((dbg, LEVEL_2, "<-- TOS\n"));
199 } /* x87_dump_stack */
200 #endif /* DEBUG_libfirm */
203 * Set a virtual register to st(pos).
205 * @param state the x87 state
206 * @param reg_idx the vfp register index that should be set
207 * @param node the IR node that produces the value of the vfp register
208 * @param pos the stack position where the new value should be entered
210 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos) {
211 assert(0 < state->depth);
	/* overwrite both fields of the slot at st(pos); slots are addressed relative to tos */
212 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
213 state->st[MASK_TOS(state->tos + pos)].node = node;
215 DB((dbg, LEVEL_2, "After SET_REG: "));
216 DEBUG_ONLY(x87_dump_stack(state));
	/* NOTE(review): closing brace elided in this listing */
220 * Set the tos virtual register.
222 * @param state the x87 state
223 * @param reg_idx the vfp register index that should be set
224 * @param node the IR node that produces the value of the vfp register
226 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node) {
	/* tos is st(0) by definition */
227 x87_set_st(state, reg_idx, node, 0);
	/* NOTE(review): closing brace elided in this listing */
231 * Swap st(0) with st(pos).
233 * @param state the x87 state
234 * @param pos the stack position to change the tos with
236 static void x87_fxch(x87_state *state, int pos) {
	/* NOTE(review): declaration of the st_entry temporary 'entry' elided in this listing */
238 assert(pos < state->depth);
	/* three-way swap of st(pos) and st(0) through the temporary */
240 entry = state->st[MASK_TOS(state->tos + pos)];
241 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
242 state->st[MASK_TOS(state->tos)] = entry;
244 DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
	/* NOTE(review): closing brace elided in this listing */
248 * Convert a virtual register to the stack index.
250 * @param state the x87 state
251 * @param reg_idx the register vfp index
253 * @return the stack position where the register is stacked
254 * or -1 if the virtual register was not found
256 static int x87_on_stack(const x87_state *state, int reg_idx) {
257 int i, tos = state->tos;
	/* linear scan over the live depth; slot addresses wrap modulo N_x87_REGS */
259 for (i = 0; i < state->depth; ++i)
260 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
	/* NOTE(review): 'return i' on hit and the trailing 'return -1' are elided in this listing */
266 * Push a virtual Register onto the stack, double pushed allowed.
268 * @param state the x87 state
269 * @param reg_idx the register vfp index
270 * @param node the node that produces the value of the vfp register
272 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node) {
273 assert(state->depth < N_x87_REGS && "stack overrun");
	/* NOTE(review): depth increment elided in this listing */
	/* the simulated stack grows downwards: tos moves to the previous slot */
276 state->tos = MASK_TOS(state->tos - 1);
277 state->st[state->tos].reg_idx = reg_idx;
278 state->st[state->tos].node = node;
280 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
	/* NOTE(review): closing brace elided in this listing */
284 * Push a virtual Register onto the stack, double pushes are NOT allowed.
286 * @param state the x87 state
287 * @param reg_idx the register vfp index
288 * @param node the node that produces the value of the vfp register
289 * @param dbl_push if != 0 double pushes are allowed
291 static void x87_push(x87_state *state, int reg_idx, ir_node *node) {
	/* unlike x87_push_dbl(), a register may appear on the stack only once here */
292 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
294 x87_push_dbl(state, reg_idx, node);
	/* NOTE(review): closing brace elided in this listing */
298 * Pop a virtual Register from the stack.
300 * @param state the x87 state
302 static void x87_pop(x87_state *state) {
303 assert(state->depth > 0 && "stack underrun");
	/* NOTE(review): depth decrement elided in this listing; tos advances by one slot */
306 state->tos = MASK_TOS(state->tos + 1);
308 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
	/* NOTE(review): closing brace elided in this listing */
312 * Empty the fpu stack
314 * @param state the x87 state
316 static void x87_emms(x87_state *state) {
322 * Returns the block state of a block.
324 * @param sim the x87 simulator handle
325 * @param block the current block
327 * @return the block state
329 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block) {
330 pmap_entry *entry = pmap_find(sim->blk_states, block);
	/* NOTE(review): the NULL-entry branch head is elided here; the following path
	   runs when the block has no state yet and allocates a fresh, unassigned one */
333 blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
334 bl_state->begin = NULL;
335 bl_state->end = NULL;
337 pmap_insert(sim->blk_states, block, bl_state);
	/* NOTE(review): return of the freshly inserted state elided in this listing */
	/* cache hit: hand back the previously stored state */
341 return PTR_TO_BLKSTATE(entry->value);
342 } /* x87_get_bl_state */
345 * Creates a new x87 state.
347 * @param sim the x87 simulator handle
349 * @return a new x87 state
351 static x87_state *x87_alloc_state(x87_simulator *sim) {
352 x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));
	/* NOTE(review): sim back-link assignment and 'return res' elided in this listing */
356 } /* x87_alloc_state */
361 * @param sim the x87 simulator handle
362 * @param src the x87 state that will be cloned
364 * @return a cloned copy of the src state
366 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src) {
367 x87_state *res = x87_alloc_state(sim);
	/* a shallow copy suffices: st[] holds plain ints and node pointers */
369 memcpy(res, src, sizeof(*res));
	/* NOTE(review): 'return res' elided in this listing */
371 } /* x87_clone_state */
374 * Patch a virtual instruction into a x87 one and return
375 * the node representing the result value.
377 * @param n the IR node to patch
378 * @param op the x87 opcode to patch in
380 static ir_node *x87_patch_insn(ir_node *n, ir_op *op) {
381 ir_mode *mode = get_irn_mode(n);
	/* NOTE(review): the opcode-exchange call and result bookkeeping are elided in this listing */
386 if (mode == mode_T) {
387 /* patch all Proj's */
388 const ir_edge_t *edge;
390 foreach_out_edge(n, edge) {
391 ir_node *proj = get_edge_src_irn(edge);
393 mode = get_irn_mode(proj);
394 if (mode_is_float(mode)) {
	/* float results of the patched instruction are widened to mode_E */
396 set_irn_mode(proj, mode_E);
400 } else if (mode_is_float(mode))
401 set_irn_mode(n, mode_E);
	/* NOTE(review): return value elided in this listing */
403 } /* x87_patch_insn */
406 * Returns the first Proj of a mode_T node having a given mode.
408 * @param n the mode_T node
409 * @param m the desired mode of the Proj
410 * @return The first Proj of mode @p m found or NULL.
412 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m) {
413 const ir_edge_t *edge;
415 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
	/* scan the out edges for the first Proj carrying the requested mode */
417 foreach_out_edge(n, edge) {
418 ir_node *proj = get_edge_src_irn(edge);
419 if (get_irn_mode(proj) == m)
	/* NOTE(review): 'return proj' and the fall-through 'return NULL' are elided in this listing */
424 } /* get_irn_Proj_for_mode */
427 * Wrap the arch_* function here so we can check for errors.
429 static INLINE const arch_register_t *x87_get_irn_register(x87_simulator *sim, const ir_node *irn) {
430 const arch_register_t *res;
432 res = arch_get_irn_register(sim->arch_env, irn);
	/* every node the simulator touches must carry a vfp-class register */
433 assert(res->reg_class->regs == ia32_vfp_regs);
	/* NOTE(review): 'return res' elided in this listing */
435 } /* x87_get_irn_register */
437 /* -------------- x87 perm --------------- */
440 * Creates a fxch for shuffle.
442 * @param state the x87 state
443 * @param pos parameter for fxch
444 * @param block the block were fxch is inserted
446 * Creates a new fxch node and reroute the user of the old node
449 * @return the fxch node
451 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block) {
	/* NOTE(review): declaration of 'fxch' elided in this listing */
453 ia32_x87_attr_t *attr;
455 fxch = new_rd_ia32_fxch(NULL, get_irn_irg(block), block, mode_E);
456 attr = get_ia32_x87_attr(fxch);
	/* operands of the exchange: st(pos) <-> st(0) */
457 attr->x87[0] = &ia32_st_regs[pos];
458 attr->x87[2] = &ia32_st_regs[0];
	/* mirror the exchange in the simulated stack state */
462 x87_fxch(state, pos);
	/* NOTE(review): 'return fxch' elided in this listing */
464 } /* x87_fxch_shuffle */
467 * Calculate the necessary permutations to reach dst_state.
469 * These permutations are done with fxch instructions and placed
470 * at the end of the block.
472 * Note that critical edges are removed here, so we need only
473 * a shuffle if the current block has only one successor.
475 * @param sim the simulator handle
476 * @param block the current block
477 * @param state the current x87 stack state, might be modified
478 * @param dst_block the destination block
479 * @param dst_state destination state
483 static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
484 x87_state *state, ir_node *dst_block,
485 const x87_state *dst_state)
487 int i, n_cycles, k, ri;
488 unsigned cycles[4], all_mask;
489 char cycle_idx[4][8];
490 ir_node *fxch, *before, *after;
	/* both states must already agree on how many values are stacked */
494 assert(state->depth == dst_state->depth);
496 /* Some mathematics here:
497 If we have a cycle of length n that includes the tos,
498 we need n-1 exchange operations.
499 We can always add the tos and restore it, so we need
500 n+1 exchange operations for a cycle not containing the tos.
501 So, the maximum of needed operations is for a cycle of 7
502 not including the tos == 8.
503 This is the same number of ops we would need for using stores,
504 so exchange is cheaper (we save the loads).
505 On the other hand, we might need an additional exchange
506 in the next block to bring one operand on top, so the
507 number of ops in the first case is identical.
508 Further, no more than 4 cycles can exists (4 x 2).
	/* one bit per occupied stack slot; cleared as slots are assigned to cycles */
510 all_mask = (1 << (state->depth)) - 1;
512 for (n_cycles = 0; all_mask; ++n_cycles) {
513 int src_idx, dst_idx;
515 /* find the first free slot */
516 for (i = 0; i < state->depth; ++i) {
517 if (all_mask & (1 << i)) {
518 all_mask &= ~(1 << i);
520 /* check if there are differences here */
521 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
527 /* no more cycles found */
	/* NOTE(review): loop-exit bookkeeping ('k = 0', breaks) elided in this listing */
532 cycles[n_cycles] = (1 << i);
533 cycle_idx[n_cycles][k++] = i;
	/* follow the permutation until it closes back into the cycle */
534 for (src_idx = i; ; src_idx = dst_idx) {
535 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
537 if ((all_mask & (1 << dst_idx)) == 0)
540 cycle_idx[n_cycles][k++] = dst_idx;
541 cycles[n_cycles] |= (1 << dst_idx);
542 all_mask &= ~(1 << dst_idx);
	/* terminate the index list of this cycle */
544 cycle_idx[n_cycles][k] = -1;
548 /* no permutation needed */
552 /* Hmm: permutation needed */
553 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
554 DEBUG_ONLY(x87_dump_stack(state));
555 DB((dbg, LEVEL_2, " to\n"));
556 DEBUG_ONLY(x87_dump_stack(dst_state));
560 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
561 for (ri = 0; ri < n_cycles; ++ri) {
562 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
563 for (k = 0; cycle_idx[ri][k] != -1; ++k)
564 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
565 DB((dbg, LEVEL_2, "\n"));
572 * Find the place node must be insert.
573 * We have only one successor block, so the last instruction should
	/* insert the fxch nodes before the final control-flow op of the block */
576 before = sched_last(block);
577 assert(is_cfop(before));
579 /* now do the permutations */
580 for (ri = 0; ri < n_cycles; ++ri) {
581 if ((cycles[ri] & 1) == 0) {
582 /* this cycle does not include the tos */
	/* bring one cycle member to tos first, then rotate through it */
583 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
585 sched_add_after(after, fxch);
587 sched_add_before(before, fxch);
590 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
591 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
593 sched_add_after(after, fxch);
595 sched_add_before(before, fxch);
598 if ((cycles[ri] & 1) == 0) {
599 /* this cycle does not include the tos */
	/* final exchange restores the original tos value */
600 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
601 sched_add_after(after, fxch);
	/* NOTE(review): function epilogue (return of the adjusted state) elided in this listing */
608 * Create a fxch node before another node.
610 * @param state the x87 state
611 * @param n the node after the fxch
612 * @param pos exchange st(pos) with st(0)
616 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
	/* NOTE(review): opening brace and declaration of 'fxch' elided in this listing */
619 ia32_x87_attr_t *attr;
620 ir_graph *irg = get_irn_irg(n);
621 ir_node *block = get_nodes_block(n);
	/* update the simulated stack first, then emit the matching instruction */
623 x87_fxch(state, pos);
625 fxch = new_rd_ia32_fxch(NULL, irg, block, mode_E);
626 attr = get_ia32_x87_attr(fxch);
627 attr->x87[0] = &ia32_st_regs[pos];
628 attr->x87[2] = &ia32_st_regs[0];
	/* schedule the exchange immediately before the consuming node */
632 sched_add_before(n, fxch);
633 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
	/* NOTE(review): 'return fxch' elided in this listing */
635 } /* x87_create_fxch */
638 * Create a fpush before node n.
640 * @param state the x87 state
641 * @param n the node after the fpush
642 * @param pos push st(pos) on stack
643 * @param op_idx replace input op_idx of n with the fpush result
645 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx) {
646 ir_node *fpush, *pred = get_irn_n(n, op_idx);
647 ia32_x87_attr_t *attr;
648 const arch_register_t *out = x87_get_irn_register(state->sim, pred);
	/* duplicate pushes are allowed here: the same value ends up twice on the stack */
650 x87_push_dbl(state, arch_register_get_index(out), pred);
652 fpush = new_rd_ia32_fpush(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
653 attr = get_ia32_x87_attr(fpush);
654 attr->x87[0] = &ia32_st_regs[pos];
655 attr->x87[2] = &ia32_st_regs[0];
	/* NOTE(review): rewiring of n's input op_idx to the fpush result appears elided in this listing */
658 sched_add_before(n, fpush);
660 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
661 } /* x87_create_fpush */
664 * Create a fpop before node n.
666 * @param state the x87 state
667 * @param n the node after the fpop
668 * @param num pop 1 or 2 values
670 * @return the fpop node
672 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
	/* NOTE(review): opening brace and declaration of 'fpop' elided in this listing */
675 ia32_x87_attr_t *attr;
676 int cpu = state->sim->isa->opt_arch;
	/* Athlon-class CPUs handle ffreep faster than fstp st(0); pick accordingly */
680 if (ARCH_ATHLON(cpu))
681 fpop = new_rd_ia32_ffreep(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
683 fpop = new_rd_ia32_fpop(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
684 attr = get_ia32_x87_attr(fpop);
685 attr->x87[0] = &ia32_st_regs[0];
686 attr->x87[1] = &ia32_st_regs[0];
687 attr->x87[2] = &ia32_st_regs[0];
690 sched_add_before(n, fpop);
691 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
	/* NOTE(review): the num-controlled repeat loop and 'return fpop' are elided in this listing */
696 } /* x87_create_fpop */
699 * Creates an fldz before node n
701 * @param state the x87 state
702 * @param n the node after the fldz
704 * @return the fldz node
706 static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx) {
707 ir_graph *irg = get_irn_irg(n);
708 ir_node *block = get_nodes_block(n);
	/* NOTE(review): declaration of 'fldz' elided in this listing */
711 fldz = new_rd_ia32_fldz(NULL, irg, block, mode_E);
713 sched_add_before(n, fldz);
714 DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));
	/* record the constant-zero value on the simulated stack under regidx */
717 x87_push(state, regidx, fldz);
	/* NOTE(review): 'return fldz' elided in this listing */
722 /* --------------------------------- liveness ------------------------------------------ */
725 * The liveness transfer function.
726 * Updates a live set over a single step from a given node to its predecessor.
727 * Everything defined at the node is removed from the set, the uses of the node get inserted.
729 * @param sim The simulator handle.
730 * @param irn The node at which liveness should be computed.
731 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
732 * the registers live after irn.
734 * @return The live bitset.
736 static vfp_liveness vfp_liveness_transfer(x87_simulator *sim, ir_node *irn, vfp_liveness live)
	/* NOTE(review): opening brace and declarations of 'i', 'n' elided in this listing */
739 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
740 const arch_env_t *arch_env = sim->arch_env;
	/* kill: everything the node defines (the node itself, or its Projs for mode_T) */
742 if (get_irn_mode(irn) == mode_T) {
743 const ir_edge_t *edge;
745 foreach_out_edge(irn, edge) {
746 ir_node *proj = get_edge_src_irn(edge);
748 if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
749 const arch_register_t *reg = x87_get_irn_register(sim, proj);
750 live &= ~(1 << arch_register_get_index(reg));
755 if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
756 const arch_register_t *reg = x87_get_irn_register(sim, irn);
757 live &= ~(1 << arch_register_get_index(reg));
	/* gen: every float operand the node uses becomes live before it */
760 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
761 ir_node *op = get_irn_n(irn, i);
763 if (mode_is_float(get_irn_mode(op)) && arch_irn_consider_in_reg_alloc(arch_env, cls, op)) {
764 const arch_register_t *reg = x87_get_irn_register(sim, op);
765 live |= 1 << arch_register_get_index(reg);
	/* NOTE(review): 'return live' elided in this listing */
769 } /* vfp_liveness_transfer */
772 * Put all live virtual registers at the end of a block into a bitset.
774 * @param sim the simulator handle
775 * @param lv the liveness information
776 * @param bl the block
778 * @return The live bitset at the end of this block
780 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
	/* NOTE(review): opening brace and declaration of 'i' elided in this listing */
783 vfp_liveness live = 0;
784 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
785 const arch_env_t *arch_env = sim->arch_env;
786 const be_lv_t *lv = sim->lv;
	/* collect all values live-out of the block that belong to the vfp class */
788 be_lv_foreach(lv, block, be_lv_state_end, i) {
789 const arch_register_t *reg;
790 const ir_node *node = be_lv_get_irn(lv, block, i);
791 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
794 reg = x87_get_irn_register(sim, node);
795 live |= 1 << arch_register_get_index(reg);
	/* NOTE(review): 'return live' elided in this listing */
799 } /* vfp_liveness_end_of_block */
801 /** get the register mask from an arch_register */
802 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
805 * Return a bitset of argument registers which are live at the end of a node.
807 * @param sim the simulator handle
808 * @param pos the node
809 * @param kill kill mask for the output registers
811 * @return The live bitset.
813 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
	/* NOTE(review): opening brace elided in this listing */
815 unsigned idx = get_irn_idx(pos);
	/* liveness was pre-computed per node index by update_liveness() */
817 assert(idx < sim->n_idx);
818 return sim->live[idx] & ~kill;
819 } /* vfp_live_args_after */
822 * Calculate the liveness for a whole block and cache it.
824 * @param sim the simulator handle
825 * @param lv the liveness handle
826 * @param block the block
828 static void update_liveness(x87_simulator *sim, ir_node *block) {
829 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
	/* NOTE(review): declarations of 'irn' and 'idx' elided in this listing */
833 /* now iterate through the block backward and cache the results */
834 sched_foreach_reverse(block, irn) {
835 /* stop at the first Phi: this produces the live-in */
	/* NOTE(review): the Phi break elided in this listing */
839 idx = get_irn_idx(irn);
840 sim->live[idx] = live;
842 live = vfp_liveness_transfer(sim, irn, live);
	/* the block's own index caches its live-in set */
844 idx = get_irn_idx(block);
845 sim->live[idx] = live;
846 } /* update_liveness */
849 * Returns true if a register is live in a set.
851 * @param reg_idx the vfp register index
852 * @param live a live bitset
854 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
858 * Dump liveness info.
860 * @param live the live bitset
862 static void vfp_dump_live(vfp_liveness live) {
	/* NOTE(review): declaration of 'i' elided in this listing */
865 DB((dbg, LEVEL_2, "Live after: "));
	/* one bit per vfp register, 8 registers total */
866 for (i = 0; i < 8; ++i) {
867 if (live & (1 << i)) {
868 DB((dbg, LEVEL_2, "vf%d ", i));
871 DB((dbg, LEVEL_2, "\n"));
872 } /* vfp_dump_live */
873 #endif /* DEBUG_libfirm */
875 /* --------------------------------- simulators ---------------------------------------- */
877 #define XCHG(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
889 * Simulate a virtual binop.
891 * @param state the x87 state
892 * @param n the node that should be simulated (and patched)
893 * @param tmpl the template containing the 4 possible x87 opcodes
895 * @return NO_NODE_ADDED
897 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl) {
898 int op2_idx = 0, op1_idx;
899 int out_idx, do_pop = 0;
900 ia32_x87_attr_t *attr;
901 ir_node *patched_insn;
903 x87_simulator *sim = state->sim;
904 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
905 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
906 const arch_register_t *op1_reg = x87_get_irn_register(sim, op1);
907 const arch_register_t *op2_reg = x87_get_irn_register(sim, op2);
908 const arch_register_t *out = x87_get_irn_register(sim, n);
909 int reg_index_1 = arch_register_get_index(op1_reg);
910 int reg_index_2 = arch_register_get_index(op2_reg);
	/* liveness after n decides whether operands may be consumed (popped) */
911 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
915 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
916 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
917 arch_register_get_name(out)));
918 DEBUG_ONLY(vfp_dump_live(live));
919 DB((dbg, LEVEL_1, "Stack before: "));
920 DEBUG_ONLY(x87_dump_stack(state));
	/* NOTE(review): the REG_VFP_UKNWN handling branch body is elided in this listing */
922 if(reg_index_1 == REG_VFP_UKNWN) {
926 op1_idx = x87_on_stack(state, reg_index_1);
927 assert(op1_idx >= 0);
928 op1_live_after = is_vfp_live(arch_register_get_index(op1_reg), live);
	/* REG_VFP_NOREG means the second operand comes from memory (address mode) */
931 if (reg_index_2 != REG_VFP_NOREG) {
932 if(reg_index_2 == REG_VFP_UKNWN) {
936 /* second operand is a vfp register */
937 op2_idx = x87_on_stack(state, reg_index_2);
938 assert(op2_idx >= 0);
940 = is_vfp_live(arch_register_get_index(op2_reg), live);
943 if (op2_live_after) {
944 /* Second operand is live. */
946 if (op1_live_after) {
947 /* Both operands are live: push the first one.
948 This works even for op1 == op2. */
949 x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
950 /* now do fxxx (tos=tos X op) */
954 dst = tmpl->normal_op;
956 /* Second live, first operand is dead here, bring it to tos. */
958 x87_create_fxch(state, n, op1_idx);
963 /* now do fxxx (tos=tos X op) */
965 dst = tmpl->normal_op;
968 /* Second operand is dead. */
969 if (op1_live_after) {
970 /* First operand is live: bring second to tos. */
972 x87_create_fxch(state, n, op2_idx);
977 /* now do fxxxr (tos = op X tos) */
979 dst = tmpl->reverse_op;
981 /* Both operands are dead here, pop them from the stack. */
984 /* Both are identically and on tos, no pop needed. */
985 /* here fxxx (tos = tos X tos) */
986 dst = tmpl->normal_op;
989 /* now do fxxxp (op = op X tos, pop) */
990 dst = tmpl->normal_pop_op;
994 } else if (op1_idx == 0) {
995 assert(op1_idx != op2_idx);
996 /* now do fxxxrp (op = tos X op, pop) */
997 dst = tmpl->reverse_pop_op;
1001 /* Bring the second on top. */
1002 x87_create_fxch(state, n, op2_idx);
1003 if (op1_idx == op2_idx) {
1004 /* Both are identically and on tos now, no pop needed. */
1007 /* use fxxx (tos = tos X tos) */
1008 dst = tmpl->normal_op;
1011 /* op2 is on tos now */
1013 /* use fxxxp (op = op X tos, pop) */
1014 dst = tmpl->normal_pop_op;
1022 /* second operand is an address mode */
1023 if (op1_live_after) {
1024 /* first operand is live: push it here */
1025 x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
1027 /* use fxxx (tos = tos X mem) */
1028 dst = tmpl->normal_op;
1031 /* first operand is dead: bring it to tos */
1033 x87_create_fxch(state, n, op1_idx);
1037 /* use fxxxp (tos = tos X mem) */
1038 dst = tmpl->normal_op;
	/* patch the virtual opcode to the chosen real x87 opcode and record the result */
1043 patched_insn = x87_patch_insn(n, dst);
1044 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
	/* NOTE(review): the conditional x87_pop for do_pop appears elided in this listing */
1049 /* patch the operation */
1050 attr = get_ia32_x87_attr(n);
1051 attr->x87[0] = op1_reg = &ia32_st_regs[op1_idx];
1052 if (reg_index_2 != REG_VFP_NOREG) {
1053 attr->x87[1] = op2_reg = &ia32_st_regs[op2_idx];
1055 attr->x87[2] = out = &ia32_st_regs[out_idx];
1057 if (reg_index_2 != REG_VFP_NOREG) {
1058 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1059 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
1060 arch_register_get_name(out)));
1062 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1063 arch_register_get_name(op1_reg),
1064 arch_register_get_name(out)));
1067 return NO_NODE_ADDED;
1071 * Simulate a virtual Unop.
1073 * @param state the x87 state
1074 * @param n the node that should be simulated (and patched)
1075 * @param op the x87 opcode that will replace n's opcode
1077 * @return NO_NODE_ADDED
1079 static int sim_unop(x87_state *state, ir_node *n, ir_op *op) {
1080 int op1_idx, out_idx;
1081 x87_simulator *sim = state->sim;
1082 const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, UNOP_IDX));
1083 const arch_register_t *out = x87_get_irn_register(sim, n);
1084 ia32_x87_attr_t *attr;
1085 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1087 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1088 DEBUG_ONLY(vfp_dump_live(live));
1090 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
	/* x87 unops work on tos only: keep a live operand via fpush, else fxch it up */
1092 if (is_vfp_live(arch_register_get_index(op1), live)) {
1093 /* push the operand here */
1094 x87_create_fpush(state, n, op1_idx, UNOP_IDX);
1098 /* operand is dead, bring it to tos */
1100 x87_create_fxch(state, n, op1_idx);
	/* result replaces the value at tos */
1105 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1107 attr = get_ia32_x87_attr(n);
1108 attr->x87[0] = op1 = &ia32_st_regs[0];
1109 attr->x87[2] = out = &ia32_st_regs[0];
1110 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1112 return NO_NODE_ADDED;
1116 * Simulate a virtual Load instruction.
1118 * @param state the x87 state
1119 * @param n the node that should be simulated (and patched)
1120 * @param op the x87 opcode that will replace n's opcode
1122 * @return NO_NODE_ADDED
1124 static int sim_load(x87_state *state, ir_node *n, ir_op *op) {
1125 const arch_register_t *out = x87_get_irn_register(state->sim, n);
1126 ia32_x87_attr_t *attr;
1128 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
	/* a load always pushes its result onto the x87 stack */
1129 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1130 assert(out == x87_get_irn_register(state->sim, n));
1131 attr = get_ia32_x87_attr(n);
	/* the freshly loaded value sits at st(0) */
1132 attr->x87[2] = out = &ia32_st_regs[0];
1133 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1135 return NO_NODE_ADDED;
1139 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1141 * @param store The store
1142 * @param old_val The former value
1143 * @param new_val The new value
1145 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val) {
1146 const ir_edge_t *edge, *ne;
	/* safe iteration: set_irn_n() below mutates the very edges we walk */
1148 foreach_out_edge_safe(old_val, edge, ne) {
1149 ir_node *user = get_edge_src_irn(edge);
1151 if (! user || user == store)
1154 /* if the user is scheduled after the store: rewire */
1155 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
	/* NOTE(review): declaration of 'i' elided in this listing */
1157 /* find the input of the user pointing to the old value */
1158 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1159 if (get_irn_n(user, i) == old_val)
1160 set_irn_n(user, i, new_val);
1164 } /* collect_and_rewire_users */
1167 * Simulate a virtual Store.
1169 * @param state the x87 state
1170 * @param n the node that should be simulated (and patched)
1171 * @param op the x87 store opcode
1172 * @param op_p the x87 store and pop opcode
1174 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p) {
1175 x87_simulator *sim = state->sim;
1176 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1177 const arch_register_t *op2 = x87_get_irn_register(sim, val);
1178 unsigned live = vfp_live_args_after(sim, n, 0);
1179 int insn = NO_NODE_ADDED;
1180 ia32_x87_attr_t *attr;
1181 int op2_reg_idx, op2_idx, depth;
1182 int live_after_node;
	/* NOTE(review): declaration of 'mode' elided in this listing */
1185 op2_reg_idx = arch_register_get_index(op2);
1186 if (op2_reg_idx == REG_VFP_UKNWN) {
1187 /* just take any value from stack */
1188 if(state->depth > 0) {
	/* NOTE(review): the 'use tos' assignment for this branch is elided in this listing */
1190 DEBUG_ONLY(op2 = NULL);
1191 live_after_node = 1;
1193 /* produce a new value which we will consume immediately */
1194 x87_create_fldz(state, n, op2_reg_idx);
1195 live_after_node = 0;
1196 op2_idx = x87_on_stack(state, op2_reg_idx);
1197 assert(op2_idx >= 0);
	/* normal case: locate the stored value on the simulated stack */
1200 op2_idx = x87_on_stack(state, op2_reg_idx);
1201 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1202 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1203 assert(op2_idx >= 0);
1206 mode = get_ia32_ls_mode(n);
1207 depth = x87_get_depth(state);
1209 if (live_after_node) {
1211 Problem: fst doesn't support mode_E (spills), only fstp does
1213 - stack not full: push value and fstp
1214 - stack full: fstp value and load again
1216 if (mode == mode_E || mode == mode_Ls) {
1217 if (depth < N_x87_REGS) {
1218 /* ok, we have a free register: push + fstp */
1219 x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
	/* NOTE(review): the x87_pop call paired with the pop-store appears elided here */
1221 x87_patch_insn(n, op_p);
1223 ir_node *vfld, *mem, *block, *rproj, *mproj;
	/* NOTE(review): declaration of 'irg' elided in this listing */
1226 /* stack full here: need fstp + load */
1228 x87_patch_insn(n, op_p);
1230 block = get_nodes_block(n);
1231 irg = get_irn_irg(n);
	/* reload the just-stored value from the same address */
1232 vfld = new_rd_ia32_vfld(NULL, irg, block, get_irn_n(n, 0), get_irn_n(n, 1), new_rd_NoMem(irg), get_ia32_ls_mode(n));
1234 /* copy all attributes */
1235 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1236 if (is_ia32_use_frame(n))
1237 set_ia32_use_frame(vfld);
1238 set_ia32_op_type(vfld, ia32_am_Source);
1239 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1240 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1241 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1243 rproj = new_r_Proj(irg, block, vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1244 mproj = new_r_Proj(irg, block, vfld, mode_M, pn_ia32_vfld_M);
1245 mem = get_irn_Proj_for_mode(n, mode_M);
1247 assert(mem && "Store memory not found");
1249 arch_set_irn_register(sim->arch_env, rproj, op2);
1251 /* reroute all former users of the store memory to the load memory */
1252 edges_reroute(mem, mproj, irg);
1253 /* set the memory input of the load to the store memory */
1254 set_irn_n(vfld, n_ia32_vfld_mem, mem);
1256 sched_add_after(n, vfld);
1257 sched_add_after(vfld, rproj);
1259 /* rewire all users, scheduled after the store, to the loaded value */
1260 collect_and_rewire_users(n, val, rproj);
	/* NOTE(review): state bookkeeping and 'insn = NODE_ADDED' for this path appear elided */
1265 /* we can only store the tos to memory */
	/* NOTE(review): the op2_idx != 0 guard for this fxch is elided in this listing */
1267 x87_create_fxch(state, n, op2_idx);
1269 /* mode != mode_E -> use normal fst */
1270 x87_patch_insn(n, op);
1273 /* we can only store the tos to memory */
1275 x87_create_fxch(state, n, op2_idx);
	/* value is dead after the store: use the popping variant */
1278 x87_patch_insn(n, op_p);
	/* NOTE(review): the matching x87_pop appears elided in this listing */
1281 attr = get_ia32_x87_attr(n);
1282 attr->x87[1] = op2 = &ia32_st_regs[0];
1283 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
	/* NOTE(review): 'return insn' elided in this listing */
/*
 * Instantiation helpers: each macro below expands to a small
 * sim_<op>() wrapper that forwards to the generic simulation routine
 * for its node class (sim_binop / sim_load / sim_unop / sim_store),
 * passing the concrete ia32 opcode(s) to patch in.
 * NOTE(review): the closing lines of the generated functions fall on
 * lines not visible in this excerpt; code tokens are kept verbatim.
 */
1288 #define _GEN_BINOP(op, rev) \
1289 static int sim_##op(x87_state *state, ir_node *n) { \
1290 exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
1291 return sim_binop(state, n, &tmpl); \
1294 #define GEN_BINOP(op) _GEN_BINOP(op, op)
1295 #define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
1297 #define GEN_LOAD2(op, nop) \
1298 static int sim_##op(x87_state *state, ir_node *n) { \
1299 return sim_load(state, n, op_ia32_##nop); \
1302 #define GEN_LOAD(op) GEN_LOAD2(op, op)
1304 #define GEN_UNOP(op) \
1305 static int sim_##op(x87_state *state, ir_node *n) { \
1306 return sim_unop(state, n, op_ia32_##op); \
1309 #define GEN_STORE(op) \
1310 static int sim_##op(x87_state *state, ir_node *n) { \
1311 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
/*
 * Simulate a virtual Fucom+Fnstsw compare node.
 *
 * Depending on the liveness of the two compare operands, chooses
 * between Fucom (no pop), Fucomp (one pop) and Fucompp (two pops),
 * inserting fxch exchanges so that the operands end up in TOS (and
 * st(1) for the double-pop variant) before the node is patched to the
 * chosen concrete opcode.
 *
 * NOTE(review): this excerpt omits several lines (pop bookkeeping,
 * some else/closing braces); code tokens are kept exactly as found.
 */
1333 * @param state the x87 state
1334 * @param n the node that should be simulated (and patched)
1336 * @return NO_NODE_ADDED
1338 static int sim_FucomFnstsw(x87_state *state, ir_node *n) {
1342 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1344 x87_simulator *sim = state->sim;
1345 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1346 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1347 const arch_register_t *op1 = x87_get_irn_register(sim, op1_node);
1348 const arch_register_t *op2 = x87_get_irn_register(sim, op2_node);
1349 int reg_index_1 = arch_register_get_index(op1);
1350 int reg_index_2 = arch_register_get_index(op2);
/* liveness after this node decides which (popping) variant we may use */
1351 unsigned live = vfp_live_args_after(sim, n, 0);
1352 int flipped = attr->attr.data.cmp_flipped;
1355 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1356 arch_register_get_name(op1), arch_register_get_name(op2)));
1357 DEBUG_ONLY(vfp_dump_live(live));
1358 DB((dbg, LEVEL_1, "Stack before: "));
1359 DEBUG_ONLY(x87_dump_stack(state));
1361 op1_idx = x87_on_stack(state, reg_index_1);
1362 assert(op1_idx >= 0);
1364 /* BEWARE: check for comp a,a cases, they might happen */
1365 if (reg_index_2 != REG_VFP_NOREG) {
1366 /* second operand is a vfp register */
1367 op2_idx = x87_on_stack(state, reg_index_2);
1368 assert(op2_idx >= 0);
1370 if (is_vfp_live(arch_register_get_index(op2), live)) {
1371 /* second operand is live */
1373 if (is_vfp_live(arch_register_get_index(op1), live)) {
1374 /* both operands are live */
1377 /* res = tos X op */
1378 dst = op_ia32_FucomFnstsw;
1379 } else if (op2_idx == 0) {
1380 /* res = op X tos */
1381 dst = op_ia32_FucomFnstsw;
1385 /* bring the first one to tos */
1386 x87_create_fxch(state, n, op1_idx);
1390 /* res = tos X op */
1391 dst = op_ia32_FucomFnstsw;
1394 /* second live, first operand is dead here, bring it to tos.
1395 This means further, op1_idx != op2_idx. */
1396 assert(op1_idx != op2_idx);
1398 x87_create_fxch(state, n, op1_idx);
1403 /* res = tos X op, pop */
1404 dst = op_ia32_FucompFnstsw;
1408 /* second operand is dead */
1409 if (is_vfp_live(arch_register_get_index(op1), live)) {
1410 /* first operand is live: bring second to tos.
1411 This means further, op1_idx != op2_idx. */
1412 assert(op1_idx != op2_idx);
1414 x87_create_fxch(state, n, op2_idx);
1419 /* res = op X tos, pop */
1420 dst = op_ia32_FucompFnstsw;
1425 /* both operands are dead here, check first for identity. */
1426 if (op1_idx == op2_idx) {
1427 /* identically, one pop needed */
1429 x87_create_fxch(state, n, op1_idx);
1433 /* res = tos X op, pop */
1434 dst = op_ia32_FucompFnstsw;
1437 /* different, move them to st and st(1) and pop both.
1438 The tricky part is to get one into st(1).*/
1439 else if (op2_idx == 1) {
1440 /* good, second operand is already in the right place, move the first */
1442 /* bring the first on top */
1443 x87_create_fxch(state, n, op1_idx);
1444 assert(op2_idx != 0);
1447 /* res = tos X op, pop, pop */
1448 dst = op_ia32_FucomppFnstsw;
1450 } else if (op1_idx == 1) {
1451 /* good, first operand is already in the right place, move the second */
1453 /* bring the first on top */
1454 x87_create_fxch(state, n, op2_idx);
1455 assert(op1_idx != 0);
1458 /* res = op X tos, pop, pop */
1459 dst = op_ia32_FucomppFnstsw;
1464 /* if one is already the TOS, we need two fxch */
1466 /* first one is TOS, move to st(1) */
1467 x87_create_fxch(state, n, 1);
1468 assert(op2_idx != 1);
1470 x87_create_fxch(state, n, op2_idx);
1472 /* res = op X tos, pop, pop */
1473 dst = op_ia32_FucomppFnstsw;
1477 } else if (op2_idx == 0) {
1478 /* second one is TOS, move to st(1) */
1479 x87_create_fxch(state, n, 1);
1480 assert(op1_idx != 1);
1482 x87_create_fxch(state, n, op1_idx);
1484 /* res = tos X op, pop, pop */
1485 dst = op_ia32_FucomppFnstsw;
1488 /* none of them is either TOS or st(1), 3 fxch needed */
1489 x87_create_fxch(state, n, op2_idx);
1490 assert(op1_idx != 0);
1491 x87_create_fxch(state, n, 1);
1493 x87_create_fxch(state, n, op1_idx);
1495 /* res = tos X op, pop, pop */
1496 dst = op_ia32_FucomppFnstsw;
1503 /* second operand is an address mode */
1504 if (is_vfp_live(arch_register_get_index(op1), live)) {
1505 /* first operand is live: bring it to TOS */
1507 x87_create_fxch(state, n, op1_idx);
1510 dst = op_ia32_FucomFnstsw;
1512 /* first operand is dead: bring it to tos */
1514 x87_create_fxch(state, n, op1_idx);
1517 dst = op_ia32_FucompFnstsw;
/* patch the virtual node to the concrete opcode chosen above */
1522 x87_patch_insn(n, dst);
1523 assert(pop_cnt < 3);
1535 /* patch the operation */
1536 op1 = &ia32_st_regs[op1_idx];
1539 op2 = &ia32_st_regs[op2_idx];
1542 attr->x87[2] = NULL;
1543 attr->attr.data.cmp_flipped = flipped;
1546 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1547 arch_register_get_name(op1), arch_register_get_name(op2)));
1549 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1550 arch_register_get_name(op1)));
1552 return NO_NODE_ADDED;
/*
 * Simulate a be_Keep node.
 *
 * For every vfp operand of the Keep that still sits on the simulated
 * x87 stack but is dead after the Keep, an fpop is inserted after the
 * Keep in the schedule; returns NODE_ADDED in that case so the
 * scheduler walk re-reads the next node.
 *
 * NOTE(review): declarations and closing braces are missing from this
 * excerpt; code tokens are kept exactly as found.
 */
1555 static int sim_Keep(x87_state *state, ir_node *node)
1558 const arch_register_t *op_reg;
1563 int node_added = NO_NODE_ADDED;
1565 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1567 arity = get_irn_arity(node);
1568 for(i = 0; i < arity; ++i) {
1569 op = get_irn_n(node, i);
1570 op_reg = arch_get_irn_register(state->sim->arch_env, op);
/* only virtual floating point operands are interesting here */
1571 if(arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1574 reg_id = arch_register_get_index(op_reg);
1575 live = vfp_live_args_after(state->sim, node, 0);
1577 op_stack_idx = x87_on_stack(state, reg_id);
/* on the stack but dead afterwards: pop it right after the Keep */
1578 if(op_stack_idx >= 0 && !is_vfp_live(reg_id, live)) {
1579 x87_create_fpop(state, sched_next(node), 1);
1580 node_added = NODE_ADDED;
1584 DB((dbg, LEVEL_1, "Stack after: "));
1585 DEBUG_ONLY(x87_dump_stack(state));
/*
 * Keep a float value alive by attaching a be_Keep to it.
 *
 * Creates a be_Keep with @p node as its single input (in the node's
 * register class) and schedules it immediately after @p node, so the
 * value is not treated as dead by later passes.
 */
1591 void keep_float_node_alive(x87_state *state, ir_node *node)
1597 const arch_register_class_t *cls;
1599 irg = get_irn_irg(node);
1600 block = get_nodes_block(node);
1601 cls = arch_get_irn_reg_class(state->sim->arch_env, node, -1);
1603 keep = be_new_Keep(cls, irg, block, 1, in);
/* node must already be scheduled so we can insert directly after it */
1605 assert(sched_is_scheduled(node));
1606 sched_add_after(node, keep);
/*
 * Implementation notes: constants (fldz/fld1/fldpi/fldl2e/...) are
 * cheaper to rematerialize than to copy with fxch/fpush, so the
 * switch below recreates them; every other value is copied via a
 * fpushCopy of the stack slot holding the predecessor. Either way the
 * result is pushed onto the simulated stack and written to x87 slot
 * st(0) (attr->x87[2]).
 */
1610 * Create a copy of a node. Recreate the node if it's a constant.
1612 * @param state the x87 state
1613 * @param n the node to be copied
1615 * @return the copy of n
1617 static ir_node *create_Copy(x87_state *state, ir_node *n) {
1618 x87_simulator *sim = state->sim;
1619 ir_graph *irg = get_irn_irg(n);
1620 dbg_info *n_dbg = get_irn_dbg_info(n);
1621 ir_mode *mode = get_irn_mode(n);
1622 ir_node *block = get_nodes_block(n);
1623 ir_node *pred = get_irn_n(n, 0);
/* constructor to use when the predecessor is a rematerializable constant */
1624 ir_node *(*cnstr)(dbg_info *, ir_graph *, ir_node *, ir_mode *) = NULL;
1626 const arch_register_t *out;
1627 const arch_register_t *op1;
1628 ia32_x87_attr_t *attr;
1630 /* Do not copy constants, recreate them. */
1631 switch (get_ia32_irn_opcode(pred)) {
1632 case iro_ia32_Unknown_VFP:
1634 cnstr = new_rd_ia32_fldz;
1637 cnstr = new_rd_ia32_fld1;
1639 case iro_ia32_fldpi:
1640 cnstr = new_rd_ia32_fldpi;
1642 case iro_ia32_fldl2e:
1643 cnstr = new_rd_ia32_fldl2e;
1645 case iro_ia32_fldl2t:
1646 cnstr = new_rd_ia32_fldl2t;
1648 case iro_ia32_fldlg2:
1649 cnstr = new_rd_ia32_fldlg2;
1651 case iro_ia32_fldln2:
1652 cnstr = new_rd_ia32_fldln2;
1658 out = x87_get_irn_register(sim, n);
1659 op1 = x87_get_irn_register(sim, pred);
1661 if (cnstr != NULL) {
1662 /* copy a constant */
1663 res = (*cnstr)(n_dbg, irg, block, mode);
1665 x87_push(state, arch_register_get_index(out), res);
1667 attr = get_ia32_x87_attr(res);
1668 attr->x87[2] = &ia32_st_regs[0];
/* genuine copy: duplicate the stack slot holding the predecessor */
1670 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1672 res = new_rd_ia32_fpushCopy(n_dbg, irg, block, pred, mode);
1674 x87_push(state, arch_register_get_index(out), res);
1676 attr = get_ia32_x87_attr(res);
1677 attr->x87[0] = &ia32_st_regs[op1_idx];
1678 attr->x87[2] = &ia32_st_regs[0];
1680 arch_set_irn_register(sim->arch_env, res, out);
/*
 * Implementation notes: only vfp copies are handled; others fall
 * through untouched. Three cases:
 *  - source still live: insert a real fpushCopy / constant reload via
 *    create_Copy(), keeping the old value alive if it lost all users;
 *  - source dead: the "copy" is a kill+rename, realized as a Pop
 *    (with an fxch first if the source is not TOS);
 *  - otherwise: purely virtual rename of the stack slot.
 * NOTE(review): several lines (returns, braces) are missing from this
 * excerpt; code tokens are kept exactly as found.
 */
1686 * Simulate a be_Copy.
1688 * @param state the x87 state
1689 * @param n the node that should be simulated (and patched)
1691 * @return NO_NODE_ADDED
1693 static int sim_Copy(x87_state *state, ir_node *n) {
1694 x87_simulator *sim = state->sim;
1696 const arch_register_t *out;
1697 const arch_register_t *op1;
1698 const arch_register_class_t *class;
1699 ir_node *node, *next;
1700 ia32_x87_attr_t *attr;
1701 int op1_idx, out_idx;
/* ignore copies of non-vfp values */
1704 class = arch_get_irn_reg_class(sim->arch_env, n, -1);
1705 if (class->regs != ia32_vfp_regs)
1708 pred = get_irn_n(n, 0);
1709 out = x87_get_irn_register(sim, n);
1710 op1 = x87_get_irn_register(sim, pred);
1711 live = vfp_live_args_after(sim, n, REGMASK(out));
1713 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1714 arch_register_get_name(op1), arch_register_get_name(out)));
1715 DEBUG_ONLY(vfp_dump_live(live));
1717 /* handle the infamous unknown value */
1718 if (arch_register_get_index(op1) == REG_VFP_UKNWN) {
1719 /* Operand is still live, a real copy. We need here an fpush that can
1720 hold a a register, so use the fpushCopy or recreate constants */
1721 node = create_Copy(state, n);
1723 assert(is_ia32_fldz(node));
1724 next = sched_next(n);
1727 sched_add_before(next, node);
1729 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1730 arch_get_irn_register(sim->arch_env, node)->name));
1731 return NO_NODE_ADDED;
1734 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1736 if (is_vfp_live(arch_register_get_index(op1), live)) {
1737 ir_node *pred = get_irn_n(n, 0);
1739 /* Operand is still live, a real copy. We need here an fpush that can
1740 hold a a register, so use the fpushCopy or recreate constants */
1741 node = create_Copy(state, n);
1743 /* We have to make sure the old value doesn't go dead (which can happen
1744 * when we recreate constants). As the simulator expected that value in
1745 * the pred blocks. This is unfortunate as removing it would save us 1
1746 * instruction, but we would have to rerun all the simulation to get
1749 next = sched_next(n);
1752 sched_add_before(next, node);
/* constant recreation may have orphaned the old value: keep it alive */
1754 if(get_irn_n_edges(pred) == 0) {
1755 keep_float_node_alive(state, pred);
1758 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1759 arch_get_irn_register(sim->arch_env, node)->name));
1761 out_idx = x87_on_stack(state, arch_register_get_index(out));
1763 if (out_idx >= 0 && out_idx != op1_idx) {
1764 /* Matze: out already on stack? how can this happen? */
1767 /* op1 must be killed and placed where out is */
1769 /* best case, simple remove and rename */
1770 x87_patch_insn(n, op_ia32_Pop);
1771 attr = get_ia32_x87_attr(n);
1772 attr->x87[0] = op1 = &ia32_st_regs[0];
1775 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1777 /* move op1 to tos, store and pop it */
1779 x87_create_fxch(state, n, op1_idx);
1782 x87_patch_insn(n, op_ia32_Pop);
1783 attr = get_ia32_x87_attr(n);
1784 attr->x87[0] = op1 = &ia32_st_regs[out_idx];
1787 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1789 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1791 /* just a virtual copy */
1792 x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
1793 /* don't remove the node to keep the verifier quiet :),
1794 the emitter won't emit any code for the node */
1797 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1798 exchange(n, get_unop_op(n));
1802 return NO_NODE_ADDED;
/*
 * Scans the out edges of @p call for the Proj with number
 * pn_be_Call_first_res. NOTE(review): the lines returning the found
 * proj (and the fallback) are missing from this excerpt.
 */
1806 * Returns the result proj of the call
1808 static ir_node *get_call_result_proj(ir_node *call) {
1809 const ir_edge_t *edge;
1811 /* search the result proj */
1812 foreach_out_edge(call, edge) {
1813 ir_node *proj = get_edge_src_irn(edge);
1814 long pn = get_Proj_proj(proj);
1816 if (pn == pn_be_Call_first_res) {
1822 } /* get_call_result_proj */
/*
 * Implementation notes: the x87 stack must be empty at a call (the
 * ABI clears it); if the callee returns a float, the value comes back
 * in st(0), so the result proj is pushed onto the simulated stack —
 * even if the return value is unused.
 */
1825 * Simulate a be_Call.
1827 * @param state the x87 state
1828 * @param n the node that should be simulated
1829 * @param arch_env the architecture environment
1831 * @return NO_NODE_ADDED
1833 static int sim_Call(x87_state *state, ir_node *n, const arch_env_t *arch_env)
1835 ir_type *call_tp = be_Call_get_type(n);
1839 const arch_register_t *reg;
1842 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1844 /* at the begin of a call the x87 state should be empty */
1845 assert(state->depth == 0 && "stack not empty before call");
/* no results: nothing to model */
1847 if (get_method_n_ress(call_tp) <= 0)
1851 * If the called function returns a float, it is returned in st(0).
1852 * This even happens if the return value is NOT used.
1853 * Moreover, only one return result is supported.
1855 res_type = get_method_res_type(call_tp, 0);
1856 mode = get_type_mode(res_type);
1858 if (mode == NULL || !mode_is_float(mode))
1861 resproj = get_call_result_proj(n);
1862 assert(resproj != NULL);
1864 reg = x87_get_irn_register(state->sim, resproj);
1865 x87_push(state, arch_register_get_index(reg), resproj);
1868 DB((dbg, LEVEL_1, "Stack after: "));
1869 DEBUG_ONLY(x87_dump_stack(state));
1871 return NO_NODE_ADDED;
/*
 * Dead branch by design: spills are lowered to vfst before the
 * simulator runs, so reaching this function is a pass-ordering bug;
 * the assert fires in debug builds and sim_fst is a release-mode
 * fallback.
 */
1875 * Simulate a be_Spill.
1877 * @param state the x87 state
1878 * @param n the node that should be simulated (and patched)
1880 * Should not happen, spills are lowered before x87 simulator see them.
1882 static int sim_Spill(x87_state *state, ir_node *n) {
1883 assert(0 && "Spill not lowered");
1884 return sim_fst(state, n);
/*
 * Dead branch by design: reloads are lowered to vfld before the
 * simulator runs; the assert fires in debug builds and sim_fld is a
 * release-mode fallback (mirrors sim_Spill).
 */
1888 * Simulate a be_Reload.
1890 * @param state the x87 state
1891 * @param n the node that should be simulated (and patched)
1893 * Should not happen, reloads are lowered before x87 simulator see them.
1895 static int sim_Reload(x87_state *state, ir_node *n) {
1896 assert(0 && "Reload not lowered");
1897 return sim_fld(state, n);
/*
 * Implementation notes: the x87 ABI returns float results on the FPU
 * stack, so at a Return the simulated stack depth must equal the
 * number of float return values; those entries are then popped only
 * in the simulated state (no code is emitted for it here).
 */
1901 * Simulate a be_Return.
1903 * @param state the x87 state
1904 * @param n the node that should be simulated (and patched)
1906 * @return NO_NODE_ADDED
1908 static int sim_Return(x87_state *state, ir_node *n) {
1909 int n_res = be_Return_get_n_rets(n);
1910 int i, n_float_res = 0;
1912 /* only floating point return values must resist on stack */
1913 for (i = 0; i < n_res; ++i) {
1914 ir_node *res = get_irn_n(n, be_pos_Return_val + i);
1916 if (mode_is_float(get_irn_mode(res)))
1919 assert(x87_get_depth(state) == n_float_res);
1921 /* pop them virtually */
1922 for (i = n_float_res - 1; i >= 0; --i)
1925 return NO_NODE_ADDED;
/** Input/output register pair used when simulating a be_Perm. */
1928 typedef struct _perm_data_t {
1929 const arch_register_t *in;
1930 const arch_register_t *out;
/*
 * Implementation notes: a Perm is purely virtual on x87 — all inputs
 * already live on the FPU stack and are pairwise different — so the
 * permutation only renames the simulated stack slots: each result
 * Proj takes over the stack position of its corresponding input.
 */
1934 * Simulate a be_Perm.
1936 * @param state the x87 state
1937 * @param irn the node that should be simulated (and patched)
1939 * @return NO_NODE_ADDED
1941 static int sim_Perm(x87_state *state, ir_node *irn) {
1943 x87_simulator *sim = state->sim;
1944 ir_node *pred = get_irn_n(irn, 0);
1946 const ir_edge_t *edge;
1948 /* handle only floating point Perms */
1949 if (! mode_is_float(get_irn_mode(pred)))
1950 return NO_NODE_ADDED;
1952 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1954 /* Perm is a pure virtual instruction on x87.
1955 All inputs must be on the FPU stack and are pairwise
1956 different from each other.
1957 So, all we need to do is to permutate the stack state. */
1958 n = get_irn_arity(irn);
1959 NEW_ARR_A(int, stack_pos, n);
1961 /* collect old stack positions */
1962 for (i = 0; i < n; ++i) {
1963 const arch_register_t *inreg = x87_get_irn_register(sim, get_irn_n(irn, i));
1964 int idx = x87_on_stack(state, arch_register_get_index(inreg));
1966 assert(idx >= 0 && "Perm argument not on x87 stack");
1970 /* now do the permutation */
1971 foreach_out_edge(irn, edge) {
1972 ir_node *proj = get_edge_src_irn(edge);
1973 const arch_register_t *out = x87_get_irn_register(sim, proj);
1974 long num = get_Proj_proj(proj);
1976 assert(0 <= num && num < n && "More Proj's than Perm inputs");
/* result proj takes over the stack slot of input number 'num' */
1977 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
1979 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1981 return NO_NODE_ADDED;
/*
 * Simulate a be_Barrier: every Unknown_VFP input is materialized as a
 * concrete fldz scheduled before the barrier (and pushed onto the
 * simulated stack), because an "unknown" has no real x87 slot.
 */
1984 static int sim_Barrier(x87_state *state, ir_node *node) {
1985 //const arch_env_t *arch_env = state->sim->arch_env;
1988 /* materialize unknown if needed */
1989 arity = get_irn_arity(node);
1990 for(i = 0; i < arity; ++i) {
1991 const arch_register_t *reg;
1994 ia32_x87_attr_t *attr;
1995 ir_node *in = get_irn_n(node, i);
1997 if(!is_ia32_Unknown_VFP(in))
2000 /* TODO: not completely correct... */
2001 reg = &ia32_vfp_regs[REG_VFP_UKNWN];
2004 block = get_nodes_block(node);
/* any value works: load zero and make it occupy st(0) */
2005 zero = new_rd_ia32_fldz(NULL, current_ir_graph, block, mode_E);
2006 x87_push(state, arch_register_get_index(reg), zero);
2008 attr = get_ia32_x87_attr(zero);
2009 attr->x87[2] = &ia32_st_regs[0];
2011 sched_add_before(node, zero);
2013 set_irn_n(node, i, zero);
2016 return NO_NODE_ADDED;
/*
 * Implementation notes: builds a bit mask of dead stack slots, then
 * pops them before the block's first instruction. If everything is
 * dead and the target supports MMX, a single EMMS/FEMMS clears the
 * whole stack; otherwise dead values are brought to TOS with fxch and
 * removed with single or double fpops.
 * NOTE(review): a few loop/branch lines are missing in this excerpt;
 * code tokens are kept exactly as found.
 */
2021 * Kill any dead registers at block start by popping them from the stack.
2023 * @param sim the simulator handle
2024 * @param block the current block
2025 * @param start_state the x87 state at the begin of the block
2027 * @return the x87 state after dead register killed
2029 static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state) {
2030 x87_state *state = start_state;
2031 ir_node *first_insn = sched_first(block);
2032 ir_node *keep = NULL;
2033 unsigned live = vfp_live_args_after(sim, block, 0);
2035 int i, depth, num_pop;
/* collect a bit per dead stack slot (bit i <=> st(i) is dead) */
2038 depth = x87_get_depth(state);
2039 for (i = depth - 1; i >= 0; --i) {
2040 int reg = x87_get_st_reg(state, i);
2042 if (! is_vfp_live(reg, live))
2043 kill_mask |= (1 << i);
2047 /* create a new state, will be changed */
2048 state = x87_clone_state(sim, state);
2050 DB((dbg, LEVEL_1, "Killing deads:\n"));
2051 DEBUG_ONLY(vfp_dump_live(live));
2052 DEBUG_ONLY(x87_dump_stack(state));
2054 if (kill_mask != 0 && live == 0) {
2055 int cpu = sim->isa->arch;
2057 /* special case: kill all registers */
2058 if (ARCH_ATHLON(sim->isa->opt_arch) && ARCH_MMX(cpu)) {
2059 if (ARCH_AMD(cpu)) {
2060 /* use FEMMS on AMD processors to clear all */
2061 keep = new_rd_ia32_femms(NULL, get_irn_irg(block), block, mode_E);
2063 /* use EMMS to clear all */
2064 keep = new_rd_ia32_emms(NULL, get_irn_irg(block), block, mode_E);
2066 sched_add_before(first_insn, keep);
2072 /* now kill registers */
2074 /* we can only kill from TOS, so bring them up */
2075 if (! (kill_mask & 1)) {
2076 /* search from behind, because we can to a double-pop */
2077 for (i = depth - 1; i >= 0; --i) {
2078 if (kill_mask & (1 << i)) {
2079 kill_mask &= ~(1 << i);
2086 x87_set_st(state, -1, keep, i);
2087 x87_create_fxch(state, first_insn, i);
2090 if ((kill_mask & 3) == 3) {
2091 /* we can do a double-pop */
2095 /* only a single pop */
2100 kill_mask >>= num_pop;
2101 keep = x87_create_fpop(state, first_insn, num_pop);
2106 } /* x87_kill_deads */
/*
 * Walks the Phis of @p block; for each Phi whose predecessor at
 * position @p pos is an Unknown_VFP, an fldz is created at the end of
 * @p pred_block, pushed onto the simulated stack, and substituted as
 * the Phi operand — guaranteeing an actual stack value exists on that
 * incoming edge.
 */
2109 * If we have PhiEs with unknown operands then we have to make sure that some
2110 * value is actually put onto the stack.
2112 static void fix_unknown_phis(x87_state *state, ir_node *block,
2113 ir_node *pred_block, int pos)
2117 sched_foreach(block, node) {
2119 const arch_register_t *reg;
2120 ia32_x87_attr_t *attr;
2125 op = get_Phi_pred(node, pos);
/* only Unknown_VFP operands need fixing */
2126 if(!is_ia32_Unknown_VFP(op))
2129 reg = arch_get_irn_register(state->sim->arch_env, node);
2131 /* create a zero at end of pred block */
2132 zero = new_rd_ia32_fldz(NULL, current_ir_graph, pred_block, mode_E);
2133 x87_push(state, arch_register_get_index(reg), zero);
2135 attr = get_ia32_x87_attr(zero);
2136 attr->x87[2] = &ia32_st_regs[0];
2138 assert(is_ia32_fldz(zero));
/* schedule before the pred block's control-flow end */
2139 sched_add_before(sched_last(pred_block), zero);
2141 set_Phi_pred(node, pos, zero);
/*
 * Implementation notes: simulates every scheduled node of the block
 * that has a registered sim_func, starting from the block's begin
 * state (with dead registers killed first). At the end, the out-state
 * is propagated to each successor: if the successor has no begin
 * state yet it inherits this one and is queued; otherwise fxch
 * shuffles are inserted to match the already-fixed begin state.
 */
2146 * Run a simulation and fix all virtual instructions for a block.
2148 * @param sim the simulator handle
2149 * @param block the current block
2151 static void x87_simulate_block(x87_simulator *sim, ir_node *block) {
2153 blk_state *bl_state = x87_get_bl_state(sim, block);
2154 x87_state *state = bl_state->begin;
2155 const ir_edge_t *edge;
2156 ir_node *start_block;
2158 assert(state != NULL);
2159 /* already processed? */
2160 if (bl_state->end != NULL)
2163 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2164 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2165 DEBUG_ONLY(x87_dump_stack(state));
2167 /* at block begin, kill all dead registers */
2168 state = x87_kill_deads(sim, block, state);
2169 /* create a new state, will be changed */
2170 state = x87_clone_state(sim, state);
2172 /* beware, n might change */
2173 for (n = sched_first(block); !sched_is_end(n); n = next) {
2176 ir_op *op = get_irn_op(n);
/* pre-fetch the successor; sim_func may insert nodes after n */
2178 next = sched_next(n);
2179 if (op->ops.generic == NULL)
2182 func = (sim_func)op->ops.generic;
2185 node_inserted = (*func)(state, n);
2188 sim_func might have added an additional node after n,
2190 beware: n must not be changed by sim_func
2191 (i.e. removed from schedule) in this case
2193 if (node_inserted != NO_NODE_ADDED)
2194 next = sched_next(n);
2197 start_block = get_irg_start_block(get_irn_irg(block));
2199 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));
2201 /* check if the state must be shuffled */
2202 foreach_block_succ(block, edge) {
2203 ir_node *succ = get_edge_src_irn(edge);
2204 blk_state *succ_state;
2206 if (succ == start_block)
2209 succ_state = x87_get_bl_state(sim, succ);
2211 fix_unknown_phis(state, succ, block, get_edge_src_pos(edge));
2213 if (succ_state->begin == NULL) {
2214 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2215 DEBUG_ONLY(x87_dump_stack(state));
2216 succ_state->begin = state;
2218 waitq_put(sim->worklist, succ);
2220 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2221 /* There is already a begin state for the successor, bad.
2222 Do the necessary permutations.
2223 Note that critical edges are removed, so this is always possible:
2224 If the successor has more than one possible input, then it must
2227 x87_shuffle(sim, block, state, succ, succ_state->begin);
2230 bl_state->end = state;
2231 } /* x87_simulate_block */
/*
 * Implementation notes: allocates the per-simulator obstack, block
 * state map and liveness cache, then registers the sim_* handlers in
 * the generic function pointers of the opcodes the simulator must
 * process (via the ASSOC* macros; most registrations fall on lines
 * not visible in this excerpt).
 */
2234 * Create a new x87 simulator.
2236 * @param sim a simulator handle, will be initialized
2237 * @param irg the current graph
2238 * @param arch_env the architecture environment
2240 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg,
2241 const arch_env_t *arch_env)
2243 obstack_init(&sim->obst);
2244 sim->blk_states = pmap_create();
2245 sim->arch_env = arch_env;
2246 sim->n_idx = get_irg_last_idx(irg);
/* one liveness word per node index, allocated on the obstack */
2247 sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);
2248 sim->isa = (ia32_isa_t *)arch_env->isa;
2250 DB((dbg, LEVEL_1, "--------------------------------\n"
2251 "x87 Simulator started for %+F\n", irg));
2253 /* set the generic function pointer of instruction we must simulate */
2254 clear_irp_opcodes_generic_func();
2256 #define ASSOC(op) (op_ ## op)->ops.generic = (op_func)(sim_##op)
2257 #define ASSOC_IA32(op) (op_ia32_v ## op)->ops.generic = (op_func)(sim_##op)
2258 #define ASSOC_BE(op) (op_be_ ## op)->ops.generic = (op_func)(sim_##op)
2272 ASSOC_IA32(FucomFnstsw);
2284 } /* x87_init_simulator */
2287 * Destroy a x87 simulator.
2289 * @param sim the simulator handle
2291 static void x87_destroy_simulator(x87_simulator *sim) {
2292 pmap_destroy(sim->blk_states);
2293 obstack_free(&sim->obst, NULL);
2294 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2295 } /* x87_destroy_simulator */
2298 * Pre-block walker: calculate the liveness information for the block
2299 * and store it into the sim->live cache.
2301 static void update_liveness_walker(ir_node *block, void *data) {
2302 x87_simulator *sim = data;
2303 update_liveness(sim, block);
2304 } /* update_liveness_walker */
/*
 * Implementation notes: driver of the whole pass. Creates the
 * simulator, seeds the worklist with the start block (empty x87
 * state), precomputes liveness for every block (lazy computation
 * would break once new nodes are inserted), then simulates blocks
 * from the worklist until it drains, and finally tears the simulator
 * down.
 */
2307 * Run a simulation and fix all virtual instructions for a graph.
2309 * @param env the architecture environment
2310 * @param irg the current graph
2312 * Needs a block-schedule.
2314 void x87_simulate_graph(const arch_env_t *arch_env, be_irg_t *birg) {
2315 ir_node *block, *start_block;
2316 blk_state *bl_state;
2318 ir_graph *irg = be_get_birg_irg(birg);
2320 /* create the simulator */
2321 x87_init_simulator(&sim, irg, arch_env);
2323 start_block = get_irg_start_block(irg);
2324 bl_state = x87_get_bl_state(&sim, start_block);
2326 /* start with the empty state */
2327 bl_state->begin = empty;
2330 sim.worklist = new_waitq();
2331 waitq_put(sim.worklist, start_block);
2333 be_assure_liveness(birg);
2334 sim.lv = be_get_birg_liveness(birg);
2335 // sim.lv = be_liveness(be_get_birg_irg(birg));
2336 be_liveness_assure_sets(sim.lv);
2338 /* Calculate the liveness for all nodes. We must precalculate this info,
2339 * because the simulator adds new nodes (possible before Phi nodes) which
2340 * would let a lazy calculation fail.
2341 * On the other hand we reduce the computation amount due to
2342 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2344 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
/* process blocks until no successor needs a (new) begin state */
2348 block = waitq_get(sim.worklist);
2349 x87_simulate_block(&sim, block);
2350 } while (! waitq_empty(sim.worklist));
2353 del_waitq(sim.worklist);
2354 x87_destroy_simulator(&sim);
2355 } /* x87_simulate_graph */
2357 void ia32_init_x87(void) {
2358 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
2359 } /* ia32_init_x87 */