2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
36 #include "iredges_t.h"
47 #include "../belive_t.h"
48 #include "../besched_t.h"
49 #include "../benode_t.h"
50 #include "bearch_ia32_t.h"
51 #include "ia32_new_nodes.h"
52 #include "gen_ia32_new_nodes.h"
53 #include "gen_ia32_regalloc_if.h"
/* Wrap a logical stack offset into the circular st[] array.
 * NOTE(review): relies on N_x87_REGS being a power of two — verify. */
61 #define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
63 /** the debug handle */
64 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
66 /* Forward declaration. */
67 typedef struct _x87_simulator x87_simulator;
70 * An exchange template.
71 * Note that our virtual functions have the same inputs
72 * and attributes as the real ones, so we can simple exchange
74 * Further, x87 supports inverse instructions, so we can handle them.
76 typedef struct _exchange_tmpl {
77 ir_op *normal_op; /**< the normal one */
78 ir_op *reverse_op; /**< the reverse one if exists */
79 ir_op *normal_pop_op; /**< the normal one with tos pop */
80 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
/* NOTE(review): the closing "} exchange_tmpl;" of this struct is not visible in this extract. */
84 * An entry on the simulated x87 stack.
86 typedef struct _st_entry {
87 int reg_idx; /**< the virtual register index of this stack value */
88 ir_node *node; /**< the node that produced this value */
/* NOTE(review): the closing "} st_entry;" of this struct is not visible in this extract. */
94 typedef struct _x87_state {
95 st_entry st[N_x87_REGS]; /**< the register stack */
96 int depth; /**< the current stack depth */
97 int tos; /**< position of the tos */
98 x87_simulator *sim; /**< The simulator. */
/* NOTE(review): the closing "} x87_state;" of this struct is not visible in this extract. */
101 /** An empty state, used for blocks without fp instructions. */
102 static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
103 static x87_state *empty = (x87_state *)&_empty;
/* Return values of the instruction simulator functions (see sim_func below).
 * NOTE(review): the opening "enum ... {" of this enumeration is not visible in this extract. */
106 NO_NODE_ADDED = 0, /**< No node was added. */
107 NODE_ADDED = 1 /**< A node was added by the simulator in the schedule. */
111 * The type of an instruction simulator function.
113 * @param state the x87 state
114 * @param n the node to be simulated
116 * @return NODE_ADDED if a node was added AFTER n in schedule,
119 typedef int (*sim_func)(x87_state *state, ir_node *n);
122 * A block state: Every block has a x87 state at the beginning and at the end.
124 typedef struct _blk_state {
125 x87_state *begin; /**< state at the begin or NULL if not assigned */
126 x87_state *end; /**< state at the end or NULL if not assigned */
/* NOTE(review): the closing "} blk_state;" of this struct is not visible in this extract. */
129 #define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
131 /** liveness bitset for vfp registers. */
132 typedef unsigned char vfp_liveness;
/* The simulator handle: bundles all per-run data of one x87 simulation. */
137 struct _x87_simulator {
138 struct obstack obst; /**< An obstack for fast allocating. */
139 pmap *blk_states; /**< Map blocks to states. */
140 const arch_env_t *arch_env; /**< The architecture environment. */
141 be_lv_t *lv; /**< intrablock liveness. */
142 vfp_liveness *live; /**< Liveness information. */
143 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
144 waitq *worklist; /**< Worklist of blocks that must be processed. */
145 ia32_isa_t *isa; /**< the ISA object */
/* NOTE(review): the closing "};" of this struct is not visible in this extract. */
149 * Returns the current stack depth.
151 * @param state the x87 state
153 * @return the x87 stack depth
155 static int x87_get_depth(const x87_state *state) {
/* NOTE(review): the body (presumably "return state->depth;") is not visible in this extract. */
157 } /* x87_get_depth */
160 * Return the virtual register index at st(pos).
162 * @param state the x87 state
163 * @param pos a stack position
165 * @return the vfp register index that produced the value at st(pos)
167 static int x87_get_st_reg(const x87_state *state, int pos) {
168 assert(pos < state->depth);
169 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
170 } /* x87_get_st_reg */
173 * Return the node at st(pos).
175 * @param state the x87 state
176 * @param pos a stack position
178 * @return the IR node that produced the value at st(pos)
180 static ir_node *x87_get_st_node(const x87_state *state, int pos) {
181 assert(pos < state->depth);
182 return state->st[MASK_TOS(state->tos + pos)].node;
183 } /* x87_get_st_node */
187 * Dump the stack for debugging.
189 * @param state the x87 state
/* Prints the simulated stack from the bottom up to the tos (DEBUG_libfirm only). */
191 static void x87_dump_stack(const x87_state *state) {
/* NOTE(review): the local declaration "int i;" is not visible in this extract. */
194 for (i = state->depth - 1; i >= 0; --i) {
195 DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
196 x87_get_st_node(state, i)));
198 DB((dbg, LEVEL_2, "<-- TOS\n"));
199 } /* x87_dump_stack */
200 #endif /* DEBUG_libfirm */
203 * Set a virtual register to st(pos).
205 * @param state the x87 state
206 * @param reg_idx the vfp register index that should be set
207 * @param node the IR node that produces the value of the vfp register
208 * @param pos the stack position where the new value should be entered
210 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos) {
211 assert(0 < state->depth);
212 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
213 state->st[MASK_TOS(state->tos + pos)].node = node;
215 DB((dbg, LEVEL_2, "After SET_REG: "));
216 DEBUG_ONLY(x87_dump_stack(state));
/* NOTE(review): the closing "}" of x87_set_st is not visible in this extract. */
220 * Set the tos virtual register.
222 * @param state the x87 state
223 * @param reg_idx the vfp register index that should be set
224 * @param node the IR node that produces the value of the vfp register
/* Convenience wrapper: writes reg_idx/node into st(0). */
226 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node) {
227 x87_set_st(state, reg_idx, node, 0);
/* NOTE(review): the closing "}" of x87_set_tos is not visible in this extract. */
231 * Swap st(0) with st(pos).
233 * @param state the x87 state
234 * @param pos the stack position to change the tos with
236 static void x87_fxch(x87_state *state, int pos) {
/* NOTE(review): the local "st_entry entry;" declaration is not visible in this extract. */
238 assert(pos < state->depth);
240 entry = state->st[MASK_TOS(state->tos + pos)];
241 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
242 state->st[MASK_TOS(state->tos)] = entry;
244 DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
248 * Convert a virtual register to the stack index.
250 * @param state the x87 state
251 * @param reg_idx the register vfp index
253 * @return the stack position where the register is stacked
254 * or -1 if the virtual register was not found
256 static int x87_on_stack(const x87_state *state, int reg_idx) {
257 int i, tos = state->tos;
/* Linear scan over the occupied part of the circular stack. */
259 for (i = 0; i < state->depth; ++i)
260 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
/* NOTE(review): "return i;", the trailing "return -1;" and the closing brace are not visible in this extract. */
266 * Push a virtual Register onto the stack, double pushed allowed.
268 * @param state the x87 state
269 * @param reg_idx the register vfp index
270 * @param node the node that produces the value of the vfp register
272 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node) {
273 assert(state->depth < N_x87_REGS && "stack overrun");
/* NOTE(review): the depth increment ("++state->depth;") is not visible in this extract. */
276 state->tos = MASK_TOS(state->tos - 1);
277 state->st[state->tos].reg_idx = reg_idx;
278 state->st[state->tos].node = node;
280 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
284 * Push a virtual Register onto the stack, double pushes are NOT allowed.
286 * @param state the x87 state
287 * @param reg_idx the register vfp index
288 * @param node the node that produces the value of the vfp register
289 * (review: stale doc - there is no dbl_push parameter; this variant always forbids double pushes)
291 static void x87_push(x87_state *state, int reg_idx, ir_node *node) {
292 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
294 x87_push_dbl(state, reg_idx, node);
/* NOTE(review): the closing "}" of x87_push is not visible in this extract. */
298 * Pop a virtual Register from the stack.
300 * @param state the x87 state
302 static void x87_pop(x87_state *state) {
303 assert(state->depth > 0 && "stack underrun");
/* NOTE(review): the depth decrement ("--state->depth;") is not visible in this extract. */
306 state->tos = MASK_TOS(state->tos + 1);
308 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
312 * Empty the fpu stack
314 * @param state the x87 state
316 static void x87_emms(x87_state *state) {
/* NOTE(review): the body (presumably resetting depth and tos) is not visible in this extract. */
322 * Returns the block state of a block.
324 * @param sim the x87 simulator handle
325 * @param block the current block
327 * @return the block state
329 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block) {
330 pmap_entry *entry = pmap_find(sim->blk_states, block);
/* Lazily allocate and register an (unassigned) state pair on first request.
 * NOTE(review): the "if (! entry) { ... }" framing and the early return of the
 * fresh bl_state are not visible in this extract. */
333 blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
334 bl_state->begin = NULL;
335 bl_state->end = NULL;
337 pmap_insert(sim->blk_states, block, bl_state);
341 return PTR_TO_BLKSTATE(entry->value);
342 } /* x87_get_bl_state */
345 * Creates a new x87 state.
347 * @param sim the x87 simulator handle
349 * @return a new x87 state
351 static x87_state *x87_alloc_state(x87_simulator *sim) {
352 x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));
/* NOTE(review): "res->sim = sim;" and "return res;" are not visible in this extract. */
356 } /* x87_alloc_state */
/* Clone an x87 state (allocates a new state and copies src into it). */
361 * @param sim the x87 simulator handle
362 * @param src the x87 state that will be cloned
364 * @return a cloned copy of the src state
366 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src) {
367 x87_state *res = x87_alloc_state(sim);
369 memcpy(res, src, sizeof(*res));
/* NOTE(review): "return res;" is not visible in this extract. */
371 } /* x87_clone_state */
374 * Patch a virtual instruction into a x87 one and return
375 * the node representing the result value.
377 * @param n the IR node to patch
378 * @param op the x87 opcode to patch in
380 static ir_node *x87_patch_insn(ir_node *n, ir_op *op) {
381 ir_mode *mode = get_irn_mode(n);
/* NOTE(review): the actual opcode patch (set_irn_op) and result-node tracking are
 * not visible in this extract - only the mode rewriting below is. */
386 if (mode == mode_T) {
387 /* patch all Proj's */
388 const ir_edge_t *edge;
390 foreach_out_edge(n, edge) {
391 ir_node *proj = get_edge_src_irn(edge);
393 mode = get_irn_mode(proj);
394 if (mode_is_float(mode)) {
/* float Projs are switched to the x87 extended mode */
396 set_irn_mode(proj, mode_E);
400 } else if (mode_is_float(mode))
401 set_irn_mode(n, mode_E);
403 } /* x87_patch_insn */
406 * Returns the first Proj of a mode_T node having a given mode.
408 * @param n the mode_T node
409 * @param m the desired mode of the Proj
410 * @return The first Proj of mode @p m found or NULL.
412 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m) {
413 const ir_edge_t *edge;
415 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
417 foreach_out_edge(n, edge) {
418 ir_node *proj = get_edge_src_irn(edge);
419 if (get_irn_mode(proj) == m)
/* NOTE(review): "return proj;", the loop close and the final "return NULL;" are not visible in this extract. */
424 } /* get_irn_Proj_for_mode */
427 * Wrap the arch_* function here so we can check for errors.
429 static INLINE const arch_register_t *x87_get_irn_register(x87_simulator *sim, const ir_node *irn) {
430 const arch_register_t *res;
432 res = arch_get_irn_register(sim->arch_env, irn);
/* The simulator only deals with vfp-class values; anything else is a caller error. */
433 assert(res->reg_class->regs == ia32_vfp_regs);
/* NOTE(review): "return res;" is not visible in this extract. */
435 } /* x87_get_irn_register */
437 /* -------------- x87 perm --------------- */
440 * Creates a fxch for shuffle.
442 * @param state the x87 state
443 * @param pos parameter for fxch
444 * @param block the block were fxch is inserted
446 * Creates a new fxch node and reroute the user of the old node
449 * @return the fxch node
451 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block) {
/* NOTE(review): the "ir_node *fxch;" declaration is not visible in this extract. */
453 ia32_x87_attr_t *attr;
455 fxch = new_rd_ia32_fxch(NULL, get_irn_irg(block), block, mode_E);
456 attr = get_ia32_x87_attr(fxch);
457 attr->x87[0] = &ia32_st_regs[pos];
458 attr->x87[2] = &ia32_st_regs[0];
/* Mirror the exchange in the simulated stack state. */
462 x87_fxch(state, pos);
/* NOTE(review): "return fxch;" is not visible in this extract. */
464 } /* x87_fxch_shuffle */
467 * Calculate the necessary permutations to reach dst_state.
469 * These permutations are done with fxch instructions and placed
470 * at the end of the block.
472 * Note that critical edges are removed here, so we need only
473 * a shuffle if the current block has only one successor.
475 * @param sim the simulator handle
476 * @param block the current block
477 * @param state the current x87 stack state, might be modified
478 * @param dst_block the destination block
479 * @param dst_state destination state
/* NOTE(review): many control-flow lines of this function (the opening "{", loop
 * bodies, break/continue statements, the "after" bookkeeping and the final return)
 * are not visible in this extract - review against the full source before editing. */
483 static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
484 x87_state *state, ir_node *dst_block,
485 const x87_state *dst_state)
487 int i, n_cycles, k, ri;
488 unsigned cycles[4], all_mask;
489 char cycle_idx[4][8];
490 ir_node *fxch, *before, *after;
494 assert(state->depth == dst_state->depth);
496 /* Some mathematics here:
497 If we have a cycle of length n that includes the tos,
498 we need n-1 exchange operations.
499 We can always add the tos and restore it, so we need
500 n+1 exchange operations for a cycle not containing the tos.
501 So, the maximum of needed operations is for a cycle of 7
502 not including the tos == 8.
503 This is the same number of ops we would need for using stores,
504 so exchange is cheaper (we save the loads).
505 On the other hand, we might need an additional exchange
506 in the next block to bring one operand on top, so the
507 number of ops in the first case is identical.
508 Further, no more than 4 cycles can exists (4 x 2).
/* Bitmask of stack slots not yet assigned to a cycle. */
510 all_mask = (1 << (state->depth)) - 1;
512 for (n_cycles = 0; all_mask; ++n_cycles) {
513 int src_idx, dst_idx;
515 /* find the first free slot */
516 for (i = 0; i < state->depth; ++i) {
517 if (all_mask & (1 << i)) {
518 all_mask &= ~(1 << i);
520 /* check if there are differences here */
521 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
527 /* no more cycles found */
/* Follow the permutation starting at slot i until the cycle closes. */
532 cycles[n_cycles] = (1 << i);
533 cycle_idx[n_cycles][k++] = i;
534 for (src_idx = i; ; src_idx = dst_idx) {
535 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
537 if ((all_mask & (1 << dst_idx)) == 0)
540 cycle_idx[n_cycles][k++] = dst_idx;
541 cycles[n_cycles] |= (1 << dst_idx);
542 all_mask &= ~(1 << dst_idx);
544 cycle_idx[n_cycles][k] = -1;
548 /* no permutation needed */
552 /* Hmm: permutation needed */
553 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
554 DEBUG_ONLY(x87_dump_stack(state));
555 DB((dbg, LEVEL_2, " to\n"));
556 DEBUG_ONLY(x87_dump_stack(dst_state));
560 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
561 for (ri = 0; ri < n_cycles; ++ri) {
562 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
563 for (k = 0; cycle_idx[ri][k] != -1; ++k)
564 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
565 DB((dbg, LEVEL_2, "\n"));
572 * Find the place node must be insert.
573 * We have only one successor block, so the last instruction should
576 before = sched_last(block);
577 assert(is_cfop(before));
579 /* now do the permutations */
580 for (ri = 0; ri < n_cycles; ++ri) {
581 if ((cycles[ri] & 1) == 0) {
582 /* this cycle does not include the tos */
583 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
585 sched_add_after(after, fxch);
587 sched_add_before(before, fxch);
590 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
591 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
593 sched_add_after(after, fxch);
595 sched_add_before(before, fxch);
598 if ((cycles[ri] & 1) == 0) {
599 /* this cycle does not include the tos */
600 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
601 sched_add_after(after, fxch);
608 * Create a fxch node before another node.
610 * @param state the x87 state
611 * @param n the node after the fxch
612 * @param pos exchange st(pos) with st(0)
/* NOTE(review): the opening "{", the "ir_node *fxch;" declaration and "return fxch;"
 * are not visible in this extract. */
616 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
619 ia32_x87_attr_t *attr;
620 ir_graph *irg = get_irn_irg(n);
621 ir_node *block = get_nodes_block(n);
/* Update the simulated stack first, then emit the matching real fxch. */
623 x87_fxch(state, pos);
625 fxch = new_rd_ia32_fxch(NULL, irg, block, mode_E);
626 attr = get_ia32_x87_attr(fxch);
627 attr->x87[0] = &ia32_st_regs[pos];
628 attr->x87[2] = &ia32_st_regs[0];
632 sched_add_before(n, fxch);
633 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
635 } /* x87_create_fxch */
638 * Create a fpush before node n.
640 * @param state the x87 state
641 * @param n the node after the fpush
642 * @param pos push st(pos) on stack
643 * @param op_idx replace input op_idx of n with the fpush result
645 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx) {
646 ir_node *fpush, *pred = get_irn_n(n, op_idx);
647 ia32_x87_attr_t *attr;
648 const arch_register_t *out = x87_get_irn_register(state->sim, pred);
/* Duplicate pushes are legal here: the same value ends up on the stack twice. */
650 x87_push_dbl(state, arch_register_get_index(out), pred);
652 fpush = new_rd_ia32_fpush(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
653 attr = get_ia32_x87_attr(fpush);
654 attr->x87[0] = &ia32_st_regs[pos];
655 attr->x87[2] = &ia32_st_regs[0];
658 sched_add_before(n, fpush);
660 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
661 } /* x87_create_fpush */
664 * Create a fpop before node n.
666 * @param state the x87 state
667 * @param n the node after the fpop
668 * @param num pop 1 or 2 values
670 * @return the fpop node
/* NOTE(review): the opening "{", the "ir_node *fpop;" declaration, the x87_pop()
 * state update, the loop handling num == 2 and the return are not visible here. */
672 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
675 ia32_x87_attr_t *attr;
676 int cpu = state->sim->isa->opt_arch;
/* On Athlon-class CPUs ffreep is used instead of fpop. */
680 if (ARCH_ATHLON(cpu))
681 fpop = new_rd_ia32_ffreep(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
683 fpop = new_rd_ia32_fpop(NULL, get_irn_irg(n), get_nodes_block(n), mode_E);
684 attr = get_ia32_x87_attr(fpop);
685 attr->x87[0] = &ia32_st_regs[0];
686 attr->x87[1] = &ia32_st_regs[0];
687 attr->x87[2] = &ia32_st_regs[0];
690 sched_add_before(n, fpop);
691 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
696 } /* x87_create_fpop */
699 * Creates an fldz before node n
701 * @param state the x87 state
702 * @param n the node after the fldz
704 * @return the fldz node
706 static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx) {
707 ir_graph *irg = get_irn_irg(n);
708 ir_node *block = get_nodes_block(n);
/* NOTE(review): the "ir_node *fldz;" declaration and "return fldz;" are not visible in this extract. */
711 fldz = new_rd_ia32_fldz(NULL, irg, block, mode_E);
713 sched_add_before(n, fldz);
714 DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));
/* Record the freshly loaded zero under regidx on the simulated stack. */
717 x87_push(state, regidx, fldz);
722 /* --------------------------------- liveness ------------------------------------------ */
725 * The liveness transfer function.
726 * Updates a live set over a single step from a given node to its predecessor.
727 * Everything defined at the node is removed from the set, the uses of the node get inserted.
729 * @param sim The simulator handle.
730 * @param irn The node at which liveness should be computed.
731 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
732 * the registers live after irn.
734 * @return The live bitset.
736 static vfp_liveness vfp_liveness_transfer(x87_simulator *sim, ir_node *irn, vfp_liveness live)
/* NOTE(review): the opening "{", the "int i, n;" declaration and the final
 * "return live;" are not visible in this extract. */
739 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
740 const arch_env_t *arch_env = sim->arch_env;
/* Definitions: clear every vfp register this node (or its Projs) defines. */
742 if (get_irn_mode(irn) == mode_T) {
743 const ir_edge_t *edge;
745 foreach_out_edge(irn, edge) {
746 ir_node *proj = get_edge_src_irn(edge);
748 if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
749 const arch_register_t *reg = x87_get_irn_register(sim, proj);
750 live &= ~(1 << arch_register_get_index(reg));
755 if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
756 const arch_register_t *reg = x87_get_irn_register(sim, irn);
757 live &= ~(1 << arch_register_get_index(reg));
/* Uses: set every float operand's vfp register live. */
760 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
761 ir_node *op = get_irn_n(irn, i);
763 if (mode_is_float(get_irn_mode(op)) && arch_irn_consider_in_reg_alloc(arch_env, cls, op)) {
764 const arch_register_t *reg = x87_get_irn_register(sim, op);
765 live |= 1 << arch_register_get_index(reg);
769 } /* vfp_liveness_transfer */
772 * Put all live virtual registers at the end of a block into a bitset.
774 * @param sim the simulator handle
775 * (review: stale doc - liveness is taken from sim->lv; there is no separate lv parameter)
776 * @param bl the block
778 * @return The live bitset at the end of this block
780 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
/* NOTE(review): the opening "{", the "int i;" declaration and the final
 * "return live;" are not visible in this extract. */
783 vfp_liveness live = 0;
784 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
785 const arch_env_t *arch_env = sim->arch_env;
786 const be_lv_t *lv = sim->lv;
788 be_lv_foreach(lv, block, be_lv_state_end, i) {
789 const arch_register_t *reg;
790 const ir_node *node = be_lv_get_irn(lv, block, i);
791 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
794 reg = x87_get_irn_register(sim, node);
795 live |= 1 << arch_register_get_index(reg);
799 } /* vfp_liveness_end_of_block */
801 /** get the register mask from an arch_register */
/* Single-bit mask keyed by the vfp register index; combines with vfp_liveness bitsets. */
802 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
805 * Return a bitset of argument registers which are live at the end of a node.
807 * @param sim the simulator handle
808 * @param pos the node
809 * @param kill kill mask for the output registers
811 * @return The live bitset.
813 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
815 unsigned idx = get_irn_idx(pos);
817 assert(idx < sim->n_idx);
818 return sim->live[idx] & ~kill;
819 } /* vfp_live_args_after */
822 * Calculate the liveness for a whole block and cache it.
824 * @param sim the simulator handle
825 * (review: stale doc - liveness is taken from sim->lv; there is no separate lv parameter)
826 * @param block the block
828 static void update_liveness(x87_simulator *sim, ir_node *block) {
829 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
/* NOTE(review): the "unsigned idx;"/"ir_node *irn;" declarations and the Phi
 * break inside the loop are not visible in this extract. */
833 /* now iterate through the block backward and cache the results */
834 sched_foreach_reverse(block, irn) {
835 /* stop at the first Phi: this produces the live-in */
839 idx = get_irn_idx(irn);
840 sim->live[idx] = live;
842 live = vfp_liveness_transfer(sim, irn, live);
/* The block's own index caches the live-in set. */
844 idx = get_irn_idx(block);
845 sim->live[idx] = live;
846 } /* update_liveness */
849 * Returns true if a register is live in a set.
851 * @param reg_idx the vfp register index
852 * @param live a live bitset
/* Non-zero iff bit reg_idx is set in the liveness bitset. */
854 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
858 * Dump liveness info.
860 * @param live the live bitset
/* Debug-only dump of the 8 possible vfp registers in a live set. */
862 static void vfp_dump_live(vfp_liveness live) {
/* NOTE(review): the local "int i;" declaration is not visible in this extract. */
865 DB((dbg, LEVEL_2, "Live after: "));
866 for (i = 0; i < 8; ++i) {
867 if (live & (1 << i)) {
868 DB((dbg, LEVEL_2, "vf%d ", i));
871 DB((dbg, LEVEL_2, "\n"));
872 } /* vfp_dump_live */
873 #endif /* DEBUG_libfirm */
875 /* --------------------------------- simulators ---------------------------------------- */
/* Swap two int lvalues in place. */
877 #define XCHG(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
889 * Simulate a virtual binop.
891 * @param state the x87 state
892 * @param n the node that should be simulated (and patched)
893 * @param tmpl the template containing the 4 possible x87 opcodes
895 * @return NO_NODE_ADDED
/* NOTE(review): numerous lines of this function are not visible in this extract
 * (declarations of op1_live_after/op2_live_after/dst, the REG_VFP_UKNWN branch
 * bodies, out_idx assignments, do_pop handling and several closing braces).
 * Review against the full source before changing any logic here. */
897 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl) {
898 int op2_idx = 0, op1_idx;
899 int out_idx, do_pop = 0;
900 ia32_x87_attr_t *attr;
901 ir_node *patched_insn;
903 x87_simulator *sim = state->sim;
904 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
905 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
906 const arch_register_t *op1_reg = x87_get_irn_register(sim, op1);
907 const arch_register_t *op2_reg = x87_get_irn_register(sim, op2);
908 const arch_register_t *out = x87_get_irn_register(sim, n);
909 int reg_index_1 = arch_register_get_index(op1_reg);
910 int reg_index_2 = arch_register_get_index(op2_reg);
911 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
915 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
916 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
917 arch_register_get_name(out)));
918 DEBUG_ONLY(vfp_dump_live(live));
919 DB((dbg, LEVEL_1, "Stack before: "));
920 DEBUG_ONLY(x87_dump_stack(state));
922 if(reg_index_1 == REG_VFP_UKNWN) {
926 op1_idx = x87_on_stack(state, reg_index_1);
927 assert(op1_idx >= 0);
928 op1_live_after = is_vfp_live(arch_register_get_index(op1_reg), live);
931 if (reg_index_2 != REG_VFP_NOREG) {
932 if(reg_index_2 == REG_VFP_UKNWN) {
936 /* second operand is a vfp register */
937 op2_idx = x87_on_stack(state, reg_index_2);
938 assert(op2_idx >= 0);
940 = is_vfp_live(arch_register_get_index(op2_reg), live);
943 if (op2_live_after) {
944 /* Second operand is live. */
946 if (op1_live_after) {
947 /* Both operands are live: push the first one.
948 This works even for op1 == op2. */
949 x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
950 /* now do fxxx (tos=tos X op) */
954 dst = tmpl->normal_op;
956 /* Second live, first operand is dead here, bring it to tos. */
958 x87_create_fxch(state, n, op1_idx);
963 /* now do fxxx (tos=tos X op) */
965 dst = tmpl->normal_op;
968 /* Second operand is dead. */
969 if (op1_live_after) {
970 /* First operand is live: bring second to tos. */
972 x87_create_fxch(state, n, op2_idx);
977 /* now do fxxxr (tos = op X tos) */
979 dst = tmpl->reverse_op;
981 /* Both operands are dead here, pop them from the stack. */
984 /* Both are identically and on tos, no pop needed. */
985 /* here fxxx (tos = tos X tos) */
986 dst = tmpl->normal_op;
989 /* now do fxxxp (op = op X tos, pop) */
990 dst = tmpl->normal_pop_op;
994 } else if (op1_idx == 0) {
995 assert(op1_idx != op2_idx);
996 /* now do fxxxrp (op = tos X op, pop) */
997 dst = tmpl->reverse_pop_op;
1001 /* Bring the second on top. */
1002 x87_create_fxch(state, n, op2_idx);
1003 if (op1_idx == op2_idx) {
1004 /* Both are identically and on tos now, no pop needed. */
1007 /* use fxxx (tos = tos X tos) */
1008 dst = tmpl->normal_op;
1011 /* op2 is on tos now */
1013 /* use fxxxp (op = op X tos, pop) */
1014 dst = tmpl->normal_pop_op;
1022 /* second operand is an address mode */
1023 if (op1_live_after) {
1024 /* first operand is live: push it here */
1025 x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
1027 /* use fxxx (tos = tos X mem) */
1028 dst = tmpl->normal_op;
1031 /* first operand is dead: bring it to tos */
1033 x87_create_fxch(state, n, op1_idx);
/* NOTE(review): comment below says "fxxxp" but normal_op is assigned - possibly
 * intentional via the (not visible) do_pop handling; verify against full source. */
1037 /* use fxxxp (tos = tos X mem) */
1038 dst = tmpl->normal_op;
1043 patched_insn = x87_patch_insn(n, dst);
1044 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
1049 /* patch the operation */
1050 attr = get_ia32_x87_attr(n);
1051 attr->x87[0] = op1_reg = &ia32_st_regs[op1_idx];
1052 if (reg_index_2 != REG_VFP_NOREG) {
1053 attr->x87[1] = op2_reg = &ia32_st_regs[op2_idx];
1055 attr->x87[2] = out = &ia32_st_regs[out_idx];
1057 if (reg_index_2 != REG_VFP_NOREG) {
1058 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1059 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
1060 arch_register_get_name(out)));
1062 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1063 arch_register_get_name(op1_reg),
1064 arch_register_get_name(out)));
1067 return NO_NODE_ADDED;
1071 * Simulate a virtual Unop.
1073 * @param state the x87 state
1074 * @param n the node that should be simulated (and patched)
1075 * @param op the x87 opcode that will replace n's opcode
1077 * @return NO_NODE_ADDED
/* NOTE(review): several interior lines (the branch bodies after the fpush/fxch
 * calls and some closing braces) are not visible in this extract. */
1079 static int sim_unop(x87_state *state, ir_node *n, ir_op *op) {
1080 int op1_idx, out_idx;
1081 x87_simulator *sim = state->sim;
1082 const arch_register_t *op1 = x87_get_irn_register(sim, get_irn_n(n, UNOP_IDX));
1083 const arch_register_t *out = x87_get_irn_register(sim, n);
1084 ia32_x87_attr_t *attr;
1085 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1087 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1088 DEBUG_ONLY(vfp_dump_live(live));
1090 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1092 if (is_vfp_live(arch_register_get_index(op1), live)) {
1093 /* push the operand here */
1094 x87_create_fpush(state, n, op1_idx, UNOP_IDX);
1098 /* operand is dead, bring it to tos */
1100 x87_create_fxch(state, n, op1_idx);
/* The unop always operates on and writes the tos. */
1105 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1107 attr = get_ia32_x87_attr(n);
1108 attr->x87[0] = op1 = &ia32_st_regs[0];
1109 attr->x87[2] = out = &ia32_st_regs[0];
1110 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1112 return NO_NODE_ADDED;
1116 * Simulate a virtual Load instruction.
1118 * @param state the x87 state
1119 * @param n the node that should be simulated (and patched)
1120 * @param op the x87 opcode that will replace n's opcode
1122 * @return NO_NODE_ADDED
1124 static int sim_load(x87_state *state, ir_node *n, ir_op *op) {
1125 const arch_register_t *out = x87_get_irn_register(state->sim, n);
1126 ia32_x87_attr_t *attr;
1128 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
/* Loads always push their result onto the x87 stack. */
1129 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1130 assert(out == x87_get_irn_register(state->sim, n));
1131 attr = get_ia32_x87_attr(n);
1132 attr->x87[2] = out = &ia32_st_regs[0];
1133 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1135 return NO_NODE_ADDED;
/* NOTE(review): the closing "}" of sim_load is not visible in this extract. */
1139 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1141 * @param store The store
1142 * @param old_val The former value
1143 * @param new_val The new value
1145 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val) {
1146 const ir_edge_t *edge, *ne;
/* Safe iteration: set_irn_n below mutates the out edges we are walking. */
1148 foreach_out_edge_safe(old_val, edge, ne) {
1149 ir_node *user = get_edge_src_irn(edge);
1151 if (! user || user == store)
/* NOTE(review): the "continue;" after this guard and the "int i;" declaration
 * are not visible in this extract. */
1154 /* if the user is scheduled after the store: rewire */
1155 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1157 /* find the input of the user pointing to the old value */
1158 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1159 if (get_irn_n(user, i) == old_val)
1160 set_irn_n(user, i, new_val);
1164 } /* collect_and_rewire_users */
1167 * Simulate a virtual Store.
1169 * @param state the x87 state
1170 * @param n the node that should be simulated (and patched)
1171 * @param op the x87 store opcode
1172 * @param op_p the x87 store and pop opcode
/* NOTE(review): many interior lines of this function (else framings, the
 * "ir_mode *mode;"/"ir_graph *irg;" declarations, x87_pop calls, insn = NODE_ADDED
 * assignments and the final return) are not visible in this extract. Review
 * against the full source before changing any logic here. */
1174 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p) {
1175 x87_simulator *sim = state->sim;
1176 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1177 const arch_register_t *op2 = x87_get_irn_register(sim, val);
1178 unsigned live = vfp_live_args_after(sim, n, 0);
1179 int insn = NO_NODE_ADDED;
1180 ia32_x87_attr_t *attr;
1181 int op2_reg_idx, op2_idx, depth;
1182 int live_after_node;
1185 op2_reg_idx = arch_register_get_index(op2);
1186 if (op2_reg_idx == REG_VFP_UKNWN) {
1187 /* just take any value from stack */
1188 if(state->depth > 0) {
1190 DEBUG_ONLY(op2 = NULL);
1191 live_after_node = 1;
1193 /* produce a new value which we will consume immediately */
1194 x87_create_fldz(state, n, op2_reg_idx);
1195 live_after_node = 0;
1196 op2_idx = x87_on_stack(state, op2_reg_idx);
1197 assert(op2_idx >= 0);
1200 op2_idx = x87_on_stack(state, op2_reg_idx);
1201 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1202 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1203 assert(op2_idx >= 0);
1206 mode = get_ia32_ls_mode(n);
1207 depth = x87_get_depth(state);
1209 if (live_after_node) {
1211 Problem: fst doesn't support mode_E (spills), only fstp does
1213 - stack not full: push value and fstp
1214 - stack full: fstp value and load again
1216 if (mode == mode_E) {
1217 if (depth < N_x87_REGS) {
1218 /* ok, we have a free register: push + fstp */
1219 x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
1221 x87_patch_insn(n, op_p);
1223 ir_node *vfld, *mem, *block, *rproj, *mproj;
1226 /* stack full here: need fstp + load */
1228 x87_patch_insn(n, op_p);
1230 block = get_nodes_block(n);
1231 irg = get_irn_irg(n);
1232 vfld = new_rd_ia32_vfld(NULL, irg, block, get_irn_n(n, 0), get_irn_n(n, 1), new_rd_NoMem(irg), get_ia32_ls_mode(n));
1234 /* copy all attributes */
1235 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1236 if (is_ia32_use_frame(n))
1237 set_ia32_use_frame(vfld);
1238 set_ia32_op_type(vfld, ia32_am_Source);
1239 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1240 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1241 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1243 rproj = new_r_Proj(irg, block, vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1244 mproj = new_r_Proj(irg, block, vfld, mode_M, pn_ia32_vfld_M);
1245 mem = get_irn_Proj_for_mode(n, mode_M);
1247 assert(mem && "Store memory not found");
1249 arch_set_irn_register(sim->arch_env, rproj, op2);
1251 /* reroute all former users of the store memory to the load memory */
1252 edges_reroute(mem, mproj, irg);
1253 /* set the memory input of the load to the store memory */
1254 set_irn_n(vfld, n_ia32_vfld_mem, mem);
1256 sched_add_after(n, vfld);
1257 sched_add_after(vfld, rproj);
1259 /* rewire all users, scheduled after the store, to the loaded value */
1260 collect_and_rewire_users(n, val, rproj);
1265 /* we can only store the tos to memory */
1267 x87_create_fxch(state, n, op2_idx);
1269 /* mode != mode_E -> use normal fst */
1270 x87_patch_insn(n, op);
1273 /* we can only store the tos to memory */
1275 x87_create_fxch(state, n, op2_idx);
1278 x87_patch_insn(n, op_p);
1281 attr = get_ia32_x87_attr(n);
1282 attr->x87[1] = op2 = &ia32_st_regs[0];
1283 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/* Macro factories generating the one-line sim_<op>() trampolines that
 * dispatch each virtual x87 opcode to the generic simulation helpers
 * (sim_binop / sim_load / sim_unop / sim_store), passing the matching
 * real, reversed and/or TOS-popping ia32 opcode variants.
 * NOTE(review): the closing lines of the function-generating macros are
 * not visible in this excerpt. */
#define _GEN_BINOP(op, rev) \
static int sim_##op(x87_state *state, ir_node *n) { \
	exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
	return sim_binop(state, n, &tmpl); \
#define GEN_BINOP(op) _GEN_BINOP(op, op)
#define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
#define GEN_LOAD2(op, nop) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_load(state, n, op_ia32_##nop); \
#define GEN_LOAD(op) GEN_LOAD2(op, op)
#define GEN_UNOP(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_unop(state, n, op_ia32_##op); \
#define GEN_STORE(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
	return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
/**
 * Simulate a fCmpJmp (virtual float compare-and-jump).
 *
 * Picks a concrete fcom variant (fcom/fcomr, with zero, one or two pops)
 * depending on where both operands sit on the simulated x87 stack and
 * whether they are still live after this node; fxch exchanges are
 * inserted first to bring the operands into position.
 *
 * @param state the x87 state
 * @param n     the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 */
static int sim_fCmpJmp(x87_state *state, ir_node *n) {
	ia32_x87_attr_t *attr;
	x87_simulator *sim = state->sim;
	ir_node *op1_node = get_irn_n(n, n_ia32_vfCmpJmp_left);
	ir_node *op2_node = get_irn_n(n, n_ia32_vfCmpJmp_right);
	const arch_register_t *op1 = x87_get_irn_register(sim, op1_node);
	const arch_register_t *op2 = x87_get_irn_register(sim, op2_node);
	int reg_index_1 = arch_register_get_index(op1);
	int reg_index_2 = arch_register_get_index(op2);
	unsigned live = vfp_live_args_after(sim, n, 0);

	DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
		arch_register_get_name(op1), arch_register_get_name(op2)));
	DEBUG_ONLY(vfp_dump_live(live));
	DB((dbg, LEVEL_1, "Stack before: "));
	DEBUG_ONLY(x87_dump_stack(state));

	/* the first operand must already reside on the simulated stack */
	op1_idx = x87_on_stack(state, reg_index_1);
	assert(op1_idx >= 0);

	/* BEWARE: check for comp a,a cases, they might happen */
	if (reg_index_2 != REG_VFP_NOREG) {
		/* second operand is a vfp register */
		op2_idx = x87_on_stack(state, reg_index_2);
		assert(op2_idx >= 0);

		if (is_vfp_live(arch_register_get_index(op2), live)) {
			/* second operand is live */
			if (is_vfp_live(arch_register_get_index(op1), live)) {
				/* both operands are live */
				/* res = tos X op */
				dst = op_ia32_fcomJmp;
			} else if (op2_idx == 0) {
				/* res = op X tos */
				dst = op_ia32_fcomrJmp;
				/* bring the first one to tos */
				x87_create_fxch(state, n, op1_idx);
				/* res = tos X op */
				dst = op_ia32_fcomJmp;
			/* second live, first operand is dead here, bring it to tos.
			   This means further, op1_idx != op2_idx. */
			assert(op1_idx != op2_idx);
			x87_create_fxch(state, n, op1_idx);
			/* res = tos X op, pop */
			dst = op_ia32_fcompJmp;
			/* second operand is dead */
			if (is_vfp_live(arch_register_get_index(op1), live)) {
				/* first operand is live: bring second to tos.
				   This means further, op1_idx != op2_idx. */
				assert(op1_idx != op2_idx);
				x87_create_fxch(state, n, op2_idx);
				/* res = op X tos, pop */
				dst = op_ia32_fcomrpJmp;
			/* both operands are dead here, check first for identity. */
			if (op1_idx == op2_idx) {
				/* identically, one pop needed */
				x87_create_fxch(state, n, op1_idx);
				/* res = tos X op, pop */
				dst = op_ia32_fcompJmp;
			/* different, move them to st and st(1) and pop both.
			   The tricky part is to get one into st(1). */
			else if (op2_idx == 1) {
				/* good, second operand is already in the right place, move the first */
				/* bring the first on top */
				x87_create_fxch(state, n, op1_idx);
				assert(op2_idx != 0);
				/* res = tos X op, pop, pop */
				dst = op_ia32_fcomppJmp;
			} else if (op1_idx == 1) {
				/* good, first operand is already in the right place, move the second */
				/* bring the first on top */
				x87_create_fxch(state, n, op2_idx);
				assert(op1_idx != 0);
				dst = op_ia32_fcomrppJmp;
			/* if one is already the TOS, we need two fxch */
				/* first one is TOS, move to st(1) */
				x87_create_fxch(state, n, 1);
				assert(op2_idx != 1);
				x87_create_fxch(state, n, op2_idx);
				/* res = op X tos, pop, pop */
				dst = op_ia32_fcomrppJmp;
			} else if (op2_idx == 0) {
				/* second one is TOS, move to st(1) */
				x87_create_fxch(state, n, 1);
				assert(op1_idx != 1);
				x87_create_fxch(state, n, op1_idx);
				/* res = tos X op, pop, pop */
				dst = op_ia32_fcomppJmp;
			/* none of them is either TOS or st(1), 3 fxch needed */
			x87_create_fxch(state, n, op2_idx);
			assert(op1_idx != 0);
			x87_create_fxch(state, n, 1);
			x87_create_fxch(state, n, op1_idx);
			/* res = tos X op, pop, pop */
			dst = op_ia32_fcomppJmp;
	/* second operand is an address mode */
	if (is_vfp_live(arch_register_get_index(op1), live)) {
		/* first operand is live: bring it to TOS */
		x87_create_fxch(state, n, op1_idx);
		dst = op_ia32_fcomJmp;
	/* first operand is dead: bring it to tos */
	x87_create_fxch(state, n, op1_idx);
	dst = op_ia32_fcompJmp;

	/* replace the virtual node by the selected real compare opcode */
	x87_patch_insn(n, dst);
	assert(pop_cnt < 3);

	/* patch the operation */
	attr = get_ia32_x87_attr(n);
	op1 = &ia32_st_regs[op1_idx];
	op2 = &ia32_st_regs[op2_idx];
	attr->x87[2] = NULL;
	DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
		arch_register_get_name(op1), arch_register_get_name(op2)));
	DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
		arch_register_get_name(op1)));
	return NO_NODE_ADDED;
} /* sim_fCmpJmp */
/**
 * Simulate a be_Keep: pop any kept vfp value that is already on the
 * simulated stack but dead after the Keep.
 *
 * @param state the x87 state
 * @param node  the node that should be simulated
 *
 * @return NODE_ADDED if an fpop was inserted, NO_NODE_ADDED otherwise
 */
int sim_Keep(x87_state *state, ir_node *node)
	const arch_register_t *op_reg;
	int node_added = NO_NODE_ADDED;

	DB((dbg, LEVEL_1, ">>> %+F\n", node));

	arity = get_irn_arity(node);
	for(i = 0; i < arity; ++i) {
		op = get_irn_n(node, i);
		op_reg = arch_get_irn_register(state->sim->arch_env, op);
		/* only virtual fp register inputs matter here */
		if(arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
		reg_id = arch_register_get_index(op_reg);
		live = vfp_live_args_after(state->sim, node, 0);

		op_stack_idx = x87_on_stack(state, reg_id);
		/* on stack but dead afterwards: pop it right after the Keep */
		if(op_stack_idx >= 0 && !is_vfp_live(reg_id, live)) {
			x87_create_fpop(state, sched_next(node), 1);
			node_added = NODE_ADDED;

	DB((dbg, LEVEL_1, "Stack after: "));
	DEBUG_ONLY(x87_dump_stack(state));
/**
 * Keep a floating point node alive by adding a be_Keep for it directly
 * after the node in the schedule.
 *
 * @param state the x87 state
 * @param node  the node to keep alive
 */
void keep_float_node_alive(x87_state *state, ir_node *node)
	const arch_register_class_t *cls;

	irg = get_irn_irg(node);
	block = get_nodes_block(node);
	cls = arch_get_irn_reg_class(state->sim->arch_env, node, -1);
	keep = be_new_Keep(cls, irg, block, 1, in);

	/* the node must already be scheduled so the Keep can follow it */
	assert(sched_is_scheduled(node));
	sched_add_after(node, keep);
/**
 * Create a copy of a node. Recreate the node if it's a constant.
 *
 * @param state the x87 state
 * @param n     the node to be copied
 *
 * @return the copy of n
 */
static ir_node *create_Copy(x87_state *state, ir_node *n) {
	x87_simulator *sim = state->sim;
	ir_graph *irg = get_irn_irg(n);
	dbg_info *n_dbg = get_irn_dbg_info(n);
	ir_mode *mode = get_irn_mode(n);
	ir_node *block = get_nodes_block(n);
	ir_node *pred = get_irn_n(n, 0);
	/* constructor used to recreate a constant instead of copying it */
	ir_node *(*cnstr)(dbg_info *, ir_graph *, ir_node *, ir_mode *) = NULL;
	const arch_register_t *out;
	const arch_register_t *op1;
	ia32_x87_attr_t *attr;

	/* Do not copy constants, recreate them. */
	switch (get_ia32_irn_opcode(pred)) {
	case iro_ia32_Unknown_VFP:
		/* an unknown value is materialized as fldz */
		cnstr = new_rd_ia32_fldz;
		cnstr = new_rd_ia32_fld1;
	case iro_ia32_fldpi:
		cnstr = new_rd_ia32_fldpi;
	case iro_ia32_fldl2e:
		cnstr = new_rd_ia32_fldl2e;
	case iro_ia32_fldl2t:
		cnstr = new_rd_ia32_fldl2t;
	case iro_ia32_fldlg2:
		cnstr = new_rd_ia32_fldlg2;
	case iro_ia32_fldln2:
		cnstr = new_rd_ia32_fldln2;

	out = x87_get_irn_register(sim, n);
	op1 = x87_get_irn_register(sim, pred);

	if (cnstr != NULL) {
		/* copy a constant */
		res = (*cnstr)(n_dbg, irg, block, mode);
		x87_push(state, arch_register_get_index(out), res);
		attr = get_ia32_x87_attr(res);
		attr->x87[2] = &ia32_st_regs[0];
		/* a real value: duplicate it on the stack with fpushCopy */
		int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
		res = new_rd_ia32_fpushCopy(n_dbg, irg, block, pred, mode);
		x87_push(state, arch_register_get_index(out), res);
		attr = get_ia32_x87_attr(res);
		attr->x87[0] = &ia32_st_regs[op1_idx];
		attr->x87[2] = &ia32_st_regs[0];
	arch_set_irn_register(sim->arch_env, res, out);
/**
 * Simulate a be_Copy.
 *
 * @param state the x87 state
 * @param n     the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 */
static int sim_Copy(x87_state *state, ir_node *n) {
	x87_simulator *sim = state->sim;
	const arch_register_t *out;
	const arch_register_t *op1;
	const arch_register_class_t *class;
	ir_node *node, *next;
	ia32_x87_attr_t *attr;
	int op1_idx, out_idx;

	/* only vfp copies are simulated; everything else is left alone */
	class = arch_get_irn_reg_class(sim->arch_env, n, -1);
	if (class->regs != ia32_vfp_regs)
	pred = get_irn_n(n, 0);
	out = x87_get_irn_register(sim, n);
	op1 = x87_get_irn_register(sim, pred);
	live = vfp_live_args_after(sim, n, REGMASK(out));

	DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
		arch_register_get_name(op1), arch_register_get_name(out)));
	DEBUG_ONLY(vfp_dump_live(live));

	/* handle the infamous unknown value */
	if (arch_register_get_index(op1) == REG_VFP_UKNWN) {
		/* Operand is still live, a real copy. We need here an fpush that can
		   hold a a register, so use the fpushCopy or recreate constants */
		node = create_Copy(state, n);
		assert(is_ia32_fldz(node));
		next = sched_next(n);
		sched_add_before(next, node);
		DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
			arch_get_irn_register(sim->arch_env, node)->name));
		return NO_NODE_ADDED;

	op1_idx = x87_on_stack(state, arch_register_get_index(op1));

	if (is_vfp_live(arch_register_get_index(op1), live)) {
		ir_node *pred = get_irn_n(n, 0);

		/* Operand is still live, a real copy. We need here an fpush that can
		   hold a a register, so use the fpushCopy or recreate constants */
		node = create_Copy(state, n);

		/* We have to make sure the old value doesn't go dead (which can happen
		 * when we recreate constants). As the simulator expected that value in
		 * the pred blocks. This is unfortunate as removing it would save us 1
		 * instruction, but we would have to rerun all the simulation to get
		 */
		next = sched_next(n);
		sched_add_before(next, node);

		/* pred lost all users: attach a Keep so it stays alive */
		if(get_irn_n_edges(pred) == 0) {
			keep_float_node_alive(state, pred);

		DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
			arch_get_irn_register(sim->arch_env, node)->name));
		out_idx = x87_on_stack(state, arch_register_get_index(out));

		if (out_idx >= 0 && out_idx != op1_idx) {
			/* Matze: out already on stack? how can this happen? */

			/* op1 must be killed and placed where out is */
			/* best case, simple remove and rename */
			x87_patch_insn(n, op_ia32_Pop);
			attr = get_ia32_x87_attr(n);
			attr->x87[0] = op1 = &ia32_st_regs[0];
			x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
			/* move op1 to tos, store and pop it */
			x87_create_fxch(state, n, op1_idx);
			x87_patch_insn(n, op_ia32_Pop);
			attr = get_ia32_x87_attr(n);
			attr->x87[0] = op1 = &ia32_st_regs[out_idx];
			x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
		DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
		/* just a virtual copy */
		x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
		/* don't remove the node to keep the verifier quiet :),
		   the emitter won't emit any code for the node */
		DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
		exchange(n, get_unop_op(n));

	return NO_NODE_ADDED;
/**
 * Returns the result proj of the call
 * (the Proj carrying pn_be_Call_first_res).
 */
static ir_node *get_call_result_proj(ir_node *call) {
	const ir_edge_t *edge;

	/* search the result proj */
	foreach_out_edge(call, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		long pn = get_Proj_proj(proj);

		/* first result of the be_Call found */
		if (pn == pn_be_Call_first_res) {
} /* get_call_result_proj */
/**
 * Simulate a be_Call.
 *
 * @param state    the x87 state
 * @param n        the node that should be simulated
 * @param arch_env the architecture environment
 *
 * @return NO_NODE_ADDED
 */
static int sim_Call(x87_state *state, ir_node *n, const arch_env_t *arch_env)
	ir_type *call_tp = be_Call_get_type(n);
	const arch_register_t *reg;

	DB((dbg, LEVEL_1, ">>> %+F\n", n));

	/* at the begin of a call the x87 state should be empty */
	assert(state->depth == 0 && "stack not empty before call");

	/* calls without a result leave the (empty) stack untouched */
	if (get_method_n_ress(call_tp) <= 0)

	/*
	 * If the called function returns a float, it is returned in st(0).
	 * This even happens if the return value is NOT used.
	 * Moreover, only one return result is supported.
	 */
	res_type = get_method_res_type(call_tp, 0);
	mode = get_type_mode(res_type);

	if (mode == NULL || !mode_is_float(mode))

	resproj = get_call_result_proj(n);
	assert(resproj != NULL);

	/* model the float return value as pushed onto the x87 stack */
	reg = x87_get_irn_register(state->sim, resproj);
	x87_push(state, arch_register_get_index(reg), resproj);

	DB((dbg, LEVEL_1, "Stack after: "));
	DEBUG_ONLY(x87_dump_stack(state));

	return NO_NODE_ADDED;
/**
 * Simulate a be_Spill.
 *
 * @param state the x87 state
 * @param n     the node that should be simulated (and patched)
 *
 * Should not happen, spills are lowered before x87 simulator see them.
 */
static int sim_Spill(x87_state *state, ir_node *n) {
	assert(0 && "Spill not lowered");
	/* fallback: treat the spill like a float store */
	return sim_fst(state, n);
/**
 * Simulate a be_Reload.
 *
 * @param state the x87 state
 * @param n     the node that should be simulated (and patched)
 *
 * Should not happen, reloads are lowered before x87 simulator see them.
 */
static int sim_Reload(x87_state *state, ir_node *n) {
	assert(0 && "Reload not lowered");
	/* fallback: treat the reload like a float load */
	return sim_fld(state, n);
/**
 * Simulate a be_Return.
 *
 * @param state the x87 state
 * @param n     the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 */
static int sim_Return(x87_state *state, ir_node *n) {
	int n_res = be_Return_get_n_rets(n);
	int i, n_float_res = 0;

	/* only floating point return values must resist on stack */
	for (i = 0; i < n_res; ++i) {
		ir_node *res = get_irn_n(n, be_pos_Return_val + i);

		if (mode_is_float(get_irn_mode(res)))
	/* the stack must hold exactly the float results at this point */
	assert(x87_get_depth(state) == n_float_res);

	/* pop them virtually */
	for (i = n_float_res - 1; i >= 0; --i)

	return NO_NODE_ADDED;
/** An input/output register pair, used when simulating a be_Perm. */
typedef struct _perm_data_t {
	const arch_register_t *in;   /**< the input register */
	const arch_register_t *out;  /**< the output register */
/**
 * Simulate a be_Perm.
 *
 * @param state the x87 state
 * @param irn   the node that should be simulated (and patched)
 *
 * @return NO_NODE_ADDED
 */
static int sim_Perm(x87_state *state, ir_node *irn) {
	x87_simulator *sim = state->sim;
	ir_node *pred = get_irn_n(irn, 0);
	const ir_edge_t *edge;

	/* handle only floating point Perms */
	if (! mode_is_float(get_irn_mode(pred)))
		return NO_NODE_ADDED;

	DB((dbg, LEVEL_1, ">>> %+F\n", irn));

	/* Perm is a pure virtual instruction on x87.
	   All inputs must be on the FPU stack and are pairwise
	   different from each other.
	   So, all we need to do is to permutate the stack state. */
	n = get_irn_arity(irn);
	NEW_ARR_A(int, stack_pos, n);

	/* collect old stack positions */
	for (i = 0; i < n; ++i) {
		const arch_register_t *inreg = x87_get_irn_register(sim, get_irn_n(irn, i));
		int idx = x87_on_stack(state, arch_register_get_index(inreg));

		assert(idx >= 0 && "Perm argument not on x87 stack");

	/* now do the permutation */
	foreach_out_edge(irn, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		const arch_register_t *out = x87_get_irn_register(sim, proj);
		long num = get_Proj_proj(proj);

		assert(0 <= num && num < n && "More Proj's than Perm inputs");
		/* each Proj takes over the stack slot of its matching input */
		x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
	DB((dbg, LEVEL_1, "<<< %+F\n", irn));

	return NO_NODE_ADDED;
/**
 * Simulate a be_Barrier: materialize an fldz for every Unknown_VFP
 * input so a real value is present on the x87 stack.
 *
 * @param state the x87 state
 * @param node  the Barrier node
 *
 * @return NO_NODE_ADDED
 */
static int sim_Barrier(x87_state *state, ir_node *node) {
	//const arch_env_t *arch_env = state->sim->arch_env;

	/* materialize unknown if needed */
	arity = get_irn_arity(node);
	for(i = 0; i < arity; ++i) {
		const arch_register_t *reg;
		ia32_x87_attr_t *attr;
		ir_node *in = get_irn_n(node, i);

		if(!is_ia32_Unknown_VFP(in))

		/* TODO: not completely correct... */
		reg = &ia32_vfp_regs[REG_VFP_UKNWN];

		/* push a zero constant in place of the unknown value */
		block = get_nodes_block(node);
		zero = new_rd_ia32_fldz(NULL, current_ir_graph, block, mode_E);
		x87_push(state, arch_register_get_index(reg), zero);

		attr = get_ia32_x87_attr(zero);
		attr->x87[2] = &ia32_st_regs[0];

		sched_add_before(node, zero);

		set_irn_n(node, i, zero);

	return NO_NODE_ADDED;
/**
 * Kill any dead registers at block start by popping them from the stack.
 *
 * @param sim         the simulator handle
 * @param block       the current block
 * @param start_state the x87 state at the begin of the block
 *
 * @return the x87 state after dead register killed
 */
static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state) {
	x87_state *state = start_state;
	ir_node *first_insn = sched_first(block);
	ir_node *keep = NULL;
	unsigned live = vfp_live_args_after(sim, block, 0);
	int i, depth, num_pop;

	/* collect a bitmask of stack slots whose values are not live-in */
	depth = x87_get_depth(state);
	for (i = depth - 1; i >= 0; --i) {
		int reg = x87_get_st_reg(state, i);

		if (! is_vfp_live(reg, live))
			kill_mask |= (1 << i);

	/* create a new state, will be changed */
	state = x87_clone_state(sim, state);

	DB((dbg, LEVEL_1, "Killing deads:\n"));
	DEBUG_ONLY(vfp_dump_live(live));
	DEBUG_ONLY(x87_dump_stack(state));

	if (kill_mask != 0 && live == 0) {
		int cpu = sim->isa->arch;

		/* special case: kill all registers */
		if (ARCH_ATHLON(sim->isa->opt_arch) && ARCH_MMX(cpu)) {
			if (ARCH_AMD(cpu)) {
				/* use FEMMS on AMD processors to clear all */
				keep = new_rd_ia32_femms(NULL, get_irn_irg(block), block, mode_E);
				/* use EMMS to clear all */
				keep = new_rd_ia32_emms(NULL, get_irn_irg(block), block, mode_E);
			sched_add_before(first_insn, keep);

	/* now kill registers */
		/* we can only kill from TOS, so bring them up */
		if (! (kill_mask & 1)) {
			/* search from behind, because we can do a double-pop */
			for (i = depth - 1; i >= 0; --i) {
				if (kill_mask & (1 << i)) {
					kill_mask &= ~(1 << i);

			x87_set_st(state, -1, keep, i);
			x87_create_fxch(state, first_insn, i);

		if ((kill_mask & 3) == 3) {
			/* we can do a double-pop */
			/* only a single pop */

		kill_mask >>= num_pop;
		keep = x87_create_fpop(state, first_insn, num_pop);
} /* x87_kill_deads */
/**
 * If we have PhiEs with unknown operands then we have to make sure that some
 * value is actually put onto the stack.
 */
static void fix_unknown_phis(x87_state *state, ir_node *block,
                             ir_node *pred_block, int pos)
	sched_foreach(block, node) {
		const arch_register_t *reg;
		ia32_x87_attr_t *attr;

		op = get_Phi_pred(node, pos);
		/* only Phi inputs that are still Unknown_VFP need fixing */
		if(!is_ia32_Unknown_VFP(op))

		reg = arch_get_irn_register(state->sim->arch_env, node);

		/* create a zero at end of pred block */
		zero = new_rd_ia32_fldz(NULL, current_ir_graph, pred_block, mode_E);
		x87_push(state, arch_register_get_index(reg), zero);

		attr = get_ia32_x87_attr(zero);
		attr->x87[2] = &ia32_st_regs[0];

		assert(is_ia32_fldz(zero));
		/* schedule the fldz before the control flow leaves pred_block */
		sched_add_before(sched_last(pred_block), zero);

		set_Phi_pred(node, pos, zero);
/**
 * Run a simulation and fix all virtual instructions for a block.
 *
 * @param sim   the simulator handle
 * @param block the current block
 */
static void x87_simulate_block(x87_simulator *sim, ir_node *block) {
	blk_state *bl_state = x87_get_bl_state(sim, block);
	x87_state *state = bl_state->begin;
	const ir_edge_t *edge;
	ir_node *start_block;

	assert(state != NULL);
	/* already processed? */
	if (bl_state->end != NULL)

	DB((dbg, LEVEL_1, "Simulate %+F\n", block));
	DB((dbg, LEVEL_2, "State at Block begin:\n "));
	DEBUG_ONLY(x87_dump_stack(state));

	/* at block begin, kill all dead registers */
	state = x87_kill_deads(sim, block, state);
	/* create a new state, will be changed */
	state = x87_clone_state(sim, state);

	/* beware, n might change */
	for (n = sched_first(block); !sched_is_end(n); n = next) {
		ir_op *op = get_irn_op(n);

		next = sched_next(n);
		/* ops without a registered sim_func need no simulation */
		if (op->ops.generic == NULL)

		func = (sim_func)op->ops.generic;

		/* simulate and patch the node */
		node_inserted = (*func)(state, n);

		/*
		   sim_func might have added an additional node after n,
		   beware: n must not be changed by sim_func
		   (i.e. removed from schedule) in this case
		 */
		if (node_inserted != NO_NODE_ADDED)
			next = sched_next(n);

	start_block = get_irg_start_block(get_irn_irg(block));

	DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));

	/* check if the state must be shuffled */
	foreach_block_succ(block, edge) {
		ir_node *succ = get_edge_src_irn(edge);
		blk_state *succ_state;

		if (succ == start_block)

		succ_state = x87_get_bl_state(sim, succ);

		fix_unknown_phis(state, succ, block, get_edge_src_pos(edge));

		if (succ_state->begin == NULL) {
			DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
			DEBUG_ONLY(x87_dump_stack(state));
			succ_state->begin = state;

			waitq_put(sim->worklist, succ);
			DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
			/* There is already a begin state for the successor, bad.
			   Do the necessary permutations.
			   Note that critical edges are removed, so this is always possible:
			   If the successor has more than one possible input, then it must
			   ... */
			x87_shuffle(sim, block, state, succ, succ_state->begin);

	bl_state->end = state;
} /* x87_simulate_block */
/**
 * Create a new x87 simulator.
 *
 * @param sim      a simulator handle, will be initialized
 * @param irg      the current graph
 * @param arch_env the architecture environment
 */
static void x87_init_simulator(x87_simulator *sim, ir_graph *irg,
                               const arch_env_t *arch_env)
	obstack_init(&sim->obst);
	sim->blk_states = pmap_create();
	sim->arch_env = arch_env;
	sim->n_idx = get_irg_last_idx(irg);
	/* liveness cache, indexed by node idx (filled by update_liveness) */
	sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);
	sim->isa = (ia32_isa_t *)arch_env->isa;

	DB((dbg, LEVEL_1, "--------------------------------\n"
		"x87 Simulator started for %+F\n", irg));

	/* set the generic function pointer of instruction we must simulate */
	clear_irp_opcodes_generic_func();

#define ASSOC(op)      (op_ ## op)->ops.generic = (op_func)(sim_##op)
#define ASSOC_IA32(op) (op_ia32_v ## op)->ops.generic = (op_func)(sim_##op)
#define ASSOC_BE(op)   (op_be_ ## op)->ops.generic = (op_func)(sim_##op)
	ASSOC_IA32(fCmpJmp);
} /* x87_init_simulator */
/**
 * Destroy a x87 simulator.
 *
 * Frees the per-block state map and everything allocated on the
 * simulator's obstack (including the liveness cache).
 *
 * @param sim the simulator handle
 */
static void x87_destroy_simulator(x87_simulator *sim) {
	pmap_destroy(sim->blk_states);
	obstack_free(&sim->obst, NULL);
	DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
} /* x87_destroy_simulator */
2284 * Pre-block walker: calculate the liveness information for the block
2285 * and store it into the sim->live cache.
2287 static void update_liveness_walker(ir_node *block, void *data) {
2288 x87_simulator *sim = data;
2289 update_liveness(sim, block);
2290 } /* update_liveness_walker */
/**
 * Run a simulation and fix all virtual instructions for a graph.
 *
 * @param arch_env the architecture environment
 * @param birg     the current backend graph
 *
 * Needs a block-schedule.
 */
void x87_simulate_graph(const arch_env_t *arch_env, be_irg_t *birg) {
	ir_node *block, *start_block;
	blk_state *bl_state;
	ir_graph *irg = be_get_birg_irg(birg);

	/* create the simulator */
	x87_init_simulator(&sim, irg, arch_env);

	start_block = get_irg_start_block(irg);
	bl_state = x87_get_bl_state(&sim, start_block);

	/* start with the empty state */
	bl_state->begin = empty;

	/* seed the worklist with the start block */
	sim.worklist = new_waitq();
	waitq_put(sim.worklist, start_block);

	be_assure_liveness(birg);
	sim.lv = be_get_birg_liveness(birg);
	// sim.lv = be_liveness(be_get_birg_irg(birg));
	be_liveness_assure_sets(sim.lv);

	/* Calculate the liveness for all nodes. We must precalculate this info,
	 * because the simulator adds new nodes (possible before Phi nodes) which
	 * would let a lazy calculation fail.
	 * On the other hand we reduce the computation amount due to
	 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
	 */
	irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);

		/* process blocks from the worklist until none are left */
		block = waitq_get(sim.worklist);
		x87_simulate_block(&sim, block);
	} while (! waitq_empty(sim.worklist));

	del_waitq(sim.worklist);
	x87_destroy_simulator(&sim);
} /* x87_simulate_graph */
/** Initialize the x87 simulator module: register its debug channel. */
void ia32_init_x87(void) {
	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
} /* ia32_init_x87 */