/*
2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
*/
36 #include "iredges_t.h"
48 #include "../belive_t.h"
49 #include "../besched_t.h"
50 #include "../benode_t.h"
51 #include "bearch_ia32_t.h"
52 #include "ia32_new_nodes.h"
53 #include "gen_ia32_new_nodes.h"
54 #include "gen_ia32_regalloc_if.h"
56 #include "ia32_architecture.h"
/* Map a logical stack offset into the circular st[] buffer.
 * The mask form implies N_x87_REGS is a power of two. */
63 #define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
65 /** the debug handle */
66 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
68 /* Forward declaration. */
69 typedef struct _x87_simulator x87_simulator;
/**
72 * An exchange template.
73 * Note that our virtual functions have the same inputs
74 * and attributes as the real ones, so we can simple exchange
76 * Further, x87 supports inverse instructions, so we can handle them.
*/
78 typedef struct _exchange_tmpl {
79 ir_op *normal_op; /**< the normal one */
80 ir_op *reverse_op; /**< the reverse one if exists */
81 ir_op *normal_pop_op; /**< the normal one with tos pop */
82 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
/* NOTE(review): the closing "} exchange_tmpl;" line is missing from this
 * listing — restore it from upstream libFirm ia32_x87.c. */
/**
86 * An entry on the simulated x87 stack.
*/
88 typedef struct _st_entry {
89 int reg_idx; /**< the virtual register index of this stack value */
90 ir_node *node; /**< the node that produced this value */
/* NOTE(review): closing "} st_entry;" line is missing from this listing. */
/** The simulated x87 register stack: a circular buffer of st_entry. */
96 typedef struct _x87_state {
97 st_entry st[N_x87_REGS]; /**< the register stack */
98 int depth; /**< the current stack depth */
99 int tos; /**< position of the tos */
100 x87_simulator *sim; /**< The simulator. */
/* NOTE(review): closing "} x87_state;" line is missing from this listing. */
103 /** An empty state, used for blocks without fp instructions. */
104 static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
105 static x87_state *empty = (x87_state *)&_empty;
/* NOTE(review): the "enum {" opener for these constants is missing from this
 * listing. They are the return protocol of sim_func implementations. */
108 NO_NODE_ADDED = 0, /**< No node was added. */
109 NODE_ADDED = 1 /**< A node was added by the simulator in the schedule. */
/**
113 * The type of an instruction simulator function.
115 * @param state the x87 state
116 * @param n the node to be simulated
118 * @return NODE_ADDED if a node was added AFTER n in schedule,
*/
121 typedef int (*sim_func)(x87_state *state, ir_node *n);
/**
124 * A block state: Every block has a x87 state at the beginning and at the end.
*/
126 typedef struct _blk_state {
127 x87_state *begin; /**< state at the begin or NULL if not assigned */
128 x87_state *end; /**< state at the end or NULL if not assigned */
/* NOTE(review): closing "} blk_state;" line is missing from this listing. */
131 #define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
133 /** liveness bitset for vfp registers. */
134 typedef unsigned char vfp_liveness;
/** The x87 simulator: walks all blocks and rewrites virtual fp nodes. */
139 struct _x87_simulator {
140 struct obstack obst; /**< An obstack for fast allocating. */
141 pmap *blk_states; /**< Map blocks to states. */
142 const arch_env_t *arch_env; /**< The architecture environment. */
143 be_lv_t *lv; /**< intrablock liveness. */
144 vfp_liveness *live; /**< Liveness information. */
145 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
146 waitq *worklist; /**< Worklist of blocks that must be processed. */
147 ia32_isa_t *isa; /**< the ISA object */
/* NOTE(review): closing "};" line is missing from this listing. */
/**
151 * Returns the current stack depth.
153 * @param state the x87 state
155 * @return the x87 stack depth
*/
157 static int x87_get_depth(const x87_state *state)
/* NOTE(review): the body (presumably "{ return state->depth; }") is missing
 * from this listing — confirm against upstream ia32_x87.c. */
160 } /* x87_get_depth */
/**
163 * Return the virtual register index at st(pos).
165 * @param state the x87 state
166 * @param pos a stack position
168 * @return the vfp register index that produced the value at st(pos)
*/
170 static int x87_get_st_reg(const x87_state *state, int pos)
/* NOTE(review): the opening "{" line is missing from this listing. */
172 assert(pos < state->depth);
173 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
174 } /* x87_get_st_reg */
/**
178 * Return the node at st(pos).
180 * @param state the x87 state
181 * @param pos a stack position
183 * @return the IR node that produced the value at st(pos)
*/
185 static ir_node *x87_get_st_node(const x87_state *state, int pos)
/* NOTE(review): the opening "{" line is missing from this listing. */
187 assert(pos < state->depth);
188 return state->st[MASK_TOS(state->tos + pos)].node;
189 } /* x87_get_st_node */
/**
192 * Dump the stack for debugging.
194 * @param state the x87 state
*/
196 static void x87_dump_stack(const x87_state *state)
/* NOTE(review): the opening "{", the "int i;" declaration, and the enclosing
 * "#ifdef DEBUG_libfirm" line are missing from this listing. */
200 for (i = state->depth - 1; i >= 0; --i) {
201 DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
202 x87_get_st_node(state, i)));
204 DB((dbg, LEVEL_2, "<-- TOS\n"));
205 } /* x87_dump_stack */
206 #endif /* DEBUG_libfirm */
/**
209 * Set a virtual register to st(pos).
211 * @param state the x87 state
212 * @param reg_idx the vfp register index that should be set
213 * @param node the IR node that produces the value of the vfp register
214 * @param pos the stack position where the new value should be entered
*/
216 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
/* NOTE(review): the opening "{" and closing "}" lines are missing from this
 * listing. */
218 assert(0 < state->depth);
219 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
220 state->st[MASK_TOS(state->tos + pos)].node = node;
222 DB((dbg, LEVEL_2, "After SET_REG: "));
223 DEBUG_ONLY(x87_dump_stack(state));
/**
227 * Set the tos virtual register.
229 * @param state the x87 state
230 * @param reg_idx the vfp register index that should be set
231 * @param node the IR node that produces the value of the vfp register
*/
233 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node)
/* Convenience wrapper for x87_set_st() at position 0 (top of stack).
 * NOTE(review): the "{" and "}" lines are missing from this listing. */
235 x87_set_st(state, reg_idx, node, 0);
/**
239 * Swap st(0) with st(pos).
241 * @param state the x87 state
242 * @param pos the stack position to change the tos with
*/
244 static void x87_fxch(x87_state *state, int pos)
/* NOTE(review): the "{", the "st_entry entry;" declaration and the closing
 * "}" are missing from this listing. */
247 assert(pos < state->depth);
249 entry = state->st[MASK_TOS(state->tos + pos)];
250 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
251 state->st[MASK_TOS(state->tos)] = entry;
253 DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
/**
257 * Convert a virtual register to the stack index.
259 * @param state the x87 state
260 * @param reg_idx the register vfp index
262 * @return the stack position where the register is stacked
263 * or -1 if the virtual register was not found
*/
265 static int x87_on_stack(const x87_state *state, int reg_idx)
/* NOTE(review): the "{", the "return i;" on a hit, the final "return -1;"
 * and the closing "}" are missing from this listing. */
267 int i, tos = state->tos;
269 for (i = 0; i < state->depth; ++i)
270 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
/**
276 * Push a virtual Register onto the stack, double pushed allowed.
278 * @param state the x87 state
279 * @param reg_idx the register vfp index
280 * @param node the node that produces the value of the vfp register
*/
282 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node)
/* NOTE(review): the "{", the "++state->depth;" increment and the closing "}"
 * are missing from this listing — confirm against upstream ia32_x87.c. */
284 assert(state->depth < N_x87_REGS && "stack overrun");
287 state->tos = MASK_TOS(state->tos - 1);
288 state->st[state->tos].reg_idx = reg_idx;
289 state->st[state->tos].node = node;
291 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
/**
295 * Push a virtual Register onto the stack, double pushes are NOT allowed.
297 * @param state the x87 state
298 * @param reg_idx the register vfp index
299 * @param node the node that produces the value of the vfp register
300 * @param dbl_push if != 0 double pushes are allowed
*/
302 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
/* NOTE(review): the @param dbl_push above is stale — the signature takes no
 * such parameter. "{"/"}" lines are missing from this listing. */
304 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
306 x87_push_dbl(state, reg_idx, node);
/**
310 * Pop a virtual Register from the stack.
312 * @param state the x87 state
*/
314 static void x87_pop(x87_state *state)
/* NOTE(review): the "{", the "--state->depth;" decrement and the closing "}"
 * are missing from this listing — confirm against upstream ia32_x87.c. */
316 assert(state->depth > 0 && "stack underrun");
319 state->tos = MASK_TOS(state->tos + 1);
321 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
/**
325 * Empty the fpu stack
327 * @param state the x87 state
*/
329 static void x87_emms(x87_state *state)
/* NOTE(review): the body is missing from this listing — presumably it resets
 * state->depth and state->tos to 0; confirm against upstream ia32_x87.c. */
/**
336 * Returns the block state of a block.
338 * @param sim the x87 simulator handle
339 * @param block the current block
341 * @return the block state
*/
343 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
/* Lazily creates and caches a blk_state per block in sim->blk_states.
 * NOTE(review): the "{", the "if (entry == NULL) {" guard around the
 * allocation, an early return of bl_state and the closing "}" are missing
 * from this listing. */
345 pmap_entry *entry = pmap_find(sim->blk_states, block);
348 blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
349 bl_state->begin = NULL;
350 bl_state->end = NULL;
352 pmap_insert(sim->blk_states, block, bl_state);
356 return PTR_TO_BLKSTATE(entry->value);
357 } /* x87_get_bl_state */
/**
360 * Creates a new x87 state.
362 * @param sim the x87 simulator handle
364 * @return a new x87 state
*/
366 static x87_state *x87_alloc_state(x87_simulator *sim)
/* NOTE(review): the "{", the initialization of res->sim and the "return res;"
 * are missing from this listing. */
368 x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));
372 } /* x87_alloc_state */
/**
 * Clone an x87 state.
377 * @param sim the x87 simulator handle
378 * @param src the x87 state that will be cloned
380 * @return a cloned copy of the src state
*/
382 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
/* NOTE(review): the "{" and "return res;" lines are missing from this
 * listing. */
384 x87_state *res = x87_alloc_state(sim);
386 memcpy(res, src, sizeof(*res));
388 } /* x87_clone_state */
/**
391 * Patch a virtual instruction into a x87 one and return
392 * the node representing the result value.
394 * @param n the IR node to patch
395 * @param op the x87 opcode to patch in
*/
397 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
/* Swaps the node's opcode for the real x87 one and normalizes all float
 * results to mode_E (the x87-internal 80-bit format).
 * NOTE(review): the "{", the set_irn_op() call, the tracking/return of the
 * float Proj ("res") and the closing "}" are missing from this listing. */
399 ir_mode *mode = get_irn_mode(n);
404 if (mode == mode_T) {
405 /* patch all Proj's */
406 const ir_edge_t *edge;
408 foreach_out_edge(n, edge) {
409 ir_node *proj = get_edge_src_irn(edge);
411 mode = get_irn_mode(proj);
412 if (mode_is_float(mode)) {
414 set_irn_mode(proj, mode_E);
418 } else if (mode_is_float(mode))
419 set_irn_mode(n, mode_E);
421 } /* x87_patch_insn */
/**
424 * Returns the first Proj of a mode_T node having a given mode.
426 * @param n the mode_T node
427 * @param m the desired mode of the Proj
428 * @return The first Proj of mode @p m found or NULL.
*/
430 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
/* NOTE(review): the "{", the "return proj;" on a match, the final
 * "return NULL;" and the closing "}" are missing from this listing. */
432 const ir_edge_t *edge;
434 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
436 foreach_out_edge(n, edge) {
437 ir_node *proj = get_edge_src_irn(edge);
438 if (get_irn_mode(proj) == m)
443 } /* get_irn_Proj_for_mode */
/**
446 * Wrap the arch_* function here so we can check for errors.
*/
448 static INLINE const arch_register_t *x87_get_irn_register(const ir_node *irn)
/* Asserts that the node is assigned to the vfp register class before use.
 * NOTE(review): the "{" and "return res;" lines are missing from this
 * listing. */
450 const arch_register_t *res = arch_get_irn_register(irn);
452 assert(res->reg_class->regs == ia32_vfp_regs);
454 } /* x87_get_irn_register */
456 /* -------------- x87 perm --------------- */
/**
459 * Creates a fxch for shuffle.
461 * @param state the x87 state
462 * @param pos parameter for fxch
463 * @param block the block were fxch is inserted
465 * Creates a new fxch node and reroute the user of the old node
468 * @return the fxch node
*/
470 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block)
/* NOTE(review): the "{", the "ir_node *fxch;" declaration, the keep_alive /
 * return of fxch and the closing "}" are missing from this listing. */
473 ia32_x87_attr_t *attr;
475 fxch = new_rd_ia32_fxch(NULL, get_irn_irg(block), block);
476 attr = get_ia32_x87_attr(fxch);
477 attr->x87[0] = &ia32_st_regs[pos];
478 attr->x87[2] = &ia32_st_regs[0];
/* update the simulated stack to reflect the exchange */
482 x87_fxch(state, pos);
484 } /* x87_fxch_shuffle */
/**
487 * Calculate the necessary permutations to reach dst_state.
489 * These permutations are done with fxch instructions and placed
490 * at the end of the block.
492 * Note that critical edges are removed here, so we need only
493 * a shuffle if the current block has only one successor.
495 * @param sim the simulator handle
496 * @param block the current block
497 * @param state the current x87 stack state, might be modified
498 * @param dst_block the destination block
499 * @param dst_state destination state
*/
503 static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
504 x87_state *state, ir_node *dst_block,
505 const x87_state *dst_state)
/* NOTE(review): this listing has dropped many lines of this function
 * (opening brace, cycle-detection branches, the "after" bookkeeping, early
 * returns and the final return) — recover the full body from upstream
 * libFirm ia32_x87.c before relying on the control flow shown here. */
507 int i, n_cycles, k, ri;
508 unsigned cycles[4], all_mask;
509 char cycle_idx[4][8];
510 ir_node *fxch, *before, *after;
514 assert(state->depth == dst_state->depth);
516 /* Some mathematics here:
517 If we have a cycle of length n that includes the tos,
518 we need n-1 exchange operations.
519 We can always add the tos and restore it, so we need
520 n+1 exchange operations for a cycle not containing the tos.
521 So, the maximum of needed operations is for a cycle of 7
522 not including the tos == 8.
523 This is the same number of ops we would need for using stores,
524 so exchange is cheaper (we save the loads).
525 On the other hand, we might need an additional exchange
526 in the next block to bring one operand on top, so the
527 number of ops in the first case is identical.
528 Further, no more than 4 cycles can exists (4 x 2).
530 all_mask = (1 << (state->depth)) - 1;
/* detect all permutation cycles between state and dst_state */
532 for (n_cycles = 0; all_mask; ++n_cycles) {
533 int src_idx, dst_idx;
535 /* find the first free slot */
536 for (i = 0; i < state->depth; ++i) {
537 if (all_mask & (1 << i)) {
538 all_mask &= ~(1 << i);
540 /* check if there are differences here */
541 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
547 /* no more cycles found */
/* record the members of this cycle */
552 cycles[n_cycles] = (1 << i);
553 cycle_idx[n_cycles][k++] = i;
554 for (src_idx = i; ; src_idx = dst_idx) {
555 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
557 if ((all_mask & (1 << dst_idx)) == 0)
560 cycle_idx[n_cycles][k++] = dst_idx;
561 cycles[n_cycles] |= (1 << dst_idx);
562 all_mask &= ~(1 << dst_idx);
564 cycle_idx[n_cycles][k] = -1;
568 /* no permutation needed */
572 /* Hmm: permutation needed */
573 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
574 DEBUG_ONLY(x87_dump_stack(state));
575 DB((dbg, LEVEL_2, " to\n"));
576 DEBUG_ONLY(x87_dump_stack(dst_state));
580 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
581 for (ri = 0; ri < n_cycles; ++ri) {
582 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
583 for (k = 0; cycle_idx[ri][k] != -1; ++k)
584 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
585 DB((dbg, LEVEL_2, "\n"));
/**
592 * Find the place node must be insert.
593 * We have only one successor block, so the last instruction should
*/
596 before = sched_last(block);
597 assert(is_cfop(before));
599 /* now do the permutations */
600 for (ri = 0; ri < n_cycles; ++ri) {
601 if ((cycles[ri] & 1) == 0) {
602 /* this cycle does not include the tos */
603 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
605 sched_add_after(after, fxch);
607 sched_add_before(before, fxch);
610 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
611 fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
613 sched_add_after(after, fxch);
615 sched_add_before(before, fxch);
618 if ((cycles[ri] & 1) == 0) {
619 /* this cycle does not include the tos */
620 fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
621 sched_add_after(after, fxch);
/**
628 * Create a fxch node before another node.
630 * @param state the x87 state
631 * @param n the node after the fxch
632 * @param pos exchange st(pos) with st(0)
 * @return the new fxch node
*/
636 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
/* NOTE(review): the "{", the "ir_node *fxch;" declaration, the keep_alive /
 * return of fxch and the closing "}" are missing from this listing. */
639 ia32_x87_attr_t *attr;
640 ir_graph *irg = get_irn_irg(n);
641 ir_node *block = get_nodes_block(n);
/* update the simulated stack first, then emit the matching instruction */
643 x87_fxch(state, pos);
645 fxch = new_rd_ia32_fxch(NULL, irg, block);
646 attr = get_ia32_x87_attr(fxch);
647 attr->x87[0] = &ia32_st_regs[pos];
648 attr->x87[2] = &ia32_st_regs[0];
652 sched_add_before(n, fxch);
653 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
655 } /* x87_create_fxch */
/**
658 * Create a fpush before node n.
660 * @param state the x87 state
661 * @param n the node after the fpush
662 * @param pos push st(pos) on stack
663 * @param op_idx replace input op_idx of n with the fpush result
*/
665 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx)
/* NOTE(review): the "{" and closing "}" lines are missing from this
 * listing. */
667 ir_node *fpush, *pred = get_irn_n(n, op_idx);
668 ia32_x87_attr_t *attr;
669 const arch_register_t *out = x87_get_irn_register(pred);
/* duplicate the value on the simulated stack (double push allowed here) */
671 x87_push_dbl(state, arch_register_get_index(out), pred);
673 fpush = new_rd_ia32_fpush(NULL, get_irn_irg(n), get_nodes_block(n));
674 attr = get_ia32_x87_attr(fpush);
675 attr->x87[0] = &ia32_st_regs[pos];
676 attr->x87[2] = &ia32_st_regs[0];
679 sched_add_before(n, fpush);
681 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
682 } /* x87_create_fpush */
/**
685 * Create a fpop before node n.
687 * @param state the x87 state
688 * @param n the node after the fpop
689 * @param num pop 1 or 2 values
691 * @return the fpop node
*/
693 static int x87_create_fpop(x87_state *state, ir_node *n, int num)
/* NOTE(review): the "{", the x87_pop() call, the loop over num and the
 * return statement are missing from this listing — confirm against upstream
 * ia32_x87.c. ffreep is preferred when the target supports it (faster than
 * fxch + fstp). */
695 ir_node *fpop = NULL;
696 ia32_x87_attr_t *attr;
701 if (ia32_cg_config.use_ffreep)
702 fpop = new_rd_ia32_ffreep(NULL, get_irn_irg(n), get_nodes_block(n));
704 fpop = new_rd_ia32_fpop(NULL, get_irn_irg(n), get_nodes_block(n));
705 attr = get_ia32_x87_attr(fpop);
706 attr->x87[0] = &ia32_st_regs[0];
707 attr->x87[1] = &ia32_st_regs[0];
708 attr->x87[2] = &ia32_st_regs[0];
711 sched_add_before(n, fpop);
712 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
717 } /* x87_create_fpop */
/**
720 * Creates an fldz before node n
722 * @param state the x87 state
723 * @param n the node after the fldz
 * @param regidx the vfp register index to associate with the pushed zero
725 * @return the fldz node
*/
727 static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx)
/* NOTE(review): the "{", the "ir_node *fldz;" declaration, the return
 * statement and the closing "}" are missing from this listing. */
729 ir_graph *irg = get_irn_irg(n);
730 ir_node *block = get_nodes_block(n);
733 fldz = new_rd_ia32_fldz(NULL, irg, block, mode_E);
735 sched_add_before(n, fldz);
736 DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));
/* mirror the push on the simulated stack (no double push expected) */
739 x87_push(state, regidx, fldz);
744 /* --------------------------------- liveness ------------------------------------------ */
/**
747 * The liveness transfer function.
748 * Updates a live set over a single step from a given node to its predecessor.
749 * Everything defined at the node is removed from the set, the uses of the node get inserted.
751 * @param sim The simulator handle.
752 * @param irn The node at which liveness should be computed.
753 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
754 * the registers live after irn.
756 * @return The live bitset.
*/
758 static vfp_liveness vfp_liveness_transfer(x87_simulator *sim, ir_node *irn, vfp_liveness live)
/* NOTE(review): the "{", the "int i, n;" declaration, the "return live;"
 * and the closing "}" are missing from this listing. */
761 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
762 const arch_env_t *arch_env = sim->arch_env;
/* kill the defs: a mode_T node defines through its Projs */
764 if (get_irn_mode(irn) == mode_T) {
765 const ir_edge_t *edge;
767 foreach_out_edge(irn, edge) {
768 ir_node *proj = get_edge_src_irn(edge);
770 if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
771 const arch_register_t *reg = x87_get_irn_register(proj);
772 live &= ~(1 << arch_register_get_index(reg));
777 if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
778 const arch_register_t *reg = x87_get_irn_register(irn);
779 live &= ~(1 << arch_register_get_index(reg));
/* gen the uses: every float operand becomes live */
782 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
783 ir_node *op = get_irn_n(irn, i);
785 if (mode_is_float(get_irn_mode(op)) && arch_irn_consider_in_reg_alloc(arch_env, cls, op)) {
786 const arch_register_t *reg = x87_get_irn_register(op);
787 live |= 1 << arch_register_get_index(reg);
791 } /* vfp_liveness_transfer */
/**
794 * Put all live virtual registers at the end of a block into a bitset.
796 * @param sim the simulator handle
797 * @param lv the liveness information
798 * @param bl the block
800 * @return The live bitset at the end of this block
*/
802 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
/* NOTE(review): the "@param lv"/"@param bl" names above are stale — the
 * signature takes (sim, block) and reads lv from sim. The "{", "int i;",
 * the "continue;" after the consider check, the "return live;" and the
 * closing "}" are missing from this listing. */
805 vfp_liveness live = 0;
806 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
807 const arch_env_t *arch_env = sim->arch_env;
808 const be_lv_t *lv = sim->lv;
810 be_lv_foreach(lv, block, be_lv_state_end, i) {
811 const arch_register_t *reg;
812 const ir_node *node = be_lv_get_irn(lv, block, i);
813 if (!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
816 reg = x87_get_irn_register(node);
817 live |= 1 << arch_register_get_index(reg);
821 } /* vfp_liveness_end_of_block */
823 /** get the register mask from an arch_register */
824 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
/**
827 * Return a bitset of argument registers which are live at the end of a node.
829 * @param sim the simulator handle
830 * @param pos the node
831 * @param kill kill mask for the output registers
833 * @return The live bitset.
*/
835 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
/* NOTE(review): the "{" and closing "}" lines are missing from this
 * listing. Reads the per-node liveness cached by update_liveness(). */
837 unsigned idx = get_irn_idx(pos);
839 assert(idx < sim->n_idx);
840 return sim->live[idx] & ~kill;
841 } /* vfp_live_args_after */
/**
844 * Calculate the liveness for a whole block and cache it.
846 * @param sim the simulator handle
847 * @param lv the liveness handle
848 * @param block the block
*/
850 static void update_liveness(x87_simulator *sim, ir_node *block)
/* NOTE(review): the "@param lv" above is stale — lv comes from sim. The "{",
 * the "ir_node *irn;"/"unsigned idx;" declarations, the Phi early-break and
 * the closing "}" are missing from this listing. */
852 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
856 /* now iterate through the block backward and cache the results */
857 sched_foreach_reverse(block, irn) {
858 /* stop at the first Phi: this produces the live-in */
862 idx = get_irn_idx(irn);
863 sim->live[idx] = live;
865 live = vfp_liveness_transfer(sim, irn, live);
/* the block's own index caches the live-in set */
867 idx = get_irn_idx(block);
868 sim->live[idx] = live;
869 } /* update_liveness */
/**
872 * Returns true if a register is live in a set.
874 * @param reg_idx the vfp register index
875 * @param live a live bitset
*/
877 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
/**
881 * Dump liveness info.
883 * @param live the live bitset
*/
885 static void vfp_dump_live(vfp_liveness live)
/* NOTE(review): the "{", "int i;", the enclosing "#ifdef DEBUG_libfirm" and
 * some closing braces are missing from this listing. */
889 DB((dbg, LEVEL_2, "Live after: "));
890 for (i = 0; i < 8; ++i) {
891 if (live & (1 << i)) {
892 DB((dbg, LEVEL_2, "vf%d ", i));
895 DB((dbg, LEVEL_2, "\n"));
896 } /* vfp_dump_live */
897 #endif /* DEBUG_libfirm */
899 /* --------------------------------- simulators ---------------------------------------- */
/** Swap two ints in place (classic temp-variable exchange). */
901 #define XCHG(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
/**
913 * Simulate a virtual binop.
915 * @param state the x87 state
916 * @param n the node that should be simulated (and patched)
917 * @param tmpl the template containing the 4 possible x87 opcodes
919 * @return NO_NODE_ADDED
*/
921 static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl)
/* NOTE(review): this listing has dropped many lines of this function
 * (opening brace, several declarations — dst, permuted, op1_live_after,
 * op2_live_after —, the out_idx/do_pop assignments in each branch, else
 * lines, the x87_pop on do_pop and closing braces). Recover the full body
 * from upstream libFirm ia32_x87.c; the branch structure below selects one
 * of the four opcodes in tmpl depending on which operands stay live. */
923 int op2_idx = 0, op1_idx;
924 int out_idx, do_pop = 0;
925 ia32_x87_attr_t *attr;
927 ir_node *patched_insn;
929 x87_simulator *sim = state->sim;
930 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
931 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
932 const arch_register_t *op1_reg = x87_get_irn_register(op1);
933 const arch_register_t *op2_reg = x87_get_irn_register(op2);
934 const arch_register_t *out = x87_get_irn_register(n);
935 int reg_index_1 = arch_register_get_index(op1_reg);
936 int reg_index_2 = arch_register_get_index(op2_reg);
937 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
941 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
942 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
943 arch_register_get_name(out)));
944 DEBUG_ONLY(vfp_dump_live(live));
945 DB((dbg, LEVEL_1, "Stack before: "));
946 DEBUG_ONLY(x87_dump_stack(state));
948 if (reg_index_1 == REG_VFP_UKNWN) {
952 op1_idx = x87_on_stack(state, reg_index_1);
953 assert(op1_idx >= 0);
954 op1_live_after = is_vfp_live(arch_register_get_index(op1_reg), live);
957 attr = get_ia32_x87_attr(n);
958 permuted = attr->attr.data.ins_permuted;
960 if (reg_index_2 != REG_VFP_NOREG) {
963 if (reg_index_2 == REG_VFP_UKNWN) {
967 /* second operand is a vfp register */
968 op2_idx = x87_on_stack(state, reg_index_2);
969 assert(op2_idx >= 0);
971 = is_vfp_live(arch_register_get_index(op2_reg), live);
974 if (op2_live_after) {
975 /* Second operand is live. */
977 if (op1_live_after) {
978 /* Both operands are live: push the first one.
979 This works even for op1 == op2. */
980 x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
981 /* now do fxxx (tos=tos X op) */
985 dst = tmpl->normal_op;
987 /* Second live, first operand is dead here, bring it to tos. */
989 x87_create_fxch(state, n, op1_idx);
994 /* now do fxxx (tos=tos X op) */
996 dst = tmpl->normal_op;
999 /* Second operand is dead. */
1000 if (op1_live_after) {
1001 /* First operand is live: bring second to tos. */
1003 x87_create_fxch(state, n, op2_idx);
1008 /* now do fxxxr (tos = op X tos) */
1010 dst = tmpl->reverse_op;
1012 /* Both operands are dead here, pop them from the stack. */
1015 /* Both are identically and on tos, no pop needed. */
1016 /* here fxxx (tos = tos X tos) */
1017 dst = tmpl->normal_op;
1020 /* now do fxxxp (op = op X tos, pop) */
1021 dst = tmpl->normal_pop_op;
1025 } else if (op1_idx == 0) {
1026 assert(op1_idx != op2_idx);
1027 /* now do fxxxrp (op = tos X op, pop) */
1028 dst = tmpl->reverse_pop_op;
1032 /* Bring the second on top. */
1033 x87_create_fxch(state, n, op2_idx);
1034 if (op1_idx == op2_idx) {
1035 /* Both are identically and on tos now, no pop needed. */
1038 /* use fxxx (tos = tos X tos) */
1039 dst = tmpl->normal_op;
1042 /* op2 is on tos now */
1044 /* use fxxxp (op = op X tos, pop) */
1045 dst = tmpl->normal_pop_op;
1053 /* second operand is an address mode */
1054 if (op1_live_after) {
1055 /* first operand is live: push it here */
1056 x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
1059 /* first operand is dead: bring it to tos */
1061 x87_create_fxch(state, n, op1_idx);
1066 /* use fxxx (tos = tos X mem) */
1067 dst = permuted ? tmpl->reverse_op : tmpl->normal_op;
1071 patched_insn = x87_patch_insn(n, dst);
1072 x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);
1077 /* patch the operation */
1078 attr->x87[0] = op1_reg = &ia32_st_regs[op1_idx];
1079 if (reg_index_2 != REG_VFP_NOREG) {
1080 attr->x87[1] = op2_reg = &ia32_st_regs[op2_idx];
1082 attr->x87[2] = out = &ia32_st_regs[out_idx];
1084 if (reg_index_2 != REG_VFP_NOREG) {
1085 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
1086 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
1087 arch_register_get_name(out)));
1089 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
1090 arch_register_get_name(op1_reg),
1091 arch_register_get_name(out)));
1094 return NO_NODE_ADDED;
/**
1098 * Simulate a virtual Unop.
1100 * @param state the x87 state
1101 * @param n the node that should be simulated (and patched)
1102 * @param op the x87 opcode that will replace n's opcode
1104 * @return NO_NODE_ADDED
*/
1106 static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
/* NOTE(review): the "{", some else-branch lines and closing braces are
 * missing from this listing — recover from upstream ia32_x87.c. */
1108 int op1_idx, out_idx;
1109 x87_simulator *sim = state->sim;
1110 const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, UNOP_IDX));
1111 const arch_register_t *out = x87_get_irn_register(n);
1112 ia32_x87_attr_t *attr;
1113 unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
1115 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
1116 DEBUG_ONLY(vfp_dump_live(live));
1118 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1120 if (is_vfp_live(arch_register_get_index(op1), live)) {
1121 /* push the operand here */
1122 x87_create_fpush(state, n, op1_idx, UNOP_IDX);
1126 /* operand is dead, bring it to tos */
1128 x87_create_fxch(state, n, op1_idx);
/* the unop always works on and writes to the tos */
1133 x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
1135 attr = get_ia32_x87_attr(n);
1136 attr->x87[0] = op1 = &ia32_st_regs[0];
1137 attr->x87[2] = out = &ia32_st_regs[0];
1138 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
1140 return NO_NODE_ADDED;
/**
1144 * Simulate a virtual Load instruction.
1146 * @param state the x87 state
1147 * @param n the node that should be simulated (and patched)
1148 * @param op the x87 opcode that will replace n's opcode
1150 * @return NO_NODE_ADDED
*/
1152 static int sim_load(x87_state *state, ir_node *n, ir_op *op)
/* A load pushes its result on the x87 stack, so it always produces st(0).
 * NOTE(review): the "{" and closing "}" lines are missing from this
 * listing. */
1154 const arch_register_t *out = x87_get_irn_register(n);
1155 ia32_x87_attr_t *attr;
1157 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
1158 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1159 assert(out == x87_get_irn_register(n));
1160 attr = get_ia32_x87_attr(n);
1161 attr->x87[2] = out = &ia32_st_regs[0];
1162 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1164 return NO_NODE_ADDED;
/**
1168 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1170 * @param store The store
1171 * @param old_val The former value
1172 * @param new_val The new value
*/
1174 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
/* NOTE(review): the "{", the "int i;" declaration, a "continue;" and closing
 * braces are missing from this listing. Uses the safe edge iterator because
 * set_irn_n() mutates the out-edge list being walked. */
1176 const ir_edge_t *edge, *ne;
1178 foreach_out_edge_safe(old_val, edge, ne) {
1179 ir_node *user = get_edge_src_irn(edge);
1181 if (! user || user == store)
1184 /* if the user is scheduled after the store: rewire */
1185 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1187 /* find the input of the user pointing to the old value */
1188 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1189 if (get_irn_n(user, i) == old_val)
1190 set_irn_n(user, i, new_val);
1194 } /* collect_and_rewire_users */
/**
1197 * Simulate a virtual Store.
1199 * @param state the x87 state
1200 * @param n the node that should be simulated (and patched)
1201 * @param op the x87 store opcode
1202 * @param op_p the x87 store and pop opcode
*/
1204 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p)
/* NOTE(review): this listing has dropped many lines of this function
 * (opening brace, the "ir_mode *mode;" declaration, else lines, the
 * x87_pop() calls, "insn = NODE_ADDED;" updates, the final return and
 * closing braces). Recover the full body from upstream libFirm
 * ia32_x87.c before relying on the control flow shown here. */
1206 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1207 const arch_register_t *op2 = x87_get_irn_register(val);
1208 unsigned live = vfp_live_args_after(state->sim, n, 0);
1209 int insn = NO_NODE_ADDED;
1210 ia32_x87_attr_t *attr;
1211 int op2_reg_idx, op2_idx, depth;
1212 int live_after_node;
1215 op2_reg_idx = arch_register_get_index(op2);
1216 if (op2_reg_idx == REG_VFP_UKNWN) {
1217 /* just take any value from stack */
1218 if (state->depth > 0) {
1220 DEBUG_ONLY(op2 = NULL);
1221 live_after_node = 1;
1223 /* produce a new value which we will consume immediately */
1224 x87_create_fldz(state, n, op2_reg_idx);
1225 live_after_node = 0;
1226 op2_idx = x87_on_stack(state, op2_reg_idx);
1227 assert(op2_idx >= 0);
1230 op2_idx = x87_on_stack(state, op2_reg_idx);
1231 live_after_node = is_vfp_live(arch_register_get_index(op2), live);
1232 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1233 assert(op2_idx >= 0);
1236 mode = get_ia32_ls_mode(n);
1237 depth = x87_get_depth(state);
1239 if (live_after_node) {
1241 Problem: fst doesn't support mode_E (spills), only fstp does
1243 - stack not full: push value and fstp
1244 - stack full: fstp value and load again
1245 Note that we cannot test on mode_E, because floats might be 96bit ...
1247 if (get_mode_size_bits(mode) > 64 || mode == mode_Ls) {
1248 if (depth < N_x87_REGS) {
1249 /* ok, we have a free register: push + fstp */
1250 x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
1252 x87_patch_insn(n, op_p);
1254 ir_node *vfld, *mem, *block, *rproj, *mproj;
1257 /* stack full here: need fstp + load */
1259 x87_patch_insn(n, op_p);
1261 block = get_nodes_block(n);
1262 irg = get_irn_irg(n);
1263 vfld = new_rd_ia32_vfld(NULL, irg, block, get_irn_n(n, 0), get_irn_n(n, 1), new_rd_NoMem(irg), get_ia32_ls_mode(n));
1265 /* copy all attributes */
1266 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1267 if (is_ia32_use_frame(n))
1268 set_ia32_use_frame(vfld);
1269 set_ia32_op_type(vfld, ia32_AddrModeS);
1270 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1271 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1272 set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
1274 rproj = new_r_Proj(irg, block, vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
1275 mproj = new_r_Proj(irg, block, vfld, mode_M, pn_ia32_vfld_M);
1276 mem = get_irn_Proj_for_mode(n, mode_M);
1278 assert(mem && "Store memory not found");
1280 arch_set_irn_register(rproj, op2);
1282 /* reroute all former users of the store memory to the load memory */
1283 edges_reroute(mem, mproj, irg);
1284 /* set the memory input of the load to the store memory */
1285 set_irn_n(vfld, n_ia32_vfld_mem, mem);
1287 sched_add_after(n, vfld);
1288 sched_add_after(vfld, rproj);
1290 /* rewire all users, scheduled after the store, to the loaded value */
1291 collect_and_rewire_users(n, val, rproj);
1296 /* we can only store the tos to memory */
1298 x87_create_fxch(state, n, op2_idx);
1300 /* mode != mode_E -> use normal fst */
1301 x87_patch_insn(n, op);
1304 /* we can only store the tos to memory */
1306 x87_create_fxch(state, n, op2_idx);
1309 x87_patch_insn(n, op_p);
1312 attr = get_ia32_x87_attr(n);
1313 attr->x87[1] = op2 = &ia32_st_regs[0];
1314 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/* Generator macros: each expands to a sim_* wrapper forwarding to the generic
 * sim_binop/sim_load/sim_unop/sim_store simulators with the matching ia32
 * opcode(s).  NOTE(review): the closing braces of the generated functions are
 * not visible in this excerpt. */
1319 #define _GEN_BINOP(op, rev) \
1320 static int sim_##op(x87_state *state, ir_node *n) { \
1321 exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
1322 return sim_binop(state, n, &tmpl); \
/* GEN_BINOP: reverse form is the op itself; GEN_BINOPR: reverse form is op##r */
1325 #define GEN_BINOP(op) _GEN_BINOP(op, op)
1326 #define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
/* loads: nop names the concrete (non-virtual) opcode to patch in */
1328 #define GEN_LOAD2(op, nop) \
1329 static int sim_##op(x87_state *state, ir_node *n) { \
1330 return sim_load(state, n, op_ia32_##nop); \
1333 #define GEN_LOAD(op) GEN_LOAD2(op, op)
/* unary ops forwarded to the generic unop simulator */
1335 #define GEN_UNOP(op) \
1336 static int sim_##op(x87_state *state, ir_node *n) { \
1337 return sim_unop(state, n, op_ia32_##op); \
/* stores: both the plain and the popping opcode variant are passed along */
1340 #define GEN_STORE(op) \
1341 static int sim_##op(x87_state *state, ir_node *n) { \
1342 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
1364 * Simulate a virtual fisttp.
1366 * @param state the x87 state
1367 * @param n the node that should be simulated (and patched)
1369 static int sim_fisttp(x87_state *state, ir_node *n)
1371 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1372 const arch_register_t *op2 = x87_get_irn_register(val);
1373 int insn = NO_NODE_ADDED;
1374 ia32_x87_attr_t *attr;
1375 int op2_reg_idx, op2_idx, depth;
1377 op2_reg_idx = arch_register_get_index(op2);
1378 if (op2_reg_idx == REG_VFP_UKNWN) {
/* value to store is the "unknown" register: any stack slot will do */
1379 /* just take any value from stack */
1380 if (state->depth > 0) {
1382 DEBUG_ONLY(op2 = NULL);
/* empty stack: materialize a zero so there is something to store and pop */
1384 /* produce a new value which we will consume immediately */
1385 x87_create_fldz(state, n, op2_reg_idx);
1386 op2_idx = x87_on_stack(state, op2_reg_idx);
1387 assert(op2_idx >= 0);
/* normal case: locate the operand on the simulated x87 stack */
1390 op2_idx = x87_on_stack(state, op2_reg_idx);
1391 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1392 assert(op2_idx >= 0);
1395 depth = x87_get_depth(state);
1397 /* Note: although the value is still live here, it is destroyed because
1398 of the pop. The register allocator is aware of that and introduced a copy
1399 if the value must be alive. */
/* fisttp stores from st(0) only: exchange the operand to the top first */
1401 /* we can only store the tos to memory */
1403 x87_create_fxch(state, n, op2_idx);
/* replace the virtual opcode by the real fisttp */
1406 x87_patch_insn(n, op_ia32_fisttp);
1408 attr = get_ia32_x87_attr(n);
1409 attr->x87[1] = op2 = &ia32_st_regs[0];
1410 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/**
 * Simulate a virtual FtstFnstsw (x87 test against zero + store status word).
 * The operand must end up in st(0) before the real ftst can run.
 */
1415 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1417 x87_simulator *sim = state->sim;
1418 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1419 ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1420 const arch_register_t *reg1 = x87_get_irn_register(op1_node);
1421 int reg_index_1 = arch_register_get_index(reg1);
1422 int op1_idx = x87_on_stack(state, reg_index_1);
1423 unsigned live = vfp_live_args_after(sim, n, 0);
1425 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, arch_register_get_name(reg1)));
1426 DEBUG_ONLY(vfp_dump_live(live));
1427 DB((dbg, LEVEL_1, "Stack before: "));
1428 DEBUG_ONLY(x87_dump_stack(state));
1429 assert(op1_idx >= 0);
1432 /* bring the value to tos */
1433 x87_create_fxch(state, n, op1_idx);
1437 /* patch the operation */
1438 x87_patch_insn(n, op_ia32_FtstFnstsw);
1439 reg1 = &ia32_st_regs[op1_idx];
1440 attr->x87[0] = reg1;
1441 attr->x87[1] = NULL;
1442 attr->x87[2] = NULL;
/* operand dead after the test: free its stack slot right after this insn.
 * NOTE(review): elided lines probably return NODE_ADDED here — verify. */
1444 if (!is_vfp_live(reg_index_1, live)) {
1445 x87_create_fpop(state, sched_next(n), 1);
1449 return NO_NODE_ADDED;
/* Simulate a virtual Fucom/FucomFnstsw/Fucomi compare: arrange the operands
 * on the x87 stack as the concrete instruction requires, pop operands that
 * die here, and patch the virtual opcode to the matching real one.  The
 * 'permuted' flag records whenever the emitted operand order is swapped
 * relative to the virtual node. */
1453 * @param state the x87 state
1454 * @param n the node that should be simulated (and patched)
1456 static int sim_Fucom(x87_state *state, ir_node *n)
1460 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1462 x87_simulator *sim = state->sim;
1463 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1464 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1465 const arch_register_t *op1 = x87_get_irn_register(op1_node);
1466 const arch_register_t *op2 = x87_get_irn_register(op2_node);
1467 int reg_index_1 = arch_register_get_index(op1);
1468 int reg_index_2 = arch_register_get_index(op2);
1469 unsigned live = vfp_live_args_after(sim, n, 0);
1470 int permuted = attr->attr.data.ins_permuted;
1473 int node_added = NO_NODE_ADDED;
1475 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1476 arch_register_get_name(op1), arch_register_get_name(op2)));
1477 DEBUG_ONLY(vfp_dump_live(live));
1478 DB((dbg, LEVEL_1, "Stack before: "));
1479 DEBUG_ONLY(x87_dump_stack(state));
1481 op1_idx = x87_on_stack(state, reg_index_1);
1482 assert(op1_idx >= 0);
1484 /* BEWARE: check for comp a,a cases, they might happen */
1485 if (reg_index_2 != REG_VFP_NOREG) {
1486 /* second operand is a vfp register */
1487 op2_idx = x87_on_stack(state, reg_index_2);
1488 assert(op2_idx >= 0);
1490 if (is_vfp_live(reg_index_2, live)) {
1491 /* second operand is live */
1493 if (is_vfp_live(reg_index_1, live)) {
/* both live: no pops; only ensure one operand sits at the TOS */
1494 /* both operands are live */
1497 /* res = tos X op */
1498 } else if (op2_idx == 0) {
1499 /* res = op X tos */
1500 permuted = !permuted;
1503 /* bring the first one to tos */
1504 x87_create_fxch(state, n, op1_idx);
1508 /* res = tos X op */
1511 /* second live, first operand is dead here, bring it to tos.
1512 This means further, op1_idx != op2_idx. */
1513 assert(op1_idx != op2_idx);
1515 x87_create_fxch(state, n, op1_idx);
1520 /* res = tos X op, pop */
1524 /* second operand is dead */
1525 if (is_vfp_live(reg_index_1, live)) {
1526 /* first operand is live: bring second to tos.
1527 This means further, op1_idx != op2_idx. */
1528 assert(op1_idx != op2_idx);
1530 x87_create_fxch(state, n, op2_idx);
1535 /* res = op X tos, pop */
1537 permuted = !permuted;
1540 /* both operands are dead here, check first for identity. */
1541 if (op1_idx == op2_idx) {
1542 /* identically, one pop needed */
1544 x87_create_fxch(state, n, op1_idx);
1548 /* res = tos X op, pop */
1551 /* different, move them to st and st(1) and pop both.
1552 The tricky part is to get one into st(1).*/
1553 else if (op2_idx == 1) {
1554 /* good, second operand is already in the right place, move the first */
1556 /* bring the first on top */
1557 x87_create_fxch(state, n, op1_idx);
1558 assert(op2_idx != 0);
1561 /* res = tos X op, pop, pop */
1563 } else if (op1_idx == 1) {
1564 /* good, first operand is already in the right place, move the second */
1566 /* bring the first on top */
1567 x87_create_fxch(state, n, op2_idx);
1568 assert(op1_idx != 0);
1571 /* res = op X tos, pop, pop */
1572 permuted = !permuted;
1576 /* if one is already the TOS, we need two fxch */
1578 /* first one is TOS, move to st(1) */
1579 x87_create_fxch(state, n, 1);
1580 assert(op2_idx != 1);
1582 x87_create_fxch(state, n, op2_idx);
1584 /* res = op X tos, pop, pop */
1586 permuted = !permuted;
1588 } else if (op2_idx == 0) {
1589 /* second one is TOS, move to st(1) */
1590 x87_create_fxch(state, n, 1);
1591 assert(op1_idx != 1);
1593 x87_create_fxch(state, n, op1_idx);
1595 /* res = tos X op, pop, pop */
1598 /* none of them is either TOS or st(1), 3 fxch needed */
1599 x87_create_fxch(state, n, op2_idx);
1600 assert(op1_idx != 0);
1601 x87_create_fxch(state, n, 1);
1603 x87_create_fxch(state, n, op1_idx);
1605 /* res = tos X op, pop, pop */
1612 /* second operand is an address mode */
1613 if (is_vfp_live(reg_index_1, live)) {
1614 /* first operand is live: bring it to TOS */
1616 x87_create_fxch(state, n, op1_idx)
;
1620 /* first operand is dead: bring it to tos */
1622 x87_create_fxch(state, n, op1_idx);
/* select the concrete opcode depending on how many pops were decided above */
1629 /* patch the operation */
1630 if (is_ia32_vFucomFnstsw(n)) {
1634 case 0: dst = op_ia32_FucomFnstsw; break;
1635 case 1: dst = op_ia32_FucompFnstsw; break;
1636 case 2: dst = op_ia32_FucomppFnstsw; break;
1637 default: panic("invalid popcount in sim_Fucom");
1640 for (i = 0; i < pops; ++i) {
1643 } else if (is_ia32_vFucomi(n)) {
1645 case 0: dst = op_ia32_Fucomi; break;
1646 case 1: dst = op_ia32_Fucompi; x87_pop(state); break;
/* fucomi has no double-pop form: use fucompi plus an explicit extra fpop */
1648 dst = op_ia32_Fucompi;
1650 x87_create_fpop(state, sched_next(n), 1);
1651 node_added = NODE_ADDED;
1653 default: panic("invalid popcount in sim_Fucom");
1656 panic("invalid operation %+F in sim_FucomFnstsw", n);
1659 x87_patch_insn(n, dst);
1666 op1 = &ia32_st_regs[op1_idx];
1669 op2 = &ia32_st_regs[op2_idx];
/* write the final operand order back for the emitter */
1672 attr->x87[2] = NULL;
1673 attr->attr.data.ins_permuted = permuted;
1676 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1677 arch_register_get_name(op1), arch_register_get_name(op2)));
1679 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1680 arch_register_get_name(op1)));
/**
 * Simulate a be_Keep: pop every kept vfp value that is still on the x87
 * stack but dead after the Keep.
 *
 * @param state the x87 state
 * @param node the be_Keep node
 */
1686 static int sim_Keep(x87_state *state, ir_node *node)
1689 const arch_register_t *op_reg;
1694 int node_added = NO_NODE_ADDED;
1696 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1698 arity = get_irn_arity(node);
1699 for (i = 0; i < arity; ++i) {
1700 op = get_irn_n(node, i);
1701 op_reg = arch_get_irn_register(op);
/* only x87 (vfp) operands are of interest to the simulator */
1702 if (arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1705 reg_id = arch_register_get_index(op_reg);
1706 live = vfp_live_args_after(state->sim, node, 0);
/* still on the stack but dead: free the slot with an fpop after the Keep */
1708 op_stack_idx = x87_on_stack(state, reg_id);
1709 if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live)) {
1710 x87_create_fpop(state, sched_next(node), 1);
1711 node_added = NODE_ADDED;
1715 DB((dbg, LEVEL_1, "Stack after: "));
1716 DEBUG_ONLY(x87_dump_stack(state));
/**
 * Keep a floating point node alive by attaching a 1-input be_Keep behind it.
 * NOTE(review): the declaration/initialization of 'in' (the Keep input
 * array) is not visible in this excerpt — presumably in[] = { node }.
 */
1721 static void keep_float_node_alive(ir_node *node)
1727 const arch_register_class_t *cls;
1729 irg = get_irn_irg(node);
1730 block = get_nodes_block(node);
1731 cls = arch_get_irn_reg_class(node, -1);
1733 keep = be_new_Keep(cls, irg, block, 1, in);
/* the node must already be scheduled; place the Keep right behind it */
1735 assert(sched_is_scheduled(node));
1736 sched_add_after(node, keep);
1740 * Create a copy of a node. Recreate the node if it's a constant.
1742 * @param state the x87 state
1743 * @param n the node to be copied
1745 * @return the copy of n
1747 static ir_node *create_Copy(x87_state *state, ir_node *n)
1749 ir_graph *irg = get_irn_irg(n);
1750 dbg_info *n_dbg = get_irn_dbg_info(n);
1751 ir_mode *mode = get_irn_mode(n);
1752 ir_node *block = get_nodes_block(n);
1753 ir_node *pred = get_irn_n(n, 0);
/* constructor used to recreate a load-constant instead of copying it */
1754 ir_node *(*cnstr)(dbg_info *, ir_graph *, ir_node *, ir_mode *) = NULL;
1756 const arch_register_t *out;
1757 const arch_register_t *op1;
1758 ia32_x87_attr_t *attr;
1760 /* Do not copy constants, recreate them. */
1761 switch (get_ia32_irn_opcode(pred)) {
1762 case iro_ia32_Unknown_VFP:
1764 cnstr = new_rd_ia32_fldz;
1767 cnstr = new_rd_ia32_fld1;
1769 case iro_ia32_fldpi:
1770 cnstr = new_rd_ia32_fldpi;
1772 case iro_ia32_fldl2e:
1773 cnstr = new_rd_ia32_fldl2e;
1775 case iro_ia32_fldl2t:
1776 cnstr = new_rd_ia32_fldl2t;
1778 case iro_ia32_fldlg2:
1779 cnstr = new_rd_ia32_fldlg2;
1781 case iro_ia32_fldln2:
1782 cnstr = new_rd_ia32_fldln2;
1788 out = x87_get_irn_register(n);
1789 op1 = x87_get_irn_register(pred);
1791 if (cnstr != NULL) {
1792 /* copy a constant */
1793 res = (*cnstr)(n_dbg, irg, block, mode);
/* the recreated constant pushes a fresh value onto the simulated stack */
1795 x87_push(state, arch_register_get_index(out), res);
1797 attr = get_ia32_x87_attr(res);
1798 attr->x87[2] = &ia32_st_regs[0];
/* general case: duplicate the source's stack slot with an fpushCopy */
1800 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1802 res = new_rd_ia32_fpushCopy(n_dbg, irg, block, pred, mode);
1804 x87_push(state, arch_register_get_index(out), res);
1806 attr = get_ia32_x87_attr(res);
1807 attr->x87[0] = &ia32_st_regs[op1_idx];
1808 attr->x87[2] = &ia32_st_regs[0];
1810 arch_set_irn_register(res, out);
1816 * Simulate a be_Copy.
1818 * @param state the x87 state
1819 * @param n the node that should be simulated (and patched)
1821 * @return NO_NODE_ADDED
1823 static int sim_Copy(x87_state *state, ir_node *n)
1826 const arch_register_t *out;
1827 const arch_register_t *op1;
1828 const arch_register_class_t *cls;
1829 ir_node *node, *next;
1830 ia32_x87_attr_t *attr;
1831 int op1_idx, out_idx;
/* copies of non-vfp registers do not concern the x87 simulator */
1834 cls = arch_get_irn_reg_class(n, -1);
1835 if (cls->regs != ia32_vfp_regs)
1838 pred = get_irn_n(n, 0);
1839 out = x87_get_irn_register(n);
1840 op1 = x87_get_irn_register(pred);
1841 live = vfp_live_args_after(state->sim, n, REGMASK(out));
1843 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1844 arch_register_get_name(op1), arch_register_get_name(out)));
1845 DEBUG_ONLY(vfp_dump_live(live));
1847 /* handle the infamous unknown value */
1848 if (arch_register_get_index(op1) == REG_VFP_UKNWN) {
1849 /* Operand is still live, a real copy. We need here an fpush that can
1850 hold a a register, so use the fpushCopy or recreate constants */
1851 node = create_Copy(state, n);
1853 assert(is_ia32_fldz(node));
1854 next = sched_next(n);
1857 sched_add_before(next, node);
1859 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1860 arch_get_irn_register(node)->name));
1861 return NO_NODE_ADDED;
1864 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1866 if (is_vfp_live(arch_register_get_index(op1), live)) {
1867 ir_node *pred = get_irn_n(n, 0);
1869 /* Operand is still live, a real copy. We need here an fpush that can
1870 hold a a register, so use the fpushCopy or recreate constants */
1871 node = create_Copy(state, n);
1873 /* We have to make sure the old value doesn't go dead (which can happen
1874 * when we recreate constants). As the simulator expected that value in
1875 * the pred blocks. This is unfortunate as removing it would save us 1
1876 * instruction, but we would have to rerun all the simulation to get
1879 next = sched_next(n);
1882 sched_add_before(next, node);
/* a recreated constant may leave the original pred without users */
1884 if (get_irn_n_edges(pred) == 0) {
1885 keep_float_node_alive(pred);
1888 DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
/* operand dies with the copy: no duplication needed, rename or kill */
1890 out_idx = x87_on_stack(state, arch_register_get_index(out));
1892 if (out_idx >= 0 && out_idx != op1_idx) {
1893 /* Matze: out already on stack? how can this happen? */
1896 /* op1 must be killed and placed where out is */
1898 /* best case, simple remove and rename */
1899 x87_patch_insn(n, op_ia32_Pop);
1900 attr = get_ia32_x87_attr(n);
1901 attr->x87[0] = op1 = &ia32_st_regs[0];
1904 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1906 /* move op1 to tos, store and pop it */
1908 x87_create_fxch(state, n, op1_idx);
1911 x87_patch_insn(n, op_ia32_Pop);
1912 attr = get_ia32_x87_attr(n);
1913 attr->x87[0] = op1 = &ia32_st_regs[out_idx];
1916 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1918 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1920 /* just a virtual copy */
1921 x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
1922 /* don't remove the node to keep the verifier quiet :),
1923 the emitter won't emit any code for the node */
1926 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1927 exchange(n, get_unop_op(n));
1931 return NO_NODE_ADDED;
1935 * Returns the result proj of the call
/* i.e. the Proj carrying the x87 floating point return value (vf0). */
1937 static ir_node *get_call_result_proj(ir_node *call)
1939 const ir_edge_t *edge;
1941 /* search the result proj */
1942 foreach_out_edge(call, edge) {
1943 ir_node *proj = get_edge_src_irn(edge);
1944 long pn = get_Proj_proj(proj);
1946 if (pn == pn_ia32_Call_vf0) {
1952 } /* get_call_result_proj */
1955 * Simulate a ia32_Call.
1957 * @param state the x87 state
1958 * @param n the node that should be simulated
1960 * @return NO_NODE_ADDED
1962 static int sim_Call(x87_state *state, ir_node *n)
1964 ir_type *call_tp = get_ia32_call_attr_const(n)->call_tp;
1968 const arch_register_t *reg;
1970 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1972 /* at the begin of a call the x87 state should be empty */
1973 assert(state->depth == 0 && "stack not empty before call");
1975 if (get_method_n_ress(call_tp) <= 0)
1979 * If the called function returns a float, it is returned in st(0).
1980 * This even happens if the return value is NOT used.
1981 * Moreover, only one return result is supported.
1983 res_type = get_method_res_type(call_tp, 0);
1984 mode = get_type_mode(res_type);
/* non-float (or typeless) results do not occupy the x87 stack */
1986 if (mode == NULL || !mode_is_float(mode))
1989 resproj = get_call_result_proj(n);
1990 assert(resproj != NULL);
/* model the calling convention: the callee leaves its float result in st(0) */
1992 reg = x87_get_irn_register(resproj);
1993 x87_push(state, arch_register_get_index(reg), resproj);
1996 DB((dbg, LEVEL_1, "Stack after: "));
1997 DEBUG_ONLY(x87_dump_stack(state));
1999 return NO_NODE_ADDED;
2003 * Simulate a be_Spill.
2005 * @param state the x87 state
2006 * @param n the node that should be simulated (and patched)
2008 * Should not happen, spills are lowered before x87 simulator see them.
2010 static int sim_Spill(x87_state *state, ir_node *n)
/* unreachable by construction; fall back to the store simulation so NDEBUG
 * builds still do something sensible */
2012 assert(0 && "Spill not lowered");
2013 return sim_fst(state, n);
2017 * Simulate a be_Reload.
2019 * @param state the x87 state
2020 * @param n the node that should be simulated (and patched)
2022 * Should not happen, reloads are lowered before x87 simulator see them.
2024 static int sim_Reload(x87_state *state, ir_node *n)
/* unreachable by construction; fall back to the load simulation so NDEBUG
 * builds still do something sensible */
2026 assert(0 && "Reload not lowered");
2027 return sim_fld(state, n);
2031 * Simulate a be_Return.
2033 * @param state the x87 state
2034 * @param n the node that should be simulated (and patched)
2036 * @return NO_NODE_ADDED
2038 static int sim_Return(x87_state *state, ir_node *n)
2040 int n_res = be_Return_get_n_rets(n);
2041 int i, n_float_res = 0;
2043 /* only floating point return values must reside on stack */
2044 for (i = 0; i < n_res; ++i) {
2045 ir_node *res = get_irn_n(n, be_pos_Return_val + i);
2047 if (mode_is_float(get_irn_mode(res)))
/* at the return, exactly the float results may still occupy the stack */
2050 assert(x87_get_depth(state) == n_float_res);
2052 /* pop them virtually */
2053 for (i = n_float_res - 1; i >= 0; --i)
2056 return NO_NODE_ADDED;
/** Input/output register pair used when simulating a be_Perm. */
2059 typedef struct _perm_data_t {
2060 const arch_register_t *in;
2061 const arch_register_t *out;
2065 * Simulate a be_Perm.
2067 * @param state the x87 state
2068 * @param irn the node that should be simulated (and patched)
2070 * @return NO_NODE_ADDED
2072 static int sim_Perm(x87_state *state, ir_node *irn)
2075 ir_node *pred = get_irn_n(irn, 0);
2077 const ir_edge_t *edge;
2079 /* handle only floating point Perms */
2080 if (! mode_is_float(get_irn_mode(pred)))
2081 return NO_NODE_ADDED;
2083 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
2085 /* Perm is a pure virtual instruction on x87.
2086 All inputs must be on the FPU stack and are pairwise
2087 different from each other.
2088 So, all we need to do is to permutate the stack state. */
2089 n = get_irn_arity(irn);
2090 NEW_ARR_A(int, stack_pos, n);
2092 /* collect old stack positions */
2093 for (i = 0; i < n; ++i) {
2094 const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
2095 int idx = x87_on_stack(state, arch_register_get_index(inreg));
2097 assert(idx >= 0 && "Perm argument not on x87 stack");
2101 /* now do the permutation */
2102 foreach_out_edge(irn, edge) {
2103 ir_node *proj = get_edge_src_irn(edge);
2104 const arch_register_t *out = x87_get_irn_register(proj);
2105 long num = get_Proj_proj(proj);
2107 assert(0 <= num && num < n && "More Proj's than Perm inputs");
/* rebind the output register to the stack slot of the matching input */
2108 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
2110 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
2112 return NO_NODE_ADDED;
/**
 * Simulate a be_Barrier: materialize Unknown_VFP inputs as an fldz so each
 * barrier operand corresponds to a real value on the x87 stack.
 */
2115 static int sim_Barrier(x87_state *state, ir_node *node)
2117 //const arch_env_t *arch_env = state->sim->arch_env;
2120 /* materialize unknown if needed */
2121 arity = get_irn_arity(node);
2122 for (i = 0; i < arity; ++i) {
2123 const arch_register_t *reg;
2126 ia32_x87_attr_t *attr;
2127 ir_node *in = get_irn_n(node, i);
/* only Unknown_VFP inputs need fixing */
2129 if (!is_ia32_Unknown_VFP(in))
2132 /* TODO: not completely correct... */
2133 reg = &ia32_vfp_regs[REG_VFP_UKNWN];
2136 block = get_nodes_block(node);
2137 zero = new_rd_ia32_fldz(NULL, current_ir_graph, block, mode_E);
2138 x87_push(state, arch_register_get_index(reg), zero);
2140 attr = get_ia32_x87_attr(zero);
2141 attr->x87[2] = &ia32_st_regs[0];
2143 sched_add_before(node, zero);
/* route the barrier input to the freshly created fldz */
2145 set_irn_n(node, i, zero);
2148 return NO_NODE_ADDED;
2153 * Kill any dead registers at block start by popping them from the stack.
2155 * @param sim the simulator handle
2156 * @param block the current block
2157 * @param start_state the x87 state at the begin of the block
2159 * @return the x87 state after dead register killed
2161 static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state)
2163 x87_state *state = start_state;
2164 ir_node *first_insn = sched_first(block);
2165 ir_node *keep = NULL;
2166 unsigned live = vfp_live_args_after(sim, block, 0);
2168 int i, depth, num_pop;
/* build a bit mask of stack slots whose register is dead at block entry */
2171 depth = x87_get_depth(state);
2172 for (i = depth - 1; i >= 0; --i) {
2173 int reg = x87_get_st_reg(state, i);
2175 if (! is_vfp_live(reg, live))
2176 kill_mask |= (1 << i);
2180 /* create a new state, will be changed */
2181 state = x87_clone_state(sim, state);
2183 DB((dbg, LEVEL_1, "Killing deads:\n"));
2184 DEBUG_ONLY(vfp_dump_live(live));
2185 DEBUG_ONLY(x87_dump_stack(state));
2187 if (kill_mask != 0 && live == 0) {
2188 /* special case: kill all registers */
2189 if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
2190 if (ia32_cg_config.use_femms) {
2191 /* use FEMMS on AMD processors to clear all */
2192 keep = new_rd_ia32_femms(NULL, get_irn_irg(block), block);
2194 /* use EMMS to clear all */
2195 keep = new_rd_ia32_emms(NULL, get_irn_irg(block), block);
2197 sched_add_before(first_insn, keep);
2203 /* now kill registers */
2205 /* we can only kill from TOS, so bring them up */
2206 if (! (kill_mask & 1)) {
2207 /* search from behind, because we can to a double-pop */
2208 for (i = depth - 1; i >= 0; --i) {
2209 if (kill_mask & (1 << i)) {
2210 kill_mask &= ~(1 << i);
/* mark the slot dead and exchange it up to the TOS for popping */
2217 x87_set_st(state, -1, keep, i);
2218 x87_create_fxch(state, first_insn, i);
2221 if ((kill_mask & 3) == 3) {
2222 /* we can do a double-pop */
2226 /* only a single pop */
2231 kill_mask >>= num_pop;
2232 keep = x87_create_fpop(state, first_insn, num_pop);
2237 } /* x87_kill_deads */
2240 * If we have PhiEs with unknown operands then we have to make sure that some
2241 * value is actually put onto the stack.
2243 static void fix_unknown_phis(x87_state *state, ir_node *block,
2244 ir_node *pred_block, int pos)
2248 sched_foreach(block, node) {
2250 const arch_register_t *reg;
2251 ia32_x87_attr_t *attr;
/* inspect this Phi's operand on the incoming edge 'pos' */
2256 op = get_Phi_pred(node, pos);
2257 if (!is_ia32_Unknown_VFP(op))
2260 reg = arch_get_irn_register(node);
2262 /* create a zero at end of pred block */
2263 zero = new_rd_ia32_fldz(NULL, current_ir_graph, pred_block, mode_E);
2264 x87_push(state, arch_register_get_index(reg), zero);
2266 attr = get_ia32_x87_attr(zero);
2267 attr->x87[2] = &ia32_st_regs[0];
2269 assert(is_ia32_fldz(zero));
2270 sched_add_before(sched_last(pred_block), zero);
/* the Phi now reads the materialized zero instead of Unknown */
2272 set_Phi_pred(node, pos, zero);
2277 * Run a simulation and fix all virtual instructions for a block.
2279 * @param sim the simulator handle
2280 * @param block the current block
2282 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
2285 blk_state *bl_state = x87_get_bl_state(sim, block);
2286 x87_state *state = bl_state->begin;
2287 const ir_edge_t *edge;
2288 ir_node *start_block;
2290 assert(state != NULL);
2291 /* already processed? */
2292 if (bl_state->end != NULL)
2295 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2296 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2297 DEBUG_ONLY(x87_dump_stack(state));
2299 /* at block begin, kill all dead registers */
2300 state = x87_kill_deads(sim, block, state);
2301 /* create a new state, will be changed */
2302 state = x87_clone_state(sim, state);
2304 /* beware, n might change */
2305 for (n = sched_first(block); !sched_is_end(n); n = next) {
2308 ir_op *op = get_irn_op(n);
2310 next = sched_next(n);
/* ops without a registered simulator function need no patching */
2311 if (op->ops.generic == NULL)
2314 func = (sim_func)op->ops.generic;
2317 node_inserted = (*func)(state, n);
2320 sim_func might have added an additional node after n,
2322 beware: n must not be changed by sim_func
2323 (i.e. removed from schedule) in this case
/* re-read the successor so freshly inserted nodes are not skipped */
2325 if (node_inserted != NO_NODE_ADDED)
2326 next = sched_next(n);
2329 start_block = get_irg_start_block(get_irn_irg(block));
2331 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));
2333 /* check if the state must be shuffled */
2334 foreach_block_succ(block, edge) {
2335 ir_node *succ = get_edge_src_irn(edge);
2336 blk_state *succ_state;
2338 if (succ == start_block)
2341 succ_state = x87_get_bl_state(sim, succ);
2343 fix_unknown_phis(state, succ, block, get_edge_src_pos(edge));
/* first visitor defines the successor's begin state and enqueues it */
2345 if (succ_state->begin == NULL) {
2346 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2347 DEBUG_ONLY(x87_dump_stack(state));
2348 succ_state->begin = state;
2350 waitq_put(sim->worklist, succ);
2352 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2353 /* There is already a begin state for the successor, bad.
2354 Do the necessary permutations.
2355 Note that critical edges are removed, so this is always possible:
2356 If the successor has more than one possible input, then it must
2359 x87_shuffle(sim, block, state, succ, succ_state->begin);
2362 bl_state->end = state;
2363 } /* x87_simulate_block */
/** Register the simulator function for one ir opcode (at most one each). */
2365 static void register_sim(ir_op *op, sim_func func)
2367 assert(op->ops.generic == NULL);
2368 op->ops.generic = (op_func) func;
2372 * Create a new x87 simulator.
2374 * @param sim a simulator handle, will be initialized
2375 * @param irg the current graph
2376 * @param arch_env the architecture environment
2378 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg,
2379 const arch_env_t *arch_env)
2381 obstack_init(&sim->obst);
2382 sim->blk_states = pmap_create();
2383 sim->arch_env = arch_env;
2384 sim->n_idx = get_irg_last_idx(irg);
/* one cached liveness entry per node index */
2385 sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);
2387 DB((dbg, LEVEL_1, "--------------------------------\n"
2388 "x87 Simulator started for %+F\n", irg));
2390 /* set the generic function pointer of instruction we must simulate */
2391 clear_irp_opcodes_generic_func();
/* virtual ia32 fp instructions */
2393 register_sim(op_ia32_Call, sim_Call);
2394 register_sim(op_ia32_vfld, sim_fld);
2395 register_sim(op_ia32_vfild, sim_fild);
2396 register_sim(op_ia32_vfld1, sim_fld1);
2397 register_sim(op_ia32_vfldz, sim_fldz);
2398 register_sim(op_ia32_vfadd, sim_fadd);
2399 register_sim(op_ia32_vfsub, sim_fsub);
2400 register_sim(op_ia32_vfmul, sim_fmul);
2401 register_sim(op_ia32_vfdiv, sim_fdiv);
2402 register_sim(op_ia32_vfprem, sim_fprem);
2403 register_sim(op_ia32_vfabs, sim_fabs);
2404 register_sim(op_ia32_vfchs, sim_fchs);
2405 register_sim(op_ia32_vfist, sim_fist);
2406 register_sim(op_ia32_vfisttp, sim_fisttp);
2407 register_sim(op_ia32_vfst, sim_fst);
2408 register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
2409 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
2410 register_sim(op_ia32_vFucomi, sim_Fucom);
/* backend (be_*) pseudo instructions */
2411 register_sim(op_be_Copy, sim_Copy);
2412 register_sim(op_be_Spill, sim_Spill);
2413 register_sim(op_be_Reload, sim_Reload);
2414 register_sim(op_be_Return, sim_Return);
2415 register_sim(op_be_Perm, sim_Perm);
2416 register_sim(op_be_Keep, sim_Keep);
2417 register_sim(op_be_Barrier, sim_Barrier);
2418 } /* x87_init_simulator */
2421 * Destroy a x87 simulator.
2423 * @param sim the simulator handle
2425 static void x87_destroy_simulator(x87_simulator *sim)
2427 pmap_destroy(sim->blk_states);
2428 obstack_free(&sim->obst, NULL);
2429 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2430 } /* x87_destroy_simulator */
2433 * Pre-block walker: calculate the liveness information for the block
2434 * and store it into the sim->live cache.
2436 static void update_liveness_walker(ir_node *block, void *data)
2438 x87_simulator *sim = data;
2439 update_liveness(sim, block);
2440 } /* update_liveness_walker */
2443 * Run a simulation and fix all virtual instructions for a graph.
2445 * @param arch_env the architecture environment
2446 * @param birg the backend graph handle (provides the graph and liveness)
2448 * Needs a block-schedule.
2450 void x87_simulate_graph(const arch_env_t *arch_env, be_irg_t *birg)
2452 ir_node *block, *start_block;
2453 blk_state *bl_state;
2455 ir_graph *irg = be_get_birg_irg(birg);
2457 /* create the simulator */
2458 x87_init_simulator(&sim, irg, arch_env);
2460 start_block = get_irg_start_block(irg);
2461 bl_state = x87_get_bl_state(&sim, start_block);
2463 /* start with the empty state */
2464 bl_state->begin = empty;
2467 sim.worklist = new_waitq();
2468 waitq_put(sim.worklist, start_block);
2470 be_assure_liveness(birg);
2471 sim.lv = be_get_birg_liveness(birg);
2472 // sim.lv = be_liveness(be_get_birg_irg(birg));
2473 be_liveness_assure_sets(sim.lv);
2475 /* Calculate the liveness for all nodes. We must precalculate this info,
2476 * because the simulator adds new nodes (possible before Phi nodes) which
2477 * would let a lazy calculation fail.
2478 * On the other hand we reduce the computation amount due to
2479 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2481 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
/* drain the worklist; x87_simulate_block enqueues unseen successors */
2485 block = waitq_get(sim.worklist);
2486 x87_simulate_block(&sim, block);
2487 } while (! waitq_empty(sim.worklist));
2490 del_waitq(sim.worklist);
2491 x87_destroy_simulator(&sim);
2492 } /* x87_simulate_graph */
2494 void ia32_init_x87(void)
2496 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
2497 } /* ia32_init_x87 */