2 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
33 #include "iredges_t.h"
48 #include "bearch_ia32_t.h"
49 #include "ia32_new_nodes.h"
50 #include "gen_ia32_new_nodes.h"
51 #include "gen_ia32_regalloc_if.h"
53 #include "ia32_architecture.h"
55 /** the debug handle */
56 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
58 /* Forward declaration. */
59 typedef struct x87_simulator x87_simulator;
62 * An entry on the simulated x87 stack.
64 typedef struct st_entry {
65 int reg_idx; /**< the virtual register index of this stack value */
66 ir_node *node; /**< the node that produced this value */
72 typedef struct x87_state {
73 st_entry st[N_ia32_st_REGS]; /**< the register stack */
74 int depth; /**< the current stack depth */
75 x87_simulator *sim; /**< The simulator. */
78 /** An empty state, used for blocks without fp instructions. */
79 static x87_state empty = { { {0, NULL}, }, 0, NULL };
82 * Return values of the instruction simulator functions.
85 NO_NODE_ADDED = 0, /**< No node that needs simulation was added. */
86 NODE_ADDED = 1 /**< A node that must be simulated was added by the simulator
87 in the schedule AFTER the current node. */
91 * The type of an instruction simulator function.
93 * @param state the x87 state
94 * @param n the node to be simulated
96 * @return NODE_ADDED if a node was added AFTER n in schedule that MUST be
98 * NO_NODE_ADDED otherwise
100 typedef int (*sim_func)(x87_state *state, ir_node *n);
103 * A block state: Every block has a x87 state at the beginning and at the end.
105 typedef struct blk_state {
106 x87_state *begin; /**< state at the begin or NULL if not assigned */
107 x87_state *end; /**< state at the end or NULL if not assigned */
110 /** liveness bitset for vfp registers. */
111 typedef unsigned char vfp_liveness;
116 struct x87_simulator {
117 struct obstack obst; /**< An obstack for fast allocating. */
118 pmap *blk_states; /**< Map blocks to states. */
119 be_lv_t *lv; /**< intrablock liveness. */
120 vfp_liveness *live; /**< Liveness information. */
121 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
122 waitq *worklist; /**< Worklist of blocks that must be processed. */
126 * Returns the current stack depth.
128 * @param state the x87 state
130 * @return the x87 stack depth
132 static int x87_get_depth(const x87_state *state)
/* Return the simulated stack entry at position pos (0 == TOS).
 * The st[] array is filled from its high end: TOS lives at index
 * N_ia32_st_REGS - depth, and pos is an offset from there. */
137 static st_entry *x87_get_entry(x87_state *const state, int const pos)
139 assert(0 <= pos && pos < state->depth);
140 return &state->st[N_ia32_st_REGS - state->depth + pos];
144 * Return the virtual register index at st(pos).
146 * @param state the x87 state
147 * @param pos a stack position
149 * @return the vfp register index that produced the value at st(pos)
151 static int x87_get_st_reg(const x87_state *state, int pos)
/* The const is cast away only because x87_get_entry takes a non-const
 * state; the entry is read, not written, here. */
153 return x87_get_entry((x87_state*)state, pos)->reg_idx;
158 * Dump the stack for debugging.
160 * @param state the x87 state
162 static void x87_dump_stack(const x87_state *state)
/* Walk from the deepest entry down to position 0, so the TOS is the
 * last entry printed, directly before the "<-- TOS" marker. */
164 for (int i = state->depth; i-- != 0;) {
165 st_entry const *const entry = x87_get_entry((x87_state*)state, i);
166 DB((dbg, LEVEL_2, "vf%d(%+F) ", entry->reg_idx, entry->node));
168 DB((dbg, LEVEL_2, "<-- TOS\n"));
/* NOTE(review): this function is only compiled with DEBUG_libfirm. */
170 #endif /* DEBUG_libfirm */
173 * Set a virtual register to st(pos).
175 * @param state the x87 state
176 * @param reg_idx the vfp register index that should be set
177 * @param node the IR node that produces the value of the vfp register
178 * @param pos the stack position where the new value should be entered
180 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
/* Overwrite the entry at st(pos) with the given vfp register index
 * (the node field is presumably set on a line not visible here). */
182 st_entry *const entry = x87_get_entry(state, pos);
183 entry->reg_idx = reg_idx;
186 DB((dbg, LEVEL_2, "After SET_REG: "));
187 DEBUG_ONLY(x87_dump_stack(state);)
191 * Swap st(0) with st(pos).
193 * @param state the x87 state
194 * @param pos the stack position to change the tos with
196 static void x87_fxch(x87_state *state, int pos)
/* Simulated fxch: swap the entries at st(pos) and st(0) via the
 * temporary t (the actual swap assignments are elided in this view). */
198 st_entry *const a = x87_get_entry(state, pos);
199 st_entry *const b = x87_get_entry(state, 0);
200 st_entry const t = *a;
204 DB((dbg, LEVEL_2, "After FXCH: "));
205 DEBUG_ONLY(x87_dump_stack(state);)
209 * Convert a virtual register to the stack index.
211 * @param state the x87 state
212 * @param reg_idx the register vfp index
214 * @return the stack position where the register is stacked
215 * or -1 if the virtual register was not found
217 static int x87_on_stack(const x87_state *state, int reg_idx)
/* Linear scan over the current stack depth; returns the matching
 * position, or -1 when not found (per the doc comment above). */
219 for (int i = 0; i < state->depth; ++i) {
220 if (x87_get_st_reg(state, i) == reg_idx)
227 * Push a virtual Register onto the stack, double pushes are NOT allowed.
229 * @param state the x87 state
230 * @param reg_idx the register vfp index
231 * @param node the node that produces the value of the vfp register
233 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
/* A register may be on the simulated stack at most once, and the
 * stack is bounded by the 8 physical x87 registers. */
235 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
236 assert(state->depth < N_ia32_st_REGS && "stack overrun");
/* The new value becomes the TOS entry (depth increment is on a line
 * not visible here). */
239 st_entry *const entry = x87_get_entry(state, 0);
240 entry->reg_idx = reg_idx;
243 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state);)
247 * Pop a virtual Register from the stack.
249 * @param state the x87 state
251 static void x87_pop(x87_state *state)
/* Drop the TOS entry; popping an empty stack is a simulator bug. */
253 assert(state->depth > 0 && "stack underrun");
257 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state);)
261 * Empty the fpu stack
263 * @param state the x87 state
265 static void x87_emms(x87_state *state)
271 * Returns the block state of a block.
273 * @param sim the x87 simulator handle
274 * @param block the current block
276 * @return the block state
278 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
/* Lazily create the per-block state: look it up in the map and, on a
 * miss, allocate a fresh blk_state on the simulator obstack and
 * register it for this block. */
280 blk_state *res = pmap_get(blk_state, sim->blk_states, block);
283 res = OALLOC(&sim->obst, blk_state);
287 pmap_insert(sim->blk_states, block, res);
296 * @param sim the x87 simulator handle
297 * @param src the x87 state that will be cloned
299 * @return a cloned copy of the src state
301 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
/* Allocate the clone on the simulator obstack; the copy of *src into
 * *res happens on a line not visible here. */
303 x87_state *const res = OALLOC(&sim->obst, x87_state);
309 * Patch a virtual instruction into a x87 one and return
310 * the node representing the result value.
312 * @param n the IR node to patch
313 * @param op the x87 opcode to patch in
315 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
317 ir_mode *mode = get_irn_mode(n);
/* For a mode_T node the float result lives on a Proj, so retag every
 * float Proj with the concrete x87 register-class mode; otherwise
 * retag the node itself. */
322 if (mode == mode_T) {
323 /* patch all Proj's */
324 foreach_out_edge(n, edge) {
325 ir_node *proj = get_edge_src_irn(edge);
327 mode = get_irn_mode(proj);
328 if (mode_is_float(mode)) {
330 set_irn_mode(proj, ia32_reg_classes[CLASS_ia32_st].mode);
334 } else if (mode_is_float(mode))
335 set_irn_mode(n, ia32_reg_classes[CLASS_ia32_st].mode);
340 * Returns the first Proj of a mode_T node having a given mode.
342 * @param n the mode_T node
343 * @param m the desired mode of the Proj
344 * @return The first Proj of mode @p m found.
346 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
348 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
/* Scan the out edges for the first Proj with the requested mode;
 * reaching the end without a match is a hard error. */
350 foreach_out_edge(n, edge) {
351 ir_node *proj = get_edge_src_irn(edge);
352 if (get_irn_mode(proj) == m)
356 panic("Proj not found");
360 * Wrap the arch_* function here so we can check for errors.
362 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
/* Checked wrapper: the returned register must belong to the vfp
 * (virtual floating point) register class. */
364 const arch_register_t *res = arch_get_irn_register(irn);
366 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
/* Like x87_get_irn_register(), but for a specific output position of
 * a multi-result node. */
370 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
373 const arch_register_t *res = arch_get_irn_register_out(irn, pos);
375 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
/* Map a stack position to the physical st(index) register descriptor. */
379 static inline const arch_register_t *get_st_reg(int index)
381 return &ia32_registers[REG_ST0 + index];
385 * Create a fxch node before another node.
387 * @param state the x87 state
388 * @param n the node after the fxch
389 * @param pos exchange st(pos) with st(0)
391 static void x87_create_fxch(x87_state *state, ir_node *n, int pos)
/* First update the simulated stack, then materialize a real fxch
 * instruction scheduled immediately before n. */
393 x87_fxch(state, pos);
395 ir_node *const block = get_nodes_block(n);
396 ir_node *const fxch = new_bd_ia32_fxch(NULL, block);
397 ia32_x87_attr_t *const attr = get_ia32_x87_attr(fxch);
/* x87[0] names the explicit operand st(pos); st(0) is implicit. */
398 attr->x87[0] = get_st_reg(pos);
402 sched_add_before(n, fxch);
403 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, get_st_reg(0)->name));
406 /* -------------- x87 perm --------------- */
409 * Calculate the necessary permutations to reach dst_state.
411 * These permutations are done with fxch instructions and placed
412 * at the end of the block.
414 * Note that critical edges are removed here, so we need only
415 * a shuffle if the current block has only one successor.
417 * @param block the current block
418 * @param state the current x87 stack state, might be modified
419 * @param dst_state destination state
423 static x87_state *x87_shuffle(ir_node *block, x87_state *state, const x87_state *dst_state)
425 int i, n_cycles, k, ri;
426 unsigned cycles[4], all_mask;
427 char cycle_idx[4][8];
429 assert(state->depth == dst_state->depth);
431 /* Some mathematics here:
432 * If we have a cycle of length n that includes the tos,
433 * we need n-1 exchange operations.
434 * We can always add the tos and restore it, so we need
435 * n+1 exchange operations for a cycle not containing the tos.
436 * So, the maximum of needed operations is for a cycle of 7
437 * not including the tos == 8.
438 * This is the same number of ops we would need for using stores,
439 * so exchange is cheaper (we save the loads).
440 * On the other hand, we might need an additional exchange
441 * in the next block to bring one operand on top, so the
442 * number of ops in the first case is identical.
443 * Further, no more than 4 cycles can exists (4 x 2). */
444 all_mask = (1 << (state->depth)) - 1;
446 for (n_cycles = 0; all_mask; ++n_cycles) {
447 int src_idx, dst_idx;
449 /* find the first free slot */
450 for (i = 0; i < state->depth; ++i) {
451 if (all_mask & (1 << i)) {
452 all_mask &= ~(1 << i);
454 /* check if there are differences here */
455 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
461 /* no more cycles found */
466 cycles[n_cycles] = (1 << i);
467 cycle_idx[n_cycles][k++] = i;
468 for (src_idx = i; ; src_idx = dst_idx) {
469 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
471 if ((all_mask & (1 << dst_idx)) == 0)
474 cycle_idx[n_cycles][k++] = dst_idx;
475 cycles[n_cycles] |= (1 << dst_idx);
476 all_mask &= ~(1 << dst_idx);
478 cycle_idx[n_cycles][k] = -1;
482 /* no permutation needed */
486 /* Hmm: permutation needed */
487 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
488 DEBUG_ONLY(x87_dump_stack(state);)
489 DB((dbg, LEVEL_2, " to\n"));
490 DEBUG_ONLY(x87_dump_stack(dst_state);)
494 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
495 for (ri = 0; ri < n_cycles; ++ri) {
496 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
497 for (k = 0; cycle_idx[ri][k] != -1; ++k)
498 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
499 DB((dbg, LEVEL_2, "\n"));
504 * Find the place where the node must be inserted.
505 * We have only one successor block, so the last instruction should
508 ir_node *const before = sched_last(block);
509 assert(is_cfop(before));
511 /* now do the permutations */
512 for (ri = 0; ri < n_cycles; ++ri) {
513 if ((cycles[ri] & 1) == 0) {
514 /* this cycle does not include the tos */
515 x87_create_fxch(state, before, cycle_idx[ri][0]);
517 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
518 x87_create_fxch(state, before, cycle_idx[ri][k]);
520 if ((cycles[ri] & 1) == 0) {
521 /* this cycle does not include the tos */
522 x87_create_fxch(state, before, cycle_idx[ri][0]);
529 * Create a fpush before node n.
531 * @param state the x87 state
532 * @param n the node after the fpush
533 * @param pos push st(pos) on stack
534 * @param val the value to push
536 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int const out_reg_idx, ir_node *const val)
/* Push val (as out_reg_idx) onto the simulated stack, then emit a
 * real fpush of st(pos) scheduled before n. */
538 x87_push(state, out_reg_idx, val);
540 ir_node *const fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
541 ia32_x87_attr_t *const attr = get_ia32_x87_attr(fpush);
542 attr->x87[0] = get_st_reg(pos);
545 sched_add_before(n, fpush);
547 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, get_st_reg(0)->name));
551 * Create a fpop before node n.
553 * @param state the x87 state
554 * @param n the node after the fpop
555 * @param num pop 1 or 2 values
557 * @return the fpop node
559 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
561 ir_node *fpop = NULL;
562 ia32_x87_attr_t *attr;
/* Prefer ffreep when the target supports it (cheaper on some CPUs),
 * otherwise fall back to a regular fpop; the else keyword for the
 * fallback branch is on a line not visible here. */
567 if (ia32_cg_config.use_ffreep)
568 fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
570 fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
571 attr = get_ia32_x87_attr(fpop);
/* Both variants operate on the top of stack. */
572 attr->x87[0] = get_st_reg(0);
575 sched_add_before(n, fpop);
576 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
581 /* --------------------------------- liveness ------------------------------------------ */
584 * The liveness transfer function.
585 * Updates a live set over a single step from a given node to its predecessor.
586 * Everything defined at the node is removed from the set, the uses of the node get inserted.
588 * @param irn The node at which liveness should be computed.
589 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
590 * the registers live after irn.
592 * @return The live bitset.
594 static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
597 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
/* Kill the definitions: clear the bit of every vfp register defined
 * at irn (for a mode_T node the defs are its Projs). */
599 if (get_irn_mode(irn) == mode_T) {
600 foreach_out_edge(irn, edge) {
601 ir_node *proj = get_edge_src_irn(edge);
603 if (arch_irn_consider_in_reg_alloc(cls, proj)) {
604 const arch_register_t *reg = x87_get_irn_register(proj);
605 live &= ~(1 << reg->index);
608 } else if (arch_irn_consider_in_reg_alloc(cls, irn)) {
609 const arch_register_t *reg = x87_get_irn_register(irn);
610 live &= ~(1 << reg->index);
/* Gen the uses: set the bit of every vfp register irn reads. */
613 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
614 ir_node *op = get_irn_n(irn, i);
616 if (mode_is_float(get_irn_mode(op)) &&
617 arch_irn_consider_in_reg_alloc(cls, op)) {
618 const arch_register_t *reg = x87_get_irn_register(op);
619 live |= 1 << reg->index;
626 * Put all live virtual registers at the end of a block into a bitset.
628 * @param sim the simulator handle
629 * @param bl the block
631 * @return The live bitset at the end of this block
633 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
635 vfp_liveness live = 0;
636 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
637 const be_lv_t *lv = sim->lv;
/* Collect every vfp-class value that the liveness analysis reports
 * live at the end of this block into one bitset. */
639 be_lv_foreach(lv, block, be_lv_state_end, node) {
640 const arch_register_t *reg;
641 if (!arch_irn_consider_in_reg_alloc(cls, node))
644 reg = x87_get_irn_register(node);
645 live |= 1 << reg->index;
651 /** get the register mask from an arch_register */
652 #define REGMASK(reg) (1 << (reg->index))
655 * Return a bitset of argument registers which are live at the end of a node.
657 * @param sim the simulator handle
658 * @param pos the node
659 * @param kill kill mask for the output registers
661 * @return The live bitset.
663 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
/* Read the cached per-node liveness (filled by update_liveness) and
 * mask out the registers defined by the node itself. */
665 unsigned idx = get_irn_idx(pos);
667 assert(idx < sim->n_idx);
668 return sim->live[idx] & ~kill;
672 * Calculate the liveness for a whole block and cache it.
674 * @param sim the simulator handle
675 * @param block the block
677 static void update_liveness(x87_simulator *sim, ir_node *block)
/* Seed with the live-out of the block, then walk the schedule
 * backwards, caching for each node the set live *after* it before
 * applying the transfer function to step over the node. */
679 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
682 /* now iterate through the block backward and cache the results */
683 sched_foreach_reverse(block, irn) {
684 /* stop at the first Phi: this produces the live-in */
688 idx = get_irn_idx(irn);
689 sim->live[idx] = live;
691 live = vfp_liveness_transfer(irn, live);
/* The block's own index caches the live-in set. */
693 idx = get_irn_idx(block);
694 sim->live[idx] = live;
698 * Returns true if a register is live in a set.
700 * @param reg_idx the vfp register index
701 * @param live a live bitset
703 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
707 * Dump liveness info.
709 * @param live the live bitset
711 static void vfp_dump_live(vfp_liveness live)
/* Print the name of every vfp register whose bit is set (8 vfp
 * registers, matching the 8 physical x87 slots). */
715 DB((dbg, LEVEL_2, "Live after: "));
716 for (i = 0; i < 8; ++i) {
717 if (live & (1 << i)) {
718 DB((dbg, LEVEL_2, "vf%d ", i));
721 DB((dbg, LEVEL_2, "\n"));
/* NOTE(review): debug-build only helper. */
723 #endif /* DEBUG_libfirm */
725 /* --------------------------------- simulators ---------------------------------------- */
728 * Simulate a virtual binop.
730 * @param state the x87 state
731 * @param n the node that should be simulated (and patched)
733 * @return NO_NODE_ADDED
735 static int sim_binop(x87_state *const state, ir_node *const n, ir_op *const op)
737 int op2_idx = 0, op1_idx;
738 int out_idx, do_pop = 0;
739 ia32_x87_attr_t *attr;
741 ir_node *patched_insn;
742 x87_simulator *sim = state->sim;
743 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
744 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
745 const arch_register_t *op1_reg = x87_get_irn_register(op1);
746 const arch_register_t *op2_reg = x87_get_irn_register(op2);
747 const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
748 int reg_index_1 = op1_reg->index;
749 int reg_index_2 = op2_reg->index;
750 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
754 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n, op1_reg->name, op2_reg->name, out->name));
755 DEBUG_ONLY(vfp_dump_live(live);)
756 DB((dbg, LEVEL_1, "Stack before: "));
757 DEBUG_ONLY(x87_dump_stack(state);)
759 op1_idx = x87_on_stack(state, reg_index_1);
760 assert(op1_idx >= 0);
761 op1_live_after = is_vfp_live(reg_index_1, live);
763 attr = get_ia32_x87_attr(n);
764 permuted = attr->attr.data.ins_permuted;
766 int const out_reg_idx = out->index;
767 if (reg_index_2 != REG_VFP_VFP_NOREG) {
770 /* second operand is a vfp register */
771 op2_idx = x87_on_stack(state, reg_index_2);
772 assert(op2_idx >= 0);
773 op2_live_after = is_vfp_live(reg_index_2, live);
775 if (op2_live_after) {
776 /* Second operand is live. */
778 if (op1_live_after) {
779 /* Both operands are live: push the first one.
780 This works even for op1 == op2. */
781 x87_create_fpush(state, n, op1_idx, out_reg_idx, op2);
782 /* now do fxxx (tos=tos X op) */
787 /* Second live, first operand is dead here, bring it to tos. */
789 x87_create_fxch(state, n, op1_idx);
794 /* now do fxxx (tos=tos X op) */
798 /* Second operand is dead. */
799 if (op1_live_after) {
800 /* First operand is live: bring second to tos. */
802 x87_create_fxch(state, n, op2_idx);
807 /* now do fxxxr (tos = op X tos) */
810 /* Both operands are dead here, pop them from the stack. */
813 /* Both are identical and on tos, no pop needed. */
814 /* here fxxx (tos = tos X tos) */
817 /* now do fxxxp (op = op X tos, pop) */
821 } else if (op1_idx == 0) {
822 assert(op1_idx != op2_idx);
823 /* now do fxxxrp (op = tos X op, pop) */
827 /* Bring the second on top. */
828 x87_create_fxch(state, n, op2_idx);
829 if (op1_idx == op2_idx) {
830 /* Both are identical and on tos now, no pop needed. */
833 /* use fxxx (tos = tos X tos) */
836 /* op2 is on tos now */
838 /* use fxxxp (op = op X tos, pop) */
846 /* second operand is an address mode */
847 if (op1_live_after) {
848 /* first operand is live: push it here */
849 x87_create_fpush(state, n, op1_idx, out_reg_idx, op1);
852 /* first operand is dead: bring it to tos */
854 x87_create_fxch(state, n, op1_idx);
859 /* use fxxx (tos = tos X mem) */
863 patched_insn = x87_patch_insn(n, op);
864 x87_set_st(state, out_reg_idx, patched_insn, out_idx);
869 /* patch the operation */
871 attr->x87[0] = op1_reg = get_st_reg(op1_idx);
872 if (reg_index_2 != REG_VFP_VFP_NOREG) {
873 attr->x87[1] = op2_reg = get_st_reg(op2_idx);
875 attr->x87[2] = out = get_st_reg(out_idx);
877 if (reg_index_2 != REG_VFP_VFP_NOREG) {
878 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n), op1_reg->name, op2_reg->name, out->name));
880 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n), op1_reg->name, out->name));
883 return NO_NODE_ADDED;
887 * Simulate a virtual Unop.
889 * @param state the x87 state
890 * @param n the node that should be simulated (and patched)
891 * @param op the x87 opcode that will replace n's opcode
893 * @return NO_NODE_ADDED
895 static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
897 arch_register_t const *const out = x87_get_irn_register(n);
898 unsigned const live = vfp_live_args_after(state->sim, n, REGMASK(out));
899 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
900 DEBUG_ONLY(vfp_dump_live(live);)
902 ir_node *const op1 = get_irn_n(n, 0);
903 arch_register_t const *const op1_reg = x87_get_irn_register(op1);
904 int const op1_reg_idx = op1_reg->index;
905 int const op1_idx = x87_on_stack(state, op1_reg_idx);
906 int const out_reg_idx = out->index;
/* x87 unops operate on st(0): if the operand stays live, duplicate
 * it via fpush so the original survives; if it is dead, just bring
 * it to the top with fxch. */
907 if (is_vfp_live(op1_reg_idx, live)) {
908 /* push the operand here */
909 x87_create_fpush(state, n, op1_idx, out_reg_idx, op1);
911 /* operand is dead, bring it to tos */
913 x87_create_fxch(state, n, op1_idx);
/* The patched instruction overwrites st(0) with the result. */
917 x87_set_st(state, out_reg_idx, x87_patch_insn(n, op), 0);
918 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), get_st_reg(0)->name));
920 return NO_NODE_ADDED;
924 * Simulate a virtual Load instruction.
926 * @param state the x87 state
927 * @param n the node that should be simulated (and patched)
928 * @param op the x87 opcode that will replace n's opcode
930 * @return NO_NODE_ADDED
932 static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
934 const arch_register_t *out = x87_irn_get_register(n, res_pos);
936 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
/* A load pushes its result onto the x87 stack (becomes st(0)). */
937 x87_push(state, out->index, x87_patch_insn(n, op));
/* Patching must not have changed the node's assigned out register. */
938 assert(out == x87_irn_get_register(n, res_pos));
939 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), get_st_reg(0)->name));
941 return NO_NODE_ADDED;
945 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
947 * @param store The store
948 * @param old_val The former value
949 * @param new_val The new value
951 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
/* Safe edge iteration is required because set_irn_n() mutates the
 * out-edge list we are walking. */
953 foreach_out_edge_safe(old_val, edge) {
954 ir_node *user = get_edge_src_irn(edge);
955 /* if the user is scheduled after the store: rewire */
956 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
957 set_irn_n(user, get_edge_src_pos(edge), new_val);
963 * Simulate a virtual Store.
965 * @param state the x87 state
966 * @param n the node that should be simulated (and patched)
967 * @param op the x87 store opcode
969 static int sim_store(x87_state *state, ir_node *n, ir_op *op)
971 ir_node *const val = get_irn_n(n, n_ia32_vfst_val);
972 arch_register_t const *const op2 = x87_get_irn_register(val);
973 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, op2->name));
976 int insn = NO_NODE_ADDED;
977 int const op2_reg_idx = op2->index;
978 int const op2_idx = x87_on_stack(state, op2_reg_idx);
979 unsigned const live = vfp_live_args_after(state->sim, n, 0);
980 int const live_after_node = is_vfp_live(op2_reg_idx, live);
981 assert(op2_idx >= 0);
982 if (live_after_node) {
983 /* Problem: fst doesn't support 80bit modes (spills), only fstp does
984 * fist doesn't support 64bit mode, only fistp
986 * - stack not full: push value and fstp
987 * - stack full: fstp value and load again
988 * Note that we cannot test on mode_E, because floats might be 80bit ... */
989 ir_mode *const mode = get_ia32_ls_mode(n);
990 if (get_mode_size_bits(mode) > (mode_is_int(mode) ? 32 : 64)) {
991 if (x87_get_depth(state) < N_ia32_st_REGS) {
992 /* ok, we have a free register: push + fstp */
993 x87_create_fpush(state, n, op2_idx, REG_VFP_VFP_NOREG, val);
994 x87_patch_insn(n, op);
997 /* stack full here: need fstp + load */
998 x87_patch_insn(n, op);
1001 ir_node *const block = get_nodes_block(n);
1002 ir_node *const mem = get_irn_Proj_for_mode(n, mode_M);
1003 ir_node *const vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), mem, mode);
1005 /* copy all attributes */
1006 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1007 if (is_ia32_use_frame(n))
1008 set_ia32_use_frame(vfld);
1009 set_ia32_op_type(vfld, ia32_AddrModeS);
1010 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1011 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1012 set_ia32_ls_mode(vfld, mode);
1014 ir_node *const rproj = new_r_Proj(vfld, mode, pn_ia32_vfld_res);
1015 ir_node *const mproj = new_r_Proj(vfld, mode_M, pn_ia32_vfld_M);
1017 arch_set_irn_register(rproj, op2);
1019 /* reroute all former users of the store memory to the load memory */
1020 edges_reroute_except(mem, mproj, vfld);
1022 sched_add_after(n, vfld);
1024 /* rewire all users, scheduled after the store, to the loaded value */
1025 collect_and_rewire_users(n, val, rproj);
1030 /* we can only store the tos to memory */
1032 x87_create_fxch(state, n, op2_idx);
1034 /* mode size 64 or smaller -> use normal fst */
1035 x87_patch_insn(n, op);
1038 /* we can only store the tos to memory */
1040 x87_create_fxch(state, n, op2_idx);
1042 x87_patch_insn(n, op);
1049 ia32_x87_attr_t *const attr = get_ia32_x87_attr(n);
1051 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), get_st_reg(0)->name));
1056 #define GEN_BINOP(op) \
1057 static int sim_##op(x87_state *state, ir_node *n) { \
1058 return sim_binop(state, n, op_ia32_##op); \
1061 #define GEN_LOAD(op) \
1062 static int sim_##op(x87_state *state, ir_node *n) { \
1063 return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
1066 #define GEN_UNOP(op) \
1067 static int sim_##op(x87_state *state, ir_node *n) { \
1068 return sim_unop(state, n, op_ia32_##op); \
1071 #define GEN_STORE(op) \
1072 static int sim_##op(x87_state *state, ir_node *n) { \
1073 return sim_store(state, n, op_ia32_##op); \
/* fprem simulation is not implemented; panic() does not return, the
 * trailing return only silences the compiler. */
1093 static int sim_fprem(x87_state *const state, ir_node *const n)
1097 panic("TODO implement");
1098 return NO_NODE_ADDED;
1102 * Simulate a virtual fisttp.
1104 * @param state the x87 state
1105 * @param n the node that should be simulated (and patched)
1107 * @return NO_NODE_ADDED
1109 static int sim_fisttp(x87_state *state, ir_node *n)
1111 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1112 const arch_register_t *op2 = x87_get_irn_register(val);
1114 int const op2_idx = x87_on_stack(state, op2->index);
1115 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, op2->name));
1116 assert(op2_idx >= 0);
1118 /* Note: although the value is still live here, it is destroyed because
1119 of the pop. The register allocator is aware of that and introduced a copy
1120 if the value must be alive. */
1122 /* we can only store the tos to memory */
1124 x87_create_fxch(state, n, op2_idx);
/* fisttp always pops, hence the destroyed-value note above. */
1127 x87_patch_insn(n, op_ia32_fisttp);
1129 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), get_st_reg(0)->name));
1131 return NO_NODE_ADDED;
1135 * Simulate a virtual FtstFnstsw.
1137 * @param state the x87 state
1138 * @param n the node that should be simulated (and patched)
1140 * @return NO_NODE_ADDED
1142 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1144 x87_simulator *sim = state->sim;
1145 ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1146 const arch_register_t *reg1 = x87_get_irn_register(op1_node);
1147 int reg_index_1 = reg1->index;
1148 int op1_idx = x87_on_stack(state, reg_index_1);
1149 unsigned live = vfp_live_args_after(sim, n, 0);
1151 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, reg1->name));
1152 DEBUG_ONLY(vfp_dump_live(live);)
1153 DB((dbg, LEVEL_1, "Stack before: "));
1154 DEBUG_ONLY(x87_dump_stack(state);)
1155 assert(op1_idx >= 0);
/* ftst only tests st(0), so the operand must be on top. */
1158 /* bring the value to tos */
1159 x87_create_fxch(state, n, op1_idx);
1162 /* patch the operation */
1163 x87_patch_insn(n, op_ia32_FtstFnstsw);
/* If the tested value is dead afterwards, free the stack slot with a
 * pop scheduled after n. */
1165 if (!is_vfp_live(reg_index_1, live))
1166 x87_create_fpop(state, sched_next(n), 1);
1168 return NO_NODE_ADDED;
1174 * @param state the x87 state
1175 * @param n the node that should be simulated (and patched)
1177 * @return NO_NODE_ADDED
1179 static int sim_Fucom(x87_state *state, ir_node *n)
1183 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1185 x87_simulator *sim = state->sim;
1186 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1187 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1188 const arch_register_t *op1 = x87_get_irn_register(op1_node);
1189 const arch_register_t *op2 = x87_get_irn_register(op2_node);
1190 int reg_index_1 = op1->index;
1191 int reg_index_2 = op2->index;
1192 unsigned live = vfp_live_args_after(sim, n, 0);
1193 bool permuted = attr->attr.data.ins_permuted;
1197 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n, op1->name, op2->name));
1198 DEBUG_ONLY(vfp_dump_live(live);)
1199 DB((dbg, LEVEL_1, "Stack before: "));
1200 DEBUG_ONLY(x87_dump_stack(state);)
1202 op1_idx = x87_on_stack(state, reg_index_1);
1203 assert(op1_idx >= 0);
1205 /* BEWARE: check for comp a,a cases, they might happen */
1206 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1207 /* second operand is a vfp register */
1208 op2_idx = x87_on_stack(state, reg_index_2);
1209 assert(op2_idx >= 0);
1211 if (is_vfp_live(reg_index_2, live)) {
1212 /* second operand is live */
1214 if (is_vfp_live(reg_index_1, live)) {
1215 /* both operands are live */
1218 /* res = tos X op */
1219 } else if (op2_idx == 0) {
1220 /* res = op X tos */
1221 permuted = !permuted;
1224 /* bring the first one to tos */
1225 x87_create_fxch(state, n, op1_idx);
1226 if (op1_idx == op2_idx) {
1228 } else if (op2_idx == 0) {
1232 /* res = tos X op */
1235 /* second live, first operand is dead here, bring it to tos.
1236 This means further, op1_idx != op2_idx. */
1237 assert(op1_idx != op2_idx);
1239 x87_create_fxch(state, n, op1_idx);
1244 /* res = tos X op, pop */
1248 /* second operand is dead */
1249 if (is_vfp_live(reg_index_1, live)) {
1250 /* first operand is live: bring second to tos.
1251 This means further, op1_idx != op2_idx. */
1252 assert(op1_idx != op2_idx);
1254 x87_create_fxch(state, n, op2_idx);
1259 /* res = op X tos, pop */
1261 permuted = !permuted;
1264 /* both operands are dead here, check first for identity. */
1265 if (op1_idx == op2_idx) {
1266 /* identical, one pop needed */
1268 x87_create_fxch(state, n, op1_idx);
1272 /* res = tos X op, pop */
1275 /* different, move them to st and st(1) and pop both.
1276 The tricky part is to get one into st(1).*/
1277 else if (op2_idx == 1) {
1278 /* good, second operand is already in the right place, move the first */
1280 /* bring the first on top */
1281 x87_create_fxch(state, n, op1_idx);
1282 assert(op2_idx != 0);
1285 /* res = tos X op, pop, pop */
1287 } else if (op1_idx == 1) {
1288 /* good, first operand is already in the right place, move the second */
1290 /* bring the first on top */
1291 x87_create_fxch(state, n, op2_idx);
1292 assert(op1_idx != 0);
1295 /* res = op X tos, pop, pop */
1296 permuted = !permuted;
1300 /* if one is already the TOS, we need two fxch */
1302 /* first one is TOS, move to st(1) */
1303 x87_create_fxch(state, n, 1);
1304 assert(op2_idx != 1);
1306 x87_create_fxch(state, n, op2_idx);
1308 /* res = op X tos, pop, pop */
1310 permuted = !permuted;
1312 } else if (op2_idx == 0) {
1313 /* second one is TOS, move to st(1) */
1314 x87_create_fxch(state, n, 1);
1315 assert(op1_idx != 1);
1317 x87_create_fxch(state, n, op1_idx);
1319 /* res = tos X op, pop, pop */
1322 /* none of them is either TOS or st(1), 3 fxch needed */
1323 x87_create_fxch(state, n, op2_idx);
1324 assert(op1_idx != 0);
1325 x87_create_fxch(state, n, 1);
1327 x87_create_fxch(state, n, op1_idx);
1329 /* res = tos X op, pop, pop */
1336 /* second operand is an address mode */
1337 if (is_vfp_live(reg_index_1, live)) {
1338 /* first operand is live: bring it to TOS */
1340 x87_create_fxch(state, n, op1_idx);
1344 /* first operand is dead: bring it to tos */
1346 x87_create_fxch(state, n, op1_idx);
1353 /* patch the operation */
1354 if (is_ia32_vFucomFnstsw(n)) {
1358 case 1: attr->pop = true; /* FALLTHROUGH */
1359 case 0: dst = op_ia32_FucomFnstsw; break;
1360 case 2: dst = op_ia32_FucomppFnstsw; break;
1361 default: panic("invalid popcount");
1364 for (i = 0; i < pops; ++i) {
1367 } else if (is_ia32_vFucomi(n)) {
1368 dst = op_ia32_Fucomi;
1371 case 1: attr->pop = true; x87_pop(state); break;
1375 x87_create_fpop(state, sched_next(n), 1);
1377 default: panic("invalid popcount");
1380 panic("invalid operation %+F", n);
1383 x87_patch_insn(n, dst);
1390 op1 = get_st_reg(op1_idx);
1393 op2 = get_st_reg(op2_idx);
1396 attr->attr.data.ins_permuted = permuted;
1399 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n), op1->name, op2->name));
1401 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n), op1->name));
1404 return NO_NODE_ADDED;
1410 * @param state the x87 state
1411 * @param n the node that should be simulated (and patched)
1413 * @return NO_NODE_ADDED
1415 static int sim_Keep(x87_state *state, ir_node *node)
/* Simulate a be_Keep: for every kept operand of the vfp register class
 * that is still on the simulated x87 stack but no longer live after the
 * Keep, emit an fpop so no dead value stays on the stack.
 * Returns NO_NODE_ADDED.
 * NOTE(review): several original lines (braces, local declarations) are
 * elided in this extract; the visible code is kept byte-identical. */
1418 const arch_register_t *op_reg;
1424 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1426 arity = get_irn_arity(node);
1427 for (i = 0; i < arity; ++i) {
1428 op = get_irn_n(node, i);
1429 op_reg = arch_get_irn_register(op);
/* only x87 (vfp) operands are of interest here */
1430 if (op_reg->reg_class != &ia32_reg_classes[CLASS_ia32_vfp])
1433 reg_id = op_reg->index;
1434 live = vfp_live_args_after(state->sim, node, 0);
/* pop the value if it sits on the stack but is dead after the Keep */
1436 op_stack_idx = x87_on_stack(state, reg_id);
1437 if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live))
1438 x87_create_fpop(state, sched_next(node), 1);
1441 DB((dbg, LEVEL_1, "Stack after: "));
1442 DEBUG_ONLY(x87_dump_stack(state);)
1444 return NO_NODE_ADDED;
1448 * Keep the given node alive by adding a be_Keep.
1450 * @param node the node to kept alive
1452 static void keep_float_node_alive(ir_node *node)
1454 ir_node *block = get_nodes_block(node);
1455 ir_node *keep = be_new_Keep(block, 1, &node);
1456 sched_add_after(node, keep);
1460 * Create a copy of a node. Recreate the node if it's a constant.
1462 * @param state the x87 state
1463 * @param n the node to be copied
1465 * @return the copy of n
1467 static ir_node *create_Copy(x87_state *state, ir_node *n)
/* Materialize a copy of n's operand on the x87 stack: constants are
 * recreated via their fld* constructor instead of being copied; anything
 * else gets an fpushCopy of the value at its current stack slot. The new
 * value is pushed onto the simulated stack under n's output register.
 * NOTE(review): the switch's case labels for fldz/fld1, the break
 * statements and the final `return res;` are elided in this extract. */
1469 dbg_info *n_dbg = get_irn_dbg_info(n);
1470 ir_mode *mode = get_irn_mode(n);
1471 ir_node *block = get_nodes_block(n);
1472 ir_node *pred = get_irn_n(n, 0);
/* constructor used when the predecessor is a recreatable constant */
1473 ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL;
1475 const arch_register_t *out;
1476 const arch_register_t *op1;
1478 /* Do not copy constants, recreate them. */
1479 switch (get_ia32_irn_opcode(pred)) {
1481 cnstr = new_bd_ia32_fldz;
1484 cnstr = new_bd_ia32_fld1;
1486 case iro_ia32_fldpi:
1487 cnstr = new_bd_ia32_fldpi;
1489 case iro_ia32_fldl2e:
1490 cnstr = new_bd_ia32_fldl2e;
1492 case iro_ia32_fldl2t:
1493 cnstr = new_bd_ia32_fldl2t;
1495 case iro_ia32_fldlg2:
1496 cnstr = new_bd_ia32_fldlg2;
1498 case iro_ia32_fldln2:
1499 cnstr = new_bd_ia32_fldln2;
1505 out = x87_get_irn_register(n);
1506 op1 = x87_get_irn_register(pred);
1508 if (cnstr != NULL) {
1509 /* copy a constant: rebuild it instead of pushing a duplicate */
1510 res = (*cnstr)(n_dbg, block, mode);
1512 x87_push(state, out->index, res);
/* not a constant: duplicate the operand from its current stack slot */
1514 int op1_idx = x87_on_stack(state, op1->index);
1516 res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);
1518 x87_push(state, out->index, res);
1520 ia32_x87_attr_t *const attr = get_ia32_x87_attr(res);
1521 attr->x87[0] = get_st_reg(op1_idx);
1523 arch_set_irn_register(res, out);
1529 * Simulate a be_Copy.
1531 * @param state the x87 state
1532 * @param n the node that should be simulated (and patched)
1534 * @return NO_NODE_ADDED
1536 static int sim_Copy(x87_state *state, ir_node *n)
/* Simulate a be_Copy of a vfp value. If the operand is still live a real
 * duplicate is created via create_Copy(); otherwise the copy is purely
 * virtual and only the simulated stack entry is renamed. Non-vfp copies
 * are ignored. Returns NO_NODE_ADDED. */
1538 arch_register_class_t const *const cls = arch_get_irn_reg_class(n);
1539 if (cls != &ia32_reg_classes[CLASS_ia32_vfp])
1540 return NO_NODE_ADDED;
1542 ir_node *const pred = be_get_Copy_op(n);
1543 arch_register_t const *const op1 = x87_get_irn_register(pred);
1544 arch_register_t const *const out = x87_get_irn_register(n);
1545 unsigned const live = vfp_live_args_after(state->sim, n, REGMASK(out));
1547 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n, op1->name, out->name));
1548 DEBUG_ONLY(vfp_dump_live(live);)
1550 if (is_vfp_live(op1->index, live)) {
1551 /* Operand is still live, a real copy. We need here an fpush that can
1552 hold a register, so use the fpushCopy or recreate constants */
1553 ir_node *const node = create_Copy(state, n);
1555 /* We have to make sure the old value doesn't go dead (which can happen
1556 * when we recreate constants). As the simulator expected that value in
1557 * the pred blocks. This is unfortunate as removing it would save us 1
1558 * instruction, but we would have to rerun all the simulation to get
 * this information again. */
1561 ir_node *const next = sched_next(n);
1564 sched_add_before(next, node);
/* if the recreated constant left pred without users, keep it alive */
1566 if (get_irn_n_edges(pred) == 0) {
1567 keep_float_node_alive(pred);
1570 DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
1572 /* Just a virtual copy. */
1573 int const op1_idx = x87_on_stack(state, op1->index);
1574 x87_set_st(state, out->index, n, op1_idx);
1576 return NO_NODE_ADDED;
1580 * Returns the vf0 result Proj of a Call.
1582 * @para call the Call node
1584 static ir_node *get_call_result_proj(ir_node *call)
/* Returns the Proj of the given ia32 Call that carries the vf0 result
 * (pn_ia32_Call_vf0); panics when no such Proj exists.
 * NOTE(review): the `return proj;` inside the match branch is elided in
 * this extract. */
1586 /* search the result proj */
1587 foreach_out_edge(call, edge) {
1588 ir_node *proj = get_edge_src_irn(edge);
1589 long pn = get_Proj_proj(proj);
1591 if (pn == pn_ia32_Call_vf0)
1595 panic("result Proj missing");
1598 static int sim_Asm(x87_state *const state, ir_node *const n)
1602 for (size_t i = get_irn_arity(n); i-- != 0;) {
1603 arch_register_req_t const *const req = arch_get_irn_register_req_in(n, i);
1604 if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1605 panic("cannot handle %+F with x87 constraints", n);
1608 for (size_t i = arch_get_irn_n_outs(n); i-- != 0;) {
1609 arch_register_req_t const *const req = arch_get_irn_register_req_out(n, i);
1610 if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1611 panic("cannot handle %+F with x87 constraints", n);
1614 return NO_NODE_ADDED;
1618 * Simulate a ia32_Call.
1620 * @param state the x87 state
1621 * @param n the node that should be simulated (and patched)
1623 * @return NO_NODE_ADDED
1625 static int sim_Call(x87_state *state, ir_node *n)
/* Simulate an ia32 Call: the stack must be empty at the call (asserted);
 * if the callee returns a float, that value appears in st(0), so push the
 * result Proj's register onto the simulated stack. Returns NO_NODE_ADDED. */
1627 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1629 /* at the begin of a call the x87 state should be empty */
1630 assert(state->depth == 0 && "stack not empty before call");
1632 ir_type *const call_tp = get_ia32_call_attr_const(n)->call_tp;
1633 if (get_method_n_ress(call_tp) != 0) {
1634 /* If the called function returns a float, it is returned in st(0).
1635 * This even happens if the return value is NOT used.
1636 * Moreover, only one return result is supported. */
1637 ir_type *const res_type = get_method_res_type(call_tp, 0);
1638 ir_mode *const mode = get_type_mode(res_type);
1639 if (mode && mode_is_float(mode)) {
1640 ir_node *const resproj = get_call_result_proj(n);
1641 arch_register_t const *const reg = x87_get_irn_register(resproj);
1642 x87_push(state, reg->index, resproj);
1645 DB((dbg, LEVEL_1, "Stack after: "));
1646 DEBUG_ONLY(x87_dump_stack(state);)
1648 return NO_NODE_ADDED;
1652 * Simulate a be_Return.
1654 * @param state the x87 state
1655 * @param n the node that should be simulated (and patched)
1657 * @return NO_NODE_ADDED
1659 static int sim_Return(x87_state *state, ir_node *n)
/* Simulate a be_Return: in debug builds, verify the stack depth equals
 * the number of floating point return values, then pop them virtually.
 * Returns NO_NODE_ADDED.
 * NOTE(review): the n_float_res increment, the #endif and the virtual
 * pop code are elided in this extract. */
1661 #ifdef DEBUG_libfirm
1662 /* only floating point return values must reside on stack */
1663 int n_float_res = 0;
1664 int const n_res = be_Return_get_n_rets(n);
1665 for (int i = 0; i < n_res; ++i) {
1666 ir_node *const res = get_irn_n(n, n_be_Return_val + i);
1667 if (mode_is_float(get_irn_mode(res)))
1670 assert(x87_get_depth(state) == n_float_res);
1673 /* pop them virtually */
1675 return NO_NODE_ADDED;
1679 * Simulate a be_Perm.
1681 * @param state the x87 state
1682 * @param irn the node that should be simulated (and patched)
1684 * @return NO_NODE_ADDED
1686 static int sim_Perm(x87_state *state, ir_node *irn)
/* Simulate a be_Perm of floating point values. No machine instruction is
 * emitted: the simulated stack entries are simply re-associated with the
 * output registers of the Perm's Projs. Returns NO_NODE_ADDED. */
1689 ir_node *pred = get_irn_n(irn, 0);
1692 /* handle only floating point Perms */
1693 if (! mode_is_float(get_irn_mode(pred)))
1694 return NO_NODE_ADDED;
1696 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1698 /* Perm is a pure virtual instruction on x87.
1699 All inputs must be on the FPU stack and are pairwise
1700 different from each other.
1701 So, all we need to do is to permutate the stack state. */
1702 n = get_irn_arity(irn);
1703 NEW_ARR_A(int, stack_pos, n);
1705 /* collect old stack positions */
1706 for (i = 0; i < n; ++i) {
1707 const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
1708 int idx = x87_on_stack(state, inreg->index);
1710 assert(idx >= 0 && "Perm argument not on x87 stack");
1714 /* now do the permutation */
1715 foreach_out_edge(irn, edge) {
1716 ir_node *proj = get_edge_src_irn(edge);
1717 const arch_register_t *out = x87_get_irn_register(proj);
1718 long num = get_Proj_proj(proj);
1720 assert(0 <= num && num < n && "More Proj's than Perm inputs");
1721 x87_set_st(state, out->index, proj, stack_pos[(unsigned)num]);
1723 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1725 return NO_NODE_ADDED;
1729 * Kill any dead registers at block start by popping them from the stack.
1731 * @param sim the simulator handle
1732 * @param block the current block
1733 * @param state the x87 state at the begin of the block
1735 static void x87_kill_deads(x87_simulator *const sim, ir_node *const block, x87_state *const state)
/* At block entry, pop every stack value that is not live into the block.
 * Builds a bit mask of dead slots; if everything is dead a single
 * FEMMS/EMMS clears the whole stack, otherwise values are brought to TOS
 * with fxch and removed with (double-)fpop.
 * NOTE(review): several lines (kill_mask declaration, loop/branch braces,
 * num_pop assignments) are elided in this extract. */
1737 ir_node *first_insn = sched_first(block);
1738 ir_node *keep = NULL;
1739 unsigned live = vfp_live_args_after(sim, block, 0);
1741 int i, depth, num_pop;
/* mark every stack slot whose register is not live-in as dead */
1744 depth = x87_get_depth(state);
1745 for (i = depth - 1; i >= 0; --i) {
1746 int reg = x87_get_st_reg(state, i);
1748 if (! is_vfp_live(reg, live))
1749 kill_mask |= (1 << i);
1753 DB((dbg, LEVEL_1, "Killing deads:\n"));
1754 DEBUG_ONLY(vfp_dump_live(live);)
1755 DEBUG_ONLY(x87_dump_stack(state);)
1757 if (kill_mask != 0 && live == 0) {
1758 /* special case: kill all registers */
1759 if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
1760 if (ia32_cg_config.use_femms) {
1761 /* use FEMMS on AMD processors to clear all */
1762 keep = new_bd_ia32_femms(NULL, block);
1764 /* use EMMS to clear all */
1765 keep = new_bd_ia32_emms(NULL, block);
1767 sched_add_before(first_insn, keep);
1773 /* now kill registers */
1775 /* we can only kill from TOS, so bring them up */
1776 if (! (kill_mask & 1)) {
1777 /* search from behind, because we can do a double-pop */
1778 for (i = depth - 1; i >= 0; --i) {
1779 if (kill_mask & (1 << i)) {
1780 kill_mask &= ~(1 << i);
/* mark the slot dead and exchange it with TOS */
1787 x87_set_st(state, -1, keep, i);
1788 x87_create_fxch(state, first_insn, i);
1791 if ((kill_mask & 3) == 3) {
1792 /* we can do a double-pop */
1796 /* only a single pop */
1801 kill_mask >>= num_pop;
1802 keep = x87_create_fpop(state, first_insn, num_pop);
1809 * Run a simulation and fix all virtual instructions for a block.
1811 * @param sim the simulator handle
1812 * @param block the current block
1814 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
/* Simulate one block: clone the begin state, kill dead registers, run the
 * registered sim_func of every scheduled node, then record the end state.
 * Successor blocks either inherit this end state as their begin state (and
 * are queued on the worklist) or, if they already have one, get an
 * x87_shuffle to make the states match.
 * NOTE(review): various lines (early return, local declarations, some
 * braces and comment delimiters) are elided in this extract. */
1817 blk_state *bl_state = x87_get_bl_state(sim, block);
1818 x87_state *state = bl_state->begin;
1819 ir_node *start_block;
1821 assert(state != NULL);
1822 /* already processed? */
1823 if (bl_state->end != NULL)
1826 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
1827 DB((dbg, LEVEL_2, "State at Block begin:\n "));
1828 DEBUG_ONLY(x87_dump_stack(state);)
1830 /* create a new state, will be changed */
1831 state = x87_clone_state(sim, state);
1832 /* at block begin, kill all dead registers */
1833 x87_kill_deads(sim, block, state);
1835 /* beware, n might change */
1836 for (n = sched_first(block); !sched_is_end(n); n = next) {
1839 ir_op *op = get_irn_op(n);
1842 * get the next node to be simulated here.
1843 * n might be completely removed from the schedule-
1845 next = sched_next(n);
1846 if (op->ops.generic != NULL) {
/* this opcode has a registered simulator function: run it */
1847 func = (sim_func)op->ops.generic;
1850 node_inserted = (*func)(state, n);
1853 * sim_func might have added an additional node after n,
1854 * so update next node
1855 * beware: n must not be changed by sim_func
1856 * (i.e. removed from schedule) in this case
1858 if (node_inserted != NO_NODE_ADDED)
1859 next = sched_next(n);
1863 start_block = get_irg_start_block(get_irn_irg(block));
1865 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state);)
1867 /* check if the state must be shuffled */
1868 foreach_block_succ(block, edge) {
1869 ir_node *succ = get_edge_src_irn(edge);
1870 blk_state *succ_state;
/* the start block is never a real successor to simulate into */
1872 if (succ == start_block)
1875 succ_state = x87_get_bl_state(sim, succ);
1877 if (succ_state->begin == NULL) {
1878 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
1879 DEBUG_ONLY(x87_dump_stack(state);)
1880 succ_state->begin = state;
1882 waitq_put(sim->worklist, succ);
1884 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
1885 /* There is already a begin state for the successor, bad.
1886 Do the necessary permutations.
1887 Note that critical edges are removed, so this is always possible:
1888 If the successor has more than one possible input, then it must
1891 x87_shuffle(block, state, succ_state->begin);
1894 bl_state->end = state;
1898 * Register a simulator function.
1900 * @param op the opcode to simulate
1901 * @param func the simulator function for the opcode
1903 static void register_sim(ir_op *op, sim_func func)
1905 assert(op->ops.generic == NULL);
1906 op->ops.generic = (op_func) func;
1910 * Create a new x87 simulator.
1912 * @param sim a simulator handle, will be initialized
1913 * @param irg the current graph
1915 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
/* Initialize the simulator handle for irg: set up the obstack, the
 * per-block state map and the liveness cache, then install a simulator
 * function on every opcode that needs x87 handling. */
1917 obstack_init(&sim->obst);
1918 sim->blk_states = pmap_create();
1919 sim->n_idx = get_irg_last_idx(irg);
1920 sim->live = OALLOCN(&sim->obst, vfp_liveness, sim->n_idx);
1922 DB((dbg, LEVEL_1, "--------------------------------\n"
1923 "x87 Simulator started for %+F\n", irg));
1925 /* set the generic function pointer of instruction we must simulate */
1926 ir_clear_opcodes_generic_func();
1928 register_sim(op_ia32_Asm, sim_Asm);
1929 register_sim(op_ia32_Call, sim_Call);
/* loads/constants */
1930 register_sim(op_ia32_vfld, sim_fld);
1931 register_sim(op_ia32_vfild, sim_fild);
1932 register_sim(op_ia32_vfld1, sim_fld1);
1933 register_sim(op_ia32_vfldz, sim_fldz);
/* arithmetic */
1934 register_sim(op_ia32_vfadd, sim_fadd);
1935 register_sim(op_ia32_vfsub, sim_fsub);
1936 register_sim(op_ia32_vfmul, sim_fmul);
1937 register_sim(op_ia32_vfdiv, sim_fdiv);
1938 register_sim(op_ia32_vfprem, sim_fprem);
1939 register_sim(op_ia32_vfabs, sim_fabs);
1940 register_sim(op_ia32_vfchs, sim_fchs);
/* stores */
1941 register_sim(op_ia32_vfist, sim_fist);
1942 register_sim(op_ia32_vfisttp, sim_fisttp);
1943 register_sim(op_ia32_vfst, sim_fst);
/* compares */
1944 register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
1945 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
1946 register_sim(op_ia32_vFucomi, sim_Fucom);
/* generic backend nodes */
1947 register_sim(op_be_Copy, sim_Copy);
1948 register_sim(op_be_Return, sim_Return);
1949 register_sim(op_be_Perm, sim_Perm);
1950 register_sim(op_be_Keep, sim_Keep);
1954 * Destroy a x87 simulator.
1956 * @param sim the simulator handle
1958 static void x87_destroy_simulator(x87_simulator *sim)
1960 pmap_destroy(sim->blk_states);
1961 obstack_free(&sim->obst, NULL);
1962 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
1966 * Pre-block walker: calculate the liveness information for the block
1967 * and store it into the sim->live cache.
1969 static void update_liveness_walker(ir_node *block, void *data)
1971 x87_simulator *sim = (x87_simulator*)data;
1972 update_liveness(sim, block);
1976 * Run a simulation and fix all virtual instructions for a graph.
1977 * Replaces all virtual floating point instructions and registers
1980 void ia32_x87_simulate_graph(ir_graph *irg)
1982 /* TODO improve code quality (less executed fxch) by using execfreqs */
1984 ir_node *block, *start_block;
1985 blk_state *bl_state;
1988 /* create the simulator */
1989 x87_init_simulator(&sim, irg);
1991 start_block = get_irg_start_block(irg);
1992 bl_state = x87_get_bl_state(&sim, start_block);
1994 /* start with the empty state */
1996 bl_state->begin = ∅
1998 sim.worklist = new_waitq();
1999 waitq_put(sim.worklist, start_block);
2001 be_assure_live_sets(irg);
2002 sim.lv = be_get_irg_liveness(irg);
2004 /* Calculate the liveness for all nodes. We must precalculate this info,
2005 * because the simulator adds new nodes (possible before Phi nodes) which
2006 * would let a lazy calculation fail.
2007 * On the other hand we reduce the computation amount due to
2008 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2010 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
2014 block = (ir_node*)waitq_get(sim.worklist);
2015 x87_simulate_block(&sim, block);
2016 } while (! waitq_empty(sim.worklist));
2019 del_waitq(sim.worklist);
2020 x87_destroy_simulator(&sim);
2023 /* Initializes the x87 simulator. */
2024 void ia32_init_x87(void)
2026 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");