2 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
33 #include "iredges_t.h"
48 #include "bearch_ia32_t.h"
49 #include "ia32_new_nodes.h"
50 #include "gen_ia32_new_nodes.h"
51 #include "gen_ia32_regalloc_if.h"
53 #include "ia32_architecture.h"
55 /** the debug handle */
56 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
58 /* Forward declaration. */
59 typedef struct x87_simulator x87_simulator;
62 * An entry on the simulated x87 stack.
64 typedef struct st_entry {
65 int reg_idx; /**< the virtual register index of this stack value */
66 ir_node *node; /**< the node that produced this value */
72 typedef struct x87_state {
73 st_entry st[N_ia32_st_REGS]; /**< the register stack */
74 int depth; /**< the current stack depth */
75 x87_simulator *sim; /**< The simulator. */
78 /** An empty state, used for blocks without fp instructions. */
79 static x87_state empty = { { {0, NULL}, }, 0, NULL };
82 * Return values of the instruction simulator functions.
85 NO_NODE_ADDED = 0, /**< No node that needs simulation was added. */
86 NODE_ADDED = 1 /**< A node that must be simulated was added by the simulator
87 in the schedule AFTER the current node. */
91 * The type of an instruction simulator function.
93 * @param state the x87 state
94 * @param n the node to be simulated
96 * @return NODE_ADDED if a node was added AFTER n in schedule that MUST be
98 * NO_NODE_ADDED otherwise
100 typedef int (*sim_func)(x87_state *state, ir_node *n);
103 * A block state: Every block has a x87 state at the beginning and at the end.
105 typedef struct blk_state {
106 x87_state *begin; /**< state at the begin or NULL if not assigned */
107 x87_state *end; /**< state at the end or NULL if not assigned */
110 /** liveness bitset for vfp registers. */
111 typedef unsigned char vfp_liveness;
116 struct x87_simulator {
117 struct obstack obst; /**< An obstack for fast allocating. */
118 pmap *blk_states; /**< Map blocks to states. */
119 be_lv_t *lv; /**< intrablock liveness. */
120 vfp_liveness *live; /**< Liveness information. */
121 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
122 waitq *worklist; /**< Worklist of blocks that must be processed. */
126 * Returns the current stack depth.
128 * @param state the x87 state
130 * @return the x87 stack depth
132 static int x87_get_depth(const x87_state *state)
137 static st_entry *x87_get_entry(x87_state *const state, int const pos)
139 assert(0 <= pos && pos < state->depth);
140 return &state->st[N_ia32_st_REGS - state->depth + pos];
144 * Return the virtual register index at st(pos).
146 * @param state the x87 state
147 * @param pos a stack position
149 * @return the vfp register index that produced the value at st(pos)
151 static int x87_get_st_reg(const x87_state *state, int pos)
153 return x87_get_entry((x87_state*)state, pos)->reg_idx;
158 * Dump the stack for debugging.
160 * @param state the x87 state
162 static void x87_dump_stack(const x87_state *state)
164 for (int i = state->depth; i-- != 0;) {
165 st_entry const *const entry = x87_get_entry((x87_state*)state, i);
166 DB((dbg, LEVEL_2, "vf%d(%+F) ", entry->reg_idx, entry->node));
168 DB((dbg, LEVEL_2, "<-- TOS\n"));
170 #endif /* DEBUG_libfirm */
173 * Set a virtual register to st(pos).
175 * @param state the x87 state
176 * @param reg_idx the vfp register index that should be set
177 * @param node the IR node that produces the value of the vfp register
178 * @param pos the stack position where the new value should be entered
180 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
182 st_entry *const entry = x87_get_entry(state, pos);
183 entry->reg_idx = reg_idx;
186 DB((dbg, LEVEL_2, "After SET_REG: "));
187 DEBUG_ONLY(x87_dump_stack(state);)
191 * Swap st(0) with st(pos).
193 * @param state the x87 state
194 * @param pos the stack position to change the tos with
196 static void x87_fxch(x87_state *state, int pos)
198 st_entry *const a = x87_get_entry(state, pos);
199 st_entry *const b = x87_get_entry(state, 0);
200 st_entry const t = *a;
204 DB((dbg, LEVEL_2, "After FXCH: "));
205 DEBUG_ONLY(x87_dump_stack(state);)
209 * Convert a virtual register to the stack index.
211 * @param state the x87 state
212 * @param reg_idx the register vfp index
214 * @return the stack position where the register is stacked
215 * or -1 if the virtual register was not found
217 static int x87_on_stack(const x87_state *state, int reg_idx)
219 for (int i = 0; i < state->depth; ++i) {
220 if (x87_get_st_reg(state, i) == reg_idx)
227 * Push a virtual Register onto the stack, double pushes are NOT allowed.
229 * @param state the x87 state
230 * @param reg_idx the register vfp index
231 * @param node the node that produces the value of the vfp register
233 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
235 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
236 assert(state->depth < N_ia32_st_REGS && "stack overrun");
239 st_entry *const entry = x87_get_entry(state, 0);
240 entry->reg_idx = reg_idx;
243 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state);)
247 * Pop a virtual Register from the stack.
249 * @param state the x87 state
251 static void x87_pop(x87_state *state)
253 assert(state->depth > 0 && "stack underrun");
257 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state);)
261 * Empty the fpu stack
263 * @param state the x87 state
265 static void x87_emms(x87_state *state)
271 * Returns the block state of a block.
273 * @param sim the x87 simulator handle
274 * @param block the current block
276 * @return the block state
278 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
280 blk_state *res = pmap_get(blk_state, sim->blk_states, block);
283 res = OALLOC(&sim->obst, blk_state);
287 pmap_insert(sim->blk_states, block, res);
296 * @param sim the x87 simulator handle
297 * @param src the x87 state that will be cloned
299 * @return a cloned copy of the src state
301 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
303 x87_state *const res = OALLOC(&sim->obst, x87_state);
309 * Patch a virtual instruction into a x87 one and return
310 * the node representing the result value.
312 * @param n the IR node to patch
313 * @param op the x87 opcode to patch in
315 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
317 ir_mode *mode = get_irn_mode(n);
322 if (mode == mode_T) {
323 /* patch all Proj's */
324 foreach_out_edge(n, edge) {
325 ir_node *proj = get_edge_src_irn(edge);
327 mode = get_irn_mode(proj);
328 if (mode_is_float(mode)) {
330 set_irn_mode(proj, ia32_reg_classes[CLASS_ia32_st].mode);
334 } else if (mode_is_float(mode))
335 set_irn_mode(n, ia32_reg_classes[CLASS_ia32_st].mode);
340 * Returns the first Proj of a mode_T node having a given mode.
342 * @param n the mode_T node
343 * @param m the desired mode of the Proj
344 * @return The first Proj of mode @p m found.
346 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
348 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
350 foreach_out_edge(n, edge) {
351 ir_node *proj = get_edge_src_irn(edge);
352 if (get_irn_mode(proj) == m)
356 panic("Proj not found");
360 * Wrap the arch_* function here so we can check for errors.
362 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
364 const arch_register_t *res = arch_get_irn_register(irn);
366 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
370 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
373 const arch_register_t *res = arch_get_irn_register_out(irn, pos);
375 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
379 static inline const arch_register_t *get_st_reg(int index)
381 return &ia32_registers[REG_ST0 + index];
385 * Create a fxch node before another node.
387 * @param state the x87 state
388 * @param n the node after the fxch
389 * @param pos exchange st(pos) with st(0)
391 static void x87_create_fxch(x87_state *state, ir_node *n, int pos)
393 x87_fxch(state, pos);
395 ir_node *const block = get_nodes_block(n);
396 ir_node *const fxch = new_bd_ia32_fxch(NULL, block);
397 ia32_x87_attr_t *const attr = get_ia32_x87_attr(fxch);
398 attr->reg = get_st_reg(pos);
402 sched_add_before(n, fxch);
403 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fxch), attr->reg->name));
406 /* -------------- x87 perm --------------- */
409 * Calculate the necessary permutations to reach dst_state.
411 * These permutations are done with fxch instructions and placed
412 * at the end of the block.
414 * Note that critical edges are removed here, so we need only
415 * a shuffle if the current block has only one successor.
417 * @param block the current block
418 * @param state the current x87 stack state, might be modified
419 * @param dst_state destination state
423 static x87_state *x87_shuffle(ir_node *block, x87_state *state, const x87_state *dst_state)
425 int i, n_cycles, k, ri;
426 unsigned cycles[4], all_mask;
427 char cycle_idx[4][8];
429 assert(state->depth == dst_state->depth);
431 /* Some mathematics here:
432 * If we have a cycle of length n that includes the tos,
433 * we need n-1 exchange operations.
434 * We can always add the tos and restore it, so we need
435 * n+1 exchange operations for a cycle not containing the tos.
436 * So, the maximum of needed operations is for a cycle of 7
437 * not including the tos == 8.
438 * This is the same number of ops we would need for using stores,
439 * so exchange is cheaper (we save the loads).
440 * On the other hand, we might need an additional exchange
441 * in the next block to bring one operand on top, so the
442 * number of ops in the first case is identical.
443 * Further, no more than 4 cycles can exists (4 x 2). */
444 all_mask = (1 << (state->depth)) - 1;
446 for (n_cycles = 0; all_mask; ++n_cycles) {
447 int src_idx, dst_idx;
449 /* find the first free slot */
450 for (i = 0; i < state->depth; ++i) {
451 if (all_mask & (1 << i)) {
452 all_mask &= ~(1 << i);
454 /* check if there are differences here */
455 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
461 /* no more cycles found */
466 cycles[n_cycles] = (1 << i);
467 cycle_idx[n_cycles][k++] = i;
468 for (src_idx = i; ; src_idx = dst_idx) {
469 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
471 if ((all_mask & (1 << dst_idx)) == 0)
474 cycle_idx[n_cycles][k++] = dst_idx;
475 cycles[n_cycles] |= (1 << dst_idx);
476 all_mask &= ~(1 << dst_idx);
478 cycle_idx[n_cycles][k] = -1;
482 /* no permutation needed */
486 /* Hmm: permutation needed */
487 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
488 DEBUG_ONLY(x87_dump_stack(state);)
489 DB((dbg, LEVEL_2, " to\n"));
490 DEBUG_ONLY(x87_dump_stack(dst_state);)
494 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
495 for (ri = 0; ri < n_cycles; ++ri) {
496 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
497 for (k = 0; cycle_idx[ri][k] != -1; ++k)
498 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
499 DB((dbg, LEVEL_2, "\n"));
504 * Find the place node must be insert.
505 * We have only one successor block, so the last instruction should
508 ir_node *const before = sched_last(block);
509 assert(is_cfop(before));
511 /* now do the permutations */
512 for (ri = 0; ri < n_cycles; ++ri) {
513 if ((cycles[ri] & 1) == 0) {
514 /* this cycle does not include the tos */
515 x87_create_fxch(state, before, cycle_idx[ri][0]);
517 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
518 x87_create_fxch(state, before, cycle_idx[ri][k]);
520 if ((cycles[ri] & 1) == 0) {
521 /* this cycle does not include the tos */
522 x87_create_fxch(state, before, cycle_idx[ri][0]);
529 * Create a fpush before node n.
531 * @param state the x87 state
532 * @param n the node after the fpush
533 * @param pos push st(pos) on stack
534 * @param val the value to push
536 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int const out_reg_idx, ir_node *const val)
538 x87_push(state, out_reg_idx, val);
540 ir_node *const fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
541 ia32_x87_attr_t *const attr = get_ia32_x87_attr(fpush);
542 attr->reg = get_st_reg(pos);
545 sched_add_before(n, fpush);
547 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpush), attr->reg->name));
551 * Create a fpop before node n.
553 * @param state the x87 state
554 * @param n the node after the fpop
555 * @param num pop 1 or 2 values
557 * @return the fpop node
559 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
561 ir_node *fpop = NULL;
562 ia32_x87_attr_t *attr;
567 if (ia32_cg_config.use_ffreep)
568 fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
570 fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
571 attr = get_ia32_x87_attr(fpop);
572 attr->reg = get_st_reg(0);
575 sched_add_before(n, fpop);
576 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->reg->name));
581 /* --------------------------------- liveness ------------------------------------------ */
584 * The liveness transfer function.
585 * Updates a live set over a single step from a given node to its predecessor.
586 * Everything defined at the node is removed from the set, the uses of the node get inserted.
588 * @param irn The node at which liveness should be computed.
589 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
590 * the registers live after irn.
592 * @return The live bitset.
594 static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
597 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
599 if (get_irn_mode(irn) == mode_T) {
600 foreach_out_edge(irn, edge) {
601 ir_node *proj = get_edge_src_irn(edge);
603 if (arch_irn_consider_in_reg_alloc(cls, proj)) {
604 const arch_register_t *reg = x87_get_irn_register(proj);
605 live &= ~(1 << reg->index);
608 } else if (arch_irn_consider_in_reg_alloc(cls, irn)) {
609 const arch_register_t *reg = x87_get_irn_register(irn);
610 live &= ~(1 << reg->index);
613 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
614 ir_node *op = get_irn_n(irn, i);
616 if (mode_is_float(get_irn_mode(op)) &&
617 arch_irn_consider_in_reg_alloc(cls, op)) {
618 const arch_register_t *reg = x87_get_irn_register(op);
619 live |= 1 << reg->index;
626 * Put all live virtual registers at the end of a block into a bitset.
628 * @param sim the simulator handle
629 * @param bl the block
631 * @return The live bitset at the end of this block
633 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
635 vfp_liveness live = 0;
636 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
637 const be_lv_t *lv = sim->lv;
639 be_lv_foreach(lv, block, be_lv_state_end, node) {
640 const arch_register_t *reg;
641 if (!arch_irn_consider_in_reg_alloc(cls, node))
644 reg = x87_get_irn_register(node);
645 live |= 1 << reg->index;
651 /** get the register mask from an arch_register */
652 #define REGMASK(reg) (1 << (reg->index))
655 * Return a bitset of argument registers which are live at the end of a node.
657 * @param sim the simulator handle
658 * @param pos the node
659 * @param kill kill mask for the output registers
661 * @return The live bitset.
663 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
665 unsigned idx = get_irn_idx(pos);
667 assert(idx < sim->n_idx);
668 return sim->live[idx] & ~kill;
672 * Calculate the liveness for a whole block and cache it.
674 * @param sim the simulator handle
675 * @param block the block
677 static void update_liveness(x87_simulator *sim, ir_node *block)
679 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
682 /* now iterate through the block backward and cache the results */
683 sched_foreach_reverse(block, irn) {
684 /* stop at the first Phi: this produces the live-in */
688 idx = get_irn_idx(irn);
689 sim->live[idx] = live;
691 live = vfp_liveness_transfer(irn, live);
693 idx = get_irn_idx(block);
694 sim->live[idx] = live;
698 * Returns true if a register is live in a set.
700 * @param reg_idx the vfp register index
701 * @param live a live bitset
703 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
707 * Dump liveness info.
709 * @param live the live bitset
711 static void vfp_dump_live(vfp_liveness live)
715 DB((dbg, LEVEL_2, "Live after: "));
716 for (i = 0; i < 8; ++i) {
717 if (live & (1 << i)) {
718 DB((dbg, LEVEL_2, "vf%d ", i));
721 DB((dbg, LEVEL_2, "\n"));
723 #endif /* DEBUG_libfirm */
725 /* --------------------------------- simulators ---------------------------------------- */
728 * Simulate a virtual binop.
730 * @param state the x87 state
731 * @param n the node that should be simulated (and patched)
733 * @return NO_NODE_ADDED
735 static int sim_binop(x87_state *const state, ir_node *const n, ir_op *const op)
737 ir_node *patched_insn;
738 x87_simulator *sim = state->sim;
739 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
740 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
741 const arch_register_t *op1_reg = x87_get_irn_register(op1);
742 const arch_register_t *op2_reg = x87_get_irn_register(op2);
743 const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
744 int reg_index_1 = op1_reg->index;
745 int reg_index_2 = op2_reg->index;
746 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
750 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n, op1_reg->name, op2_reg->name, out->name));
751 DEBUG_ONLY(vfp_dump_live(live);)
752 DB((dbg, LEVEL_1, "Stack before: "));
753 DEBUG_ONLY(x87_dump_stack(state);)
755 int op1_idx = x87_on_stack(state, reg_index_1);
756 assert(op1_idx >= 0);
757 op1_live_after = is_vfp_live(reg_index_1, live);
762 int const out_reg_idx = out->index;
763 ia32_x87_attr_t *const attr = get_ia32_x87_attr(n);
764 if (reg_index_2 != REG_VFP_VFP_NOREG) {
765 /* second operand is a vfp register */
766 op2_idx = x87_on_stack(state, reg_index_2);
767 assert(op2_idx >= 0);
768 op2_live_after = is_vfp_live(reg_index_2, live);
770 if (op2_live_after) {
771 /* Second operand is live. */
773 if (op1_live_after) {
774 /* Both operands are live: push the first one.
775 This works even for op1 == op2. */
776 x87_create_fpush(state, n, op1_idx, out_reg_idx, op2);
777 /* now do fxxx (tos=tos X op) */
782 /* Second live, first operand is dead here, bring it to tos. */
784 x87_create_fxch(state, n, op1_idx);
789 /* now do fxxx (tos=tos X op) */
793 /* Second operand is dead. */
794 if (op1_live_after) {
795 /* First operand is live: bring second to tos. */
797 x87_create_fxch(state, n, op2_idx);
802 /* now do fxxxr (tos = op X tos) */
805 /* Both operands are dead here, pop them from the stack. */
808 /* Both are identically and on tos, no pop needed. */
809 /* here fxxx (tos = tos X tos) */
812 /* now do fxxxp (op = op X tos, pop) */
816 } else if (op1_idx == 0) {
817 assert(op1_idx != op2_idx);
818 /* now do fxxxrp (op = tos X op, pop) */
822 /* Bring the second on top. */
823 x87_create_fxch(state, n, op2_idx);
824 if (op1_idx == op2_idx) {
825 /* Both are identically and on tos now, no pop needed. */
828 /* use fxxx (tos = tos X tos) */
831 /* op2 is on tos now */
833 /* use fxxxp (op = op X tos, pop) */
841 /* second operand is an address mode */
842 if (op1_live_after) {
843 /* first operand is live: push it here */
844 x87_create_fpush(state, n, op1_idx, out_reg_idx, op1);
846 /* first operand is dead: bring it to tos */
848 x87_create_fxch(state, n, op1_idx);
851 op1_idx = attr->attr.data.ins_permuted ? -1 : 0;
852 op2_idx = attr->attr.data.ins_permuted ? 0 : -1;
855 assert(op1_idx == 0 || op2_idx == 0);
856 assert(out_idx == op1_idx || out_idx == op2_idx);
858 patched_insn = x87_patch_insn(n, op);
859 x87_set_st(state, out_reg_idx, patched_insn, out_idx);
863 /* patch the operation */
864 int const reg_idx = op1_idx != 0 ? op1_idx : op2_idx;
865 attr->reg = reg_idx >= 0 ? get_st_reg(reg_idx) : NULL;
866 attr->attr.data.ins_permuted = op1_idx != 0;
867 attr->res_in_reg = out_idx != 0;
871 char const *const l = op1_idx >= 0 ? get_st_reg(op1_idx)->name : "[AM]";
872 char const *const r = op2_idx >= 0 ? get_st_reg(op2_idx)->name : "[AM]";
873 char const *const o = get_st_reg(out_idx)->name;
874 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n), l, r, o));
877 return NO_NODE_ADDED;
881 * Simulate a virtual Unop.
883 * @param state the x87 state
884 * @param n the node that should be simulated (and patched)
885 * @param op the x87 opcode that will replace n's opcode
887 * @return NO_NODE_ADDED
889 static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
891 arch_register_t const *const out = x87_get_irn_register(n);
892 unsigned const live = vfp_live_args_after(state->sim, n, REGMASK(out));
893 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
894 DEBUG_ONLY(vfp_dump_live(live);)
896 ir_node *const op1 = get_irn_n(n, 0);
897 arch_register_t const *const op1_reg = x87_get_irn_register(op1);
898 int const op1_reg_idx = op1_reg->index;
899 int const op1_idx = x87_on_stack(state, op1_reg_idx);
900 int const out_reg_idx = out->index;
901 if (is_vfp_live(op1_reg_idx, live)) {
902 /* push the operand here */
903 x87_create_fpush(state, n, op1_idx, out_reg_idx, op1);
905 /* operand is dead, bring it to tos */
907 x87_create_fxch(state, n, op1_idx);
911 x87_set_st(state, out_reg_idx, x87_patch_insn(n, op), 0);
912 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), get_st_reg(0)->name));
914 return NO_NODE_ADDED;
918 * Simulate a virtual Load instruction.
920 * @param state the x87 state
921 * @param n the node that should be simulated (and patched)
922 * @param op the x87 opcode that will replace n's opcode
924 * @return NO_NODE_ADDED
926 static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
928 const arch_register_t *out = x87_irn_get_register(n, res_pos);
930 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
931 x87_push(state, out->index, x87_patch_insn(n, op));
932 assert(out == x87_irn_get_register(n, res_pos));
933 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), get_st_reg(0)->name));
935 return NO_NODE_ADDED;
939 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
941 * @param store The store
942 * @param old_val The former value
943 * @param new_val The new value
945 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
947 foreach_out_edge_safe(old_val, edge) {
948 ir_node *user = get_edge_src_irn(edge);
949 /* if the user is scheduled after the store: rewire */
950 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
951 set_irn_n(user, get_edge_src_pos(edge), new_val);
957 * Simulate a virtual Store.
959 * @param state the x87 state
960 * @param n the node that should be simulated (and patched)
961 * @param op the x87 store opcode
963 static int sim_store(x87_state *state, ir_node *n, ir_op *op)
965 ir_node *const val = get_irn_n(n, n_ia32_vfst_val);
966 arch_register_t const *const op2 = x87_get_irn_register(val);
967 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, op2->name));
970 int insn = NO_NODE_ADDED;
971 int const op2_reg_idx = op2->index;
972 int const op2_idx = x87_on_stack(state, op2_reg_idx);
973 unsigned const live = vfp_live_args_after(state->sim, n, 0);
974 int const live_after_node = is_vfp_live(op2_reg_idx, live);
975 assert(op2_idx >= 0);
976 if (live_after_node) {
977 /* Problem: fst doesn't support 80bit modes (spills), only fstp does
978 * fist doesn't support 64bit mode, only fistp
980 * - stack not full: push value and fstp
981 * - stack full: fstp value and load again
982 * Note that we cannot test on mode_E, because floats might be 80bit ... */
983 ir_mode *const mode = get_ia32_ls_mode(n);
984 if (get_mode_size_bits(mode) > (mode_is_int(mode) ? 32 : 64)) {
985 if (x87_get_depth(state) < N_ia32_st_REGS) {
986 /* ok, we have a free register: push + fstp */
987 x87_create_fpush(state, n, op2_idx, REG_VFP_VFP_NOREG, val);
988 x87_patch_insn(n, op);
991 /* stack full here: need fstp + load */
992 x87_patch_insn(n, op);
995 ir_node *const block = get_nodes_block(n);
996 ir_node *const mem = get_irn_Proj_for_mode(n, mode_M);
997 ir_node *const vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), mem, mode);
999 /* copy all attributes */
1000 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1001 if (is_ia32_use_frame(n))
1002 set_ia32_use_frame(vfld);
1003 set_ia32_op_type(vfld, ia32_AddrModeS);
1004 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1005 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1006 set_ia32_ls_mode(vfld, mode);
1008 ir_node *const rproj = new_r_Proj(vfld, mode, pn_ia32_vfld_res);
1009 ir_node *const mproj = new_r_Proj(vfld, mode_M, pn_ia32_vfld_M);
1011 arch_set_irn_register(rproj, op2);
1013 /* reroute all former users of the store memory to the load memory */
1014 edges_reroute_except(mem, mproj, vfld);
1016 sched_add_after(n, vfld);
1018 /* rewire all users, scheduled after the store, to the loaded value */
1019 collect_and_rewire_users(n, val, rproj);
1024 /* we can only store the tos to memory */
1026 x87_create_fxch(state, n, op2_idx);
1028 /* mode size 64 or smaller -> use normal fst */
1029 x87_patch_insn(n, op);
1032 /* we can only store the tos to memory */
1034 x87_create_fxch(state, n, op2_idx);
1036 x87_patch_insn(n, op);
1043 ia32_x87_attr_t *const attr = get_ia32_x87_attr(n);
1045 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), get_st_reg(0)->name));
1050 #define GEN_BINOP(op) \
1051 static int sim_##op(x87_state *state, ir_node *n) { \
1052 return sim_binop(state, n, op_ia32_##op); \
1055 #define GEN_LOAD(op) \
1056 static int sim_##op(x87_state *state, ir_node *n) { \
1057 return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
1060 #define GEN_UNOP(op) \
1061 static int sim_##op(x87_state *state, ir_node *n) { \
1062 return sim_unop(state, n, op_ia32_##op); \
1065 #define GEN_STORE(op) \
1066 static int sim_##op(x87_state *state, ir_node *n) { \
1067 return sim_store(state, n, op_ia32_##op); \
1087 static int sim_fprem(x87_state *const state, ir_node *const n)
1091 panic("TODO implement");
1092 return NO_NODE_ADDED;
1096 * Simulate a virtual fisttp.
1098 * @param state the x87 state
1099 * @param n the node that should be simulated (and patched)
1101 * @return NO_NODE_ADDED
1103 static int sim_fisttp(x87_state *state, ir_node *n)
1105 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1106 const arch_register_t *op2 = x87_get_irn_register(val);
1108 int const op2_idx = x87_on_stack(state, op2->index);
1109 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, op2->name));
1110 assert(op2_idx >= 0);
1112 /* Note: although the value is still live here, it is destroyed because
1113 of the pop. The register allocator is aware of that and introduced a copy
1114 if the value must be alive. */
1116 /* we can only store the tos to memory */
1118 x87_create_fxch(state, n, op2_idx);
1121 x87_patch_insn(n, op_ia32_fisttp);
1123 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), get_st_reg(0)->name));
1125 return NO_NODE_ADDED;
1129 * Simulate a virtual FtstFnstsw.
1131 * @param state the x87 state
1132 * @param n the node that should be simulated (and patched)
1134 * @return NO_NODE_ADDED
1136 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1138 x87_simulator *sim = state->sim;
1139 ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1140 const arch_register_t *reg1 = x87_get_irn_register(op1_node);
1141 int reg_index_1 = reg1->index;
1142 int op1_idx = x87_on_stack(state, reg_index_1);
1143 unsigned live = vfp_live_args_after(sim, n, 0);
1145 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, reg1->name));
1146 DEBUG_ONLY(vfp_dump_live(live);)
1147 DB((dbg, LEVEL_1, "Stack before: "));
1148 DEBUG_ONLY(x87_dump_stack(state);)
1149 assert(op1_idx >= 0);
1152 /* bring the value to tos */
1153 x87_create_fxch(state, n, op1_idx);
1156 /* patch the operation */
1157 x87_patch_insn(n, op_ia32_FtstFnstsw);
1159 if (!is_vfp_live(reg_index_1, live))
1160 x87_create_fpop(state, sched_next(n), 1);
1162 return NO_NODE_ADDED;
1168 * @param state the x87 state
1169 * @param n the node that should be simulated (and patched)
1171 * @return NO_NODE_ADDED
1173 static int sim_Fucom(x87_state *state, ir_node *n)
1175 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1177 x87_simulator *sim = state->sim;
1178 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1179 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1180 const arch_register_t *op1 = x87_get_irn_register(op1_node);
1181 const arch_register_t *op2 = x87_get_irn_register(op2_node);
1182 int reg_index_1 = op1->index;
1183 int reg_index_2 = op2->index;
1184 unsigned live = vfp_live_args_after(sim, n, 0);
1186 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n, op1->name, op2->name));
1187 DEBUG_ONLY(vfp_dump_live(live);)
1188 DB((dbg, LEVEL_1, "Stack before: "));
1189 DEBUG_ONLY(x87_dump_stack(state);)
1191 int op1_idx = x87_on_stack(state, reg_index_1);
1192 assert(op1_idx >= 0);
1196 /* BEWARE: check for comp a,a cases, they might happen */
1197 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1198 /* second operand is a vfp register */
1199 op2_idx = x87_on_stack(state, reg_index_2);
1200 assert(op2_idx >= 0);
1202 if (is_vfp_live(reg_index_2, live)) {
1203 /* second operand is live */
1205 if (is_vfp_live(reg_index_1, live)) {
1206 /* both operands are live */
1207 if (op1_idx != 0 && op2_idx != 0) {
1208 /* bring the first one to tos */
1209 x87_create_fxch(state, n, op1_idx);
1210 if (op1_idx == op2_idx)
1213 /* res = tos X op */
1216 /* second live, first operand is dead here, bring it to tos.
1217 This means further, op1_idx != op2_idx. */
1218 assert(op1_idx != op2_idx);
1220 x87_create_fxch(state, n, op1_idx);
1225 /* res = tos X op, pop */
1229 /* second operand is dead */
1230 if (is_vfp_live(reg_index_1, live)) {
1231 /* first operand is live: bring second to tos.
1232 This means further, op1_idx != op2_idx. */
1233 assert(op1_idx != op2_idx);
1235 x87_create_fxch(state, n, op2_idx);
1240 /* res = op X tos, pop */
1243 /* both operands are dead here, check first for identity. */
1244 if (op1_idx == op2_idx) {
1245 /* identically, one pop needed */
1247 x87_create_fxch(state, n, op1_idx);
1251 /* res = tos X op, pop */
1254 /* different, move them to st and st(1) and pop both.
1255 The tricky part is to get one into st(1).*/
1256 else if (op2_idx == 1) {
1257 /* good, second operand is already in the right place, move the first */
1259 /* bring the first on top */
1260 x87_create_fxch(state, n, op1_idx);
1261 assert(op2_idx != 0);
1264 /* res = tos X op, pop, pop */
1266 } else if (op1_idx == 1) {
1267 /* good, first operand is already in the right place, move the second */
1269 /* bring the first on top */
1270 x87_create_fxch(state, n, op2_idx);
1271 assert(op1_idx != 0);
1274 /* res = op X tos, pop, pop */
1277 /* if one is already the TOS, we need two fxch */
1279 /* first one is TOS, move to st(1) */
1280 x87_create_fxch(state, n, 1);
1281 assert(op2_idx != 1);
1283 x87_create_fxch(state, n, op2_idx);
1285 /* res = op X tos, pop, pop */
1287 } else if (op2_idx == 0) {
1288 /* second one is TOS, move to st(1) */
1289 x87_create_fxch(state, n, 1);
1290 assert(op1_idx != 1);
1292 x87_create_fxch(state, n, op1_idx);
1294 /* res = tos X op, pop, pop */
1297 /* none of them is either TOS or st(1), 3 fxch needed */
1298 x87_create_fxch(state, n, op2_idx);
1299 assert(op1_idx != 0);
1300 x87_create_fxch(state, n, 1);
1302 x87_create_fxch(state, n, op1_idx);
1304 /* res = tos X op, pop, pop */
1311 /* second operand is an address mode */
1313 x87_create_fxch(state, n, op1_idx);
1314 /* Pop first operand, if it is dead. */
1315 if (!is_vfp_live(reg_index_1, live))
1318 op1_idx = attr->attr.data.ins_permuted ? -1 : 0;
1319 op2_idx = attr->attr.data.ins_permuted ? 0 : -1;
1321 assert(op1_idx == 0 || op2_idx == 0);
1323 /* patch the operation */
1324 if (is_ia32_vFucomFnstsw(n)) {
1325 dst = pops == 2 ? op_ia32_FucomppFnstsw : op_ia32_FucomFnstsw;
1326 for (int i = 0; i < pops; ++i)
1328 } else if (is_ia32_vFucomi(n)) {
1329 dst = op_ia32_Fucomi;
1333 x87_create_fpop(state, sched_next(n), 1);
1335 panic("invalid operation %+F", n);
1338 x87_patch_insn(n, dst);
1340 int const reg_idx = op1_idx != 0 ? op1_idx : op2_idx;
1341 attr->reg = reg_idx >= 0 ? get_st_reg(reg_idx) : NULL;
1342 attr->attr.data.ins_permuted = op1_idx != 0;
1343 attr->pop = pops != 0;
1346 char const *const l = op1_idx >= 0 ? get_st_reg(op1_idx)->name : "[AM]";
1347 char const *const r = op2_idx >= 0 ? get_st_reg(op2_idx)->name : "[AM]";
1348 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n), l, r));
1351 return NO_NODE_ADDED;
1357 * @param state the x87 state
1358 * @param n the node that should be simulated (and patched)
1360 * @return NO_NODE_ADDED
1362 static int sim_Keep(x87_state *state, ir_node *node)
1365 const arch_register_t *op_reg;
/* NOTE(review): a be_Keep emits no x87 code itself; we only pop stack
 * slots whose value is dead after the Keep. */
1371 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1373 arity = get_irn_arity(node);
1374 for (i = 0; i < arity; ++i) {
1375 op = get_irn_n(node, i);
1376 op_reg = arch_get_irn_register(op);
/* only virtual fp (vfp) operands live on the simulated x87 stack */
1377 if (op_reg->reg_class != &ia32_reg_classes[CLASS_ia32_vfp])
1380 reg_id = op_reg->index;
1381 live = vfp_live_args_after(state->sim, node, 0);
1383 op_stack_idx = x87_on_stack(state, reg_id);
/* value is on the stack but dead afterwards: schedule an fpop after the Keep */
1384 if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live))
1385 x87_create_fpop(state, sched_next(node), 1);
1388 DB((dbg, LEVEL_1, "Stack after: "));
1389 DEBUG_ONLY(x87_dump_stack(state);)
1391 return NO_NODE_ADDED;
1395 * Keep the given node alive by adding a be_Keep.
1397 * @param node the node to kept alive
1399 static void keep_float_node_alive(ir_node *node)
1401 ir_node *block = get_nodes_block(node);
1402 ir_node *keep = be_new_Keep(block, 1, &node);
1403 sched_add_after(node, keep);
1407 * Create a copy of a node. Recreate the node if it's a constant.
1409 * @param state the x87 state
1410 * @param n the node to be copied
1412 * @return the copy of n
1414 static ir_node *create_Copy(x87_state *state, ir_node *n)
1416 dbg_info *n_dbg = get_irn_dbg_info(n);
1417 ir_mode *mode = get_irn_mode(n);
1418 ir_node *block = get_nodes_block(n);
1419 ir_node *pred = get_irn_n(n, 0);
/* constructor used to re-materialize a constant instead of copying it;
 * stays NULL for non-constant predecessors */
1420 ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL;
1422 const arch_register_t *out;
1423 const arch_register_t *op1;
1425 /* Do not copy constants, recreate them. */
1426 switch (get_ia32_irn_opcode(pred)) {
1428 cnstr = new_bd_ia32_fldz;
1431 cnstr = new_bd_ia32_fld1;
1433 case iro_ia32_fldpi:
1434 cnstr = new_bd_ia32_fldpi;
1436 case iro_ia32_fldl2e:
1437 cnstr = new_bd_ia32_fldl2e;
1439 case iro_ia32_fldl2t:
1440 cnstr = new_bd_ia32_fldl2t;
1442 case iro_ia32_fldlg2:
1443 cnstr = new_bd_ia32_fldlg2;
1445 case iro_ia32_fldln2:
1446 cnstr = new_bd_ia32_fldln2;
1452 out = x87_get_irn_register(n);
1453 op1 = x87_get_irn_register(pred);
1455 if (cnstr != NULL) {
1456 /* copy a constant */
1457 res = (*cnstr)(n_dbg, block, mode);
/* a re-created constant is a fresh push onto the simulated stack */
1459 x87_push(state, out->index, res);
1461 int op1_idx = x87_on_stack(state, op1->index);
/* non-constant: emit an fpushCopy that references st(op1_idx) */
1463 res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);
1465 x87_push(state, out->index, res);
1467 ia32_x87_attr_t *const attr = get_ia32_x87_attr(res);
1468 attr->reg = get_st_reg(op1_idx);
1470 arch_set_irn_register(res, out);
1476 * Simulate a be_Copy.
1478 * @param state the x87 state
1479 * @param n the node that should be simulated (and patched)
1481 * @return NO_NODE_ADDED
1483 static int sim_Copy(x87_state *state, ir_node *n)
1485 arch_register_class_t const *const cls = arch_get_irn_reg_class(n);
/* copies of non-fp values need no x87 handling */
1486 if (cls != &ia32_reg_classes[CLASS_ia32_vfp])
1487 return NO_NODE_ADDED;
1489 ir_node *const pred = be_get_Copy_op(n);
1490 arch_register_t const *const op1 = x87_get_irn_register(pred);
1491 arch_register_t const *const out = x87_get_irn_register(n);
1492 unsigned const live = vfp_live_args_after(state->sim, n, REGMASK(out));
1494 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n, op1->name, out->name));
1495 DEBUG_ONLY(vfp_dump_live(live);)
1497 if (is_vfp_live(op1->index, live)) {
1498 /* Operand is still live, a real copy. We need here an fpush that can
1499 hold a register, so use the fpushCopy or recreate constants */
1500 ir_node *const node = create_Copy(state, n);
1502 /* We have to make sure the old value doesn't go dead (which can happen
1503 * when we recreate constants). As the simulator expected that value in
1504 * the pred blocks. This is unfortunate as removing it would save us 1
1505 * instruction, but we would have to rerun all the simulation to get
1508 ir_node *const next = sched_next(n);
1511 sched_add_before(next, node);
1513 if (get_irn_n_edges(pred) == 0) {
1514 keep_float_node_alive(pred);
1517 DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
1519 /* Just a virtual copy. */
1520 int const op1_idx = x87_on_stack(state, op1->index);
/* dead source: no instruction needed, just alias the output register to
 * the source's stack slot in the simulated state */
1521 x87_set_st(state, out->index, n, op1_idx);
1523 return NO_NODE_ADDED;
1527 * Returns the vf0 result Proj of a Call.
1529 * @param call the Call node
1531 static ir_node *get_call_result_proj(ir_node *call)
1533 /* search the result proj */
1534 foreach_out_edge(call, edge) {
1535 ir_node *proj = get_edge_src_irn(edge);
1536 long pn = get_Proj_proj(proj);
1538 if (pn == pn_ia32_Call_vf0)
/* no Proj for the fp result found: the graph is inconsistent */
1542 panic("result Proj missing");
1545 static int sim_Asm(x87_state *const state, ir_node *const n)
1549 for (size_t i = get_irn_arity(n); i-- != 0;) {
1550 arch_register_req_t const *const req = arch_get_irn_register_req_in(n, i);
1551 if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1552 panic("cannot handle %+F with x87 constraints", n);
1555 for (size_t i = arch_get_irn_n_outs(n); i-- != 0;) {
1556 arch_register_req_t const *const req = arch_get_irn_register_req_out(n, i);
1557 if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1558 panic("cannot handle %+F with x87 constraints", n);
1561 return NO_NODE_ADDED;
1565 * Simulate a ia32_Call.
1567 * @param state the x87 state
1568 * @param n the node that should be simulated (and patched)
1570 * @return NO_NODE_ADDED
1572 static int sim_Call(x87_state *state, ir_node *n)
1574 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1576 /* at the begin of a call the x87 state should be empty */
1577 assert(state->depth == 0 && "stack not empty before call");
1579 ir_type *const call_tp = get_ia32_call_attr_const(n)->call_tp;
1580 if (get_method_n_ress(call_tp) != 0) {
1581 /* If the called function returns a float, it is returned in st(0).
1582 * This even happens if the return value is NOT used.
1583 * Moreover, only one return result is supported. */
1584 ir_type *const res_type = get_method_res_type(call_tp, 0);
1585 ir_mode *const mode = get_type_mode(res_type);
1586 if (mode && mode_is_float(mode)) {
1587 ir_node *const resproj = get_call_result_proj(n);
1588 arch_register_t const *const reg = x87_get_irn_register(resproj);
/* model the ABI: the callee leaves its fp result on top of the stack */
1589 x87_push(state, reg->index, resproj);
1592 DB((dbg, LEVEL_1, "Stack after: "));
1593 DEBUG_ONLY(x87_dump_stack(state);)
1595 return NO_NODE_ADDED;
1599 * Simulate a be_Return.
1601 * @param state the x87 state
1602 * @param n the node that should be simulated (and patched)
1604 * @return NO_NODE_ADDED
1606 static int sim_Return(x87_state *state, ir_node *n)
1608 #ifdef DEBUG_libfirm
1609 /* only floating point return values must reside on stack */
1610 int n_float_res = 0;
1611 int const n_res = be_Return_get_n_rets(n);
1612 for (int i = 0; i < n_res; ++i) {
1613 ir_node *const res = get_irn_n(n, n_be_Return_val + i);
1614 if (mode_is_float(get_irn_mode(res)))
/* debug-only sanity check: stack depth must match the fp result count */
1617 assert(x87_get_depth(state) == n_float_res);
1620 /* pop them virtually */
1622 return NO_NODE_ADDED;
1626 * Simulate a be_Perm.
1628 * @param state the x87 state
1629 * @param irn the node that should be simulated (and patched)
1631 * @return NO_NODE_ADDED
1633 static int sim_Perm(x87_state *state, ir_node *irn)
1636 ir_node *pred = get_irn_n(irn, 0);
1639 /* handle only floating point Perms */
1640 if (! mode_is_float(get_irn_mode(pred)))
1641 return NO_NODE_ADDED;
1643 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1645 /* Perm is a pure virtual instruction on x87.
1646 All inputs must be on the FPU stack and are pairwise
1647 different from each other.
1648 So, all we need to do is to permute the stack state. */
1649 n = get_irn_arity(irn);
1650 NEW_ARR_A(int, stack_pos, n);
1652 /* collect old stack positions */
1653 for (i = 0; i < n; ++i) {
1654 const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
1655 int idx = x87_on_stack(state, inreg->index);
1657 assert(idx >= 0 && "Perm argument not on x87 stack");
1661 /* now do the permutation */
1662 foreach_out_edge(irn, edge) {
1663 ir_node *proj = get_edge_src_irn(edge);
1664 const arch_register_t *out = x87_get_irn_register(proj);
1665 long num = get_Proj_proj(proj);
1667 assert(0 <= num && num < n && "More Proj's than Perm inputs");
/* rebind the output register to the stack slot of the matching input */
1668 x87_set_st(state, out->index, proj, stack_pos[(unsigned)num]);
1670 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1672 return NO_NODE_ADDED;
1676 * Kill any dead registers at block start by popping them from the stack.
1678 * @param sim the simulator handle
1679 * @param block the current block
1680 * @param state the x87 state at the begin of the block
1682 static void x87_kill_deads(x87_simulator *const sim, ir_node *const block, x87_state *const state)
1684 ir_node *first_insn = sched_first(block);
1685 ir_node *keep = NULL;
1686 unsigned live = vfp_live_args_after(sim, block, 0);
1688 int i, depth, num_pop;
/* build a bitmask of stack slots whose value is dead at block entry */
1691 depth = x87_get_depth(state);
1692 for (i = depth - 1; i >= 0; --i) {
1693 int reg = x87_get_st_reg(state, i);
1695 if (! is_vfp_live(reg, live))
1696 kill_mask |= (1 << i);
1700 DB((dbg, LEVEL_1, "Killing deads:\n"));
1701 DEBUG_ONLY(vfp_dump_live(live);)
1702 DEBUG_ONLY(x87_dump_stack(state);)
1704 if (kill_mask != 0 && live == 0) {
1705 /* special case: kill all registers */
1706 if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
1707 if (ia32_cg_config.use_femms) {
1708 /* use FEMMS on AMD processors to clear all */
1709 keep = new_bd_ia32_femms(NULL, block);
1711 /* use EMMS to clear all */
1712 keep = new_bd_ia32_emms(NULL, block);
1714 sched_add_before(first_insn, keep);
1720 /* now kill registers */
1722 /* we can only kill from TOS, so bring them up */
1723 if (! (kill_mask & 1)) {
1724 /* search from behind, because we can do a double-pop */
1725 for (i = depth - 1; i >= 0; --i) {
1726 if (kill_mask & (1 << i)) {
1727 kill_mask &= ~(1 << i);
/* exchange a dead slot up to the TOS so it can be popped */
1734 x87_set_st(state, -1, keep, i);
1735 x87_create_fxch(state, first_insn, i);
1738 if ((kill_mask & 3) == 3) {
1739 /* we can do a double-pop */
1743 /* only a single pop */
1748 kill_mask >>= num_pop;
1749 keep = x87_create_fpop(state, first_insn, num_pop);
1756 * Run a simulation and fix all virtual instructions for a block.
1758 * @param sim the simulator handle
1759 * @param block the current block
1761 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
1764 blk_state *bl_state = x87_get_bl_state(sim, block);
1765 x87_state *state = bl_state->begin;
1766 ir_node *start_block;
1768 assert(state != NULL);
1769 /* already processed? */
1770 if (bl_state->end != NULL)
1773 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
1774 DB((dbg, LEVEL_2, "State at Block begin:\n "));
1775 DEBUG_ONLY(x87_dump_stack(state);)
1777 /* create a new state, will be changed */
1778 state = x87_clone_state(sim, state);
1779 /* at block begin, kill all dead registers */
1780 x87_kill_deads(sim, block, state);
1782 /* beware, n might change */
1783 for (n = sched_first(block); !sched_is_end(n); n = next) {
1786 ir_op *op = get_irn_op(n);
1789 * get the next node to be simulated here.
1790 * n might be completely removed from the schedule-
1792 next = sched_next(n);
/* opcodes with a registered simulator (see register_sim) get patched */
1793 if (op->ops.generic != NULL) {
1794 func = (sim_func)op->ops.generic;
1797 node_inserted = (*func)(state, n);
1800 * sim_func might have added an additional node after n,
1801 * so update next node
1802 * beware: n must not be changed by sim_func
1803 * (i.e. removed from schedule) in this case
1805 if (node_inserted != NO_NODE_ADDED)
1806 next = sched_next(n);
1810 start_block = get_irg_start_block(get_irn_irg(block));
1812 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state);)
1814 /* check if the state must be shuffled */
1815 foreach_block_succ(block, edge) {
1816 ir_node *succ = get_edge_src_irn(edge);
1817 blk_state *succ_state;
1819 if (succ == start_block)
1822 succ_state = x87_get_bl_state(sim, succ);
1824 if (succ_state->begin == NULL) {
1825 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
1826 DEBUG_ONLY(x87_dump_stack(state);)
/* first predecessor to finish defines the successor's entry state */
1827 succ_state->begin = state;
1829 waitq_put(sim->worklist, succ);
1831 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
1832 /* There is already a begin state for the successor, bad.
1833 Do the necessary permutations.
1834 Note that critical edges are removed, so this is always possible:
1835 If the successor has more than one possible input, then it must
1838 x87_shuffle(block, state, succ_state->begin);
1841 bl_state->end = state;
1845 * Register a simulator function.
1847 * @param op the opcode to simulate
1848 * @param func the simulator function for the opcode
1850 static void register_sim(ir_op *op, sim_func func)
1852 assert(op->ops.generic == NULL);
1853 op->ops.generic = (op_func) func;
1857 * Create a new x87 simulator.
1859 * @param sim a simulator handle, will be initialized
1860 * @param irg the current graph
1862 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
1864 obstack_init(&sim->obst);
1865 sim->blk_states = pmap_create();
1866 sim->n_idx = get_irg_last_idx(irg);
/* liveness cache: one vfp_liveness entry per node index */
1867 sim->live = OALLOCN(&sim->obst, vfp_liveness, sim->n_idx);
1869 DB((dbg, LEVEL_1, "--------------------------------\n"
1870 "x87 Simulator started for %+F\n", irg));
1872 /* set the generic function pointer of instruction we must simulate */
1873 ir_clear_opcodes_generic_func();
/* wire each virtual fp opcode (and the relevant be_* ops) to its
 * simulator callback; unregistered opcodes are left untouched */
1875 register_sim(op_ia32_Asm, sim_Asm);
1876 register_sim(op_ia32_Call, sim_Call);
1877 register_sim(op_ia32_vfld, sim_fld);
1878 register_sim(op_ia32_vfild, sim_fild);
1879 register_sim(op_ia32_vfld1, sim_fld1);
1880 register_sim(op_ia32_vfldz, sim_fldz);
1881 register_sim(op_ia32_vfadd, sim_fadd);
1882 register_sim(op_ia32_vfsub, sim_fsub);
1883 register_sim(op_ia32_vfmul, sim_fmul);
1884 register_sim(op_ia32_vfdiv, sim_fdiv);
1885 register_sim(op_ia32_vfprem, sim_fprem);
1886 register_sim(op_ia32_vfabs, sim_fabs);
1887 register_sim(op_ia32_vfchs, sim_fchs);
1888 register_sim(op_ia32_vfist, sim_fist);
1889 register_sim(op_ia32_vfisttp, sim_fisttp);
1890 register_sim(op_ia32_vfst, sim_fst);
1891 register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
1892 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
1893 register_sim(op_ia32_vFucomi, sim_Fucom);
1894 register_sim(op_be_Copy, sim_Copy);
1895 register_sim(op_be_Return, sim_Return);
1896 register_sim(op_be_Perm, sim_Perm);
1897 register_sim(op_be_Keep, sim_Keep);
1901 * Destroy a x87 simulator.
1903 * @param sim the simulator handle
1905 static void x87_destroy_simulator(x87_simulator *sim)
1907 pmap_destroy(sim->blk_states);
1908 obstack_free(&sim->obst, NULL);
1909 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
1913 * Pre-block walker: calculate the liveness information for the block
1914 * and store it into the sim->live cache.
1916 static void update_liveness_walker(ir_node *block, void *data)
1918 x87_simulator *sim = (x87_simulator*)data;
1919 update_liveness(sim, block);
1923 * Run a simulation and fix all virtual instructions for a graph.
1924 * Replaces all virtual floating point instructions and registers
1927 void ia32_x87_simulate_graph(ir_graph *irg)
1929 /* TODO improve code quality (less executed fxch) by using execfreqs */
1931 ir_node *block, *start_block;
1932 blk_state *bl_state;
1935 /* create the simulator */
1936 x87_init_simulator(&sim, irg);
1938 start_block = get_irg_start_block(irg);
1939 bl_state = x87_get_bl_state(&sim, start_block);
1941 /* start with the empty state */
1943 bl_state->begin = ∅
1945 sim.worklist = new_waitq();
1946 waitq_put(sim.worklist, start_block);
1948 be_assure_live_sets(irg);
1949 sim.lv = be_get_irg_liveness(irg);
1951 /* Calculate the liveness for all nodes. We must precalculate this info,
1952 * because the simulator adds new nodes (possible before Phi nodes) which
1953 * would let a lazy calculation fail.
1954 * On the other hand we reduce the computation amount due to
1955 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
1957 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
1961 block = (ir_node*)waitq_get(sim.worklist);
1962 x87_simulate_block(&sim, block);
1963 } while (! waitq_empty(sim.worklist));
1966 del_waitq(sim.worklist);
1967 x87_destroy_simulator(&sim);
1970 /* Initializes the x87 simulator. */
1971 void ia32_init_x87(void)
1973 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");