2 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
33 #include "iredges_t.h"
48 #include "bearch_ia32_t.h"
49 #include "ia32_new_nodes.h"
50 #include "gen_ia32_new_nodes.h"
51 #include "gen_ia32_regalloc_if.h"
53 #include "ia32_architecture.h"
/* NOTE(review): this chunk appears line-sampled; intermediate lines (braces,
 * closing comment delimiters, some statements) are missing between the
 * numbered lines below. Do not treat this excerpt as compilable. */
55 /** the debug handle */
56 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
58 /* Forward declaration. */
59 typedef struct x87_simulator x87_simulator;
/* One slot of the simulated x87 register stack: which virtual (vfp) register
 * currently occupies it, and the node that defined the value. */
62 * An entry on the simulated x87 stack.
64 typedef struct st_entry {
65 int reg_idx; /**< the virtual register index of this stack value */
66 ir_node *node; /**< the node that produced this value */
/* The whole simulated stack: N_ia32_st_REGS slots plus the current depth.
 * Slots are used from the END of the array (see x87_get_entry). */
72 typedef struct x87_state {
73 st_entry st[N_ia32_st_REGS]; /**< the register stack */
74 int depth; /**< the current stack depth */
75 x87_simulator *sim; /**< The simulator. */
78 /** An empty state, used for blocks without fp instructions. */
79 static x87_state empty = { { {0, NULL}, }, 0, NULL };
82 * Return values of the instruction simulator functions.
85 NO_NODE_ADDED = 0, /**< No node that needs simulation was added. */
86 NODE_ADDED = 1 /**< A node that must be simulated was added by the simulator
87 in the schedule AFTER the current node. */
91 * The type of an instruction simulator function.
93 * @param state the x87 state
94 * @param n the node to be simulated
96 * @return NODE_ADDED if a node was added AFTER n in schedule that MUST be
98 * NO_NODE_ADDED otherwise
100 typedef int (*sim_func)(x87_state *state, ir_node *n);
103 * A block state: Every block has a x87 state at the beginning and at the end.
105 typedef struct blk_state {
106 x87_state *begin; /**< state at the begin or NULL if not assigned */
107 x87_state *end; /**< state at the end or NULL if not assigned */
110 /** liveness bitset for vfp registers. */
111 typedef unsigned char vfp_liveness;
/* Global simulator handle: per-block states, cached liveness bitsets indexed
 * by node idx, and a worklist of blocks still to process. */
116 struct x87_simulator {
117 struct obstack obst; /**< An obstack for fast allocating. */
118 pmap *blk_states; /**< Map blocks to states. */
119 be_lv_t *lv; /**< intrablock liveness. */
120 vfp_liveness *live; /**< Liveness information. */
121 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
122 waitq *worklist; /**< Worklist of blocks that must be processed. */
126 * Returns the current stack depth.
128 * @param state the x87 state
130 * @return the x87 stack depth
132 static int x87_get_depth(const x87_state *state)
/* Map a logical stack position to its st[] slot: the stack grows downward
 * from the end of the array, so st(0) lives at N_ia32_st_REGS - depth. */
137 static st_entry *x87_get_entry(x87_state *const state, int const pos)
139 assert(0 <= pos && pos < state->depth);
140 return &state->st[N_ia32_st_REGS - state->depth + pos];
144 * Return the virtual register index at st(pos).
146 * @param state the x87 state
147 * @param pos a stack position
149 * @return the vfp register index that produced the value at st(pos)
151 static int x87_get_st_reg(const x87_state *state, int pos)
/* const is cast away because x87_get_entry takes a mutable state; the entry
 * is only read here. */
153 return x87_get_entry((x87_state*)state, pos)->reg_idx;
158 * Dump the stack for debugging.
160 * @param state the x87 state
/* Prints entries from the deepest slot down to st(0), which is marked TOS.
 * NOTE(review): the trailing #endif suggests this is compiled only under
 * DEBUG_libfirm — the matching #ifdef was sampled out; confirm in the file. */
162 static void x87_dump_stack(const x87_state *state)
164 for (int i = state->depth; i-- != 0;) {
165 st_entry const *const entry = x87_get_entry((x87_state*)state, i);
166 DB((dbg, LEVEL_2, "vf%d(%+F) ", entry->reg_idx, entry->node));
168 DB((dbg, LEVEL_2, "<-- TOS\n"));
170 #endif /* DEBUG_libfirm */
173 * Set a virtual register to st(pos).
175 * @param state the x87 state
176 * @param reg_idx the vfp register index that should be set
177 * @param node the IR node that produces the value of the vfp register
178 * @param pos the stack position where the new value should be entered
180 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
182 st_entry *const entry = x87_get_entry(state, pos);
183 entry->reg_idx = reg_idx;
/* NOTE(review): the assignment of entry->node was sampled out here. */
186 DB((dbg, LEVEL_2, "After SET_REG: "));
187 DEBUG_ONLY(x87_dump_stack(state);)
191 * Swap st(0) with st(pos).
193 * @param state the x87 state
194 * @param pos the stack position to change the tos with
196 static void x87_fxch(x87_state *state, int pos)
198 st_entry *const a = x87_get_entry(state, pos);
199 st_entry *const b = x87_get_entry(state, 0);
200 st_entry const t = *a;
/* NOTE(review): the actual swap assignments (*a = *b; *b = t;) were
 * sampled out of this excerpt. */
204 DB((dbg, LEVEL_2, "After FXCH: "));
205 DEBUG_ONLY(x87_dump_stack(state);)
209 * Convert a virtual register to the stack index.
211 * @param state the x87 state
212 * @param reg_idx the register vfp index
214 * @return the stack position where the register is stacked
215 * or -1 if the virtual register was not found
/* Linear scan over the current stack; depth is at most N_ia32_st_REGS (8). */
217 static int x87_on_stack(const x87_state *state, int reg_idx)
219 for (int i = 0; i < state->depth; ++i) {
220 if (x87_get_st_reg(state, i) == reg_idx)
227 * Push a virtual Register onto the stack, double pushes are NOT allowed.
229 * @param state the x87 state
230 * @param reg_idx the register vfp index
231 * @param node the node that produces the value of the vfp register
233 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
235 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
236 assert(state->depth < N_ia32_st_REGS && "stack overrun");
/* NOTE(review): the depth increment was sampled out; the new value then
 * becomes the new st(0). */
239 st_entry *const entry = x87_get_entry(state, 0);
240 entry->reg_idx = reg_idx;
243 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state);)
247 * Pop a virtual Register from the stack.
249 * @param state the x87 state
251 static void x87_pop(x87_state *state)
253 assert(state->depth > 0 && "stack underrun");
257 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state);)
261 * Empty the fpu stack
263 * @param state the x87 state
/* Resets the simulated stack (used when an EMMS-like barrier is reached). */
265 static void x87_emms(x87_state *state)
271 * Returns the block state of a block.
273 * @param sim the x87 simulator handle
274 * @param block the current block
276 * @return the block state
/* Lazily allocates a blk_state on the simulator's obstack the first time a
 * block is seen and caches it in sim->blk_states. */
278 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
280 blk_state *res = pmap_get(blk_state, sim->blk_states, block);
283 res = OALLOC(&sim->obst, blk_state);
/* NOTE(review): the begin/end NULL initialisation was sampled out here. */
287 pmap_insert(sim->blk_states, block, res);
296 * @param sim the x87 simulator handle
297 * @param src the x87 state that will be cloned
299 * @return a cloned copy of the src state
/* Obstack-allocated copy; the copy (memcpy/assignment) line was sampled out. */
301 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
303 x87_state *const res = OALLOC(&sim->obst, x87_state);
309 * Patch a virtual instruction into a x87 one and return
310 * the node representing the result value.
312 * @param n the IR node to patch
313 * @param op the x87 opcode to patch in
315 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
317 ir_mode *mode = get_irn_mode(n);
/* For mode_T nodes the float result travels through Projs, so the st-class
 * mode must be set on each float Proj instead of on n itself. */
322 if (mode == mode_T) {
323 /* patch all Proj's */
324 foreach_out_edge(n, edge) {
325 ir_node *proj = get_edge_src_irn(edge);
327 mode = get_irn_mode(proj);
328 if (mode_is_float(mode)) {
330 set_irn_mode(proj, ia32_reg_classes[CLASS_ia32_st].mode);
334 } else if (mode_is_float(mode))
335 set_irn_mode(n, ia32_reg_classes[CLASS_ia32_st].mode);
340 * Returns the first Proj of a mode_T node having a given mode.
342 * @param n the mode_T node
343 * @param m the desired mode of the Proj
344 * @return The first Proj of mode @p m found.
/* Panics if no Proj of mode m exists — callers rely on its presence. */
346 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
348 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
350 foreach_out_edge(n, edge) {
351 ir_node *proj = get_edge_src_irn(edge);
352 if (get_irn_mode(proj) == m)
356 panic("Proj not found");
360 * Wrap the arch_* function here so we can check for errors.
/* Asserts the register belongs to the vfp class before use. */
362 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
364 const arch_register_t *res = arch_get_irn_register(irn);
366 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
/* Same as above, but for a specific output position of a mode_T node. */
370 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
373 const arch_register_t *res = arch_get_irn_register_out(irn, pos);
375 assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
/* Map a stack index (0..7) to the physical st(index) register descriptor. */
379 static inline const arch_register_t *get_st_reg(int index)
381 return &ia32_registers[REG_ST0 + index];
385 * Create a fxch node before another node.
387 * @param state the x87 state
388 * @param n the node after the fxch
389 * @param pos exchange st(pos) with st(0)
/* Updates the simulated state first, then emits the matching fxch
 * instruction into the schedule right before n. */
391 static void x87_create_fxch(x87_state *state, ir_node *n, int pos)
393 x87_fxch(state, pos);
395 ir_node *const block = get_nodes_block(n);
396 ir_node *const fxch = new_bd_ia32_fxch(NULL, block);
397 ia32_x87_attr_t *const attr = get_ia32_x87_attr(fxch);
398 attr->x87[0] = get_st_reg(pos);
399 attr->x87[2] = get_st_reg(0);
403 sched_add_before(n, fxch);
404 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
407 /* -------------- x87 perm --------------- */
410 * Calculate the necessary permutations to reach dst_state.
412 * These permutations are done with fxch instructions and placed
413 * at the end of the block.
415 * Note that critical edges are removed here, so we need only
416 * a shuffle if the current block has only one successor.
418 * @param block the current block
419 * @param state the current x87 stack state, might be modified
420 * @param dst_state destination state
/* Decomposes the permutation between state and dst_state into disjoint
 * cycles, then realises each cycle with fxch instructions scheduled before
 * the block's final control-flow op. */
424 static x87_state *x87_shuffle(ir_node *block, x87_state *state, const x87_state *dst_state)
426 int i, n_cycles, k, ri;
427 unsigned cycles[4], all_mask;
428 char cycle_idx[4][8];
430 assert(state->depth == dst_state->depth);
432 /* Some mathematics here:
433 * If we have a cycle of length n that includes the tos,
434 * we need n-1 exchange operations.
435 * We can always add the tos and restore it, so we need
436 * n+1 exchange operations for a cycle not containing the tos.
437 * So, the maximum of needed operations is for a cycle of 7
438 * not including the tos == 8.
439 * This is the same number of ops we would need for using stores,
440 * so exchange is cheaper (we save the loads).
441 * On the other hand, we might need an additional exchange
442 * in the next block to bring one operand on top, so the
443 * number of ops in the first case is identical.
444 * Further, no more than 4 cycles can exists (4 x 2). */
445 all_mask = (1 << (state->depth)) - 1;
/* Phase 1: find the cycles. all_mask tracks positions not yet assigned
 * to a cycle; positions that already match dst are simply cleared. */
447 for (n_cycles = 0; all_mask; ++n_cycles) {
448 int src_idx, dst_idx;
450 /* find the first free slot */
451 for (i = 0; i < state->depth; ++i) {
452 if (all_mask & (1 << i)) {
453 all_mask &= ~(1 << i);
455 /* check if there are differences here */
456 if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
462 /* no more cycles found */
/* Follow the cycle starting at i: each step asks where the register at
 * src_idx must end up in dst_state. */
467 cycles[n_cycles] = (1 << i);
468 cycle_idx[n_cycles][k++] = i;
469 for (src_idx = i; ; src_idx = dst_idx) {
470 dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));
472 if ((all_mask & (1 << dst_idx)) == 0)
475 cycle_idx[n_cycles][k++] = dst_idx;
476 cycles[n_cycles] |= (1 << dst_idx);
477 all_mask &= ~(1 << dst_idx);
479 cycle_idx[n_cycles][k] = -1;
483 /* no permutation needed */
487 /* Hmm: permutation needed */
488 DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
489 DEBUG_ONLY(x87_dump_stack(state);)
490 DB((dbg, LEVEL_2, " to\n"));
491 DEBUG_ONLY(x87_dump_stack(dst_state);)
495 DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
496 for (ri = 0; ri < n_cycles; ++ri) {
497 DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
498 for (k = 0; cycle_idx[ri][k] != -1; ++k)
499 DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
500 DB((dbg, LEVEL_2, "\n"));
505 * Find the place node must be insert.
506 * We have only one successor block, so the last instruction should
509 ir_node *const before = sched_last(block);
510 assert(is_cfop(before));
512 /* now do the permutations */
/* Phase 2: for a cycle not containing the tos (bit 0 clear), an extra
 * leading fxch brings a cycle member to the tos, and a matching trailing
 * fxch restores it afterwards. */
513 for (ri = 0; ri < n_cycles; ++ri) {
514 if ((cycles[ri] & 1) == 0) {
515 /* this cycle does not include the tos */
516 x87_create_fxch(state, before, cycle_idx[ri][0]);
518 for (k = 1; cycle_idx[ri][k] != -1; ++k) {
519 x87_create_fxch(state, before, cycle_idx[ri][k]);
521 if ((cycles[ri] & 1) == 0) {
522 /* this cycle does not include the tos */
523 x87_create_fxch(state, before, cycle_idx[ri][0]);
530 * Create a fpush before node n.
532 * @param state the x87 state
533 * @param n the node after the fpush
534 * @param pos push st(pos) on stack
535 * @param val the value to push
/* Duplicates st(pos) onto the top of the (simulated and real) stack; the new
 * tos is recorded under out_reg_idx. */
537 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int const out_reg_idx, ir_node *const val)
539 x87_push(state, out_reg_idx, val);
541 ir_node *const fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
542 ia32_x87_attr_t *const attr = get_ia32_x87_attr(fpush);
543 attr->x87[0] = get_st_reg(pos);
544 attr->x87[2] = get_st_reg(0);
547 sched_add_before(n, fpush);
549 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
553 * Create a fpop before node n.
555 * @param state the x87 state
556 * @param n the node after the fpop
557 * @param num pop 1 or 2 values
559 * @return the fpop node
/* Emits ffreep when the target CPU supports it (cheaper), otherwise a plain
 * fpop. NOTE(review): the loop over num and the x87_pop() call were sampled
 * out of this excerpt. */
561 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
563 ir_node *fpop = NULL;
564 ia32_x87_attr_t *attr;
569 if (ia32_cg_config.use_ffreep)
570 fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
572 fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
573 attr = get_ia32_x87_attr(fpop);
574 attr->x87[0] = get_st_reg(0);
575 attr->x87[1] = get_st_reg(0);
576 attr->x87[2] = get_st_reg(0);
579 sched_add_before(n, fpop);
580 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
585 /* --------------------------------- liveness ------------------------------------------ */
588 * The liveness transfer function.
589 * Updates a live set over a single step from a given node to its predecessor.
590 * Everything defined at the node is removed from the set, the uses of the node get inserted.
592 * @param irn The node at which liveness should be computed.
593 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
594 * the registers live after irn.
596 * @return The live bitset.
598 static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
601 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
/* Kill the definitions: for mode_T nodes the defs are on the Projs. */
603 if (get_irn_mode(irn) == mode_T) {
604 foreach_out_edge(irn, edge) {
605 ir_node *proj = get_edge_src_irn(edge);
607 if (arch_irn_consider_in_reg_alloc(cls, proj)) {
608 const arch_register_t *reg = x87_get_irn_register(proj);
609 live &= ~(1 << arch_register_get_index(reg));
612 } else if (arch_irn_consider_in_reg_alloc(cls, irn)) {
613 const arch_register_t *reg = x87_get_irn_register(irn);
614 live &= ~(1 << arch_register_get_index(reg));
/* Gen the uses: every float operand in the vfp class becomes live. */
617 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
618 ir_node *op = get_irn_n(irn, i);
620 if (mode_is_float(get_irn_mode(op)) &&
621 arch_irn_consider_in_reg_alloc(cls, op)) {
622 const arch_register_t *reg = x87_get_irn_register(op);
623 live |= 1 << arch_register_get_index(reg);
630 * Put all live virtual registers at the end of a block into a bitset.
632 * @param sim the simulator handle
633 * @param bl the block
635 * @return The live bitset at the end of this block
637 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
639 vfp_liveness live = 0;
640 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
641 const be_lv_t *lv = sim->lv;
643 be_lv_foreach(lv, block, be_lv_state_end, node) {
644 const arch_register_t *reg;
645 if (!arch_irn_consider_in_reg_alloc(cls, node))
648 reg = x87_get_irn_register(node);
649 live |= 1 << arch_register_get_index(reg);
655 /** get the register mask from an arch_register */
656 #define REGMASK(reg) (1 << (arch_register_get_index(reg)))
659 * Return a bitset of argument registers which are live at the end of a node.
661 * @param sim the simulator handle
662 * @param pos the node
663 * @param kill kill mask for the output registers
665 * @return The live bitset.
/* Reads the per-node liveness cached by update_liveness() and masks out the
 * registers the node itself defines. */
667 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
669 unsigned idx = get_irn_idx(pos);
671 assert(idx < sim->n_idx);
672 return sim->live[idx] & ~kill;
676 * Calculate the liveness for a whole block and cache it.
678 * @param sim the simulator handle
679 * @param block the block
/* Walks the schedule backwards from the block end, storing the live-after
 * set for every node in sim->live[]; the block's own idx gets the live-in. */
681 static void update_liveness(x87_simulator *sim, ir_node *block)
683 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
686 /* now iterate through the block backward and cache the results */
687 sched_foreach_reverse(block, irn) {
688 /* stop at the first Phi: this produces the live-in */
692 idx = get_irn_idx(irn);
693 sim->live[idx] = live;
695 live = vfp_liveness_transfer(irn, live);
697 idx = get_irn_idx(block);
698 sim->live[idx] = live;
702 * Returns true if a register is live in a set.
704 * @param reg_idx the vfp register index
705 * @param live a live bitset
707 #define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
711 * Dump liveness info.
713 * @param live the live bitset
/* Debug-only pretty-printer for a liveness bitset (one bit per vf register). */
715 static void vfp_dump_live(vfp_liveness live)
719 DB((dbg, LEVEL_2, "Live after: "));
720 for (i = 0; i < 8; ++i) {
721 if (live & (1 << i)) {
722 DB((dbg, LEVEL_2, "vf%d ", i));
725 DB((dbg, LEVEL_2, "\n"));
727 #endif /* DEBUG_libfirm */
729 /* --------------------------------- simulators ---------------------------------------- */
732 * Simulate a virtual binop.
734 * @param state the x87 state
735 * @param n the node that should be simulated (and patched)
737 * @return NO_NODE_ADDED
/* Chooses between the plain and the popping x87 opcode depending on which
 * operands stay live after n, inserting fxch/fpush nodes as needed so the
 * operands end up in positions the chosen instruction form can address. */
739 static int sim_binop(x87_state *const state, ir_node *const n, ir_op *const normal_op, ir_op *const normal_pop_op)
741 int op2_idx = 0, op1_idx;
742 int out_idx, do_pop = 0;
743 ia32_x87_attr_t *attr;
745 ir_node *patched_insn;
747 x87_simulator *sim = state->sim;
748 ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
749 ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
750 const arch_register_t *op1_reg = x87_get_irn_register(op1);
751 const arch_register_t *op2_reg = x87_get_irn_register(op2);
752 const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
753 int reg_index_1 = arch_register_get_index(op1_reg);
754 int reg_index_2 = arch_register_get_index(op2_reg);
755 vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));
759 DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
760 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
761 arch_register_get_name(out)));
762 DEBUG_ONLY(vfp_dump_live(live);)
763 DB((dbg, LEVEL_1, "Stack before: "));
764 DEBUG_ONLY(x87_dump_stack(state);)
766 op1_idx = x87_on_stack(state, reg_index_1);
767 assert(op1_idx >= 0);
768 op1_live_after = is_vfp_live(reg_index_1, live);
770 attr = get_ia32_x87_attr(n);
771 permuted = attr->attr.data.ins_permuted;
773 int const out_reg_idx = arch_register_get_index(out);
/* REG_VFP_VFP_NOREG marks an address-mode (memory) second operand. */
774 if (reg_index_2 != REG_VFP_VFP_NOREG) {
777 /* second operand is a vfp register */
778 op2_idx = x87_on_stack(state, reg_index_2);
779 assert(op2_idx >= 0);
780 op2_live_after = is_vfp_live(reg_index_2, live);
782 if (op2_live_after) {
783 /* Second operand is live. */
785 if (op1_live_after) {
786 /* Both operands are live: push the first one.
787 This works even for op1 == op2. */
788 x87_create_fpush(state, n, op1_idx, out_reg_idx, op2);
789 /* now do fxxx (tos=tos X op) */
795 /* Second live, first operand is dead here, bring it to tos. */
797 x87_create_fxch(state, n, op1_idx);
802 /* now do fxxx (tos=tos X op) */
807 /* Second operand is dead. */
808 if (op1_live_after) {
809 /* First operand is live: bring second to tos. */
811 x87_create_fxch(state, n, op2_idx);
816 /* now do fxxxr (tos = op X tos) */
820 /* Both operands are dead here, pop them from the stack. */
823 /* Both are identically and on tos, no pop needed. */
824 /* here fxxx (tos = tos X tos) */
828 /* now do fxxxp (op = op X tos, pop) */
833 } else if (op1_idx == 0) {
834 assert(op1_idx != op2_idx);
835 /* now do fxxxrp (op = tos X op, pop) */
840 /* Bring the second on top. */
841 x87_create_fxch(state, n, op2_idx);
842 if (op1_idx == op2_idx) {
843 /* Both are identically and on tos now, no pop needed. */
846 /* use fxxx (tos = tos X tos) */
850 /* op2 is on tos now */
852 /* use fxxxp (op = op X tos, pop) */
861 /* second operand is an address mode */
862 if (op1_live_after) {
863 /* first operand is live: push it here */
864 x87_create_fpush(state, n, op1_idx, out_reg_idx, op1);
867 /* first operand is dead: bring it to tos */
869 x87_create_fxch(state, n, op1_idx);
874 /* use fxxx (tos = tos X mem) */
/* Patch n to the selected x87 opcode and record the result location. */
879 patched_insn = x87_patch_insn(n, dst);
880 x87_set_st(state, out_reg_idx, patched_insn, out_idx);
885 /* patch the operation */
886 attr->x87[0] = op1_reg = get_st_reg(op1_idx);
887 if (reg_index_2 != REG_VFP_VFP_NOREG) {
888 attr->x87[1] = op2_reg = get_st_reg(op2_idx);
890 attr->x87[2] = out = get_st_reg(out_idx);
892 if (reg_index_2 != REG_VFP_VFP_NOREG) {
893 DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
894 arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
895 arch_register_get_name(out)));
897 DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
898 arch_register_get_name(op1_reg),
899 arch_register_get_name(out)));
902 return NO_NODE_ADDED;
906 * Simulate a virtual Unop.
908 * @param state the x87 state
909 * @param n the node that should be simulated (and patched)
910 * @param op the x87 opcode that will replace n's opcode
912 * @return NO_NODE_ADDED
/* Unops operate on the tos in place: push the operand if it stays live,
 * otherwise just exchange it to the top. */
914 static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
916 arch_register_t const *const out = x87_get_irn_register(n);
917 unsigned const live = vfp_live_args_after(state->sim, n, REGMASK(out));
918 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
919 DEBUG_ONLY(vfp_dump_live(live);)
921 ir_node *const op1 = get_irn_n(n, 0);
922 arch_register_t const *const op1_reg = x87_get_irn_register(op1);
923 int const op1_reg_idx = arch_register_get_index(op1_reg);
924 int const op1_idx = x87_on_stack(state, op1_reg_idx);
925 int const out_reg_idx = arch_register_get_index(out);
926 if (is_vfp_live(op1_reg_idx, live)) {
927 /* push the operand here */
928 x87_create_fpush(state, n, op1_idx, out_reg_idx, op1);
930 /* operand is dead, bring it to tos */
932 x87_create_fxch(state, n, op1_idx);
936 x87_set_st(state, out_reg_idx, x87_patch_insn(n, op), 0);
937 ia32_x87_attr_t *const attr = get_ia32_x87_attr(n);
938 attr->x87[2] = attr->x87[0] = get_st_reg(0);
939 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), attr->x87[2]->name));
941 return NO_NODE_ADDED;
945 * Simulate a virtual Load instruction.
947 * @param state the x87 state
948 * @param n the node that should be simulated (and patched)
949 * @param op the x87 opcode that will replace n's opcode
951 * @return NO_NODE_ADDED
/* A load always pushes its result onto the x87 stack (result lands in st0). */
953 static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
955 const arch_register_t *out = x87_irn_get_register(n, res_pos);
956 ia32_x87_attr_t *attr;
958 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
959 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
960 assert(out == x87_irn_get_register(n, res_pos));
961 attr = get_ia32_x87_attr(n);
962 attr->x87[2] = out = get_st_reg(0);
963 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
965 return NO_NODE_ADDED;
969 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
971 * @param store The store
972 * @param old_val The former value
973 * @param new_val The new value
/* Uses the _safe edge iterator because set_irn_n() mutates the edge list
 * being walked. Only scheduled users after the store are redirected. */
975 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
977 foreach_out_edge_safe(old_val, edge) {
978 ir_node *user = get_edge_src_irn(edge);
979 /* if the user is scheduled after the store: rewire */
980 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
981 set_irn_n(user, get_edge_src_pos(edge), new_val);
987 * Simulate a virtual Store.
989 * @param state the x87 state
990 * @param n the node that should be simulated (and patched)
991 * @param op the x87 store opcode
992 * @param op_p the x87 store and pop opcode
994 static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p)
996 ir_node *const val = get_irn_n(n, n_ia32_vfst_val);
997 arch_register_t const *const op2 = x87_get_irn_register(val);
998 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1000 int insn = NO_NODE_ADDED;
1001 int const op2_reg_idx = arch_register_get_index(op2);
1002 int const op2_idx = x87_on_stack(state, op2_reg_idx);
1003 unsigned const live = vfp_live_args_after(state->sim, n, 0);
1004 int const live_after_node = is_vfp_live(op2_reg_idx, live);
1005 assert(op2_idx >= 0);
1006 if (live_after_node) {
1007 /* Problem: fst doesn't support 80bit modes (spills), only fstp does
1008 * fist doesn't support 64bit mode, only fistp
1010 * - stack not full: push value and fstp
1011 * - stack full: fstp value and load again
1012 * Note that we cannot test on mode_E, because floats might be 80bit ... */
1013 ir_mode *const mode = get_ia32_ls_mode(n);
1014 if (get_mode_size_bits(mode) > (mode_is_int(mode) ? 32 : 64)) {
1015 if (x87_get_depth(state) < N_ia32_st_REGS) {
1016 /* ok, we have a free register: push + fstp */
1017 x87_create_fpush(state, n, op2_idx, REG_VFP_VFP_NOREG, val);
1019 x87_patch_insn(n, op_p);
1021 /* stack full here: need fstp + load */
1023 x87_patch_insn(n, op_p);
/* Reload path: materialise a vfld right after the popping store and make it
 * take over the store's address operands and memory edge. */
1025 ir_node *const block = get_nodes_block(n);
1026 ir_node *const mem = get_irn_Proj_for_mode(n, mode_M);
1027 ir_node *const vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), mem, mode);
1029 /* copy all attributes */
1030 set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
1031 if (is_ia32_use_frame(n))
1032 set_ia32_use_frame(vfld);
1033 set_ia32_op_type(vfld, ia32_AddrModeS);
1034 add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
1035 set_ia32_am_sc(vfld, get_ia32_am_sc(n));
1036 set_ia32_ls_mode(vfld, mode);
1038 ir_node *const rproj = new_r_Proj(vfld, mode, pn_ia32_vfld_res);
1039 ir_node *const mproj = new_r_Proj(vfld, mode_M, pn_ia32_vfld_M);
1041 arch_set_irn_register(rproj, op2);
1043 /* reroute all former users of the store memory to the load memory */
1044 edges_reroute_except(mem, mproj, vfld);
1046 sched_add_after(n, vfld);
1048 /* rewire all users, scheduled after the store, to the loaded value */
1049 collect_and_rewire_users(n, val, rproj);
1054 /* we can only store the tos to memory */
1056 x87_create_fxch(state, n, op2_idx);
1058 /* mode size 64 or smaller -> use normal fst */
1059 x87_patch_insn(n, op);
1062 /* we can only store the tos to memory */
1064 x87_create_fxch(state, n, op2_idx);
/* Value dead after the store: use the popping form unconditionally. */
1067 x87_patch_insn(n, op_p);
1070 ia32_x87_attr_t *const attr = get_ia32_x87_attr(n);
1071 attr->x87[1] = get_st_reg(0);
1072 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(attr->x87[1])));
/* Generator macros: stamp out one sim_<op> wrapper per x87 instruction,
 * delegating to the generic sim_binop/sim_load/sim_unop/sim_store above. */
1077 #define GEN_BINOP(op) \
1078 static int sim_##op(x87_state *state, ir_node *n) { \
1079 return sim_binop(state, n, op_ia32_##op, op_ia32_##op##p); \
1082 #define GEN_LOAD(op) \
1083 static int sim_##op(x87_state *state, ir_node *n) { \
1084 return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
1087 #define GEN_UNOP(op) \
1088 static int sim_##op(x87_state *state, ir_node *n) { \
1089 return sim_unop(state, n, op_ia32_##op); \
1092 #define GEN_STORE(op) \
1093 static int sim_##op(x87_state *state, ir_node *n) { \
1094 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
/* fprem simulation is not implemented yet; panics if ever reached. */
1114 static int sim_fprem(x87_state *const state, ir_node *const n)
1118 panic("TODO implement");
1119 return NO_NODE_ADDED;
1123 * Simulate a virtual fisttp.
1125 * @param state the x87 state
1126 * @param n the node that should be simulated (and patched)
1128 * @return NO_NODE_ADDED
/* fisttp always pops: bring the value to the tos and patch the opcode. */
1130 static int sim_fisttp(x87_state *state, ir_node *n)
1132 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1133 const arch_register_t *op2 = x87_get_irn_register(val);
1134 ia32_x87_attr_t *attr;
1135 int op2_reg_idx, op2_idx;
1137 op2_reg_idx = arch_register_get_index(op2);
1138 op2_idx = x87_on_stack(state, op2_reg_idx);
1139 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1140 assert(op2_idx >= 0);
1142 /* Note: although the value is still live here, it is destroyed because
1143 of the pop. The register allocator is aware of that and introduced a copy
1144 if the value must be alive. */
1146 /* we can only store the tos to memory */
1148 x87_create_fxch(state, n, op2_idx);
1151 x87_patch_insn(n, op_ia32_fisttp);
1153 attr = get_ia32_x87_attr(n);
1154 attr->x87[1] = op2 = get_st_reg(0);
1155 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
1157 return NO_NODE_ADDED;
1161 * Simulate a virtual FtstFnstsw.
1163 * @param state the x87 state
1164 * @param n the node that should be simulated (and patched)
1166 * @return NO_NODE_ADDED
/* ftst tests st(0) against 0.0, so the operand must be on the tos; if the
 * value is dead afterwards an fpop is scheduled after n. */
1168 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1170 x87_simulator *sim = state->sim;
1171 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1172 ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1173 const arch_register_t *reg1 = x87_get_irn_register(op1_node);
1174 int reg_index_1 = arch_register_get_index(reg1);
1175 int op1_idx = x87_on_stack(state, reg_index_1);
1176 unsigned live = vfp_live_args_after(sim, n, 0);
1178 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, arch_register_get_name(reg1)));
1179 DEBUG_ONLY(vfp_dump_live(live);)
1180 DB((dbg, LEVEL_1, "Stack before: "));
1181 DEBUG_ONLY(x87_dump_stack(state);)
1182 assert(op1_idx >= 0);
1185 /* bring the value to tos */
1186 x87_create_fxch(state, n, op1_idx);
1190 /* patch the operation */
1191 x87_patch_insn(n, op_ia32_FtstFnstsw);
/* NOTE(review): reg1 is looked up with op1_idx here; lines between 1186 and
 * 1191 were sampled out — presumably op1_idx was set to 0 after the fxch. */
1192 reg1 = get_st_reg(op1_idx);
1193 attr->x87[0] = reg1;
1194 attr->x87[1] = NULL;
1195 attr->x87[2] = NULL;
1197 if (!is_vfp_live(reg_index_1, live))
1198 x87_create_fpop(state, sched_next(n), 1);
1200 return NO_NODE_ADDED;
1206 * @param state the x87 state
1207 * @param n the node that should be simulated (and patched)
1209 * @return NO_NODE_ADDED
1211 static int sim_Fucom(x87_state *state, ir_node *n)
1215 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1217 x87_simulator *sim = state->sim;
1218 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1219 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1220 const arch_register_t *op1 = x87_get_irn_register(op1_node);
1221 const arch_register_t *op2 = x87_get_irn_register(op2_node);
1222 int reg_index_1 = arch_register_get_index(op1);
1223 int reg_index_2 = arch_register_get_index(op2);
1224 unsigned live = vfp_live_args_after(sim, n, 0);
1225 bool permuted = attr->attr.data.ins_permuted;
1229 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1230 arch_register_get_name(op1), arch_register_get_name(op2)));
1231 DEBUG_ONLY(vfp_dump_live(live);)
1232 DB((dbg, LEVEL_1, "Stack before: "));
1233 DEBUG_ONLY(x87_dump_stack(state);)
/* The first operand must already live on the simulated x87 stack. */
1235 op1_idx = x87_on_stack(state, reg_index_1);
1236 assert(op1_idx >= 0);
1238 /* BEWARE: check for comp a,a cases, they might happen */
1239 if (reg_index_2 != REG_VFP_VFP_NOREG) {
1240 /* second operand is a vfp register */
1241 op2_idx = x87_on_stack(state, reg_index_2);
1242 assert(op2_idx >= 0);
1244 if (is_vfp_live(reg_index_2, live)) {
1245 /* second operand is live */
1247 if (is_vfp_live(reg_index_1, live)) {
/* Both operands stay live: no pop; only make sure one of them is TOS.
 * When op2 is TOS instead of op1, the compare runs reversed, so the
 * ins_permuted flag is flipped to compensate. */
1248 /* both operands are live */
1251 /* res = tos X op */
1252 } else if (op2_idx == 0) {
1253 /* res = op X tos */
1254 permuted = !permuted;
1257 /* bring the first one to tos */
1258 x87_create_fxch(state, n, op1_idx);
1259 if (op1_idx == op2_idx) {
1261 } else if (op2_idx == 0) {
1265 /* res = tos X op */
1268 /* second live, first operand is dead here, bring it to tos.
1269 This means further, op1_idx != op2_idx. */
1270 assert(op1_idx != op2_idx);
1272 x87_create_fxch(state, n, op1_idx);
1277 /* res = tos X op, pop */
1281 /* second operand is dead */
1282 if (is_vfp_live(reg_index_1, live)) {
1283 /* first operand is live: bring second to tos.
1284 This means further, op1_idx != op2_idx. */
1285 assert(op1_idx != op2_idx);
1287 x87_create_fxch(state, n, op2_idx);
1292 /* res = op X tos, pop */
1294 permuted = !permuted;
1297 /* both operands are dead here, check first for identity. */
1298 if (op1_idx == op2_idx) {
1299 /* identical operands, one pop needed */
1301 x87_create_fxch(state, n, op1_idx);
1305 /* res = tos X op, pop */
1308 /* different, move them to st and st(1) and pop both.
1309 The tricky part is to get one into st(1). */
1310 else if (op2_idx == 1) {
1311 /* good, second operand is already in the right place, move the first */
1313 /* bring the first on top */
1314 x87_create_fxch(state, n, op1_idx)
1315 assert(op2_idx != 0);
1318 /* res = tos X op, pop, pop */
1320 } else if (op1_idx == 1) {
1321 /* good, first operand is already in the right place, move the second */
1323 /* bring the second on top */
1324 x87_create_fxch(state, n, op2_idx);
1325 assert(op1_idx != 0);
1328 /* res = op X tos, pop, pop */
1329 permuted = !permuted;
1333 /* if one is already the TOS, we need two fxch */
1335 /* first one is TOS, move to st(1) */
1336 x87_create_fxch(state, n, 1);
1337 assert(op2_idx != 1);
1339 x87_create_fxch(state, n, op2_idx);
1341 /* res = op X tos, pop, pop */
1343 permuted = !permuted;
1345 } else if (op2_idx == 0) {
1346 /* second one is TOS, move to st(1) */
1347 x87_create_fxch(state, n, 1);
1348 assert(op1_idx != 1);
1350 x87_create_fxch(state, n, op1_idx);
1352 /* res = tos X op, pop, pop */
1355 /* none of them is either TOS or st(1), 3 fxch needed */
1356 x87_create_fxch(state, n, op2_idx);
1357 assert(op1_idx != 0);
1358 x87_create_fxch(state, n, 1);
1360 x87_create_fxch(state, n, op1_idx);
1362 /* res = tos X op, pop, pop */
1369 /* second operand is an address mode */
1370 if (is_vfp_live(reg_index_1, live)) {
1371 /* first operand is live: bring it to TOS */
1373 x87_create_fxch(state, n, op1_idx);
1377 /* first operand is dead: bring it to tos */
1379 x87_create_fxch(state, n, op1_idx);
/* Patch the virtual node into the concrete x87 opcode; the variant
 * (plain/pop/double-pop) is selected by how many pops were decided above. */
1386 /* patch the operation */
1387 if (is_ia32_vFucomFnstsw(n)) {
1391 case 0: dst = op_ia32_FucomFnstsw; break;
1392 case 1: dst = op_ia32_FucompFnstsw; break;
1393 case 2: dst = op_ia32_FucomppFnstsw; break;
1394 default: panic("invalid popcount");
1397 for (i = 0; i < pops; ++i) {
1400 } else if (is_ia32_vFucomi(n)) {
1402 case 0: dst = op_ia32_Fucomi; break;
1403 case 1: dst = op_ia32_Fucompi; x87_pop(state); break;
/* fucomi has no double-pop form: use fucompi plus an explicit fpop. */
1405 dst = op_ia32_Fucompi;
1407 x87_create_fpop(state, sched_next(n), 1);
1409 default: panic("invalid popcount");
1412 panic("invalid operation %+F", n);
1415 x87_patch_insn(n, dst);
/* Record the concrete st(N) registers in the x87 attribute. */
1422 op1 = get_st_reg(op1_idx);
1425 op2 = get_st_reg(op2_idx);
1428 attr->x87[2] = NULL;
1429 attr->attr.data.ins_permuted = permuted;
1432 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1433 arch_register_get_name(op1), arch_register_get_name(op2)));
1435 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1436 arch_register_get_name(op1)));
1439 return NO_NODE_ADDED;
1445 * @param state the x87 state
1446 * @param n the node that should be simulated (and patched)
1448 * @return NO_NODE_ADDED
1450 static int sim_Keep(x87_state *state, ir_node *node)
1453 const arch_register_t *op_reg;
1459 DB((dbg, LEVEL_1, ">>> %+F\n", node));
/* For every vfp operand of the Keep that is on the simulated stack but
 * no longer live, emit an fpop so the stack does not keep dead values. */
1461 arity = get_irn_arity(node);
1462 for (i = 0; i < arity; ++i) {
1463 op = get_irn_n(node, i);
1464 op_reg = arch_get_irn_register(op);
/* Keep may reference non-float values too; skip everything that is
 * not in the vfp register class. */
1465 if (arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1468 reg_id = arch_register_get_index(op_reg);
1469 live = vfp_live_args_after(state->sim, node, 0);
1471 op_stack_idx = x87_on_stack(state, reg_id);
1472 if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live))
1473 x87_create_fpop(state, sched_next(node), 1);
1476 DB((dbg, LEVEL_1, "Stack after: "));
1477 DEBUG_ONLY(x87_dump_stack(state);)
1479 return NO_NODE_ADDED;
1483 * Keep the given node alive by adding a be_Keep.
1485 * @param node the node to keep alive
1487 static void keep_float_node_alive(ir_node *node)
1489 ir_node *block = get_nodes_block(node);
/* Attach a be_Keep with the node as its single operand, scheduled
 * directly after it, so later passes cannot remove the value. */
1490 ir_node *keep = be_new_Keep(block, 1, &node);
1491 sched_add_after(node, keep);
1495 * Create a copy of a node. Recreate the node if it's a constant.
1497 * @param state the x87 state
1498 * @param n the node to be copied
1500 * @return the copy of n
1502 static ir_node *create_Copy(x87_state *state, ir_node *n)
1504 dbg_info *n_dbg = get_irn_dbg_info(n);
1505 ir_mode *mode = get_irn_mode(n);
1506 ir_node *block = get_nodes_block(n);
1507 ir_node *pred = get_irn_n(n, 0);
/* Constructor selected when the copied value is an x87 load-constant;
 * stays NULL for a plain register copy. */
1508 ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL;
1510 const arch_register_t *out;
1511 const arch_register_t *op1;
1512 ia32_x87_attr_t *attr;
1514 /* Do not copy constants, recreate them. */
1515 switch (get_ia32_irn_opcode(pred)) {
1517 cnstr = new_bd_ia32_fldz;
1520 cnstr = new_bd_ia32_fld1;
1522 case iro_ia32_fldpi:
1523 cnstr = new_bd_ia32_fldpi;
1525 case iro_ia32_fldl2e:
1526 cnstr = new_bd_ia32_fldl2e;
1528 case iro_ia32_fldl2t:
1529 cnstr = new_bd_ia32_fldl2t;
1531 case iro_ia32_fldlg2:
1532 cnstr = new_bd_ia32_fldlg2;
1534 case iro_ia32_fldln2:
1535 cnstr = new_bd_ia32_fldln2;
1541 out = x87_get_irn_register(n);
1542 op1 = x87_get_irn_register(pred);
1544 if (cnstr != NULL) {
1545 /* copy a constant: re-emit the fld instruction instead of copying */
1546 res = (*cnstr)(n_dbg, block, mode);
1548 x87_push(state, arch_register_get_index(out), res);
1550 attr = get_ia32_x87_attr(res);
1551 attr->x87[2] = get_st_reg(0);
/* Plain copy: an fpushCopy duplicates st(op1_idx) onto the stack top. */
1553 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1555 res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);
1557 x87_push(state, arch_register_get_index(out), res);
1559 attr = get_ia32_x87_attr(res);
1560 attr->x87[0] = get_st_reg(op1_idx);
1561 attr->x87[2] = get_st_reg(0);
1563 arch_set_irn_register(res, out);
1569 * Simulate a be_Copy.
1571 * @param state the x87 state
1572 * @param n the node that should be simulated (and patched)
1574 * @return NO_NODE_ADDED
1576 static int sim_Copy(x87_state *state, ir_node *n)
/* Only x87 (vfp) copies need simulation; GP/XMM copies pass through. */
1578 arch_register_class_t const *const cls = arch_get_irn_reg_class(n);
1579 if (cls != &ia32_reg_classes[CLASS_ia32_vfp])
1580 return NO_NODE_ADDED;
1582 ir_node *const pred = be_get_Copy_op(n);
1583 arch_register_t const *const op1 = x87_get_irn_register(pred);
1584 arch_register_t const *const out = x87_get_irn_register(n);
1585 unsigned const live = vfp_live_args_after(state->sim, n, REGMASK(out));
1587 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1588 arch_register_get_name(op1), arch_register_get_name(out)));
1589 DEBUG_ONLY(vfp_dump_live(live);)
1591 if (is_vfp_live(arch_register_get_index(op1), live)) {
1592 /* Operand is still live, a real copy. We need here an fpush that can
1593 hold a register, so use the fpushCopy or recreate constants */
1594 ir_node *const node = create_Copy(state, n);
1596 /* We have to make sure the old value doesn't go dead (which can happen
1597 * when we recreate constants). As the simulator expected that value in
1598 * the pred blocks. This is unfortunate as removing it would save us 1
1599 * instruction, but we would have to rerun all the simulation to get
1602 ir_node *const next = sched_next(n);
1605 sched_add_before(next, node);
1607 if (get_irn_n_edges(pred) == 0) {
1608 keep_float_node_alive(pred);
1611 DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
1613 /* Just a virtual copy: no instruction emitted, only the simulated
1614 stack slot is re-labelled with the output register. */
1614 int const op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1615 x87_set_st(state, arch_register_get_index(out), n, op1_idx);
1617 return NO_NODE_ADDED;
1621 * Returns the vf0 result Proj of a Call.
1623 * @param call the Call node
1625 static ir_node *get_call_result_proj(ir_node *call)
1627 /* search the result proj: the Proj with number pn_ia32_Call_vf0 is
1628 the floating point return value of the Call */
1628 foreach_out_edge(call, edge) {
1629 ir_node *proj = get_edge_src_irn(edge);
1630 long pn = get_Proj_proj(proj);
1632 if (pn == pn_ia32_Call_vf0)
/* A float-returning Call must always have this Proj. */
1636 panic("result Proj missing");
/* Inline assembly with x87 register constraints is not supported by the
 * simulator: reject any Asm node with vfp inputs or outputs. */
1639 static int sim_Asm(x87_state *const state, ir_node *const n)
1643 for (size_t i = get_irn_arity(n); i-- != 0;) {
1644 arch_register_req_t const *const req = arch_get_irn_register_req_in(n, i);
1645 if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1646 panic("cannot handle %+F with x87 constraints", n);
1649 for (size_t i = arch_get_irn_n_outs(n); i-- != 0;) {
1650 arch_register_req_t const *const req = arch_get_irn_register_req_out(n, i);
1651 if (req->cls == &ia32_reg_classes[CLASS_ia32_vfp])
1652 panic("cannot handle %+F with x87 constraints", n);
1655 return NO_NODE_ADDED;
1659 * Simulate a ia32_Call.
1661 * @param state the x87 state
1662 * @param n the node that should be simulated (and patched)
1664 * @return NO_NODE_ADDED
1666 static int sim_Call(x87_state *state, ir_node *n)
1668 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1670 /* at the begin of a call the x87 state should be empty (the x87 calling
1671 convention requires all stack slots to be freed before a call) */
1671 assert(state->depth == 0 && "stack not empty before call");
1673 ir_type *const call_tp = get_ia32_call_attr_const(n)->call_tp;
1674 if (get_method_n_ress(call_tp) != 0) {
1675 /* If the called function returns a float, it is returned in st(0).
1676 * This even happens if the return value is NOT used.
1677 * Moreover, only one return result is supported. */
1678 ir_type *const res_type = get_method_res_type(call_tp, 0);
1679 ir_mode *const mode = get_type_mode(res_type);
1680 if (mode && mode_is_float(mode)) {
1681 ir_node *const resproj = get_call_result_proj(n);
1682 arch_register_t const *const reg = x87_get_irn_register(resproj);
/* Model the returned st(0) value by pushing it onto the simulated stack. */
1683 x87_push(state, arch_register_get_index(reg), resproj);
1686 DB((dbg, LEVEL_1, "Stack after: "));
1687 DEBUG_ONLY(x87_dump_stack(state);)
1689 return NO_NODE_ADDED;
1693 * Simulate a be_Return.
1695 * @param state the x87 state
1696 * @param n the node that should be simulated (and patched)
1698 * @return NO_NODE_ADDED
1700 static int sim_Return(x87_state *state, ir_node *n)
1702 #ifdef DEBUG_libfirm
1703 /* only floating point return values must reside on stack: count them
1704 and check they match the simulated stack depth exactly */
1704 int n_float_res = 0;
1705 int const n_res = be_Return_get_n_rets(n);
1706 for (int i = 0; i < n_res; ++i) {
1707 ir_node *const res = get_irn_n(n, n_be_Return_val + i);
1708 if (mode_is_float(get_irn_mode(res)))
1711 assert(x87_get_depth(state) == n_float_res);
1714 /* pop them virtually: the hardware pops return values implicitly,
 so only the simulated state needs adjusting */
1716 return NO_NODE_ADDED;
1720 * Simulate a be_Perm.
1722 * @param state the x87 state
1723 * @param irn the node that should be simulated (and patched)
1725 * @return NO_NODE_ADDED
1727 static int sim_Perm(x87_state *state, ir_node *irn)
1730 ir_node *pred = get_irn_n(irn, 0);
1733 /* handle only floating point Perms */
1734 if (! mode_is_float(get_irn_mode(pred)))
1735 return NO_NODE_ADDED;
1737 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
1739 /* Perm is a pure virtual instruction on x87.
1740 All inputs must be on the FPU stack and are pairwise
1741 different from each other.
1742 So, all we need to do is to permute the stack state. */
1743 n = get_irn_arity(irn);
1744 NEW_ARR_A(int, stack_pos, n);
1746 /* collect old stack positions */
1747 for (i = 0; i < n; ++i) {
1748 const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
1749 int idx = x87_on_stack(state, arch_register_get_index(inreg));
1751 assert(idx >= 0 && "Perm argument not on x87 stack");
1755 /* now do the permutation: re-label each stack slot with the output
 register of the corresponding Proj; no instruction is emitted */
1756 foreach_out_edge(irn, edge) {
1757 ir_node *proj = get_edge_src_irn(edge);
1758 const arch_register_t *out = x87_get_irn_register(proj);
1759 long num = get_Proj_proj(proj);
1761 assert(0 <= num && num < n && "More Proj's than Perm inputs");
1762 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
1764 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
1766 return NO_NODE_ADDED;
1770 * Kill any dead registers at block start by popping them from the stack.
1772 * @param sim the simulator handle
1773 * @param block the current block
1774 * @param state the x87 state at the begin of the block
1776 static void x87_kill_deads(x87_simulator *const sim, ir_node *const block, x87_state *const state)
1778 ir_node *first_insn = sched_first(block);
1779 ir_node *keep = NULL;
1780 unsigned live = vfp_live_args_after(sim, block, 0);
1782 int i, depth, num_pop;
/* Build a bitmask of stack slots whose value is dead at block entry. */
1785 depth = x87_get_depth(state);
1786 for (i = depth - 1; i >= 0; --i) {
1787 int reg = x87_get_st_reg(state, i);
1789 if (! is_vfp_live(reg, live))
1790 kill_mask |= (1 << i);
1794 DB((dbg, LEVEL_1, "Killing deads:\n"));
1795 DEBUG_ONLY(vfp_dump_live(live);)
1796 DEBUG_ONLY(x87_dump_stack(state);)
1798 if (kill_mask != 0 && live == 0) {
1799 /* special case: kill all registers with a single instruction */
1800 if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
1801 if (ia32_cg_config.use_femms) {
1802 /* use FEMMS on AMD processors to clear all */
1803 keep = new_bd_ia32_femms(NULL, block);
1805 /* use EMMS to clear all */
1806 keep = new_bd_ia32_emms(NULL, block);
1808 sched_add_before(first_insn, keep);
1814 /* now kill registers */
1816 /* we can only kill from TOS, so bring them up */
1817 if (! (kill_mask & 1)) {
1818 /* search from behind, because we can do a double-pop */
1819 for (i = depth - 1; i >= 0; --i) {
1820 if (kill_mask & (1 << i)) {
1821 kill_mask &= ~(1 << i);
/* Mark the slot as free and exchange the dead value to TOS. */
1828 x87_set_st(state, -1, keep, i);
1829 x87_create_fxch(state, first_insn, i);
1832 if ((kill_mask & 3) == 3) {
1833 /* we can do a double-pop */
1837 /* only a single pop */
1842 kill_mask >>= num_pop;
1843 keep = x87_create_fpop(state, first_insn, num_pop);
1850 * Run a simulation and fix all virtual instructions for a block.
1852 * @param sim the simulator handle
1853 * @param block the current block
1855 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
1858 blk_state *bl_state = x87_get_bl_state(sim, block);
1859 x87_state *state = bl_state->begin;
1860 ir_node *start_block;
1862 assert(state != NULL);
1863 /* already processed? (end state set means block was simulated) */
1864 if (bl_state->end != NULL)
1867 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
1868 DB((dbg, LEVEL_2, "State at Block begin:\n "));
1869 DEBUG_ONLY(x87_dump_stack(state);)
1871 /* create a new state, will be changed */
1872 state = x87_clone_state(sim, state);
1873 /* at block begin, kill all dead registers */
1874 x87_kill_deads(sim, block, state);
1876 /* beware, n might change */
1877 for (n = sched_first(block); !sched_is_end(n); n = next) {
1880 ir_op *op = get_irn_op(n);
1883 * get the next node to be simulated here.
1884 * n might be completely removed from the schedule.
1886 next = sched_next(n);
/* The per-opcode simulator function was registered in op->ops.generic
 * by register_sim(); opcodes without one need no simulation. */
1887 if (op->ops.generic != NULL) {
1888 func = (sim_func)op->ops.generic;
1891 node_inserted = (*func)(state, n);
1894 * sim_func might have added an additional node after n,
1895 * so update next node
1896 * beware: n must not be changed by sim_func
1897 * (i.e. removed from schedule) in this case
1899 if (node_inserted != NO_NODE_ADDED)
1900 next = sched_next(n);
1904 start_block = get_irg_start_block(get_irn_irg(block));
1906 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state);)
1908 /* check if the state must be shuffled */
1909 foreach_block_succ(block, edge) {
1910 ir_node *succ = get_edge_src_irn(edge);
1911 blk_state *succ_state;
1913 if (succ == start_block)
1916 succ_state = x87_get_bl_state(sim, succ);
1918 if (succ_state->begin == NULL) {
1919 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
1920 DEBUG_ONLY(x87_dump_stack(state);)
1921 succ_state->begin = state;
1923 waitq_put(sim->worklist, succ);
1925 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
1926 /* There is already a begin state for the successor, bad.
1927 Do the necessary permutations.
1928 Note that critical edges are removed, so this is always possible:
1929 If the successor has more than one possible input, then it must
1932 x87_shuffle(block, state, succ_state->begin);
1935 bl_state->end = state;
1939 * Register a simulator function.
1941 * @param op the opcode to simulate
1942 * @param func the simulator function for the opcode
1944 static void register_sim(ir_op *op, sim_func func)
/* Each opcode may have at most one simulator; it is stored in the
 * generic function pointer and dispatched in x87_simulate_block(). */
1946 assert(op->ops.generic == NULL);
1947 op->ops.generic = (op_func) func;
1951 * Create a new x87 simulator.
1953 * @param sim a simulator handle, will be initialized
1954 * @param irg the current graph
1956 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
1958 obstack_init(&sim->obst);
1959 sim->blk_states = pmap_create();
1960 sim->n_idx = get_irg_last_idx(irg);
/* One liveness entry per node index, allocated on the simulator obstack. */
1961 sim->live = OALLOCN(&sim->obst, vfp_liveness, sim->n_idx);
1963 DB((dbg, LEVEL_1, "--------------------------------\n"
1964 "x87 Simulator started for %+F\n", irg));
1966 /* set the generic function pointer of instruction we must simulate */
1967 ir_clear_opcodes_generic_func();
1969 register_sim(op_ia32_Asm, sim_Asm);
1970 register_sim(op_ia32_Call, sim_Call);
1971 register_sim(op_ia32_vfld, sim_fld);
1972 register_sim(op_ia32_vfild, sim_fild);
1973 register_sim(op_ia32_vfld1, sim_fld1);
1974 register_sim(op_ia32_vfldz, sim_fldz);
1975 register_sim(op_ia32_vfadd, sim_fadd);
1976 register_sim(op_ia32_vfsub, sim_fsub);
1977 register_sim(op_ia32_vfmul, sim_fmul);
1978 register_sim(op_ia32_vfdiv, sim_fdiv);
1979 register_sim(op_ia32_vfprem, sim_fprem);
1980 register_sim(op_ia32_vfabs, sim_fabs);
1981 register_sim(op_ia32_vfchs, sim_fchs);
1982 register_sim(op_ia32_vfist, sim_fist);
1983 register_sim(op_ia32_vfisttp, sim_fisttp);
1984 register_sim(op_ia32_vfst, sim_fst);
1985 register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
1986 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
1987 register_sim(op_ia32_vFucomi, sim_Fucom);
1988 register_sim(op_be_Copy, sim_Copy);
1989 register_sim(op_be_Return, sim_Return);
1990 register_sim(op_be_Perm, sim_Perm);
1991 register_sim(op_be_Keep, sim_Keep);
1995 * Destroy a x87 simulator.
1997 * @param sim the simulator handle
1999 static void x87_destroy_simulator(x87_simulator *sim)
/* Free everything x87_init_simulator() allocated: the block-state map
 * and the obstack holding states and liveness info. */
2001 pmap_destroy(sim->blk_states);
2002 obstack_free(&sim->obst, NULL);
2003 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2007 * Pre-block walker: calculate the liveness information for the block
2008 * and store it into the sim->live cache.
2010 static void update_liveness_walker(ir_node *block, void *data)
/* Walker callback: data is the simulator handle. */
2012 x87_simulator *sim = (x87_simulator*)data;
2013 update_liveness(sim, block);
2017 * Run a simulation and fix all virtual instructions for a graph.
2018 * Replaces all virtual floating point instructions and registers
2021 void ia32_x87_simulate_graph(ir_graph *irg)
2023 /* TODO improve code quality (less executed fxch) by using execfreqs */
2025 ir_node *block, *start_block;
2026 blk_state *bl_state;
2029 /* create the simulator */
2030 x87_init_simulator(&sim, irg);
2032 start_block = get_irg_start_block(irg);
2033 bl_state = x87_get_bl_state(&sim, start_block);
2035 /* start with the empty state: no values on the x87 stack at entry */
2037 bl_state->begin = ∅
2039 sim.worklist = new_waitq();
2040 waitq_put(sim.worklist, start_block);
2042 be_assure_live_sets(irg);
2043 sim.lv = be_get_irg_liveness(irg);
2045 /* Calculate the liveness for all nodes. We must precalculate this info,
2046 * because the simulator adds new nodes (possible before Phi nodes) which
2047 * would let a lazy calculation fail.
2048 * On the other hand we reduce the computation amount due to
2049 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2051 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
/* Worklist-driven simulation: blocks are (re)visited until all begin
 * states are fixed. */
2055 block = (ir_node*)waitq_get(sim.worklist);
2056 x87_simulate_block(&sim, block);
2057 } while (! waitq_empty(sim.worklist));
2060 del_waitq(sim.worklist);
2061 x87_destroy_simulator(&sim);
2064 /* Initializes the x87 simulator. */
2065 void ia32_init_x87(void)
2067 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");