 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief This file implements the x87 support and virtual to stack
23 * register translation for the ia32 backend.
24 * @author Michael Beck
34 #include "iredges_t.h"
46 #include "../belive_t.h"
47 #include "../besched_t.h"
48 #include "../benode_t.h"
49 #include "bearch_ia32_t.h"
50 #include "ia32_new_nodes.h"
51 #include "gen_ia32_new_nodes.h"
52 #include "gen_ia32_regalloc_if.h"
54 #include "ia32_architecture.h"
/** Mask a stack offset into the valid index range; assumes N_x87_REGS is a power of two. */
#define MASK_TOS(x) ((x) & (N_x87_REGS - 1))

/** the debug handle */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* Forward declaration. */
typedef struct _x87_simulator x87_simulator;
70 * An exchange template.
71 * Note that our virtual functions have the same inputs
72 * and attributes as the real ones, so we can simple exchange
74 * Further, x87 supports inverse instructions, so we can handle them.
76 typedef struct _exchange_tmpl {
77 ir_op *normal_op; /**< the normal one */
78 ir_op *reverse_op; /**< the reverse one if exists */
79 ir_op *normal_pop_op; /**< the normal one with tos pop */
80 ir_op *reverse_pop_op; /**< the reverse one with tos pop */
84 * An entry on the simulated x87 stack.
86 typedef struct _st_entry {
87 int reg_idx; /**< the virtual register index of this stack value */
88 ir_node *node; /**< the node that produced this value */
94 typedef struct _x87_state {
95 st_entry st[N_x87_REGS]; /**< the register stack */
96 int depth; /**< the current stack depth */
97 int tos; /**< position of the tos */
98 x87_simulator *sim; /**< The simulator. */
/** An empty state, used for blocks without fp instructions. */
/* NOTE(review): '_empty' — a file-scope identifier with a leading underscore —
 * is in the namespace reserved by ISO C; consider renaming. */
static x87_state _empty = { { {0, NULL}, }, 0, 0, NULL };
static x87_state *empty = (x87_state *)&_empty;
/**
 * Return values of the instruction simulator functions.
 */
enum {
	NO_NODE_ADDED = 0,  /**< No node that needs simulation was added. */
	NODE_ADDED    = 1   /**< A node that must be simulated was added by the simulator
	                         in the schedule AFTER the current node. */
};
/**
 * The type of an instruction simulator function.
 *
 * @param state  the x87 state
 * @param n      the node to be simulated
 *
 * @return NODE_ADDED if a node was added AFTER n in schedule that MUST be
 *         simulated too;
 *         NO_NODE_ADDED otherwise
 */
typedef int (*sim_func)(x87_state *state, ir_node *n);
127 * A block state: Every block has a x87 state at the beginning and at the end.
129 typedef struct _blk_state {
130 x87_state *begin; /**< state at the begin or NULL if not assigned */
131 x87_state *end; /**< state at the end or NULL if not assigned */
134 #define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
/** liveness bitset for vfp registers. */
/* one bit per virtual fp register; 8 registers fit into an unsigned char */
typedef unsigned char vfp_liveness;
142 struct _x87_simulator {
143 struct obstack obst; /**< An obstack for fast allocating. */
144 pmap *blk_states; /**< Map blocks to states. */
145 be_lv_t *lv; /**< intrablock liveness. */
146 vfp_liveness *live; /**< Liveness information. */
147 unsigned n_idx; /**< The cached get_irg_last_idx() result. */
148 waitq *worklist; /**< Worklist of blocks that must be processed. */
149 ia32_isa_t *isa; /**< the ISA object */
153 * Returns the current stack depth.
155 * @param state the x87 state
157 * @return the x87 stack depth
159 static int x87_get_depth(const x87_state *state)
162 } /* x87_get_depth */
165 * Return the virtual register index at st(pos).
167 * @param state the x87 state
168 * @param pos a stack position
170 * @return the vfp register index that produced the value at st(pos)
172 static int x87_get_st_reg(const x87_state *state, int pos)
174 assert(pos < state->depth);
175 return state->st[MASK_TOS(state->tos + pos)].reg_idx;
176 } /* x87_get_st_reg */
180 * Return the node at st(pos).
182 * @param state the x87 state
183 * @param pos a stack position
185 * @return the IR node that produced the value at st(pos)
187 static ir_node *x87_get_st_node(const x87_state *state, int pos)
189 assert(pos < state->depth);
190 return state->st[MASK_TOS(state->tos + pos)].node;
191 } /* x87_get_st_node */
#ifdef DEBUG_libfirm
/**
 * Dump the stack for debugging.
 *
 * @param state  the x87 state
 */
static void x87_dump_stack(const x87_state *state)
{
	int i;

	/* print from the deepest entry up to the tos */
	for (i = state->depth - 1; i >= 0; --i) {
		DB((dbg, LEVEL_2, "vf%d(%+F) ", x87_get_st_reg(state, i),
		    x87_get_st_node(state, i)));
	}
	DB((dbg, LEVEL_2, "<-- TOS\n"));
}  /* x87_dump_stack */
#endif /* DEBUG_libfirm */
211 * Set a virtual register to st(pos).
213 * @param state the x87 state
214 * @param reg_idx the vfp register index that should be set
215 * @param node the IR node that produces the value of the vfp register
216 * @param pos the stack position where the new value should be entered
218 static void x87_set_st(x87_state *state, int reg_idx, ir_node *node, int pos)
220 assert(0 < state->depth);
221 state->st[MASK_TOS(state->tos + pos)].reg_idx = reg_idx;
222 state->st[MASK_TOS(state->tos + pos)].node = node;
224 DB((dbg, LEVEL_2, "After SET_REG: "));
225 DEBUG_ONLY(x87_dump_stack(state));
229 * Set the tos virtual register.
231 * @param state the x87 state
232 * @param reg_idx the vfp register index that should be set
233 * @param node the IR node that produces the value of the vfp register
235 static void x87_set_tos(x87_state *state, int reg_idx, ir_node *node)
237 x87_set_st(state, reg_idx, node, 0);
241 * Swap st(0) with st(pos).
243 * @param state the x87 state
244 * @param pos the stack position to change the tos with
246 static void x87_fxch(x87_state *state, int pos)
249 assert(pos < state->depth);
251 entry = state->st[MASK_TOS(state->tos + pos)];
252 state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
253 state->st[MASK_TOS(state->tos)] = entry;
255 DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
259 * Convert a virtual register to the stack index.
261 * @param state the x87 state
262 * @param reg_idx the register vfp index
264 * @return the stack position where the register is stacked
265 * or -1 if the virtual register was not found
267 static int x87_on_stack(const x87_state *state, int reg_idx)
269 int i, tos = state->tos;
271 for (i = 0; i < state->depth; ++i)
272 if (state->st[MASK_TOS(tos + i)].reg_idx == reg_idx)
278 * Push a virtual Register onto the stack, double pushed allowed.
280 * @param state the x87 state
281 * @param reg_idx the register vfp index
282 * @param node the node that produces the value of the vfp register
284 static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node)
286 assert(state->depth < N_x87_REGS && "stack overrun");
289 state->tos = MASK_TOS(state->tos - 1);
290 state->st[state->tos].reg_idx = reg_idx;
291 state->st[state->tos].node = node;
293 DB((dbg, LEVEL_2, "After PUSH: ")); DEBUG_ONLY(x87_dump_stack(state));
297 * Push a virtual Register onto the stack, double pushes are NOT allowed.
299 * @param state the x87 state
300 * @param reg_idx the register vfp index
301 * @param node the node that produces the value of the vfp register
302 * @param dbl_push if != 0 double pushes are allowed
304 static void x87_push(x87_state *state, int reg_idx, ir_node *node)
306 assert(x87_on_stack(state, reg_idx) == -1 && "double push");
308 x87_push_dbl(state, reg_idx, node);
312 * Pop a virtual Register from the stack.
314 * @param state the x87 state
316 static void x87_pop(x87_state *state)
318 assert(state->depth > 0 && "stack underrun");
321 state->tos = MASK_TOS(state->tos + 1);
323 DB((dbg, LEVEL_2, "After POP: ")); DEBUG_ONLY(x87_dump_stack(state));
327 * Empty the fpu stack
329 * @param state the x87 state
331 static void x87_emms(x87_state *state)
338 * Returns the block state of a block.
340 * @param sim the x87 simulator handle
341 * @param block the current block
343 * @return the block state
345 static blk_state *x87_get_bl_state(x87_simulator *sim, ir_node *block)
347 pmap_entry *entry = pmap_find(sim->blk_states, block);
350 blk_state *bl_state = obstack_alloc(&sim->obst, sizeof(*bl_state));
351 bl_state->begin = NULL;
352 bl_state->end = NULL;
354 pmap_insert(sim->blk_states, block, bl_state);
358 return PTR_TO_BLKSTATE(entry->value);
359 } /* x87_get_bl_state */
362 * Creates a new x87 state.
364 * @param sim the x87 simulator handle
366 * @return a new x87 state
368 static x87_state *x87_alloc_state(x87_simulator *sim)
370 x87_state *res = obstack_alloc(&sim->obst, sizeof(*res));
374 } /* x87_alloc_state */
379 * @param sim the x87 simulator handle
380 * @param src the x87 state that will be cloned
382 * @return a cloned copy of the src state
384 static x87_state *x87_clone_state(x87_simulator *sim, const x87_state *src)
386 x87_state *res = x87_alloc_state(sim);
390 } /* x87_clone_state */
393 * Patch a virtual instruction into a x87 one and return
394 * the node representing the result value.
396 * @param n the IR node to patch
397 * @param op the x87 opcode to patch in
399 static ir_node *x87_patch_insn(ir_node *n, ir_op *op)
401 ir_mode *mode = get_irn_mode(n);
406 if (mode == mode_T) {
407 /* patch all Proj's */
408 const ir_edge_t *edge;
410 foreach_out_edge(n, edge) {
411 ir_node *proj = get_edge_src_irn(edge);
413 mode = get_irn_mode(proj);
414 if (mode_is_float(mode)) {
416 set_irn_mode(proj, ia32_reg_classes[CLASS_ia32_st].mode);
420 } else if (mode_is_float(mode))
421 set_irn_mode(n, ia32_reg_classes[CLASS_ia32_st].mode);
423 } /* x87_patch_insn */
426 * Returns the first Proj of a mode_T node having a given mode.
428 * @param n the mode_T node
429 * @param m the desired mode of the Proj
430 * @return The first Proj of mode @p m found or NULL.
432 static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
434 const ir_edge_t *edge;
436 assert(get_irn_mode(n) == mode_T && "Need mode_T node");
438 foreach_out_edge(n, edge) {
439 ir_node *proj = get_edge_src_irn(edge);
440 if (get_irn_mode(proj) == m)
445 } /* get_irn_Proj_for_mode */
448 * Wrap the arch_* function here so we can check for errors.
450 static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
452 const arch_register_t *res = arch_get_irn_register(irn);
454 assert(res->reg_class->regs == ia32_vfp_regs);
456 } /* x87_get_irn_register */
458 static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
461 const arch_register_t *res = arch_irn_get_register(irn, pos);
463 assert(res->reg_class->regs == ia32_vfp_regs);
465 } /* x87_irn_get_register */
467 /* -------------- x87 perm --------------- */
470 * Creates a fxch for shuffle.
472 * @param state the x87 state
473 * @param pos parameter for fxch
474 * @param block the block were fxch is inserted
476 * Creates a new fxch node and reroute the user of the old node
479 * @return the fxch node
481 static ir_node *x87_fxch_shuffle(x87_state *state, int pos, ir_node *block)
484 ia32_x87_attr_t *attr;
486 fxch = new_bd_ia32_fxch(NULL, block);
487 attr = get_ia32_x87_attr(fxch);
488 attr->x87[0] = &ia32_st_regs[pos];
489 attr->x87[2] = &ia32_st_regs[0];
493 x87_fxch(state, pos);
495 } /* x87_fxch_shuffle */
/**
 * Calculate the necessary permutations to reach dst_state.
 *
 * These permutations are done with fxch instructions and placed
 * at the end of the block.
 *
 * Note that critical edges are removed here, so we need only
 * a shuffle if the current block has only one successor.
 *
 * NOTE(review): this chunk of the file is incomplete — the opening brace,
 * several statements (loop-variable resets, break/return statements, and the
 * "after" bookkeeping between the fxch insertions) are missing.  The code
 * below is kept verbatim; do not infer behavior for the missing parts.
 *
 * @param sim        the simulator handle
 * @param block      the current block
 * @param state      the current x87 stack state, might be modified
 * @param dst_block  the destination block
 * @param dst_state  destination state
 */
static x87_state *x87_shuffle(x87_simulator *sim, ir_node *block,
                              x87_state *state, ir_node *dst_block,
                              const x87_state *dst_state)
	int      i, n_cycles, k, ri;
	unsigned cycles[4], all_mask;
	char     cycle_idx[4][8];
	ir_node  *fxch, *before, *after;

	assert(state->depth == dst_state->depth);

	/* Some mathematics here:
	   If we have a cycle of length n that includes the tos,
	   we need n-1 exchange operations.
	   We can always add the tos and restore it, so we need
	   n+1 exchange operations for a cycle not containing the tos.
	   So, the maximum of needed operations is for a cycle of 7
	   not including the tos == 8.
	   This is the same number of ops we would need for using stores,
	   so exchange is cheaper (we save the loads).
	   On the other hand, we might need an additional exchange
	   in the next block to bring one operand on top, so the
	   number of ops in the first case is identical.
	   Further, no more than 4 cycles can exists (4 x 2). */
	all_mask = (1 << (state->depth)) - 1;

	/* detect the permutation cycles between state and dst_state */
	for (n_cycles = 0; all_mask; ++n_cycles) {
		int src_idx, dst_idx;

		/* find the first free slot */
		for (i = 0; i < state->depth; ++i) {
			if (all_mask & (1 << i)) {
				all_mask &= ~(1 << i);

				/* check if there are differences here */
				if (x87_get_st_reg(state, i) != x87_get_st_reg(dst_state, i))
		/* no more cycles found */

		/* collect one cycle starting at slot i */
		cycles[n_cycles] = (1 << i);
		cycle_idx[n_cycles][k++] = i;
		for (src_idx = i; ; src_idx = dst_idx) {
			dst_idx = x87_on_stack(dst_state, x87_get_st_reg(state, src_idx));

			if ((all_mask & (1 << dst_idx)) == 0)

			cycle_idx[n_cycles][k++] = dst_idx;
			cycles[n_cycles] |=  (1 << dst_idx);
			all_mask       &= ~(1 << dst_idx);
		cycle_idx[n_cycles][k] = -1;

	/* no permutation needed */

	/* Hmm: permutation needed */
	DB((dbg, LEVEL_2, "\n%+F needs permutation: from\n", block));
	DEBUG_ONLY(x87_dump_stack(state));
	DB((dbg, LEVEL_2, " to\n"));
	DEBUG_ONLY(x87_dump_stack(dst_state));

	DB((dbg, LEVEL_2, "Need %d cycles\n", n_cycles));
	for (ri = 0; ri < n_cycles; ++ri) {
		DB((dbg, LEVEL_2, " Ring %d:\n ", ri));
		for (k = 0; cycle_idx[ri][k] != -1; ++k)
			DB((dbg, LEVEL_2, " st%d ->", cycle_idx[ri][k]));
		DB((dbg, LEVEL_2, "\n"));

	/*
	 * Find the place node must be insert.
	 * We have only one successor block, so the last instruction should
	 * be a jump.
	 */
	before = sched_last(block);
	assert(is_cfop(before));

	/* now do the permutations */
	for (ri = 0; ri < n_cycles; ++ri) {
		if ((cycles[ri] & 1) == 0) {
			/* this cycle does not include the tos */
			fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
			sched_add_after(after, fxch);
			sched_add_before(before, fxch);
		for (k = 1; cycle_idx[ri][k] != -1; ++k) {
			fxch = x87_fxch_shuffle(state, cycle_idx[ri][k], block);
			sched_add_after(after, fxch);
			sched_add_before(before, fxch);
		if ((cycles[ri] & 1) == 0) {
			/* this cycle does not include the tos */
			fxch = x87_fxch_shuffle(state, cycle_idx[ri][0], block);
			sched_add_after(after, fxch);
639 * Create a fxch node before another node.
641 * @param state the x87 state
642 * @param n the node after the fxch
643 * @param pos exchange st(pos) with st(0)
647 static ir_node *x87_create_fxch(x87_state *state, ir_node *n, int pos)
650 ia32_x87_attr_t *attr;
651 ir_node *block = get_nodes_block(n);
653 x87_fxch(state, pos);
655 fxch = new_bd_ia32_fxch(NULL, block);
656 attr = get_ia32_x87_attr(fxch);
657 attr->x87[0] = &ia32_st_regs[pos];
658 attr->x87[2] = &ia32_st_regs[0];
662 sched_add_before(n, fxch);
663 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fxch), attr->x87[0]->name, attr->x87[2]->name));
665 } /* x87_create_fxch */
668 * Create a fpush before node n.
670 * @param state the x87 state
671 * @param n the node after the fpush
672 * @param pos push st(pos) on stack
673 * @param op_idx replace input op_idx of n with the fpush result
675 static void x87_create_fpush(x87_state *state, ir_node *n, int pos, int op_idx)
677 ir_node *fpush, *pred = get_irn_n(n, op_idx);
678 ia32_x87_attr_t *attr;
679 const arch_register_t *out = x87_get_irn_register(pred);
681 x87_push_dbl(state, arch_register_get_index(out), pred);
683 fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
684 attr = get_ia32_x87_attr(fpush);
685 attr->x87[0] = &ia32_st_regs[pos];
686 attr->x87[2] = &ia32_st_regs[0];
689 sched_add_before(n, fpush);
691 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(fpush), attr->x87[0]->name, attr->x87[2]->name));
692 } /* x87_create_fpush */
695 * Create a fpop before node n.
697 * @param state the x87 state
698 * @param n the node after the fpop
699 * @param num pop 1 or 2 values
701 * @return the fpop node
703 static ir_node *x87_create_fpop(x87_state *state, ir_node *n, int num)
705 ir_node *fpop = NULL;
706 ia32_x87_attr_t *attr;
711 if (ia32_cg_config.use_ffreep)
712 fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
714 fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
715 attr = get_ia32_x87_attr(fpop);
716 attr->x87[0] = &ia32_st_regs[0];
717 attr->x87[1] = &ia32_st_regs[0];
718 attr->x87[2] = &ia32_st_regs[0];
721 sched_add_before(n, fpop);
722 DB((dbg, LEVEL_1, "<<< %s %s\n", get_irn_opname(fpop), attr->x87[0]->name));
727 } /* x87_create_fpop */
730 * Creates an fldz before node n
732 * @param state the x87 state
733 * @param n the node after the fldz
735 * @return the fldz node
737 static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx)
739 ir_node *block = get_nodes_block(n);
742 fldz = new_bd_ia32_fldz(NULL, block, ia32_reg_classes[CLASS_ia32_st].mode);
744 sched_add_before(n, fldz);
745 DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));
748 x87_push(state, regidx, fldz);
753 /* --------------------------------- liveness ------------------------------------------ */
756 * The liveness transfer function.
757 * Updates a live set over a single step from a given node to its predecessor.
758 * Everything defined at the node is removed from the set, the uses of the node get inserted.
760 * @param irn The node at which liveness should be computed.
761 * @param live The bitset of registers live before @p irn. This set gets modified by updating it to
762 * the registers live after irn.
764 * @return The live bitset.
766 static vfp_liveness vfp_liveness_transfer(ir_node *irn, vfp_liveness live)
769 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
771 if (get_irn_mode(irn) == mode_T) {
772 const ir_edge_t *edge;
774 foreach_out_edge(irn, edge) {
775 ir_node *proj = get_edge_src_irn(edge);
777 if (arch_irn_consider_in_reg_alloc(cls, proj)) {
778 const arch_register_t *reg = x87_get_irn_register(proj);
779 live &= ~(1 << arch_register_get_index(reg));
784 if (arch_irn_consider_in_reg_alloc(cls, irn)) {
785 const arch_register_t *reg = x87_get_irn_register(irn);
786 live &= ~(1 << arch_register_get_index(reg));
789 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
790 ir_node *op = get_irn_n(irn, i);
792 if (mode_is_float(get_irn_mode(op)) &&
793 arch_irn_consider_in_reg_alloc(cls, op)) {
794 const arch_register_t *reg = x87_get_irn_register(op);
795 live |= 1 << arch_register_get_index(reg);
799 } /* vfp_liveness_transfer */
802 * Put all live virtual registers at the end of a block into a bitset.
804 * @param sim the simulator handle
805 * @param lv the liveness information
806 * @param bl the block
808 * @return The live bitset at the end of this block
810 static vfp_liveness vfp_liveness_end_of_block(x87_simulator *sim, const ir_node *block)
813 vfp_liveness live = 0;
814 const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
815 const be_lv_t *lv = sim->lv;
817 be_lv_foreach(lv, block, be_lv_state_end, i) {
818 const arch_register_t *reg;
819 const ir_node *node = be_lv_get_irn(lv, block, i);
820 if (!arch_irn_consider_in_reg_alloc(cls, node))
823 reg = x87_get_irn_register(node);
824 live |= 1 << arch_register_get_index(reg);
828 } /* vfp_liveness_end_of_block */
/** get the register mask from an arch_register */
/* register indices are < 8, so the shift stays in range of an unsigned */
#define REGMASK(reg) (1 << (arch_register_get_index(reg)))
834 * Return a bitset of argument registers which are live at the end of a node.
836 * @param sim the simulator handle
837 * @param pos the node
838 * @param kill kill mask for the output registers
840 * @return The live bitset.
842 static unsigned vfp_live_args_after(x87_simulator *sim, const ir_node *pos, unsigned kill)
844 unsigned idx = get_irn_idx(pos);
846 assert(idx < sim->n_idx);
847 return sim->live[idx] & ~kill;
848 } /* vfp_live_args_after */
851 * Calculate the liveness for a whole block and cache it.
853 * @param sim the simulator handle
854 * @param lv the liveness handle
855 * @param block the block
857 static void update_liveness(x87_simulator *sim, ir_node *block)
859 vfp_liveness live = vfp_liveness_end_of_block(sim, block);
863 /* now iterate through the block backward and cache the results */
864 sched_foreach_reverse(block, irn) {
865 /* stop at the first Phi: this produces the live-in */
869 idx = get_irn_idx(irn);
870 sim->live[idx] = live;
872 live = vfp_liveness_transfer(irn, live);
874 idx = get_irn_idx(block);
875 sim->live[idx] = live;
876 } /* update_liveness */
/**
 * Returns true if a register is live in a set.
 *
 * @param reg_idx  the vfp register index
 * @param live     a live bitset
 */
#define is_vfp_live(reg_idx, live) ((live) & (1 << (reg_idx)))
#ifdef DEBUG_libfirm
/**
 * Dump liveness info.
 *
 * @param live  the live bitset
 */
static void vfp_dump_live(vfp_liveness live)
{
	int i;

	DB((dbg, LEVEL_2, "Live after: "));
	for (i = 0; i < 8; ++i) {
		if (live & (1 << i)) {
			DB((dbg, LEVEL_2, "vf%d ", i));
		}
	}
	DB((dbg, LEVEL_2, "\n"));
}  /* vfp_dump_live */
#endif /* DEBUG_libfirm */
906 /* --------------------------------- simulators ---------------------------------------- */
/**
 * Simulate a virtual binop.
 *
 * NOTE(review): this chunk is incomplete — the opening brace, several
 * declarations (op1_live_after, op2_live_after, permuted, dst, out_idx
 * assignments) and parts of the branch bodies are missing.  The code below is
 * kept verbatim; the commentary only describes what the visible lines show.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param tmpl   the template containing the 4 possible x87 opcodes
 *
 * @return NO_NODE_ADDED
 */
static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl)
	int op2_idx = 0, op1_idx;
	int out_idx, do_pop = 0;
	ia32_x87_attr_t *attr;
	ir_node *patched_insn;
	x87_simulator *sim = state->sim;
	/* the two fp inputs and the result register of the binop */
	ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
	ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
	const arch_register_t *op1_reg = x87_get_irn_register(op1);
	const arch_register_t *op2_reg = x87_get_irn_register(op2);
	const arch_register_t *out = x87_irn_get_register(n, pn_ia32_res);
	int reg_index_1 = arch_register_get_index(op1_reg);
	int reg_index_2 = arch_register_get_index(op2_reg);
	/* registers still live after n (result register excluded via kill mask) */
	vfp_liveness live = vfp_live_args_after(sim, n, REGMASK(out));

	DB((dbg, LEVEL_1, ">>> %+F %s, %s -> %s\n", n,
	arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
	arch_register_get_name(out)));
	DEBUG_ONLY(vfp_dump_live(live));
	DB((dbg, LEVEL_1, "Stack before: "));
	DEBUG_ONLY(x87_dump_stack(state));

	if (reg_index_1 == REG_VFP_UKNWN) {
	/* NOTE(review): the body of the UKNWN special case is missing here */
	op1_idx = x87_on_stack(state, reg_index_1);
	assert(op1_idx >= 0);
	op1_live_after = is_vfp_live(arch_register_get_index(op1_reg), live);

	attr = get_ia32_x87_attr(n);
	permuted = attr->attr.data.ins_permuted;

	if (reg_index_2 != REG_VFP_NOREG) {
	if (reg_index_2 == REG_VFP_UKNWN) {
	/* second operand is a vfp register */
	op2_idx = x87_on_stack(state, reg_index_2);
	assert(op2_idx >= 0);
	/* NOTE(review): left-hand side (op2_live_after) lost in this chunk */
	= is_vfp_live(arch_register_get_index(op2_reg), live);

	if (op2_live_after) {
	/* Second operand is live. */
	if (op1_live_after) {
	/* Both operands are live: push the first one.
	   This works even for op1 == op2. */
	x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
	/* now do fxxx (tos=tos X op) */
	dst = tmpl->normal_op;
	/* Second live, first operand is dead here, bring it to tos. */
	x87_create_fxch(state, n, op1_idx);
	/* now do fxxx (tos=tos X op) */
	dst = tmpl->normal_op;
	/* Second operand is dead. */
	if (op1_live_after) {
	/* First operand is live: bring second to tos. */
	x87_create_fxch(state, n, op2_idx);
	/* now do fxxxr (tos = op X tos) */
	dst = tmpl->reverse_op;
	/* Both operands are dead here, pop them from the stack. */
	/* Both are identically and on tos, no pop needed. */
	/* here fxxx (tos = tos X tos) */
	dst = tmpl->normal_op;
	/* now do fxxxp (op = op X tos, pop) */
	dst = tmpl->normal_pop_op;
	} else if (op1_idx == 0) {
	assert(op1_idx != op2_idx);
	/* now do fxxxrp (op = tos X op, pop) */
	dst = tmpl->reverse_pop_op;
	/* Bring the second on top. */
	x87_create_fxch(state, n, op2_idx);
	if (op1_idx == op2_idx) {
	/* Both are identically and on tos now, no pop needed. */
	/* use fxxx (tos = tos X tos) */
	dst = tmpl->normal_op;
	/* op2 is on tos now */
	/* use fxxxp (op = op X tos, pop) */
	dst = tmpl->normal_pop_op;
	/* second operand is an address mode */
	if (op1_live_after) {
	/* first operand is live: push it here */
	x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
	/* first operand is dead: bring it to tos */
	x87_create_fxch(state, n, op1_idx);
	/* use fxxx (tos = tos X mem) */
	dst = permuted ? tmpl->reverse_op : tmpl->normal_op;

	/* patch the opcode in and record the result on the simulated stack */
	patched_insn = x87_patch_insn(n, dst);
	x87_set_st(state, arch_register_get_index(out), patched_insn, out_idx);

	/* patch the operation */
	attr->x87[0] = op1_reg = &ia32_st_regs[op1_idx];
	if (reg_index_2 != REG_VFP_NOREG) {
	attr->x87[1] = op2_reg = &ia32_st_regs[op2_idx];
	attr->x87[2] = out = &ia32_st_regs[out_idx];

	if (reg_index_2 != REG_VFP_NOREG) {
	DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
	arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
	arch_register_get_name(out)));
	DB((dbg, LEVEL_1, "<<< %s %s, [AM] -> %s\n", get_irn_opname(n),
	arch_register_get_name(op1_reg),
	arch_register_get_name(out)));

	return NO_NODE_ADDED;
/**
 * Simulate a virtual Unop.
 *
 * NOTE(review): this chunk is incomplete — the opening brace, the
 * `op1_idx = 0;` / `out_idx = 0;` updates and some braces of the
 * live/dead branches are missing.  Code kept verbatim.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param op     the x87 opcode that will replace n's opcode
 *
 * @return NO_NODE_ADDED
 */
static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
	int op1_idx, out_idx;
	x87_simulator *sim = state->sim;
	/* the single fp operand and the result register */
	const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, UNOP_IDX));
	const arch_register_t *out = x87_get_irn_register(n);
	ia32_x87_attr_t *attr;
	unsigned live = vfp_live_args_after(sim, n, REGMASK(out));

	DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
	DEBUG_ONLY(vfp_dump_live(live));

	op1_idx = x87_on_stack(state, arch_register_get_index(op1));

	if (is_vfp_live(arch_register_get_index(op1), live)) {
	/* push the operand here */
	x87_create_fpush(state, n, op1_idx, UNOP_IDX);
	/* operand is dead, bring it to tos */
	x87_create_fxch(state, n, op1_idx);

	/* the result replaces the tos */
	x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));

	attr = get_ia32_x87_attr(n);
	attr->x87[0] = op1 = &ia32_st_regs[0];
	attr->x87[2] = out = &ia32_st_regs[0];
	DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));

	return NO_NODE_ADDED;
1140 * Simulate a virtual Load instruction.
1142 * @param state the x87 state
1143 * @param n the node that should be simulated (and patched)
1144 * @param op the x87 opcode that will replace n's opcode
1146 * @return NO_NODE_ADDED
1148 static int sim_load(x87_state *state, ir_node *n, ir_op *op, int res_pos)
1150 const arch_register_t *out = x87_irn_get_register(n, res_pos);
1151 ia32_x87_attr_t *attr;
1153 DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, arch_register_get_name(out)));
1154 x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
1155 assert(out == x87_irn_get_register(n, res_pos));
1156 attr = get_ia32_x87_attr(n);
1157 attr->x87[2] = out = &ia32_st_regs[0];
1158 DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
1160 return NO_NODE_ADDED;
1164 * Rewire all users of @p old_val to @new_val iff they are scheduled after @p store.
1166 * @param store The store
1167 * @param old_val The former value
1168 * @param new_val The new value
1170 static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
1172 const ir_edge_t *edge, *ne;
1174 foreach_out_edge_safe(old_val, edge, ne) {
1175 ir_node *user = get_edge_src_irn(edge);
1177 if (! user || user == store)
1180 /* if the user is scheduled after the store: rewire */
1181 if (sched_is_scheduled(user) && sched_comes_after(store, user)) {
1183 /* find the input of the user pointing to the old value */
1184 for (i = get_irn_arity(user) - 1; i >= 0; i--) {
1185 if (get_irn_n(user, i) == old_val)
1186 set_irn_n(user, i, new_val);
1190 } /* collect_and_rewire_users */
/**
 * Simulate a virtual Store.
 *
 * NOTE(review): this chunk is incomplete — the opening brace, the
 * declarations of `mode` and `irg`, several closing braces and some
 * statements between the visible lines are missing.  Code kept verbatim.
 *
 * @param state  the x87 state
 * @param n      the node that should be simulated (and patched)
 * @param op     the x87 store opcode
 * @param op_p   the x87 store-and-pop opcode
 */
static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p)
	ir_node *val = get_irn_n(n, n_ia32_vfst_val);
	const arch_register_t *op2 = x87_get_irn_register(val);
	unsigned live = vfp_live_args_after(state->sim, n, 0);
	int insn = NO_NODE_ADDED;
	ia32_x87_attr_t *attr;
	int op2_reg_idx, op2_idx, depth;
	int live_after_node;

	op2_reg_idx = arch_register_get_index(op2);
	if (op2_reg_idx == REG_VFP_UKNWN) {
	/* just take any value from stack */
	if (state->depth > 0) {
	DEBUG_ONLY(op2 = NULL);
	live_after_node = 1;
	/* produce a new value which we will consume immediately */
	x87_create_fldz(state, n, op2_reg_idx);
	live_after_node = 0;
	op2_idx = x87_on_stack(state, op2_reg_idx);
	assert(op2_idx >= 0);
	op2_idx = x87_on_stack(state, op2_reg_idx);
	live_after_node = is_vfp_live(arch_register_get_index(op2), live);
	DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
	assert(op2_idx >= 0);

	mode  = get_ia32_ls_mode(n);
	depth = x87_get_depth(state);

	if (live_after_node) {
	/*
	   Problem: fst doesn't support 96bit modes (spills), only fstp does
	            fist doesn't support 64bit mode, only fistp
	   Solution:
	      - stack not full: push value and fstp
	      - stack full: fstp value and load again
	   Note that we cannot test on mode_E, because floats might be 96bit ...
	 */
	if (get_mode_size_bits(mode) > 64 || (mode_is_int(mode) && get_mode_size_bits(mode) > 32)) {
	if (depth < N_x87_REGS) {
	/* ok, we have a free register: push + fstp */
	x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
	x87_patch_insn(n, op_p);
	ir_node *vfld, *mem, *block, *rproj, *mproj;

	/* stack full here: need fstp + load */
	x87_patch_insn(n, op_p);

	block = get_nodes_block(n);
	vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), new_NoMem(), get_ia32_ls_mode(n));

	/* copy all attributes */
	set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
	if (is_ia32_use_frame(n))
	set_ia32_use_frame(vfld);
	set_ia32_op_type(vfld, ia32_AddrModeS);
	add_ia32_am_offs_int(vfld, get_ia32_am_offs_int(n));
	set_ia32_am_sc(vfld, get_ia32_am_sc(n));
	set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));

	rproj = new_r_Proj(block, vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
	mproj = new_r_Proj(block, vfld, mode_M, pn_ia32_vfld_M);
	mem = get_irn_Proj_for_mode(n, mode_M);

	assert(mem && "Store memory not found");

	arch_set_irn_register(rproj, op2);

	/* reroute all former users of the store memory to the load memory */
	irg = get_irn_irg(n);
	edges_reroute(mem, mproj, irg);
	/* set the memory input of the load to the store memory */
	set_irn_n(vfld, n_ia32_vfld_mem, mem);

	sched_add_after(n, vfld);
	sched_add_after(vfld, rproj);

	/* rewire all users, scheduled after the store, to the loaded value */
	collect_and_rewire_users(n, val, rproj);

	/* we can only store the tos to memory */
	x87_create_fxch(state, n, op2_idx);

	/* mode size 64 or smaller -> use normal fst */
	x87_patch_insn(n, op);

	/* we can only store the tos to memory */
	x87_create_fxch(state, n, op2_idx);

	x87_patch_insn(n, op_p);

	attr = get_ia32_x87_attr(n);
	attr->x87[1] = op2 = &ia32_st_regs[0];
	DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
/* Generator macros: each expands to a tiny sim_<op>() handler that merely
   forwards to the generic simulator (sim_binop/sim_load/sim_unop/sim_store)
   for the matching ia32 virtual opcode, passing the real / reverse /
   popping opcode variants where the generic code needs them. */
1316 #define _GEN_BINOP(op, rev) \
1317 static int sim_##op(x87_state *state, ir_node *n) { \
1318 exchange_tmpl tmpl = { op_ia32_##op, op_ia32_##rev, op_ia32_##op##p, op_ia32_##rev##p }; \
1319 return sim_binop(state, n, &tmpl); \
1322 #define GEN_BINOP(op) _GEN_BINOP(op, op)
1323 #define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
1325 #define GEN_LOAD(op) \
1326 static int sim_##op(x87_state *state, ir_node *n) { \
1327 return sim_load(state, n, op_ia32_##op, pn_ia32_v##op##_res); \
1330 #define GEN_UNOP(op) \
1331 static int sim_##op(x87_state *state, ir_node *n) { \
1332 return sim_unop(state, n, op_ia32_##op); \
1335 #define GEN_STORE(op) \
1336 static int sim_##op(x87_state *state, ir_node *n) { \
1337 return sim_store(state, n, op_ia32_##op, op_ia32_##op##p); \
1359 * Simulate a virtual fisttp.
1361 * @param state the x87 state
1362 * @param n the node that should be simulated (and patched)
1364 * @return NO_NODE_ADDED
1366 static int sim_fisttp(x87_state *state, ir_node *n)
1368 ir_node *val = get_irn_n(n, n_ia32_vfst_val);
1369 const arch_register_t *op2 = x87_get_irn_register(val);
1370 ia32_x87_attr_t *attr;
1371 int op2_reg_idx, op2_idx, depth;
1373 op2_reg_idx = arch_register_get_index(op2);
/* REG_VFP_UKNWN marks a value without a definite producer: reuse any
   value already on the stack, or materialize a zero to consume. */
1374 if (op2_reg_idx == REG_VFP_UKNWN) {
1375 /* just take any value from stack */
1376 if (state->depth > 0) {
1378 DEBUG_ONLY(op2 = NULL);
1380 /* produce a new value which we will consume immediately */
1381 x87_create_fldz(state, n, op2_reg_idx);
1382 op2_idx = x87_on_stack(state, op2_reg_idx);
1383 assert(op2_idx >= 0);
1386 op2_idx = x87_on_stack(state, op2_reg_idx);
1387 DB((dbg, LEVEL_1, ">>> %+F %s ->\n", n, arch_register_get_name(op2)));
1388 assert(op2_idx >= 0);
1391 depth = x87_get_depth(state);
1393 /* Note: although the value is still live here, it is destroyed because
1394 of the pop. The register allocator is aware of that and introduced a copy
1395 if the value must be alive. */
1397 /* we can only store the tos to memory */
1399 x87_create_fxch(state, n, op2_idx);
/* fisttp always pops (see note above); patch the virtual node to the
   real fisttp opcode */
1402 x87_patch_insn(n, op_ia32_fisttp);
1404 attr = get_ia32_x87_attr(n);
1405 attr->x87[1] = op2 = &ia32_st_regs[0];
1406 DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
1408 return NO_NODE_ADDED;
1412 * Simulate a virtual FtstFnstsw.
1414 * @param state the x87 state
1415 * @param n the node that should be simulated (and patched)
1417 * @return NO_NODE_ADDED
1419 static int sim_FtstFnstsw(x87_state *state, ir_node *n)
1421 x87_simulator *sim = state->sim;
1422 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1423 ir_node *op1_node = get_irn_n(n, n_ia32_vFtstFnstsw_left);
1424 const arch_register_t *reg1 = x87_get_irn_register(op1_node);
1425 int reg_index_1 = arch_register_get_index(reg1);
1426 int op1_idx = x87_on_stack(state, reg_index_1);
1427 unsigned live = vfp_live_args_after(sim, n, 0);
1429 DB((dbg, LEVEL_1, ">>> %+F %s\n", n, arch_register_get_name(reg1)));
1430 DEBUG_ONLY(vfp_dump_live(live));
1431 DB((dbg, LEVEL_1, "Stack before: "));
1432 DEBUG_ONLY(x87_dump_stack(state));
1433 assert(op1_idx >= 0);
/* ftst only tests st(0), so the operand must be at the top of stack */
1436 /* bring the value to tos */
1437 x87_create_fxch(state, n, op1_idx);
1441 /* patch the operation */
1442 x87_patch_insn(n, op_ia32_FtstFnstsw);
1443 reg1 = &ia32_st_regs[op1_idx];
1444 attr->x87[0] = reg1;
1445 attr->x87[1] = NULL;
1446 attr->x87[2] = NULL;
/* if the tested value dies at this node, pop it right after */
1448 if (!is_vfp_live(reg_index_1, live))
1449 x87_create_fpop(state, sched_next(n), 1);
1451 return NO_NODE_ADDED;
1452 } /* sim_FtstFnstsw */
 * Simulate a virtual Fucom-family compare (vFucomFnstsw / vFucomi):
 * bring the operand(s) into positions a real fucom/fucomi accepts
 * (st(0), and st(i) or st(1)), popping dead operands afterwards.
1457 * @param state the x87 state
1458 * @param n the node that should be simulated (and patched)
1460 * @return NO_NODE_ADDED
1462 static int sim_Fucom(x87_state *state, ir_node *n)
1466 ia32_x87_attr_t *attr = get_ia32_x87_attr(n);
1468 x87_simulator *sim = state->sim;
1469 ir_node *op1_node = get_irn_n(n, n_ia32_vFucomFnstsw_left);
1470 ir_node *op2_node = get_irn_n(n, n_ia32_vFucomFnstsw_right);
1471 const arch_register_t *op1 = x87_get_irn_register(op1_node);
1472 const arch_register_t *op2 = x87_get_irn_register(op2_node);
1473 int reg_index_1 = arch_register_get_index(op1);
1474 int reg_index_2 = arch_register_get_index(op2);
1475 unsigned live = vfp_live_args_after(sim, n, 0);
/* ins_permuted records whether operand order is swapped; every fxch
   decision below that flips the order toggles it again so the flag
   consumer sees the right orientation. */
1476 int permuted = attr->attr.data.ins_permuted;
1480 DB((dbg, LEVEL_1, ">>> %+F %s, %s\n", n,
1481 arch_register_get_name(op1), arch_register_get_name(op2)));
1482 DEBUG_ONLY(vfp_dump_live(live));
1483 DB((dbg, LEVEL_1, "Stack before: "));
1484 DEBUG_ONLY(x87_dump_stack(state));
1486 op1_idx = x87_on_stack(state, reg_index_1);
1487 assert(op1_idx >= 0);
1489 /* BEWARE: check for comp a,a cases, they might happen */
1490 if (reg_index_2 != REG_VFP_NOREG) {
1491 /* second operand is a vfp register */
1492 op2_idx = x87_on_stack(state, reg_index_2);
1493 assert(op2_idx >= 0);
1495 if (is_vfp_live(reg_index_2, live)) {
1496 /* second operand is live */
1498 if (is_vfp_live(reg_index_1, live)) {
1499 /* both operands are live */
1502 /* res = tos X op */
1503 } else if (op2_idx == 0) {
1504 /* res = op X tos */
1505 permuted = !permuted;
1508 /* bring the first one to tos */
1509 x87_create_fxch(state, n, op1_idx);
1513 /* res = tos X op */
1516 /* second live, first operand is dead here, bring it to tos.
1517 This means further, op1_idx != op2_idx. */
1518 assert(op1_idx != op2_idx);
1520 x87_create_fxch(state, n, op1_idx);
1525 /* res = tos X op, pop */
1529 /* second operand is dead */
1530 if (is_vfp_live(reg_index_1, live)) {
1531 /* first operand is live: bring second to tos.
1532 This means further, op1_idx != op2_idx. */
1533 assert(op1_idx != op2_idx);
1535 x87_create_fxch(state, n, op2_idx);
1540 /* res = op X tos, pop */
1542 permuted = !permuted;
1545 /* both operands are dead here, check first for identity. */
1546 if (op1_idx == op2_idx) {
1547 /* identically, one pop needed */
1549 x87_create_fxch(state, n, op1_idx);
1553 /* res = tos X op, pop */
1556 /* different, move them to st and st(1) and pop both.
1557 The tricky part is to get one into st(1).*/
1558 else if (op2_idx == 1) {
1559 /* good, second operand is already in the right place, move the first */
1561 /* bring the first on top */
1562 x87_create_fxch(state, n, op1_idx);
1563 assert(op2_idx != 0);
1566 /* res = tos X op, pop, pop */
1568 } else if (op1_idx == 1) {
1569 /* good, first operand is already in the right place, move the second */
1571 /* bring the first on top */
1572 x87_create_fxch(state, n, op2_idx);
1573 assert(op1_idx != 0);
1576 /* res = op X tos, pop, pop */
1577 permuted = !permuted;
1581 /* if one is already the TOS, we need two fxch */
1583 /* first one is TOS, move to st(1) */
1584 x87_create_fxch(state, n, 1);
1585 assert(op2_idx != 1);
1587 x87_create_fxch(state, n, op2_idx);
1589 /* res = op X tos, pop, pop */
1591 permuted = !permuted;
1593 } else if (op2_idx == 0) {
1594 /* second one is TOS, move to st(1) */
1595 x87_create_fxch(state, n, 1);
1596 assert(op1_idx != 1);
1598 x87_create_fxch(state, n, op1_idx);
1600 /* res = tos X op, pop, pop */
1603 /* none of them is either TOS or st(1), 3 fxch needed */
1604 x87_create_fxch(state, n, op2_idx);
1605 assert(op1_idx != 0);
1606 x87_create_fxch(state, n, 1);
1608 x87_create_fxch(state, n, op1_idx);
1610 /* res = tos X op, pop, pop */
1617 /* second operand is an address mode */
1618 if (is_vfp_live(reg_index_1, live)) {
1619 /* first operand is live: bring it to TOS */
1621 x87_create_fxch(state, n, op1_idx);
1625 /* first operand is dead: bring it to tos */
1627 x87_create_fxch(state, n, op1_idx);
/* patch to the real opcode; the variant depends on how many pops the
   case analysis above decided on */
1634 /* patch the operation */
1635 if (is_ia32_vFucomFnstsw(n)) {
1639 case 0: dst = op_ia32_FucomFnstsw; break;
1640 case 1: dst = op_ia32_FucompFnstsw; break;
1641 case 2: dst = op_ia32_FucomppFnstsw; break;
1642 default: panic("invalid popcount in sim_Fucom");
1645 for (i = 0; i < pops; ++i) {
1648 } else if (is_ia32_vFucomi(n)) {
1650 case 0: dst = op_ia32_Fucomi; break;
1651 case 1: dst = op_ia32_Fucompi; x87_pop(state); break;
1653 dst = op_ia32_Fucompi;
1655 x87_create_fpop(state, sched_next(n), 1);
1657 default: panic("invalid popcount in sim_Fucom");
1660 panic("invalid operation %+F in sim_FucomFnstsw", n);
1663 x87_patch_insn(n, dst);
1670 op1 = &ia32_st_regs[op1_idx];
1673 op2 = &ia32_st_regs[op2_idx];
1676 attr->x87[2] = NULL;
1677 attr->attr.data.ins_permuted = permuted;
1680 DB((dbg, LEVEL_1, "<<< %s %s, %s\n", get_irn_opname(n),
1681 arch_register_get_name(op1), arch_register_get_name(op2)));
1683 DB((dbg, LEVEL_1, "<<< %s %s, [AM]\n", get_irn_opname(n),
1684 arch_register_get_name(op1)));
1687 return NO_NODE_ADDED;
 * Simulate a be_Keep: any kept vfp value that is dead after the Keep
 * is popped from the simulated x87 stack.
1693 * @param state the x87 state
1694 * @param n the node that should be simulated (and patched)
1696 * @return NO_NODE_ADDED
1698 static int sim_Keep(x87_state *state, ir_node *node)
1701 const arch_register_t *op_reg;
1707 DB((dbg, LEVEL_1, ">>> %+F\n", node));
1709 arity = get_irn_arity(node);
1710 for (i = 0; i < arity; ++i) {
1711 op = get_irn_n(node, i);
1712 op_reg = arch_get_irn_register(op);
/* only vfp-class operands occupy x87 stack slots */
1713 if (arch_register_get_class(op_reg) != &ia32_reg_classes[CLASS_ia32_vfp])
1716 reg_id = arch_register_get_index(op_reg);
1717 live = vfp_live_args_after(state->sim, node, 0);
1719 op_stack_idx = x87_on_stack(state, reg_id);
1720 if (op_stack_idx >= 0 && !is_vfp_live(reg_id, live))
1721 x87_create_fpop(state, sched_next(node), 1);
1724 DB((dbg, LEVEL_1, "Stack after: "));
1725 DEBUG_ONLY(x87_dump_stack(state));
1727 return NO_NODE_ADDED;
1731 * Keep the given node alive by adding a be_Keep.
1733 * @param node the node to kept alive
1735 static void keep_float_node_alive(ir_node *node)
1737 ir_node *block = get_nodes_block(node);
1738 const arch_register_class_t *cls = arch_get_irn_reg_class_out(node);
1741 keep = be_new_Keep(cls, block, 1, &node);
/* schedule the Keep directly behind its operand so the value is
   considered used (see sim_Copy: called when a pred loses all users) */
1743 assert(sched_is_scheduled(node));
1744 sched_add_after(node, keep);
1745 } /* keep_float_node_alive */
1748 * Create a copy of a node. Recreate the node if it's a constant.
1750 * @param state the x87 state
1751 * @param n the node to be copied
1753 * @return the copy of n
1755 static ir_node *create_Copy(x87_state *state, ir_node *n)
1757 dbg_info *n_dbg = get_irn_dbg_info(n);
1758 ir_mode *mode = get_irn_mode(n);
1759 ir_node *block = get_nodes_block(n);
1760 ir_node *pred = get_irn_n(n, 0);
1761 ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL;
1763 const arch_register_t *out;
1764 const arch_register_t *op1;
1765 ia32_x87_attr_t *attr;
1767 /* Do not copy constants, recreate them. */
1768 switch (get_ia32_irn_opcode(pred)) {
1769 case iro_ia32_Unknown_VFP:
1771 cnstr = new_bd_ia32_fldz;
1774 cnstr = new_bd_ia32_fld1;
1776 case iro_ia32_fldpi:
1777 cnstr = new_bd_ia32_fldpi;
1779 case iro_ia32_fldl2e:
1780 cnstr = new_bd_ia32_fldl2e;
1782 case iro_ia32_fldl2t:
1783 cnstr = new_bd_ia32_fldl2t;
1785 case iro_ia32_fldlg2:
1786 cnstr = new_bd_ia32_fldlg2;
1788 case iro_ia32_fldln2:
1789 cnstr = new_bd_ia32_fldln2;
1795 out = x87_get_irn_register(n);
1796 op1 = x87_get_irn_register(pred);
/* either re-load the constant directly (cnstr set above), or emit a
   fpushCopy that duplicates pred's stack slot onto the new tos */
1798 if (cnstr != NULL) {
1799 /* copy a constant */
1800 res = (*cnstr)(n_dbg, block, mode);
1802 x87_push(state, arch_register_get_index(out), res);
1804 attr = get_ia32_x87_attr(res);
1805 attr->x87[2] = &ia32_st_regs[0];
1807 int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1809 res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);
1811 x87_push(state, arch_register_get_index(out), res);
1813 attr = get_ia32_x87_attr(res);
1814 attr->x87[0] = &ia32_st_regs[op1_idx];
1815 attr->x87[2] = &ia32_st_regs[0];
1817 arch_set_irn_register(res, out);
1823 * Simulate a be_Copy.
1825 * @param state the x87 state
1826 * @param n the node that should be simulated (and patched)
1828 * @return NO_NODE_ADDED
1830 static int sim_Copy(x87_state *state, ir_node *n)
1833 const arch_register_t *out;
1834 const arch_register_t *op1;
1835 const arch_register_class_t *cls;
1836 ir_node *node, *next;
1837 ia32_x87_attr_t *attr;
1838 int op1_idx, out_idx;
/* only vfp copies concern the x87 simulator */
1841 cls = arch_get_irn_reg_class_out(n);
1842 if (cls->regs != ia32_vfp_regs)
1845 pred = get_irn_n(n, 0);
1846 out = x87_get_irn_register(n);
1847 op1 = x87_get_irn_register(pred);
1848 live = vfp_live_args_after(state->sim, n, REGMASK(out));
1850 DB((dbg, LEVEL_1, ">>> %+F %s -> %s\n", n,
1851 arch_register_get_name(op1), arch_register_get_name(out)));
1852 DEBUG_ONLY(vfp_dump_live(live));
1854 /* handle the infamous unknown value */
1855 if (arch_register_get_index(op1) == REG_VFP_UKNWN) {
1856 /* Operand is still live, a real copy. We need here an fpush that can
1857 hold a a register, so use the fpushCopy or recreate constants */
1858 node = create_Copy(state, n);
1860 assert(is_ia32_fldz(node));
1861 next = sched_next(n);
1864 sched_add_before(next, node);
1866 DB((dbg, LEVEL_1, "<<< %+F %s -> %s\n", node, op1->name,
1867 arch_get_irn_register(node)->name));
1868 return NO_NODE_ADDED;
1871 op1_idx = x87_on_stack(state, arch_register_get_index(op1));
1873 if (is_vfp_live(arch_register_get_index(op1), live)) {
1874 ir_node *pred = get_irn_n(n, 0);
1876 /* Operand is still live, a real copy. We need here an fpush that can
1877 hold a a register, so use the fpushCopy or recreate constants */
1878 node = create_Copy(state, n);
1880 /* We have to make sure the old value doesn't go dead (which can happen
1881 * when we recreate constants). As the simulator expected that value in
1882 * the pred blocks. This is unfortunate as removing it would save us 1
1883 * instruction, but we would have to rerun all the simulation to get
1886 next = sched_next(n);
1889 sched_add_before(next, node);
1891 if (get_irn_n_edges(pred) == 0) {
1892 keep_float_node_alive(pred);
1895 DB((dbg, LEVEL_1, "<<< %+F %s -> ?\n", node, op1->name));
/* operand dies here: the copy degenerates into a rename (or a pop if
   the out register already occupies a different stack slot) */
1897 out_idx = x87_on_stack(state, arch_register_get_index(out));
1899 if (out_idx >= 0 && out_idx != op1_idx) {
1900 /* Matze: out already on stack? how can this happen? */
1903 /* op1 must be killed and placed where out is */
1905 /* best case, simple remove and rename */
1906 x87_patch_insn(n, op_ia32_Pop);
1907 attr = get_ia32_x87_attr(n);
1908 attr->x87[0] = op1 = &ia32_st_regs[0];
1911 x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
1913 /* move op1 to tos, store and pop it */
1915 x87_create_fxch(state, n, op1_idx);
1918 x87_patch_insn(n, op_ia32_Pop);
1919 attr = get_ia32_x87_attr(n);
1920 attr->x87[0] = op1 = &ia32_st_regs[out_idx];
1923 x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
1925 DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
1927 /* just a virtual copy */
1928 x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
1929 /* don't remove the node to keep the verifier quiet :),
1930 the emitter won't emit any code for the node */
1933 DB((dbg, LEVEL_1, "<<< KILLED %s\n", get_irn_opname(n)));
1934 exchange(n, get_unop_op(n));
1938 return NO_NODE_ADDED;
1942 * Returns the vf0 result Proj of a Call.
1944 * @param call the Call node
1946 static ir_node *get_call_result_proj(ir_node *call)
1948 const ir_edge_t *edge;
/* walk all out-edges of the Call looking for the Proj whose number
   is pn_ia32_Call_vf0 (the x87 float result) */
1950 /* search the result proj */
1951 foreach_out_edge(call, edge) {
1952 ir_node *proj = get_edge_src_irn(edge);
1953 long pn = get_Proj_proj(proj);
1955 if (pn == pn_ia32_Call_vf0)
1960 } /* get_call_result_proj */
1963 * Simulate a ia32_Call.
1965 * @param state the x87 state
1966 * @param n the node that should be simulated (and patched)
1968 * @return NO_NODE_ADDED
1970 static int sim_Call(x87_state *state, ir_node *n)
1972 ir_type *call_tp = get_ia32_call_attr_const(n)->call_tp;
1976 const arch_register_t *reg;
1978 DB((dbg, LEVEL_1, ">>> %+F\n", n));
1980 /* at the begin of a call the x87 state should be empty */
1981 assert(state->depth == 0 && "stack not empty before call");
1983 if (get_method_n_ress(call_tp) <= 0)
1987 * If the called function returns a float, it is returned in st(0).
1988 * This even happens if the return value is NOT used.
1989 * Moreover, only one return result is supported.
1991 res_type = get_method_res_type(call_tp, 0);
1992 mode = get_type_mode(res_type);
1994 if (mode == NULL || !mode_is_float(mode))
1997 resproj = get_call_result_proj(n);
1998 assert(resproj != NULL);
/* the float result lives in st(0) after the call: mirror that by
   pushing the result Proj onto the simulated stack */
2000 reg = x87_get_irn_register(resproj);
2001 x87_push(state, arch_register_get_index(reg), resproj);
2004 DB((dbg, LEVEL_1, "Stack after: "));
2005 DEBUG_ONLY(x87_dump_stack(state));
2007 return NO_NODE_ADDED;
2011 * Simulate a be_Spill.
2013 * @param state the x87 state
2014 * @param n the node that should be simulated (and patched)
2016 * Should not happen, spills are lowered before x87 simulator see them.
2018 static int sim_Spill(x87_state *state, ir_node *n)
2020 panic("Spill not lowered before x87 simulator run");
/* not reached: panic() aborts; the call only satisfies the return path */
2021 return sim_fst(state, n);
2025 * Simulate a be_Reload.
2027 * @param state the x87 state
2028 * @param n the node that should be simulated (and patched)
2030 * Should not happen, reloads are lowered before x87 simulator see them.
2032 static int sim_Reload(x87_state *state, ir_node *n)
2034 panic("Reload not lowered before x87 simulator run");
/* not reached: panic() aborts; the call only satisfies the return path */
2035 return sim_fld(state, n);
2039 * Simulate a be_Return.
2041 * @param state the x87 state
2042 * @param n the node that should be simulated (and patched)
2044 * @return NO_NODE_ADDED
2046 static int sim_Return(x87_state *state, ir_node *n)
2048 int n_res = be_Return_get_n_rets(n);
2049 int i, n_float_res = 0;
2051 /* only floating point return values must reside on stack */
2052 for (i = 0; i < n_res; ++i) {
2053 ir_node *res = get_irn_n(n, be_pos_Return_val + i);
2055 if (mode_is_float(get_irn_mode(res)))
/* at the Return the stack must hold exactly the float results */
2058 assert(x87_get_depth(state) == n_float_res);
2060 /* pop them virtually */
2061 for (i = n_float_res - 1; i >= 0; --i)
2064 return NO_NODE_ADDED;
/* input/output register pair used for permutation bookkeeping */
2067 typedef struct _perm_data_t {
2068 const arch_register_t *in;
2069 const arch_register_t *out;
2073 * Simulate a be_Perm.
2075 * @param state the x87 state
2076 * @param irn the node that should be simulated (and patched)
2078 * @return NO_NODE_ADDED
2080 static int sim_Perm(x87_state *state, ir_node *irn)
2083 ir_node *pred = get_irn_n(irn, 0);
2085 const ir_edge_t *edge;
2087 /* handle only floating point Perms */
2088 if (! mode_is_float(get_irn_mode(pred)))
2089 return NO_NODE_ADDED;
2091 DB((dbg, LEVEL_1, ">>> %+F\n", irn));
2093 /* Perm is a pure virtual instruction on x87.
2094 All inputs must be on the FPU stack and are pairwise
2095 different from each other.
2096 So, all we need to do is to permutate the stack state. */
2097 n = get_irn_arity(irn);
2098 NEW_ARR_A(int, stack_pos, n);
2100 /* collect old stack positions */
2101 for (i = 0; i < n; ++i) {
2102 const arch_register_t *inreg = x87_get_irn_register(get_irn_n(irn, i));
2103 int idx = x87_on_stack(state, arch_register_get_index(inreg));
2105 assert(idx >= 0 && "Perm argument not on x87 stack");
/* each result Proj inherits the stack slot of its matching input */
2109 /* now do the permutation */
2110 foreach_out_edge(irn, edge) {
2111 ir_node *proj = get_edge_src_irn(edge);
2112 const arch_register_t *out = x87_get_irn_register(proj);
2113 long num = get_Proj_proj(proj);
2115 assert(0 <= num && num < n && "More Proj's than Perm inputs");
2116 x87_set_st(state, arch_register_get_index(out), proj, stack_pos[(unsigned)num]);
2118 DB((dbg, LEVEL_1, "<<< %+F\n", irn));
2120 return NO_NODE_ADDED;
2124 * Simulate the Barrier to generate Unknowns.
2125 * We must push something on the stack for its value.
2127 * @param state the x87 state
2128 * @param irn the node that should be simulated (and patched)
2130 * @return NO_NODE_ADDED
2132 static int sim_Barrier(x87_state *state, ir_node *node)
2136 /* materialize unknown if needed */
2137 arity = get_irn_arity(node);
2138 for (i = 0; i < arity; ++i) {
2139 const arch_register_t *reg;
2142 ia32_x87_attr_t *attr;
2143 ir_node *in = get_irn_n(node, i);
2145 if (!is_ia32_Unknown_VFP(in))
2148 /* TODO: not completely correct... */
2149 reg = &ia32_vfp_regs[REG_VFP_UKNWN];
/* replace the Unknown by a real fldz scheduled before the Barrier,
   so a defined value actually occupies the stack slot */
2152 block = get_nodes_block(node);
2153 zero = new_bd_ia32_fldz(NULL, block, ia32_reg_classes[CLASS_ia32_st].mode);
2154 x87_push(state, arch_register_get_index(reg), zero);
2156 attr = get_ia32_x87_attr(zero);
2157 attr->x87[2] = &ia32_st_regs[0];
2159 sched_add_before(node, zero);
2161 set_irn_n(node, i, zero);
2164 return NO_NODE_ADDED;
2168 * Kill any dead registers at block start by popping them from the stack.
2170 * @param sim the simulator handle
2171 * @param block the current block
2172 * @param start_state the x87 state at the begin of the block
2174 * @return the x87 state after dead register killed
2176 static x87_state *x87_kill_deads(x87_simulator *sim, ir_node *block, x87_state *start_state)
2178 x87_state *state = start_state;
2179 ir_node *first_insn = sched_first(block);
2180 ir_node *keep = NULL;
2181 unsigned live = vfp_live_args_after(sim, block, 0);
2183 int i, depth, num_pop;
/* build a bitmask of stack slots whose register is not live-in */
2186 depth = x87_get_depth(state);
2187 for (i = depth - 1; i >= 0; --i) {
2188 int reg = x87_get_st_reg(state, i);
2190 if (! is_vfp_live(reg, live))
2191 kill_mask |= (1 << i);
2195 /* create a new state, will be changed */
2196 state = x87_clone_state(sim, state);
2198 DB((dbg, LEVEL_1, "Killing deads:\n"));
2199 DEBUG_ONLY(vfp_dump_live(live));
2200 DEBUG_ONLY(x87_dump_stack(state));
2202 if (kill_mask != 0 && live == 0) {
2203 /* special case: kill all registers */
2204 if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
2205 if (ia32_cg_config.use_femms) {
2206 /* use FEMMS on AMD processors to clear all */
2207 keep = new_bd_ia32_femms(NULL, block);
2209 /* use EMMS to clear all */
2210 keep = new_bd_ia32_emms(NULL, block);
2212 sched_add_before(first_insn, keep);
2218 /* now kill registers */
2220 /* we can only kill from TOS, so bring them up */
2221 if (! (kill_mask & 1)) {
2222 /* search from behind, because we can to a double-pop */
2223 for (i = depth - 1; i >= 0; --i) {
2224 if (kill_mask & (1 << i)) {
2225 kill_mask &= ~(1 << i);
2232 x87_set_st(state, -1, keep, i);
2233 x87_create_fxch(state, first_insn, i);
2236 if ((kill_mask & 3) == 3) {
2237 /* we can do a double-pop */
2241 /* only a single pop */
2246 kill_mask >>= num_pop;
2247 keep = x87_create_fpop(state, first_insn, num_pop);
2252 } /* x87_kill_deads */
2255 * If we have Phis with unknown operands in a block
2256 * we have to make sure that some value is actually put onto the stack.
2258 * @param state the x87 state
2259 * @param block the block that should be checked
2260 * @param pred_block check inputs from this predecessor block
2261 * @param pos index of pred_block
2263 static void fix_unknown_phis(x87_state *state, ir_node *block,
2264 ir_node *pred_block, int pos)
2268 sched_foreach_Phi(block, phi) {
2270 const arch_register_t *reg;
2271 ia32_x87_attr_t *attr;
2273 op = get_Phi_pred(phi, pos);
/* only Unknown_VFP inputs need a materialized replacement */
2274 if (!is_ia32_Unknown_VFP(op))
2277 reg = arch_get_irn_register(phi);
2279 /* create a zero at end of pred block */
2280 zero = new_bd_ia32_fldz(NULL, pred_block, ia32_reg_classes[CLASS_ia32_st].mode);
2281 x87_push(state, arch_register_get_index(reg), zero);
2283 attr = get_ia32_x87_attr(zero);
2284 attr->x87[2] = &ia32_st_regs[0];
2286 assert(is_ia32_fldz(zero));
2287 sched_add_before(sched_last(pred_block), zero);
2289 set_Phi_pred(phi, pos, zero);
2291 } /* fix_unknown_phis */
2294 * Run a simulation and fix all virtual instructions for a block.
2296 * @param sim the simulator handle
2297 * @param block the current block
2299 static void x87_simulate_block(x87_simulator *sim, ir_node *block)
2302 blk_state *bl_state = x87_get_bl_state(sim, block);
2303 x87_state *state = bl_state->begin;
2304 const ir_edge_t *edge;
2305 ir_node *start_block;
2307 assert(state != NULL);
2308 /* already processed? */
2309 if (bl_state->end != NULL)
2312 DB((dbg, LEVEL_1, "Simulate %+F\n", block));
2313 DB((dbg, LEVEL_2, "State at Block begin:\n "));
2314 DEBUG_ONLY(x87_dump_stack(state));
2316 /* at block begin, kill all dead registers */
2317 state = x87_kill_deads(sim, block, state);
2318 /* create a new state, will be changed */
2319 state = x87_clone_state(sim, state);
2321 /* beware, n might change */
2322 for (n = sched_first(block); !sched_is_end(n); n = next) {
2325 ir_op *op = get_irn_op(n);
2328 * get the next node to be simulated here.
2329 * n might be completely removed from the schedule-
2331 next = sched_next(n);
/* dispatch via the simulator function registered on the opcode
   (see register_sim / x87_init_simulator) */
2332 if (op->ops.generic != NULL) {
2333 func = (sim_func)op->ops.generic;
2336 node_inserted = (*func)(state, n);
2339 * sim_func might have added an additional node after n,
2340 * so update next node
2341 * beware: n must not be changed by sim_func
2342 * (i.e. removed from schedule) in this case
2344 if (node_inserted != NO_NODE_ADDED)
2345 next = sched_next(n);
/* used below to skip the start block as a successor */
2349 start_block = get_irg_start_block(get_irn_irg(block));
2351 DB((dbg, LEVEL_2, "State at Block end:\n ")); DEBUG_ONLY(x87_dump_stack(state));
2353 /* check if the state must be shuffled */
2354 foreach_block_succ(block, edge) {
2355 ir_node *succ = get_edge_src_irn(edge);
2356 blk_state *succ_state;
2358 if (succ == start_block)
2361 succ_state = x87_get_bl_state(sim, succ);
2363 fix_unknown_phis(state, succ, block, get_edge_src_pos(edge));
2365 if (succ_state->begin == NULL) {
2366 DB((dbg, LEVEL_2, "Set begin state for succ %+F:\n", succ));
2367 DEBUG_ONLY(x87_dump_stack(state));
2368 succ_state->begin = state;
2370 waitq_put(sim->worklist, succ);
2372 DB((dbg, LEVEL_2, "succ %+F already has a state, shuffling\n", succ));
2373 /* There is already a begin state for the successor, bad.
2374 Do the necessary permutations.
2375 Note that critical edges are removed, so this is always possible:
2376 If the successor has more than one possible input, then it must
2379 x87_shuffle(sim, block, state, succ, succ_state->begin);
2382 bl_state->end = state;
2383 } /* x87_simulate_block */
2386 * Register a simulator function.
2388 * @param op the opcode to simulate
2389 * @param func the simulator function for the opcode
2391 static void register_sim(ir_op *op, sim_func func)
/* each opcode may carry at most one simulator function */
2393 assert(op->ops.generic == NULL);
2394 op->ops.generic = (op_func) func;
2395 } /* register_sim */
2398 * Create a new x87 simulator.
2400 * @param sim a simulator handle, will be initialized
2401 * @param irg the current graph
2403 static void x87_init_simulator(x87_simulator *sim, ir_graph *irg)
2405 obstack_init(&sim->obst);
2406 sim->blk_states = pmap_create();
2407 sim->n_idx = get_irg_last_idx(irg);
/* liveness cache: one entry per node index of the graph */
2408 sim->live = obstack_alloc(&sim->obst, sizeof(*sim->live) * sim->n_idx);
2410 DB((dbg, LEVEL_1, "--------------------------------\n"
2411 "x87 Simulator started for %+F\n", irg));
2413 /* set the generic function pointer of instruction we must simulate */
2414 clear_irp_opcodes_generic_func();
2416 register_sim(op_ia32_Call, sim_Call);
2417 register_sim(op_ia32_vfld, sim_fld);
2418 register_sim(op_ia32_vfild, sim_fild);
2419 register_sim(op_ia32_vfld1, sim_fld1);
2420 register_sim(op_ia32_vfldz, sim_fldz);
2421 register_sim(op_ia32_vfadd, sim_fadd);
2422 register_sim(op_ia32_vfsub, sim_fsub);
2423 register_sim(op_ia32_vfmul, sim_fmul);
2424 register_sim(op_ia32_vfdiv, sim_fdiv);
2425 register_sim(op_ia32_vfprem, sim_fprem);
2426 register_sim(op_ia32_vfabs, sim_fabs);
2427 register_sim(op_ia32_vfchs, sim_fchs);
2428 register_sim(op_ia32_vfist, sim_fist);
2429 register_sim(op_ia32_vfisttp, sim_fisttp);
2430 register_sim(op_ia32_vfst, sim_fst);
2431 register_sim(op_ia32_vFtstFnstsw, sim_FtstFnstsw);
2432 register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
2433 register_sim(op_ia32_vFucomi, sim_Fucom);
2434 register_sim(op_be_Copy, sim_Copy);
2435 register_sim(op_be_Spill, sim_Spill);
2436 register_sim(op_be_Reload, sim_Reload);
2437 register_sim(op_be_Return, sim_Return);
2438 register_sim(op_be_Perm, sim_Perm);
2439 register_sim(op_be_Keep, sim_Keep);
2440 register_sim(op_be_Barrier, sim_Barrier);
2441 } /* x87_init_simulator */
2444 * Destroy a x87 simulator.
2446 * @param sim the simulator handle
2448 static void x87_destroy_simulator(x87_simulator *sim)
2450 pmap_destroy(sim->blk_states);
/* releases everything allocated on the simulator obstack,
   including the liveness cache */
2451 obstack_free(&sim->obst, NULL);
2452 DB((dbg, LEVEL_1, "x87 Simulator stopped\n\n"));
2453 } /* x87_destroy_simulator */
2456 * Pre-block walker: calculate the liveness information for the block
2457 * and store it into the sim->live cache.
2459 static void update_liveness_walker(ir_node *block, void *data)
/* data is the x87_simulator handle passed through by the block walker */
2461 x87_simulator *sim = data;
2462 update_liveness(sim, block);
2463 } /* update_liveness_walker */
2466 * Run a simulation and fix all virtual instructions for a graph.
2467 * Replaces all virtual floating point instructions and registers
2470 void x87_simulate_graph(be_irg_t *birg)
2472 /* TODO improve code quality (less executed fxch) by using execfreqs */
2474 ir_node *block, *start_block;
2475 blk_state *bl_state;
2477 ir_graph *irg = be_get_birg_irg(birg);
2479 /* create the simulator */
2480 x87_init_simulator(&sim, irg);
2482 start_block = get_irg_start_block(irg);
2483 bl_state = x87_get_bl_state(&sim, start_block);
2485 /* start with the empty state */
2486 bl_state->begin = empty;
2489 sim.worklist = new_waitq();
2490 waitq_put(sim.worklist, start_block);
2492 be_assure_liveness(birg);
2493 sim.lv = be_get_birg_liveness(birg);
2494 be_liveness_assure_sets(sim.lv);
2496 /* Calculate the liveness for all nodes. We must precalculate this info,
2497 * because the simulator adds new nodes (possible before Phi nodes) which
2498 * would let a lazy calculation fail.
2499 * On the other hand we reduce the computation amount due to
2500 * precaching from O(n^2) to O(n) at the expense of O(n) cache memory.
2502 irg_block_walk_graph(irg, update_liveness_walker, NULL, &sim);
/* drain the worklist: simulate every reachable block once */
2506 block = waitq_get(sim.worklist);
2507 x87_simulate_block(&sim, block);
2508 } while (! waitq_empty(sim.worklist));
2511 del_waitq(sim.worklist);
2512 x87_destroy_simulator(&sim);
2513 } /* x87_simulate_graph */
2515 /* Initializes the x87 simulator. */
2516 void ia32_init_x87(void)
/* register the debug channel used by the DB()/DEBUG_ONLY calls above */
2518 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.x87");
2519 } /* ia32_init_x87 */