2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Handles fpu rounding modes
23 * @author Matthias Braun
25 * The problem we deal with here is that the x86 ABI says the user can control
26 * the fpu rounding mode, which means that when we do some operations like float
27 * to int conversion which are specified as truncation in the C standard we have
28 * to spill, change and restore the fpu rounding mode between spills.
33 #include "ia32_new_nodes.h"
34 #include "ia32_architecture.h"
35 #include "gen_ia32_regalloc_if.h"
49 #include "bessaconstr.h"
/* Global constant entities holding the canonical x87 control words for
 * round(-to-nearest) and truncate mode. Created lazily by
 * create_fpcw_entities() and loaded directly via FldCW in
 * unsafe-floatconv mode. */
52 static ir_entity *fpcw_round = NULL;
53 static ir_entity *fpcw_truncate = NULL;
/**
 * Creates a local, read-only global entity of primitive 16-bit unsigned
 * type (mode_Hu) initialised to @p value. Used to materialise fpu
 * control-word constants that an FldCW can load straight from memory.
 *
 * @param value  the control-word bits to store in the entity
 * @param name   the linker (ld) name given to the entity
 */
55 static ir_entity *create_ent(int value, const char *name)
57 ir_mode *mode = mode_Hu;
58 ir_type *type = new_type_primitive(mode);
59 ir_type *glob = get_glob_type();
/* align to 4 bytes although the stored value is only 2 bytes wide */
65 set_type_alignment_bytes(type, 4);
67 tv = new_tarval_from_long(value, mode);
68 ent = new_entity(glob, new_id_from_str(name), type);
69 set_entity_ld_ident(ent, get_entity_ident(ent));
/* private to this compilation unit and never written at runtime */
70 set_entity_visibility(ent, ir_visibility_local);
71 add_entity_linkage(ent, IR_LINKAGE_CONSTANT);
/* the constant initializer node lives in the const-code irg */
73 cnst_irg = get_const_code_irg();
74 cnst = new_r_Const(cnst_irg, tv);
75 set_atomic_ent_value(ent, cnst);
80 static void create_fpcw_entities(void)
82 fpcw_round = create_ent(0xc7f, "_fpcw_round");
83 fpcw_truncate = create_ent(0x37f, "_fpcw_truncate");
/**
 * Spill callback for the fpcw state register (handed to be_assure_state()
 * from ia32_setup_fpu_mode()).
 *
 * @param env    callback environment (unused here)
 * @param state  the fpcw state value that is to be spilled
 * @param force  when 1, spill even if @p state is an ia32_ChangeCW
 * @param after  node after which the spill is scheduled
 */
86 static ir_node *create_fpu_mode_spill(void *env, ir_node *state, int force,
91 /* we don't spill the fpcw in unsafe mode */
92 if (ia32_cg_config.use_unsafe_floatconv) {
93 ir_node *block = get_nodes_block(state);
94 if (force == 1 || !is_ia32_ChangeCW(state)) {
/* NOP variant: keeps the state node alive without a real memory store */
95 ir_node *spill = new_bd_ia32_FnstCWNOP(NULL, block, state);
96 sched_add_after(after, spill);
/* safe mode: store the control word into a stack-frame slot */
102 if (force == 1 || !is_ia32_ChangeCW(state)) {
103 ir_graph *irg = get_irn_irg(state);
104 ir_node *block = get_nodes_block(state);
105 ir_node *noreg = ia32_new_NoReg_gp(irg);
106 ir_node *nomem = get_irg_no_mem(irg);
107 ir_node *frame = get_irg_frame(irg);
109 = new_bd_ia32_FnstCW(NULL, block, frame, noreg, nomem, state);
110 set_ia32_op_type(spill, ia32_AddrModeD);
111 /* use mode_Iu, as movl has a shorter opcode than movw */
112 set_ia32_ls_mode(spill, mode_Iu);
113 set_ia32_use_frame(spill);
/* skip Projs so the spill lands after the producing instruction itself */
115 sched_add_after(skip_Proj(after), spill);
/**
 * Builds an FldCW node that loads the fpu control word directly from the
 * given global entity (source address mode). The node is pinned to the
 * FPCW register; scheduling is left to the caller.
 *
 * @param block   block the FldCW is placed in
 * @param entity  global entity holding the control-word constant
 */
122 static ir_node *create_fldcw_ent(ir_node *block, ir_entity *entity)
124 ir_graph *irg = get_irn_irg(block);
125 ir_node *nomem = get_irg_no_mem(irg);
126 ir_node *noreg = ia32_new_NoReg_gp(irg);
129 reload = new_bd_ia32_FldCW(NULL, block, noreg, noreg, nomem);
130 set_ia32_op_type(reload, ia32_AddrModeS);
131 set_ia32_ls_mode(reload, ia32_reg_classes[CLASS_ia32_fp_cw].mode);
/* address the global entity via the symconst part of the address mode */
132 set_ia32_am_sc(reload, entity);
133 set_ia32_use_frame(reload);
134 arch_set_irn_register(reload, &ia32_registers[REG_FPCW]);
/**
 * Reload callback for the fpcw state register (handed to be_assure_state()).
 *
 * Three cases are handled:
 *  - unsafe-floatconv mode: FldCW straight from one of the global
 *    round/truncate entities;
 *  - a spill slot exists: FldCW from the stack-frame slot written by
 *    create_fpu_mode_spill();
 *  - otherwise: read the current control word with FnstCW, Or in a
 *    constant (presumably the RC truncation bits -- see the TODO below),
 *    store the result back to the frame and FldCW it.
 *
 * @param env     callback environment (unused here)
 * @param state   the current fpcw state node
 * @param spill   the spill node to reload from, or NULL
 * @param before  node before which the reload is scheduled
 */
139 static ir_node *create_fpu_mode_reload(void *env, ir_node *state,
140 ir_node *spill, ir_node *before,
143 ir_graph *irg = get_irn_irg(state);
144 ir_node *block = get_nodes_block(before);
145 ir_node *frame = get_irg_frame(irg);
146 ir_node *noreg = ia32_new_NoReg_gp(irg);
147 ir_node *reload = NULL;
150 if (ia32_cg_config.use_unsafe_floatconv) {
/* entities are created on first use */
151 if (fpcw_round == NULL) {
152 create_fpcw_entities();
155 reload = create_fldcw_ent(block, fpcw_round);
157 reload = create_fldcw_ent(block, fpcw_truncate);
159 sched_add_before(before, reload);
/* reload the previously spilled control word from the frame slot */
164 reload = new_bd_ia32_FldCW(NULL, block, frame, noreg, spill);
165 set_ia32_op_type(reload, ia32_AddrModeS);
166 set_ia32_ls_mode(reload, ia32_reg_classes[CLASS_ia32_fp_cw].mode);
167 set_ia32_use_frame(reload);
168 arch_set_irn_register(reload, &ia32_registers[REG_FPCW]);
170 sched_add_before(before, reload);
/* no spill available: read-modify-write the current control word */
172 ir_mode *lsmode = ia32_reg_classes[CLASS_ia32_fp_cw].mode;
173 ir_node *nomem = get_irg_no_mem(irg);
174 ir_node *cwstore, *load, *load_res, *orn, *store, *fldcw;
178 assert(last_state != NULL);
/* 1) store the live control word into a frame slot */
179 cwstore = new_bd_ia32_FnstCW(NULL, block, frame, noreg, nomem,
181 set_ia32_op_type(cwstore, ia32_AddrModeD);
182 set_ia32_ls_mode(cwstore, lsmode);
183 set_ia32_use_frame(cwstore);
184 sched_add_before(before, cwstore);
/* 2) load it back into a general-purpose register */
186 load = new_bd_ia32_Load(NULL, block, frame, noreg, cwstore);
187 set_ia32_op_type(load, ia32_AddrModeS);
188 set_ia32_ls_mode(load, lsmode);
189 set_ia32_use_frame(load);
190 sched_add_before(before, load);
192 load_res = new_r_Proj(load, mode_Iu, pn_ia32_Load_res);
/* 3) Or in the new mode bits (immediate value not configurable yet) */
194 /* TODO: make the actual mode configurable in ChangeCW... */
195 or_const = new_bd_ia32_Immediate(NULL, get_irg_start_block(irg),
/* immediates carry the NOREG placeholder register */
197 arch_set_irn_register(or_const, &ia32_registers[REG_GP_NOREG]);
198 orn = new_bd_ia32_Or(NULL, block, noreg, noreg, nomem, load_res,
200 sched_add_before(before, orn);
/* 4) write the modified control word back to the frame */
202 store = new_bd_ia32_Store(NULL, block, frame, noreg, nomem, orn);
203 set_ia32_op_type(store, ia32_AddrModeD);
204 /* use mode_Iu, as movl has a shorter opcode than movw */
205 set_ia32_ls_mode(store, mode_Iu);
206 set_ia32_use_frame(store);
207 store_proj = new_r_Proj(store, mode_M, pn_ia32_Store_M);
208 sched_add_before(before, store);
/* 5) load the new control word into the fpu */
210 fldcw = new_bd_ia32_FldCW(NULL, block, frame, noreg, store_proj);
211 set_ia32_op_type(fldcw, ia32_AddrModeS);
212 set_ia32_ls_mode(fldcw, lsmode);
213 set_ia32_use_frame(fldcw);
214 arch_set_irn_register(fldcw, &ia32_registers[REG_FPCW]);
215 sched_add_before(before, fldcw);
/** Walker environment: collects all nodes producing a value in the
 *  fpcw register (flexible array, see ARR_APP1 in the walker). */
223 typedef struct collect_fpu_mode_nodes_env_t {
224 ir_node **state_nodes;
225 } collect_fpu_mode_nodes_env_t;
/**
 * irg walker callback: appends every data-mode node assigned to the
 * FPCW register -- except ia32_ChangeCW nodes -- to env->state_nodes.
 */
227 static void collect_fpu_mode_nodes_walker(ir_node *node, void *data)
229 collect_fpu_mode_nodes_env_t *env = (collect_fpu_mode_nodes_env_t*)data;
230 const arch_register_t *reg;
/* only data values can carry a register assignment we care about */
232 if (!mode_is_data(get_irn_mode(node)))
235 reg = arch_get_irn_register(node);
236 if (reg == &ia32_registers[REG_FPCW] && !is_ia32_ChangeCW(node)) {
237 ARR_APP1(ir_node*, env->state_nodes, node);
/**
 * Performs SSA construction over all fpcw state-producing nodes, so the
 * different fpu-mode values are connected through Phis, then updates (or
 * invalidates) the liveness information and pins the FPCW register on
 * every newly created Phi.
 */
241 static void rewire_fpu_mode_nodes(ir_graph *irg)
243 collect_fpu_mode_nodes_env_t env;
244 be_ssa_construction_env_t senv;
245 const arch_register_t *reg = &ia32_registers[REG_FPCW];
246 ir_node *initial_value;
248 be_lv_t *lv = be_get_irg_liveness(irg);
251 /* do ssa construction for the fpu modes */
252 env.state_nodes = NEW_ARR_F(ir_node*, 0);
253 irg_walk_graph(irg, collect_fpu_mode_nodes_walker, NULL, &env);
255 /* nothing needs to be done, in fact we must not continue as for endless
256 * loops noone is using the initial_value and it will point to a bad node
259 if (ARR_LEN(env.state_nodes) == 0) {
260 DEL_ARR_F(env.state_nodes);
/* the incoming register value acts as the dominating initial definition */
264 initial_value = be_get_initial_reg_value(irg, reg);
265 be_ssa_construction_init(&senv, irg);
266 be_ssa_construction_add_copies(&senv, env.state_nodes,
267 ARR_LEN(env.state_nodes));
/* rewire all users to the (possibly Phi-merged) reaching definition */
268 be_ssa_construction_fix_users(&senv, initial_value);
/* keep liveness info consistent for every touched definition */
271 be_ssa_construction_update_liveness_phis(&senv, lv);
272 be_liveness_update(lv, initial_value);
273 len = ARR_LEN(env.state_nodes);
274 for (i = 0; i < len; ++i) {
275 be_liveness_update(lv, env.state_nodes[i]);
278 be_liveness_invalidate(be_get_irg_liveness(irg));
281 /* set registers for the phis */
282 phis = be_ssa_construction_get_new_phis(&senv);
284 for (i = 0; i < len; ++i) {
285 ir_node *phi = phis[i];
286 arch_set_irn_register(phi, reg);
288 be_ssa_construction_destroy(&senv);
289 DEL_ARR_F(env.state_nodes);
291 be_liveness_invalidate(be_get_irg_liveness(irg));
/**
 * Entry point: sets up fpu rounding-mode handling for @p irg.
 * First SSA-constructs the fpcw state values, then lets the generic
 * be_assure_state() pass insert the spill/reload code through the
 * callbacks defined above.
 */
294 void ia32_setup_fpu_mode(ir_graph *irg)
296 /* do ssa construction for the fpu modes */
297 rewire_fpu_mode_nodes(irg);
299 /* ensure correct fpu mode for operations */
300 be_assure_state(irg, &ia32_registers[REG_FPCW],
301 NULL, create_fpu_mode_spill, create_fpu_mode_reload);