/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   This file implements functions to finalize the irg for emit.
 * @author  Christian Wuerdig
 */
#include "irnode.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "iredges.h"
#include "irprintf.h"
#include "pdeq.h"
#include "error.h"
#include "debug.h"

#include "../bearch.h"
#include "../besched.h"
#include "../benode.h"

#include "bearch_ia32_t.h"
#include "ia32_finish.h"
#include "ia32_new_nodes.h"
#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_optimize.h"
#include "gen_ia32_regalloc_if.h"
50 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
52 COMPILETIME_ASSERT((int)pn_ia32_Sub_res == pn_ia32_Sbb_res, pn_ia32_Sub_res);
53 COMPILETIME_ASSERT((int)pn_ia32_Sub_flags == pn_ia32_Sbb_flags, pn_ia32_Sub_flags);
56 * Transforms a Sub or xSub into Neg--Add iff OUT_REG != SRC1_REG && OUT_REG == SRC2_REG.
57 * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION.
59 static void ia32_transform_sub_to_neg_add(ir_node *irn)
62 ir_node *in1, *in2, *noreg, *nomem, *res;
63 ir_node *noreg_fp, *block;
65 const arch_register_t *in1_reg, *in2_reg, *out_reg;
67 /* fix_am will solve this for AddressMode variants */
68 if (get_ia32_op_type(irn) != ia32_Normal)
71 irg = get_irn_irg(irn);
72 noreg = ia32_new_NoReg_gp(irg);
73 noreg_fp = ia32_new_NoReg_xmm(irg);
74 nomem = get_irg_no_mem(irg);
75 in1 = get_irn_n(irn, n_ia32_binary_left);
76 in2 = get_irn_n(irn, n_ia32_binary_right);
77 in1_reg = arch_get_irn_register(in1);
78 in2_reg = arch_get_irn_register(in2);
79 out_reg = arch_irn_get_register(irn, 0);
81 if (out_reg == in1_reg)
84 /* in case of sub and OUT == SRC2 we can transform the sequence into neg src2 -- add */
85 if (out_reg != in2_reg)
88 block = get_nodes_block(irn);
89 dbgi = get_irn_dbg_info(irn);
91 /* generate the neg src2 */
92 if (is_ia32_xSub(irn)) {
95 ir_mode *op_mode = get_ia32_ls_mode(irn);
97 assert(get_irn_mode(irn) != mode_T);
99 res = new_bd_ia32_xXor(dbgi, block, noreg, noreg, nomem, in2, noreg_fp);
100 size = get_mode_size_bits(op_mode);
101 entity = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
102 set_ia32_am_sc(res, entity);
103 set_ia32_op_type(res, ia32_AddrModeS);
104 set_ia32_ls_mode(res, op_mode);
106 arch_set_irn_register(res, in2_reg);
108 /* add to schedule */
109 sched_add_before(irn, res);
111 /* generate the add */
112 res = new_bd_ia32_xAdd(dbgi, block, noreg, noreg, nomem, res, in1);
113 set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
115 /* exchange the add and the sub */
116 edges_reroute(irn, res);
118 /* add to schedule */
119 sched_add_before(irn, res);
121 ir_node *res_proj = NULL;
122 ir_node *flags_proj = NULL;
124 const ir_edge_t *edge;
126 if (get_irn_mode(irn) == mode_T) {
127 /* collect the Proj uses */
128 foreach_out_edge(irn, edge) {
129 ir_node *proj = get_edge_src_irn(edge);
130 long pn = get_Proj_proj(proj);
131 if (pn == pn_ia32_Sub_res) {
132 assert(res_proj == NULL);
135 assert(pn == pn_ia32_Sub_flags);
136 assert(flags_proj == NULL);
142 if (is_ia32_Sbb(irn)) {
143 /* Feed borrow (in CF) as carry (via CMC) into NOT+ADC. */
144 carry = get_irn_n(irn, n_ia32_Sbb_eflags);
145 carry = new_bd_ia32_Cmc(dbgi, block, carry);
147 } else if (flags_proj != 0) {
149 * ARG, the above technique does NOT set the flags right.
150 * So, we must produce the following code:
152 * t2 = a + ~b + Carry
155 * a + -b = a + (~b + 1) would set the carry flag wrong IFF both a and b are zero.
162 carry = new_bd_ia32_Stc(dbgi, block);
165 nnot = new_bd_ia32_Not(dbgi, block, in2);
166 arch_set_irn_register(nnot, in2_reg);
167 sched_add_before(irn, nnot);
169 arch_set_irn_register(carry, &ia32_registers[REG_EFLAGS]);
170 sched_add_before(irn, carry);
172 adc = new_bd_ia32_Adc(dbgi, block, noreg, noreg, nomem, nnot, in1, carry);
173 arch_set_irn_register(adc, out_reg);
174 sched_add_before(irn, adc);
176 set_irn_mode(adc, mode_T);
177 adc_flags = new_r_Proj(adc, mode_Iu, pn_ia32_Adc_flags);
178 arch_set_irn_register(adc_flags, &ia32_registers[REG_EFLAGS]);
180 if (flags_proj != NULL) {
181 cmc = new_bd_ia32_Cmc(dbgi, block, adc_flags);
182 arch_set_irn_register(cmc, &ia32_registers[REG_EFLAGS]);
183 sched_add_before(irn, cmc);
184 exchange(flags_proj, cmc);
187 if (res_proj != NULL) {
188 set_Proj_pred(res_proj, adc);
189 set_Proj_proj(res_proj, pn_ia32_Adc_res);
194 res = new_bd_ia32_Neg(dbgi, block, in2);
195 arch_set_irn_register(res, in2_reg);
197 /* add to schedule */
198 sched_add_before(irn, res);
200 /* generate the add */
201 res = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, res, in1);
202 arch_set_irn_register(res, out_reg);
203 set_ia32_commutative(res);
205 /* exchange the add and the sub */
206 edges_reroute(irn, res);
208 /* add to schedule */
209 sched_add_before(irn, res);
213 set_irn_mode(res, get_irn_mode(irn));
215 SET_IA32_ORIG_NODE(res, irn);
217 /* remove the old sub */
221 DBG_OPT_SUB2NEGADD(irn, res);
224 static inline int need_constraint_copy(ir_node *irn)
226 /* TODO this should be determined from the node specification */
227 switch (get_ia32_irn_opcode(irn)) {
228 case iro_ia32_IMul: {
229 /* the 3 operand form of IMul needs no constraint copy */
230 ir_node *right = get_irn_n(irn, n_ia32_IMul_right);
231 return !is_ia32_Immediate(right);
235 case iro_ia32_Conv_I2I:
236 case iro_ia32_Conv_I2I8Bit:
237 case iro_ia32_CMovcc:
246 * Returns the index of the "same" register.
247 * On the x86, we should have only one.
249 static int get_first_same(const arch_register_req_t* req)
251 const unsigned other = req->other_same;
254 for (i = 0; i < 32; ++i) {
255 if (other & (1U << i)) return i;
257 panic("same position not found");
261 * Insert copies for all ia32 nodes where the should_be_same requirement
263 * Transform Sub into Neg -- Add if IN2 == OUT
265 static void assure_should_be_same_requirements(ir_node *node)
267 const arch_register_t *out_reg, *in_reg;
269 ir_node *in_node, *block;
271 n_res = arch_irn_get_n_outs(node);
272 block = get_nodes_block(node);
274 /* check all OUT requirements, if there is a should_be_same */
275 for (i = 0; i < n_res; i++) {
278 ir_node *uses_out_reg;
279 const arch_register_req_t *req = arch_get_out_register_req(node, i);
280 const arch_register_class_t *cls;
281 int uses_out_reg_pos;
283 if (!arch_register_req_is(req, should_be_same))
286 same_pos = get_first_same(req);
288 /* get in and out register */
289 out_reg = arch_irn_get_register(node, i);
290 in_node = get_irn_n(node, same_pos);
291 in_reg = arch_get_irn_register(in_node);
293 /* requirement already fulfilled? */
294 if (in_reg == out_reg)
296 cls = arch_register_get_class(in_reg);
297 assert(cls == arch_register_get_class(out_reg));
299 /* check if any other input operands uses the out register */
300 arity = get_irn_arity(node);
302 uses_out_reg_pos = -1;
303 for (i2 = 0; i2 < arity; ++i2) {
304 ir_node *in = get_irn_n(node, i2);
305 const arch_register_t *other_in_reg;
307 if (!mode_is_data(get_irn_mode(in)))
310 other_in_reg = arch_get_irn_register(in);
312 if (other_in_reg != out_reg)
315 if (uses_out_reg != NULL && in != uses_out_reg) {
316 panic("invalid register allocation");
319 if (uses_out_reg_pos >= 0)
320 uses_out_reg_pos = -1; /* multiple inputs... */
322 uses_out_reg_pos = i2;
325 /* no-one else is using the out reg, we can simply copy it
326 * (the register can't be live since the operation will override it
328 if (uses_out_reg == NULL) {
329 ir_node *copy = be_new_Copy(cls, block, in_node);
330 DBG_OPT_2ADDRCPY(copy);
332 /* destination is the out register */
333 arch_set_irn_register(copy, out_reg);
335 /* insert copy before the node into the schedule */
336 sched_add_before(node, copy);
339 set_irn_n(node, same_pos, copy);
342 "created copy %+F for should be same argument at input %d of %+F\n",
343 copy, same_pos, node));
347 /* for commutative nodes we can simply swap the left/right */
348 if (uses_out_reg_pos == n_ia32_binary_right && is_ia32_commutative(node)) {
349 ia32_swap_left_right(node);
351 "swapped left/right input of %+F to resolve should be same constraint\n",
356 panic("Unresolved should_be_same constraint");
362 * We have a source address mode node with base or index register equal to
363 * result register and unfulfilled should_be_same requirement. The constraint
364 * handler will insert a copy from the remaining input operand to the result
365 * register -> base or index is broken then.
366 * Solution: Turn back this address mode into explicit Load + Operation.
368 static void fix_am_source(ir_node *irn)
372 /* check only ia32 nodes with source address mode */
373 if (!is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS)
375 /* only need to fix binary operations */
376 if (get_ia32_am_support(irn) != ia32_am_binary)
379 n_res = arch_irn_get_n_outs(irn);
381 for (i = 0; i < n_res; i++) {
382 const arch_register_req_t *req = arch_get_out_register_req(irn, i);
383 const arch_register_t *out_reg;
386 const arch_register_t *same_reg;
389 if (!arch_register_req_is(req, should_be_same))
392 /* get in and out register */
393 out_reg = arch_irn_get_register(irn, i);
394 same_pos = get_first_same(req);
395 same_node = get_irn_n(irn, same_pos);
396 same_reg = arch_get_irn_register(same_node);
398 /* should_be same constraint is fullfilled, nothing to do */
399 if (out_reg == same_reg)
402 /* we only need to do something if the out reg is the same as base
404 if (out_reg != arch_get_irn_register(get_irn_n(irn, n_ia32_base)) &&
405 out_reg != arch_get_irn_register(get_irn_n(irn, n_ia32_index)))
408 load_res = ia32_turn_back_am(irn);
409 arch_set_irn_register(load_res, out_reg);
412 "irg %+F: build back AM source for node %+F, inserted load %+F\n",
413 get_irn_irg(irn), irn, get_Proj_pred(load_res)));
419 * Block walker: finishes a block
421 static void ia32_finish_irg_walker(ir_node *block, void *env)
426 /* first: turn back AM source if necessary */
427 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
428 next = sched_next(irn);
432 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
433 next = sched_next(irn);
435 /* check if there is a sub which need to be transformed */
436 if (is_ia32_Sub(irn) || is_ia32_Sbb(irn) || is_ia32_xSub(irn)) {
437 ia32_transform_sub_to_neg_add(irn);
441 /* second: insert copies and finish irg */
442 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
443 next = sched_next(irn);
444 if (is_ia32_irn(irn)) {
445 /* some nodes are just a bit less efficient, but need no fixing if the
446 * should be same requirement is not fulfilled */
447 if (need_constraint_copy(irn))
448 assure_should_be_same_requirements(irn);
454 * Block walker: pushes all blocks on a wait queue
456 static void ia32_push_on_queue_walker(ir_node *block, void *env)
458 waitq *wq = (waitq*)env;
459 waitq_put(wq, block);
464 * Add Copy nodes for not fulfilled should_be_equal constraints
466 void ia32_finish_irg(ir_graph *irg)
468 waitq *wq = new_waitq();
470 /* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */
471 irg_block_walk_graph(irg, NULL, ia32_push_on_queue_walker, wq);
473 while (! waitq_empty(wq)) {
474 ir_node *block = (ir_node*)waitq_get(wq);
475 ia32_finish_irg_walker(block, NULL);
480 void ia32_init_finish(void)
482 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.finish");