* Transforms a Sub or xSub into Neg--Add iff OUT_REG == SRC2_REG.
* THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION.
*/
-static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) {
+static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg)
+{
ir_graph *irg;
ir_node *in1, *in2, *noreg, *nomem, *res;
ir_node *noreg_fp, *block;
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
long pn = get_Proj_proj(proj);
- if(pn == pn_ia32_Sub_res) {
+ if (pn == pn_ia32_Sub_res) {
assert(res_proj == NULL);
res_proj = proj;
} else {
DBG_OPT_SUB2NEGADD(irn, res);
}
-static INLINE int need_constraint_copy(ir_node *irn) {
- /* the 3 operand form of IMul needs no constraint copy */
- if(is_ia32_IMul(irn)) {
- ir_node *right = get_irn_n(irn, n_ia32_IMul_right);
- if(is_ia32_Immediate(right))
+static INLINE int need_constraint_copy(ir_node *irn)
+{
+ /* TODO this should be determined from the node specification */
+ switch (get_ia32_irn_opcode(irn)) {
+ case iro_ia32_IMul: {
+ /* the 3 operand form of IMul needs no constraint copy */
+ ir_node *right = get_irn_n(irn, n_ia32_IMul_right);
+ return !is_ia32_Immediate(right);
+ }
+
+ case iro_ia32_Lea:
+ case iro_ia32_Conv_I2I:
+ case iro_ia32_Conv_I2I8Bit:
+ case iro_ia32_CMov:
return 0;
- }
- return ! is_ia32_Lea(irn) &&
- ! is_ia32_Conv_I2I(irn) &&
- ! is_ia32_Conv_I2I8Bit(irn) &&
- ! is_ia32_CMov(irn);
+ default:
+ return 1;
+ }
}
/**
arity = get_irn_arity(node);
uses_out_reg = NULL;
uses_out_reg_pos = -1;
- for(i2 = 0; i2 < arity; ++i2) {
+ for (i2 = 0; i2 < arity; ++i2) {
ir_node *in = get_irn_n(node, i2);
const arch_register_t *in_reg;
- if(!mode_is_data(get_irn_mode(in)))
+ if (!mode_is_data(get_irn_mode(in)))
continue;
in_reg = arch_get_irn_register(arch_env, in);
- if(in_reg != out_reg)
+ if (in_reg != out_reg)
continue;
- if(uses_out_reg != NULL && in != uses_out_reg) {
+ if (uses_out_reg != NULL && in != uses_out_reg) {
panic("invalid register allocation");
}
uses_out_reg = in;
- if(uses_out_reg_pos >= 0)
+ if (uses_out_reg_pos >= 0)
uses_out_reg_pos = -1; /* multiple inputs... */
else
uses_out_reg_pos = i2;
/* no-one else is using the out reg, we can simply copy it
* (the register can't be live since the operation will override it
* anyway) */
- if(uses_out_reg == NULL) {
+ if (uses_out_reg == NULL) {
ir_node *copy = be_new_Copy(cls, irg, block, in_node);
DBG_OPT_2ADDRCPY(copy);
/* set copy as in */
set_irn_n(node, same_pos, copy);
- DBG((dbg, LEVEL_1, "created copy %+F for should be same argument "
- "at input %d of %+F\n", copy, same_pos, node));
+ DBG((dbg, LEVEL_1,
+ "created copy %+F for should be same argument at input %d of %+F\n",
+ copy, same_pos, node));
continue;
}
/* for commutative nodes we can simply swap the left/right */
if (uses_out_reg_pos == n_ia32_binary_right && is_ia32_commutative(node)) {
ia32_swap_left_right(node);
- DBG((dbg, LEVEL_1, "swapped left/right input of %+F to resolve "
- "should be same constraint\n", node));
+ DBG((dbg, LEVEL_1,
+ "swapped left/right input of %+F to resolve should be same constraint\n",
+ node));
continue;
}
sched_add_before(node, perm);
- DBG((dbg, LEVEL_1, "created perm %+F for should be same argument "
- "at input %d of %+F (need permutate with %+F)\n", perm, same_pos,
- node, uses_out_reg));
+ DBG((dbg, LEVEL_1,
+ "created perm %+F for should be same argument at input %d of %+F (need permutate with %+F)\n",
+ perm, same_pos, node, uses_out_reg));
/* use the perm results */
- for(i2 = 0; i2 < arity; ++i2) {
+ for (i2 = 0; i2 < arity; ++i2) {
ir_node *in = get_irn_n(node, i2);
- if(in == in_node) {
+ if (in == in_node) {
set_irn_n(node, i2, perm_proj0);
- } else if(in == uses_out_reg) {
+ } else if (in == uses_out_reg) {
set_irn_n(node, i2, perm_proj1);
}
}
* register -> base or index is broken then.
* Solution: Turn back this address mode into explicit Load + Operation.
*/
-static void fix_am_source(ir_node *irn, void *env) {
+static void fix_am_source(ir_node *irn, void *env)
+{
ia32_code_gen_t *cg = env;
const arch_env_t *arch_env = cg->arch_env;
ir_node *base;
if (! is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS)
return;
/* only need to fix binary operations */
- if (get_ia32_am_arity(irn) != ia32_am_binary)
+ if (get_ia32_am_support(irn) != ia32_am_binary)
return;
base = get_irn_n(irn, n_ia32_base);
ir_node *same_node = get_irn_n(irn, same_pos);
const arch_register_t *same_reg
= arch_get_irn_register(arch_env, same_node);
- const arch_register_class_t *same_cls;
ir_graph *irg = cg->irg;
dbg_info *dbgi = get_irn_dbg_info(irn);
ir_node *block = get_nodes_block(irn);
- ir_mode *proj_mode;
ir_node *load;
ir_node *load_res;
ir_node *mem;
- int pnres;
- int pnmem;
	/* should_be same constraint is fulfilled, nothing to do */
- if(out_reg == same_reg)
+ if (out_reg == same_reg)
continue;
/* we only need to do something if the out reg is the same as base
continue;
/* turn back address mode */
- same_cls = arch_register_get_class(same_reg);
- mem = get_irn_n(irn, n_ia32_mem);
+ mem = get_irn_n(irn, n_ia32_mem);
assert(get_irn_mode(mem) == mode_M);
- if (same_cls == &ia32_reg_classes[CLASS_ia32_gp]) {
- load = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);
- pnres = pn_ia32_Load_res;
- pnmem = pn_ia32_Load_M;
- proj_mode = mode_Iu;
- } else if (same_cls == &ia32_reg_classes[CLASS_ia32_xmm]) {
- load = new_rd_ia32_xLoad(dbgi, irg, block, base, index, mem,
- get_ia32_ls_mode(irn));
- pnres = pn_ia32_xLoad_res;
- pnmem = pn_ia32_xLoad_M;
- proj_mode = mode_E;
- } else {
- panic("cannot turn back address mode for this register class");
- }
+ load = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);
/* copy address mode information to load */
set_ia32_op_type(load, ia32_AddrModeS);
ia32_copy_am_attrs(load, irn);
+ if (is_ia32_is_reload(irn))
+ set_ia32_is_reload(load);
/* insert the load into schedule */
sched_add_before(irn, load);
DBG((dbg, LEVEL_3, "irg %+F: build back AM source for node %+F, inserted load %+F\n", cg->irg, irn, load));
- load_res = new_r_Proj(cg->irg, block, load, proj_mode, pnres);
+ load_res = new_r_Proj(cg->irg, block, load, mode_Iu, pn_ia32_Load_res);
arch_set_irn_register(cg->arch_env, load_res, out_reg);
/* set the new input operand */
- set_irn_n(irn, n_ia32_binary_right, load_res);
- if(get_irn_mode(irn) == mode_T) {
+ if (is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_right)))
+ set_irn_n(irn, n_ia32_binary_left, load_res);
+ else
+ set_irn_n(irn, n_ia32_binary_right, load_res);
+ if (get_irn_mode(irn) == mode_T) {
const ir_edge_t *edge, *next;
foreach_out_edge_safe(irn, edge, next) {
ir_node *node = get_edge_src_irn(edge);
exchange(node, irn);
} else if (pn == pn_ia32_mem) {
set_Proj_pred(node, load);
- set_Proj_proj(node, pnmem);
+ set_Proj_proj(node, pn_ia32_Load_M);
+ } else {
+ panic("Unexpected Proj");
}
}
set_irn_mode(irn, mode_Iu);
/**
* Block walker: finishes a block
*/
-static void ia32_finish_irg_walker(ir_node *block, void *env) {
+static void ia32_finish_irg_walker(ir_node *block, void *env)
+{
ia32_code_gen_t *cg = env;
ir_node *irn, *next;
/**
* Block walker: pushes all blocks on a wait queue
*/
-static void ia32_push_on_queue_walker(ir_node *block, void *env) {
+static void ia32_push_on_queue_walker(ir_node *block, void *env)
+{
waitq *wq = env;
waitq_put(wq, block);
}
/**
* Add Copy nodes for not fulfilled should_be_equal constraints
*/
-void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) {
+void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg)
+{
waitq *wq = new_waitq();
/* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */