return (ir_node *)n;
}
+
/**
* Return register requirements for an ia32 node.
* If the node returns a tuple (mode_T) then the proj's
static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
const ia32_register_req_t *irn_req;
long node_pos = pos == -1 ? 0 : pos;
- ir_mode *mode = get_irn_mode(irn);
+ ir_mode *mode = is_Block(irn) ? NULL : get_irn_mode(irn);
firm_dbg_module_t *mod = firm_dbg_register(DEBUG_MODULE);
- if (mode == mode_T || mode == mode_M) {
- DBG((mod, LEVEL_1, "ignoring mode_T, mode_M node %+F\n", irn));
+ if (is_Block(irn) || mode == mode_M || mode == mode_X) {
+ DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
+ return NULL;
+ }
+
+ if (mode == mode_T && pos < 0) {
+ DBG((mod, LEVEL_1, "ignoring request OUT requirements for node %+F\n", irn));
return NULL;
}
int pos = 0;
const ia32_irn_ops_t *ops = self;
+ if (get_irn_mode(irn) == mode_X) {
+ return;
+ }
+
DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn));
if (is_Proj(irn)) {
const arch_register_t *reg = NULL;
if (is_Proj(irn)) {
+
+ if (get_irn_mode(irn) == mode_X) {
+ return NULL;
+ }
+
pos = ia32_translate_proj_pos(irn);
irn = my_skip_proj(irn);
}
ir_graph *irg;
} ia32_abi_env_t;
/**
 * ABI callback: allocate and initialize the per-graph ia32 ABI environment.
 * This hunk changes the callback signature from taking the isa directly to
 * taking the whole arch_env; the isa is now fetched via aenv->isa.
 * Returns a heap-allocated ia32_abi_env_t; ownership passes to the caller
 * (presumably freed by the matching ABI done-callback — not visible here).
 */
-static void *ia32_abi_init(const be_abi_call_t *call, const arch_isa_t *isa, ir_graph *irg)
+static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
{
ia32_abi_env_t *env = xmalloc(sizeof(env[0]));  /* NOTE(review): result unchecked — xmalloc presumably aborts on OOM; confirm */
be_abi_call_flags_t fl = be_abi_call_get_flags(call);
/* store only the flag bits, not the whole flags struct */
env->flags = fl.bits;
env->irg = irg;
- env->isa = isa;
+ env->isa = aenv->isa;
return env;
}
ir_node *curr_no_reg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_XXX]);
ir_node *store_bp;
- curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, reg_size, be_stack_dir_along);
+ curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, reg_size, be_stack_dir_expand);
store_bp = new_rd_ia32_Store(NULL, env->irg, bl, curr_sp, curr_no_reg, curr_bp, *mem, mode_T);
set_ia32_am_support(store_bp, ia32_am_Dest);
set_ia32_am_flavour(store_bp, ia32_B);
ir_node *curr_no_reg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_XXX]);
if(env->flags.try_omit_fp) {
- curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
+ curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
}
else {
irg_walk_blkwise_graph(cg->irg, ia32_place_consts_set_modes, ia32_transform_node, cg);
be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
- edges_deactivate(cg->irg);
- dead_node_elimination(cg->irg);
- edges_activate(cg->irg);
-
cg->mod = old_mod;
if (cg->opt.doam) {
+ edges_deactivate(cg->irg);
+ //dead_node_elimination(cg->irg);
+ edges_activate(cg->irg);
+
irg_walk_blkwise_graph(cg->irg, NULL, ia32_optimize_am, cg);
be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
}
const arch_register_t *out_reg, *in_reg;
int n_res, i;
ir_node *copy, *in_node, *block;
+ ia32_op_type_t op_tp;
if (! is_ia32_irn(irn))
return;
- /* nodes with destination address mode don't produce values */
- if (get_ia32_op_type(irn) == ia32_AddrModeD)
+ /* AM Dest nodes don't produce any values */
+ op_tp = get_ia32_op_type(irn);
+ if (op_tp == ia32_AddrModeD)
return;
reqs = get_ia32_out_req_all(irn);
block = get_nodes_block(irn);
/* check all OUT requirements, if there is a should_be_same */
- for (i = 0; i < n_res; i++) {
- if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
- /* get in and out register */
- out_reg = get_ia32_out_reg(irn, i);
- in_node = get_irn_n(irn, reqs[i]->same_pos);
- in_reg = arch_get_irn_register(cg->arch_env, in_node);
-
- /* check if in and out register are equal */
- if (arch_register_get_index(out_reg) != arch_register_get_index(in_reg)) {
- DBG((cg->mod, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, reqs[i]->same_pos));
-
- /* create copy from in register */
- copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node);
-
- /* destination is the out register */
- arch_set_irn_register(cg->arch_env, copy, out_reg);
-
- /* insert copy before the node into the schedule */
- sched_add_before(irn, copy);
-
- /* set copy as in */
- set_irn_n(irn, reqs[i]->same_pos, copy);
+ if (op_tp == ia32_Normal) {
+ for (i = 0; i < n_res; i++) {
+ if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
+ /* get in and out register */
+ out_reg = get_ia32_out_reg(irn, i);
+ in_node = get_irn_n(irn, reqs[i]->same_pos);
+ in_reg = arch_get_irn_register(cg->arch_env, in_node);
+
+ /* don't copy ignore nodes */
+ if (arch_irn_is(cg->arch_env, in_node, ignore))
+ continue;
+
+ /* check if in and out register are equal */
+ if (arch_register_get_index(out_reg) != arch_register_get_index(in_reg)) {
+ DBG((cg->mod, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, reqs[i]->same_pos));
+
+ /* create copy from in register */
+ copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node);
+
+ /* destination is the out register */
+ arch_set_irn_register(cg->arch_env, copy, out_reg);
+
+ /* insert copy before the node into the schedule */
+ sched_add_before(irn, copy);
+
+ /* set copy as in */
+ set_irn_n(irn, reqs[i]->same_pos, copy);
+ }
}
}
}
/* check if there is a sub which need to be transformed */
ia32_transform_sub_to_neg_add(irn, cg);
+
+ /* transform a LEA into an Add if possible */
+ ia32_transform_lea_to_add(irn, cg);
+
+ /* check for peephole optimization */
+ ia32_peephole_optimization(irn, cg);
}
/**
cg->opt.doam = 1;
cg->opt.placecnst = 1;
cg->opt.immops = 1;
+ cg->opt.extbb = 1;
#ifndef NDEBUG
if (isa->name_obst_size) {
ia32_register_init(isa);
ia32_create_opcodes();
+ ia32_register_copy_attr_func();
isa->regs_16bit = pmap_create();
isa->regs_8bit = pmap_create();
/* set stack parameters */
for (i = stack_idx; i < n; i++) {
- be_abi_call_param_stack(abi, i);
+ be_abi_call_param_stack(abi, i, 1, 0, 0);
}