/* check if there is a sub which need to be transformed */
ia32_transform_sub_to_neg_add(irn, cg);
+
+ /* transform a LEA into an Add if possible */
+ ia32_transform_lea_to_add(irn, cg);
}
/**
cg->opt.doam = 1;
cg->opt.placecnst = 1;
cg->opt.immops = 1;
+ cg->opt.extbb = 1;
#ifndef NDEBUG
if (isa->name_obst_size) {
unsigned doam : 1; /**< do address mode optimizations */
unsigned placecnst : 1; /**< place constants in the blocks where they are used */
unsigned immops : 1; /**< create operations with immediates */
+ unsigned extbb : 1; /**< do extended basic block scheduling */
} ia32_optimize_t;
typedef struct _ia32_code_gen_t {
#define SNPRINTF_BUF_LEN 128
+/* global arch_env for lc_printf functions */
static const arch_env_t *arch_env = NULL;
+/* indicates whether blocks are scheduled or not
+ (this variable is set automatically) */
+static int have_block_sched = 0;
+
/*************************************************************
* _ _ __ _ _
* (_) | | / _| | | | |
return buf;
}
-static int have_block_sched = 0;
/** Return the next block in Block schedule */
static ir_node *next_blk_sched(const ir_node *block) {
return have_block_sched ? get_irn_link(block) : NULL;
TestJmp_emitter(irn, env);
}
-/**
- * Emits code for conditional test and jump with immediate.
- */
-static void emit_ia32_TestJmp_i(const ir_node *irn, ia32_emit_env_t *env) {
- TestJmp_emitter(irn, env);
-}
+
/*********************************************************
* _ _ _
snprintf(cmnt_buf, SNPRINTF_BUF_LEN, "/* default case */");
IA32_DO_EMIT;
}
- snprintf(cmd_buf, SNPRINTF_BUF_LEN, ".long %s", get_cfop_target(tbl.branches[i].target, buf), last_value);
+ snprintf(cmd_buf, SNPRINTF_BUF_LEN, ".long %s", get_cfop_target(tbl.branches[i].target, buf));
snprintf(cmnt_buf, SNPRINTF_BUF_LEN, "/* case %d */", last_value);
IA32_DO_EMIT;
}
ia32_emit_func_prolog(F, irg);
irg_block_walk_graph(irg, ia32_gen_labels, NULL, &emit_env);
-#if 1
- have_block_sched = 0;
- irg_walk_blkwise_graph(irg, NULL, ia32_gen_block, &emit_env);
-#else
- compute_extbb(irg);
+ if (cg->opt.extbb) {
+ /* schedule extended basic blocks */
- list.start = NULL;
- list.end = NULL;
- irg_extblock_walk_graph(irg, NULL, create_block_list, &list);
+ compute_extbb(irg);
- have_block_sched = 1;
- for (block = list.start; block; block = get_irn_link(block))
- ia32_gen_block(block, &emit_env);
-#endif
+ list.start = NULL;
+ list.end = NULL;
+ irg_extblock_walk_graph(irg, NULL, create_block_list, &list);
+
+ have_block_sched = 1;
+ for (block = list.start; block; block = get_irn_link(block))
+ ia32_gen_block(block, &emit_env);
+ }
+ else {
+ /* "normal" block schedule */
+
+ have_block_sched = 0;
+ irg_walk_blkwise_graph(irg, NULL, ia32_gen_block, &emit_env);
+ }
ia32_emit_func_epilog(F, irg);
}
return 0;
if (nr == pn_DivMod_res_mod)
return 1;
-
- switch(get_ia32_flavour(pred)) {
- if (nr == pn_DivMod_res_div)
- return 0;
- if (nr == pn_DivMod_res_mod)
- return 1;
- assert(0 && "unsupported DivMod");
- }
+ assert(0 && "unsupported DivMod");
}
else if (is_ia32_fDiv(pred)) {
if (nr == pn_Quot_res)
return attr->cnst;
}
+/**
+ * Sets the string representation of the internal const.
+ * NOTE(review): only the pointer is stored, not a copy — the caller
+ * must keep the string alive for the lifetime of the node's attribute.
+ */
+void set_ia32_cnst(ir_node *node, char *cnst) {
+	ia32_attr_t *attr = get_ia32_attr(node);
+	attr->cnst = cnst;
+}
+
/**
* Sets the uses_frame flag.
*/
*/
char *get_ia32_cnst(const ir_node *node);
+/**
+ * Sets the string representation of the internal const.
+ */
+void set_ia32_cnst(ir_node *node, char *cnst);
+
/**
* Sets the uses_frame flag.
*/
ia32_am_B = ia32_B,
ia32_am_I = ia32_I,
ia32_am_IS = ia32_I | ia32_S,
+ ia32_am_BI = ia32_B | ia32_I,
ia32_am_OB = ia32_O | ia32_B,
ia32_am_OI = ia32_O | ia32_I,
ia32_am_OIS = ia32_O | ia32_I | ia32_S,
* Prints the old node name on cg obst and returns a pointer to it.
*/
const char *get_old_node_name(ia32_transform_env_t *env) {
- static int name_cnt = 0;
ia32_isa_t *isa = (ia32_isa_t *)env->cg->arch_env->isa;
lc_eoprintf(firm_get_arg_env(), isa->name_obst, "%+F", env->irn);
obstack_1grow(isa->name_obst, 0);
isa->name_obst_size += obstack_object_size(isa->name_obst);
- name_cnt++;
- if (name_cnt % 1024 == 0) {
- printf("name obst size reached %d bytes after %d nodes\n", isa->name_obst_size, name_cnt);
- }
return obstack_finish(isa->name_obst);
}
#endif /* NDEBUG */
}
}
+/**
+ * Transforms a LEA into an Add if possible.
+ * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION
+ * (it inspects the allocated registers to decide whether the
+ * LEA's result register coincides with one of its operands).
+ */
+void ia32_transform_lea_to_add(ir_node *irn, ia32_code_gen_t *cg) {
+	ia32_am_flavour_t am_flav;
+	int imm = 0;
+	ir_node *res = NULL;
+	ir_node *nomem, *noreg, *base, *index, *op1, *op2;
+	char *offs;
+	ia32_transform_env_t tenv;
+	arch_register_t *out_reg, *base_reg, *index_reg;
+
+	/* must be a LEA */
+	if (! is_ia32_Lea(irn))
+		return;
+
+	am_flav = get_ia32_am_flavour(irn);
+
+	/* only some LEAs can be transformed into an Add */
+	if (am_flav != ia32_am_B && am_flav != ia32_am_OB && am_flav != ia32_am_OI && am_flav != ia32_am_BI)
+		return;
+
+	noreg = ia32_new_NoReg_gp(cg);
+	nomem = new_rd_NoMem(cg->irg);
+	op1   = noreg;
+	op2   = noreg;
+	base  = get_irn_n(irn, 0);
+	index = get_irn_n(irn, 1);
+
+	offs = get_ia32_am_offs(irn);
+
+	/* offset has an explicit sign -> we need to skip a leading + */
+	if (offs && offs[0] == '+')
+		offs++;
+
+	out_reg   = arch_get_irn_register(cg->arch_env, irn);
+	base_reg  = arch_get_irn_register(cg->arch_env, base);
+	index_reg = arch_get_irn_register(cg->arch_env, index);
+
+	tenv.block = get_nodes_block(irn);
+	tenv.dbg   = get_irn_dbg_info(irn);
+	tenv.irg   = cg->irg;
+	tenv.irn   = irn;
+	tenv.mod   = cg->mod;
+	tenv.mode  = get_irn_mode(irn);
+	tenv.cg    = cg;
+
+	/* use the cached flavour instead of re-querying the node */
+	switch (am_flav) {
+		case ia32_am_B:
+			/* out register must be same as base register */
+			if (! REGS_ARE_EQUAL(out_reg, base_reg))
+				return;
+
+			op1 = base;
+			break;
+		case ia32_am_OB:
+			/* out register must be same as base register */
+			if (! REGS_ARE_EQUAL(out_reg, base_reg))
+				return;
+
+			op1 = base;
+			imm = 1;
+			break;
+		case ia32_am_OI:
+			/* out register must be same as index register */
+			if (! REGS_ARE_EQUAL(out_reg, index_reg))
+				return;
+
+			op1 = index;
+			imm = 1;
+			break;
+		case ia32_am_BI:
+			/* out register must be same as one of the in registers */
+			if (REGS_ARE_EQUAL(out_reg, base_reg)) {
+				op1 = base;
+				op2 = index;
+			}
+			else if (REGS_ARE_EQUAL(out_reg, index_reg)) {
+				op1 = index;
+				op2 = base;
+			}
+			else {
+				/* in registers are different from out -> no Add possible */
+				return;
+			}
+			break;
+		default:
+			break;
+	}
+
+	res = new_rd_ia32_Add(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, op1, op2, nomem, mode_T);
+	arch_set_irn_register(cg->arch_env, res, out_reg);
+	set_ia32_op_type(res, ia32_Normal);
+
+	/* the LEA's offset (if any) becomes the immediate operand of the Add */
+	if (imm)
+		set_ia32_cnst(res, offs);
+
+	SET_IA32_ORIG_NODE(res, get_old_node_name(&tenv));
+
+	/* add Add to schedule */
+	sched_add_before(irn, res);
+
+	res = new_rd_Proj(tenv.dbg, tenv.irg, tenv.block, res, tenv.mode, 0);
+
+	/* add result Proj to schedule */
+	sched_add_before(irn, res);
+
+	/* remove the old LEA from the schedule */
+	sched_remove(irn);
+
+	/* exchange the LEA with the result Proj of the new Add */
+	exchange(irn, res);
+}
+
/**
* Transforms the given firm node (and maybe some other related nodes)
* into one or more assembler nodes.
*/
void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg);
+/**
+ * Transforms a LEA into an Add if possible.
+ * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION.
+ */
+void ia32_transform_lea_to_add(ir_node *irn, ia32_code_gen_t *cg);
+
#endif /* _IA32_TRANSFORM_H_ */