const ia32_register_req_t *irn_req;
long node_pos = pos == -1 ? 0 : pos;
ir_mode *mode = is_Block(irn) ? NULL : get_irn_mode(irn);
- firm_dbg_module_t *mod = firm_dbg_register(DEBUG_MODULE);
+ FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);
if (is_Block(irn) || mode == mode_M || mode == mode_X) {
DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
-
if (is_Proj(irn)) {
if (pos == -1) {
node_pos = ia32_translate_proj_pos(irn);
char buf[64];
const ia32_irn_ops_t *ops = self;
- if (is_ia32_use_frame(irn) && bias != 0) {
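+ /* this node accesses a frame entity, so the stack bias must be folded into its offset */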
+ if (get_ia32_frame_ent(irn)) {
ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);
DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
snprintf(buf, sizeof(buf), "%d", bias);
- add_ia32_am_offs(irn, buf);
- am_flav |= ia32_O;
- set_ia32_am_flavour(irn, am_flav);
+
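+ /* Normal nodes carry the bias as an immediate constant; address mode nodes fold it into the AM offset */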
+ if (get_ia32_op_type(irn) == ia32_Normal) {
+ set_ia32_cnst(irn, buf);
+ }
+ else {
+ add_ia32_am_offs(irn, buf);
+ am_flav |= ia32_O;
+ set_ia32_am_flavour(irn, am_flav);
+ }
}
}
*/
static void ia32_prepare_graph(void *self) {
ia32_code_gen_t *cg = self;
- firm_dbg_module_t *old_mod = cg->mod;
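+ /* cg->mod only exists in debug builds, so guard the save/restore with DEBUG_ONLY */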
+ DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)
- cg->mod = firm_dbg_register("firm.be.ia32.transform");
+ FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
irg_walk_blkwise_graph(cg->irg, ia32_place_consts_set_modes, ia32_transform_node, cg);
be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
- cg->mod = old_mod;
+ DEBUG_ONLY(cg->mod = old_mod;)
if (cg->opt.doam) {
edges_deactivate(cg->irg);
static void ia32_finish_irg_walker(ir_node *irn, void *env) {
ia32_code_gen_t *cg = env;
const ia32_register_req_t **reqs;
- const arch_register_t *out_reg, *in_reg;
+ const arch_register_t *out_reg, *in_reg, *in2_reg;
int n_res, i;
- ir_node *copy, *in_node, *block;
+ ir_node *copy, *in_node, *block, *in2_node;
ia32_op_type_t op_tp;
- if (! is_ia32_irn(irn))
- return;
-
- /* AM Dest nodes don't produce any values */
- op_tp = get_ia32_op_type(irn);
- if (op_tp == ia32_AddrModeD)
- return;
-
- reqs = get_ia32_out_req_all(irn);
- n_res = get_ia32_n_res(irn);
- block = get_nodes_block(irn);
-
- /* check all OUT requirements, if there is a should_be_same */
- if (op_tp == ia32_Normal) {
- for (i = 0; i < n_res; i++) {
- if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
- /* get in and out register */
- out_reg = get_ia32_out_reg(irn, i);
- in_node = get_irn_n(irn, reqs[i]->same_pos);
- in_reg = arch_get_irn_register(cg->arch_env, in_node);
-
- /* don't copy ignore nodes */
- if (arch_irn_is(cg->arch_env, in_node, ignore))
- continue;
-
- /* check if in and out register are equal */
- if (arch_register_get_index(out_reg) != arch_register_get_index(in_reg)) {
- DBG((cg->mod, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, reqs[i]->same_pos));
-
- /* create copy from in register */
- copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node);
-
- /* destination is the out register */
- arch_set_irn_register(cg->arch_env, copy, out_reg);
-
- /* insert copy before the node into the schedule */
- sched_add_before(irn, copy);
-
- /* set copy as in */
- set_irn_n(irn, reqs[i]->same_pos, copy);
+ if (is_ia32_irn(irn)) {
+ /* AM Dest nodes don't produce any values */
+ op_tp = get_ia32_op_type(irn);
+ if (op_tp == ia32_AddrModeD)
+ return;
+
+ reqs = get_ia32_out_req_all(irn);
+ n_res = get_ia32_n_res(irn);
+ block = get_nodes_block(irn);
+
+ /* check all OUT requirements for a should_be_same constraint */
+ if (op_tp == ia32_Normal) {
+ for (i = 0; i < n_res; i++) {
+ if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
+ /* get in and out register */
+ out_reg = get_ia32_out_reg(irn, i);
+ in_node = get_irn_n(irn, reqs[i]->same_pos);
+ in_reg = arch_get_irn_register(cg->arch_env, in_node);
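+ /* fetch the second input as well: for commutative ops a swap may avoid the copy */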
+ in2_node = get_irn_n(irn, reqs[i]->same_pos ^ 1);
+ in2_reg = arch_get_irn_register(cg->arch_env, in2_node);
+
+ /* don't copy ignore nodes that are Projs (their register is fixed) */
+ if (arch_irn_is(cg->arch_env, in_node, ignore) && is_Proj(in_node))
+ continue;
+
+ /* check if in and out register are equal */
+ if (! REGS_ARE_EQUAL(out_reg, in_reg)) {
+ /* in case of a commutative op: just exchange the inputs */
+ if (is_ia32_commutative(irn) && REGS_ARE_EQUAL(out_reg, in2_reg)) {
+ set_irn_n(irn, reqs[i]->same_pos, in2_node);
+ set_irn_n(irn, reqs[i]->same_pos ^ 1, in_node);
+ }
+ else {
+ DBG((cg->mod, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, reqs[i]->same_pos));
+ /* create copy from in register */
+ copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node);
+
+ /* destination is the out register */
+ arch_set_irn_register(cg->arch_env, copy, out_reg);
+
+ /* insert copy before the node into the schedule */
+ sched_add_before(irn, copy);
+
+ /* set copy as in */
+ set_irn_n(irn, reqs[i]->same_pos, copy);
+ }
+ }
}
}
}
- }
- /* check if there is a sub which need to be transformed */
- ia32_transform_sub_to_neg_add(irn, cg);
+ /* If we have a CondJmp with an immediate, we need to check whether
+    it is the right operand; otherwise we have to change it, as CMP
+    does not support an immediate as its left operand. */
+ if (is_ia32_CondJmp(irn) && (is_ia32_ImmConst(irn) || is_ia32_ImmSymConst(irn)) && op_tp == ia32_AddrModeS) {
+ long pnc = get_negated_pnc(get_ia32_pncode(irn), get_ia32_res_mode(irn));
+ set_ia32_op_type(irn, ia32_AddrModeD);
+ set_ia32_pncode(irn, pnc);
+ }
+
+ /* check if there is a Sub which needs to be transformed */
+ ia32_transform_sub_to_neg_add(irn, cg);
- /* transform a LEA into an Add if possible */
- ia32_transform_lea_to_add(irn, cg);
+ /* transform a LEA into an Add if possible */
+ ia32_transform_lea_to_add(irn, cg);
+ }
/* check for peephole optimization */
ia32_peephole_optimization(irn, cg);
}
if (mode_is_float(mode)) {
- new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
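+ /* float load: SSE2 load or virtual x87 load, depending on the FP unit in use */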
+ if (USE_SSE2(env->cg))
+ new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+ else
+ new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
}
else {
new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
reg = arch_get_irn_register(env->cg->arch_env, irn);
arch_set_irn_register(env->cg->arch_env, new_op, reg);
- exchange(irn, proj);
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, new_op));
+ exchange(irn, proj);
}
/**
}
if (mode_is_float(mode)) {
- new_op = new_rd_ia32_fStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem, mode_T);
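+ /* float store: SSE2 store or virtual x87 store, depending on the FP unit in use */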
+ if (USE_SSE2(env->cg))
+ new_op = new_rd_ia32_fStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem, mode_T);
+ else
+ new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem, mode_T);
}
else if (get_mode_size_bits(mode) == 8) {
new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem, mode_T);
set_ia32_am_support(new_op, ia32_am_Dest);
set_ia32_op_type(new_op, ia32_AddrModeD);
set_ia32_am_flavour(new_op, ia32_B);
- set_ia32_ls_mode(new_op, get_irn_mode(val));
+ set_ia32_ls_mode(new_op, mode);
set_ia32_frame_ent(new_op, ent);
set_ia32_use_frame(new_op);
- proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, 0);
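+ /* a Store produces only a memory result */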
+ proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, 0);
if (sched_point) {
sched_add_after(sched_point, new_op);
sched_remove(irn);
}
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, new_op));
+
exchange(irn, proj);
+}
+/**
+ * Fixes the mode for Spill/Reload: we always spill/reload whole registers,
+ * so float values become mode_D (SSE2) or mode_E (x87) and everything
+ * else mode_Is.
+ */
+static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
+{
+ if (mode_is_float(mode)) {
+ if (USE_SSE2(cg))
+ mode = mode_D;
+ else
+ mode = mode_E;
+ }
+ else
+ mode = mode_Is;
+ return mode;
}
/**
- * Calls the transform functions for StackParam, Spill and Reload.
+ * Block-Walker: Calls the transform functions for Spill and Reload.
*/
-static void ia32_after_ra_walker(ir_node *node, void *env) {
+static void ia32_after_ra_walker(ir_node *block, void *env) {
+ ir_node *node, *prev;
ia32_code_gen_t *cg = env;
ia32_transform_env_t tenv;
- if (is_Block(node))
- return;
-
- tenv.block = get_nodes_block(node);
- tenv.dbg = get_irn_dbg_info(node);
+ tenv.block = block;
tenv.irg = current_ir_graph;
- tenv.irn = node;
- tenv.mod = cg->mod;
- tenv.mode = get_irn_mode(node);
tenv.cg = cg;
-
- /* be_is_StackParam(node) || */
- if (be_is_Reload(node)) {
- transform_to_Load(&tenv);
- }
- else if (be_is_Spill(node)) {
- transform_to_Store(&tenv);
+ DEBUG_ONLY(tenv.mod = cg->mod;)
+
+ /* beware: the schedule is changed here; fetch prev before the current node is removed */
+ for (node = sched_last(block); !sched_is_begin(node); node = prev) {
+ prev = sched_prev(node);
+ if (be_is_Reload(node)) {
+ /* we always reload the whole register */
+ tenv.dbg = get_irn_dbg_info(node);
+ tenv.irn = node;
+ tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
+ transform_to_Load(&tenv);
+ }
+ else if (be_is_Spill(node)) {
+ /* we always spill the whole register */
+ tenv.dbg = get_irn_dbg_info(node);
+ tenv.irn = node;
+ tenv.mode = fix_spill_mode(cg, get_irn_mode(be_get_Spill_context(node)));
+ transform_to_Store(&tenv);
+ }
}
}
/**
- * We transform StackParam, Spill and Reload here. This needs to be done before
+ * We transform Spill and Reload here. This needs to be done before
* stack biasing otherwise we would miss the corrected offset for these nodes.
+ *
+ * If x87 instructions should be emitted, run the x87 simulator and patch
+ * the virtual instructions. This must obviously be done after register allocation.
*/
static void ia32_after_ra(void *self) {
ia32_code_gen_t *cg = self;
- irg_walk_blkwise_graph(cg->irg, NULL, ia32_after_ra_walker, self);
+ irg_block_walk_graph(cg->irg, NULL, ia32_after_ra_walker, self);
/* if we do x87 code generation, rewrite all the virtual instructions and registers */
- if (USE_x87(cg)) {
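+ /* only run the simulator when x87 nodes are actually in use */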
+ if (cg->used_x87) {
x87_simulate_graph(cg->arch_env, cg->irg, cg->blk_sched);
- be_dump(cg->irg, "-x87", dump_ir_extblock_graph_sched);
}
}
cg->impl = &ia32_code_gen_if;
cg->irg = birg->irg;
cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
- cg->mod = firm_dbg_register("firm.be.ia32.cg");
cg->out = F;
cg->arch_env = birg->main_env->arch_env;
cg->types = pmap_create();
cg->birg = birg;
cg->blk_sched = NULL;
cg->fp_kind = isa->fp_kind;
+ cg->used_x87 = 0;
+
+ FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");
/* set optimizations */
cg->opt.incdec = 0;
- * Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
+ * Returns the trivial scheduler with to_appear_in_schedule() overloaded
*/
static const list_sched_selector_t *ia32_get_list_sched_selector(const void *self) {
- memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
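+ /* use the trivial selector for now; the reg_pressure selector is kept above for reference */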
+// memcpy(&ia32_sched_selector, reg_pressure_selector, sizeof(list_sched_selector_t));
+ memcpy(&ia32_sched_selector, trivial_selector, sizeof(list_sched_selector_t));
ia32_sched_selector.to_appear_in_schedule = ia32_to_appear_in_schedule;
return &ia32_sched_selector;
}
+/**
+ * Returns the necessary byte alignment for storing a register of a given class.
+ */
+static int ia32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
+ ir_mode *mode = arch_register_class_mode(cls);
+ int bytes = get_mode_size_bytes(mode);
+
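+ /* float registers wider than 8 bytes (x87 extended precision / XMM) need 16 byte slots */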
+ if (mode_is_float(mode) && bytes > 8)
+ return 16;
+ return bytes;
+}
+
#ifdef WITH_LIBCORE
static void ia32_register_options(lc_opt_entry_t *ent)
{
ia32_get_call_abi,
ia32_get_irn_handler,
ia32_get_code_generator_if,
- ia32_get_list_sched_selector
+ ia32_get_list_sched_selector,
+ ia32_get_reg_class_alignment
};