TEMPLATE_get_allowed_execution_units,
TEMPLATE_get_machine,
TEMPLATE_get_backend_irg_list,
+ NULL, /* mark remat */
TEMPLATE_parse_asm_constraint,
TEMPLATE_is_valid_clobber
};
ir_node *new_pred = be_transform_node(pred);
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
- ir_mode *mode = get_irn_mode(node);
long proj = get_Proj_proj(node);
switch(proj) {
arm_get_allowed_execution_units,
arm_get_machine,
arm_get_irg_list,
+ NULL, /* mark remat */
arm_parse_asm_constraint,
arm_is_valid_clobber
};
*/
ir_graph **(*get_backend_irg_list)(const void *self, ir_graph ***irgs);
+	/**
+	 * Mark a node as rematerialized. Backends that do not track this
+	 * information may leave the callback NULL.
+	 */
+ void (*mark_remat)(const void *self, ir_node *node);
+
/**
* parse an assembler constraint part and set flags according to its nature
* advances the *c pointer to point to the last parsed character (so if you
#define arch_env_get_backend_irg_list(env,irgs) ((env)->impl->get_backend_irg_list((env), (irgs)))
#define arch_env_parse_asm_constraint(env,c) ((env)->impl->parse_asm_constraint((env), (c)))
#define arch_env_is_valid_clobber(env,clobber) ((env)->impl->is_valid_clobber((env), (clobber)))
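+/** Mark a node as rematerialized; does nothing if the backend provides
+ *  no mark_remat callback. */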
+#define arch_env_mark_remat(env,node) \
+ do { if ((env)->impl->mark_remat != NULL) (env)->impl->mark_remat((env), (node)); } while(0)
/**
* ISA base class.
get_irn_op(spilled), get_irn_mode(spilled),
get_irn_arity(spilled), ins);
copy_node_attr(spilled, res);
+ arch_env_mark_remat(env->arch_env, res);
new_backedge_info(res);
DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
load_res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
ia32_copy_am_attrs(load, node);
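+	/* the reload mark must survive on the extracted load */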
+ if (is_ia32_is_reload(node))
+ set_ia32_is_reload(load);
set_irn_n(node, n_ia32_mem, new_NoMem());
switch (get_ia32_am_arity(node)) {
set_ia32_ls_mode(new_op, spillmode);
set_ia32_frame_ent(new_op, ent);
set_ia32_use_frame(new_op);
+ set_ia32_is_reload(new_op);
DBG_OPT_RELOAD2LD(node, new_op);
set_ia32_ls_mode(store, mode);
set_ia32_frame_ent(store, ent);
set_ia32_use_frame(store);
+ set_ia32_is_spill(store);
SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(cg, node));
DBG_OPT_SPILL2ST(node, store);
return NULL;
}
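+/**
+ * Backend interface callback: mark an ia32 node as rematerialized.
+ */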
+static void ia32_mark_remat(const void *self, ir_node *node) {
+ (void) self;
+ if (is_ia32_irn(node)) {
+ set_ia32_is_remat(node);
+ }
+}
+
/**
* Allows or disallows the creation of Psi nodes for the given Phi nodes.
* @return 1 if allowed, 0 otherwise
ia32_get_allowed_execution_units,
ia32_get_machine,
ia32_get_irg_list,
+ ia32_mark_remat,
ia32_parse_asm_constraint,
ia32_is_valid_clobber
};
static int do_pic;
static char pic_base_label[128];
static ir_label_t exc_label_id;
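+/** if set, the emitter marks spill/reload/remat instructions in the
+ *  assembly output (option be.ia32.mark_spill_reload) */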
+static int mark_spill_reload = 0;
/** Return the next block in Block schedule */
static ir_node *get_prev_block_sched(const ir_node *block)
DBG((dbg, LEVEL_1, "emitting code for %+F\n", node));
- if (is_ia32_irn(node) && get_ia32_exc_label(node)) {
- /* emit the exception label of this instruction */
- ia32_assign_exc_label(node);
+ if (is_ia32_irn(node)) {
+ if (get_ia32_exc_label(node)) {
+ /* emit the exception label of this instruction */
+ ia32_assign_exc_label(node);
+ }
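+		/* an xchg of a register with itself is a no-op: the marks below
+		 * do not change program behaviour, they only make spill, reload
+		 * and remat code easy to spot in the disassembly */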
+ if (mark_spill_reload) {
+ if (is_ia32_is_spill(node)) {
+ be_emit_cstring("\txchg %ebx, %ebx /* spill mark */\n");
+ be_emit_write_line();
+ }
+ if (is_ia32_is_reload(node)) {
+ be_emit_cstring("\txchg %edx, %edx /* reload mark */\n");
+ be_emit_write_line();
+ }
+ if (is_ia32_is_remat(node)) {
+ be_emit_cstring("\txchg %ecx, %ecx /* remat mark */\n");
+ be_emit_write_line();
+ }
+ }
}
if (op->ops.generic) {
emit_func_ptr func = (emit_func_ptr) op->ops.generic;
DEL_ARR_F(exc_list);
}
+static const lc_opt_table_entry_t ia32_emitter_options[] = {
+	LC_OPT_ENT_BOOL("mark_spill_reload", "mark spills, reloads and remats with xchg marker instructions", &mark_spill_reload),
+ LC_OPT_LAST
+};
+
void ia32_init_emitter(void)
{
+ lc_opt_entry_t *be_grp;
+ lc_opt_entry_t *ia32_grp;
+
+ be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
+ ia32_grp = lc_opt_get_grp(be_grp, "ia32");
+
+ lc_opt_add_table(ia32_grp, ia32_emitter_options);
+
FIRM_DBG_REGISTER(dbg, "firm.be.ia32.emitter");
}
/* copy address mode information to load */
set_ia32_op_type(load, ia32_AddrModeS);
ia32_copy_am_attrs(load, irn);
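+	/* keep the reload mark on the newly created load */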
+ if (is_ia32_is_reload(irn))
+ set_ia32_is_reload(load);
/* insert the load into schedule */
sched_add_before(irn, load);
return attr->data.is_reload;
}
+void set_ia32_is_spill(ir_node *node) {
+ ia32_attr_t *attr = get_ia32_attr(node);
+ attr->data.is_spill = 1;
+}
+
+int is_ia32_is_spill(const ir_node *node) {
+ const ia32_attr_t *attr = get_ia32_attr_const(node);
+ return attr->data.is_spill;
+}
+
+void set_ia32_is_remat(ir_node *node) {
+ ia32_attr_t *attr = get_ia32_attr(node);
+ attr->data.is_remat = 1;
+}
+
+int is_ia32_is_remat(const ir_node *node) {
+ const ia32_attr_t *attr = get_ia32_attr_const(node);
+ return attr->data.is_remat;
+}
+
/**
* Gets the mode of the stored/loaded value (only set for Store/Load)
*/
void set_ia32_is_reload(ir_node *node);
int is_ia32_is_reload(const ir_node *node);
+/** Marks the node as a spill. */
+void set_ia32_is_spill(ir_node *node);
+/** Returns non-zero if the node is marked as a spill. */
+int is_ia32_is_spill(const ir_node *node);
+
+/** Marks the node as rematerialized. */
+void set_ia32_is_remat(ir_node *node);
+/** Returns non-zero if the node is marked as rematerialized. */
+int is_ia32_is_remat(const ir_node *node);
+
/**
* Gets the mode of the stored/loaded value (only set for Store/Load)
*/
typedef enum {
match_commutative = 1 << 0, /**< inputs are commutative */
- match_am_and_immediates = 1 << 1, /**< mode support AM and immediate at
+ match_am_and_immediates = 1 << 1, /**< node supports AM and immediate at
the same time */
match_am = 1 << 2, /**< node supports (32bit) source AM */
match_8bit_am = 1 << 3, /**< node supports 8bit source AM */
(for commutative nodes) */
unsigned cmp_unsigned : 1; /**< compare should be unsigned */
unsigned is_reload : 1; /**< node performs a reload */
+		unsigned is_spill : 1;      /**< node performs a spill */
+		unsigned is_remat : 1;      /**< node was rematerialized */
} data;
int *out_flags; /**< flags for each produced value */
static const arch_env_t *arch_env;
static ia32_code_gen_t *cg;
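+/**
+ * Copy the spill/reload/remat marks from a node to its replacement so
+ * peephole optimizations do not lose the information.
+ */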
+static void copy_mark(const ir_node *old, ir_node *new)
+{
+ if (is_ia32_is_reload(old))
+ set_ia32_is_reload(new);
+ if (is_ia32_is_spill(old))
+ set_ia32_is_spill(new);
+ if (is_ia32_is_remat(old))
+ set_ia32_is_remat(new);
+}
+
/**
* Returns non-zero if the given node produces
* a zero flag.
}
sched_add_before(node, test);
+ copy_mark(node, test);
be_peephole_exchange(node, test);
}
spreg = arch_get_irn_register(cg->arch_env, curr_sp);
push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, val, curr_sp);
+ copy_mark(store, push);
if (first_push == NULL)
first_push = push;
be_set_IncSP_offset(irn, inc_ofs);
}
+#if 0
+static void peephole_store_incsp(ir_node *store)
+{
+ dbg_info *dbgi;
+ ir_node *node;
+ ir_node *block;
+	ir_node *noreg;
+ ir_node *mem;
+ ir_node *push;
+ ir_node *val;
+ ir_node *am_base = get_irn_n(store, n_ia32_Store_base);
+ if (!be_is_IncSP(am_base)
+ || get_nodes_block(am_base) != get_nodes_block(store))
+ return;
+ mem = get_irn_n(store, n_ia32_Store_mem);
+ if (!is_ia32_NoReg_GP(get_irn_n(store, n_ia32_Store_index))
+ || !is_NoMem(mem))
+ return;
+
+ int incsp_offset = be_get_IncSP_offset(am_base);
+ if (incsp_offset <= 0)
+ return;
+
+ /* we have to be at offset 0 */
+ int my_offset = get_ia32_am_offs_int(store);
+ if (my_offset != 0) {
+		/* TODO: find out whether there is a store with offset 0 before
+		 * us and whether we can move it down to our place */
+ return;
+ }
+ ir_mode *ls_mode = get_ia32_ls_mode(store);
+ int my_store_size = get_mode_size_bytes(ls_mode);
+
+ if (my_offset + my_store_size > incsp_offset)
+ return;
+
+	/* correctness checking:
+	   - no one else may write to that stack slot
+	     (because after the conversion the IncSP won't allocate it anymore)
+	*/
+ sched_foreach_reverse_from(store, node) {
+ int i, arity;
+
+ if (node == am_base)
+ break;
+
+		/* make sure no one else can use the space on the stack */
+ arity = get_irn_arity(node);
+ for (i = 0; i < arity; ++i) {
+ ir_node *pred = get_irn_n(node, i);
+ if (pred != am_base)
+ continue;
+
+ if (i == n_ia32_base &&
+ (get_ia32_op_type(node) == ia32_AddrModeS
+ || get_ia32_op_type(node) == ia32_AddrModeD)) {
+ int node_offset = get_ia32_am_offs_int(node);
+ ir_mode *node_ls_mode = get_ia32_ls_mode(node);
+				int node_size = get_mode_size_bytes(node_ls_mode);
+ /* overlapping with our position? abort */
+ if (node_offset < my_offset + my_store_size
+ && node_offset + node_size >= my_offset)
+ return;
+ /* otherwise it's fine */
+ continue;
+ }
+
+ /* strange use of esp: abort */
+ return;
+ }
+ }
+
+ /* all ok, change to push */
+ dbgi = get_irn_dbg_info(store);
+ block = get_nodes_block(store);
+ noreg = ia32_new_NoReg_gp(cg);
+	val   = get_irn_n(store, n_ia32_Store_val);
+
+	/* sketch only: this transformation is disabled and was never
+	 * finished; the Push is constructed like the one in the
+	 * IncSP/Store peephole above */
+	push  = new_rd_ia32_Push(dbgi, current_ir_graph, block, noreg, noreg,
+	                         mem, val, am_base);
+	(void) push;   /* TODO: rewire stack users and the schedule */
+}
+#endif
+
/**
* Return true if a mode can be stored in the GP register set
*/
pop = new_rd_ia32_Pop(get_irn_dbg_info(load), irg, block, mem, pred_sp);
arch_set_irn_register(arch_env, pop, reg);
+ copy_mark(load, pop);
+
/* create stackpointer Proj */
pred_sp = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
arch_set_irn_register(arch_env, pred_sp, esp);
sched_add_before(node, produceval);
sched_add_before(node, xor);
+ copy_mark(node, xor);
be_peephole_exchange(node, xor);
}
/* exchange the Add and the LEA */
sched_add_before(node, res);
+ copy_mark(node, res);
be_peephole_exchange(node, res);
}
mips_get_allowed_execution_units,
mips_get_machine,
mips_get_irg_list,
+ NULL, /* mark remat */
mips_parse_asm_constraint,
mips_is_valid_clobber
};
ppc32_get_allowed_execution_units,
ppc32_get_machine,
ppc32_get_irg_list,
+ NULL, /* mark remat */
ppc32_parse_asm_constraint,
ppc32_is_valid_clobber
};