#include "../beblocksched.h"
#include "../bemachine.h"
#include "../beilpsched.h"
+#include "../bespillslots.h"
+#include "../bemodule.h"
#include "bearch_ia32_t.h"
ir_mode *mode = is_Block(irn) ? NULL : get_irn_mode(irn);
FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);
- if (is_Block(irn) || mode == mode_M || mode == mode_X) {
+ if (is_Block(irn) || mode == mode_X) {
DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
return NULL;
}
DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
if (is_Proj(irn)) {
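+ 		/* a Proj with mode_M carries no value in a register, so it has no requirements */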
+ if(mode == mode_M)
+ return NULL;
+
if(pos >= 0) {
DBG((mod, LEVEL_1, "ignoring request IN requirements for node %+F\n", irn));
return NULL;
}
node_pos = (pos == -1) ? get_Proj_proj(irn) : pos;
- irn = skip_Proj(irn);
+ irn = skip_Proj_const(irn);
DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
}
}
pos = get_Proj_proj(irn);
- irn = skip_Proj(irn);
+ irn = skip_Proj_const(irn);
}
if (is_ia32_irn(irn)) {
static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
arch_irn_class_t classification = arch_irn_class_normal;
- irn = skip_Proj(irn);
+ irn = skip_Proj_const(irn);
if (is_cfop(irn))
classification |= arch_irn_class_branch;
/* pred is only set if we have a Proj */
flags = pred && is_ia32_irn(pred) ? get_ia32_out_flags(pred, get_Proj_proj(irn)) : arch_irn_flags_none;
- irn = skip_Proj(irn);
+ irn = skip_Proj_const(irn);
if (is_ia32_irn(irn))
flags |= get_ia32_flags(irn);
}
ir_graph *irg; /**< The associated graph. */
} ia32_abi_env_t;
-static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
+static ir_entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}
-static void ia32_set_frame_entity(const void *self, ir_node *irn, entity *ent) {
+static void ia32_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) {
set_ia32_frame_ent(irn, ent);
}
static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
- char buf[64];
const ia32_irn_ops_t *ops = self;
if (get_ia32_frame_ent(irn)) {
- ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);
-
if(is_ia32_Pop(irn)) {
int omit_fp = be_abi_omit_fp(ops->cg->birg->abi);
if (omit_fp) {
DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
- snprintf(buf, sizeof(buf), "%d", bias);
-
if (get_ia32_op_type(irn) == ia32_Normal) {
+ // Matze: When does this case happen?
+ char buf[64];
+ snprintf(buf, sizeof(buf), "%d", bias);
set_ia32_cnst(irn, buf);
} else {
- add_ia32_am_offs(irn, buf);
+ ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);
am_flav |= ia32_O;
set_ia32_am_flavour(irn, am_flav);
+
+ add_ia32_am_offs_int(irn, bias);
}
}
}
long proj = get_Proj_proj(irn);
ir_node *pred = get_Proj_pred(irn);
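+ 	/* check the node kind before the proj number; presumably the pn_ values of Push and Pop overlap */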
- if (proj == pn_ia32_Push_stack && is_ia32_Push(pred))
+ if (is_ia32_Push(pred) && proj == pn_ia32_Push_stack)
return 4;
- if (proj == pn_ia32_Pop_stack && is_ia32_Pop(pred))
+ if (is_ia32_Pop(pred) && proj == pn_ia32_Pop_stack)
return -4;
}
ia32_abi_env_t *env = self;
if (! between_type) {
- entity *old_bp_ent;
- entity *ret_addr_ent;
- entity *omit_fp_ret_addr_ent;
+ ir_entity *old_bp_ent;
+ ir_entity *ret_addr_ent;
+ ir_entity *omit_fp_ret_addr_ent;
ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_P);
ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);
old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
- set_entity_offset_bytes(old_bp_ent, 0);
- set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
+ set_entity_offset(old_bp_ent, 0);
+ set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
set_type_state(between_type, layout_fixed);
omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
- set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
+ set_entity_offset(omit_fp_ret_addr_ent, 0);
set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
set_type_state(omit_fp_between_type, layout_fixed);
}
static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
ir_graph *irg;
ir_mode *mode;
+ ir_mode *irn_mode;
ir_node *block, *noreg, *nomem;
- int pnc;
+ dbg_info *dbg;
/* we cannot invert non-ia32 irns */
if (! is_ia32_irn(irn))
if (get_ia32_op_type(irn) != ia32_Normal)
return NULL;
- irg = get_irn_irg(irn);
- block = get_nodes_block(irn);
- mode = get_ia32_res_mode(irn);
- noreg = get_irn_n(irn, 0);
- nomem = new_r_NoMem(irg);
+ irg = get_irn_irg(irn);
+ block = get_nodes_block(irn);
+ mode = get_ia32_res_mode(irn);
+ irn_mode = get_irn_mode(irn);
+ noreg = get_irn_n(irn, 0);
+ nomem = new_r_NoMem(irg);
+ dbg = get_irn_dbg_info(irn);
/* initialize structure */
inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
inverse->costs = 0;
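+ 	/* only one node is returned: the result Proj is no longer built, since the ia32 nodes now carry their result mode themselves */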
- inverse->n = 2;
+ inverse->n = 1;
switch (get_ia32_irn_opcode(irn)) {
case iro_ia32_Add:
if (get_ia32_immop_type(irn) == ia32_ImmConst) {
/* we have an add with a const here */
/* inverse == add with negated const */
- inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
- pnc = pn_ia32_Add_res;
+ inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem, irn_mode);
inverse->costs += 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
/* we have an add with a symconst here */
/* inverse == sub with const */
- inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
- pnc = pn_ia32_Sub_res;
+ inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem, irn_mode);
inverse->costs += 2;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal add: inverse == sub */
- ir_node *proj = ia32_get_res_proj(irn);
- assert(proj);
-
- inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
- pnc = pn_ia32_Sub_res;
+ inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, i ^ 1), nomem, irn_mode);
inverse->costs += 2;
}
break;
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* we have a sub with a const/symconst here */
/* inverse == add with this const */
- inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
- pnc = pn_ia32_Add_res;
+ inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem, irn_mode);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal sub */
- ir_node *proj = ia32_get_res_proj(irn);
- assert(proj);
-
if (i == 2) {
- inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
+ inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, 3), nomem, irn_mode);
}
else {
- inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);
+ inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, 2), (ir_node*) irn, nomem, irn_mode);
}
- pnc = pn_ia32_Sub_res;
inverse->costs += 1;
}
break;
case iro_ia32_Eor:
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* xor with const: inverse = xor */
- inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
- pnc = pn_ia32_Eor_res;
+ inverse->nodes[0] = new_rd_ia32_Eor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem, irn_mode);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal xor */
- inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
- pnc = pn_ia32_Eor_res;
+ inverse->nodes[0] = new_rd_ia32_Eor(dbg, irg, block, noreg, noreg, (ir_node *) irn, get_irn_n(irn, i), nomem, irn_mode);
inverse->costs += 1;
}
break;
case iro_ia32_Not: {
- ir_node *proj = ia32_get_res_proj(irn);
- assert(proj);
-
- inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
- pnc = pn_ia32_Not_res;
+ inverse->nodes[0] = new_rd_ia32_Not(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem, irn_mode);
inverse->costs += 1;
break;
}
case iro_ia32_Minus: {
- ir_node *proj = ia32_get_res_proj(irn);
- assert(proj);
-
- inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
- pnc = pn_ia32_Minus_res;
+ inverse->nodes[0] = new_rd_ia32_Minus(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem, irn_mode);
inverse->costs += 1;
break;
}
}
set_ia32_res_mode(inverse->nodes[0], mode);
- inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);
return inverse;
}
+/**
+ * Returns the mode that should be used for spilling the given value node
+ */
+static ir_mode *get_spill_mode(ia32_code_gen_t *cg, const ir_node *node)
+{
+ ir_mode *mode = get_irn_mode(node);
+ if (mode_is_float(mode)) {
+#if 0
+ // super exact spilling...
+ if (USE_SSE2(cg))
+ return mode_D;
+ else
+ return mode_E;
+#else
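+ 		// for now, keep the value's own mode when spilling floats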
+ return mode;
+#endif
+ }
+ else
+ return mode_Is;
+}
+
+/**
+ * Checks whether an addressmode reload of a value with the given mode is
+ * compatible with a spill slot of mode spillmode
+ */
+static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
+{
+ if(mode_is_float(mode)) {
+ return mode == spillmode;
+ } else {
+ return 1;
+ }
+}
+
/**
 * Check if irn can load its operand at position i from memory (source addressmode).
* @param self Pointer to irn ops itself
* @return Non-Zero if operand can be loaded
*/
static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
+ const ia32_irn_ops_t *ops = self;
+ ia32_code_gen_t *cg = ops->cg;
+ ir_node *op = get_irn_n(irn, i);
+ const ir_mode *mode = get_irn_mode(op);
+ const ir_mode *spillmode = get_spill_mode(cg, op);
+
if (! is_ia32_irn(irn) || /* must be an ia32 irn */
get_irn_arity(irn) != 5 || /* must be a binary operation */
get_ia32_op_type(irn) != ia32_Normal || /* must not already be an addressmode irn */
! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source addressmode */
+ ! ia32_is_spillmode_compatible(mode, spillmode) || /* the reload mode must fit the mode the value was spilled in */
(i != 2 && i != 3) || /* a "real" operand position must be requested */
(i == 2 && ! is_ia32_commutative(irn)) || /* if first operand requested irn must be commutative */
is_ia32_use_frame(irn)) /* must not already use frame */
set_ia32_got_reload(irn);
set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn)));
- set_irn_n(irn, 4, spill);
-
- /*
- Input at position one is index register, which is NoReg.
- We would need cg object to get a real noreg, but we cannot
- access it from here.
- */
set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
+ set_irn_n(irn, 4, spill);
//FIXME DBG_OPT_AM_S(reload, irn);
}
static void ia32_kill_convs(ia32_code_gen_t *cg) {
ir_node *irn;
- /* BEWARE: the Projs are inserted in the set */
foreach_nodeset(cg->kill_conv, irn) {
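+ 		/* the set now holds the Conv nodes themselves, not their Projs */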
- ir_node *in = get_irn_n(get_Proj_pred(irn), 2);
+ ir_node *in = get_irn_n(irn, 2);
edges_reroute(irn, in, cg->birg->irg);
}
}
newn = new_rd_ia32_LdTls(dbg, irg, blk, get_irn_mode(irn));
exchange(irn, newn);
+ set_irg_tls(irg, newn); /* the graph's tls reference has to point to the replacement node */
}
}
*/
static void ia32_prepare_graph(void *self) {
ia32_code_gen_t *cg = self;
- dom_front_info_t *dom;
DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)
FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
/* 2nd: transform all remaining nodes */
ia32_register_transformers();
- dom = be_compute_dominance_frontiers(cg->irg);
cg->kill_conv = new_nodeset(5);
transform_tls(cg->irg);
ia32_kill_convs(cg);
del_nodeset(cg->kill_conv);
- be_free_dominance_frontiers(dom);
-
if (cg->dump)
be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
/**
- * Transforms a be node into a Load.
+ * Transforms a be_Reload into an ia32 Load.
*/
static void transform_to_Load(ia32_transform_env_t *env) {
ir_node *irn = env->irn;
- entity *ent = be_get_frame_entity(irn);
- ir_mode *mode = env->mode;
+ ir_entity *ent = be_get_frame_entity(irn);
+ ir_mode *mode = get_irn_mode(irn);
+ ir_mode *spillmode = get_spill_mode(env->cg, irn);
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
- ir_node *nomem = new_rd_NoMem(env->irg);
ir_node *sched_point = NULL;
- ir_node *ptr = get_irn_n(irn, 0);
- ir_node *mem = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
+ ir_node *ptr = get_irg_frame(env->irg);
+ ir_node *mem = get_irn_n(irn, be_pos_Reload_mem);
ir_node *new_op, *proj;
const arch_register_t *reg;
sched_point = sched_prev(irn);
}
- if (mode_is_float(mode)) {
+ if (mode_is_float(spillmode)) {
if (USE_SSE2(env->cg))
new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
else
set_ia32_am_support(new_op, ia32_am_Source);
set_ia32_op_type(new_op, ia32_AddrModeS);
set_ia32_am_flavour(new_op, ia32_B);
- set_ia32_ls_mode(new_op, mode);
+ set_ia32_ls_mode(new_op, spillmode);
set_ia32_frame_ent(new_op, ent);
set_ia32_use_frame(new_op);
}
/**
- * Transforms a be node into a Store.
+ * Transforms a be_Spill node into an ia32 Store.
*/
static void transform_to_Store(ia32_transform_env_t *env) {
ir_node *irn = env->irn;
- entity *ent = be_get_frame_entity(irn);
- ir_mode *mode = env->mode;
+ ir_entity *ent = be_get_frame_entity(irn);
+ const ir_node *spillval = get_irn_n(irn, be_pos_Spill_val);
+ ir_mode *mode = get_spill_mode(env->cg, spillval);
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_rd_NoMem(env->irg);
- ir_node *ptr = get_irn_n(irn, 0);
- ir_node *val = get_irn_n(irn, 1);
- ir_node *new_op, *proj;
+ ir_node *ptr = get_irg_frame(env->irg);
+ ir_node *val = get_irn_n(irn, be_pos_Spill_val);
+ ir_node *store;
ir_node *sched_point = NULL;
if (sched_is_scheduled(irn)) {
if (mode_is_float(mode)) {
if (USE_SSE2(env->cg))
- new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
+ store = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
else
- new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
+ store = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
}
else if (get_mode_size_bits(mode) == 8) {
- new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
+ store = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
}
else {
- new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
+ store = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
}
- set_ia32_am_support(new_op, ia32_am_Dest);
- set_ia32_op_type(new_op, ia32_AddrModeD);
- set_ia32_am_flavour(new_op, ia32_B);
- set_ia32_ls_mode(new_op, mode);
- set_ia32_frame_ent(new_op, ent);
- set_ia32_use_frame(new_op);
-
- DBG_OPT_SPILL2ST(irn, new_op);
+ set_ia32_am_support(store, ia32_am_Dest);
+ set_ia32_op_type(store, ia32_AddrModeD);
+ set_ia32_am_flavour(store, ia32_B);
+ set_ia32_ls_mode(store, mode);
+ set_ia32_frame_ent(store, ent);
+ set_ia32_use_frame(store);
- proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M);
+ DBG_OPT_SPILL2ST(irn, store);
+ SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(env->cg, irn));
if (sched_point) {
- sched_add_after(sched_point, new_op);
+ sched_add_after(sched_point, store);
sched_remove(irn);
}
- SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
-
- exchange(irn, proj);
+ exchange(irn, store);
}
-static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent) {
+static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) {
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *frame = get_irg_frame(env->irg);
return push;
}
-static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent) {
+static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_entity *ent) {
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *frame = get_irg_frame(env->irg);
set_ia32_frame_ent(pop, ent);
set_ia32_use_frame(pop);
set_ia32_op_type(pop, ia32_AddrModeD);
- set_ia32_am_flavour(pop, ia32_B);
+ set_ia32_am_flavour(pop, ia32_am_OB);
set_ia32_ls_mode(pop, mode_Is);
sched_add_before(schedpoint, pop);
// create pushs
for(i = 0; i < arity; ++i) {
- entity *ent = be_get_MemPerm_in_entity(node, i);
+ ir_entity *ent = be_get_MemPerm_in_entity(node, i);
ir_type *enttype = get_entity_type(ent);
int entbits = get_type_size_bits(enttype);
ir_node *mem = get_irn_n(node, i + 1);
// create pops
for(i = arity - 1; i >= 0; --i) {
- entity *ent = be_get_MemPerm_out_entity(node, i);
+ ir_entity *ent = be_get_MemPerm_out_entity(node, i);
ir_type *enttype = get_entity_type(ent);
int entbits = get_type_size_bits(enttype);
sched_remove(node);
}
-/**
- * Fix the mode of Spill/Reload
- */
-static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
-{
- if (mode_is_float(mode)) {
- if (USE_SSE2(cg))
- mode = mode_D;
- else
- mode = mode_E;
- }
- else
- mode = mode_Is;
- return mode;
-}
-
/**
* Block-Walker: Calls the transform functions Spill and Reload.
*/
/* beware: the schedule is changed here */
for (node = sched_last(block); !sched_is_begin(node); node = prev) {
prev = sched_prev(node);
+ tenv.dbg = get_irn_dbg_info(node);
+ tenv.irn = node;
+ tenv.mode = get_irn_mode(node);
+
if (be_is_Reload(node)) {
- /* we always reload the whole register */
- tenv.dbg = get_irn_dbg_info(node);
- tenv.irn = node;
- tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
transform_to_Load(&tenv);
- }
- else if (be_is_Spill(node)) {
- ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
- /* we always spill the whole register */
- tenv.dbg = get_irn_dbg_info(node);
- tenv.irn = node;
- tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval));
+ } else if (be_is_Spill(node)) {
transform_to_Store(&tenv);
- }
- else if(be_is_MemPerm(node)) {
- tenv.dbg = get_irn_dbg_info(node);
- tenv.irn = node;
+ } else if(be_is_MemPerm(node)) {
transform_MemPerm(&tenv);
}
}
}
+/**
+ * Collects nodes that need frame entities assigned.
+ */
+static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
+{
+ be_fec_env_t *env = data;
+
+ if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
+ const ir_mode *mode = get_irn_mode(node);
+ int align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+ } else if(is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL
+ && is_ia32_use_frame(node)) {
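+ 		/* ia32 nodes that use the frame but have no entity yet get a slot sized by their load mode */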
+ if (is_ia32_Load(node)) {
+ const ir_mode *mode = get_ia32_ls_mode(node);
+ int align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+ } else if (is_ia32_vfild(node)) {
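+ 			/* an x87 load; presumably an alignment of 4 bytes is enough for the slot */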
+ const ir_mode *mode = get_ia32_ls_mode(node);
+ int align = 4;
+ be_node_needs_frame_entity(env, node, mode, align);
+ }
+ }
+}
+
/**
* We transform Spill and Reload here. This needs to be done before
 * stack biasing; otherwise we would miss the corrected offset for these nodes.
- *
- * If x87 instruction should be emitted, run the x87 simulator and patch
- * the virtual instructions. This must obviously be done after register allocation.
*/
static void ia32_after_ra(void *self) {
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
+ be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->birg);
+
+ /* create and coalesce frame entities */
+ irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
+ be_assign_entities(fec_env);
+ be_free_frame_entity_coalescer(fec_env);
irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);
}
/**
- * Last touchups for the graph before emit
+ * Last touchups for the graph before emit: x87 simulation to replace the
+ * virtual x87 instructions with real ones, creation of the block schedule
+ * and peephole optimisations.
*/
static void ia32_finish(void *self) {
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
- //be_remove_empty_blocks(irg);
- cg->blk_sched = be_create_block_schedule(irg, cg->birg->execfreqs);
-
- //cg->blk_sched = sched_create_block_schedule(cg->irg, cg->birg->execfreqs);
-
/* if we do x87 code generation, rewrite all the virtual instructions and registers */
if (cg->used_fp == fp_x87 || cg->force_sim) {
- x87_simulate_graph(cg->arch_env, irg, cg->blk_sched);
+ x87_simulate_graph(cg->arch_env, cg->birg);
}
+ /* create block schedule, this also removes empty blocks which might
+ * produce critical edges */
+ cg->blk_sched = be_create_block_schedule(irg, cg->birg->exec_freq);
+
+ /* do peephole optimisations */
ia32_peephole_optimization(irg, cg);
}
free(cg);
}
-static void *ia32_cg_init(const be_irg_t *birg);
+static void *ia32_cg_init(be_irg_t *birg);
static const arch_code_generator_if_t ia32_code_gen_if = {
ia32_cg_init,
NULL, /* before abi introduce hook */
ia32_prepare_graph,
+ NULL, /* spill */
ia32_before_sched, /* before scheduling hook */
ia32_before_ra, /* before register allocation hook */
ia32_after_ra, /* after register allocation hook */
/**
* Initializes a IA32 code generator.
*/
-static void *ia32_cg_init(const be_irg_t *birg) {
+static void *ia32_cg_init(be_irg_t *birg) {
ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
cg->isa = isa;
cg->birg = birg;
cg->blk_sched = NULL;
- cg->fp_to_gp = NULL;
- cg->gp_to_fp = NULL;
cg->fp_kind = isa->fp_kind;
cg->used_fp = fp_none;
cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
}
}
+const arch_isa_if_t ia32_isa_if;
/**
* The template that generates a new ISA object.
ia32_register_init(isa);
ia32_create_opcodes();
+ ia32_register_copy_attr_func();
if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
(ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
{ NULL }
};
-
-/**
- * Register command line options for the ia32 backend.
- *
- * Options so far:
- *
- * ia32-arch=arch create instruction for arch
- * ia32-opt=arch optimize for run on arch
- * ia32-fpunit=unit select floating point unit (x87 or SSE2)
- * ia32-incdec optimize for inc/dec
- * ia32-noaddrmode do not use address mode
- * ia32-nolea do not optimize for LEAs
- * ia32-noplacecnst do not place constants,
- * ia32-noimmop no operations with immediates
- * ia32-noextbb do not use extended basic block scheduling
- * ia32-nopushargs do not create pushs for function argument passing
- * ia32-gasmode set the GAS compatibility mode
- */
-static void ia32_register_options(lc_opt_entry_t *ent)
-{
- lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32");
- lc_opt_add_table(be_grp_ia32, ia32_options);
-}
#endif /* WITH_LIBCORE */
const arch_isa_if_t ia32_isa_if = {
ia32_get_libfirm_params,
ia32_get_allowed_execution_units,
ia32_get_machine,
-#ifdef WITH_LIBCORE
- ia32_register_options
-#endif
};
+
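+/**
+ * Module constructor: registers the ia32 option table and the ia32 ISA
+ * interface with the backend module mechanism.
+ */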
+void be_init_arch_ia32(void)
+{
+ lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
+ lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");
+
+ lc_opt_add_table(ia32_grp, ia32_options);
+ be_register_isa_if("ia32", &ia32_isa_if);
+}
+
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);