);
dump(BE_CH_DUMP_SPILL, irg, chordal_env.cls, "-spill", dump_ir_block_graph_sched);
- be_abi_fix_stack_nodes(bi->abi, chordal_env.lv);
+
+ // commented out for now, since spillslot coalescer currently doesn't
+ // detect memory as reloads
+ //check_for_memory_operands(&chordal_env);
+ be_abi_fix_stack_nodes(bi->abi, chordal_env.lv);
BE_TIMER_PUSH(ra_timer.t_verify);
}
BE_TIMER_POP(ra_timer.t_verify);
- check_for_memory_operands(&chordal_env);
-
BE_TIMER_PUSH(ra_timer.t_epilog);
dump(BE_CH_DUMP_LOWER, irg, NULL, "-spilloff", dump_ir_block_graph_sched);
arch_code_generator_finish(birg.cg);
BE_TIMER_POP(t_finish);
+ dump(DUMP_FINAL, irg, "-finish", dump_ir_block_graph_sched);
+
/* check schedule */
BE_TIMER_PUSH(t_verify);
be_sched_vrfy(birg.irg, vrfy_option);
int i;
ir_node *frame = get_irg_frame(irg);
const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
- ir_node *irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n, in);
+ ir_node *irn;
+ const arch_register_t *sp = arch_env->isa->sp;
be_memperm_attr_t *attr;
+ ir_node **real_in;
- init_node_attr(irn, n);
+ real_in = alloca((n+1) * sizeof(real_in[0]));
+ real_in[0] = frame;
+ memcpy(&real_in[1], in, n * sizeof(real_in[0]));
+
+ irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n+1, real_in);
+
+ init_node_attr(irn, n + 1);
+ be_node_set_reg_class(irn, 0, sp->reg_class);
for(i = 0; i < n; ++i) {
- be_node_set_reg_class(irn, i, cls_frame);
+ be_node_set_reg_class(irn, i + 1, cls_frame);
be_node_set_reg_class(irn, OUT_POS(i), cls_frame);
}
attr = get_irn_attr(irn);
- attr->in_entities = obstack_alloc(irg->obst, n*sizeof(attr->in_entities[0]));
- memset(attr->in_entities, 0, n*sizeof(attr->in_entities[0]));
+ attr->in_entities = obstack_alloc(irg->obst, n * sizeof(attr->in_entities[0]));
+ memset(attr->in_entities, 0, n * sizeof(attr->in_entities[0]));
attr->out_entities = obstack_alloc(irg->obst, n*sizeof(attr->out_entities[0]));
memset(attr->out_entities, 0, n*sizeof(attr->out_entities[0]));
be_memperm_attr_t *attr = get_irn_attr(irn);
assert(be_is_MemPerm(irn));
- assert(n < get_irn_arity(irn));
+ assert(n < be_get_MemPerm_entity_arity(irn));
attr->in_entities[n] = ent;
}
be_memperm_attr_t *attr = get_irn_attr(irn);
assert(be_is_MemPerm(irn));
- assert(n < get_irn_arity(irn));
+ assert(n < be_get_MemPerm_entity_arity(irn));
return attr->in_entities[n];
}
be_memperm_attr_t *attr = get_irn_attr(irn);
assert(be_is_MemPerm(irn));
- assert(n < get_irn_arity(irn));
+ assert(n < be_get_MemPerm_entity_arity(irn));
attr->out_entities[n] = ent;
}
be_memperm_attr_t *attr = get_irn_attr(irn);
assert(be_is_MemPerm(irn));
- assert(n < get_irn_arity(irn));
+ assert(n < be_get_MemPerm_entity_arity(irn));
return attr->out_entities[n];
}
+/**
+ * Returns the number of entity pairs permuted by a MemPerm node.
+ *
+ * Input 0 of a MemPerm holds the frame/stack-pointer value (it is set up as
+ * real_in[0] in be_new_MemPerm and constrained to the sp register class), so
+ * the number of permuted entities is one less than the node's input arity.
+ */
+int be_get_MemPerm_entity_arity(const ir_node *irn)
+{
+	return get_irn_arity(irn) - 1;
+}
+
static void be_limited(void *data, bitset_t *bs)
{
be_req_t *req = data;
case beo_MemPerm:
{
int i;
- for(i = 0; i < get_irn_arity(irn); ++i) {
+ for(i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) {
entity *in, *out;
in = be_get_MemPerm_in_entity(irn, i);
out = be_get_MemPerm_out_entity(irn, i);
* Make a new Perm node.
*/
ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int arity, ir_node *in[]);
+/**
+ * Create a new MemPerm node.
+ */
ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[]);
ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int arity, ir_node *in[]);
void be_set_MemPerm_out_entity(const ir_node *irn, int n, entity* ent);
entity *be_get_MemPerm_out_entity(const ir_node *irn, int n);
+int be_get_MemPerm_entity_arity(const ir_node *irn);
+
/**
* Impose a register constraint on a backend node.
* @param irn The node.
ir_type* frame = get_irg_frame_type(env->chordal_env->irg);
entity* res = frame_alloc_area(frame, slot->size, slot->align, 0);
+ // adjust size of the entity type...
+ ir_type *enttype = get_entity_type(res);
+ set_type_size_bytes(enttype, slot->size);
+
slot->entity = res;
return res;
spill.ent = spillent;
res = set_insert(env->spills, &spill, sizeof(spill), hash);
- for(i = 0, arity = get_irn_arity(memperm); i < arity; ++i) {
- ir_node* arg = get_irn_n(memperm, i);
+ for(i = 0, arity = be_get_MemPerm_entity_arity(memperm); i < arity; ++i) {
+ ir_node* arg = get_irn_n(memperm, i + 1);
entity* argent = be_get_MemPerm_in_entity(memperm, i);
collect(env, arg, memperm, argent);
exchange(irn, proj);
}
+/**
+ * Creates an ia32 Push that pushes the value stored in frame entity @p ent
+ * onto the stack, and schedules it (plus its stack-pointer Proj) before
+ * @p schedpoint.
+ *
+ * @param env         transformation environment (irg, block, dbg info, cg)
+ * @param schedpoint  node before which the Push is scheduled
+ * @param sp          in/out: current stack pointer value; replaced by the
+ *                    Proj of the new Push so successive pushes chain properly
+ * @param mem         memory dependency of the Push
+ * @param ent         frame entity whose contents are pushed
+ * @param offset      extra address-mode offset string (e.g. "4" for the upper
+ *                    half of a 64 bit slot), or NULL for none
+ * @return the new Push node
+ */
+static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node **sp, ir_node *mem, entity *ent, const char *offset) {
+	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
+	ir_mode *spmode = get_irn_mode(*sp);
+	const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, *sp);
+
+	ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, *sp, noreg, mem);
+
+	// source address mode: the pushed value is loaded from the frame entity
+	set_ia32_frame_ent(push, ent);
+	set_ia32_use_frame(push);
+	set_ia32_op_type(push, ia32_AddrModeS);
+	set_ia32_am_flavour(push, ia32_B);
+	set_ia32_ls_mode(push, mode_Is);
+	if(offset != NULL)
+		add_ia32_am_offs(push, offset);
+
+	sched_add_before(schedpoint, push);
+
+	// Proj 0 is used as the new stack pointer here; the Push spec lists
+	// outs as ("stack", "M"), so 0 is the stack result
+	*sp = new_rd_Proj(env->dbg, env->irg, env->block, push, spmode, 0);
+	sched_add_before(schedpoint, *sp);
+	arch_set_irn_register(env->cg->arch_env, *sp, spreg);
+
+	return push;
+}
+
+/**
+ * Creates an ia32 Pop that pops the top of stack into frame entity @p ent
+ * (destination address mode), and schedules it (plus its stack-pointer Proj)
+ * before @p schedpoint.
+ *
+ * @param env         transformation environment (irg, block, dbg info, cg)
+ * @param schedpoint  node before which the Pop is scheduled
+ * @param sp          in/out: current stack pointer value; replaced by the
+ *                    Proj of the new Pop so successive pops chain properly
+ * @param ent         frame entity receiving the popped value
+ * @param offset      extra address-mode offset string (e.g. "4" for the upper
+ *                    half of a 64 bit slot), or NULL for none
+ * @return the new Pop node
+ */
+static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node **sp, entity *ent, const char *offset) {
+	ir_mode *spmode = get_irn_mode(*sp);
+	const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, *sp);
+
+	ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, *sp, new_NoMem());
+
+	// destination address mode: the popped value is stored to the frame entity
+	set_ia32_frame_ent(pop, ent);
+	set_ia32_use_frame(pop);
+	set_ia32_op_type(pop, ia32_AddrModeD);
+	set_ia32_am_flavour(pop, ia32_B);
+	set_ia32_ls_mode(pop, mode_Is);
+	if(offset != NULL)
+		add_ia32_am_offs(pop, offset);
+
+	sched_add_before(schedpoint, pop);
+
+	// NOTE(review): Proj number 0 is taken as the stack pointer result, but
+	// the Pop spec lists outs as ("res", "stack", "M") — verify the Proj
+	// numbering against the generated pn_ia32_Pop_* constants
+	*sp = new_rd_Proj(env->dbg, env->irg, env->block, pop, spmode, 0);
+	arch_set_irn_register(env->cg->arch_env, *sp, spreg);
+	sched_add_before(schedpoint, *sp);
+
+	return pop;
+}
+
+/**
+ * Lowers a be_MemPerm node to a sequence of ia32 Push/Pop instructions.
+ *
+ * All in-entities are pushed onto the stack in order, then popped into the
+ * out-entities in reverse order, which performs the memory-to-memory
+ * permutation without needing any free register. 64 bit spill slots are
+ * handled as two 32 bit pushes/pops (second one at offset "4"). Finally the
+ * MemPerm's memory Projs are rerouted to the corresponding Pop nodes and the
+ * MemPerm is removed from the schedule.
+ */
+static void transform_MemPerm(ia32_transform_env_t *env) {
+	/*
+	 * Transform memperm, currently we do this the ugly way and produce
+	 * push/pop into/from memory cascades. This is possible without using
+	 * any registers.
+	 */
+	ir_node *node = env->irn;
+	int i, arity;
+	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
+	ir_node *sp = get_irn_n(node, 0);
+	const arch_register_t *spreg = arch_get_irn_register(env->cg->arch_env, sp);
+	const ir_edge_t *edge;
+	const ir_edge_t *next;
+	ir_node **pops;
+	ir_mode *spmode = get_irn_mode(sp);
+
+	arity = be_get_MemPerm_entity_arity(node);
+	pops = alloca(arity * sizeof(pops[0]));
+
+	// create pushs
+	for(i = 0; i < arity; ++i) {
+		entity *ent = be_get_MemPerm_in_entity(node, i);
+		ir_type *enttype = get_entity_type(ent);
+		int entbits = get_type_size_bits(enttype);
+		ir_node *mem = get_irn_n(node, i + 1);
+
+		assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
+
+		create_push(env, node, &sp, mem, ent, NULL);
+		if(entbits == 64) {
+			// add another push after the first one
+			create_push(env, node, &sp, mem, ent, "4");
+		}
+
+		// NOTE(review): the value/memory inputs of a MemPerm start at index 1
+		// (input 0 is the stack pointer, see get_irn_n(node, i + 1) above) —
+		// verify this should not be set_irn_n(node, i + 1, ...)
+		set_irn_n(node, i, new_Bad());
+	}
+
+	// create pops
+	for(i = arity - 1; i >= 0; --i) {
+		entity *ent = be_get_MemPerm_out_entity(node, i);
+		ir_type *enttype = get_entity_type(ent);
+		int entbits = get_type_size_bits(enttype);
+
+		ir_node *pop;
+
+		assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
+
+		pop = create_pop(env, node, &sp, ent, NULL);
+		if(entbits == 64) {
+			// add another pop after the first one
+			pop = create_pop(env, node, &sp, ent, "4");
+		}
+
+		pops[i] = pop;
+	}
+
+	// exchange memprojs
+	foreach_out_edge_safe(node, edge, next) {
+		ir_node *proj = get_edge_src_irn(edge);
+		int p = get_Proj_proj(proj);
+
+		assert(p < arity);
+
+		set_Proj_pred(proj, pops[p]);
+		// NOTE(review): presumably 3 is the generated memory-out Proj number
+		// of ia32_Pop — confirm against the pn_ia32_Pop_* constants, since
+		// the spec lists only ("res", "stack", "M")
+		set_Proj_proj(proj, 3);
+	}
+
+	sched_remove(node);
+}
+
/**
* Fix the mode of Spill/Reload
*/
tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval));
transform_to_Store(&tenv);
}
+ else if(be_is_MemPerm(node)) {
+ tenv.dbg = get_irn_dbg_info(node);
+ tenv.irn = node;
+ transform_MemPerm(&tenv);
+ }
}
}
ir_graph *irg = cg->irg;
ia32_finish_irg(irg, cg);
- if (cg->dump)
- be_dump(irg, "-finished", dump_ir_block_graph_sched);
}
/**
"emit" => '
if (get_ia32_id_cnst(n)) {
if (get_ia32_immop_type(n) == ia32_ImmConst) {
-. push %C /* Push(%A2) */
- } else {
-. push OFFSET FLAT:%C /* Push(%A2) */
+4. push %C /* Push const on stack */
+} else {
+4. push OFFSET FLAT:%C /* Push symconst on stack */
}
}
-else {
-. push %S2 /* Push(%A2) */
+else if (get_ia32_op_type(n) == ia32_Normal) {
+2. push %S2 /* Push(%A2) */
}
+else {
+2. push %ia32_emit_am /* Push memory to stack */
+};
',
"outs" => [ "stack", "M" ],
},
"Pop" => {
"comment" => "pop a gp register from the stack",
"reg_req" => { "in" => [ "esp", "none" ], "out" => [ "gp", "esp" ] },
- "emit" => '. pop %D1 /* Pop -> %D1 */',
+ "emit" => '
+if (get_ia32_op_type(n) == ia32_Normal) {
+2. pop %D1 /* Pop from stack into %D1 */
+}
+else {
+2. pop %ia32_emit_am /* Pop from stack into memory */
+}
+',
"outs" => [ "res", "stack", "M" ],
},