int *pos;
} be_op_t;
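+/**
+ * The ways a register requirement of a backend node can constrain
+ * the limited bitset; see be_limited() for how each kind is evaluated.
+ */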
+typedef enum {
+ be_req_kind_old_limited,
+ be_req_kind_negate_old_limited,
+ be_req_kind_single_reg
+} be_req_kind_t;
+
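+/**
+ * A register requirement attached to a backend node. It embeds the
+ * generic arch_register_req_t and records how its limited bitset is
+ * produced: by a single register or by a wrapped old limited callback.
+ */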
+typedef struct {
+ arch_register_req_t req;
+ be_req_kind_t kind;
+ union {
+ struct {
+ void (*old_limited)(void *ptr, bitset_t *bs);
+ void *old_limited_env;
+ } old_limited;
+
+ const arch_register_t *single_reg;
+ } x;
+} be_req_t;
+
typedef struct {
const arch_register_t *reg;
- arch_register_req_t req;
+ be_req_t req;
+ be_req_t in_req;
} be_reg_data_t;
typedef struct {
be_node_attr_t node_attr;
ir_node *spill_ctx; /**< The node in whose context this spill was introduced. */
- unsigned offset; /**< The offset of the memory location the spill writes to
- in the spill area. */
+ entity *ent; /**< The entity in the stack frame the spill writes to. */
} be_spill_attr_t;
static ir_op *op_Spill;
static ir_op *op_Perm;
static ir_op *op_Copy;
static ir_op *op_Keep;
+static ir_op *op_Call;
+static ir_op *op_IncSP;
+static ir_op *op_AddSP;
static int beo_base = -1;
static const ir_op_ops be_node_op_ops;
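+/* One-letter shorthands for the irop flags used in the new_ir_op() calls below. */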
+#define N irop_flag_none
+#define L irop_flag_labeled
+#define C irop_flag_commutative
+#define X irop_flag_cfopcode
+#define I irop_flag_ip_cfopcode
+#define F irop_flag_fragile
+#define Y irop_flag_forking
+#define H irop_flag_highlevel
+#define c irop_flag_constlike
+#define K irop_flag_keep
+
void be_node_init(void) {
static int inited = 0;
- int i;
if(inited)
return;
inited = 1;
- beo_base = get_next_ir_opcode();
-
- /* Acquire all needed opcodes. We assume that they are consecutive! */
- for(i = beo_Spill; i < beo_Last; ++i)
- get_next_ir_opcode();
+ /* Acquire all needed opcodes. */
+ beo_base = get_next_ir_opcodes(beo_Last - 1);
- op_Spill = new_ir_op(beo_base + beo_Spill, "Spill", op_pin_state_mem_pinned, 0, oparity_unary, 0, sizeof(be_spill_attr_t), &be_node_op_ops);
- op_Reload = new_ir_op(beo_base + beo_Reload, "Reload", op_pin_state_mem_pinned, 0, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_Perm = new_ir_op(beo_base + beo_Perm, "Perm", op_pin_state_pinned, 0, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_Copy = new_ir_op(beo_base + beo_Copy, "Copy", op_pin_state_pinned, 0, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_Keep = new_ir_op(beo_base + beo_Keep, "Keep", op_pin_state_pinned, 0, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_Spill = new_ir_op(beo_base + beo_Spill, "Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_spill_attr_t), &be_node_op_ops);
+ op_Reload = new_ir_op(beo_base + beo_Reload, "Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_Perm = new_ir_op(beo_base + beo_Perm, "Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_Copy = new_ir_op(beo_base + beo_Copy, "Copy", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_Keep = new_ir_op(beo_base + beo_Keep, "Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
set_op_tag(op_Spill, &be_node_tag);
set_op_tag(op_Reload, &be_node_tag);
a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), n_outs);
memset(a->reg_data, 0, n_outs * sizeof(a->reg_data[0]));
for(i = 0; i < n_outs; ++i) {
- a->reg_data[i].req.cls = cls;
- a->reg_data[i].req.type = arch_register_req_type_normal;
+ a->reg_data[i].req.req.cls = cls;
+ a->reg_data[i].req.req.type = arch_register_req_type_normal;
}
}
in[0] = to_spill;
res = new_ir_node(NULL, irg, bl, op_Spill, mode_M, 1, in);
a = init_node_attr(res, cls, irg, 0);
- a->offset = BE_SPILL_NO_OFFSET;
+ a->ent = NULL;
a->spill_ctx = ctx;
return res;
}
return get_irn_be_opcode(irn) == beo_Keep;
}
-void be_set_Perm_out_req(ir_node *irn, int pos, const arch_register_req_t *req)
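+/**
+ * Common limited() callback for backend register requirements.
+ * Depending on the request kind it either delegates to the wrapped
+ * old limited function (optionally negating the result) or clears
+ * the bitset and allows only the single stored register.
+ */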
+static void be_limited(void *data, bitset_t *bs)
+{
+ be_req_t *req = data;
+
+ switch(req->kind) {
+ case be_req_kind_negate_old_limited:
+ case be_req_kind_old_limited:
+ req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs);
+ if(req->kind == be_req_kind_negate_old_limited)
+ bitset_flip_all(bs);
+ break;
+ case be_req_kind_single_reg:
+ bitset_clear_all(bs);
+ bitset_set(bs, req->x.single_reg->index);
+ break;
+ }
+}
+
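+/**
+ * Constrain an operand (pos >= 0) or a result (pos < 0, encoded as
+ * -(index + 1)) of a backend node to a single register.
+ */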
+void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg)
{
+ int idx = pos < 0 ? -(pos + 1) : pos;
be_node_attr_t *a = get_irn_attr(irn);
+ be_reg_data_t *rd = &a->reg_data[idx];
+ be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;
+
+ assert(is_be_node(irn));
+ assert(!(pos >= 0) || pos < get_irn_arity(irn));
+ assert(!(pos < 0) || -(pos + 1) < a->n_outs);
- assert(be_is_Perm(irn));
- assert(pos >= 0 && pos < get_irn_arity(irn));
- memcpy(&a->reg_data[pos].req, req, sizeof(req[0]));
+ r->kind = be_req_kind_single_reg;
+ r->x.single_reg = reg;
+ r->req.limited = be_limited;
+ r->req.limited_env = r;
+ r->req.type = arch_register_req_type_limited;
+ r->req.cls = reg->reg_class;
}
-void be_set_Spill_offset(ir_node *irn, unsigned offset)
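+/**
+ * Constrain an operand/result of a backend node with an existing
+ * limited register requirement; its old limited function is wrapped
+ * and dispatched through be_limited().
+ */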
+void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req)
+{
+ int idx = pos < 0 ? -(pos + 1) : pos;
+ be_node_attr_t *a = get_irn_attr(irn);
+ be_reg_data_t *rd = &a->reg_data[idx];
+ be_req_t *r = pos < 0 ? &rd->req : &rd->in_req;
+
+ assert(is_be_node(irn));
+ assert(!(pos >= 0) || pos < get_irn_arity(irn));
+ assert(!(pos < 0) || -(pos + 1) < a->n_outs);
+ assert(arch_register_req_is(req, limited));
+
+ r->kind = be_req_kind_old_limited;
+ r->req.limited = be_limited;
+ r->req.limited_env = r;
+ r->req.type = arch_register_req_type_limited;
+ r->req.cls = req->cls;
+
+ r->x.old_limited.old_limited = req->limited;
+ r->x.old_limited.old_limited_env = req->limited_env;
+}
+
+
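+/* Accessors for the IncSP offset; the offset is not stored yet, so the
+ * setter is a stub and the getter currently returns -1. */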
+void be_set_IncSP_offset(ir_node *irn, int offset)
+{
+
+}
+
+int be_get_IncSP_offset(ir_node *irn)
+{
+ return -1;
+}
+
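+/** Set the entity in the stack frame the Spill node writes to. */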
+void be_set_Spill_entity(ir_node *irn, entity *ent)
{
be_spill_attr_t *a = get_irn_attr(irn);
assert(be_is_Spill(irn));
- a->offset = offset;
+ a->ent = ent;
}
static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr)
return find_a_spill_walker(irn, visited_nr);
}
-
-unsigned be_get_spill_offset(ir_node *irn)
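+/**
+ * Return the stack frame entity of a Spill node; for a Reload the
+ * corresponding Spill is looked up first.
+ */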
+entity *be_get_spill_entity(ir_node *irn)
{
int opc = get_irn_opcode(irn);
switch(get_irn_be_opcode(irn)) {
case beo_Reload:
- return be_get_spill_offset(find_a_spill(irn));
+ return be_get_spill_entity(find_a_spill(irn));
case beo_Spill:
{
be_spill_attr_t *a = get_irn_attr(irn);
- return a->offset;
+ return a->ent;
}
default:
assert(0 && "Must give spill/reload node");
}
- return (unsigned) -1;
+ return NULL;
}
ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *ctx)
{
const be_node_attr_t *a = get_irn_attr(irn);
-
if(out_pos < a->n_outs)
memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
else {
const be_node_attr_t *a = get_irn_attr(irn);
int n = get_irn_arity(irn);
- req->type = arch_register_req_type_none;
- req->cls = NULL;
-
- switch(get_irn_be_opcode(irn)) {
- case beo_Spill:
- case beo_Copy:
- case beo_Keep:
- case beo_Perm:
- if(pos < n) {
- req->type = arch_register_req_type_normal;
- req->cls = a->cls;
- }
- break;
- case beo_Reload:
- default:
- req = NULL;
+ if(pos < n)
+ memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
+ else {
+ req->type = arch_register_req_type_none;
+ req->cls = NULL;
}
return req;
}
static const arch_register_req_t *
-be_node_get_irn_reg_req(const arch_irn_ops_t *self, arch_register_req_t *req, const ir_node *irn, int pos)
+be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos)
{
int out_pos = pos;
}
static void
-be_node_set_irn_reg(const arch_irn_ops_t *_self, ir_node *irn, const arch_register_t *reg)
+be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
{
int out_pos;
be_node_attr_t *a;
- out_pos = redir_proj(&irn, -1);
+ out_pos = redir_proj((const ir_node **) &irn, -1);
a = get_irn_attr(irn);
assert(is_be_node(irn));
}
const arch_register_t *
-be_node_get_irn_reg(const arch_irn_ops_t *_self, const ir_node *irn)
+be_node_get_irn_reg(const void *_self, const ir_node *irn)
{
int out_pos;
be_node_attr_t *a;
- out_pos = redir_proj(&irn, -1);
+ out_pos = redir_proj((const ir_node **) &irn, -1);
a = get_irn_attr(irn);
assert(is_be_node(irn));
return a->reg_data[out_pos].reg;
}
-arch_irn_class_t be_node_classify(const arch_irn_ops_t *_self, const ir_node *irn)
+arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
{
- redir_proj(&irn, -1);
+ redir_proj((const ir_node **) &irn, -1);
switch(get_irn_be_opcode(irn)) {
#define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b;
return 0;
}
-arch_irn_class_t be_node_get_flags(const arch_irn_ops_t *_self, const ir_node *irn)
+arch_irn_class_t be_node_get_flags(const void *_self, const ir_node *irn)
{
return 0;
}
-static const arch_irn_ops_t be_node_irn_ops = {
+static const arch_irn_ops_if_t be_node_irn_ops_if = {
be_node_get_irn_reg_req,
be_node_set_irn_reg,
be_node_get_irn_reg,
be_node_get_flags,
};
-const arch_irn_ops_t *be_node_get_arch_ops(const arch_irn_handler_t *self, const ir_node *irn)
+static const arch_irn_ops_t be_node_irn_ops = {
+ &be_node_irn_ops_if
+};
+
+const void *be_node_get_arch_ops(const arch_irn_handler_t *self, const ir_node *irn)
{
redir_proj((const ir_node **) &irn, -1);
return is_be_node(irn) ? &be_node_irn_ops : NULL;
if(get_irn_be_opcode(irn) == beo_Spill) {
be_spill_attr_t *a = (be_spill_attr_t *) at;
+
ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
- ir_fprintf(f, "spill offset: %04x (%u)\n", a->offset);
+ if (a->ent) {
+ unsigned ofs = get_entity_offset_bytes(a->ent);
+ ir_fprintf(f, "spill entity: %+F offset %x (%d)\n", a->ent, ofs, ofs);
+ }
+ else {
+ ir_fprintf(f, "spill entity: n/a\n");
+ }
}
break;
}
NULL
};
-ir_node *insert_Perm_after(const arch_env_t *arch_env,
- const arch_register_class_t *cls,
- dom_front_info_t *dom_front,
- ir_node *pos)
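+/**
+ * Collect all values of the given register class that are live at pos
+ * into the pset live and return it.
+ */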
+pset *nodes_live_at(const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live)
{
- ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
- ir_graph *irg = get_irn_irg(bl);
- pset *live = pset_new_ptr_default();
- firm_dbg_module_t *dbg = firm_dbg_register("be.node");
-
+ firm_dbg_module_t *dbg = firm_dbg_register("firm.be.node");
+ const ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
+ ir_node *irn;
irn_live_t *li;
- ir_node *curr, *irn, *perm, **nodes;
- int i, n;
-
- DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));
-
live_foreach(bl, li) {
ir_node *irn = (ir_node *) li->irn;
- if(live_is_end(li) && arch_irn_has_reg_class(arch_env, irn, -1, cls))
+ if(live_is_end(li) && arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
pset_insert_ptr(live, irn);
}
sched_foreach_reverse(bl, irn) {
+ int i, n;
ir_node *x;
- /*
- * If we encounter the node we want to insert the Perm after,
- * exit immediately, so that this node is still live
- */
+ /*
+ * If we encounter the node we want to insert the Perm after,
+ * exit immediately, so that this node is still live
+ */
if(irn == pos)
- break;
+ return live;
DBG((dbg, LEVEL_1, "%+F\n", irn));
for(x = pset_first(live); x; x = pset_next(live))
for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
- if(arch_irn_has_reg_class(arch_env, op, -1, cls))
+ if(arch_irn_consider_in_reg_alloc(arch_env, cls, op))
pset_insert_ptr(live, op);
}
}
+ return live;
+}
+
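+/**
+ * Insert a Perm node after pos for all values of the given register
+ * class that are live there and reconstruct SSA form for the permuted
+ * values.
+ */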
+ir_node *insert_Perm_after(const arch_env_t *arch_env,
+ const arch_register_class_t *cls,
+ dom_front_info_t *dom_front,
+ ir_node *pos)
+{
+ ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
+ ir_graph *irg = get_irn_irg(bl);
+ pset *live = pset_new_ptr_default();
+ firm_dbg_module_t *dbg = firm_dbg_register("be.node");
+
+ ir_node *curr, *irn, *perm, **nodes;
+ int i, n;
+
+ DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));
+
+ live = nodes_live_at(arch_env, cls, pos, live);
+
n = pset_count(live);
if(n == 0)
curr = proj;
copies[0] = proj;
- be_introduce_copies(dom_front, perm_op, 1, copies);
+ be_ssa_constr_single(dom_front, perm_op, 1, copies);
}
return perm;
}