* Backend node support for generic backend nodes.
* This file provides Perm, Copy, Spill and Reload nodes.
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include <stdlib.h>
#include "beirgmod.h"
-#define OUT_POS(x) (-((x) + 1))
-
#define get_irn_attr(irn) get_irn_generic_attr(irn)
#define get_irn_attr_const(irn) get_irn_generic_attr_const(irn)
typedef struct {
arch_register_req_t req;
- arch_irn_flags_t flags;
-} be_req_t;
-
-typedef struct {
- const arch_register_t *reg;
- be_req_t req;
- be_req_t in_req;
+ arch_register_req_t in_req;
} be_reg_data_t;
/** The generic be nodes attribute type. */
#define K irop_flag_keep
#define M irop_flag_uses_memory
-static int be_reqs_equal(const be_req_t *req1, const be_req_t *req2)
-{
- if(!reg_reqs_equal(&req1->req, &req2->req))
- return 0;
- if(req1->flags != req2->flags)
- return 0;
-
- return 1;
-}
-
/**
* Compare two be node attributes.
*
* @return zero if both attributes are identically
*/
static int _node_cmp_attr(const be_node_attr_t *a, const be_node_attr_t *b) {
- int i, len;
+ int i, len = ARR_LEN(a->reg_data);
- if (ARR_LEN(a->reg_data) != ARR_LEN(b->reg_data))
+ if (len != ARR_LEN(b->reg_data))
return 1;
- len = ARR_LEN(a->reg_data);
- for (i = 0; i < len; ++i) {
- if (a->reg_data[i].reg != b->reg_data[i].reg ||
- !be_reqs_equal(&a->reg_data[i].in_req, &b->reg_data[i].in_req) ||
- !be_reqs_equal(&a->reg_data[i].req, &b->reg_data[i].req))
+ for (i = len - 1; i >= 0; --i) {
+ if (!reg_reqs_equal(&a->reg_data[i].in_req, &b->reg_data[i].in_req) ||
+ !reg_reqs_equal(&a->reg_data[i].req, &b->reg_data[i].req))
return 1;
}
const be_node_attr_t *a_attr = get_irn_attr_const(a);
const be_node_attr_t *b_attr = get_irn_attr_const(b);
- return _node_cmp_attr(a_attr, b_attr);
+ if (_node_cmp_attr(a_attr, b_attr) != 0)
+ return 1;
+
+ return !be_info_equal(a, b);
}
/**
const be_call_attr_t *b_attr = get_irn_attr_const(b);
if (a_attr->ent != b_attr->ent ||
- a_attr->call_tp != b_attr->call_tp)
+ a_attr->call_tp != b_attr->call_tp)
return 1;
return _node_cmp_attr(&a_attr->node_attr, &b_attr->node_attr);
}
-static INLINE be_req_t *get_be_req(const ir_node *node, int pos)
+static inline arch_register_req_t *get_be_req(const ir_node *node, int pos)
{
int idx;
const be_node_attr_t *attr;
assert(is_be_node(node));
attr = get_irn_attr_const(node);
- if(pos < 0) {
+ if (pos < 0) {
idx = -(pos + 1);
} else {
idx = pos;
return pos < 0 ? &rd->req : &rd->in_req;
}
-static INLINE arch_register_req_t *get_req(const ir_node *node, int pos)
-{
- be_req_t *bereq = get_be_req(node, pos);
- return &bereq->req;
-}
-
/**
- * Initializes the generic attribute of all be nodes and return ir.
+ * Initializes the generic attribute of all be nodes and return it.
*/
static void *init_node_attr(ir_node *node, int max_reg_data)
{
memset(a, 0, sizeof(get_op_attr_size(get_irn_op(node))));
if(max_reg_data >= 0) {
+ backend_info_t *info = be_get_info(node);
+ info->out_infos = NEW_ARR_D(reg_out_info_t, obst, max_reg_data);
+ memset(info->out_infos, 0, max_reg_data * sizeof(info->out_infos[0]));
+
a->reg_data = NEW_ARR_D(be_reg_data_t, obst, max_reg_data);
memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
} else {
+ backend_info_t *info = be_get_info(node);
+ info->out_infos = NEW_ARR_F(reg_out_info_t, 0);
+
a->reg_data = NEW_ARR_F(be_reg_data_t, 0);
}
static void add_register_req(ir_node *node)
{
- be_node_attr_t *a = get_irn_attr(node);
- be_reg_data_t regreq;
+ backend_info_t *info = be_get_info(node);
+ be_node_attr_t *a = get_irn_attr(node);
+ be_reg_data_t regreq;
+ reg_out_info_t out_info;
memset(&regreq, 0, sizeof(regreq));
+ memset(&out_info, 0, sizeof(out_info));
ARR_APP1(be_reg_data_t, a->reg_data, regreq);
+ ARR_APP1(reg_out_info_t, info->out_infos, out_info);
}
/**
return 0;
}
-static be_reg_data_t *retrieve_reg_data(const ir_node *node)
-{
- const be_node_attr_t *attr;
- int pos = 0;
-
- if(is_Proj(node)) {
- pos = get_Proj_proj(node);
- node = get_Proj_pred(node);
- }
-
- assert(is_be_node(node));
- attr = get_irn_attr_const(node);
- assert(pos >= 0 && pos < ARR_LEN(attr->reg_data) && "illegal proj number");
-
- return &attr->reg_data[pos];
-}
-
-static void
-be_node_set_irn_reg(ir_node *irn, const arch_register_t *reg)
-{
- be_reg_data_t *r = retrieve_reg_data(irn);
- r->reg = reg;
-}
-
ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill)
{
a->ent = NULL;
a->offset = 0;
- be_node_set_reg_class(res, be_pos_Spill_frame, cls_frame);
- be_node_set_reg_class(res, be_pos_Spill_val, cls);
+ be_node_set_reg_class_in(res, be_pos_Spill_frame, cls_frame);
+ be_node_set_reg_class_in(res, be_pos_Spill_val, cls);
return res;
}
-ir_node *be_new_Reload(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
- ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *mem, ir_mode *mode)
+ir_node *be_new_Reload(const arch_register_class_t *cls,
+ const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *block,
+ ir_node *frame, ir_node *mem, ir_mode *mode)
{
ir_node *in[2];
ir_node *res;
in[0] = frame;
in[1] = mem;
- res = new_ir_node(NULL, irg, bl, op_be_Reload, mode, 2, in);
+ res = new_ir_node(NULL, irg, block, op_be_Reload, mode, 2, in);
init_node_attr(res, 2);
- be_node_set_reg_class(res, -1, cls);
- be_node_set_reg_class(res, be_pos_Reload_frame, cls_frame);
- be_node_set_flags(res, -1, arch_irn_flags_rematerializable);
+ be_node_set_reg_class_out(res, 0, cls);
+ be_node_set_reg_class_in(res, be_pos_Reload_frame, cls_frame);
+ arch_irn_set_flags(res, arch_irn_flags_rematerializable);
return res;
}
return get_irn_n(irn, be_pos_Spill_frame);
}
-ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
+ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg,
+ ir_node *block, int n, ir_node *in[])
{
int i;
- ir_node *irn = new_ir_node(NULL, irg, bl, op_be_Perm, mode_T, n, in);
+ ir_node *irn = new_ir_node(NULL, irg, block, op_be_Perm, mode_T, n, in);
init_node_attr(irn, n);
for(i = 0; i < n; ++i) {
- be_node_set_reg_class(irn, i, cls);
- be_node_set_reg_class(irn, OUT_POS(i), cls);
+ be_node_set_reg_class_in(irn, i, cls);
+ be_node_set_reg_class_out(irn, i, cls);
}
return irn;
void be_Perm_reduce(ir_node *perm, int new_size, int *map)
{
- int arity = get_irn_arity(perm);
- be_reg_data_t *old_data = alloca(arity * sizeof(old_data[0]));
- be_node_attr_t *attr = get_irn_attr(perm);
+ int arity = get_irn_arity(perm);
+ be_reg_data_t *old_data = ALLOCAN(be_reg_data_t, arity);
+ reg_out_info_t *old_infos = ALLOCAN(reg_out_info_t, arity);
+ be_node_attr_t *attr = get_irn_attr(perm);
+ backend_info_t *info = be_get_info(perm);
ir_node **new_in;
int i;
assert(be_is_Perm(perm));
assert(new_size <= arity);
- NEW_ARR_A(ir_node *, new_in, new_size);
+ new_in = alloca(new_size * sizeof(*new_in));
/* save the old register data */
memcpy(old_data, attr->reg_data, arity * sizeof(old_data[0]));
+ memcpy(old_infos, info->out_infos, arity * sizeof(old_infos[0]));
/* compose the new in array and set the new register data directly in place */
for (i = 0; i < new_size; ++i) {
int idx = map[i];
- new_in[i] = get_irn_n(perm, idx);
- attr->reg_data[i] = old_data[idx];
+ new_in[i] = get_irn_n(perm, idx);
+ attr->reg_data[i] = old_data[idx];
+ info->out_infos[i] = old_infos[idx];
}
set_irn_in(perm, new_size, new_in);
ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
{
- int i;
- ir_node *frame = get_irg_frame(irg);
- const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame, -1);
- ir_node *irn;
- const arch_register_t *sp = arch_env->sp;
- be_memperm_attr_t *attr;
- ir_node **real_in;
+ ir_node *frame = get_irg_frame(irg);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame);
+ const arch_register_t *sp = arch_env->sp;
+ ir_node *irn;
+ be_memperm_attr_t *attr;
+ ir_node **real_in;
+ int i;
- real_in = alloca((n+1) * sizeof(real_in[0]));
+ real_in = ALLOCAN(ir_node*, n + 1);
real_in[0] = frame;
memcpy(&real_in[1], in, n * sizeof(real_in[0]));
irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n+1, real_in);
init_node_attr(irn, n + 1);
- be_node_set_reg_class(irn, 0, sp->reg_class);
+ be_node_set_reg_class_in(irn, 0, sp->reg_class);
for (i = 0; i < n; ++i) {
- be_node_set_reg_class(irn, i + 1, cls_frame);
- be_node_set_reg_class(irn, OUT_POS(i), cls_frame);
+ be_node_set_reg_class_in(irn, i + 1, cls_frame);
+ be_node_set_reg_class_out(irn, i, cls_frame);
}
attr = get_irn_attr(irn);
in[0] = op;
res = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, in);
init_node_attr(res, 1);
- be_node_set_reg_class(res, 0, cls);
- be_node_set_reg_class(res, OUT_POS(0), cls);
+ be_node_set_reg_class_in(res, 0, cls);
+ be_node_set_reg_class_out(res, 0, cls);
- req = get_req(res, OUT_POS(0));
+ req = get_be_req(res, BE_OUT_POS(0));
req->cls = cls;
req->type = arch_register_req_type_should_be_same;
req->other_same = 1U << 0;
for(i = 0; i < n; ++i) {
add_irn_n(res, in[i]);
add_register_req(res);
- be_node_set_reg_class(res, i, cls);
+ be_node_set_reg_class_in(res, i, cls);
}
keep_alive(res);
assert(be_is_Keep(keep));
n = add_irn_n(keep, node);
add_register_req(keep);
- be_node_set_reg_class(keep, n, cls);
+ be_node_set_reg_class_in(keep, n, cls);
}
/* creates a be_Call */
a->offset = offset;
a->align = align;
- be_node_set_flags(irn, -1, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
-
/* Set output constraint to stack register. */
- be_node_set_reg_class(irn, 0, sp->reg_class);
- be_set_constr_single_reg(irn, BE_OUT_POS(0), sp);
- be_node_set_irn_reg(irn, sp);
+ be_node_set_reg_class_in(irn, 0, sp->reg_class);
+ be_set_constr_single_reg_out(irn, 0, sp, arch_register_req_type_produces_sp);
return irn;
}
irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, be_pos_AddSP_last, in);
a = init_node_attr(irn, be_pos_AddSP_last);
- be_node_set_flags(irn, OUT_POS(pn_be_AddSP_sp),
- arch_irn_flags_ignore | arch_irn_flags_modify_sp);
-
/* Set output constraint to stack register. */
- be_set_constr_single_reg(irn, be_pos_AddSP_old_sp, sp);
- be_node_set_reg_class(irn, be_pos_AddSP_size, arch_register_get_class(sp));
- be_set_constr_single_reg(irn, OUT_POS(pn_be_AddSP_sp), sp);
- a->reg_data[pn_be_AddSP_sp].reg = sp;
+ be_set_constr_single_reg_in(irn, be_pos_AddSP_old_sp, sp, 0);
+ be_node_set_reg_class_in(irn, be_pos_AddSP_size, arch_register_get_class(sp));
+ be_set_constr_single_reg_out(irn, pn_be_AddSP_sp, sp, arch_register_req_type_produces_sp);
cls = arch_register_get_class(sp);
- be_node_set_reg_class(irn, OUT_POS(pn_be_AddSP_res), cls);
return irn;
}
irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, be_pos_SubSP_last, in);
a = init_node_attr(irn, be_pos_SubSP_last);
- be_node_set_flags(irn, OUT_POS(pn_be_SubSP_sp),
- arch_irn_flags_ignore | arch_irn_flags_modify_sp);
-
/* Set output constraint to stack register. */
- be_set_constr_single_reg(irn, be_pos_SubSP_old_sp, sp);
- be_node_set_reg_class(irn, be_pos_SubSP_size, arch_register_get_class(sp));
- be_set_constr_single_reg(irn, OUT_POS(pn_be_SubSP_sp), sp);
- a->reg_data[pn_be_SubSP_sp].reg = sp;
+ be_set_constr_single_reg_in(irn, be_pos_SubSP_old_sp, sp, 0);
+ be_node_set_reg_class_in(irn, be_pos_SubSP_size, arch_register_get_class(sp));
+ be_set_constr_single_reg_out(irn, pn_be_SubSP_sp, sp, arch_register_req_type_produces_sp);
return irn;
}
return res;
}
-ir_node *be_RegParams_append_out_reg(ir_node *regparams,
- const arch_env_t *arch_env,
- const arch_register_t *reg)
-{
- ir_graph *irg = get_irn_irg(regparams);
- ir_node *block = get_nodes_block(regparams);
- be_node_attr_t *attr = get_irn_attr(regparams);
- const arch_register_class_t *cls = arch_register_get_class(reg);
- ir_mode *mode = arch_register_class_mode(cls);
- int n = ARR_LEN(attr->reg_data);
- ir_node *proj;
- (void)arch_env; // TODO remove parameter
-
- assert(be_is_RegParams(regparams));
- proj = new_r_Proj(irg, block, regparams, mode, n);
- add_register_req(regparams);
- be_set_constr_single_reg(regparams, BE_OUT_POS(n), reg);
- arch_set_irn_register(proj, reg);
-
- /* TODO decide, whether we need to set ignore/modify sp flags here? */
-
- return proj;
-}
-
ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_entity *ent)
{
be_frame_attr_t *a;
a = init_node_attr(irn, 1);
a->ent = ent;
a->offset = 0;
- be_node_set_reg_class(irn, 0, cls_frame);
- be_node_set_reg_class(irn, OUT_POS(0), cls_frame);
+ be_node_set_reg_class_in(irn, 0, cls_frame);
+ be_node_set_reg_class_out(irn, 0, cls_frame);
return optimize_node(irn);
}
ir_node *be_new_CopyKeep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, int n, ir_node *in_keep[], ir_mode *mode)
{
- ir_node *irn;
- ir_node **in = (ir_node **) alloca((n + 1) * sizeof(in[0]));
+ ir_node *irn;
+ ir_node **in = ALLOCAN(ir_node*, n + 1);
in[0] = src;
memcpy(&in[1], in_keep, n * sizeof(in[0]));
irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in);
init_node_attr(irn, n + 1);
- be_node_set_reg_class(irn, OUT_POS(0), cls);
- be_node_set_reg_class(irn, 0, cls);
+ be_node_set_reg_class_in(irn, 0, cls);
+ be_node_set_reg_class_out(irn, 0, cls);
return irn;
}
return get_irn_arity(irn) - 1;
}
-void be_set_constr_single_reg(ir_node *node, int pos, const arch_register_t *reg)
+static void set_req_single(struct obstack *obst, arch_register_req_t *req,
+ const arch_register_t *reg, arch_register_req_type_t additional_types)
{
- arch_register_req_t *req = get_req(node, pos);
const arch_register_class_t *cls = arch_register_get_class(reg);
- ir_graph *irg = get_irn_irg(node);
- struct obstack *obst = get_irg_obstack(irg);
- unsigned *limited_bitset;
-
- assert(req->cls == NULL || req->cls == cls);
- assert(! (req->type & arch_register_req_type_limited));
- assert(req->limited == NULL);
+ unsigned *limited_bitset;
limited_bitset = rbitset_obstack_alloc(obst, arch_register_class_n_regs(cls));
rbitset_set(limited_bitset, arch_register_get_index(reg));
req->cls = cls;
- req->type |= arch_register_req_type_limited;
+ req->type |= arch_register_req_type_limited | additional_types;
req->limited = limited_bitset;
+
+}
+
+void be_set_constr_single_reg_in(ir_node *node, int pos,
+ const arch_register_t *reg, arch_register_req_type_t additional_types)
+{
+ arch_register_req_t *req = get_be_req(node, pos);
+ ir_graph *irg = get_irn_irg(node);
+ struct obstack *obst = get_irg_obstack(irg);
+
+ set_req_single(obst, req, reg, additional_types);
+}
+
+void be_set_constr_single_reg_out(ir_node *node, int pos,
+ const arch_register_t *reg, arch_register_req_type_t additional_types)
+{
+ arch_register_req_t *req = get_be_req(node, BE_OUT_POS(pos));
+ ir_graph *irg = get_irn_irg(node);
+ struct obstack *obst = get_irg_obstack(irg);
+
+ /* if we have an ignore register, add ignore flag and just assign it */
+ if (reg->type & arch_register_type_ignore) {
+ additional_types |= arch_register_req_type_ignore;
+ }
+
+ arch_irn_set_register(node, pos, reg);
+ set_req_single(obst, req, reg, additional_types);
}
void be_set_constr_limited(ir_node *node, int pos, const arch_register_req_t *req)
{
ir_graph *irg = get_irn_irg(node);
struct obstack *obst = get_irg_obstack(irg);
- arch_register_req_t *r = get_req(node, pos);
+ arch_register_req_t *r = get_be_req(node, pos);
assert(arch_register_req_is(req, limited));
assert(!(req->type & (arch_register_req_type_should_be_same | arch_register_req_type_must_be_different)));
r->limited = rbitset_duplicate_obstack_alloc(obst, req->limited, req->cls->n_regs);
}
-void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags)
+void be_node_set_reg_class_in(ir_node *irn, int pos, const arch_register_class_t *cls)
{
- be_req_t *bereq = get_be_req(irn, pos);
- bereq->flags = flags;
-}
+ arch_register_req_t *req = get_be_req(irn, pos);
-void be_node_add_flags(ir_node *irn, int pos, arch_irn_flags_t flags)
-{
- be_req_t *bereq = get_be_req(irn, pos);
- bereq->flags |= flags;
+ req->cls = cls;
+
+ if (cls == NULL) {
+ req->type = arch_register_req_type_none;
+ } else if (req->type == arch_register_req_type_none) {
+ req->type = arch_register_req_type_normal;
+ }
}
-void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)
+void be_node_set_reg_class_out(ir_node *irn, int pos, const arch_register_class_t *cls)
{
- arch_register_req_t *req = get_req(irn, pos);
+ arch_register_req_t *req = get_be_req(irn, BE_OUT_POS(pos));
req->cls = cls;
void be_node_set_req_type(ir_node *irn, int pos, arch_register_req_type_t type)
{
- arch_register_req_t *req = get_req(irn, pos);
+ arch_register_req_t *req = get_be_req(irn, pos);
req->type = type;
}
return a->align;
}
-ir_node *be_spill(const arch_env_t *arch_env, ir_node *block, ir_node *irn)
+ir_node *be_spill(ir_node *block, ir_node *irn)
{
ir_graph *irg = get_irn_irg(block);
ir_node *frame = get_irg_frame(irg);
- const arch_register_class_t *cls = arch_get_irn_reg_class(irn, -1);
- const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame, -1);
+ const arch_register_class_t *cls = arch_get_irn_reg_class_out(irn);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame);
ir_node *spill;
- (void)arch_env;
spill = be_new_Spill(cls, cls_frame, irg, block, frame, irn);
return spill;
}
-ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
+ir_node *be_reload(const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
{
ir_node *reload;
ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
ir_graph *irg = get_irn_irg(bl);
ir_node *frame = get_irg_frame(irg);
- const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame, -1);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame);
assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
reload = be_new_Reload(cls, cls_frame, irg, bl, frame, spill, mode);
if (is_Block(insert)) {
- insert = sched_skip(insert, 0, sched_skip_cf_predicator, (void *) arch_env);
+ insert = sched_skip(insert, 0, sched_skip_cf_predicator, NULL);
sched_add_after(insert, reload);
} else {
sched_add_before(insert, reload);
{
const be_node_attr_t *a = get_irn_attr_const(irn);
- if(out_pos >= ARR_LEN(a->reg_data)) {
+ if (out_pos >= ARR_LEN(a->reg_data)) {
return arch_no_register_req;
}
- return &a->reg_data[out_pos].req.req;
+ return &a->reg_data[out_pos].req;
}
static const
{
const be_node_attr_t *a = get_irn_attr_const(irn);
- if(pos >= get_irn_arity(irn) || pos >= ARR_LEN(a->reg_data))
+ if (pos >= get_irn_arity(irn) || pos >= ARR_LEN(a->reg_data))
return arch_no_register_req;
- return &a->reg_data[pos].in_req.req;
+ return &a->reg_data[pos].in_req;
}
static const arch_register_req_t *
if (get_irn_mode(irn) == mode_T)
return arch_no_register_req;
+ assert(pos == -1);
out_pos = redir_proj((const ir_node **)&irn);
assert(is_be_node(irn));
return get_out_reg_req(irn, out_pos);
* For spills and reloads, we return "none" as requirement for frame
* pointer, so every input is ok. Some backends need this (e.g. STA).
*/
- if ((be_is_Spill(irn) && pos == be_pos_Spill_frame) ||
- (be_is_Reload(irn) && pos == be_pos_Reload_frame))
+ if ((pos == be_pos_Spill_frame && be_is_Spill(irn)) ||
+ (pos == be_pos_Reload_frame && be_is_Reload(irn)))
return arch_no_register_req;
return get_in_reg_req(irn, pos);
return arch_no_register_req;
}
-const arch_register_t *
-be_node_get_irn_reg(const ir_node *irn)
-{
- be_reg_data_t *r;
-
- if (get_irn_mode(irn) == mode_T)
- return NULL;
- r = retrieve_reg_data(irn);
- return r->reg;
-}
-
static arch_irn_class_t be_node_classify(const ir_node *irn)
{
restart:
irn = get_Proj_pred(irn);
}
goto restart;
- break;
- default:
- return arch_irn_class_normal;
- }
- return 0;
-}
-
-static arch_irn_flags_t be_node_get_flags(const ir_node *node)
-{
- be_req_t *bereq;
- int pos = -1;
-
- if(is_Proj(node)) {
- pos = OUT_POS(get_Proj_proj(node));
- node = skip_Proj_const(node);
+ default:
+ return 0;
}
-
- bereq = get_be_req(node, pos);
-
- return bereq->flags;
}
static ir_entity *be_node_get_frame_entity(const ir_node *irn)
static const arch_irn_ops_t be_node_irn_ops = {
be_node_get_irn_reg_req,
- be_node_set_irn_reg,
- be_node_get_irn_reg,
be_node_classify,
- be_node_get_flags,
be_node_get_frame_entity,
be_node_set_frame_entity,
be_node_set_frame_offset,
arch_irn_flags_t flags;
} phi_attr_t;
-struct {
- arch_env_t *arch_env;
- pmap *phi_attrs;
+static struct {
+ pmap *phi_attrs;
} phi_handler;
#define get_phi_handler_from_ops(h) container_of(h, phi_handler_t, irn_ops)
-static INLINE
+static inline
phi_attr_t *get_Phi_attr(const ir_node *phi)
{
phi_attr_t *attr = pmap_get(phi_handler.phi_attrs, (void*) phi);
/* Matze: don't we unnecessary constraint our phis with this?
* we only need to take the regclass IMO*/
if(!is_Phi(op))
- return arch_get_register_req(op, BE_OUT_POS(0));
+ return arch_get_register_req_out(op);
}
/*
const arch_register_req_t *req;
req = get_Phi_reg_req_recursive(irn, &visited);
- memcpy(&attr->req, req, sizeof(req[0]));
+ attr->req = *req;
assert(attr->req.cls != NULL);
attr->req.type = arch_register_req_type_normal;
return &attr->req;
}
-void be_set_phi_reg_req(const arch_env_t *arch_env, ir_node *node,
- const arch_register_req_t *req)
+void be_set_phi_reg_req(ir_node *node, const arch_register_req_t *req,
+ arch_register_req_type_t additional_types)
{
phi_attr_t *attr;
- (void) arch_env;
assert(mode_is_datab(get_irn_mode(node)));
- attr = get_Phi_attr(node);
- memcpy(&attr->req, req, sizeof(req[0]));
+ attr = get_Phi_attr(node);
+ attr->req = *req;
+ attr->req.type |= additional_types;
}
-void be_set_phi_flags(const arch_env_t *arch_env, ir_node *node,
- arch_irn_flags_t flags)
+void be_set_phi_flags(ir_node *node, arch_irn_flags_t flags)
{
phi_attr_t *attr;
- (void) arch_env;
assert(mode_is_datab(get_irn_mode(node)));
attr->flags = flags;
}
-static void phi_set_irn_reg(ir_node *irn, const arch_register_t *reg)
-{
- phi_attr_t *attr = get_Phi_attr(irn);
- attr->reg = reg;
-}
-
-static const arch_register_t *phi_get_irn_reg(const ir_node *irn)
-{
- phi_attr_t *attr = get_Phi_attr(irn);
- return attr->reg;
-}
-
static arch_irn_class_t phi_classify(const ir_node *irn)
{
(void) irn;
- return arch_irn_class_normal;
-}
-
-static arch_irn_flags_t phi_get_flags(const ir_node *irn)
-{
- phi_attr_t *attr = get_Phi_attr(irn);
- return attr->flags;
+ return 0;
}
static ir_entity *phi_get_frame_entity(const ir_node *irn)
static const arch_irn_ops_t phi_irn_ops = {
phi_get_irn_reg_req,
- phi_set_irn_reg,
- phi_get_irn_reg,
phi_classify,
- phi_get_flags,
phi_get_frame_entity,
phi_set_frame_entity,
phi_set_frame_offset,
NULL, /* perform_memory_operand */
};
-void be_phi_handler_new(be_main_env_t *env)
+void be_phi_handler_new(void)
{
- phi_handler.arch_env = env->arch_env;
phi_handler.phi_attrs = pmap_create();
op_Phi->ops.be_ops = &phi_irn_ops;
}
static void dump_node_req(FILE *f, int idx, const arch_register_req_t *req,
const ir_node *node)
{
- int did_something = 0;
- char buf[16];
- const char *prefix = buf;
+ char tmp[256];
- snprintf(buf, sizeof(buf), "#%d ", idx);
- buf[sizeof(buf) - 1] = '\0';
+ if (req->cls == NULL) return;
- if(req->cls != 0) {
- char tmp[256];
- fprintf(f, prefix);
- arch_register_req_format(tmp, sizeof(tmp), req, node);
- fprintf(f, "%s", tmp);
- did_something = 1;
- }
-
- if(did_something)
- fprintf(f, "\n");
+ arch_register_req_format(tmp, sizeof(tmp), req, node);
+ fprintf(f, "#%d %s\n", idx, tmp);
}
/**
int len = ARR_LEN(a->reg_data);
fprintf(f, "registers: \n");
- for(i = 0; i < len; ++i) {
- be_reg_data_t *rd = &a->reg_data[i];
- if(rd->reg)
- fprintf(f, "#%d: %s\n", i, rd->reg->name);
+ for (i = 0; i < len; ++i) {
+ const arch_register_t *reg = arch_irn_get_register(node, i);
+ fprintf(f, "#%d: %s\n", i, reg != NULL ? reg->name : "n/a");
}
fprintf(f, "in requirements:\n");
- for(i = 0; i < len; ++i) {
- dump_node_req(f, i, &a->reg_data[i].in_req.req, node);
+ for (i = 0; i < len; ++i) {
+ dump_node_req(f, i, &a->reg_data[i].in_req, node);
}
fprintf(f, "\nout requirements:\n");
- for(i = 0; i < len; ++i) {
- dump_node_req(f, i, &a->reg_data[i].req.req, node);
+ for (i = 0; i < len; ++i) {
+ dump_node_req(f, i, &a->reg_data[i].req, node);
}
}
switch(reason) {
case dump_node_opcode_txt:
- fprintf(f, get_op_name(get_irn_op(irn)));
+ fputs(get_op_name(get_irn_op(irn)), f);
break;
case dump_node_mode_txt:
if(be_is_Perm(irn) || be_is_Copy(irn) || be_is_CopyKeep(irn)) {
const be_node_attr_t *old_attr = get_irn_attr_const(old_node);
be_node_attr_t *new_attr = get_irn_attr(new_node);
struct obstack *obst = get_irg_obstack(get_irn_irg(new_node));
+ backend_info_t *old_info = be_get_info(old_node);
+ backend_info_t *new_info = be_get_info(new_node);
unsigned i, len;
assert(is_be_node(old_node));
if(get_irn_op(old_node)->opar == oparity_dynamic
|| be_is_RegParams(old_node)) {
new_attr->reg_data = NEW_ARR_F(be_reg_data_t, len);
+ new_info->out_infos = NEW_ARR_F(reg_out_info_t, len);
} else {
new_attr->reg_data = NEW_ARR_D(be_reg_data_t, obst, len);
+ new_info->out_infos = NEW_ARR_D(reg_out_info_t, obst, len);
}
if(len > 0) {
memcpy(new_attr->reg_data, old_attr->reg_data, len * sizeof(be_reg_data_t));
+ memcpy(new_info->out_infos, old_info->out_infos, len * sizeof(new_info->out_infos[0]));
for(i = 0; i < len; ++i) {
const be_reg_data_t *rd = &old_attr->reg_data[i];
be_reg_data_t *newrd = &new_attr->reg_data[i];
- if(arch_register_req_is(&rd->req.req, limited)) {
- const arch_register_req_t *req = &rd->req.req;
- arch_register_req_t *new_req = &newrd->req.req;
+ if (arch_register_req_is(&rd->req, limited)) {
+ const arch_register_req_t *req = &rd->req;
+ arch_register_req_t *new_req = &newrd->req;
new_req->limited
= rbitset_duplicate_obstack_alloc(obst, req->limited, req->cls->n_regs);
}
- if(arch_register_req_is(&rd->in_req.req, limited)) {
- const arch_register_req_t *req = &rd->in_req.req;
- arch_register_req_t *new_req = &newrd->in_req.req;
+ if(arch_register_req_is(&rd->in_req, limited)) {
+ const arch_register_req_t *req = &rd->in_req;
+ arch_register_req_t *new_req = &newrd->in_req;
new_req->limited
= rbitset_duplicate_obstack_alloc(obst, req->limited, req->cls->n_regs);
}
return get_op_ops(get_irn_op(irn))->be_ops == &be_node_irn_ops;
}
-void be_node_init(void) {
- static int inited = 0;
-
- if(inited)
- return;
-
- inited = 1;
-
+void be_init_op(void)
+{
/* Acquire all needed opcodes. */
op_be_Spill = new_ir_op(beo_Spill, "be_Spill", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_Reload = new_ir_op(beo_Reload, "be_Reload", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_Perm = new_ir_op(beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_MemPerm = new_ir_op(beo_MemPerm, "be_MemPerm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops);
op_be_Copy = new_ir_op(beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_Keep = new_ir_op(beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_CopyKeep = new_ir_op(beo_CopyKeep, "be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_be_Keep = new_ir_op(beo_Keep, "be_Keep", op_pin_state_floats, K, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_be_CopyKeep = new_ir_op(beo_CopyKeep, "be_CopyKeep", op_pin_state_floats, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_Call = new_ir_op(beo_Call, "be_Call", op_pin_state_pinned, F|M, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
op_be_Return = new_ir_op(beo_Return, "be_Return", op_pin_state_pinned, X, oparity_dynamic, 0, sizeof(be_return_attr_t), &be_node_op_ops);
op_be_AddSP = new_ir_op(beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);