* @brief Backend node support for generic backend nodes.
* @author Sebastian Hack
* @date 17.05.2005
- * @version $Id$
*
* Backend node support for generic backend nodes.
* This file provides Perm, Copy, Spill and Reload nodes.
*/
be_set_constr_in(res, n_be_Spill_frame, arch_no_register_req);
- arch_set_out_register_req(res, 0, arch_no_register_req);
+ arch_set_irn_register_req_out(res, 0, arch_no_register_req);
return res;
}
be_node_set_reg_class_out(res, 0, cls);
be_node_set_reg_class_in(res, n_be_Reload_frame, cls_frame);
- arch_irn_set_flags(res, arch_irn_flags_rematerializable);
+ arch_set_irn_flags(res, arch_irn_flags_rematerializable);
a = (be_frame_attr_t*) get_irn_generic_attr(res);
a->ent = NULL;
attr = (be_node_attr_t*) get_irn_generic_attr(irn);
attr->exc.pin_state = op_pin_state_pinned;
for (i = 0; i < n; ++i) {
- be_node_set_reg_class_in(irn, i, cls);
- be_node_set_reg_class_out(irn, i, cls);
+ const ir_node *input = in[i];
+ const arch_register_req_t *req = arch_get_irn_register_req(input);
+ if (req->width == 1) {
+ be_set_constr_in(irn, i, cls->class_req);
+ be_set_constr_out(irn, i, cls->class_req);
+ } else {
+ arch_register_req_t *new_req = allocate_reg_req(irn);
+ new_req->cls = cls;
+ new_req->type = (req->type & arch_register_req_type_aligned);
+ new_req->width = req->width;
+ be_set_constr_in(irn, i, new_req);
+ be_set_constr_out(irn, i, new_req);
+ }
}
return irn;
return irn;
}
-ir_node *be_new_Copy(const arch_register_class_t *cls, ir_node *bl, ir_node *op)
+ir_node *be_new_Copy(ir_node *bl, ir_node *op)
{
ir_node *in[1];
ir_node *res;
arch_register_req_t *req;
be_node_attr_t *attr;
ir_graph *irg = get_Block_irg(bl);
+ const arch_register_req_t *in_req = arch_get_irn_register_req(op);
+ const arch_register_class_t *cls = in_req->cls;
in[0] = op;
res = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, in);
req = allocate_reg_req(res);
req->cls = cls;
- req->type = arch_register_req_type_should_be_same;
+ req->type = arch_register_req_type_should_be_same
+ | (in_req->type & arch_register_req_type_aligned);
req->other_same = 1U << 0;
- req->width = 1;
+ req->width = in_req->width;
be_set_constr_out(res, 0, req);
return res;
in[0] = old_sp;
irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode,
- sizeof(in) / sizeof(in[0]), in);
+ ARRAY_SIZE(in), in);
init_node_attr(irn, 1, 1);
a = (be_incsp_attr_t*)get_irn_generic_attr(irn);
a->offset = offset;
{
ir_node *irn;
ir_node *in[n_be_AddSP_last];
- const arch_register_class_t *cls;
ir_graph *irg;
be_node_attr_t *attr;
/* Set output constraint to stack register. */
be_set_constr_single_reg_in(irn, n_be_AddSP_old_sp, sp,
arch_register_req_type_none);
- be_node_set_reg_class_in(irn, n_be_AddSP_size, arch_register_get_class(sp));
+ be_node_set_reg_class_in(irn, n_be_AddSP_size, sp->reg_class);
be_set_constr_single_reg_out(irn, pn_be_AddSP_sp, sp,
arch_register_req_type_produces_sp);
- cls = arch_register_get_class(sp);
-
return irn;
}
/* Set output constraint to stack register. */
be_set_constr_single_reg_in(irn, n_be_SubSP_old_sp, sp,
arch_register_req_type_none);
- be_node_set_reg_class_in(irn, n_be_SubSP_size, arch_register_get_class(sp));
+ be_node_set_reg_class_in(irn, n_be_SubSP_size, sp->reg_class);
be_set_constr_single_reg_out(irn, pn_be_SubSP_sp, sp, arch_register_req_type_produces_sp);
return irn;
return attr->ent;
}
-ir_node *be_new_CopyKeep(const arch_register_class_t *cls, ir_node *bl, ir_node *src, int n, ir_node *in_keep[], ir_mode *mode)
+ir_node *be_new_CopyKeep(ir_node *bl, ir_node *src, int n, ir_node *in_keep[])
{
ir_node *irn;
ir_node **in = ALLOCAN(ir_node*, n + 1);
ir_graph *irg = get_Block_irg(bl);
+ const arch_register_req_t *req = arch_get_irn_register_req(src);
+ const arch_register_class_t *cls = req->cls;
+ ir_mode *mode = get_irn_mode(src);
be_node_attr_t *attr;
in[0] = src;
return irn;
}
-ir_node *be_new_CopyKeep_single(const arch_register_class_t *cls, ir_node *bl, ir_node *src, ir_node *keep, ir_mode *mode)
+ir_node *be_new_CopyKeep_single(ir_node *bl, ir_node *src, ir_node *keep)
{
- return be_new_CopyKeep(cls, bl, src, 1, &keep, mode);
+ return be_new_CopyKeep(bl, src, 1, &keep);
}
ir_node *be_get_CopyKeep_op(const ir_node *cpy)
if (additional_types == 0) {
req = reg->single_req;
} else {
- ir_graph *irg = get_irn_irg(node);
struct obstack *obst = be_get_be_obst(irg);
req = be_create_reg_req(obst, reg, additional_types);
}
- arch_irn_set_register(node, pos, reg);
+ arch_set_irn_register_out(node, pos, reg);
be_set_constr_out(node, pos, req);
}
return a->align;
}
-ir_node *be_spill(ir_node *block, ir_node *irn)
-{
- ir_graph *irg = get_Block_irg(block);
- ir_node *frame = get_irg_frame(irg);
- const arch_register_class_t *cls = arch_get_irn_reg_class_out(irn);
- const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame);
- ir_node *spill;
-
- spill = be_new_Spill(cls, cls_frame, block, frame, irn);
- return spill;
-}
-
-ir_node *be_reload(const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
-{
- ir_node *reload;
- ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
- ir_graph *irg = get_Block_irg(bl);
- ir_node *frame = get_irg_frame(irg);
- const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame);
-
- assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
-
- reload = be_new_Reload(cls, cls_frame, bl, frame, spill, mode);
-
- if (is_Block(insert)) {
- do {
- insert = sched_prev(insert);
- } while (is_cfop(insert));
- sched_add_after(insert, reload);
- } else {
- sched_add_before(insert, reload);
- }
-
- return reload;
-}
-
-
-static arch_irn_class_t be_node_classify(const ir_node *irn)
-{
- switch (get_irn_opcode(irn)) {
- case beo_Spill: return arch_irn_class_spill;
- case beo_Reload: return arch_irn_class_reload;
- case beo_Perm: return arch_irn_class_perm;
- case beo_Copy: return arch_irn_class_copy;
- default: return arch_irn_class_none;
- }
-}
-
static ir_entity *be_node_get_frame_entity(const ir_node *irn)
{
return be_get_frame_entity(irn);
/* for be nodes */
static const arch_irn_ops_t be_node_irn_ops = {
- be_node_classify,
be_node_get_frame_entity,
be_node_set_frame_offset,
be_node_get_sp_bias,
NULL, /* perform_memory_operand */
};
-static arch_irn_class_t dummy_classify(const ir_node *node)
+/**
+ * Find the Start node output index that produces the value of register @p reg.
+ *
+ * Scans all outputs of the Start node of @p irg for a requirement that is
+ * "limited" to a register set containing @p reg (same class, bit set in the
+ * limited bitset) and returns that output's index.
+ *
+ * @param irg  the graph whose Start node is queried
+ * @param reg  the register to look up
+ * @return     index of the matching Start output
+ * @note       panics if no Start output is constrained to @p reg
+ */
+static int get_start_reg_index(ir_graph *irg, const arch_register_t *reg)
+{
+ ir_node *start = get_irg_start(irg);
+ unsigned n_outs = arch_get_irn_n_outs(start);
+ int i;
+
+ /* do a naive linear search... */
+ for (i = 0; i < (int)n_outs; ++i) {
+ const arch_register_req_t *out_req
+ = arch_get_irn_register_req_out(start, i);
+ /* only "limited" requirements pin concrete registers */
+ if (! (out_req->type & arch_register_req_type_limited))
+ continue;
+ if (out_req->cls != arch_register_get_class(reg))
+ continue;
+ if (!rbitset_is_set(out_req->limited, reg->index))
+ continue;
+ return i;
+ }
+ panic("Tried querying undefined register '%s' at Start", reg->name);
+}
+
+ir_node *be_get_initial_reg_value(ir_graph *irg, const arch_register_t *reg)
{
- (void) node;
- return arch_irn_class_none;
+ int i = get_start_reg_index(irg, reg);
+ ir_node *start = get_irg_start(irg);
+ ir_mode *mode = arch_register_class_mode(arch_register_get_class(reg));
+ const ir_edge_t *edge;
+
+ foreach_out_edge(start, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+ if (!is_Proj(proj)) // maybe End/Anchor
+ continue;
+ if (get_Proj_proj(proj) == i) {
+ return proj;
+ }
+ }
+ return new_r_Proj(start, mode, i);
+}
+
+/**
+ * Find the input index of Return node @p ret that carries register @p reg.
+ *
+ * Scans all inputs of @p ret for a requirement that is "limited" to a
+ * register set containing @p reg (same class, bit set in the limited
+ * bitset) and returns that input's index.
+ *
+ * @param ret  the (backend) Return node to inspect
+ * @param reg  the register to look up
+ * @return     index of the matching Return input
+ * @note       panics if no input is constrained to @p reg
+ */
+int be_find_return_reg_input(ir_node *ret, const arch_register_t *reg)
+{
+ int arity = get_irn_arity(ret);
+ int i;
+ /* do a naive linear search... */
+ for (i = 0; i < arity; ++i) {
+ const arch_register_req_t *req = arch_get_irn_register_req_in(ret, i);
+ /* only "limited" requirements pin concrete registers */
+ if (! (req->type & arch_register_req_type_limited))
+ continue;
+ if (req->cls != arch_register_get_class(reg))
+ continue;
+ if (!rbitset_is_set(req->limited, reg->index))
+ continue;
+ return i;
+ }
+ panic("Tried querying undefined register '%s' at Return", reg->name);
}
static ir_entity* dummy_get_frame_entity(const ir_node *node)
/* for "middleend" nodes */
static const arch_irn_ops_t dummy_be_irn_ops = {
- dummy_classify,
dummy_get_frame_entity,
dummy_set_frame_offset,
dummy_get_sp_bias,
assert(mode_is_datab(get_irn_mode(node)));
}
-void be_dump_phi_reg_reqs(FILE *F, ir_node *node, dump_reason_t reason)
+void be_dump_phi_reg_reqs(FILE *F, const ir_node *node, dump_reason_t reason)
{
switch (reason) {
case dump_node_opcode_txt:
}
static const arch_irn_ops_t phi_irn_ops = {
- dummy_classify,
dummy_get_frame_entity,
dummy_set_frame_offset,
dummy_get_sp_bias,
/**
* ir_op-Operation: dump a be node to file
*/
-static void dump_node(FILE *f, ir_node *irn, dump_reason_t reason)
+static void dump_node(FILE *f, const ir_node *irn, dump_reason_t reason)
{
assert(is_be_node(irn));
op_be_Keep = new_ir_op(beo_Keep, "be_Keep", op_pin_state_exc_pinned, irop_flag_keep, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_CopyKeep = new_ir_op(beo_CopyKeep, "be_CopyKeep", op_pin_state_exc_pinned, irop_flag_keep, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_Call = new_ir_op(beo_Call, "be_Call", op_pin_state_exc_pinned, irop_flag_fragile|irop_flag_uses_memory, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
+ ir_op_set_memory_index(op_be_Call, n_be_Call_mem);
+ ir_op_set_fragile_indices(op_be_Call, pn_be_Call_X_regular, pn_be_Call_X_except);
op_be_Return = new_ir_op(beo_Return, "be_Return", op_pin_state_exc_pinned, irop_flag_cfopcode, oparity_dynamic, 0, sizeof(be_return_attr_t), &be_node_op_ops);
op_be_AddSP = new_ir_op(beo_AddSP, "be_AddSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_SubSP = new_ir_op(beo_SubSP, "be_SubSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);