* Copyright (C) 2005-2006 Universitaet Karlsruhe
* Released under the GPL
*/
-
#ifdef HAVE_CONFIG_H
-#include "config.h"
+#include <config.h>
#endif
#include <stdlib.h>
/** The generic be nodes attribute type. */
typedef struct {
- int max_reg_data;
be_reg_data_t *reg_data;
} be_node_attr_t;
* @return zero if both attributes are identically
*/
/**
 * Compare two be node attributes.
 *
 * @param a  the first attribute
 * @param b  the second attribute
 * @return zero if both attributes are identical, non-zero otherwise
 */
static int cmp_node_attr(be_node_attr_t *a, be_node_attr_t *b) {
	int i, len;

	/* Different number of register data entries -> cannot be equal. */
	if (ARR_LEN(a->reg_data) != ARR_LEN(b->reg_data))
		return 1;

	len = ARR_LEN(a->reg_data);
	for (i = 0; i < len; ++i) {
		/* NOTE(review): memcmp on whole structs also compares padding
		 * bytes; assumes the requirement structs are fully initialized
		 * (they are memset to 0 in init_node_attr). */
		if (a->reg_data[i].reg != b->reg_data[i].reg ||
			memcmp(&a->reg_data[i].in_req, &b->reg_data[i].in_req, sizeof(b->reg_data[i].in_req)) ||
			memcmp(&a->reg_data[i].req, &b->reg_data[i].req, sizeof(a->reg_data[i].req)))
			return 1;
	}

	return 0;
}
/**
op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_SubSP = new_ir_op(beo_base + beo_SubSP, "be_SubSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
- op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
+ op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
/**
 * Initialize the generic be node attribute of a node.
 * Zeroes the attribute, allocates the reg_data array on the graph's
 * obstack and resets every entry's register requirement to "none".
 *
 * @param irn           the node whose attribute is initialized
 * @param max_reg_data  number of register data entries to allocate
 * @return the initialized attribute
 */
static void *init_node_attr(ir_node* irn, int max_reg_data)
{
	ir_graph *irg        = get_irn_irg(irn);
	struct obstack *obst = get_irg_obstack(irg);
	be_node_attr_t *a    = get_irn_attr(irn);
	int i;

	/* Zero the whole attribute. The previous code wrote
	 * memset(a, 0, sizeof(get_op_attr_size(...))), which zeroes only
	 * sizeof(size_t) bytes - the size of the call's result type - not
	 * the attribute itself. */
	memset(a, 0, get_op_attr_size(get_irn_op(irn)));

	if (max_reg_data >= 0) {
		a->reg_data = NEW_ARR_D(be_reg_data_t, obst, max_reg_data);
		memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
	}

	for (i = 0; i < max_reg_data; ++i) {
		a->reg_data[i].req.req.cls  = NULL;
		a->reg_data[i].req.req.type = arch_register_req_type_none;
	}

	return a;
}
return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp;
}
-static int redir_proj(const ir_node **node, int pos)
+/**
+ * Skip Proj nodes and return their Proj numbers.
+ *
+ * If *node is a Proj or Proj(Proj) node, skip it.
+ *
+ * @param node points to the node to be skipped
+ *
+ * @return 0 if *node was no Proj node, its Proj number else.
+ */
+static int redir_proj(const ir_node **node)
{
const ir_node *n = *node;
assert(get_irn_mode(pred) == mode_T);
*pos = p;
res = get_irn_attr(pred);
- assert(p >= 0 && p < res->max_reg_data && "illegal proj number");
+ assert(p >= 0 && p < ARR_LEN(res->reg_data) && "illegal proj number");
}
- }
-
- else if(is_be_node(irn) && get_irn_mode(irn) != mode_T) {
+ } else if(is_be_node(irn) && get_irn_mode(irn) != mode_T) {
be_node_attr_t *a = get_irn_attr(irn);
- if(a->max_reg_data > 0) {
+ if(ARR_LEN(a->reg_data) > 0) {
res = a;
*pos = 0;
}
}
-ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *to_spill)
+ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
+ ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill)
{
be_frame_attr_t *a;
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 1, &to_spill);
+ in[0] = frame;
+ in[1] = to_spill;
+ res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in);
a = init_node_attr(res, 2);
a->ent = NULL;
a->offset = 0;
+ be_node_set_reg_class(res, be_pos_Spill_frame, cls_frame);
be_node_set_reg_class(res, be_pos_Spill_val, cls);
return res;
}
-ir_node *be_new_Reload(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *mem, ir_mode *mode)
+ir_node *be_new_Reload(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
+ ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *mem, ir_mode *mode)
{
- ir_node *res = new_ir_node(NULL, irg, bl, op_be_Reload, mode, 1, &mem);
+ ir_node *in[2];
+ ir_node *res;
+
+ in[0] = frame;
+ in[1] = mem;
+ res = new_ir_node(NULL, irg, bl, op_be_Reload, mode, 2, in);
+
init_node_attr(res, 2);
be_node_set_reg_class(res, -1, cls);
+ be_node_set_reg_class(res, be_pos_Reload_frame, cls_frame);
be_node_set_flags(res, -1, arch_irn_flags_rematerializable);
return res;
}
return get_irn_n(irn, be_pos_Reload_mem);
}
+ir_node *be_get_Reload_frame(const ir_node *irn)
+{
+ assert(be_is_Reload(irn));
+ return get_irn_n(irn, be_pos_Reload_frame);
+}
+
/** Return the value operand (the node being spilled) of a Spill node. */
ir_node *be_get_Spill_val(const ir_node *irn)
{
	assert(be_is_Spill(irn));
	return get_irn_n(irn, be_pos_Spill_val);
}
+ir_node *be_get_Spill_frame(const ir_node *irn)
+{
+ assert(be_is_Spill(irn));
+ return get_irn_n(irn, be_pos_Spill_frame);
+}
ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
{
int be_is_IncSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_IncSP ; }
int be_is_SetSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SetSP ; }
int be_is_AddSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_AddSP ; }
+int be_is_SubSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SubSP ; }
int be_is_RegParams (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_RegParams ; }
int be_is_StackParam (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_StackParam ; }
int be_is_FrameAddr (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameAddr ; }
assert(is_be_node(irn));
assert(!(pos >= 0) || pos < get_irn_arity(irn));
- assert(!(pos < 0) || -(pos + 1) <= a->max_reg_data);
+ assert(!(pos < 0) || -(pos + 1) <= ARR_LEN(a->reg_data));
return r;
}
/**
 * Set the register class requirement at position pos of a be node.
 * Passing cls == NULL clears the requirement back to "none"; a non-NULL
 * class upgrades a "none" requirement to "normal".
 *
 * @param irn  the be node
 * @param pos  operand position (>= 0) or output position (< 0, as -(pos+1))
 * @param cls  the register class, or NULL to clear the requirement
 */
void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls)
{
	be_req_t *r = get_req(irn, pos);

	r->req.cls = cls;

	if (cls == NULL)
		r->req.type = arch_register_req_type_none;
	else if (r->req.type == arch_register_req_type_none)
		r->req.type = arch_register_req_type_normal;
}
/**
 * Spill a node: create a Spill node for it in its block, using the
 * graph's frame pointer and the node's own register class.
 *
 * @param arch_env  the architecture environment
 * @param irn       the node to spill
 * @return the new Spill node
 */
ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)
{
	ir_node  *bl    = get_nodes_block(irn);
	ir_graph *irg   = get_irn_irg(bl);
	ir_node  *frame = get_irg_frame(irg);
	const arch_register_class_t *cls       = arch_get_irn_reg_class(arch_env, irn, -1);
	const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
	ir_node *spill;

	spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
	return spill;
}
/**
 * Create a Reload for a spilled value and insert it into the schedule.
 *
 * @param arch_env  the architecture environment
 * @param cls       register class of the reloaded value
 * @param insert    insertion point: a node (reload scheduled before it) or
 *                  a block (reload scheduled at the block end, after
 *                  skipping control flow nodes)
 * @param mode      mode of the reloaded value
 * @param spill     the Spill node (or a mode_M Phi of spills) to reload from
 * @return the new Reload node
 */
ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
{
	ir_node  *reload;
	ir_node  *bl    = is_Block(insert) ? insert : get_nodes_block(insert);
	ir_graph *irg   = get_irn_irg(bl);
	ir_node  *frame = get_irg_frame(irg);
	const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);

	assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));

	reload = be_new_Reload(cls, cls_frame, irg, bl, frame, spill, mode);

	if (is_Block(insert)) {
		/* Skip trailing control flow nodes so the reload lands before them. */
		insert = sched_skip(insert, 0, sched_skip_cf_predicator, (void *) arch_env);
		sched_add_after(insert, reload);
	} else {
		sched_add_before(insert, reload);
	}

	return reload;
}
{
const be_node_attr_t *a = get_irn_attr(irn);
- if(out_pos < a->max_reg_data) {
+ if(out_pos < ARR_LEN(a->reg_data)) {
memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0]));
if(be_is_Copy(irn)) {
req->type |= arch_register_req_type_should_be_same;
req->other_same = be_get_Copy_op(irn);
}
- }
- else {
+ } else {
req->type = arch_register_req_type_none;
req->cls = NULL;
}
{
const be_node_attr_t *a = get_irn_attr(irn);
- if(pos < get_irn_arity(irn) && pos < a->max_reg_data)
+ if(pos < get_irn_arity(irn) && pos < ARR_LEN(a->reg_data)) {
memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0]));
- else {
+ } else {
req->type = arch_register_req_type_none;
req->cls = NULL;
}
{
int out_pos = pos;
- if(pos < 0) {
- if(get_irn_mode(irn) == mode_T)
+ if (pos < 0) {
+ if (get_irn_mode(irn) == mode_T)
return NULL;
- out_pos = redir_proj((const ir_node **) &irn, pos);
+ out_pos = redir_proj((const ir_node **)&irn);
assert(is_be_node(irn));
return put_out_reg_req(req, irn, out_pos);
}
else {
- return is_be_node(irn) ? put_in_reg_req(req, irn, pos) : NULL;
+ if (is_be_node(irn)) {
+ /*
+ For spills and reloads, we return "none" as requirement for frame pointer,
+ so every input is ok. Some backends need this (e.g. STA). We use an arbitrary
+ large number as pos, so put_in_reg_req will return "none" as requirement.
+ */
+ if ((be_is_Spill(irn) && pos == be_pos_Spill_frame) ||
+ (be_is_Reload(irn) && pos == be_pos_Reload_frame))
+ return put_in_reg_req(req, irn, INT_MAX);
+ else
+ return put_in_reg_req(req, irn, pos);
+ }
+ return NULL;
}
return req;
static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn)
{
- redir_proj((const ir_node **) &irn, -1);
+ redir_proj((const ir_node **) &irn);
switch(be_get_irn_opcode(irn)) {
#define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b
/**
 * Return the irn ops for a node, skipping Projs first.
 *
 * @param self  the irn handler (unused)
 * @param irn   the node (Projs are skipped via redir_proj)
 * @return &be_node_irn_ops for be nodes, NULL otherwise
 */
const void *be_node_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn)
{
	redir_proj((const ir_node **) &irn);
	return is_be_node(irn) ? &be_node_irn_ops : NULL;
}
{
int i;
be_node_attr_t *a = get_irn_attr(irn);
+ int len = ARR_LEN(a->reg_data);
fprintf(f, "registers: \n");
- for(i = 0; i < a->max_reg_data; ++i) {
+ for(i = 0; i < len; ++i) {
be_reg_data_t *rd = &a->reg_data[i];
if(rd->reg)
fprintf(f, "#%d: %s\n", i, rd->reg->name);
}
fprintf(f, "in requirements\n");
- for(i = 0; i < a->max_reg_data; ++i) {
+ for(i = 0; i < len; ++i) {
dump_node_req(f, i, &a->reg_data[i].in_req);
}
fprintf(f, "\nout requirements\n");
- for(i = 0; i < a->max_reg_data; ++i) {
+ for(i = 0; i < len; ++i) {
dump_node_req(f, i, &a->reg_data[i].req);
}
}
{
be_node_attr_t *old_attr = get_irn_attr(old_node);
be_node_attr_t *new_attr = get_irn_attr(new_node);
- int i;
+ struct obstack *obst = get_irg_obstack(get_irn_irg(new_node));
+ int i, len;
assert(is_be_node(old_node));
assert(is_be_node(new_node));
memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
new_attr->reg_data = NULL;
- if(new_attr->max_reg_data > 0) {
- new_attr->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(get_irn_irg(new_node)), new_attr->max_reg_data);
- memcpy(new_attr->reg_data, old_attr->reg_data, new_attr->max_reg_data * sizeof(be_reg_data_t));
+ if(old_attr->reg_data != NULL)
+ len = ARR_LEN(old_attr->reg_data);
+ else
+ len = 0;
+
+ new_attr->reg_data = NEW_ARR_D(be_reg_data_t, obst, len);
+
+ if(len > 0) {
+ memcpy(new_attr->reg_data, old_attr->reg_data, len * sizeof(be_reg_data_t));
- for(i = 0; i < old_attr->max_reg_data; ++i) {
+ for(i = 0; i < len; ++i) {
be_req_t *r;
r = &new_attr->reg_data[i].req;