beo_RegParams,
beo_FrameAddr,
beo_Barrier,
- beo_Unwind,
/* last backend node number */
- beo_Last = beo_Unwind,
+ beo_Last = beo_Barrier,
/* first unfixed number. Dynamic node numbers start here */
iro_MaxOpcode
} ir_opcode;
}
-
-static unsigned TEMPLATE_get_n_reg_class(const void *self)
+static unsigned TEMPLATE_get_n_reg_class(void)
{
- (void) self;
return N_CLASSES;
}
-static const arch_register_class_t *TEMPLATE_get_reg_class(const void *self,
- unsigned i)
+static const arch_register_class_t *TEMPLATE_get_reg_class(unsigned i)
{
- (void) self;
assert(i < N_CLASSES);
return &TEMPLATE_reg_classes[i];
}
* @param mode The mode in question.
* @return A register class which can hold values of the given mode.
*/
-const arch_register_class_t *TEMPLATE_get_reg_class_for_mode(const void *self,
- const ir_mode *mode)
+const arch_register_class_t *TEMPLATE_get_reg_class_for_mode(const ir_mode *mode)
{
- (void) self;
if (mode_is_float(mode))
return &TEMPLATE_reg_classes[CLASS_TEMPLATE_floating_point];
else
/**
* Returns the necessary byte alignment for storing a register of given class.
*/
-static int TEMPLATE_get_reg_class_alignment(const void *self,
- const arch_register_class_t *cls) {
+static int TEMPLATE_get_reg_class_alignment(const arch_register_class_t *cls)
+{
ir_mode *mode = arch_register_class_mode(cls);
- (void) self;
return get_mode_size_bytes(mode);
}
}
static const be_execution_unit_t ***TEMPLATE_get_allowed_execution_units(
- const void *self, const ir_node *irn)
+ const ir_node *irn)
{
- (void) self;
(void) irn;
/* TODO */
assert(0);
return NULL;
}
-static asm_constraint_flags_t TEMPLATE_parse_asm_constraint(const void *self,
- const char **c)
+static asm_constraint_flags_t TEMPLATE_parse_asm_constraint(const char **c)
{
- (void) self;
(void) c;
return ASM_CONSTRAINT_FLAG_INVALID;
}
-static int TEMPLATE_is_valid_clobber(const void *self, const char *clobber)
+static int TEMPLATE_is_valid_clobber(const char *clobber)
{
- (void) self;
(void) clobber;
return 0;
}
if (be_get_Proj_for_pn(node, pn_Load_res) == NULL) {
/* add a result proj and a Keep to produce a pseudo use */
ir_node *proj = new_r_Proj(block, new_load, mode_Iu, pn_arm_Load_res);
- be_new_Keep(arch_get_irn_reg_class_out(proj), block, 1, &proj);
+ be_new_Keep(block, 1, &proj);
}
return new_load;
* here to speed up register allocation (and makes dumps
* smaller and more readable).
*/
-static unsigned arm_get_n_reg_class(const void *self) {
- (void) self;
+static unsigned arm_get_n_reg_class(void) {
return N_CLASSES;
}
/**
* Return the register class with requested index.
*/
-static const arch_register_class_t *arm_get_reg_class(const void *self,
- unsigned i) {
- (void) self;
+static const arch_register_class_t *arm_get_reg_class(unsigned i) {
assert(i < N_CLASSES);
return &arm_reg_classes[i];
}
* @param mode The mode in question.
* @return A register class which can hold values of the given mode.
*/
-const arch_register_class_t *arm_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
- (void) self;
+const arch_register_class_t *arm_get_reg_class_for_mode(const ir_mode *mode) {
if (mode_is_float(mode))
return &arm_reg_classes[CLASS_arm_fpa];
else
/**
* Returns the necessary byte alignment for storing a register of given class.
*/
-static int arm_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
- (void) self;
+static int arm_get_reg_class_alignment(const arch_register_class_t *cls)
+{
(void) cls;
/* ARM is a 32 bit CPU, no need for other alignment */
return 4;
}
-static const be_execution_unit_t ***arm_get_allowed_execution_units(const void *self, const ir_node *irn) {
- (void) self;
+static const be_execution_unit_t ***arm_get_allowed_execution_units(const ir_node *irn) {
(void) irn;
/* TODO */
panic("Unimplemented arm_get_allowed_execution_units()");
return 1;
}
-static asm_constraint_flags_t arm_parse_asm_constraint(const void *self, const char **c)
+static asm_constraint_flags_t arm_parse_asm_constraint(const char **c)
{
/* asm not supported */
- (void) self;
(void) c;
return ASM_CONSTRAINT_FLAG_INVALID;
}
-static int arm_is_valid_clobber(const void *self, const char *clobber)
+static int arm_is_valid_clobber(const char *clobber)
{
- (void) self;
(void) clobber;
return 0;
}
/* create the Keep for the caller save registers */
in = (ir_node **) obstack_finish(obst);
- keep = be_new_Keep(NULL, bl, n, in);
+ keep = be_new_Keep(bl, n, in);
for (i = 0; i < n; ++i) {
const arch_register_t *reg = get_irn_link(in[i]);
be_node_set_reg_class_in(keep, i, reg->reg_class);
if (curr_sp != env->init_sp &&
!(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
nodes[0] = curr_sp;
- keep = be_new_Keep(env->arch_env->sp->reg_class, bl, 1, nodes);
+ keep = be_new_Keep(bl, 1, nodes);
pmap_insert(env->keep_map, bl, keep);
}
}
obstack_free(&env->obst, in);
for (n = 0; n < n_regs; ++n) {
- ir_node *pred = rm[n].irn;
- const arch_register_t *reg = rm[n].reg;
- arch_register_type_t add_type = 0;
- ir_node *proj;
+ ir_node *pred = rm[n].irn;
+ const arch_register_t *reg = rm[n].reg;
+ arch_register_type_t add_type = 0;
+ ir_node *proj;
/* stupid workaround for now... as not all nodes report register
* requirements. */
struct arch_register_t {
const char *name; /**< The name of the register. */
const arch_register_class_t *reg_class; /**< The class the register belongs to. */
- unsigned index; /**< The index of the register in the class. */
- arch_register_type_t type; /**< The type of the register. */
+ unsigned index; /**< The index of the register in the class. */
+ arch_register_type_t type; /**< The type of the register. */
+ const arch_register_req_t *single_req;
};
static inline const arch_register_class_t *
return reg->name;
}
-#define arch_register_get_class(reg) _arch_register_get_class(reg)
-#define arch_register_get_index(reg) _arch_register_get_index(reg)
-#define arch_register_get_name(reg) _arch_register_get_name(reg)
+#define arch_register_get_class(reg) _arch_register_get_class(reg)
+#define arch_register_get_index(reg) _arch_register_get_index(reg)
+#define arch_register_get_name(reg) _arch_register_get_name(reg)
/**
* Convenience macro to check for register type.
* Like general purpose or floating point.
*/
struct arch_register_class_t {
- unsigned index; /**< index of this register class */
- const char *name; /**< The name of the register class.*/
- unsigned n_regs; /**< Number of registers in this
- class. */
- ir_mode *mode; /**< The mode of the register class.*/
- const arch_register_t *regs; /**< The array of registers. */
- arch_register_class_flags_t flags; /**< register class flags. */
+ unsigned index; /**< index of this register class */
+ const char *name; /**< The name of the register class.*/
+ unsigned n_regs; /**< Number of registers in this
+ class. */
+ ir_mode *mode; /**< The mode of the register class.*/
+ const arch_register_t *regs; /**< The array of registers. */
+ arch_register_class_flags_t flags; /**< register class flags. */
+ const arch_register_req_t *class_req;
};
/** return the number of registers in this register class */
* Get the the number of register classes in the isa.
* @return The number of register classes.
*/
- unsigned (*get_n_reg_class)(const void *self);
+ unsigned (*get_n_reg_class)(void);
/**
* Get the i-th register class.
* @param i The number of the register class.
* @return The register class.
*/
- const arch_register_class_t *(*get_reg_class)(const void *self, unsigned i);
+ const arch_register_class_t *(*get_reg_class)(unsigned i);
/**
* Get the register class which shall be used to store a value of a given mode.
* @param mode The mode in question.
* @return A register class which can hold values of the given mode.
*/
- const arch_register_class_t *(*get_reg_class_for_mode)(const void *self, const ir_mode *mode);
+ const arch_register_class_t *(*get_reg_class_for_mode)(const ir_mode *mode);
/**
* Get the ABI restrictions for procedure calls.
* @param cls The register class.
* @return The alignment in bytes.
*/
- int (*get_reg_class_alignment)(const void *self, const arch_register_class_t *cls);
+ int (*get_reg_class_alignment)(const arch_register_class_t *cls);
/**
* A "static" function, returns the frontend settings
* NULL
* };
*/
- const be_execution_unit_t ***(*get_allowed_execution_units)(const void *self, const ir_node *irn);
+ const be_execution_unit_t ***(*get_allowed_execution_units)(const ir_node *irn);
/**
* Return the abstract machine for this isa.
/**
* mark node as rematerialized
*/
- void (*mark_remat)(const void *self, ir_node *node);
+ void (*mark_remat)(ir_node *node);
/**
* parse an assembler constraint part and set flags according to its nature
* advances the *c pointer to point to the last parsed character (so if you
* parse a single character don't advance c)
*/
- asm_constraint_flags_t (*parse_asm_constraint)(const void *self, const char **c);
+ asm_constraint_flags_t (*parse_asm_constraint)(const char **c);
/**
* returns true if the string is a valid clobbered (register) in this
* backend
*/
- int (*is_valid_clobber)(const void *self, const char *clobber);
+ int (*is_valid_clobber)(const char *clobber);
};
#define arch_env_done(env) ((env)->impl->done(env))
#define arch_env_handle_intrinsics(env) \
do { if((env)->impl->handle_intrinsics != NULL) (env)->impl->handle_intrinsics(); } while(0)
-#define arch_env_get_n_reg_class(env) ((env)->impl->get_n_reg_class(env))
-#define arch_env_get_reg_class(env,i) ((env)->impl->get_reg_class(env, i))
-#define arch_env_get_reg_class_for_mode(env,mode) ((env)->impl->get_reg_class_for_mode((env), (mode)))
+#define arch_env_get_n_reg_class(env) ((env)->impl->get_n_reg_class())
+#define arch_env_get_reg_class(env,i) ((env)->impl->get_reg_class(i))
+#define arch_env_get_reg_class_for_mode(env,mode) ((env)->impl->get_reg_class_for_mode((mode)))
#define arch_env_get_call_abi(env,tp,abi) ((env)->impl->get_call_abi((env), (tp), (abi)))
#define arch_env_get_code_generator_if(env) ((env)->impl->get_code_generator_if((env)))
#define arch_env_get_list_sched_selector(env,selector) ((env)->impl->get_list_sched_selector((env), (selector)))
#define arch_env_get_ilp_sched_selector(env) ((env)->impl->get_ilp_sched_selector(env))
-#define arch_env_get_reg_class_alignment(env,cls) ((env)->impl->get_reg_class_alignment((env), (cls)))
+#define arch_env_get_reg_class_alignment(env,cls) ((env)->impl->get_reg_class_alignment((cls)))
#define arch_env_get_params(env) ((env)->impl->get_params())
-#define arch_env_get_allowed_execution_units(env,irn) ((env)->impl->get_allowed_execution_units((env), (irn)))
+#define arch_env_get_allowed_execution_units(env,irn) ((env)->impl->get_allowed_execution_units((irn)))
#define arch_env_get_machine(env) ((env)->impl->get_machine(env))
#define arch_env_get_backend_irg_list(env,irgs) ((env)->impl->get_backend_irg_list((env), (irgs)))
-#define arch_env_parse_asm_constraint(env,c) ((env)->impl->parse_asm_constraint((env), (c))
-#define arch_env_is_valid_clobber(env,clobber) ((env)->impl->is_valid_clobber((env), (clobber))
+#define arch_env_parse_asm_constraint(env,c)         ((env)->impl->parse_asm_constraint((c)))
+#define arch_env_is_valid_clobber(env,clobber)       ((env)->impl->is_valid_clobber((clobber)))
#define arch_env_mark_remat(env,node) \
- do { if ((env)->impl->mark_remat != NULL) (env)->impl->mark_remat((env), (node)); } while(0)
+ do { if ((env)->impl->mark_remat != NULL) (env)->impl->mark_remat((node)); } while(0)
/**
* ISA base class.
coalesce_blocks(&env);
start_entry = finish_block_schedule(&env);
- block_list = create_blocksched_array(&env, start_entry, env.blockcount, get_irg_obstack(irg));
+ block_list = create_blocksched_array(&env, start_entry, env.blockcount,
+ be_get_birg_obst(irg));
DEL_ARR_F(env.edges);
obstack_free(&obst, NULL);
coalesce_blocks_ilp(&env);
start_entry = finish_block_schedule(&env.env);
- block_list = create_blocksched_array(&env.env, start_entry, env.env.blockcount, get_irg_obstack(irg));
+ block_list = create_blocksched_array(&env.env, start_entry,
+ env.env.blockcount,
+ be_get_birg_obst(irg));
DEL_ARR_F(env.ilpedges);
free_lpp(env.lpp);
* since ignore-nodes are not Perm'ed.
*/
if (op->has_constraints && is_Proj(proj) && get_Proj_pred(proj) == perm) {
- be_set_constr_limited(perm, BE_OUT_POS(get_Proj_proj(proj)), op->req);
+ be_set_constr_out(perm, get_Proj_proj(proj), op->req);
}
}
if (is_Proj(node)) // Projs need no be info, their tuple holds all information
return;
- obst = get_irg_obstack(current_ir_graph);
- info = OALLOCZ(obst, backend_info_t);
+ obst = be_get_birg_obst(current_ir_graph);
+ info = OALLOCZ(obst, backend_info_t);
if (is_Phi(node)) {
info->out_infos = NEW_ARR_D(reg_out_info_t, obst, 1);
static void new_Phi_copy_attr(const ir_node *old_node, ir_node *new_node)
{
- struct obstack *obst = get_irg_obstack(get_irn_irg(new_node));
backend_info_t *old_info = be_get_info(old_node);
backend_info_t *new_info = be_get_info(new_node);
+ *new_info = *old_info;
+
old_phi_copy_attr(old_node, new_node);
- new_info->out_infos = DUP_ARR_D(reg_out_info_t, obst, old_info->out_infos);
}
-int be_info_equal(const ir_node *node1, const ir_node *node2)
+int be_infos_equal(const backend_info_t *info1, const backend_info_t *info2)
{
- backend_info_t *info1 = be_get_info(node1);
- backend_info_t *info2 = be_get_info(node2);
int len = ARR_LEN(info1->out_infos);
int i;
if (ARR_LEN(info2->out_infos) != len)
- return 0;
+ return false;
for (i = 0; i < len; ++i) {
const reg_out_info_t *out1 = &info1->out_infos[i];
const reg_out_info_t *out2 = &info2->out_infos[i];
if (out1->reg != out2->reg)
- return 0;
+ return false;
if (!reg_reqs_equal(out1->req, out2->req))
- return 0;
+ return false;
}
- /* TODO: in reqs */
+ return true;
+}
- return 1;
+int be_nodes_equal(const ir_node *node1, const ir_node *node2)
+{
+ backend_info_t *info1 = be_get_info(node1);
+ backend_info_t *info2 = be_get_info(node2);
+ return be_infos_equal(info1, info2);
}
static void init_walker(ir_node *node, void *data)
static inline backend_info_t *be_get_info(const ir_node *node)
{
- backend_info_t *info = node->backend_info;
- return info;
+ return (backend_info_t*) node->backend_info;
}
void be_info_init(void);
void be_info_duplicate(const ir_node *old_node, ir_node *new_node);
int be_info_initialized(const ir_graph *irg);
-int be_info_equal(const ir_node *node1, const ir_node *node2);
+int be_nodes_equal(const ir_node *node1, const ir_node *node2);
+int be_infos_equal(const backend_info_t *info1, const backend_info_t *info2);
#endif
be_liveness_free(birg->lv);
birg->lv = NULL;
}
+
+ obstack_free(&birg->obst, NULL);
+ birg->irg->be_data = NULL;
}
#include "be.h"
#include "be_types.h"
#include "be_t.h"
+#include "irtypes.h"
be_lv_t *be_assure_liveness(be_irg_t *birg);
ir_exec_freq *exec_freq;
be_dom_front_info_t *dom_front;
be_lv_t *lv;
+ struct obstack obst; /**< birg obstack (mainly used to keep
+ register constraints which we can't keep
+                                         in the irg obst, because it gets replaced
+ during code selection) */
};
-static inline be_lv_t *
-be_get_birg_liveness(const be_irg_t *birg) {
+static inline be_lv_t *be_get_birg_liveness(const be_irg_t *birg)
+{
return birg->lv;
}
-static inline ir_exec_freq *
-be_get_birg_exec_freq(const be_irg_t *birg) {
+static inline ir_exec_freq *be_get_birg_exec_freq(const be_irg_t *birg)
+{
return birg->exec_freq;
}
-static inline be_dom_front_info_t *
-be_get_birg_dom_front(const be_irg_t *birg) {
+static inline be_dom_front_info_t *be_get_birg_dom_front(const be_irg_t *birg)
+{
return birg->dom_front;
}
-static inline ir_graph *
-be_get_birg_irg(const be_irg_t *birg) {
+static inline ir_graph *be_get_birg_irg(const be_irg_t *birg)
+{
return birg->irg;
}
-static inline const arch_env_t *
-be_get_birg_arch_env(const be_irg_t *birg) {
+static inline const arch_env_t *be_get_birg_arch_env(const be_irg_t *birg)
+{
return birg->main_env->arch_env;
}
+static inline be_irg_t *be_birg_from_irg(const ir_graph *irg)
+{
+ return (be_irg_t*) irg->be_data;
+}
+
+static inline struct obstack *be_get_birg_obst(const ir_graph *irg)
+{
+ be_irg_t *birg = be_birg_from_irg(irg);
+ return &birg->obst;
+}
+
#endif /* FIRM_BE_BEIRG_H */
in[0] = irn;
in[1] = cpy;
- keep = be_new_Keep(cls, block, 2, in);
+ keep = be_new_Keep(block, 2, in);
}
DB((dbg_constr, LEVEL_1, "created %+F(%+F, %+F)\n\n", keep, irn, cpy));
/* so we transform unnecessary ones into Keeps. */
foreach_ir_nodeset(&entry->copies, cp, iter) {
if (be_is_CopyKeep(cp) && get_irn_n_edges(cp) < 1) {
- const arch_register_class_t *cls = arch_get_irn_reg_class_out(cp);
- int n = get_irn_arity(cp);
- ir_node *keep;
+ int n = get_irn_arity(cp);
+ ir_node *keep;
- keep = be_new_Keep(cls, get_nodes_block(cp), n, get_irn_in(cp) + 1);
+ keep = be_new_Keep(get_nodes_block(cp), n, get_irn_in(cp) + 1);
sched_add_before(cp, keep);
/* Set all ins (including the block) of the CopyKeep BAD to keep the verifier happy. */
if (tflags != 0) {
flags |= tflags;
} else {
- flags |= isa_if->parse_asm_constraint(isa_if, &c);
+ flags |= isa_if->parse_asm_constraint(&c);
}
break;
}
if (strcmp(clobber, "cc") == 0)
return 1;
- return isa_if->is_valid_clobber(isa_if, clobber);
+ return isa_if->is_valid_clobber(clobber);
}
void be_register_isa_if(const char *name, const arch_isa_if_t *isa)
memset(birg, 0, sizeof(*birg));
birg->irg = irg;
birg->main_env = env;
+ obstack_init(&birg->obst);
edges_deactivate_kind(irg, EDGE_KIND_DEP);
edges_activate_kind(irg, EDGE_KIND_DEP);
/* First: initialize all birgs */
for(i = 0; i < num_birgs; ++i) {
ir_graph *irg = backend_irg_list ? backend_irg_list[i] : get_irp_irg(i);
+ irg->be_data = &birgs[i];
initialize_birg(&birgs[i], irg, &env);
}
arch_env_handle_intrinsics(arch_env);
#define get_irn_attr_const(irn) get_irn_generic_attr_const(irn)
typedef struct {
- arch_register_req_t req;
- arch_register_req_t in_req;
+ const arch_register_req_t *in_req;
} be_reg_data_t;
/** The generic be nodes attribute type. */
ir_op *op_be_RegParams;
ir_op *op_be_FrameAddr;
ir_op *op_be_Barrier;
-ir_op *op_be_Unwind;
static const ir_op_ops be_node_op_ops;
*
* @return zero if both attributes are identically
*/
-static int _node_cmp_attr(const be_node_attr_t *a, const be_node_attr_t *b) {
- int i, len = ARR_LEN(a->reg_data);
+static int node_cmp_attr(ir_node *a, ir_node *b)
+{
+ const be_node_attr_t *a_attr = get_irn_attr_const(a);
+ const be_node_attr_t *b_attr = get_irn_attr_const(b);
+ int i, len = ARR_LEN(a_attr->reg_data);
- if (len != ARR_LEN(b->reg_data))
+ if (len != ARR_LEN(b_attr->reg_data))
+ return 1;
+
+ if (!be_nodes_equal(a, b))
return 1;
for (i = len - 1; i >= 0; --i) {
- if (!reg_reqs_equal(&a->reg_data[i].in_req, &b->reg_data[i].in_req) ||
- !reg_reqs_equal(&a->reg_data[i].req, &b->reg_data[i].req))
+ if (!reg_reqs_equal(a_attr->reg_data[i].in_req,
+ b_attr->reg_data[i].in_req))
return 1;
}
return 0;
}
-/**
- * Compare the node attributes of two be_node's.
- *
- * @return zero if both nodes have identically attributes
- */
-static int node_cmp_attr(ir_node *a, ir_node *b) {
- const be_node_attr_t *a_attr = get_irn_attr_const(a);
- const be_node_attr_t *b_attr = get_irn_attr_const(b);
-
- if (_node_cmp_attr(a_attr, b_attr) != 0)
- return 1;
-
- return !be_info_equal(a, b);
-}
-
/**
* Compare the attributes of two be_FrameAddr nodes.
*
* @return zero if both nodes have identically attributes
*/
-static int FrameAddr_cmp_attr(ir_node *a, ir_node *b) {
+static int FrameAddr_cmp_attr(ir_node *a, ir_node *b)
+{
const be_frame_attr_t *a_attr = get_irn_attr_const(a);
const be_frame_attr_t *b_attr = get_irn_attr_const(b);
if (a_attr->ent != b_attr->ent || a_attr->offset != b_attr->offset)
return 1;
- return _node_cmp_attr(&a_attr->node_attr, &b_attr->node_attr);
+ return node_cmp_attr(a, b);
}
/**
*
* @return zero if both nodes have identically attributes
*/
-static int Return_cmp_attr(ir_node *a, ir_node *b) {
+static int Return_cmp_attr(ir_node *a, ir_node *b)
+{
const be_return_attr_t *a_attr = get_irn_attr_const(a);
const be_return_attr_t *b_attr = get_irn_attr_const(b);
if (a_attr->emit_pop != b_attr->emit_pop)
return 1;
- return _node_cmp_attr(&a_attr->node_attr, &b_attr->node_attr);
+ return node_cmp_attr(a, b);
}
/**
if (a_attr->offset != b_attr->offset)
return 1;
- return _node_cmp_attr(&a_attr->node_attr, &b_attr->node_attr);
+ return node_cmp_attr(a, b);
}
/**
*
* @return zero if both nodes have identically attributes
*/
-static int Call_cmp_attr(ir_node *a, ir_node *b) {
+static int Call_cmp_attr(ir_node *a, ir_node *b)
+{
const be_call_attr_t *a_attr = get_irn_attr_const(a);
const be_call_attr_t *b_attr = get_irn_attr_const(b);
a_attr->call_tp != b_attr->call_tp)
return 1;
- return _node_cmp_attr(&a_attr->node_attr, &b_attr->node_attr);
+ return node_cmp_attr(a, b);
}
-static inline arch_register_req_t *get_be_req(const ir_node *node, int pos)
+static arch_register_req_t *allocate_reg_req(const ir_node *node)
{
- int idx;
- const be_node_attr_t *attr;
- be_reg_data_t *rd;
+ ir_graph *irg = get_irn_irg(node);
+ struct obstack *obst = be_get_birg_obst(irg);
- assert(is_be_node(node));
- attr = get_irn_attr_const(node);
+ arch_register_req_t *req = obstack_alloc(obst, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ return req;
+}
- if (pos < 0) {
- idx = -(pos + 1);
- } else {
- idx = pos;
- assert(idx < get_irn_arity(node));
- }
- assert(idx < ARR_LEN(attr->reg_data));
- rd = &attr->reg_data[idx];
+void be_set_constr_in(ir_node *node, int pos, const arch_register_req_t *req)
+{
+	const be_node_attr_t *attr = get_irn_attr_const(node);
+	be_reg_data_t        *rd;
+	assert(pos < ARR_LEN(attr->reg_data));
+	rd         = &attr->reg_data[pos];
+	rd->in_req = req;
+}
- return pos < 0 ? &rd->req : &rd->in_req;
+void be_set_constr_out(ir_node *node, int pos, const arch_register_req_t *req)
+{
+ backend_info_t *info = be_get_info(node);
+ info->out_infos[pos].req = req;
}
/**
* Initializes the generic attribute of all be nodes and return it.
*/
-static void *init_node_attr(ir_node *node, int max_reg_data)
+static void *init_node_attr(ir_node *node, int n_inputs, int n_outputs)
{
- ir_graph *irg = get_irn_irg(node);
- struct obstack *obst = get_irg_obstack(irg);
- be_node_attr_t *a = get_irn_attr(node);
+ ir_graph *irg = get_irn_irg(node);
+ struct obstack *obst = be_get_birg_obst(irg);
+ be_node_attr_t *a = get_irn_attr(node);
+ backend_info_t *info = be_get_info(node);
memset(a, 0, sizeof(get_op_attr_size(get_irn_op(node))));
- if(max_reg_data >= 0) {
- backend_info_t *info = be_get_info(node);
- info->out_infos = NEW_ARR_D(reg_out_info_t, obst, max_reg_data);
- memset(info->out_infos, 0, max_reg_data * sizeof(info->out_infos[0]));
+ if (n_inputs >= 0) {
+ int i;
+ a->reg_data = NEW_ARR_D(be_reg_data_t, obst, n_inputs);
+ for (i = 0; i < n_inputs; ++i) {
+ a->reg_data[i].in_req = arch_no_register_req;
+ }
+ } else {
+ a->reg_data = NEW_ARR_F(be_reg_data_t, 0);
+ }
- a->reg_data = NEW_ARR_D(be_reg_data_t, obst, max_reg_data);
- memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0]));
+ if (n_outputs >= 0) {
+ int i;
+ info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_outputs);
+ memset(info->out_infos, 0, n_outputs * sizeof(info->out_infos[0]));
+ for (i = 0; i < n_outputs; ++i) {
+ info->out_infos[i].req = arch_no_register_req;
+ }
} else {
- backend_info_t *info = be_get_info(node);
info->out_infos = NEW_ARR_F(reg_out_info_t, 0);
-
- a->reg_data = NEW_ARR_F(be_reg_data_t, 0);
}
return a;
}
-static void add_register_req(ir_node *node)
+static void add_register_req_out(ir_node *node)
{
backend_info_t *info = be_get_info(node);
+ reg_out_info_t out_info;
+ memset(&out_info, 0, sizeof(out_info));
+ out_info.req = arch_no_register_req;
+ ARR_APP1(reg_out_info_t, info->out_infos, out_info);
+}
+
+static void add_register_req_in(ir_node *node)
+{
be_node_attr_t *a = get_irn_attr(node);
be_reg_data_t regreq;
- reg_out_info_t out_info;
	memset(&regreq, 0, sizeof(regreq));
- memset(&out_info, 0, sizeof(out_info));
+ regreq.in_req = arch_no_register_req;
ARR_APP1(be_reg_data_t, a->reg_data, regreq);
- ARR_APP1(reg_out_info_t, info->out_infos, out_info);
}
-ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
- ir_node *bl, ir_node *frame, ir_node *to_spill)
+ir_node *be_new_Spill(const arch_register_class_t *cls,
+ const arch_register_class_t *cls_frame, ir_node *bl,
+ ir_node *frame, ir_node *to_spill)
{
be_frame_attr_t *a;
ir_node *in[2];
in[0] = frame;
in[1] = to_spill;
res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in);
- a = init_node_attr(res, 2);
+ a = init_node_attr(res, 2, 1);
a->ent = NULL;
a->offset = 0;
* Matze: we should investigate if this is really needed, this solution
* looks very hacky to me
*/
- be_node_set_reg_class_in(res, be_pos_Spill_frame, NULL);
+ be_set_constr_in(res, be_pos_Spill_frame, arch_no_register_req);
return res;
}
in[1] = mem;
res = new_ir_node(NULL, irg, block, op_be_Reload, mode, 2, in);
- init_node_attr(res, 2);
+ init_node_attr(res, 2, 2);
be_node_set_reg_class_out(res, 0, cls);
be_node_set_reg_class_in(res, be_pos_Reload_frame, cls_frame);
arch_irn_set_flags(res, arch_irn_flags_rematerializable);
* Matze: we should investigate if this is really needed, this solution
* looks very hacky to me
*/
- be_node_set_reg_class_in(res, be_pos_Reload_frame, NULL);
+ be_set_constr_in(res, be_pos_Reload_frame, arch_no_register_req);
return res;
}
return get_irn_n(irn, be_pos_Spill_frame);
}
-ir_node *be_new_Perm(const arch_register_class_t *cls, ir_node *block, int n, ir_node *in[])
+ir_node *be_new_Perm(const arch_register_class_t *cls, ir_node *block,
+ int n, ir_node *in[])
{
int i;
ir_graph *irg = get_Block_irg(block);
ir_node *irn = new_ir_node(NULL, irg, block, op_be_Perm, mode_T, n, in);
- init_node_attr(irn, n);
+ init_node_attr(irn, n, n);
for (i = 0; i < n; ++i) {
be_node_set_reg_class_in(irn, i, cls);
be_node_set_reg_class_out(irn, i, cls);
irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n+1, real_in);
- init_node_attr(irn, n + 1);
+ init_node_attr(irn, n + 1, n + 1);
be_node_set_reg_class_in(irn, 0, sp->reg_class);
for (i = 0; i < n; ++i) {
be_node_set_reg_class_in(irn, i + 1, cls_frame);
return irn;
}
-
ir_node *be_new_Copy(const arch_register_class_t *cls, ir_node *bl, ir_node *op)
{
ir_node *in[1];
in[0] = op;
res = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, in);
- init_node_attr(res, 1);
+ init_node_attr(res, 1, 1);
be_node_set_reg_class_in(res, 0, cls);
be_node_set_reg_class_out(res, 0, cls);
- req = get_be_req(res, BE_OUT_POS(0));
- req->cls = cls;
- req->type = arch_register_req_type_should_be_same;
+ req = allocate_reg_req(res);
+ req->cls = cls;
+ req->type = arch_register_req_type_should_be_same;
req->other_same = 1U << 0;
+ be_set_constr_out(res, 0, req);
return res;
}
set_irn_n(cpy, be_pos_Copy_op, op);
}
-ir_node *be_new_Keep(const arch_register_class_t *cls, ir_node *bl, int n, ir_node *in[])
+ir_node *be_new_Keep(ir_node *block, int n, ir_node *in[])
{
int i;
ir_node *res;
- ir_graph *irg = get_Block_irg(bl);
+ ir_graph *irg = get_Block_irg(block);
- res = new_ir_node(NULL, irg, bl, op_be_Keep, mode_ANY, -1, NULL);
- init_node_attr(res, -1);
+ res = new_ir_node(NULL, irg, block, op_be_Keep, mode_ANY, -1, NULL);
+ init_node_attr(res, -1, 1);
- for(i = 0; i < n; ++i) {
+ for (i = 0; i < n; ++i) {
add_irn_n(res, in[i]);
- add_register_req(res);
- be_node_set_reg_class_in(res, i, cls);
+ add_register_req_in(res);
}
keep_alive(res);
assert(be_is_Keep(keep));
n = add_irn_n(keep, node);
- add_register_req(keep);
+ add_register_req_in(keep);
be_node_set_reg_class_in(keep, n, cls);
}
/* creates a be_Call */
-ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr,
- int n_outs, int n, ir_node *in[], ir_type *call_tp)
+ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem,
+ ir_node *sp, ir_node *ptr, int n_outs, int n, ir_node *in[],
+ ir_type *call_tp)
{
be_call_attr_t *a;
int real_n = be_pos_Call_first_arg + n;
memcpy(&real_in[be_pos_Call_first_arg], in, n * sizeof(in[0]));
irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in);
- a = init_node_attr(irn, (n_outs > real_n ? n_outs : real_n));
+ a = init_node_attr(irn, real_n, n_outs);
a->ent = NULL;
a->call_tp = call_tp;
a->pop = 0;
int i;
res = new_ir_node(dbg, irg, block, op_be_Return, mode_X, -1, NULL);
- init_node_attr(res, -1);
- for(i = 0; i < n; ++i) {
+ init_node_attr(res, -1, 1);
+ for (i = 0; i < n; ++i) {
add_irn_n(res, in[i]);
- add_register_req(res);
+ add_register_req_in(res);
}
+ be_set_constr_out(res, 0, arch_no_register_req);
a = get_irn_attr(res);
a->num_ret_vals = n_res;
int pos;
pos = add_irn_n(ret, node);
- add_register_req(ret);
+ add_register_req_in(ret);
return pos;
}
in[0] = old_sp;
irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode,
sizeof(in) / sizeof(in[0]), in);
- a = init_node_attr(irn, 1);
+ a = init_node_attr(irn, 1, 1);
a->offset = offset;
a->align = align;
return irn;
}
-ir_node *be_new_AddSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp, ir_node *sz)
+ir_node *be_new_AddSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp,
+ ir_node *sz)
{
be_node_attr_t *a;
ir_node *irn;
irg = get_Block_irg(bl);
irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, be_pos_AddSP_last, in);
- a = init_node_attr(irn, be_pos_AddSP_last);
+ a = init_node_attr(irn, be_pos_AddSP_last, pn_be_AddSP_last);
/* Set output constraint to stack register. */
be_set_constr_single_reg_in(irn, be_pos_AddSP_old_sp, sp, 0);
irg = get_Block_irg(bl);
irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, be_pos_SubSP_last, in);
- a = init_node_attr(irn, be_pos_SubSP_last);
+ a = init_node_attr(irn, be_pos_SubSP_last, pn_be_SubSP_last);
/* Set output constraint to stack register. */
be_set_constr_single_reg_in(irn, be_pos_SubSP_old_sp, sp, 0);
ir_graph *irg = get_Block_irg(bl);
res = new_ir_node(NULL, irg, bl, op_be_RegParams, mode_T, 0, NULL);
- init_node_attr(res, -1);
- for(i = 0; i < n_outs; ++i)
- add_register_req(res);
+ init_node_attr(res, 0, -1);
+ for (i = 0; i < n_outs; ++i) {
+ add_register_req_out(res);
+ }
return res;
}
in[0] = frame;
irn = new_ir_node(NULL, irg, bl, op_be_FrameAddr, get_irn_mode(frame), 1, in);
- a = init_node_attr(irn, 1);
+ a = init_node_attr(irn, 1, 1);
a->ent = ent;
a->offset = 0;
be_node_set_reg_class_in(irn, 0, cls_frame);
in[0] = src;
memcpy(&in[1], in_keep, n * sizeof(in[0]));
irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in);
- init_node_attr(irn, n + 1);
+ init_node_attr(irn, n + 1, 1);
be_node_set_reg_class_in(irn, 0, cls);
be_node_set_reg_class_out(irn, 0, cls);
ir_graph *irg = get_Block_irg(bl);
res = new_ir_node(NULL, irg, bl, op_be_Barrier, mode_T, -1, NULL);
- init_node_attr(res, -1);
- for(i = 0; i < n; ++i) {
+ init_node_attr(res, -1, -1);
+ for (i = 0; i < n; ++i) {
add_irn_n(res, in[i]);
- add_register_req(res);
+ add_register_req_in(res);
+ add_register_req_out(res);
}
return res;
int n = add_irn_n(barrier, node);
ir_node *proj = new_r_Proj(block, barrier, mode, n);
- add_register_req(barrier);
+ add_register_req_in(barrier);
+ add_register_req_out(barrier);
return proj;
}
-/* Construct a new be_Unwind. */
-ir_node *be_new_Unwind(dbg_info *dbg, ir_node *block,
- ir_node *mem, ir_node *sp)
-{
- ir_node *res;
- ir_node *in[2];
- ir_graph *irg = get_Block_irg(block);
-
- in[be_pos_Unwind_mem] = mem;
- in[be_pos_Unwind_sp] = sp;
- res = new_ir_node(dbg, irg, block, op_be_Unwind, mode_X, 2, in);
- init_node_attr(res, -1);
-
- return res;
-}
-
int be_has_frame_entity(const ir_node *irn)
{
switch (get_irn_opcode(irn)) {
return get_irn_arity(irn) - 1;
}
-static void set_req_single(struct obstack *obst, arch_register_req_t *req,
+static const arch_register_req_t *get_single_req(struct obstack *obst,
const arch_register_t *reg, arch_register_req_type_t additional_types)
{
+ arch_register_req_t *req = obstack_alloc(obst, sizeof(*req));
const arch_register_class_t *cls = arch_register_get_class(reg);
unsigned *limited_bitset;
limited_bitset = rbitset_obstack_alloc(obst, arch_register_class_n_regs(cls));
rbitset_set(limited_bitset, arch_register_get_index(reg));
- req->cls = cls;
- req->type |= arch_register_req_type_limited | additional_types;
+ req->type = arch_register_req_type_limited | additional_types;
+ req->cls = cls;
req->limited = limited_bitset;
-
+ return req;
}
void be_set_constr_single_reg_in(ir_node *node, int pos,
const arch_register_t *reg, arch_register_req_type_t additional_types)
{
- arch_register_req_t *req = get_be_req(node, pos);
- ir_graph *irg = get_irn_irg(node);
- struct obstack *obst = get_irg_obstack(irg);
+ const arch_register_req_t *req;
- set_req_single(obst, req, reg, additional_types);
+ if (additional_types == 0) {
+ req = reg->single_req;
+ } else {
+ ir_graph *irg = get_irn_irg(node);
+ struct obstack *obst = be_get_birg_obst(irg);
+ req = get_single_req(obst, reg, additional_types);
+ }
+ be_set_constr_in(node, pos, req);
}
void be_set_constr_single_reg_out(ir_node *node, int pos,
const arch_register_t *reg, arch_register_req_type_t additional_types)
{
- arch_register_req_t *req = get_be_req(node, BE_OUT_POS(pos));
- ir_graph *irg = get_irn_irg(node);
- struct obstack *obst = get_irg_obstack(irg);
+ const arch_register_req_t *req;
/* if we have an ignore register, add ignore flag and just assign it */
if (reg->type & arch_register_type_ignore) {
additional_types |= arch_register_req_type_ignore;
}
+ if (additional_types == 0) {
+ req = reg->single_req;
+ } else {
+ ir_graph *irg = get_irn_irg(node);
+ struct obstack *obst = be_get_birg_obst(irg);
+ req = get_single_req(obst, reg, additional_types);
+ }
+
arch_irn_set_register(node, pos, reg);
- set_req_single(obst, req, reg, additional_types);
+ be_set_constr_out(node, pos, req);
}
-void be_set_constr_limited(ir_node *node, int pos, const arch_register_req_t *req)
+void be_node_set_reg_class_in(ir_node *irn, int pos,
+ const arch_register_class_t *cls)
{
- ir_graph *irg = get_irn_irg(node);
- struct obstack *obst = get_irg_obstack(irg);
- arch_register_req_t *r = get_be_req(node, pos);
-
- assert(arch_register_req_is(req, limited));
- assert(!(req->type & (arch_register_req_type_should_be_same | arch_register_req_type_must_be_different)));
- memcpy(r, req, sizeof(r[0]));
- r->limited = rbitset_duplicate_obstack_alloc(obst, req->limited, req->cls->n_regs);
+ be_set_constr_in(irn, pos, cls->class_req);
}
-void be_node_set_reg_class_in(ir_node *irn, int pos, const arch_register_class_t *cls)
+void be_node_set_reg_class_out(ir_node *irn, int pos,
+ const arch_register_class_t *cls)
{
- arch_register_req_t *req = get_be_req(irn, pos);
-
- req->cls = cls;
-
- if (cls == NULL) {
- req->type = arch_register_req_type_none;
- } else if (req->type == arch_register_req_type_none) {
- req->type = arch_register_req_type_normal;
- }
-}
-
-void be_node_set_reg_class_out(ir_node *irn, int pos, const arch_register_class_t *cls)
-{
- arch_register_req_t *req = get_be_req(irn, BE_OUT_POS(pos));
-
- req->cls = cls;
-
- if (cls == NULL) {
- req->type = arch_register_req_type_none;
- } else if (req->type == arch_register_req_type_none) {
- req->type = arch_register_req_type_normal;
- }
+ be_set_constr_out(irn, pos, cls->class_req);
}
ir_node *be_get_IncSP_pred(ir_node *irn) {
static const arch_register_req_t *be_node_get_out_reg_req(
const ir_node *irn, int pos)
{
- const be_node_attr_t *a = get_irn_attr_const(irn);
-
- assert(pos >= 0);
- if (pos >= ARR_LEN(a->reg_data)) {
- return arch_no_register_req;
- }
-
- return &a->reg_data[pos].req;
+ const backend_info_t *info = be_get_info(irn);
+ assert(pos < ARR_LEN(info->out_infos));
+ return info->out_infos[pos].req;
}
static const arch_register_req_t *be_node_get_in_reg_req(
if (pos >= get_irn_arity(irn) || pos >= ARR_LEN(a->reg_data))
return arch_no_register_req;
- return &a->reg_data[pos].in_req;
+ return a->reg_data[pos].in_req;
}
static arch_irn_class_t be_node_classify(const ir_node *irn)
{
be_frame_attr_t *a;
- if(!be_has_frame_entity(irn))
+ if (!be_has_frame_entity(irn))
return;
a = get_irn_attr(irn);
static int be_node_get_sp_bias(const ir_node *irn)
{
- if(be_is_IncSP(irn))
+ if (be_is_IncSP(irn))
return be_get_IncSP_offset(irn);
- if(be_is_Call(irn))
+ if (be_is_Call(irn))
return -(int)be_Call_get_pop(irn);
return 0;
ir_node *op;
int i;
- if(*visited && pset_find_ptr(*visited, phi))
+ if (*visited && pset_find_ptr(*visited, phi))
return NULL;
- for(i = 0; i < n; ++i) {
+ for (i = 0; i < n; ++i) {
op = get_irn_n(phi, i);
/* Matze: don't we unnecessary constraint our phis with this?
* we only need to take the regclass IMO*/
- if(!is_Phi(op))
+ if (!is_Phi(op))
return arch_get_register_req_out(op);
}
* The operands of that Phi were all Phis themselves.
* We have to start a DFS for a non-Phi argument now.
*/
- if(!*visited)
+ if (!*visited)
*visited = pset_new_ptr(16);
pset_insert_ptr(*visited, phi);
- for(i = 0; i < n; ++i) {
+ for (i = 0; i < n; ++i) {
const arch_register_req_t *req;
op = get_irn_n(phi, i);
req = get_Phi_reg_req_recursive(op, visited);
- if(req != NULL)
+ if (req != NULL)
return req;
}
if (!mode_is_datab(get_irn_mode(node))) {
req = arch_no_register_req;
} else {
- pset *visited = NULL;
- ir_graph *irg = get_irn_irg(node);
- struct obstack *obst = get_irg_obstack(irg);
+ pset *visited = NULL;
req = get_Phi_reg_req_recursive(node, &visited);
assert(req->cls != NULL);
-
- if (req->type != arch_register_req_type_normal) {
- arch_register_req_t *nreq = OALLOCZ(obst, arch_register_req_t);
- nreq->type = arch_register_req_type_normal;
- nreq->cls = req->cls;
- req = nreq;
- }
+ req = req->cls->class_req;
if (visited != NULL)
del_pset(visited);
int i;
be_node_attr_t *a = get_irn_attr(node);
int len = ARR_LEN(a->reg_data);
+ const backend_info_t *info = be_get_info(node);
for (i = 0; i < len; ++i) {
- const arch_register_req_t *req = &a->reg_data[i].in_req;
+ const arch_register_req_t *req = a->reg_data[i].in_req;
if (req->cls == NULL)
continue;
fprintf(F, "inreq #%d = ", i);
}
for (i = 0; i < len; ++i) {
- const arch_register_req_t *req = &a->reg_data[i].req;
+ const arch_register_req_t *req = info->out_infos[i].req;
if (req->cls == NULL)
continue;
fprintf(F, "outreq #%d = ", i);
fputs(get_op_name(get_irn_op(irn)), f);
break;
case dump_node_mode_txt:
- if(be_is_Perm(irn) || be_is_Copy(irn) || be_is_CopyKeep(irn)) {
+ if (be_is_Perm(irn) || be_is_Copy(irn) || be_is_CopyKeep(irn)) {
fprintf(f, " %s", get_mode_name(get_irn_mode(irn)));
}
break;
case dump_node_nodeattr_txt:
- if(be_is_Call(irn)) {
+ if (be_is_Call(irn)) {
be_call_attr_t *a = (be_call_attr_t *) at;
if (a->ent)
fprintf(f, " [%s] ", get_entity_name(a->ent));
}
- if(be_is_IncSP(irn)) {
+ if (be_is_IncSP(irn)) {
const be_incsp_attr_t *attr = get_irn_generic_attr_const(irn);
- if(attr->offset == BE_STACK_FRAME_SIZE_EXPAND) {
+ if (attr->offset == BE_STACK_FRAME_SIZE_EXPAND) {
fprintf(f, " [Setup Stackframe] ");
- } else if(attr->offset == BE_STACK_FRAME_SIZE_SHRINK) {
+ } else if (attr->offset == BE_STACK_FRAME_SIZE_SHRINK) {
fprintf(f, " [Destroy Stackframe] ");
} else {
fprintf(f, " [%d] ", attr->offset);
case dump_node_info_txt:
dump_node_reqs(f, irn);
- if(be_has_frame_entity(irn)) {
+ if (be_has_frame_entity(irn)) {
be_frame_attr_t *a = (be_frame_attr_t *) at;
if (a->ent) {
unsigned size = get_type_size_bytes(get_entity_type(a->ent));
be_incsp_attr_t *a = (be_incsp_attr_t *) at;
if (a->offset == BE_STACK_FRAME_SIZE_EXPAND)
fprintf(f, "offset: FRAME_SIZE\n");
- else if(a->offset == BE_STACK_FRAME_SIZE_SHRINK)
+ else if (a->offset == BE_STACK_FRAME_SIZE_SHRINK)
fprintf(f, "offset: -FRAME SIZE\n");
else
fprintf(f, "offset: %u\n", a->offset);
case beo_MemPerm:
{
int i;
- for(i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) {
+ for (i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) {
ir_entity *in, *out;
in = be_get_MemPerm_in_entity(irn, i);
out = be_get_MemPerm_out_entity(irn, i);
- if(in) {
+ if (in) {
fprintf(f, "\nin[%d]: %s\n", i, get_entity_name(in));
}
- if(out) {
+ if (out) {
fprintf(f, "\nout[%d]: %s\n", i, get_entity_name(out));
}
}
{
const be_node_attr_t *old_attr = get_irn_attr_const(old_node);
be_node_attr_t *new_attr = get_irn_attr(new_node);
- struct obstack *obst = get_irg_obstack(get_irn_irg(new_node));
+ ir_graph *irg = get_irn_irg(new_node);
+ struct obstack *obst = be_get_birg_obst(irg);
backend_info_t *old_info = be_get_info(old_node);
backend_info_t *new_info = be_get_info(new_node);
- unsigned i, len;
assert(is_be_node(old_node));
assert(is_be_node(new_node));
memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
- new_attr->reg_data = NULL;
-
- if(old_attr->reg_data != NULL)
- len = ARR_LEN(old_attr->reg_data);
- else
- len = 0;
- if(get_irn_op(old_node)->opar == oparity_dynamic
- || be_is_RegParams(old_node)) {
- new_attr->reg_data = NEW_ARR_F(be_reg_data_t, len);
- new_info->out_infos = NEW_ARR_F(reg_out_info_t, len);
+ if (old_info->out_infos != NULL) {
+ unsigned n_outs = ARR_LEN(old_info->out_infos);
+ /* need dynamic out infos? */
+ if (be_is_RegParams(new_node) || be_is_Barrier(new_node)
+ || be_is_Perm(new_node)) {
+ new_info->out_infos = NEW_ARR_F(reg_out_info_t, n_outs);
+ } else {
+ new_info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_outs);
+ }
+ memcpy(new_info->out_infos, old_info->out_infos,
+ n_outs * sizeof(new_info->out_infos[0]));
} else {
- new_attr->reg_data = NEW_ARR_D(be_reg_data_t, obst, len);
- new_info->out_infos = NEW_ARR_D(reg_out_info_t, obst, len);
+ new_info->out_infos = NULL;
}
- if(len > 0) {
- memcpy(new_attr->reg_data, old_attr->reg_data, len * sizeof(be_reg_data_t));
- memcpy(new_info->out_infos, old_info->out_infos, len * sizeof(new_info->out_infos[0]));
- for(i = 0; i < len; ++i) {
- const be_reg_data_t *rd = &old_attr->reg_data[i];
- be_reg_data_t *newrd = &new_attr->reg_data[i];
- if (arch_register_req_is(&rd->req, limited)) {
- const arch_register_req_t *req = &rd->req;
- arch_register_req_t *new_req = &newrd->req;
- new_req->limited
- = rbitset_duplicate_obstack_alloc(obst, req->limited, req->cls->n_regs);
- }
- if(arch_register_req_is(&rd->in_req, limited)) {
- const arch_register_req_t *req = &rd->in_req;
- arch_register_req_t *new_req = &newrd->in_req;
- new_req->limited
- = rbitset_duplicate_obstack_alloc(obst, req->limited, req->cls->n_regs);
- }
+ /* input infos */
+ if (old_attr->reg_data != NULL) {
+ unsigned n_ins = ARR_LEN(old_attr->reg_data);
+ /* need dynamic in infos? */
+ if (get_irn_op(old_node)->opar == oparity_dynamic) {
+ new_attr->reg_data = NEW_ARR_F(be_reg_data_t, n_ins);
+ } else {
+ new_attr->reg_data = NEW_ARR_D(be_reg_data_t, obst, n_ins);
}
+ memcpy(new_attr->reg_data, old_attr->reg_data,
+ n_ins * sizeof(be_reg_data_t));
+ } else {
+ new_attr->reg_data = NULL;
}
}
op_be_RegParams = new_ir_op(beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_FrameAddr = new_ir_op(beo_FrameAddr, "be_FrameAddr", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_Barrier = new_ir_op(beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_Unwind = new_ir_op(beo_Unwind, "be_Unwind", op_pin_state_pinned, X, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_Spill->ops.node_cmp_attr = FrameAddr_cmp_attr;
op_be_Reload->ops.node_cmp_attr = FrameAddr_cmp_attr;
op_be_RegParams->ops.node_cmp_attr = node_cmp_attr;
op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr;
op_be_Barrier->ops.node_cmp_attr = node_cmp_attr;
- op_be_Unwind->ops.node_cmp_attr = node_cmp_attr;
/* attach out dummy_ops to middle end nodes */
for (opc = iro_First; opc <= iro_Last; ++opc) {
extern ir_op *op_be_RegParams;
extern ir_op *op_be_FrameAddr;
extern ir_op *op_be_Barrier;
-extern ir_op *op_be_Unwind;
/**
* A "symbolic constant" for the size of the stack frame to use with IncSP nodes.
* Create a new MemPerm node.
*/
ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_node *bl, int n, ir_node *in[]);
-ir_node *be_new_Keep(const arch_register_class_t *cls, ir_node *bl, int arity, ir_node *in[]);
+ir_node *be_new_Keep(ir_node *block, int arity, ir_node *in[]);
void be_Keep_add_node(ir_node *keep, const arch_register_class_t *cls, ir_node *node);
ir_node *be_get_CopyKeep_op(const ir_node *cpy);
void be_set_CopyKeep_op(ir_node *cpy, ir_node *op);
-/**
- * Position numbers for the be_Unwind inputs.
- */
-enum {
- be_pos_Unwind_mem = 0, /**< memory input of a be_Unwind node */
- be_pos_Unwind_sp = 1, /**< stack pointer input of a be_Unwind node */
-};
-
-/**
- * Construct a new be_Unwind.
- *
- * @param dbg debug info
- * @param bl the block where the new node will be placed
- * @param mem the memory input
- * @param sp the stack pointer input
- */
-ir_node *be_new_Unwind(dbg_info *dbg, ir_node *bl, ir_node *mem, ir_node *sp);
-
-ir_node *be_get_Unwind_mem(const ir_node *irn);
-ir_node *be_get_Unwind_sp(const ir_node *irn);
-
/**
* Get the backend opcode of a backend node.
* @param irn The node.
* @param pos The position (@see be_set_constr_single_reg()).
* @param req The register requirements which shall be transferred.
*/
-void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req);
+void be_set_constr_in(ir_node *irn, int pos, const arch_register_req_t *req);
+void be_set_constr_out(ir_node *irn, int pos, const arch_register_req_t *req);
/**
* Set the register class of a node.
static inline int be_is_RegParams(const ir_node *irn) { return get_irn_opcode(irn) == beo_RegParams; }
static inline int be_is_FrameAddr(const ir_node *irn) { return get_irn_opcode(irn) == beo_FrameAddr; }
static inline int be_is_Barrier (const ir_node *irn) { return get_irn_opcode(irn) == beo_Barrier ; }
-static inline int be_is_Unwind (const ir_node *irn) { return get_irn_opcode(irn) == beo_Unwind ; }
#endif /* FIRM_BE_BENODE_T_H */
* @author Matthias Braun
* @version $Id$
*/
-
#ifndef BEPEEPHOLE_H
#define BEPEEPHOLE_H
ir_node *macroblock = get_Block_MacroBlock(node);
ir_node *block;
- /* we use the old blocks for now, because jumps allow cycles in the graph
- * we have to fix this later */
block = new_ir_node(dbgi, irg, NULL, get_irn_op(node), get_irn_mode(node),
get_irn_arity(node), get_irn_in(node) + 1);
copy_node_attr(node, block);
*/
pset *be_empty_set(void);
-/** Undefine this to disable debugging mode. */
-#define BE_DEBUG 1
-
/**
* Convenient block getter.
* Works also, if the given node is a block.
return is_Block(irn) ? irn : get_nodes_block(irn);
}
-static inline int is_firm_be_mode(const ir_mode *mode)
-{
- return mode_is_data(mode);
-}
-
/**
* Check, if a node produces or consumes a data value.
* If it does, it is significant for scheduling and register allocation.
int i, n;
/* If the node produces a data value, return immediately. */
- if (is_firm_be_mode(get_irn_mode(irn)))
+ if (mode_is_data(get_irn_mode(irn)))
return 1;
/* else check, if it takes a data value, if that is so, return */
for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
- if (is_firm_be_mode(get_irn_mode(op)))
+ if (mode_is_data(get_irn_mode(op)))
return 1;
}
if (get_irn_mode(node) == mode_M)
return 0;
break;
+ case beo_Return:
+ return 1;
default:
break;
}
*/
static void ia32_prepare_graph(void *self)
{
- ia32_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
+ ia32_code_gen_t *cg = self;
switch (be_transformer) {
case TRANSFORMER_DEFAULT:
}
in[0] = sp;
- keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], block, 1, in);
+ keep = be_new_Keep(block, 1, in);
sched_add_before(node, keep);
/* exchange memprojs */
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
- ia32_gen_routine(cg, irg);
+ if (ia32_cg_config.emit_machcode) {
+ ia32_gen_binary_routine(cg, irg);
+ } else {
+ ia32_gen_routine(cg, irg);
+ }
cur_reg_set = NULL;
* - the virtual floating point registers
* - the SSE vector register set
*/
-static unsigned ia32_get_n_reg_class(const void *self)
+static unsigned ia32_get_n_reg_class(void)
{
- (void) self;
return N_CLASSES;
}
/**
* Return the register class for index i.
*/
-static const arch_register_class_t *ia32_get_reg_class(const void *self,
- unsigned i)
+static const arch_register_class_t *ia32_get_reg_class(unsigned i)
{
- (void) self;
assert(i < N_CLASSES);
return &ia32_reg_classes[i];
}
* @param mode The mode in question.
* @return A register class which can hold values of the given mode.
*/
-const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self,
- const ir_mode *mode)
+const arch_register_class_t *ia32_get_reg_class_for_mode(const ir_mode *mode)
{
- (void) self;
-
if (mode_is_float(mode)) {
return ia32_cg_config.use_sse2 ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
}
/**
* Returns the necessary byte alignment for storing a register of given class.
*/
-static int ia32_get_reg_class_alignment(const void *self,
- const arch_register_class_t *cls)
+static int ia32_get_reg_class_alignment(const arch_register_class_t *cls)
{
ir_mode *mode = arch_register_class_mode(cls);
int bytes = get_mode_size_bytes(mode);
- (void) self;
if (mode_is_float(mode) && bytes > 8)
return 16;
}
static const be_execution_unit_t ***ia32_get_allowed_execution_units(
- const void *self, const ir_node *irn)
+ const ir_node *irn)
{
static const be_execution_unit_t *_allowed_units_BRANCH[] = {
&ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH1],
NULL
};
const be_execution_unit_t ***ret;
- (void) self;
if (is_ia32_irn(irn)) {
ret = get_ia32_exec_units(irn);
return NULL;
}
-static void ia32_mark_remat(const void *self, ir_node *node)
+static void ia32_mark_remat(ir_node *node)
{
- (void) self;
if (is_ia32_irn(node)) {
set_ia32_is_remat(node);
}
return 0;
}
-static asm_constraint_flags_t ia32_parse_asm_constraint(const void *self, const char **c)
+static asm_constraint_flags_t ia32_parse_asm_constraint(const char **c)
{
- (void) self;
(void) c;
/* we already added all our simple flags to the flags modifier list in
return ASM_CONSTRAINT_FLAG_INVALID;
}
-static int ia32_is_valid_clobber(const void *self, const char *clobber)
+static int ia32_is_valid_clobber(const char *clobber)
{
- (void) self;
-
return ia32_get_clobber_register(clobber) != NULL;
}
};
static int opt_size = 0;
+static int emit_machcode = 0;
static cpu_support arch = cpu_generic;
static cpu_support opt_arch = cpu_generic;
static int use_sse2 = 0;
&opt_cc, 1),
LC_OPT_ENT_BIT("unsafe_floatconv", "do unsafe floating point controlword "
"optimisations", &opt_unsafe_floatconv, 1),
+ LC_OPT_ENT_BOOL("machcode", "output machine code instead of assembler",
+ &emit_machcode),
LC_OPT_LAST
};
c->use_i486 = (arch & arch_mask) >= arch_i486;
c->optimize_cc = opt_cc;
c->use_unsafe_floatconv = opt_unsafe_floatconv;
+ c->emit_machcode = emit_machcode;
c->function_alignment = arch_costs->function_alignment;
c->label_alignment = arch_costs->label_alignment;
* rounding mode
*/
unsigned use_unsafe_floatconv:1;
+ /** emit machine code instead of assembler */
+ unsigned emit_machcode:1;
+
/** function alignment (a power of two in bytes) */
unsigned function_alignment;
/** alignment for labels (which are expected to be frequent jump targets) */
sched_add_before(schedpoint, pop);
in[0] = val;
- keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], block, 1, in);
+ keep = be_new_Keep(block, 1, in);
sched_add_before(schedpoint, keep);
return stack;
if (ia32_cg_config.use_fisttp) {
/* Note: fisttp ALWAYS pop the tos. We have to ensure here that the value is copied
if other users exists */
- const arch_register_class_t *reg_class = &ia32_reg_classes[CLASS_ia32_vfp];
ir_node *vfisttp = new_bd_ia32_vfisttp(dbgi, block, base, index, mem, val);
ir_node *value = new_r_Proj(block, vfisttp, mode_E, pn_ia32_vfisttp_res);
- be_new_Keep(reg_class, block, 1, &value);
+ be_new_Keep(block, 1, &value);
new_node = new_r_Proj(block, vfisttp, mode_M, pn_ia32_vfisttp_M);
*fist = vfisttp;
if (last_keep != NULL) {
be_Keep_add_node(last_keep, cls, in[0]);
} else {
- last_keep = be_new_Keep(cls, block, 1, in);
+ last_keep = be_new_Keep(block, 1, in);
if (sched_is_scheduled(node)) {
sched_add_after(node, last_keep);
}
*/
static void keep_float_node_alive(ir_node *node)
{
- ir_node *block = get_nodes_block(node);
- const arch_register_class_t *cls = arch_get_irn_reg_class_out(node);
- ir_node *keep;
-
- keep = be_new_Keep(cls, block, 1, &node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *keep = be_new_Keep(block, 1, &node);
assert(sched_is_scheduled(node));
sched_add_after(node, keep);
-} /* keep_float_node_alive */
+}
/**
* Create a copy of a node. Recreate the node if it's a constant.
free(isa);
}
-static unsigned mips_get_n_reg_class(const void *self)
+static unsigned mips_get_n_reg_class(void)
{
- (void) self;
return N_CLASSES;
}
-static const arch_register_class_t *mips_get_reg_class(const void *self,
- unsigned i)
+static const arch_register_class_t *mips_get_reg_class(unsigned i)
{
- (void) self;
assert(i < N_CLASSES);
return &mips_reg_classes[i];
}
* @param mode The mode in question.
* @return A register class which can hold values of the given mode.
*/
-const arch_register_class_t *mips_get_reg_class_for_mode(const void *self,
- const ir_mode *mode)
+const arch_register_class_t *mips_get_reg_class_for_mode(const ir_mode *mode)
{
- (void) self;
(void) mode;
ASSERT_NO_FLOAT(mode);
return &mips_reg_classes[CLASS_mips_gp];
/**
* Returns the necessary byte alignment for storing a register of given class.
*/
-static int mips_get_reg_class_alignment(const void *self,
- const arch_register_class_t *cls)
+static int mips_get_reg_class_alignment(const arch_register_class_t *cls)
{
ir_mode *mode = arch_register_class_mode(cls);
- (void) self;
return get_mode_size_bytes(mode);
}
static const be_execution_unit_t ***mips_get_allowed_execution_units(
- const void *self, const ir_node *irn)
+ const ir_node *irn)
{
- (void) self;
(void) irn;
/* TODO */
panic("Unimplemented mips_get_allowed_execution_units()");
return &p;
}
-static asm_constraint_flags_t mips_parse_asm_constraint(const void *self,
- const char **c)
+static asm_constraint_flags_t mips_parse_asm_constraint(const char **c)
{
- (void) self;
(void) c;
return ASM_CONSTRAINT_FLAG_INVALID;
}
-static int mips_is_valid_clobber(const void *self, const char *clobber)
+static int mips_is_valid_clobber(const char *clobber)
{
- (void) self;
(void) clobber;
return 0;
}
-static unsigned ppc32_get_n_reg_class(const void *self) {
- (void) self;
+static unsigned ppc32_get_n_reg_class(void)
+{
return N_CLASSES;
}
-static const arch_register_class_t *ppc32_get_reg_class(const void *self,
- unsigned i) {
- (void) self;
+static const arch_register_class_t *ppc32_get_reg_class(unsigned i)
+{
assert(i < N_CLASSES && "Invalid ppc register class requested.");
return &ppc32_reg_classes[i];
}
* @param mode The mode in question.
* @return A register class which can hold values of the given mode.
*/
-const arch_register_class_t *ppc32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
- (void) self;
+const arch_register_class_t *ppc32_get_reg_class_for_mode(const ir_mode *mode)
+{
if (mode_is_float(mode))
return &ppc32_reg_classes[CLASS_ppc32_fp];
else
/**
* Returns the necessary byte alignment for storing a register of given class.
*/
-static int ppc32_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) {
+static int ppc32_get_reg_class_alignment(const arch_register_class_t *cls)
+{
ir_mode *mode = arch_register_class_mode(cls);
- (void) self;
-
return get_mode_size_bytes(mode);
}
-static const be_execution_unit_t ***ppc32_get_allowed_execution_units(const void *self, const ir_node *irn) {
- (void) self;
+static const be_execution_unit_t ***ppc32_get_allowed_execution_units(const ir_node *irn) {
(void) irn;
/* TODO */
panic("Unimplemented ppc32_get_allowed_execution_units()");
return &p;
}
-static asm_constraint_flags_t ppc32_parse_asm_constraint(const void *self, const char **c)
+static asm_constraint_flags_t ppc32_parse_asm_constraint(const char **c)
{
/* no asm support yet */
- (void) self;
(void) c;
return ASM_CONSTRAINT_FLAG_INVALID;
}
-static int ppc32_is_valid_clobber(const void *self, const char *clobber)
+static int ppc32_is_valid_clobber(const char *clobber)
{
/* no asm support yet */
- (void) self;
(void) clobber;
return 0;
}
set_ppc32_offset_mode(andi, ppc32_ao_Lo16);
set_ppc32_constant_tarval(andi, new_tarval_from_long(mask, mode_Is));
in[0] = new_rd_Proj(env->dbg, env->block, andi, env->mode,1);
- be_new_Keep(&ppc32_reg_classes[CLASS_ppc32_condition], env->block, 1, in);
+ be_new_Keep(env->block, 1, in);
return new_rd_Proj(env->dbg, env->block, andi, env->mode,0);
}
in[0] = new_rd_Proj(env->dbg, env->block, store, mode_Is, 1); // src
in[1] = new_rd_Proj(env->dbg, env->block, store, mode_Is, 2); // dest
in[2] = new_rd_Proj(env->dbg, env->block, store, mode_Is, 4); // temp
- be_new_Keep(&ppc32_reg_classes[CLASS_ppc32_gp], env->block, 3, in);
+ be_new_Keep(env->block, 3, in);
in[0] = new_rd_Proj(env->dbg, env->block, store, mode_Is, 3); // ctr
- be_new_Keep(&ppc32_reg_classes[CLASS_ppc32_count], env->block, 1, in);
+ be_new_Keep(env->block, 1, in);
mem = new_rd_Proj(env->dbg, env->block, store, mode_M, 0);
push(@obst_limit_func, "static const unsigned " . $limit_name . "[] = { ");
my $first = 1;
my $limitbitsetlen = $regclass2len{$class};
- my $limitarraylen = $limitbitsetlen / 32 + ($limitbitsetlen % 32 > 0 ? 1 : 0);
+ my $limitarraylen = ($limitbitsetlen+31) / 32;
for(my $i = 0; $i < $limitarraylen; $i++) {
my $limitarraypart = $limit_array[$i];
}
# stacks for output
-my @obst_regtypes_def; # stack for the register type variables definitions
-my @obst_regtypes_decl;# stack for the register type variables declarations
-my @obst_regclasses; # stack for the register class variables
-my @obst_classdef; # stack to define a name for a class index
-my @obst_regdef; # stack to define a name for a register index
-my @obst_reginit; # stack for the register type inits
-my @obst_req; # stack for the register requirements
-my @obst_limit_func; # stack for functions to return a subset of a register class
-my @obst_header_all; # stack for some extern struct defs needed for bearch_$arch include
+my $regtypes_def; # stack for the register type variables definitions
+my $regtypes_decl; # stack for the register type variables declarations
+my @regclasses; # stack for the register class variables
+my $classdef; # stack to define a name for a class index
+my $regdef; # stack to define a name for a register index
+my $reginit; # stack for the register type inits
+my $single_constraints_decls;
+my $single_constraints;
my $numregs;
my $class_ptr;
my $tmp;
-my %reg2class;
-my %regclass2len;
+my %regclass2len = ();
+my %reg2class = ();
-push(@obst_classdef, "enum reg_classes {\n");
+$classdef .= "enum reg_classes {\n";
my $class_mode;
+foreach my $class_name (keys(%reg_classes)) {
+ my @class = @{ $reg_classes{"$class_name"} };
+
+ my $idx = 0;
+ foreach (@class) {
+ if (defined($_->{name})) {
+ $reg2class{$_->{name}} = {
+ "class" => $class_name,
+ "index" => $idx
+ };
+ }
+ $idx++;
+ }
+ $regclass2len{$class_name} = $idx;
+}
+
+sub get_limited_array {
+ my $reg = shift;
+ my $regclass = $reg2class{"$reg"}{"class"};
+ my $ucname = uc($reg);
+ my $result = "{ ";
+
+ my $limitedbitsetlen = $regclass2len{$regclass};
+ my $arraylen = ($limitedbitsetlen+31) / 32;
+ my $first = 1;
+ for (my $i = 0; $i < $arraylen; ++$i) {
+ if ($first) {
+ $first = 0;
+ } else {
+ $result .= ", ";
+ }
+
+ my $index = $reg2class{"$reg"}{"index"};
+ if ($index >= $i*32 && $index < ($i+1)*32) {
+ if ($i > 0) {
+ $result .= "(1 << (REG_${ucname} % 32))";
+ } else {
+ $result .= "(1 << REG_${ucname})";
+ }
+ } else {
+ $result .= "0";
+ }
+ }
+ $result .= " }";
+}
+
# generate register type and class variable, init function and default requirements
foreach my $class_name (keys(%reg_classes)) {
my @class = @{ $reg_classes{"$class_name"} };
$flags_prepared = "0";
}
- push(@obst_regtypes_decl, "extern const arch_register_t ${class_name}_regs[$numregs];\n");
+ $single_constraints_decls .= <<EOF;
+static const arch_register_req_t ${arch}_class_reg_req_${old_classname};
+EOF
- push(@obst_classdef, "\tCLASS_$class_name = $class_idx,\n");
- push(@obst_regclasses, "{ $class_idx, \"$class_name\", $numregs, NULL, ".$class_name."_regs, $flags_prepared }");
+ $single_constraints .= <<EOF;
+static const arch_register_req_t ${arch}_class_reg_req_${old_classname} = {
+ arch_register_req_type_normal,
+ &${arch}_reg_classes[CLASS_${arch}_${old_classname}],
+ NULL,
+ 0,
+ 0
+};
+EOF
+
+ $regtypes_decl .= "extern const arch_register_t ${class_name}_regs[$numregs];\n";
+
+ $classdef .= "\tCLASS_$class_name = $class_idx,\n";
+ push(@regclasses, "{ $class_idx, \"$class_name\", $numregs, NULL, ".$class_name."_regs, $flags_prepared, &${arch}_class_reg_req_${old_classname} }");
my $idx = 0;
- push(@obst_reginit, "\t/* set largest possible mode for '$class_name' */\n");
- push(@obst_reginit, "\t$arch\_reg_classes[CLASS_".$class_name."].mode = $class_mode;\n\n");
- push(@obst_regtypes_def, "const arch_register_t ${class_name}_regs[$numregs] = {\n");
+ $reginit .= "\t$arch\_reg_classes[CLASS_".$class_name."].mode = $class_mode;\n";
+ $regtypes_def .= "const arch_register_t ${class_name}_regs[$numregs] = {\n";
- push(@obst_regdef, "enum reg_${class_name}_indices {\n");
+ $regdef .= "enum reg_${class_name}_indices {\n";
foreach (@class) {
- my $ucname = uc($_->{"name"});
+ my $name = $_->{"name"};
+ my $ucname = uc($name);
my $type = translate_reg_type($_->{"type"});
# realname is name if not set by user
$_->{"realname"} = $_->{"name"} if (! exists($_->{"realname"}));
my $realname = $_->{realname};
+ $regdef .= "\tREG_${ucname},\n";
- $reg2class{$_->{"name"}} = { "class" => $old_classname, "index" => $idx }; # remember reg to class for later use
- push(@obst_regdef, "\tREG_${ucname},\n");
+ $regtypes_def .= <<EOF;
+ {
+ "${realname}",
+ ${class_ptr},
+ REG_${ucname},
+ ${type},
+ &${arch}_single_reg_req_${old_classname}_${name}
+ },
+EOF
- push(@obst_regtypes_def, "\t{\n");
- push(@obst_regtypes_def, "\t\t\"$realname\",\n");
- push(@obst_regtypes_def, "\t\t$class_ptr,\n");
- push(@obst_regtypes_def, "\t\tREG_${ucname},\n");
- push(@obst_regtypes_def, "\t\t$type\n");
- push(@obst_regtypes_def, "\t},\n");
+ my $limitedarray = get_limited_array($name);
+ $single_constraints .= <<EOF;
+static const unsigned ${arch}_limited_${old_classname}_${name}[] = ${limitedarray};
+static const arch_register_req_t ${arch}_single_reg_req_${old_classname}_${name} = {
+ arch_register_req_type_limited,
+ ${class_ptr},
+ ${arch}_limited_${old_classname}_${name},
+ 0,
+ 0
+};
+EOF
$idx++;
}
- push(@obst_regtypes_def, "};\n");
+ $regtypes_def .= "};\n";
- $regclass2len{$old_classname} = $idx;
- push(@obst_regdef, "\t$numregs = $idx\n");
- push(@obst_regdef, "};\n\n");
+ $regdef .= "\t$numregs = $idx\n";
+ $regdef .= "};\n\n";
$class_idx++;
}
-push(@obst_classdef, "\tN_CLASSES = ".scalar(keys(%reg_classes))."\n");
-push(@obst_classdef, "};\n\n");
+$classdef .= "\tN_CLASSES = ".scalar(keys(%reg_classes))."\n";
+$classdef .= "};\n\n";
$tmp = uc($arch);
#include "../bearch.h"
#include "${arch}_nodes_attr.h"
-EOF
-
-print OUT @obst_regdef, "\n";
-
-print OUT @obst_classdef, "\n";
-
-print OUT @obst_regtypes_decl, "\n";
-
-print OUT "extern arch_register_class_t $arch\_reg_classes[N_CLASSES];\n\n";
+${regdef}
+${classdef}
+${regtypes_decl}
-print OUT "void ".$arch."_register_init(void);\n\n";
+extern arch_register_class_t ${arch}_reg_classes[N_CLASSES];
-print OUT @obst_header_all, "\n";
-
-print OUT "\n#endif\n";
+void ${arch}_register_init(void);
+unsigned ${arch}_get_n_regs(void);
+#endif
+EOF
close(OUT);
-
# generate c file
open(OUT, ">$target_c") || die("Fatal error: Could not open $target_c, reason: $!\n");
#include "${arch}_map_regs.h"
#include "irmode.h"
+${single_constraints_decls}
EOF
-print OUT "arch_register_class_t ${arch}_reg_classes[] = {\n\t".join(",\n\t", @obst_regclasses)."\n};\n\n";
-
-print OUT @obst_regtypes_def, "\n";
-
-print OUT "void ${arch}_register_init(void) {\n";
-print OUT @obst_reginit;
-print OUT "}\n\n";
+print OUT "arch_register_class_t ${arch}_reg_classes[] = {\n\t".join(",\n\t", @regclasses)."\n};\n\n";
-print OUT @obst_limit_func;
-
-print OUT @obst_req;
+print OUT<<EOF;
+${single_constraints}
+${regtypes_def}
+void ${arch}_register_init(void)
+{
+${reginit}
+}
+EOF
close(OUT);
###
int index; /**< a unique number for each graph */
ir_phase *phases[PHASE_LAST]; /**< Phase information. */
+ void *be_data; /**< backend can put in private data here */
#ifdef DEBUG_libfirm
int n_outs; /**< Size wasted for outs */
long graph_nr; /**< a unique graph number for each