#define SNPRINTF_BUF_LEN 128
-/**
- * Returns the register at in position pos.
- */
-static const arch_register_t *get_in_reg(const ir_node *node, int pos)
-{
- ir_node *op = get_irn_n(node, pos);
- return arch_get_irn_register(op);
-}
-
void TEMPLATE_emit_immediate(const ir_node *node)
{
const TEMPLATE_attr_t *attr = get_TEMPLATE_attr_const(node);
void TEMPLATE_emit_source_register(const ir_node *node, int pos)
{
- const arch_register_t *reg = get_in_reg(node, pos);
+ const arch_register_t *reg = arch_get_irn_register_in(node, pos);
emit_register(reg);
}
void TEMPLATE_emit_dest_register(const ir_node *node, int pos)
{
- const arch_register_t *reg = arch_irn_get_register(node, pos);
+ const arch_register_t *reg = arch_get_irn_register_out(node, pos);
emit_register(reg);
}
backend_info_t *info;
(void) execution_units;
- arch_irn_set_flags(node, flags);
- arch_set_in_register_reqs(node, in_reqs);
+ arch_set_irn_flags(node, flags);
+ arch_set_irn_register_reqs_in(node, in_reqs);
info = be_get_info(node);
info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_res);
copy_node_attr(irg, node, phi);
be_duplicate_deps(node, phi);
- arch_set_out_register_req(phi, 0, req);
+ arch_set_irn_register_req_out(phi, 0, req);
be_enqueue_preds(node);
return phi;
}
#include "../benode.h"
-/**
- * Returns the register at in position pos.
- */
-static const arch_register_t *get_in_reg(const ir_node *node, int pos)
-{
- ir_node *op = get_irn_n(node, pos);
- return arch_get_irn_register(op);
-}
-
/*************************************************************
* _ _ __ _ _
* (_) | | / _| | | | |
void amd64_emit_source_register(const ir_node *node, int pos)
{
- amd64_emit_register(get_in_reg(node, pos));
+ amd64_emit_register(arch_get_irn_register_in(node, pos));
}
void amd64_emit_dest_register(const ir_node *node, int pos)
{
- amd64_emit_register(arch_irn_get_register(node, pos));
+ amd64_emit_register(arch_get_irn_register_out(node, pos));
}
/**
{
ir_mode *mode = get_irn_mode(irn);
- if (get_in_reg(irn, 0) == arch_irn_get_register(irn, 0)) {
+ if (arch_get_irn_register_in(irn, 0) == arch_get_irn_register_out(irn, 0)) {
/* omitted Copy */
return;
}
*/
static void emit_amd64_binop(const ir_node *irn)
{
- const arch_register_t *reg_s1 = get_in_reg(irn, 0);
- const arch_register_t *reg_s2 = get_in_reg(irn, 1);
- const arch_register_t *reg_d1 = arch_irn_get_register(irn, 0);
+ const arch_register_t *reg_s1 = arch_get_irn_register_in(irn, 0);
+ const arch_register_t *reg_s2 = arch_get_irn_register_in(irn, 1);
+ const arch_register_t *reg_d1 = arch_get_irn_register_out(irn, 0);
int second_op = 0;
backend_info_t *info;
(void) execution_units;
- arch_irn_set_flags(node, flags);
- arch_set_in_register_reqs(node, in_reqs);
+ arch_set_irn_flags(node, flags);
+ arch_set_irn_register_reqs_in(node, in_reqs);
info = be_get_info(node);
info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_res);
my $res = "";
if(defined($node->{modified_flags})) {
- $res .= "\tarch_irn_add_flags(res, arch_irn_flags_modify_flags);\n";
+ $res .= "\tarch_add_irn_flags(res, arch_irn_flags_modify_flags);\n";
}
return $res;
}
static ir_node *gen_be_Call(ir_node *node)
{
ir_node *res = be_duplicate_node(node);
- arch_irn_add_flags(res, arch_irn_flags_modify_flags);
+ arch_add_irn_flags(res, arch_irn_flags_modify_flags);
return res;
}
copy_node_attr(irg, node, phi);
be_duplicate_deps(node, phi);
- arch_set_out_register_req(phi, 0, req);
+ arch_set_irn_register_req_out(phi, 0, req);
be_enqueue_preds(node);
static set *sym_or_tv;
static arm_isa_t *isa;
-/**
- * Returns the register at in position pos.
- */
-static const arch_register_t *get_in_reg(const ir_node *irn, int pos)
-{
- ir_node *op = get_irn_n(irn, pos);
- return arch_get_irn_register(op);
-}
-
static void arm_emit_register(const arch_register_t *reg)
{
be_emit_string(arch_register_get_name(reg));
void arm_emit_source_register(const ir_node *node, int pos)
{
- const arch_register_t *reg = get_in_reg(node, pos);
+ const arch_register_t *reg = arch_get_irn_register_in(node, pos);
arm_emit_register(reg);
}
void arm_emit_dest_register(const ir_node *node, int pos)
{
- const arch_register_t *reg = arch_irn_get_register(node, pos);
+ const arch_register_t *reg = arch_get_irn_register_out(node, pos);
arm_emit_register(reg);
}
const arm_CopyB_attr_t *attr = get_arm_CopyB_attr_const(irn);
unsigned size = attr->size;
- const char *tgt = arch_register_get_name(get_in_reg(irn, 0));
- const char *src = arch_register_get_name(get_in_reg(irn, 1));
+ const char *tgt = arch_register_get_name(arch_get_irn_register_in(irn, 0));
+ const char *src = arch_register_get_name(arch_get_irn_register_in(irn, 1));
const char *t0, *t1, *t2, *t3;
const arch_register_t *tmpregs[4];
/* collect the temporary registers and sort them, we need ascending order */
- tmpregs[0] = get_in_reg(irn, 2);
- tmpregs[1] = get_in_reg(irn, 3);
- tmpregs[2] = get_in_reg(irn, 4);
+ tmpregs[0] = arch_get_irn_register_in(irn, 2);
+ tmpregs[1] = arch_get_irn_register_in(irn, 3);
+ tmpregs[2] = arch_get_irn_register_in(irn, 4);
tmpregs[3] = &arm_registers[REG_R12];
/* Note: R12 is always the last register because the RA did not assign higher ones */
{
ir_mode *mode = get_irn_mode(irn);
- if (get_in_reg(irn, 0) == arch_irn_get_register(irn, 0)) {
+ if (arch_get_irn_register_in(irn, 0) == arch_get_irn_register_out(irn, 0)) {
/* omitted Copy */
return;
}
backend_info_t *info;
(void) execution_units;
- arch_irn_set_flags(node, flags);
- arch_set_in_register_reqs(node, in_reqs);
+ arch_set_irn_flags(node, flags);
+ arch_set_irn_register_reqs_in(node, in_reqs);
attr->is_load_store = false;
info = be_get_info(node);
attr_type => "arm_shifter_operand_t",
attr => "arm_shift_modifier_t shift_modifier, unsigned char immediate_value, unsigned char immediate_rot",
custominit => "init_arm_shifter_operand(res, immediate_value, shift_modifier, immediate_rot);\n".
- "\tarch_irn_add_flags(res, arch_irn_flags_modify_flags);",
+ "\tarch_add_irn_flags(res, arch_irn_flags_modify_flags);",
emit => ". mov lr, pc\n".
". mov pc, %SO",
},
out_arity => "variable",
attr_type => "arm_load_store_attr_t",
attr => "ir_mode *ls_mode, ir_entity *entity, int entity_sign, long offset, bool is_frame_entity",
- custominit => "arch_irn_add_flags(res, arch_irn_flags_modify_flags);",
+ custominit => "arch_add_irn_flags(res, arch_irn_flags_modify_flags);",
emit => ". mov lr, pc\n".
". ldr pc, %SO",
},
out_arity => "variable",
attr_type => "arm_SymConst_attr_t",
attr => "ir_entity *entity, int symconst_offset",
- custominit => "arch_irn_add_flags(res, arch_irn_flags_modify_flags);",
+ custominit => "arch_add_irn_flags(res, arch_irn_flags_modify_flags);",
emit => '. bl %SC',
},
*/
static int find_out_for_reg(ir_node *node, const arch_register_t *reg)
{
- int n_outs = arch_irn_get_n_outs(node);
+ int n_outs = arch_get_irn_n_outs(node);
int o;
for (o = 0; o < n_outs; ++o) {
- const arch_register_req_t *req = arch_get_out_register_req(node, o);
+ const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
if (req == reg->single_req)
return o;
}
pmap_insert(node_to_stack, node, incsp);
}
- arch_set_in_register_reqs(res, in_req);
+ arch_set_irn_register_reqs_in(res, in_req);
/* create output register reqs */
- arch_set_out_register_req(res, 0, arch_no_register_req);
+ arch_set_irn_register_req_out(res, 0, arch_no_register_req);
for (o = 0; o < n_caller_saves; ++o) {
const arch_register_t *reg = caller_saves[o];
- arch_set_out_register_req(res, o+1, reg->single_req);
+ arch_set_irn_register_req_out(res, o+1, reg->single_req);
}
/* copy pinned attribute */
copy_node_attr(irg, node, phi);
be_duplicate_deps(node, phi);
- arch_set_out_register_req(phi, 0, req);
+ arch_set_irn_register_req_out(phi, 0, req);
be_enqueue_preds(node);
pmap_insert(map, reg, node);
}
+/**
+ * Check if the given register is callee save, i.e. will be saved by the callee.
+ */
+static bool arch_register_is_callee_save(
+ const arch_env_t *arch_env,
+ const arch_register_t *reg)
+{
+ if (arch_env->impl->register_saved_by)
+ return arch_env->impl->register_saved_by(reg, /*callee=*/1);
+ return false;
+}
+
+/**
+ * Check if the given register is caller save, i.e. must be saved by the caller.
+ */
+static bool arch_register_is_caller_save(
+ const arch_env_t *arch_env,
+ const arch_register_t *reg)
+{
+ if (arch_env->impl->register_saved_by)
+ return arch_env->impl->register_saved_by(reg, /*callee=*/0);
+ return false;
+}
+
+
+
/*
_ ____ ___ ____ _ _ _ _
/ \ | __ )_ _| / ___|__ _| | | |__ __ _ ___| | _____
/* add state registers ins */
for (s = 0; s < ARR_LEN(states); ++s) {
const arch_register_t *reg = states[s];
- const arch_register_class_t *cls = arch_register_get_class(reg);
- ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
+ const arch_register_class_t *cls = reg->reg_class;
+ ir_node *regnode = new_r_Unknown(irg, cls->mode);
in[n_ins++] = regnode;
}
assert(n_ins == (int) (n_reg_params + ARR_LEN(states)));
keep = be_new_Keep(bl, n, in);
for (i = 0; i < n; ++i) {
const arch_register_t *reg = (const arch_register_t*)get_irn_link(in[i]);
- be_node_set_reg_class_in(keep, i, reg->reg_class);
+ be_node_set_reg_class_in(keep, i, arch_register_get_class(reg));
}
}
/* create a new initial memory proj */
assert(is_Proj(old_mem));
- arch_set_out_register_req(env->start, 0, arch_no_register_req);
+ arch_set_irn_register_req_out(env->start, 0, arch_no_register_req);
new_mem_proj = new_r_Proj(env->start, mode_M, 0);
mem = new_mem_proj;
set_irg_initial_mem(irg, mem);
const arch_register_t *reg = regflag->reg;
ir_node *proj;
if (reg == NULL) {
- arch_set_out_register_req(start, o, arch_no_register_req);
+ arch_set_irn_register_req_out(start, o, arch_no_register_req);
proj = new_r_Proj(start, mode_M, o);
} else {
be_set_constr_single_reg_out(start, o, regflag->reg,
regflag->flags);
- arch_irn_set_register(start, o, regflag->reg);
+ arch_set_irn_register_out(start, o, regflag->reg);
proj = new_r_Proj(start, reg->reg_class->mode, o);
}
env->prolog.value_map[o] = proj;
(void) data;
if (mode != mode_T) {
if (!has_real_user(node)) {
- const arch_register_req_t *req = arch_get_register_req_out(node);
+ const arch_register_req_t *req = arch_get_irn_register_req(node);
const arch_register_class_t *cls = req->cls;
if (cls == NULL
|| (cls->flags & arch_register_class_flag_manual_ra)) {
return;
}
- n_outs = arch_irn_get_n_outs(node);
+ n_outs = arch_get_irn_n_outs(node);
if (n_outs <= 0)
return;
continue;
}
- req = arch_get_out_register_req(node, i);
+ req = arch_get_irn_register_req_out(node, i);
cls = req->cls;
if (cls == NULL || (cls->flags & arch_register_class_flag_manual_ra)) {
continue;
#include "irprintf.h"
+static const arch_register_req_t no_requirement = {
+ arch_register_req_type_none,
+ NULL,
+ NULL,
+ 0,
+ 0,
+ 0
+};
+const arch_register_req_t *arch_no_register_req = &no_requirement;
+
+static reg_out_info_t dummy_info = {
+ NULL,
+ &no_requirement
+};
+
/* Initialize the architecture environment struct. */
arch_env_t *arch_env_init(const arch_isa_if_t *isa_if, FILE *file_handle, be_main_env_t *main_env)
{
* @param irn The node to get the responsible isa for.
* @return The irn operations given by the responsible isa.
*/
-static inline const arch_irn_ops_t *get_irn_ops(const ir_node *irn)
+static const arch_irn_ops_t *get_irn_ops(const ir_node *irn)
{
const ir_op *ops;
const arch_irn_ops_t *be_ops;
return be_ops;
}
-const arch_register_req_t *arch_get_register_req(const ir_node *irn, int pos)
-{
- if (is_Proj(irn)) {
- ir_node *pred = get_Proj_pred(irn);
- long pn = get_Proj_proj(irn);
- assert(pos == -1);
- return arch_get_out_register_req(pred, pn);
- }
-
- if (pos < 0) {
- return arch_get_out_register_req(irn, -pos-1);
- } else {
- return arch_get_in_register_req(irn, pos);
- }
-}
-
void arch_set_frame_offset(ir_node *irn, int offset)
{
const arch_irn_ops_t *ops = get_irn_ops(irn);
}
}
-int arch_reg_is_allocatable(const ir_node *irn, int pos,
- const arch_register_t *reg)
-{
- const arch_register_req_t *req = arch_get_register_req(irn, pos);
-
- if (req->type == arch_register_req_type_none)
- return 0;
-
- if (arch_register_req_is(req, limited)) {
- if (arch_register_get_class(reg) != req->cls)
- return 0;
- return rbitset_is_set(req->limited, arch_register_get_index(reg));
- }
-
- return req->cls == reg->reg_class;
-}
-
-const arch_register_class_t *arch_get_irn_reg_class(const ir_node *irn, int pos)
+static reg_out_info_t *get_out_info(const ir_node *node)
{
- const arch_register_req_t *req = arch_get_register_req(irn, pos);
-
- assert(req->type != arch_register_req_type_none || req->cls == NULL);
-
- return req->cls;
-}
-
-static inline reg_out_info_t *get_out_info(const ir_node *node)
-{
- size_t pos = 0;
+ size_t pos = 0;
const backend_info_t *info;
-
assert(get_irn_mode(node) != mode_T);
if (is_Proj(node)) {
pos = get_Proj_proj(node);
}
info = be_get_info(node);
+ /* We have a problem with the switch-node where there can be arbitrary
+ * Proj-numbers, so we can't easily allocate an array big enough to hold
+ * all of them. So until we rewrite Switch-nodes we need this special
+ * case. */
+ if (info->out_infos == NULL)
+ return &dummy_info;
assert(pos < ARR_LEN(info->out_infos));
return &info->out_infos[pos];
}
-
-static inline reg_out_info_t *get_out_info_n(const ir_node *node, int pos)
+static reg_out_info_t *get_out_info_n(const ir_node *node, int pos)
{
const backend_info_t *info = be_get_info(node);
assert(!is_Proj(node));
return out->reg;
}
-const arch_register_t *arch_irn_get_register(const ir_node *node, int pos)
+const arch_register_t *arch_get_irn_register_out(const ir_node *node, int pos)
{
const reg_out_info_t *out = get_out_info_n(node, pos);
return out->reg;
}
-void arch_irn_set_register(ir_node *node, int pos, const arch_register_t *reg)
+const arch_register_t *arch_get_irn_register_in(const ir_node *node, int pos)
+{
+ ir_node *op = get_irn_n(node, pos);
+ return arch_get_irn_register(op);
+}
+
+void arch_set_irn_register_out(ir_node *node, int pos,
+ const arch_register_t *reg)
{
reg_out_info_t *out = get_out_info_n(node, pos);
out->reg = reg;
out->reg = reg;
}
+const arch_register_req_t *arch_get_irn_register_req(const ir_node *node)
+{
+ reg_out_info_t *out = get_out_info(node);
+ return out->req;
+}
+
arch_irn_class_t arch_irn_classify(const ir_node *node)
{
const arch_irn_ops_t *ops = get_irn_ops(node);
return ops->classify(node);
}
-arch_irn_flags_t arch_irn_get_flags(const ir_node *node)
+arch_irn_flags_t arch_get_irn_flags(const ir_node *node)
{
- backend_info_t *info = be_get_info(node);
+ backend_info_t *info;
+ if (is_Proj(node))
+ return arch_irn_flags_not_scheduled;
+
+ info = be_get_info(node);
return info->flags;
}
-void arch_irn_set_flags(ir_node *node, arch_irn_flags_t flags)
+void arch_set_irn_flags(ir_node *node, arch_irn_flags_t flags)
{
- backend_info_t *info = be_get_info(node);
+ backend_info_t *info;
+
+ /* setting flags is only supported for instructions currently.
+ * (mainly because we found no use for it yet and saved the space for
+ * be_infos for them) */
+ assert(!is_Proj(node));
+ info = be_get_info(node);
info->flags = flags;
}
-void arch_irn_add_flags(ir_node *node, arch_irn_flags_t flags)
+void arch_add_irn_flags(ir_node *node, arch_irn_flags_t flags)
{
- backend_info_t *info = be_get_info(node);
+ backend_info_t *info;
+ assert(!is_Proj(node));
+ info = be_get_info(node);
info->flags |= flags;
}
+bool arch_reg_is_allocatable(const arch_register_req_t *req,
+ const arch_register_t *reg)
+{
+ if (reg->type & arch_register_type_joker)
+ return true;
+ if (req->type == arch_register_req_type_none)
+ return false;
+ if (req->type & arch_register_req_type_limited) {
+ if (arch_register_get_class(reg) != req->cls)
+ return false;
+ return rbitset_is_set(req->limited, arch_register_get_index(reg));
+ }
+ return req->cls == arch_register_get_class(reg);
+}
+
void arch_dump_register_req(FILE *F, const arch_register_req_t *req,
const ir_node *node)
{
void arch_dump_reqs_and_registers(FILE *F, const ir_node *node)
{
int n_ins = get_irn_arity(node);
- int n_outs = arch_irn_get_n_outs(node);
- arch_irn_flags_t flags = arch_irn_get_flags(node);
+ int n_outs = arch_get_irn_n_outs(node);
+ arch_irn_flags_t flags = arch_get_irn_flags(node);
int i;
for (i = 0; i < n_ins; ++i) {
- const arch_register_req_t *req = arch_get_in_register_req(node, i);
+ const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
fprintf(F, "inreq #%d = ", i);
arch_dump_register_req(F, req, node);
fputs("\n", F);
}
for (i = 0; i < n_outs; ++i) {
- const arch_register_req_t *req = arch_get_out_register_req(node, i);
+ const arch_register_req_t *req = arch_get_irn_register_req_out(node, i);
fprintf(F, "outreq #%d = ", i);
arch_dump_register_req(F, req, node);
fputs("\n", F);
}
for (i = 0; i < n_outs; ++i) {
- const arch_register_t *reg = arch_irn_get_register(node, i);
- const arch_register_req_t *req = arch_get_out_register_req(node, i);
+ const arch_register_t *reg = arch_get_irn_register_out(node, i);
+ const arch_register_req_t *req = arch_get_irn_register_req_out(node, i);
if (req->cls == NULL)
continue;
fprintf(F, "reg #%d = %s\n", i, reg != NULL ? reg->name : "n/a");
}
fprintf(F, " (%d)\n", (int)flags);
}
-
-static const arch_register_req_t no_requirement = {
- arch_register_req_type_none,
- NULL,
- NULL,
- 0,
- 0,
- 0
-};
-const arch_register_req_t *arch_no_register_req = &no_requirement;
unsigned int i);
/**
- * Get the register requirements for a node.
- * @note Deprecated API! Preferably use
- * arch_get_in_register_req and
- * arch_get_out_register_req.
- *
- * @param irn The node.
- * @param pos The position of the operand you're interested in.
- * @return A pointer to the register requirements. If NULL is returned, the
- * operand was no register operand.
+ * Get the register allocated for a value.
*/
-const arch_register_req_t *arch_get_register_req(const ir_node *irn, int pos);
+const arch_register_t *arch_get_irn_register(const ir_node *irn);
/**
- * Check, if a register is assignable to an operand of a node.
- * @param irn The node.
- * @param pos The position of the operand.
- * @param reg The register.
- * @return 1, if the register might be allocated to the operand 0 if not.
+ * Assign register to a value
*/
-int arch_reg_is_allocatable(const ir_node *irn, int pos,
- const arch_register_t *reg);
-
-#define arch_reg_out_is_allocatable(irn, reg) arch_reg_is_allocatable(irn, -1, reg)
+void arch_set_irn_register(ir_node *irn, const arch_register_t *reg);
/**
- * Get the register class of an operand of a node.
- * @param irn The node.
- * @param pos The position of the operand, -1 for the output.
- * @return The register class of the operand or NULL, if
- * operand is a non-register operand.
+ * Set the register for a certain output operand.
*/
-const arch_register_class_t *arch_get_irn_reg_class(const ir_node *irn,
- int pos);
+void arch_set_irn_register_out(ir_node *irn, int pos, const arch_register_t *r);
-#define arch_get_irn_reg_class_out(irn) arch_get_irn_reg_class(irn, -1)
+const arch_register_t *arch_get_irn_register_out(const ir_node *irn, int pos);
+const arch_register_t *arch_get_irn_register_in(const ir_node *irn, int pos);
/**
- * Get the register allocated at a certain output operand of a node.
- * @param irn The node.
- * @return The register allocated for this operand
+ * Get register constraints for an operand at position @p pos
*/
-const arch_register_t *arch_get_irn_register(const ir_node *irn);
-const arch_register_t *arch_irn_get_register(const ir_node *irn, int pos);
+static inline const arch_register_req_t *arch_get_irn_register_req_in(
+ const ir_node *node, int pos)
+{
+ const backend_info_t *info = be_get_info(node);
+ if (info->in_reqs == NULL)
+ return arch_no_register_req;
+ return info->in_reqs[pos];
+}
/**
- * Set the register for a certain output operand.
- * @param irn The node.
- * @param reg The register.
+ * Get register constraint for a produced result (the @p pos result)
*/
-void arch_set_irn_register(ir_node *irn, const arch_register_t *reg);
-void arch_irn_set_register(ir_node *irn, int pos, const arch_register_t *reg);
+static inline const arch_register_req_t *arch_get_irn_register_req_out(
+ const ir_node *node, int pos)
+{
+ const backend_info_t *info = be_get_info(node);
+ if (info->out_infos == NULL)
+ return arch_no_register_req;
+ return info->out_infos[pos].req;
+}
-/**
- * Classify a node.
- * @param irn The node.
- * @return A classification of the node.
- */
-arch_irn_class_t arch_irn_classify(const ir_node *irn);
+static inline void arch_set_irn_register_req_out(ir_node *node, int pos,
+ const arch_register_req_t *req)
+{
+ backend_info_t *info = be_get_info(node);
+ assert(pos < (int)ARR_LEN(info->out_infos));
+ info->out_infos[pos].req = req;
+}
+
+static inline void arch_set_irn_register_reqs_in(ir_node *node,
+ const arch_register_req_t **reqs)
+{
+ backend_info_t *info = be_get_info(node);
+ info->in_reqs = reqs;
+}
+
+static inline const arch_register_req_t **arch_get_irn_register_reqs_in(
+ const ir_node *node)
+{
+ backend_info_t *info = be_get_info(node);
+ return info->in_reqs;
+}
+
+const arch_register_req_t *arch_get_irn_register_req(const ir_node *node);
/**
* Get the flags of a node.
* @param irn The node.
* @return The flags.
*/
-arch_irn_flags_t arch_irn_get_flags(const ir_node *irn);
+arch_irn_flags_t arch_get_irn_flags(const ir_node *irn);
-void arch_irn_set_flags(ir_node *node, arch_irn_flags_t flags);
-void arch_irn_add_flags(ir_node *node, arch_irn_flags_t flags);
+void arch_set_irn_flags(ir_node *node, arch_irn_flags_t flags);
+void arch_add_irn_flags(ir_node *node, arch_irn_flags_t flags);
-#define arch_irn_is(irn, flag) ((arch_irn_get_flags(irn) & arch_irn_flags_ ## flag) != 0)
+#define arch_irn_is(irn, flag) ((arch_get_irn_flags(irn) & arch_irn_flags_ ## flag) != 0)
+
+static inline unsigned arch_get_irn_n_outs(const ir_node *node)
+{
+ backend_info_t *info = be_get_info(node);
+ if (info->out_infos == NULL)
+ return 0;
+
+ return (unsigned)ARR_LEN(info->out_infos);
+}
+
+/**
+ * Classify a node.
+ * @param irn The node.
+ * @return A classification of the node.
+ */
+arch_irn_class_t arch_irn_classify(const ir_node *irn);
/**
* Initialize the architecture environment struct.
registers are required */
};
-static inline int reg_reqs_equal(const arch_register_req_t *req1,
- const arch_register_req_t *req2)
+static inline bool reg_reqs_equal(const arch_register_req_t *req1,
+ const arch_register_req_t *req2)
{
if (req1 == req2)
- return 1;
+ return true;
if (req1->type != req2->type
- || req1->cls != req2->cls
- || req1->other_same != req2->other_same
- || req1->other_different != req2->other_different)
- return 0;
+ || req1->cls != req2->cls
+ || req1->other_same != req2->other_same
+ || req1->other_different != req2->other_different)
+ return false;
if (req1->limited != NULL) {
size_t n_regs;
if (req2->limited == NULL)
- return 0;
+ return false;
n_regs = arch_register_class_n_regs(req1->cls);
if (!rbitsets_equal(req1->limited, req2->limited, n_regs))
- return 0;
+ return false;
}
- return 1;
+ return true;
}
/**
/**
* Checks if the given register is callee/caller saved.
+ * @deprecated, only necessary if backend still uses beabi functions
*/
int (*register_saved_by)(const arch_register_t *reg, int callee);
};
stuff from beabi.h/.c */
};
-static inline unsigned arch_irn_get_n_outs(const ir_node *node)
-{
- backend_info_t *info = be_get_info(node);
- if (info->out_infos == NULL)
- return 0;
-
- return (unsigned)ARR_LEN(info->out_infos);
-}
-
-static inline const arch_irn_ops_t *get_irn_ops_simple(const ir_node *node)
-{
- const ir_op *ops = get_irn_op(node);
- const arch_irn_ops_t *be_ops = get_op_ops(ops)->be_ops;
- assert(!is_Proj(node));
- return be_ops;
-}
-
-static inline const arch_register_req_t *arch_get_register_req_out(
- const ir_node *irn)
-{
- int pos = 0;
- backend_info_t *info;
-
- /* you have to query the Proj nodes for the constraints (or use
- * arch_get_out_register_req. Querying a mode_T node and expecting
- * arch_no_register_req is a bug in your code! */
- assert(get_irn_mode(irn) != mode_T);
-
- if (is_Proj(irn)) {
- pos = get_Proj_proj(irn);
- irn = get_Proj_pred(irn);
- }
-
- info = be_get_info(irn);
- if (info->out_infos == NULL)
- return arch_no_register_req;
-
- return info->out_infos[pos].req;
-}
-
static inline bool arch_irn_is_ignore(const ir_node *irn)
{
- const arch_register_req_t *req = arch_get_register_req_out(irn);
- return !!(req->type & arch_register_req_type_ignore);
+ const arch_register_req_t *req = arch_get_irn_register_req(irn);
+ return req->type & arch_register_req_type_ignore;
}
static inline bool arch_irn_consider_in_reg_alloc(
const arch_register_class_t *cls, const ir_node *node)
{
- const arch_register_req_t *req = arch_get_register_req_out(node);
+ const arch_register_req_t *req = arch_get_irn_register_req(node);
return
req->cls == cls &&
!(req->type & arch_register_req_type_ignore);
}
-/**
- * Get register constraints for an operand at position @p
- */
-static inline const arch_register_req_t *arch_get_in_register_req(
- const ir_node *node, int pos)
-{
- const backend_info_t *info = be_get_info(node);
- if (info->in_reqs == NULL)
- return arch_no_register_req;
- return info->in_reqs[pos];
-}
-
-/**
- * Get register constraint for a produced result (the @p pos result)
- */
-static inline const arch_register_req_t *arch_get_out_register_req(
- const ir_node *node, int pos)
-{
- const backend_info_t *info = be_get_info(node);
- if (info->out_infos == NULL)
- return arch_no_register_req;
- return info->out_infos[pos].req;
-}
-
-static inline void arch_set_out_register_req(ir_node *node, int pos,
- const arch_register_req_t *req)
-{
- backend_info_t *info = be_get_info(node);
- assert(pos < (int) arch_irn_get_n_outs(node));
- info->out_infos[pos].req = req;
-}
-
-static inline void arch_set_in_register_reqs(ir_node *node,
- const arch_register_req_t **in_reqs)
-{
- backend_info_t *info = be_get_info(node);
- info->in_reqs = in_reqs;
-}
-
-static inline const arch_register_req_t **arch_get_in_register_reqs(
- const ir_node *node)
-{
- backend_info_t *info = be_get_info(node);
- return info->in_reqs;
-}
-
-/**
- * Check if the given register is callee save, ie. will be save by the callee.
- */
-static inline bool arch_register_is_callee_save(
- const arch_env_t *arch_env,
- const arch_register_t *reg)
-{
- if (arch_env->impl->register_saved_by)
- return arch_env->impl->register_saved_by(reg, /*callee=*/1);
- return false;
-}
-
-/**
- * Check if the given register is caller save, ie. must be save by the caller.
- */
-static inline bool arch_register_is_caller_save(
- const arch_env_t *arch_env,
- const arch_register_t *reg)
-{
- if (arch_env->impl->register_saved_by)
- return arch_env->impl->register_saved_by(reg, /*callee=*/0);
- return false;
-}
-
/**
* Iterate over all values defined by an instruction.
* Only looks at values in a certain register class where the requirements
foreach_out_edge(node, edge_) { \
const arch_register_req_t *req_; \
value = get_edge_src_irn(edge_); \
- req_ = arch_get_register_req_out(value); \
+ req_ = arch_get_irn_register_req(value); \
if (req_->cls != cls) \
continue; \
code \
} \
} else { \
- const arch_register_req_t *req_ = arch_get_register_req_out(node); \
+ const arch_register_req_t *req_ = arch_get_irn_register_req(node); \
value = node; \
if (req_->cls == cls) { \
code \
code \
)
+static inline const arch_register_class_t *arch_get_irn_reg_class(
+ const ir_node *node)
+{
+ const arch_register_req_t *req = arch_get_irn_register_req(node);
+ return req->cls;
+}
+
+bool arch_reg_is_allocatable(const arch_register_req_t *req,
+ const arch_register_t *reg);
+
#endif
ir_node *sub_res, *curr;
be_ifg_t *ifg = chordal_env->ifg;
neighbours_iter_t iter;
-
+ const arch_register_req_t *req;
DBG((dbg, LEVEL_3, "\t %+F \tcaused col(%+F) \t%2d --> %2d\n", trigger, irn, irn_col, col));
return irn;
}
+ req = arch_get_irn_register_req(irn);
#ifdef SEARCH_FREE_COLORS
/* If we resolve conflicts (recursive calls) we can use any unused color.
* In case of the first call @p col must be used.
*/
if (irn != trigger) {
bitset_t *free_cols = bitset_alloca(cls->n_regs);
- const arch_register_req_t *req;
ir_node *curr;
int free_col;
bitset_copy(free_cols, co->cenv->allocatable_regs);
/* Exclude colors not assignable to the irn */
- req = arch_get_register_req_out(irn);
if (arch_register_req_is(req, limited)) {
bitset_t *limited = bitset_alloca(cls->n_regs);
rbitset_copy_to_bitset(req->limited, limited);
#endif /* SEARCH_FREE_COLORS */
/* If target color is not allocatable changing color is impossible */
- if (!arch_reg_out_is_allocatable(irn, arch_register_for_index(cls, col))) {
+ if (!arch_reg_is_allocatable(req, arch_register_for_index(cls, col))) {
DBG((dbg, LEVEL_3, "\t %+F impossible\n", irn));
return CHANGE_IMPOSSIBLE;
}
/* init queue */
INIT_LIST_HEAD(&ou->queue);
- req = arch_get_register_req_out(ou->nodes[0]);
+ req = arch_get_irn_register_req(ou->nodes[0]);
allocatable_regs = ou->co->cenv->allocatable_regs;
n_regs = req->cls->n_regs;
if (arch_register_req_is(req, limited)) {
if (ci->adm_cache == NULL) {
const arch_register_req_t *req;
ci->adm_cache = bitset_obstack_alloc(phase_obst(&env->ph), env->n_regs);
- req = arch_get_register_req_out(ci->irn);
+ req = arch_get_irn_register_req(ci->irn);
if (arch_register_req_is(req, limited)) {
int i, n;
static void incur_constraint_costs(co2_t *env, const ir_node *irn, col_cost_pair_t *col_costs, int costs)
{
- const arch_register_req_t *req = arch_get_register_req_out(irn);
+ const arch_register_req_t *req = arch_get_irn_register_req(irn);
if (arch_register_req_is(req, limited)) {
unsigned n_regs = env->co->cls->n_regs;
static const char *get_dot_shape_name(co2_irn_t *ci)
{
- const arch_register_req_t *req = arch_get_register_req_out(ci->irn);
+ const arch_register_req_t *req = arch_get_irn_register_req(ci->irn);
if (arch_register_req_is(req, limited))
return "diamond";
static int ifg_is_dump_node(void *self, ir_node *irn)
{
- const arch_register_req_t *req = arch_get_register_req_out(irn);
+ const arch_register_req_t *req = arch_get_irn_register_req(irn);
(void)self;
return !(req->type & arch_register_req_type_ignore);
}
res->adm_colors = bitset_obstack_alloc(phase_obst(ph), env->n_regs);
/* Exclude colors not assignable to the irn */
- req = arch_get_register_req_out(irn);
+ req = arch_get_irn_register_req(irn);
if (arch_register_req_is(req, limited))
rbitset_copy_to_bitset(req->limited, res->adm_colors);
else
while (redo) {
redo = 0;
be_ifg_foreach_node(ifg, &iter, irn) {
- const arch_register_req_t *req = arch_get_register_req_out(irn);
+ const arch_register_req_t *req = arch_get_irn_register_req(irn);
if (!arch_register_req_is(req, limited) && !sr_is_removed(sr, irn) && !co_gs_is_optimizable(sr->co, irn)) {
if (sr_is_simplicial(sr, irn)) {
pmap_insert(lenv->nr_2_irn, INT_TO_PTR(node_nr), irn);
- req = arch_get_register_req_out(irn);
+ req = arch_get_irn_register_req(irn);
bitset_clear_all(colors);
if (is_Reg_Phi(irn) || is_Perm_Proj(irn))
return 1;
- req = arch_get_register_req_out(irn);
+ req = arch_get_irn_register_req(irn);
if (is_2addr_code(req))
return 1;
if (get_irn_mode(irn) == mode_T)
return;
- req = arch_get_register_req_out(irn);
+ req = arch_get_irn_register_req(irn);
if (req->cls != co->cls)
return;
if (!co_is_optimizable_root(irn))
int o, arg_pos;
ir_node *arg = get_irn_n(irn, i);
- assert(arch_get_irn_reg_class_out(arg) == co->cls && "Argument not in same register class.");
+ assert(arch_get_irn_reg_class(arg) == co->cls && "Argument not in same register class.");
if (arg == irn)
continue;
if (nodes_interfere(co->cenv, irn, arg)) {
/* Units with constraints come first */
u1_has_constr = 0;
for (i=0; i<u1->node_count; ++i) {
- arch_get_register_req_out(&req, u1->nodes[i]);
+ arch_get_irn_register_req(&req, u1->nodes[i]);
if (arch_register_req_is(&req, limited)) {
u1_has_constr = 1;
break;
u2_has_constr = 0;
for (i=0; i<u2->node_count; ++i) {
- arch_get_register_req_out(&req, u2->nodes[i]);
+ arch_get_irn_register_req(&req, u2->nodes[i]);
if (arch_register_req_is(&req, limited)) {
u2_has_constr = 1;
break;
if (get_irn_mode(irn) == mode_T)
return;
- req = arch_get_register_req_out(irn);
+ req = arch_get_irn_register_req(irn);
if (req->cls != co->cls || arch_irn_is_ignore(irn))
return;
constr[1] = bitset_alloca(co->cls->n_regs);
for (j = 0; j < 2; ++j) {
- const arch_register_req_t *req = arch_get_register_req_out(nodes[j]);
+ const arch_register_req_t *req = arch_get_irn_register_req(nodes[j]);
if (arch_register_req_is(req, limited))
rbitset_copy_to_bitset(req->limited, constr[j]);
else
if (!arch_irn_is_ignore(irn)) {
int idx = node_map[get_irn_idx(irn)];
affinity_node_t *a = get_affinity_info(co, irn);
- const arch_register_req_t *req = arch_get_register_req_out(irn);
+ const arch_register_req_t *req = arch_get_irn_register_req(irn);
ir_node *adj;
if (arch_register_req_is(req, limited)) {
{
co_ifg_dump_t *env = (co_ifg_dump_t*)self;
const arch_register_t *reg = arch_get_irn_register(irn);
- const arch_register_req_t *req = arch_get_register_req_out(irn);
+ const arch_register_req_t *req = arch_get_irn_register_req(irn);
int limited = arch_register_req_is(req, limited);
if (env->flags & CO_IFG_DUMP_LABELS) {
/* test whether the current node needs flags */
arity = get_irn_arity(node);
for (i = 0; i < arity; ++i) {
- const arch_register_class_t *cls = arch_get_irn_reg_class(node, i);
- if (cls == flag_class) {
+ const arch_register_req_t *req
+ = arch_get_irn_register_req_in(node, i);
+ if (req->cls == flag_class) {
assert(new_flags_needed == NULL);
new_flags_needed = get_irn_n(node, i);
}
if (bitset_contains_irn(seen, m))
continue;
- if (arch_get_register_req_out(m)->type & arch_register_req_type_ignore)
+ if (arch_get_irn_register_req(m)->type & arch_register_req_type_ignore)
continue;
bitset_add_irn(seen, m);
if (bitset_contains_irn(seen, n))
continue;
- if (arch_get_register_req_out(n)->type & arch_register_req_type_ignore)
+ if (arch_get_irn_register_req(n)->type & arch_register_req_type_ignore)
continue;
++n_comp;
struct obstack *obst;
backend_info_t *info;
- /* Projs need no be info, their tuple holds all information */
+ /* Projs need no be info, all info is fetched from their predecessor */
if (is_Proj(node))
return;
* backend graphs
*/
switch (get_irn_opcode(node)) {
- case iro_Bad:
case iro_Block:
case iro_Dummy:
- case iro_End:
- case iro_Unknown:
- info->flags |= arch_irn_flags_not_scheduled;
- break;
case iro_NoMem:
case iro_Anchor:
case iro_Pin:
case iro_Sync:
+ case iro_Bad:
+ case iro_End:
+ case iro_Unknown:
info->flags |= arch_irn_flags_not_scheduled;
info->out_infos = NEW_ARR_D(reg_out_info_t, obst, 1);
memset(info->out_infos, 0, 1 * sizeof(info->out_infos[0]));
be_operand_t o;
/* found a register use, create an operand */
- o.req = arch_get_register_req(mach_op, i);
+ o.req = arch_get_irn_register_req_in(mach_op, i);
o.carrier = op;
o.irn = insn->irn;
o.pos = i;
if (arch_irn_consider_in_reg_alloc(env->cls, p)) {
/* found a def: create a new operand */
- o.req = arch_get_register_req_out(p);
+ o.req = arch_get_irn_register_req(p);
o.carrier = p;
o.irn = irn;
o.pos = -(get_Proj_proj(p) + 1);
}
} else if (arch_irn_consider_in_reg_alloc(env->cls, irn)) {
/* only one def, create one operand */
- o.req = arch_get_register_req_out(irn);
+ o.req = arch_get_irn_register_req(irn);
o.carrier = irn;
o.irn = irn;
o.pos = -1;
add_machine_operands(env, insn, op);
} else if (arch_irn_consider_in_reg_alloc(env->cls, op)) {
/* found a register use, create an operand */
- o.req = arch_get_register_req(irn, i);
+ o.req = arch_get_irn_register_req_in(irn, i);
o.carrier = op;
o.irn = irn;
o.pos = i;
sched_foreach(block, node) {
if (! is_Jmp(node)
- && !(arch_irn_get_flags(node) & arch_irn_flags_simple_jump))
+ && !(arch_get_irn_flags(node) & arch_irn_flags_simple_jump))
goto check_preds;
if (jump != NULL) {
/* we should never have 2 jumps in a block */
static void node_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
{
if (is_Proj(irn)
- || (arch_irn_get_flags(irn) & arch_irn_flags_not_scheduled)) {
+ || (arch_get_irn_flags(irn) & arch_irn_flags_not_scheduled)) {
selected(env, irn);
DB((dbg, LEVEL_3, "\tmaking immediately available: %+F\n", irn));
} else if (be_is_Keep(irn) || be_is_CopyKeep(irn)) {
*/
static void add_to_sched(block_sched_env_t *env, ir_node *irn)
{
- assert(! (arch_irn_get_flags(irn) & arch_irn_flags_not_scheduled));
+ assert(! (arch_get_irn_flags(irn) & arch_irn_flags_not_scheduled));
sched_add_before(env->block, irn);
op_set = &env->op_set;
block = get_nodes_block(irn);
- cls = arch_get_irn_reg_class_out(other_different);
+ cls = arch_get_irn_reg_class(other_different);
/* Make a not spillable copy of the different node */
/* this is needed because the different irn could be */
cpy = find_copy(skip_Proj(irn), other_different);
if (! cpy) {
cpy = be_new_Copy(cls, block, other_different);
- arch_irn_set_flags(cpy, arch_irn_flags_dont_spill);
+ arch_set_irn_flags(cpy, arch_irn_flags_dont_spill);
DB((dbg_constr, LEVEL_1, "created non-spillable %+F for value %+F\n", cpy, other_different));
} else {
DB((dbg_constr, LEVEL_1, "using already existing %+F for value %+F\n", cpy, other_different));
*/
static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, constraint_env_t *env)
{
- const arch_register_req_t *req = arch_get_register_req_out(irn);
+ const arch_register_req_t *req = arch_get_irn_register_req(irn);
if (arch_register_req_is(req, must_be_different)) {
const unsigned other = req->other_different;
/* get some Proj and find out the register class of that Proj. */
const ir_edge_t *edge = get_irn_out_edge_first_kind(perm, EDGE_KIND_NORMAL);
ir_node *one_proj = get_edge_src_irn(edge);
- const arch_register_class_t *cls = arch_get_irn_reg_class_out(one_proj);
+ const arch_register_class_t *cls = arch_get_irn_reg_class(one_proj);
assert(is_Proj(one_proj));
DB((dbg_permmove, LEVEL_1, "perm move %+F irg %+F\n", perm, irg));
break;
if (arch_irn_is(node, modify_flags))
break;
- req = arch_get_register_req_out(node);
+ req = arch_get_irn_register_req(node);
if (req->type != arch_register_req_type_normal)
break;
for (i = get_irn_arity(node) - 1; i >= 0; --i) {
*/
be_set_constr_in(res, n_be_Spill_frame, arch_no_register_req);
- arch_set_out_register_req(res, 0, arch_no_register_req);
+ arch_set_irn_register_req_out(res, 0, arch_no_register_req);
return res;
}
be_node_set_reg_class_out(res, 0, cls);
be_node_set_reg_class_in(res, n_be_Reload_frame, cls_frame);
- arch_irn_set_flags(res, arch_irn_flags_rematerializable);
+ arch_set_irn_flags(res, arch_irn_flags_rematerializable);
a = (be_frame_attr_t*) get_irn_generic_attr(res);
a->ent = NULL;
/* Set output constraint to stack register. */
be_set_constr_single_reg_in(irn, n_be_AddSP_old_sp, sp,
arch_register_req_type_none);
- be_node_set_reg_class_in(irn, n_be_AddSP_size, arch_register_get_class(sp));
+ be_node_set_reg_class_in(irn, n_be_AddSP_size, sp->reg_class);
be_set_constr_single_reg_out(irn, pn_be_AddSP_sp, sp,
arch_register_req_type_produces_sp);
/* Set output constraint to stack register. */
be_set_constr_single_reg_in(irn, n_be_SubSP_old_sp, sp,
arch_register_req_type_none);
- be_node_set_reg_class_in(irn, n_be_SubSP_size, arch_register_get_class(sp));
+ be_node_set_reg_class_in(irn, n_be_SubSP_size, sp->reg_class);
be_set_constr_single_reg_out(irn, pn_be_SubSP_sp, sp, arch_register_req_type_produces_sp);
return irn;
req = be_create_reg_req(obst, reg, additional_types);
}
- arch_irn_set_register(node, pos, reg);
+ arch_set_irn_register_out(node, pos, reg);
be_set_constr_out(node, pos, req);
}
{
ir_graph *irg = get_Block_irg(block);
ir_node *frame = get_irg_frame(irg);
- const arch_register_class_t *cls = arch_get_irn_reg_class_out(irn);
- const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame);
+ const arch_register_class_t *cls = arch_get_irn_reg_class(irn);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame);
ir_node *spill;
spill = be_new_Spill(cls, cls_frame, block, frame, irn);
ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
ir_graph *irg = get_Block_irg(bl);
ir_node *frame = get_irg_frame(irg);
- const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame);
assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
static int get_start_reg_index(ir_graph *irg, const arch_register_t *reg)
{
ir_node *start = get_irg_start(irg);
- unsigned n_outs = arch_irn_get_n_outs(start);
+ unsigned n_outs = arch_get_irn_n_outs(start);
int i;
/* do a naive linear search... */
for (i = 0; i < (int)n_outs; ++i) {
const arch_register_req_t *out_req
- = arch_get_out_register_req(start, i);
+ = arch_get_irn_register_req_out(start, i);
if (! (out_req->type & arch_register_req_type_limited))
continue;
if (out_req->cls != arch_register_get_class(reg))
int i;
/* do a naive linear search... */
for (i = 0; i < arity; ++i) {
- const arch_register_req_t *req = arch_get_in_register_req(ret, i);
+ const arch_register_req_t *req = arch_get_irn_register_req_in(ret, i);
if (! (req->type & arch_register_req_type_limited))
continue;
if (req->cls != arch_register_get_class(reg))
/* set costs depending on register constrains */
unsigned idx;
for (idx = 0; idx < colors_n; idx++) {
- if (!bitset_is_set(allocatable_regs, idx) || !arch_reg_out_is_allocatable(irn, arch_register_for_index(cls, idx))) {
+ const arch_register_req_t *req = arch_get_irn_register_req(irn);
+ const arch_register_t *reg = arch_register_for_index(cls, idx);
+ if (!bitset_is_set(allocatable_regs, idx)
+ || !arch_reg_is_allocatable(req, reg)) {
/* constrained */
vector_set(costs_vector, idx, INF_COSTS);
cntConstrains++;
{
be_pbqp_alloc_env_t *pbqp_alloc_env = (be_pbqp_alloc_env_t*)env;
const arch_register_class_t *cls = pbqp_alloc_env->cls;
- const arch_register_req_t *req = arch_get_register_req_out(irn);
+ const arch_register_req_t *req = arch_get_irn_register_req(irn);
unsigned pos;
unsigned max;
static void check_defs(const ir_nodeset_t *live_nodes, float weight,
ir_node *node)
{
- const arch_register_req_t *req = arch_get_register_req_out(node);
+ const arch_register_req_t *req = arch_get_irn_register_req(node);
if (req->type & arch_register_req_type_limited) {
const unsigned *limited = req->limited;
float penalty = weight * DEF_FACTOR;
info = get_allocation_info(node);
for (i = 0; i < arity; ++i) {
ir_node *op = get_irn_n(node, i);
- const arch_register_req_t *req = arch_get_register_req_out(op);
+ const arch_register_req_t *req = arch_get_irn_register_req(op);
if (req->cls != cls)
continue;
if (!arch_irn_consider_in_reg_alloc(cls, op))
continue;
- req = arch_get_register_req(node, i);
+ req = arch_get_irn_register_req_in(node, i);
if (!(req->type & arch_register_req_type_limited))
continue;
static void congruence_def(ir_nodeset_t *live_nodes, const ir_node *node)
{
- const arch_register_req_t *req = arch_get_register_req_out(node);
+ const arch_register_req_t *req = arch_get_irn_register_req(node);
/* should be same constraint? */
if (req->type & arch_register_req_type_should_be_same) {
* (so we don't split away the values produced because of
* must_be_different constraints) */
original_insn = skip_Proj(info->original_value);
- if (arch_irn_get_flags(original_insn) & arch_irn_flags_dont_spill)
+ if (arch_get_irn_flags(original_insn) & arch_irn_flags_dont_spill)
return false;
from_reg = arch_get_irn_register(to_split);
return;
}
- req = arch_get_register_req_out(node);
+ req = arch_get_irn_register_req(node);
/* ignore reqs must be preassigned */
assert (! (req->type & arch_register_req_type_ignore));
if (!arch_irn_consider_in_reg_alloc(cls, op))
continue;
- req = arch_get_register_req(node, i);
+ req = arch_get_irn_register_req_in(node, i);
if (!(req->type & arch_register_req_type_limited))
continue;
continue;
/* are there any limitations for the i'th operand? */
- req = arch_get_register_req(node, i);
+ req = arch_get_irn_register_req_in(node, i);
if (req->width > 1)
double_width = true;
if (!(req->type & arch_register_req_type_limited))
if (!arch_irn_consider_in_reg_alloc(cls, op))
continue;
- req = arch_get_register_req(node, i);
+ req = arch_get_irn_register_req_in(node, i);
if (!(req->type & arch_register_req_type_limited))
continue;
int p;
node = be_lv_get_irn(lv, block, i);
- req = arch_get_register_req_out(node);
+ req = arch_get_irn_register_req(node);
if (req->cls != cls)
continue;
if (mode == mode_T)
return 1;
- if (arch_get_register_req_out(irn)->type & arch_register_req_type_ignore)
+ if (arch_get_irn_register_req(irn)->type & arch_register_req_type_ignore)
return 0;
return 1;
mode = get_irn_mode(op);
if (mode == mode_M)
continue;
+ if (arch_get_irn_flags(op) & arch_irn_flags_not_scheduled)
+ continue;
if (mode != mode_T && arch_irn_is_ignore(op))
continue;
cost = MAX(fc->costs[i].cost + n_op_res, cost);
sched_foreach(bl, irn) {
int i, n;
if (is_Proj(irn)
- || (arch_irn_get_flags(irn) & arch_irn_flags_not_scheduled))
+ || (arch_get_irn_flags(irn) & arch_irn_flags_not_scheduled))
continue;
for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
if (is_Proj(op)
- || (arch_irn_get_flags(op) & arch_irn_flags_not_scheduled))
+ || (arch_get_irn_flags(op) & arch_irn_flags_not_scheduled))
continue;
sum += compute_max_hops(env, op);
unsigned *tmp = NULL;
unsigned *def_constr = NULL;
int arity = get_irn_arity(node);
+ ir_node *def;
int i, i2;
*/
for (i = 0; i < arity; ++i) {
ir_node *op = get_irn_n(node, i);
- const arch_register_req_t *req = arch_get_register_req(node, i);
+ const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
const arch_register_t *reg;
ir_node *copy;
ir_node *copy;
const arch_register_req_t *req;
- req = arch_get_register_req(node, i);
+ req = arch_get_irn_register_req_in(node, i);
if (req->cls != cls)
continue;
ir_node *in2;
const arch_register_req_t *req2;
- req2 = arch_get_register_req(node, i2);
+ req2 = arch_get_irn_register_req_in(node, i2);
if (req2->cls != cls)
continue;
if (! (req2->type & arch_register_req_type_limited))
}
/* collect all registers occurring in out constraints. */
- if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
-
- foreach_out_edge(node, edge) {
- ir_node *proj = get_edge_src_irn(edge);
- const arch_register_req_t *req = arch_get_register_req_out(proj);
- if (! (req->type & arch_register_req_type_limited))
- continue;
-
- if (def_constr == NULL) {
- rbitset_alloca(def_constr, cls->n_regs);
- }
- rbitset_or(def_constr, req->limited, cls->n_regs);
- }
- } else {
- const arch_register_req_t *req = arch_get_register_req_out(node);
- if (req->type & arch_register_req_type_limited) {
+ be_foreach_definition(node, cls, def,
+ if (! (req_->type & arch_register_req_type_limited))
+ continue;
+ if (def_constr == NULL) {
rbitset_alloca(def_constr, cls->n_regs);
- rbitset_or(def_constr, req->limited, cls->n_regs);
}
- }
+ rbitset_or(def_constr, req_->limited, cls->n_regs);
+ );
/* no output constraints => we're good */
if (def_constr == NULL) {
* 2) lives through the node.
* 3) is constrained to a register occurring in out constraints.
*/
- req = arch_get_register_req(node, i);
+ req = arch_get_irn_register_req_in(node, i);
if (req->cls != cls)
continue;
if (!(req->type & arch_register_req_type_limited))
return USES_INFINITY;
/* We have to keep nonspillable nodes in the workingset */
- if (arch_irn_get_flags(skip_Proj_const(def)) & arch_irn_flags_dont_spill)
+ if (arch_get_irn_flags(skip_Proj_const(def)) & arch_irn_flags_dont_spill)
return 0;
/* give some bonus to rematerialisable nodes */
}
/* We have to keep nonspillable nodes in the workingset */
- if (arch_irn_get_flags(skip_Proj_const(node)) & arch_irn_flags_dont_spill) {
+ if (arch_get_irn_flags(skip_Proj_const(node)) & arch_irn_flags_dont_spill) {
loc.time = 0;
DB((dbg, DBG_START, " %+F taken (dontspill node)\n", node, loc.time));
return loc;
static unsigned get_value_width(const ir_node *node)
{
- const arch_register_req_t *req = arch_get_register_req_out(node);
+ const arch_register_req_t *req = arch_get_irn_register_req(node);
return req->width;
}
if (env->mode == NULL) {
env->mode = get_irn_mode(copy);
- env->phi_cls = arch_get_irn_reg_class_out(copy);
+ env->phi_cls = arch_get_irn_reg_class(copy);
} else {
assert(env->mode == get_irn_mode(copy));
}
if (env->mode == NULL) {
env->mode = get_irn_mode(copies[0]);
- env->phi_cls = arch_get_irn_reg_class_out(copies[0]);
+ env->phi_cls = arch_get_irn_reg_class(copies[0]);
}
for (i = 0; i < copies_len; ++i) {
/* iterate over all args of phi */
for (i = 0, max = get_irn_arity(phi); i < max; ++i) {
ir_node *arg = get_irn_n(phi, i);
- const arch_register_req_t *req = arch_get_register_req_out(arg);
+ const arch_register_req_t *req = arch_get_irn_register_req(arg);
if (req->type & arch_register_req_type_ignore)
continue;
insn = get_Proj_pred(node);
}
- if (arch_irn_get_n_outs(insn) == 0)
+ if (arch_get_irn_n_outs(insn) == 0)
return;
if (get_irn_mode(node) == mode_T)
return;
- req = arch_get_register_req_out(node);
+ req = arch_get_irn_register_req(node);
if (! (req->type & arch_register_req_type_produces_sp))
return;
static void check_schedule(ir_node *node, void *data)
{
be_verify_schedule_env_t *env = (be_verify_schedule_env_t*)data;
- bool should_be = !is_Proj(node) && !(arch_irn_get_flags(node) & arch_irn_flags_not_scheduled);
+ bool should_be = !is_Proj(node) && !(arch_get_irn_flags(node) & arch_irn_flags_not_scheduled);
bool scheduled = bitset_is_set(env->scheduled, get_irn_idx(node));
if (should_be != scheduled) {
static void check_output_constraints(ir_node *node)
{
/* verify output register */
- if (arch_get_irn_reg_class_out(node) == regclass) {
+ if (arch_get_irn_reg_class(node) == regclass) {
+ const arch_register_req_t *req = arch_get_irn_register_req(node);
const arch_register_t *reg = arch_get_irn_register(node);
if (reg == NULL) {
ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have a register assigned\n",
node, get_nodes_block(node), get_irg_dump_name(irg));
problem_found = 1;
- } else if (!(reg->type & arch_register_type_joker) && !arch_reg_out_is_allocatable(node, reg)) {
+ } else if (!arch_reg_is_allocatable(req, reg)) {
ir_fprintf(stderr, "Verify warning: Register %s assigned as output of %+F not allowed (register constraint) in block %+F(%s)\n",
reg->name, node, get_nodes_block(node), get_irg_dump_name(irg));
problem_found = 1;
/* verify input register */
arity = get_irn_arity(node);
for (i = 0; i < arity; ++i) {
- const arch_register_req_t *req = arch_get_in_register_req(node, i);
+ const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
ir_node *pred = get_irn_n(node, i);
- const arch_register_req_t *pred_req = arch_get_register_req_out(pred);
+ const arch_register_req_t *pred_req = arch_get_irn_register_req(pred);
if (is_Bad(pred)) {
ir_fprintf(stderr, "Verify warning: %+F in block %+F(%s) has Bad as input %d\n",
pred, get_nodes_block(pred), get_irg_dump_name(irg), node);
problem_found = 1;
continue;
- } else if (!(reg->type & arch_register_type_joker) && ! arch_reg_is_allocatable(node, i, reg)) {
+ } else if (!arch_reg_is_allocatable(req, reg)) {
ir_fprintf(stderr, "Verify warning: Register %s as input %d of %+F not allowed (register constraint) in block %+F(%s)\n",
reg->name, i, node, get_nodes_block(node), get_irg_dump_name(irg));
problem_found = 1;
const arch_register_t *reg;
ir_node *reg_node;
- if (arch_get_irn_reg_class_out(node) != regclass)
+ if (arch_get_irn_reg_class(node) != regclass)
return;
reg = arch_get_irn_register(node);
const arch_register_t *reg;
ir_node *reg_node;
- if (arch_get_irn_reg_class_out(node) != regclass)
+ if (arch_get_irn_reg_class(node) != regclass)
return;
reg = arch_get_irn_register(node);
static ir_node *ia32_get_admissible_noreg(ir_node *irn, int pos)
{
ir_graph *irg = get_irn_irg(irn);
- const arch_register_req_t *req = arch_get_register_req(irn, pos);
+ const arch_register_req_t *req = arch_get_irn_register_req_in(irn, pos);
assert(req != NULL && "Missing register requirements");
if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
/* we can't swap left/right for limited registers
* (As this (currently) breaks constraint handling copies)
*/
- req = arch_get_in_register_req(irn, n_ia32_binary_left);
+ req = arch_get_irn_register_req_in(irn, n_ia32_binary_left);
if (req->type & arch_register_req_type_limited)
return 0;
break;
for (i = 0; i < out_arity; ++i) {
info->out_infos[i].req = out_reg_reqs[i];
}
- arch_set_in_register_reqs(new_node, in_reg_reqs);
+ arch_set_irn_register_reqs_in(new_node, in_reg_reqs);
SET_IA32_ORIG_NODE(new_node, node);
return need_label;
}
-/**
- * Returns the register at in position pos.
- */
-static const arch_register_t *get_in_reg(const ir_node *irn, int pos)
-{
- ir_node *op = get_irn_n(irn, pos);
- return arch_get_irn_register(op);
-}
-
/**
* Add a number to a prefix. This number will not be used a second time.
*/
void ia32_emit_source_register(const ir_node *node, int pos)
{
- const arch_register_t *reg = get_in_reg(node, pos);
+ const arch_register_t *reg = arch_get_irn_register_in(node, pos);
emit_register(reg, NULL);
}
return;
}
- reg = get_in_reg(node, pos);
+ reg = arch_get_irn_register_in(node, pos);
emit_8bit_register(reg);
}
void ia32_emit_8bit_high_source_register(const ir_node *node, int pos)
{
- const arch_register_t *reg = get_in_reg(node, pos);
+ const arch_register_t *reg = arch_get_irn_register_in(node, pos);
emit_8bit_register_high(reg);
}
return;
}
- reg = get_in_reg(node, pos);
+ reg = arch_get_irn_register_in(node, pos);
emit_16bit_register(reg);
}
void ia32_emit_dest_register(const ir_node *node, int pos)
{
- const arch_register_t *reg = arch_irn_get_register(node, pos);
+ const arch_register_t *reg = arch_get_irn_register_out(node, pos);
emit_register(reg, NULL);
}
void ia32_emit_dest_register_size(const ir_node *node, int pos)
{
- const arch_register_t *reg = arch_irn_get_register(node, pos);
+ const arch_register_t *reg = arch_get_irn_register_out(node, pos);
emit_register(reg, get_ia32_ls_mode(node));
}
void ia32_emit_8bit_dest_register(const ir_node *node, int pos)
{
- const arch_register_t *reg = arch_irn_get_register(node, pos);
+ const arch_register_t *reg = arch_get_irn_register_out(node, pos);
emit_register(reg, mode_Bu);
}
emit_ia32_Immediate(in);
} else {
const ir_mode *mode = get_ia32_ls_mode(node);
- const arch_register_t *reg = get_in_reg(node, pos);
+ const arch_register_t *reg = arch_get_irn_register_in(node, pos);
emit_register(reg, mode);
}
}
/* emit base */
if (has_base) {
- const arch_register_t *reg = get_in_reg(node, n_ia32_base);
+ const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_base);
emit_register(reg, NULL);
}
/* emit index + scale */
if (has_index) {
- const arch_register_t *reg = get_in_reg(node, n_ia32_index);
+ const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_index);
int scale;
be_emit_char(',');
emit_register(reg, NULL);
case 'D':
if (*fmt < '0' || '9' <= *fmt)
goto unknown;
- reg = arch_irn_get_register(node, *fmt++ - '0');
+ reg = arch_get_irn_register_out(node, *fmt++ - '0');
goto emit_R;
case 'I':
if (is_ia32_Immediate(imm)) {
goto emit_I;
} else {
- reg = get_in_reg(node, pos);
+ reg = arch_get_irn_register_in(node, pos);
goto emit_R;
}
}
static void emit_ia32_IMul(const ir_node *node)
{
ir_node *left = get_irn_n(node, n_ia32_IMul_left);
- const arch_register_t *out_reg = arch_irn_get_register(node, pn_ia32_IMul_res);
+ const arch_register_t *out_reg = arch_get_irn_register_out(node, pn_ia32_IMul_res);
/* do we need the 3-address form? */
if (is_ia32_NoReg_GP(left) ||
- get_in_reg(node, n_ia32_IMul_left) != out_reg) {
+ arch_get_irn_register_in(node, n_ia32_IMul_left) != out_reg) {
ia32_emitf(node, "\timul%M %#S4, %#AS3, %#D0\n");
} else {
ia32_emitf(node, "\timul%M %#AS4, %#S3\n");
*/
static void emit_ia32_Setcc(const ir_node *node)
{
- const arch_register_t *dreg = arch_irn_get_register(node, pn_ia32_Setcc_res);
+ const arch_register_t *dreg = arch_get_irn_register_out(node, pn_ia32_Setcc_res);
ia32_condition_code_t cc = get_ia32_condcode(node);
cc = determine_final_cc(node, n_ia32_Setcc_eflags, cc);
static void emit_ia32_CMovcc(const ir_node *node)
{
const ia32_attr_t *attr = get_ia32_attr_const(node);
- const arch_register_t *out = arch_irn_get_register(node, pn_ia32_res);
+ const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_res);
ia32_condition_code_t cc = get_ia32_condcode(node);
const arch_register_t *in_true;
const arch_register_t *in_false;
/* get register */
if (asm_reg->use_input == 0) {
- reg = arch_irn_get_register(node, asm_reg->inout_pos);
+ reg = arch_get_irn_register_out(node, asm_reg->inout_pos);
} else {
ir_node *pred = get_irn_n(node, asm_reg->inout_pos);
emit_ia32_Immediate(pred);
return s;
}
- reg = get_in_reg(node, asm_reg->inout_pos);
+ reg = arch_get_irn_register_in(node, asm_reg->inout_pos);
}
if (reg == NULL) {
ir_fprintf(stderr,
static void emit_ia32_Minus64Bit(const ir_node *node)
{
- const arch_register_t *in_lo = get_in_reg(node, 0);
- const arch_register_t *in_hi = get_in_reg(node, 1);
- const arch_register_t *out_lo = arch_irn_get_register(node, 0);
- const arch_register_t *out_hi = arch_irn_get_register(node, 1);
+ const arch_register_t *in_lo = arch_get_irn_register_in(node, 0);
+ const arch_register_t *in_hi = arch_get_irn_register_in(node, 1);
+ const arch_register_t *out_lo = arch_get_irn_register_out(node, 0);
+ const arch_register_t *out_hi = arch_get_irn_register_out(node, 1);
if (out_lo == in_lo) {
if (out_hi != in_hi) {
if (get_ia32_op_type(node) == ia32_AddrModeS) {
bemit_mod_am(ruval, node);
} else {
- const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
+ const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
bemit_modru(reg, ruval);
}
bemit8((unsigned char)attr->offset);
bemit8(opcode);
bemit_mod_am(ruval, node);
} else {
- const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
+ const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
if (reg->index == REG_GP_EAX) {
bemit8(opcode_ax);
} else {
*/
static void bemit_binop_2(const ir_node *node, unsigned code)
{
- const arch_register_t *out = get_in_reg(node, n_ia32_binary_left);
+ const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_binary_left);
bemit8(code);
if (get_ia32_op_type(node) == ia32_Normal) {
- const arch_register_t *op2 = get_in_reg(node, n_ia32_binary_right);
+ const arch_register_t *op2 = arch_get_irn_register_in(node, n_ia32_binary_right);
bemit_modrr(op2, out);
} else {
bemit_mod_am(reg_gp_map[out->index], node);
{
bemit8(code);
if (get_ia32_op_type(node) == ia32_Normal) {
- const arch_register_t *in = get_in_reg(node, input);
+ const arch_register_t *in = arch_get_irn_register_in(node, input);
bemit_modru(in, ext);
} else {
bemit_mod_am(ext, node);
static void bemit_unop_reg(const ir_node *node, unsigned char code, int input)
{
- const arch_register_t *out = arch_irn_get_register(node, 0);
+ const arch_register_t *out = arch_get_irn_register_out(node, 0);
bemit_unop(node, code, reg_gp_map[out->index], input);
}
static void bemit_copy(const ir_node *copy)
{
- const arch_register_t *in = get_in_reg(copy, 0);
- const arch_register_t *out = arch_irn_get_register(copy, 0);
+ const arch_register_t *in = arch_get_irn_register_in(copy, 0);
+ const arch_register_t *out = arch_get_irn_register_out(copy, 0);
if (in == out)
return;
static void bemit_xor0(const ir_node *node)
{
- const arch_register_t *out = arch_irn_get_register(node, 0);
+ const arch_register_t *out = arch_get_irn_register_out(node, 0);
bemit8(0x31);
bemit_modrr(out, out);
}
static void bemit_mov_const(const ir_node *node)
{
- const arch_register_t *out = arch_irn_get_register(node, 0);
+ const arch_register_t *out = arch_get_irn_register_out(node, 0);
bemit8(0xB8 + reg_gp_map[out->index]);
bemit_immediate(node, false);
}
} \
} else { \
bemit8(ext << 3 | 1); \
- bemit_mod_am(reg_gp_map[arch_irn_get_register(val, 0)->index], node); \
+ bemit_mod_am(reg_gp_map[arch_get_irn_register_out(val, 0)->index], node); \
} \
} \
\
bemit8(get_ia32_immediate_attr_const(val)->offset); \
} else { \
bemit8(ext << 3); \
- bemit_mod_am(reg_gp_map[arch_irn_get_register(val, 0)->index], node); \
+ bemit_mod_am(reg_gp_map[arch_get_irn_register_out(val, 0)->index], node); \
} \
}
#define SHIFT(op, ext) \
static void bemit_##op(const ir_node *node) \
{ \
- const arch_register_t *out = arch_irn_get_register(node, 0); \
+ const arch_register_t *out = arch_get_irn_register_out(node, 0); \
ir_node *count = get_irn_n(node, 1); \
if (is_ia32_Immediate(count)) { \
int offset = get_ia32_immediate_attr_const(count)->offset; \
static void bemit_shld(const ir_node *node)
{
- const arch_register_t *in = get_in_reg(node, n_ia32_ShlD_val_low);
- const arch_register_t *out = arch_irn_get_register(node, pn_ia32_ShlD_res);
+ const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_ShlD_val_low);
+ const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_ShlD_res);
ir_node *count = get_irn_n(node, n_ia32_ShlD_count);
bemit8(0x0F);
if (is_ia32_Immediate(count)) {
static void bemit_shrd(const ir_node *node)
{
- const arch_register_t *in = get_in_reg(node, n_ia32_ShrD_val_low);
- const arch_register_t *out = arch_irn_get_register(node, pn_ia32_ShrD_res);
+ const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_ShrD_val_low);
+ const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_ShrD_res);
ir_node *count = get_irn_n(node, n_ia32_ShrD_count);
bemit8(0x0F);
if (is_ia32_Immediate(count)) {
*/
static void bemit_setcc(const ir_node *node)
{
- const arch_register_t *dreg = arch_irn_get_register(node, pn_ia32_Setcc_res);
+ const arch_register_t *dreg = arch_get_irn_register_out(node, pn_ia32_Setcc_res);
ia32_condition_code_t cc = get_ia32_condcode(node);
cc = determine_final_cc(node, n_ia32_Setcc_eflags, cc);
{
const ia32_attr_t *attr = get_ia32_attr_const(node);
int ins_permuted = attr->data.ins_permuted;
- const arch_register_t *out = arch_irn_get_register(node, pn_ia32_res);
+ const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_res);
ia32_condition_code_t cc = get_ia32_condcode(node);
const arch_register_t *in_true;
const arch_register_t *in_false;
if (get_ia32_op_type(node) == ia32_AddrModeS) {
bemit_mod_am(7, node);
} else {
- const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
+ const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
bemit_modru(reg, 7);
}
bemit8((unsigned char)attr->offset);
bemit8(0x81);
bemit_mod_am(7, node);
} else {
- const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
+ const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_binary_left);
if (reg->index == REG_GP_EAX) {
bemit8(0x3D);
} else {
}
panic("invalid imm size?!?");
} else {
- const arch_register_t *out = get_in_reg(node, n_ia32_binary_left);
+ const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_binary_left);
bemit8(0x3B);
if (get_ia32_op_type(node) == ia32_Normal) {
- const arch_register_t *op2 = get_in_reg(node, n_ia32_binary_right);
+ const arch_register_t *op2 = arch_get_irn_register_in(node, n_ia32_binary_right);
bemit_modrr(op2, out);
} else {
bemit_mod_am(reg_gp_map[out->index], node);
ir_node *right = get_irn_n(node, n_ia32_binary_right);
if (is_ia32_Immediate(right)) {
if (get_ia32_op_type(node) == ia32_Normal) {
- const arch_register_t *out = get_in_reg(node, n_ia32_Cmp_left);
+ const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Cmp_left);
if (out->index == REG_GP_EAX) {
bemit8(0x3C);
} else {
}
bemit8(get_ia32_immediate_attr_const(right)->offset);
} else {
- const arch_register_t *out = get_in_reg(node, n_ia32_Cmp_left);
+ const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Cmp_left);
bemit8(0x3A);
if (get_ia32_op_type(node) == ia32_Normal) {
- const arch_register_t *in = get_in_reg(node, n_ia32_Cmp_right);
+ const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_Cmp_right);
bemit_modrr(out, in);
} else {
bemit_mod_am(reg_gp_map[out->index], node);
ir_node *right = get_irn_n(node, n_ia32_Test8Bit_right);
if (is_ia32_Immediate(right)) {
if (get_ia32_op_type(node) == ia32_Normal) {
- const arch_register_t *out = get_in_reg(node, n_ia32_Test8Bit_left);
+ const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Test8Bit_left);
if (out->index == REG_GP_EAX) {
bemit8(0xA8);
} else {
}
bemit8(get_ia32_immediate_attr_const(right)->offset);
} else {
- const arch_register_t *out = get_in_reg(node, n_ia32_Test8Bit_left);
+ const arch_register_t *out = arch_get_irn_register_in(node, n_ia32_Test8Bit_left);
bemit8(0x84);
if (get_ia32_op_type(node) == ia32_Normal) {
- const arch_register_t *in = get_in_reg(node, n_ia32_Test8Bit_right);
+ const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_Test8Bit_right);
bemit_modrr(out, in);
} else {
bemit_mod_am(reg_gp_map[out->index], node);
static void bemit_dec(const ir_node *node)
{
- const arch_register_t *out = arch_irn_get_register(node, pn_ia32_Dec_res);
+ const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_Dec_res);
bemit8(0x48 + reg_gp_map[out->index]);
}
static void bemit_inc(const ir_node *node)
{
- const arch_register_t *out = arch_irn_get_register(node, pn_ia32_Inc_res);
+ const arch_register_t *out = arch_get_irn_register_out(node, pn_ia32_Inc_res);
bemit8(0x40 + reg_gp_map[out->index]);
}
static void bemit_ldtls(const ir_node *node)
{
- const arch_register_t *out = arch_irn_get_register(node, 0);
+ const arch_register_t *out = arch_get_irn_register_out(node, 0);
bemit8(0x65); // gs:
if (out->index == REG_GP_EAX) {
*/
static void bemit_lea(const ir_node *node)
{
- const arch_register_t *out = arch_irn_get_register(node, 0);
+ const arch_register_t *out = arch_get_irn_register_out(node, 0);
bemit8(0x8D);
bemit_mod_am(reg_gp_map[out->index], node);
}
static void bemit_minus64bit(const ir_node *node)
{
- const arch_register_t *in_lo = get_in_reg(node, 0);
- const arch_register_t *in_hi = get_in_reg(node, 1);
- const arch_register_t *out_lo = arch_irn_get_register(node, 0);
- const arch_register_t *out_hi = arch_irn_get_register(node, 1);
+ const arch_register_t *in_lo = arch_get_irn_register_in(node, 0);
+ const arch_register_t *in_hi = arch_get_irn_register_in(node, 1);
+ const arch_register_t *out_lo = arch_get_irn_register_out(node, 0);
+ const arch_register_t *out_hi = arch_get_irn_register_out(node, 1);
if (out_lo == in_lo) {
if (out_hi != in_hi) {
*/
static void bemit_load(const ir_node *node)
{
- const arch_register_t *out = arch_irn_get_register(node, 0);
+ const arch_register_t *out = arch_get_irn_register_out(node, 0);
if (out->index == REG_GP_EAX) {
ir_node *base = get_irn_n(node, n_ia32_base);
bemit_immediate(value, false);
}
} else {
- const arch_register_t *in = get_in_reg(node, n_ia32_Store_val);
+ const arch_register_t *in = arch_get_irn_register_in(node, n_ia32_Store_val);
if (in->index == REG_GP_EAX) {
ir_node *base = get_irn_n(node, n_ia32_base);
bemit8(0xFF);
bemit_mod_am(6, node);
} else {
- const arch_register_t *reg = get_in_reg(node, n_ia32_Push_val);
+ const arch_register_t *reg = arch_get_irn_register_in(node, n_ia32_Push_val);
bemit8(0x50 + reg_gp_map[reg->index]);
}
}
*/
static void bemit_pop(const ir_node *node)
{
- const arch_register_t *reg = arch_irn_get_register(node, pn_ia32_Pop_res);
+ const arch_register_t *reg = arch_get_irn_register_out(node, pn_ia32_Pop_res);
bemit8(0x58 + reg_gp_map[reg->index]);
}
bemit_sub(node);
/* mov %esp, %out */
bemit8(0x8B);
- out = arch_irn_get_register(node, 1);
+ out = arch_get_irn_register_out(node, 1);
bemit8(MOD_REG | ENC_REG(reg_gp_map[out->index]) | ENC_RM(0x04));
}
size = get_signed_imm_size(offs);
bemit8(size == 1 ? 0x83 : 0x81);
- reg = arch_irn_get_register(node, 0);
+ reg = arch_get_irn_register_out(node, 0);
bemit_modru(reg, ext);
if (size == 1) {
in2 = get_irn_n(irn, n_ia32_binary_right);
in1_reg = arch_get_irn_register(in1);
in2_reg = arch_get_irn_register(in2);
- out_reg = arch_irn_get_register(irn, 0);
+ out_reg = arch_get_irn_register_out(irn, 0);
if (out_reg == in1_reg)
return;
int n_res, i;
ir_node *in_node, *block;
- n_res = arch_irn_get_n_outs(node);
+ n_res = arch_get_irn_n_outs(node);
block = get_nodes_block(node);
/* check all OUT requirements, if there is a should_be_same */
int i2, arity;
int same_pos;
ir_node *uses_out_reg;
- const arch_register_req_t *req = arch_get_out_register_req(node, i);
+ const arch_register_req_t *req = arch_get_irn_register_req_out(node, i);
const arch_register_class_t *cls;
int uses_out_reg_pos;
same_pos = get_first_same(req);
/* get in and out register */
- out_reg = arch_irn_get_register(node, i);
+ out_reg = arch_get_irn_register_out(node, i);
in_node = get_irn_n(node, same_pos);
in_reg = arch_get_irn_register(in_node);
if (get_ia32_am_support(irn) != ia32_am_binary)
return;
- n_res = arch_irn_get_n_outs(irn);
+ n_res = arch_get_irn_n_outs(irn);
for (i = 0; i < n_res; i++) {
- const arch_register_req_t *req = arch_get_out_register_req(irn, i);
+ const arch_register_req_t *req = arch_get_irn_register_req_out(irn, i);
const arch_register_t *out_reg;
int same_pos;
ir_node *same_node;
continue;
/* get in and out register */
- out_reg = arch_irn_get_register(irn, i);
+ out_reg = arch_get_irn_register_out(irn, i);
same_pos = get_first_same(req);
same_node = get_irn_n(irn, same_pos);
same_reg = arch_get_irn_register(same_node);
ia32_attr_t *attr = get_ia32_attr(node);
backend_info_t *info;
- arch_irn_set_flags(node, flags);
- arch_set_in_register_reqs(node, in_reqs);
+ arch_set_irn_flags(node, flags);
+ arch_set_irn_register_reqs_in(node, in_reqs);
attr->exec_units = execution_units;
#ifndef NDEBUG
}
set_ia32_ls_mode(test, get_ia32_ls_mode(node));
- reg = arch_irn_get_register(node, pn_ia32_Cmp_eflags);
- arch_irn_set_register(test, pn_ia32_Test_eflags, reg);
+ reg = arch_get_irn_register_out(node, pn_ia32_Cmp_eflags);
+ arch_set_irn_register_out(test, pn_ia32_Test_eflags, reg);
foreach_out_edge_safe(node, edge, tmp) {
ir_node *const user = get_edge_src_irn(edge);
if (loads[loadslot] != NULL)
break;
- dreg = arch_irn_get_register(node, pn_ia32_Load_res);
+ dreg = arch_get_irn_register_out(node, pn_ia32_Load_res);
if (regmask & (1 << dreg->index)) {
/* this register is already used */
break;
const arch_register_t *reg;
mem = get_irn_n(load, n_ia32_mem);
- reg = arch_irn_get_register(load, pn_ia32_Load_res);
+ reg = arch_get_irn_register_out(load, pn_ia32_Load_res);
pop = new_bd_ia32_Pop(get_irn_dbg_info(load), block, mem, pred_sp);
- arch_irn_set_register(pop, pn_ia32_Load_res, reg);
+ arch_set_irn_register_out(pop, pn_ia32_Load_res, reg);
copy_mark(load, pop);
if (get_mode_size_bits(smaller_mode) != 16 ||
!mode_is_signed(smaller_mode) ||
eax != arch_get_irn_register(val) ||
- eax != arch_irn_get_register(node, pn_ia32_Conv_I2I_res))
+ eax != arch_get_irn_register_out(node, pn_ia32_Conv_I2I_res))
return;
dbgi = get_irn_dbg_info(node);
/* Argh:We must change the opcode to 8bit AND copy the register constraints */
if (get_mode_size_bits(conv_mode) == 8) {
+ const arch_register_req_t **reqs = arch_get_irn_register_reqs_in(node);
set_irn_op(pred, op_ia32_Conv_I2I8Bit);
- arch_set_in_register_reqs(pred,
- arch_get_in_register_reqs(node));
+ arch_set_irn_register_reqs_in(pred, reqs);
}
} else {
/* we don't want to end up with 2 loads, so we better do nothing */
/* Argh:We must change the opcode to 8bit AND copy the register constraints */
if (get_mode_size_bits(conv_mode) == 8) {
+ const arch_register_req_t **reqs = arch_get_irn_register_reqs_in(node);
set_irn_op(result_conv, op_ia32_Conv_I2I8Bit);
- arch_set_in_register_reqs(result_conv,
- arch_get_in_register_reqs(node));
+ arch_set_irn_register_reqs_in(result_conv, reqs);
}
}
} else {
my $res = "";
if(defined($node->{modified_flags})) {
- $res .= "\tarch_irn_add_flags(res, arch_irn_flags_modify_flags);\n";
+ $res .= "\tarch_add_irn_flags(res, arch_irn_flags_modify_flags);\n";
}
if(defined($node->{am})) {
my $am = $node->{am};
# (when we emit the setX; setp; orb and the setX;setnp;andb sequences)
init_attr => "set_ia32_ls_mode(res, mode_Bu);\n"
. "\tif (condition_code & ia32_cc_additional_float_cases) {\n"
- . "\t\tarch_irn_add_flags(res, arch_irn_flags_modify_flags);\n"
+ . "\t\tarch_add_irn_flags(res, arch_irn_flags_modify_flags);\n"
. "\t\t/* attr->latency = 3; */\n"
. "\t}\n",
latency => 1,
mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
- arch_irn_add_flags(load, arch_irn_flags_rematerializable);
+ arch_add_irn_flags(load, arch_irn_flags_rematerializable);
res = new_r_Proj(load, mode_xmm, pn_ia32_xLoad_res);
}
} else {
ls_mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
- arch_irn_add_flags(load, arch_irn_flags_rematerializable);
+ arch_add_irn_flags(load, arch_irn_flags_rematerializable);
res = new_r_Proj(load, mode_vfp, pn_ia32_vfld_res);
}
}
assert((int)pn_ia32_xLoad_res == (int)pn_ia32_vfld_res
&& (int)pn_ia32_vfld_res == (int)pn_ia32_Load_res
&& (int)pn_ia32_Load_res == (int)pn_ia32_res);
- arch_irn_add_flags(new_node, arch_irn_flags_rematerializable);
+ arch_add_irn_flags(new_node, arch_irn_flags_rematerializable);
}
SET_IA32_ORIG_NODE(new_node, node);
ir_node *new_node = gen_binop(node, sp, sz, new_bd_ia32_SubSP,
match_am | match_immediate);
assert(is_ia32_SubSP(new_node));
- arch_irn_set_register(new_node, pn_ia32_SubSP_stack,
- &ia32_registers[REG_ESP]);
+ arch_set_irn_register_out(new_node, pn_ia32_SubSP_stack,
+ &ia32_registers[REG_ESP]);
return new_node;
}
ir_node *new_node = gen_binop(node, sp, sz, new_bd_ia32_AddSP,
match_am | match_immediate);
assert(is_ia32_AddSP(new_node));
- arch_irn_set_register(new_node, pn_ia32_AddSP_stack,
- &ia32_registers[REG_ESP]);
+ arch_set_irn_register_out(new_node, pn_ia32_AddSP_stack,
+ &ia32_registers[REG_ESP]);
return new_node;
}
copy_node_attr(irg, node, phi);
be_duplicate_deps(node, phi);
- arch_set_out_register_req(phi, 0, req);
+ arch_set_irn_register_req_out(phi, 0, req);
be_enqueue_preds(node);
i = get_irn_arity(node) - 1;
fpcw = be_transform_node(get_irn_n(node, i--));
for (; i >= n_be_Call_first_arg; --i) {
- arch_register_req_t const *const req = arch_get_register_req(node, i);
+ arch_register_req_t const *const req
+ = arch_get_irn_register_req_in(node, i);
ir_node *const reg_parm = be_transform_node(get_irn_n(node, i));
assert(req->type == arch_register_req_type_limited);
assert((int)pn_ia32_xLoad_res == (int)pn_ia32_vfld_res
&& (int)pn_ia32_vfld_res == (int)pn_ia32_Load_res
&& (int)pn_ia32_Load_res == (int)pn_ia32_res);
- arch_irn_add_flags(load, arch_irn_flags_rematerializable);
+ arch_add_irn_flags(load, arch_irn_flags_rematerializable);
}
SET_IA32_ORIG_NODE(load, node);
assert((int)pn_ia32_xLoad_res == (int)pn_ia32_vfld_res
&& (int)pn_ia32_vfld_res == (int)pn_ia32_Load_res
&& (int)pn_ia32_Load_res == (int)pn_ia32_res);
- arch_irn_add_flags(load, arch_irn_flags_rematerializable);
+ arch_add_irn_flags(load, arch_irn_flags_rematerializable);
}
SET_IA32_ORIG_NODE(load, node);
static ir_node *gen_be_IncSP(ir_node *node)
{
ir_node *res = be_duplicate_node(node);
- arch_irn_add_flags(res, arch_irn_flags_modify_flags);
+ arch_add_irn_flags(res, arch_irn_flags_modify_flags);
return res;
}
}
/* transform call modes */
if (mode_is_data(mode)) {
- const arch_register_class_t *cls = arch_get_irn_reg_class_out(node);
+ const arch_register_class_t *cls = arch_get_irn_reg_class(node);
mode = cls->mode;
}
} else if (proj == pn_be_Call_X_regular) {
proj = pn_ia32_Call_X_regular;
} else {
- arch_register_req_t const *const req = arch_get_register_req_out(node);
- int const n_outs = arch_irn_get_n_outs(new_call);
+ arch_register_req_t const *const req = arch_get_irn_register_req(node);
+ int const n_outs = arch_get_irn_n_outs(new_call);
int i;
assert(proj >= pn_be_Call_first_res);
for (i = 0; i < n_outs; ++i) {
arch_register_req_t const *const new_req
- = arch_get_out_register_req(new_call, i);
+ = arch_get_irn_register_req_out(new_call, i);
if (!(new_req->type & arch_register_req_type_limited) ||
new_req->cls != req->cls ||
long pos = get_Proj_proj(node);
if (mode == mode_M) {
- pos = arch_irn_get_n_outs(new_pred)-1;
+ pos = arch_get_irn_n_outs(new_pred)-1;
} else if (mode_is_int(mode) || mode_is_reference(mode)) {
mode = mode_Iu;
} else if (mode_is_float(mode)) {
static inline const arch_register_t *x87_irn_get_register(const ir_node *irn,
int pos)
{
- const arch_register_t *res = arch_irn_get_register(irn, pos);
+ const arch_register_t *res = arch_get_irn_register_out(irn, pos);
assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
return res;
int op1_idx, out_idx;
unsigned live;
- cls = arch_get_irn_reg_class_out(n);
+ cls = arch_get_irn_reg_class(n);
if (cls != &ia32_reg_classes[CLASS_ia32_vfp])
return 0;
static bool sparc_modifies_flags(const ir_node *node)
{
- return arch_irn_get_flags(node) & sparc_arch_irn_flag_modifies_flags;
+ return arch_get_irn_flags(node) & sparc_arch_irn_flag_modifies_flags;
}
static bool sparc_modifies_fp_flags(const ir_node *node)
{
- return arch_irn_get_flags(node) & sparc_arch_irn_flag_modifies_fp_flags;
+ return arch_get_irn_flags(node) & sparc_arch_irn_flag_modifies_fp_flags;
}
static void sparc_before_ra(ir_graph *irg)
return;
if (!attr->is_frame_entity)
return;
- if (arch_irn_get_flags(node) & sparc_arch_irn_flag_needs_64bit_spillslot)
+ if (arch_get_irn_flags(node) & sparc_arch_irn_flag_needs_64bit_spillslot)
mode = mode_Lu;
align = get_mode_size_bytes(mode);
be_node_needs_frame_entity(env, node, mode, align);
sparc_attr_t *attr = (sparc_attr_t*)get_irn_generic_attr(res);
attr->immediate_value_entity = entity;
attr->immediate_value = immediate_value;
- arch_irn_add_flags(res, (arch_irn_flags_t)sparc_arch_irn_flag_immediate_form);
+ arch_add_irn_flags(res, (arch_irn_flags_t)sparc_arch_irn_flag_immediate_form);
}
static void init_sparc_jmp_cond_attr(ir_node *node, ir_relation relation,
backend_info_t *info;
(void) execution_units;
- arch_irn_set_flags(node, flags);
- arch_set_in_register_reqs(node, in_reqs);
+ arch_set_irn_flags(node, flags);
+ arch_set_irn_register_reqs_in(node, in_reqs);
info = be_get_info(node);
info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_res);
imm => {
attr => "ir_entity *entity, int32_t offset, bool aggregate_return",
custominit => "\tsparc_set_attr_imm(res, entity, offset);".
- "\tif (aggregate_return) arch_irn_add_flags(res, sparc_arch_irn_flag_aggregate_return);",
+ "\tif (aggregate_return) arch_add_irn_flags(res, sparc_arch_irn_flag_aggregate_return);",
arity => "variable",
out_arity => "variable",
},
attr => "bool aggregate_return",
arity => "variable",
out_arity => "variable",
- custominit => "\tif (aggregate_return) arch_irn_add_flags(res, sparc_arch_irn_flag_aggregate_return);",
+ custominit => "\tif (aggregate_return) arch_add_irn_flags(res, sparc_arch_irn_flag_aggregate_return);",
}
},
},
/* first output is memory */
start_mem_offset = o;
- arch_set_out_register_req(start, o, arch_no_register_req);
+ arch_set_irn_register_req_out(start, o, arch_no_register_req);
++o;
/* the zero register */
start_g0_offset = o;
req = be_create_reg_req(obst, &sparc_registers[REG_G0],
arch_register_req_type_ignore);
- arch_set_out_register_req(start, o, req);
- arch_irn_set_register(start, o, &sparc_registers[REG_G0]);
+ arch_set_irn_register_req_out(start, o, req);
+ arch_set_irn_register_out(start, o, &sparc_registers[REG_G0]);
++o;
/* we need an output for the stackpointer */
start_sp_offset = o;
req = be_create_reg_req(obst, sp_reg,
arch_register_req_type_produces_sp | arch_register_req_type_ignore);
- arch_set_out_register_req(start, o, req);
- arch_irn_set_register(start, o, sp_reg);
+ arch_set_irn_register_req_out(start, o, req);
+ arch_set_irn_register_out(start, o, sp_reg);
++o;
if (!current_cconv->omit_fp) {
start_fp_offset = o;
req = be_create_reg_req(obst, fp_reg, arch_register_req_type_ignore);
- arch_set_out_register_req(start, o, req);
- arch_irn_set_register(start, o, fp_reg);
+ arch_set_irn_register_req_out(start, o, req);
+ arch_set_irn_register_out(start, o, fp_reg);
++o;
}
const arch_register_t *reg0 = param->reg0;
const arch_register_t *reg1 = param->reg1;
if (reg0 != NULL) {
- arch_set_out_register_req(start, o, reg0->single_req);
- arch_irn_set_register(start, o, reg0);
+ arch_set_irn_register_req_out(start, o, reg0->single_req);
+ arch_set_irn_register_out(start, o, reg0);
++o;
}
if (reg1 != NULL) {
- arch_set_out_register_req(start, o, reg1->single_req);
- arch_irn_set_register(start, o, reg1);
+ arch_set_irn_register_req_out(start, o, reg1->single_req);
+ arch_set_irn_register_out(start, o, reg1);
++o;
}
}
size_t c;
for (c = 0; c < n_callee_saves; ++c) {
const arch_register_t *reg = omit_fp_callee_saves[c];
- arch_set_out_register_req(start, o, reg->single_req);
- arch_irn_set_register(start, o, reg);
+ arch_set_irn_register_req_out(start, o, reg->single_req);
+ arch_set_irn_register_out(start, o, reg);
++o;
}
}
assert(p == n_ins);
bereturn = new_bd_sparc_Return_reg(dbgi, new_block, n_ins, in);
- arch_set_in_register_reqs(bereturn, reqs);
+ arch_set_irn_register_reqs_in(bereturn, reqs);
return bereturn;
}
set_irn_pinned(ld, op_pin_state_floats);
result[1] = new_r_Proj(ld2, mode_gp, pn_sparc_Ld_res);
- arch_irn_add_flags(ld, (arch_irn_flags_t)sparc_arch_irn_flag_needs_64bit_spillslot);
- arch_irn_add_flags(ld2, (arch_irn_flags_t)sparc_arch_irn_flag_needs_64bit_spillslot);
+ arch_add_irn_flags(ld, (arch_irn_flags_t)sparc_arch_irn_flag_needs_64bit_spillslot);
+ arch_add_irn_flags(ld2, (arch_irn_flags_t)sparc_arch_irn_flag_needs_64bit_spillslot);
} else {
assert(bits == 32);
result[1] = NULL;
res = new_bd_sparc_Call_reg(dbgi, new_block, in_arity, in, out_arity,
aggregate_return);
}
- arch_set_in_register_reqs(res, in_req);
+ arch_set_irn_register_reqs_in(res, in_req);
/* create output register reqs */
o = 0;
- arch_set_out_register_req(res, o++, arch_no_register_req);
+ arch_set_irn_register_req_out(res, o++, arch_no_register_req);
/* add register requirements for the result regs */
for (r = 0; r < n_ress; ++r) {
const reg_or_stackslot_t *result_info = &cconv->results[r];
const arch_register_req_t *req = result_info->req0;
if (req != NULL) {
- arch_set_out_register_req(res, o++, req);
+ arch_set_irn_register_req_out(res, o++, req);
}
assert(result_info->req1 == NULL);
}
if (!rbitset_is_set(cconv->caller_saves, i))
continue;
reg = &sparc_registers[i];
- arch_set_out_register_req(res, o++, reg->single_req);
+ arch_set_irn_register_req_out(res, o++, reg->single_req);
}
assert(o == out_arity);
phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1);
copy_node_attr(irg, node, phi);
be_duplicate_deps(node, phi);
- arch_set_out_register_req(phi, 0, req);
+ arch_set_irn_register_req_out(phi, 0, req);
be_enqueue_preds(node);
return phi;
}