(We have separate get_input and get_output callbacks for now.)
This should make the code faster for now and is a first step towards
changing the interface to query register constraints on the mode_T node
itself instead of the Proj nodes.
- Handle middleend node constraints and stuff in benode.c instead of in each
backend
- Remove irn_class_branch; we already had is_cfop in the middleend
- Fix a bunch of bugs/problems in the process
[r26320]
/** The opcodes of the libFirm predefined operations. */
typedef enum {
- iro_Block,
+ iro_First,
+ iro_Block = iro_First,
iro_Start, iro_End, iro_Jmp, iro_IJmp, iro_Cond, iro_Return,
iro_Const, iro_SymConst,
iro_Sel,
* |___/
**************************************************/
-/**
- * Return register requirements for a TEMPLATE node.
- * If the node returns a tuple (mode_T) then the proj's
- * will be asked for this information.
- */
-static const arch_register_req_t *TEMPLATE_get_irn_reg_req(const ir_node *node,
- int pos)
-{
- long node_pos = pos == -1 ? 0 : pos;
- ir_mode *mode = get_irn_mode(node);
-
- if (mode == mode_T || mode == mode_M) {
- return arch_no_register_req;
- }
-
- if (is_Proj(node)) {
- /* in case of a proj, we need to get the correct OUT slot */
- /* of the node corresponding to the proj number */
- if (pos == -1) {
- node_pos = TEMPLATE_translate_proj_pos(node);
- } else {
- node_pos = pos;
- }
-
- node = skip_Proj_const(node);
- }
-
- /* get requirements for our own nodes */
- if (is_TEMPLATE_irn(node)) {
- const arch_register_req_t *req;
- if (pos >= 0) {
- req = get_TEMPLATE_in_req(node, pos);
- } else {
- req = get_TEMPLATE_out_req(node, node_pos);
- }
-
- assert(req != NULL);
-
- return req;
- }
-
- /* unknowns should be transformed already */
- assert(!is_Unknown(node));
-
- return arch_no_register_req;
-}
-
static arch_irn_class_t TEMPLATE_classify(const ir_node *irn)
{
- irn = skip_Proj_const(irn);
-
- if (is_cfop(irn)) {
- return arch_irn_class_branch;
- }
-
+ (void) irn;
return 0;
}
/* fill register allocator interface */
static const arch_irn_ops_t TEMPLATE_irn_ops = {
- TEMPLATE_get_irn_reg_req,
+ get_TEMPLATE_in_req,
+ get_TEMPLATE_out_req,
TEMPLATE_classify,
TEMPLATE_get_frame_entity,
TEMPLATE_set_frame_entity,
* |___/
**************************************************/
-/**
- * Return register requirements for a arm node.
- * If the node returns a tuple (mode_T) then the proj's
- * will be asked for this information.
- */
-static const arch_register_req_t *arm_get_irn_reg_req(const ir_node *node,
- int pos)
-{
- long node_pos = pos == -1 ? 0 : pos;
- ir_mode *mode = get_irn_mode(node);
-
- if (is_Block(node) || mode == mode_X) {
- return arch_no_register_req;
- }
-
- if (mode == mode_T && pos < 0) {
- return arch_no_register_req;
- }
-
- if (is_Proj(node)) {
- if(mode == mode_M)
- return arch_no_register_req;
-
- if(pos >= 0) {
- return arch_no_register_req;
- }
-
- node_pos = (pos == -1) ? get_Proj_proj(node) : pos;
- node = skip_Proj_const(node);
- }
-
- /* get requirements for our own nodes */
- if (is_arm_irn(node)) {
- const arch_register_req_t *req;
- if (pos >= 0) {
- req = get_arm_in_req(node, pos);
- } else {
- req = get_arm_out_req(node, node_pos);
- }
-
- return req;
- }
-
- /* unknown should be transformed by now */
- assert(!is_Unknown(node));
- return arch_no_register_req;
-}
-
static arch_irn_class_t arm_classify(const ir_node *irn)
{
- irn = skip_Proj_const(irn);
-
- if (is_cfop(irn)) {
- return arch_irn_class_branch;
- }
-
+ (void) irn;
return 0;
}
/* fill register allocator interface */
static const arch_irn_ops_t arm_irn_ops = {
- arm_get_irn_reg_req,
+ get_arm_in_req,
+ get_arm_out_req,
arm_classify,
arm_get_frame_entity,
arm_set_frame_entity,
const arch_register_req_t *arch_get_register_req(const ir_node *irn, int pos)
{
- const arch_irn_ops_t *ops = get_irn_ops(irn);
- return ops->get_irn_reg_req(irn, pos);
+ const arch_irn_ops_t *ops;
+
+ if (is_Proj(irn)) {
+ assert(pos == -1);
+ pos = -1-get_Proj_proj(irn);
+ irn = get_Proj_pred(irn);
+ }
+ ops = get_irn_ops(irn);
+ if (pos < 0) {
+ return ops->get_irn_reg_req_out(irn, -pos-1);
+ } else {
+ return ops->get_irn_reg_req_in(irn, pos);
+ }
+}
+
+const arch_register_req_t *arch_get_register_req_out(const ir_node *irn)
+{
+ int pos = 0;
+ const arch_irn_ops_t *ops;
+
+ if (is_Proj(irn)) {
+ pos = get_Proj_proj(irn);
+ irn = get_Proj_pred(irn);
+ } else if (get_irn_mode(irn) == mode_T) {
+ return arch_no_register_req;
+ }
+ ops = get_irn_ops(irn);
+ return ops->get_irn_reg_req_out(irn, pos);
}
void arch_set_frame_offset(ir_node *irn, int offset)
extern char *arch_register_req_format(char *buf, size_t len, const arch_register_req_t *req, const ir_node *node);
/**
- * Certain node classes which are relevant for the register allocator.
+ * Node classification. Mainly used for statistics.
*/
typedef enum arch_irn_class_t {
- arch_irn_class_spill = 1 << 0,
- arch_irn_class_reload = 1 << 1,
- arch_irn_class_remat = 1 << 2,
- arch_irn_class_copy = 1 << 3,
- arch_irn_class_perm = 1 << 4,
- arch_irn_class_branch = 1 << 5
+ arch_irn_class_spill = 1 << 0,
+ arch_irn_class_reload = 1 << 1,
+ arch_irn_class_remat = 1 << 2,
+ arch_irn_class_copy = 1 << 3,
+ arch_irn_class_perm = 1 << 4
} arch_irn_class_t;
void arch_set_frame_offset(ir_node *irn, int bias);
* operand was no register operand.
*/
const arch_register_req_t *arch_get_register_req(const ir_node *irn, int pos);
-
-#define arch_get_register_req_out(irn) arch_get_register_req(irn, -1)
+const arch_register_req_t *arch_get_register_req_out(const ir_node *irn);
/**
* Put all registers which shall not be ignored by the register
* Expresses requirements to register allocation for an operand.
*/
struct arch_register_req_t {
- arch_register_req_type_t type; /**< The type of the constraint. */
+ arch_register_req_type_t type; /**< The type of the constraint. */
const arch_register_class_t *cls; /**< The register class this constraint belongs to. */
const unsigned *limited; /**< allowed register bitset */
/**
* Get the register requirements for a given operand.
- * @param self The self pointer.
* @param irn The node.
- * @param pos The operand's position (0..n for the input operands).
+ * @param pos The operand's position
+ * @return The register requirements for the selected operand.
+ * The pointer returned is never NULL.
+ */
+ const arch_register_req_t *(*get_irn_reg_req_in)(const ir_node *irn, int pos);
+
+ /**
+ * Get the register requirements for values produced by a node
+ * @param irn The node.
+ * @param pos The operand's position (0 for most nodes,
+ * 0..n for mode_T nodes)
* @return The register requirements for the selected operand.
* The pointer returned is never NULL.
*/
- const arch_register_req_t *(*get_irn_reg_req)(const ir_node *irn, int pos);
+ const arch_register_req_t *(*get_irn_reg_req_out)(const ir_node *irn, int pos);
/**
* Classify the node.
/**
* Get the entity on the stack frame this node depends on.
- * @param self The this pointer.
* @param irn The node in question.
* @return The entity on the stack frame or NULL, if the node does not have a
* stack frame entity.
/**
* Set the entity on the stack frame this node depends on.
- * @param self The this pointer.
* @param irn The node in question.
* @param ent The entity to set
*/
/**
* Set the offset of a node carrying an entity on the stack frame.
- * @param self The this pointer.
* @param irn The node.
* @param offset The offset of the node's stack frame entity.
*/
* A positive value stands for an expanding stack area, a negative value for
* a shrinking one.
*
- * @param self The this pointer
* @param irn The node
* @return 0 if the stackpointer is not modified with a constant
* value, otherwise the increment/decrement value
* Returns an inverse operation which yields the i-th argument
* of the given node as result.
*
- * @param self The this pointer.
* @param irn The original operation
* @param i Index of the argument we want the inverse operation to yield
* @param inverse struct to be filled with the resulting inverse op
/**
* Get the estimated cycle count for @p irn.
*
- * @param self The this pointer.
* @param irn The node.
*
* @return The estimated cycle count for this operation
/**
* Asks the backend whether operand @p i of @p irn can be loaded form memory internally
*
- * @param self The this pointer.
* @param irn The node.
* @param i Index of the argument we would like to know whether @p irn can load it form memory internally
*
/**
* Ask the backend to assimilate @p reload of operand @p i into @p irn.
*
- * @param self The this pointer.
* @param irn The node.
* @param spill The spill.
* @param i The position of the reload.
border_def(proj, step, 1);
}
}
- }
-
- /*
- * If the node defines some value, which can put into a
- * register of the current class, make a border for it.
- */
- if (has_reg_class(env, irn)) {
- int nr = get_irn_idx(irn);
-
- bitset_clear(live, nr);
- border_def(irn, step, 1);
+ } else {
+ /*
-	 * If the node defines some value, which can be put into a
+ * register of the current class, make a border for it.
+ */
+ if (has_reg_class(env, irn)) {
+ int nr = get_irn_idx(irn);
+
+ bitset_clear(live, nr);
+ border_def(irn, step, 1);
+ }
}
/*
ARR_APP1(reg_out_info_t, info->out_infos, out_info);
}
-/**
- * Skip Proj nodes and return their Proj numbers.
- *
- * If *node is a Proj or Proj(Proj) node, skip it.
- *
- * @param node points to the node to be skipped
- *
- * @return 0 if *node was no Proj node, its Proj number else.
- */
-static int redir_proj(const ir_node **node)
-{
- const ir_node *n = *node;
-
- if(is_Proj(n)) {
- ir_node *irn;
-
- *node = irn = get_Proj_pred(n);
- if(is_Proj(irn)) {
- assert(get_irn_mode(irn) == mode_T);
- *node = get_Proj_pred(irn);
- }
- return get_Proj_proj(n);
- }
-
- return 0;
-}
-
ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
ir_node *bl, ir_node *frame, ir_node *to_spill)
{
be_node_set_reg_class_in(res, be_pos_Spill_frame, cls_frame);
be_node_set_reg_class_in(res, be_pos_Spill_val, cls);
+
+ /*
+ * For spills and reloads, we return "none" as requirement for frame
+ * pointer, so every input is ok. Some backends need this (e.g. STA).
+ * Matze: we should investigate if this is really needed, this solution
+ * looks very hacky to me
+ */
+ be_node_set_reg_class_in(res, be_pos_Spill_frame, NULL);
+
return res;
}
be_node_set_reg_class_out(res, 0, cls);
be_node_set_reg_class_in(res, be_pos_Reload_frame, cls_frame);
arch_irn_set_flags(res, arch_irn_flags_rematerializable);
+
+ /*
+ * For spills and reloads, we return "none" as requirement for frame
+ * pointer, so every input is ok. Some backends need this (e.g. STA).
+ * Matze: we should investigate if this is really needed, this solution
+ * looks very hacky to me
+ */
+ be_node_set_reg_class_in(res, be_pos_Reload_frame, NULL);
+
return res;
}
*/
-static const
-arch_register_req_t *get_out_reg_req(const ir_node *irn, int out_pos)
+static const arch_register_req_t *be_node_get_out_reg_req(
+ const ir_node *irn, int pos)
{
const be_node_attr_t *a = get_irn_attr_const(irn);
- if (out_pos >= ARR_LEN(a->reg_data)) {
+ assert(pos >= 0);
+ if (pos >= ARR_LEN(a->reg_data)) {
return arch_no_register_req;
}
- return &a->reg_data[out_pos].req;
+ return &a->reg_data[pos].req;
}
-static const
-arch_register_req_t *get_in_reg_req(const ir_node *irn, int pos)
+static const arch_register_req_t *be_node_get_in_reg_req(
+ const ir_node *irn, int pos)
{
const be_node_attr_t *a = get_irn_attr_const(irn);
+ assert(pos >= 0);
if (pos >= get_irn_arity(irn) || pos >= ARR_LEN(a->reg_data))
return arch_no_register_req;
return &a->reg_data[pos].in_req;
}
-static const arch_register_req_t *
-be_node_get_irn_reg_req(const ir_node *irn, int pos)
-{
- int out_pos = pos;
-
- if (pos < 0) {
- if (get_irn_mode(irn) == mode_T)
- return arch_no_register_req;
-
- assert(pos == -1);
- out_pos = redir_proj((const ir_node **)&irn);
- assert(is_be_node(irn));
- return get_out_reg_req(irn, out_pos);
- } else if (is_be_node(irn)) {
- /*
- * For spills and reloads, we return "none" as requirement for frame
- * pointer, so every input is ok. Some backends need this (e.g. STA).
- */
- if ((pos == be_pos_Spill_frame && be_is_Spill(irn)) ||
- (pos == be_pos_Reload_frame && be_is_Reload(irn)))
- return arch_no_register_req;
-
- return get_in_reg_req(irn, pos);
- }
-
- return arch_no_register_req;
-}
-
static arch_irn_class_t be_node_classify(const ir_node *irn)
{
-restart:
switch (get_irn_opcode(irn)) {
-#define XXX(a,b) case a: return b
- XXX(beo_Spill, arch_irn_class_spill);
- XXX(beo_Reload, arch_irn_class_reload);
- XXX(beo_Perm, arch_irn_class_perm);
- XXX(beo_Copy, arch_irn_class_copy);
- XXX(beo_Return, arch_irn_class_branch);
-#undef XXX
- case iro_Proj:
- irn = get_Proj_pred(irn);
- if (is_Proj(irn)) {
- assert(get_irn_mode(irn) == mode_T);
- irn = get_Proj_pred(irn);
- }
- goto restart;
-
- default:
- return 0;
+ case beo_Spill: return arch_irn_class_spill;
+ case beo_Reload: return arch_irn_class_reload;
+ case beo_Perm: return arch_irn_class_perm;
+ case beo_Copy: return arch_irn_class_copy;
+ default: return 0;
}
}
static void be_node_set_frame_offset(ir_node *irn, int offset)
{
- if(be_has_frame_entity(irn)) {
- be_frame_attr_t *a = get_irn_attr(irn);
- a->offset = offset;
- }
+ be_frame_attr_t *a;
+
+ if(!be_has_frame_entity(irn))
+ return;
+
+ a = get_irn_attr(irn);
+ a->offset = offset;
}
static int be_node_get_sp_bias(const ir_node *irn)
*/
+/* for be nodes */
static const arch_irn_ops_t be_node_irn_ops = {
- be_node_get_irn_reg_req,
+ be_node_get_in_reg_req,
+ be_node_get_out_reg_req,
be_node_classify,
be_node_get_frame_entity,
be_node_set_frame_entity,
NULL, /* perform_memory_operand */
};
+static const arch_register_req_t *dummy_reg_req(
+ const ir_node *node, int pos)
+{
+ (void) node;
+ (void) pos;
+ return arch_no_register_req;
+}
+
+static arch_irn_class_t dummy_classify(const ir_node *node)
+{
+ (void) node;
+ return 0;
+}
+
+static ir_entity* dummy_get_frame_entity(const ir_node *node)
+{
+ (void) node;
+ return NULL;
+}
+
+static void dummy_set_frame_entity(ir_node *node, ir_entity *entity)
+{
+ (void) node;
+ (void) entity;
+ panic("dummy_set_frame_entity() should not be called");
+}
+
+static void dummy_set_frame_offset(ir_node *node, int bias)
+{
+ (void) node;
+ (void) bias;
+ panic("dummy_set_frame_offset() should not be called");
+}
+
+static int dummy_get_sp_bias(const ir_node *node)
+{
+ (void) node;
+ return 0;
+}
+
+/* for "middleend" nodes */
+static const arch_irn_ops_t dummy_be_irn_ops = {
+ dummy_reg_req,
+ dummy_reg_req,
+ dummy_classify,
+ dummy_get_frame_entity,
+ dummy_set_frame_entity,
+ dummy_set_frame_offset,
+ dummy_get_sp_bias,
+ NULL, /* get_inverse */
+ NULL, /* get_op_estimated_cost */
+ NULL, /* possible_memory_operand */
+ NULL, /* perform_memory_operand */
+};
+
/*
____ _ _ ___ ____ _ _ _ _ _ _
| _ \| |__ (_) |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
attr->flags = flags;
}
-static arch_irn_class_t phi_classify(const ir_node *irn)
-{
- (void) irn;
- return 0;
-}
-
-static ir_entity *phi_get_frame_entity(const ir_node *irn)
-{
- (void) irn;
- return NULL;
-}
-
-static void phi_set_frame_entity(ir_node *irn, ir_entity *ent)
-{
- (void) irn;
- (void) ent;
- panic("phi_set_frame_entity() should not be called");
-}
-
-static void phi_set_frame_offset(ir_node *irn, int bias)
-{
- (void) irn;
- (void) bias;
- panic("phi_set_frame_offset() should not be called");
-}
-
-static int phi_get_sp_bias(const ir_node *irn)
-{
- (void) irn;
- return 0;
-}
-
static const arch_irn_ops_t phi_irn_ops = {
phi_get_irn_reg_req,
- phi_classify,
- phi_get_frame_entity,
- phi_set_frame_entity,
- phi_set_frame_offset,
- phi_get_sp_bias,
+ phi_get_irn_reg_req,
+ dummy_classify,
+ dummy_get_frame_entity,
+ dummy_set_frame_entity,
+ dummy_set_frame_offset,
+ dummy_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
phi_handler.phi_attrs = pmap_create();
}
+
+
+
/*
_ _ _ ____ _
| \ | | ___ __| | ___ | _ \ _ _ _ __ ___ _ __ (_)_ __ __ _
void be_init_op(void)
{
+ ir_opcode opc;
+
/* Acquire all needed opcodes. */
op_be_Spill = new_ir_op(beo_Spill, "be_Spill", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_Reload = new_ir_op(beo_Reload, "be_Reload", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr;
op_be_Barrier->ops.node_cmp_attr = node_cmp_attr;
op_be_Unwind->ops.node_cmp_attr = node_cmp_attr;
+
+	/* attach our dummy_ops to middle end nodes */
+ for (opc = iro_First; opc <= iro_Last; ++opc) {
+ ir_op *op = get_irp_opcode(opc);
+ assert(op->ops.be_ops == NULL);
+ op->ops.be_ops = &dummy_be_irn_ops;
+ }
}
int sched_skip_cf_predicator(const ir_node *irn, void *data)
{
(void)data;
- return arch_irn_class_is(irn, branch);
+ return is_cfop(irn);
}
int sched_skip_phi_predicator(const ir_node *irn, void *data) {
The link field is used anyway. */
for (i = ARR_LEN(sched) - 1; i >= 0; --i) {
ir_node* irn = sched[i];
- if (!arch_irn_class_is(irn, branch)) {
+ if (!is_cfop(irn)) {
set_irn_link(irn, first);
first = irn;
}
/* assure that branches and constants are executed last */
ir_nodeset_iterator_init(&iter, ready_set);
while( (irn = ir_nodeset_iterator_next(&iter)) != NULL) {
- if (!arch_irn_class_is(irn, branch)) {
+ if (!is_cfop(irn)) {
only_branches_left = 0;
break;
}
}
++i;
}
- } while (arch_irn_class_is(irn, branch));
+ } while (is_cfop(irn));
}
return irn;
Ignore branch instructions for the time being.
They should only be scheduled if there is nothing else.
*/
- if (!arch_irn_class_is(irn, branch)) {
+ if (!is_cfop(irn)) {
int costs = reg_pr_costs(env, irn);
if (costs <= curr_cost) {
res = irn;
for (cur_pos = 0, curr = root; curr; curr = get_irn_link(curr), cur_pos++) {
sched_timestep_t d;
- if (arch_irn_class_is(curr, branch)) {
+ if (is_cfop(curr)) {
/* assure, that branches can be executed last */
d = 0;
}
/* assure that branches and constants are executed last */
foreach_ir_nodeset(ready_set, irn, iter) {
- if (!arch_irn_class_is(irn, branch)) {
+ if (!is_cfop(irn)) {
return irn;
}
}
if (cnt == 1) {
irn = get_nodeset_node(&ecands);
- if (arch_irn_class_is(irn, branch)) {
+ if (is_cfop(irn)) {
/* BEWARE: don't select a JUMP if others are still possible */
goto force_mcands;
}
/* priority based selection, heuristic inspired by mueller diss */
foreach_ir_nodeset(ns, irn, iter) {
/* make sure that branches are scheduled last */
- if (!arch_irn_class_is(irn, branch)) {
+ if (!is_cfop(irn)) {
int rdiff = get_irn_reg_diff(trace_env, irn);
int sign = rdiff < 0;
int chg = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;
/* assure that branches and constants are executed last */
foreach_ir_nodeset(ready_set, irn, iter) {
- if (!arch_irn_class_is(irn, branch)) {
+ if (!is_cfop(irn)) {
return irn;
}
}
}
/* Creates the unique per irg GP NoReg node. */
-ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
+ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->noreg_gp, new_bd_ia32_NoReg_GP,
&ia32_gp_regs[REG_GP_NOREG]);
}
-ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg) {
+ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->noreg_vfp, new_bd_ia32_NoReg_VFP,
&ia32_vfp_regs[REG_VFP_NOREG]);
}
-ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg) {
+ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->noreg_xmm, new_bd_ia32_NoReg_XMM,
&ia32_xmm_regs[REG_XMM_NOREG]);
}
-ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
+ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->unknown_gp, new_bd_ia32_Unknown_GP,
&ia32_gp_regs[REG_GP_UKNWN]);
}
-ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg) {
+ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->unknown_vfp, new_bd_ia32_Unknown_VFP,
&ia32_vfp_regs[REG_VFP_UKNWN]);
}
-ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg) {
+ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->unknown_xmm, new_bd_ia32_Unknown_XMM,
&ia32_xmm_regs[REG_XMM_UKNWN]);
}
-ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg) {
+ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->fpu_trunc_mode, new_bd_ia32_ChangeCW,
&ia32_fp_cw_regs[REG_FPCW]);
}
* |___/
**************************************************/
-/**
- * Return register requirements for an ia32 node.
- * If the node returns a tuple (mode_T) then the proj's
- * will be asked for this information.
- */
-static const arch_register_req_t *ia32_get_irn_reg_req(const ir_node *node,
- int pos)
+static const arch_register_req_t *get_ia32_SwitchJmp_out_req(
+ const ir_node *node, int pos)
{
- ir_mode *mode = get_irn_mode(node);
- long node_pos;
-
- if (mode == mode_X || is_Block(node)) {
- return arch_no_register_req;
- }
-
- if (mode == mode_T && pos < 0) {
- return arch_no_register_req;
- }
-
- node_pos = pos == -1 ? 0 : pos;
- if (is_Proj(node)) {
- if (mode == mode_M || pos >= 0) {
- return arch_no_register_req;
- }
-
- node_pos = (pos == -1) ? get_Proj_proj(node) : pos;
- node = skip_Proj_const(node);
- }
-
- if (is_ia32_irn(node)) {
- const arch_register_req_t *req;
- if (pos >= 0)
- req = get_ia32_in_req(node, pos);
- else
- req = get_ia32_out_req(node, node_pos);
-
- assert(req != NULL);
-
- return req;
- }
-
- /* unknowns should be transformed already */
+ (void) node;
+ (void) pos;
return arch_no_register_req;
}
-static arch_irn_class_t ia32_classify(const ir_node *irn) {
+static arch_irn_class_t ia32_classify(const ir_node *irn)
+{
arch_irn_class_t classification = 0;
- irn = skip_Proj_const(irn);
-
- if (is_cfop(irn))
- classification |= arch_irn_class_branch;
-
- if (! is_ia32_irn(irn))
- return classification;
+ assert(is_ia32_irn(irn));
if (is_ia32_is_reload(irn))
classification |= arch_irn_class_reload;
ir_graph *irg; /**< The associated graph. */
} ia32_abi_env_t;
-static ir_entity *ia32_get_frame_entity(const ir_node *irn) {
+static ir_entity *ia32_get_frame_entity(const ir_node *irn)
+{
return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}
-static void ia32_set_frame_entity(ir_node *irn, ir_entity *ent) {
+static void ia32_set_frame_entity(ir_node *irn, ir_entity *ent)
+{
set_ia32_frame_ent(irn, ent);
}
* Destroy the callback object.
* @param self The callback object.
*/
-static void ia32_abi_done(void *self) {
+static void ia32_abi_done(void *self)
+{
free(self);
}
/**
* Build the between type and entities if not already build.
*/
-static void ia32_build_between_type(void) {
+static void ia32_build_between_type(void)
+{
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
if (! between_type) {
ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_Iu);
/**
* Return the stack entity that contains the return address.
*/
-ir_entity *ia32_get_return_address_entity(void) {
+ir_entity *ia32_get_return_address_entity(void)
+{
ia32_build_between_type();
return ia32_curr_fp_ommitted ? omit_fp_ret_addr_ent : ret_addr_ent;
}
/**
* Return the stack entity that contains the frame address.
*/
-ir_entity *ia32_get_frame_address_entity(void) {
+ir_entity *ia32_get_frame_address_entity(void)
+{
ia32_build_between_type();
return ia32_curr_fp_ommitted ? NULL : old_bp_ent;
}
* @param obstack The obstack to use for allocation of the returned nodes array
* @return The inverse operation or NULL if operation invertible
*/
-static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
+static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst)
+{
ir_mode *mode;
ir_mode *irn_mode;
ir_node *block, *noreg, *nomem;
ia32_abi_epilogue
};
-/* fill register allocator interface */
-
+/* register allocator interface */
static const arch_irn_ops_t ia32_irn_ops = {
- ia32_get_irn_reg_req,
+ get_ia32_in_req,
+ get_ia32_out_req,
+ ia32_classify,
+ ia32_get_frame_entity,
+ ia32_set_frame_entity,
+ ia32_set_frame_offset,
+ ia32_get_sp_bias,
+ ia32_get_inverse,
+ ia32_get_op_estimated_cost,
+ ia32_possible_memory_operand,
+ ia32_perform_memory_operand,
+};
+
+/* special register allocator interface for SwitchJmp
+ as it possibly has a WIDE range of Proj numbers.
+ We don't want to allocate output for register constraints for
+ all these. */
+static const arch_irn_ops_t ia32_SwitchJmp_irn_ops = {
+ /* Note: we also use SwitchJmp_out_req for the inputs too:
+ This is because the bearch API has a conceptual problem at the moment.
   Querying for negative proj numbers which can happen for switches
+ isn't possible and will result in inputs getting queried */
+ get_ia32_SwitchJmp_out_req,
+ get_ia32_SwitchJmp_out_req,
ia32_classify,
ia32_get_frame_entity,
ia32_set_frame_entity,
#define ID(s) new_id_from_chars(s, sizeof(s) - 1)
-static void ia32_before_abi(void *self) {
+static void ia32_before_abi(void *self)
+{
lower_mode_b_config_t lower_mode_b_config = {
mode_Iu, /* lowered mode */
mode_Bu, /* preferred mode for set */
/**
* Called before the register allocator.
*/
-static void ia32_before_ra(void *self) {
+static void ia32_before_ra(void *self)
+{
ia32_code_gen_t *cg = self;
/* setup fpu rounding modes */
/**
* Transforms a be_Reload into a ia32 Load.
*/
-static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node) {
+static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node)
+{
ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
/**
* Transforms a be_Spill node into a ia32 Store.
*/
-static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node) {
+static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node)
+{
ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
exchange(node, store);
}
-static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) {
+static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
+{
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *noreg = ia32_new_NoReg_gp(cg);
return push;
}
-static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent) {
+static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent)
+{
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *noreg = ia32_new_NoReg_gp(cg);
/**
* Block-Walker: Calls the transform functions Spill and Reload.
*/
-static void ia32_after_ra_walker(ir_node *block, void *env) {
+static void ia32_after_ra_walker(ir_node *block, void *env)
+{
ir_node *node, *prev;
ia32_code_gen_t *cg = env;
* We transform Spill and Reload here. This needs to be done before
* stack biasing otherwise we would miss the corrected offset for these nodes.
*/
-static void ia32_after_ra(void *self) {
+static void ia32_after_ra(void *self)
+{
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->birg);
* virtual with real x87 instructions, creating a block schedule and peephole
* optimisations.
*/
-static void ia32_finish(void *self) {
+static void ia32_finish(void *self)
+{
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
* Emits the code, closes the output file and frees
* the code generator interface.
*/
-static void ia32_codegen(void *self) {
+static void ia32_codegen(void *self)
+{
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
/**
* Returns the node representing the PIC base.
*/
-static ir_node *ia32_get_pic_base(void *self) {
+static ir_node *ia32_get_pic_base(void *self)
+{
ir_node *block;
ia32_code_gen_t *cg = self;
ir_node *get_eip = cg->get_eip;
/**
* Initializes a IA32 code generator.
*/
-static void *ia32_cg_init(be_irg_t *birg) {
+static void *ia32_cg_init(be_irg_t *birg)
+{
ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env;
ia32_code_gen_t *cg = XMALLOCZ(ia32_code_gen_t);
/**
* Initializes the backend ISA.
*/
-static arch_env_t *ia32_init(FILE *file_handle) {
+static arch_env_t *ia32_init(FILE *file_handle)
+{
static int inited = 0;
ia32_isa_t *isa;
int i, n;
ia32_register_init();
ia32_create_opcodes(&ia32_irn_ops);
+ /* special handling for SwitchJmp */
+ op_ia32_SwitchJmp->ops.be_ops = &ia32_SwitchJmp_irn_ops;
be_emit_init(file_handle);
isa->regs_16bit = pmap_create();
/**
* Closes the output file and frees the ISA structure.
*/
-static void ia32_done(void *self) {
+static void ia32_done(void *self)
+{
ia32_isa_t *isa = self;
/* emit now all global declarations */
* - the virtual floating point registers
* - the SSE vector register set
*/
-static unsigned ia32_get_n_reg_class(const void *self) {
+static unsigned ia32_get_n_reg_class(const void *self)
+{
(void) self;
return N_CLASSES;
}
/**
* Returns the estimated execution time of an ia32 irn.
*/
-static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
+static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn)
+{
(void) env;
return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(irn) : 1;
}
/**
* Return the abstract ia32 machine.
*/
-static const be_machine_t *ia32_get_machine(const void *self) {
+static const be_machine_t *ia32_get_machine(const void *self)
+{
const ia32_isa_t *isa = self;
return isa->cpu;
}
return NULL;
}
-static void ia32_mark_remat(const void *self, ir_node *node) {
+static void ia32_mark_remat(const void *self, ir_node *node)
+{
(void) self;
if (is_ia32_irn(node)) {
set_ia32_is_remat(node);
/**
* Check for Abs or -Abs.
*/
-static int psi_is_Abs_or_Nabs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f) {
+static int psi_is_Abs_or_Nabs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f)
+{
ir_node *l, *r;
pn_Cmp pnc;
/**
* Check for Abs only
*/
-static int psi_is_Abs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f) {
+static int psi_is_Abs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f)
+{
ir_node *l, *r;
pn_Cmp pnc;
/**
* Returns the libFirm configuration parameter for this backend.
*/
-static const backend_params *ia32_get_libfirm_params(void) {
+static const backend_params *ia32_get_libfirm_params(void)
+{
static const ir_settings_if_conv_t ifconv = {
4, /* maxdepth, doesn't matter for Mux-conversion */
ia32_is_mux_allowed /* allows or disallows Mux creation for given selector */
ident **clobbers;
int clobbers_flags = 0;
unsigned clobber_bits[N_CLASSES];
+ int out_size;
memset(&clobber_bits, 0, sizeof(clobber_bits));
memset(register_map, 0, reg_map_size * sizeof(register_map[0]));
/* construct output constraints */
- out_reg_reqs = obstack_alloc(obst, out_arity * sizeof(out_reg_reqs[0]));
+ out_size = out_arity + 1;
+ out_reg_reqs = obstack_alloc(obst, out_size * sizeof(out_reg_reqs[0]));
for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
const ir_asm_constraint *constraint = &out_constraints[out_idx];
int i;
bitset_t *used_outs = bitset_alloca(out_arity);
int orig_out_arity = out_arity;
- int out_size = out_arity;
for (i = 0; i < arity; ++i) {
int o;
const arch_register_req_t *inreq = in_reg_reqs[i];
}
}
+ /* append none register requirement for the memory output */
+ if (out_arity + 1 >= out_size) {
+ const arch_register_req_t **new_out_reg_reqs;
+
+ out_size = out_arity + 1;
+ new_out_reg_reqs
+ = obstack_alloc(obst, out_size*sizeof(out_reg_reqs[0]));
+ memcpy(new_out_reg_reqs, out_reg_reqs,
+ out_arity * sizeof(new_out_reg_reqs[0]));
+ out_reg_reqs = new_out_reg_reqs;
+ }
+
+ /* add a new (dummy) output which occupies the register */
+ out_reg_reqs[out_arity] = arch_no_register_req;
+ ++out_arity;
+
new_node = new_bd_ia32_Asm(dbgi, new_block, arity, in, out_arity,
get_ASM_text(node), register_map);
/* handle the often used case of 32x32=64 mul */
if (is_sign_extend(a_l, a_h) && is_sign_extend(b_l, b_h)) {
mul = new_bd_ia32_l_IMul(dbg, block, a_l, b_l);
- h_res = new_rd_Proj(dbg, block, mul, h_mode, pn_ia32_l_Mul_EDX);
- l_res = new_rd_Proj(dbg, block, mul, l_mode, pn_ia32_l_Mul_EAX);
+ h_res = new_rd_Proj(dbg, block, mul, h_mode, pn_ia32_l_IMul_res_high);
+ l_res = new_rd_Proj(dbg, block, mul, l_mode, pn_ia32_l_IMul_res_low);
} else {
		/* note that zero extension is handled here efficiently */
mul = new_bd_ia32_l_Mul(dbg, block, a_l, b_l);
- pEDX = new_rd_Proj(dbg, block, mul, h_mode, pn_ia32_l_Mul_EDX);
- l_res = new_rd_Proj(dbg, block, mul, l_mode, pn_ia32_l_Mul_EAX);
+ pEDX = new_rd_Proj(dbg, block, mul, h_mode, pn_ia32_l_Mul_res_high);
+ l_res = new_rd_Proj(dbg, block, mul, l_mode, pn_ia32_l_Mul_res_low);
b_l = new_rd_Conv(dbg, block, b_l, h_mode);
mul = new_rd_Mul( dbg, block, a_h, b_l, h_mode);
# very strict constraints
state => "exc_pinned",
reg_req => { in => [ "gp", "gp", "none", "eax", "gp" ],
- out => [ "eax", "flags", "edx", "none" ] },
+ out => [ "eax", "flags", "none", "edx" ] },
ins => [ "base", "index", "mem", "left", "right" ],
emit => '. mul%M %unop4',
- outs => [ "res_low", "flags", "res_high", "M" ],
+ outs => [ "res_low", "flags", "M", "res_high" ],
am => "source,binary",
latency => 10,
units => [ "GP" ],
# very strict constraints
op_flags => "C",
cmp_attr => "return 1;",
- outs => [ "EAX", "flags", "EDX", "M" ],
+ outs => [ "res_low", "flags", "M", "res_high" ],
arity => 2
},
irn_flags => "R",
state => "exc_pinned",
reg_req => { in => [ "gp", "gp", "none", "eax", "gp" ],
- out => [ "eax", "flags", "edx", "none" ] },
+ out => [ "eax", "flags", "none", "edx" ] },
ins => [ "base", "index", "mem", "left", "right" ],
emit => '. imul%M %unop4',
- outs => [ "res_low", "flags", "res_high", "M" ],
+ outs => [ "res_low", "flags", "M", "res_high" ],
am => "source,binary",
latency => 5,
units => [ "GP" ],
l_IMul => {
op_flags => "C",
cmp_attr => "return 1;",
- outs => [ "res_low", "res_high", "M" ],
+ outs => [ "res_low", "flags", "M", "res_high" ],
arity => 2
},
Cmp8Bit => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "eax ebx ecx edx", "eax ebx ecx edx" ] , out => [ "flags" ] },
+ reg_req => { in => [ "gp", "gp", "none", "eax ebx ecx edx", "eax ebx ecx edx" ] ,
+ out => [ "flags", "none", "none" ] },
ins => [ "base", "index", "mem", "left", "right" ],
- outs => [ "eflags" ],
+ outs => [ "eflags", "unused", "M" ],
am => "source,binary",
emit => '. cmpb %binop',
attr => "int ins_permuted, int cmp_unsigned",
Test => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "gp", "gp" ] , out => [ "flags" ] },
+ reg_req => { in => [ "gp", "gp", "none", "gp", "gp" ] ,
+ out => [ "flags", "none", "none" ] },
ins => [ "base", "index", "mem", "left", "right" ],
- outs => [ "eflags" ],
+ outs => [ "eflags", "unused", "M" ],
am => "source,binary",
emit => '. test%M %binop',
attr => "int ins_permuted, int cmp_unsigned",
Test8Bit => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "eax ebx ecx edx", "eax ebx ecx edx" ] , out => [ "flags" ] },
+ reg_req => { in => [ "gp", "gp", "none", "eax ebx ecx edx", "eax ebx ecx edx" ] ,
+ out => [ "flags", "none", "none" ] },
ins => [ "base", "index", "mem", "left", "right" ],
- outs => [ "eflags" ],
+ outs => [ "eflags", "unused", "M" ],
am => "source,binary",
emit => '. testb %binop',
attr => "int ins_permuted, int cmp_unsigned",
# (note: leave the false,true order intact to make it compatible with other
# ia32_binary ops)
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "gp", "gp", "eflags" ], out => [ "in_r4 in_r5" ] },
+ reg_req => { in => [ "gp", "gp", "none", "gp", "gp", "eflags" ],
+ out => [ "in_r4 in_r5", "flags", "none" ] },
ins => [ "base", "index", "mem", "val_false", "val_true", "eflags" ],
+ outs => [ "res", "flags", "M" ],
am => "source,binary",
attr_type => "ia32_condcode_attr_t",
attr => "int ins_permuted, pn_Cmp pnc",
Load => {
op_flags => "L|F",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none" ], out => [ "gp", "none", "none" ] },
+ reg_req => { in => [ "gp", "gp", "none" ],
+ out => [ "gp", "none", "none", "none" ] },
ins => [ "base", "index", "mem" ],
- outs => [ "res", "M", "X_exc" ],
+ outs => [ "res", "unused", "M", "X_exc" ],
latency => 0,
emit => ". mov%EX%.l %AM, %D0",
units => [ "GP" ],
xAdd => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ], out => [ "in_r4 in_r5" ] },
+ reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ],
+ out => [ "in_r4 in_r5", "flags", "none" ] },
ins => [ "base", "index", "mem", "left", "right" ],
+ outs => [ "res", "flags", "M" ],
am => "source,binary",
emit => '. add%XXM %binop',
latency => 4,
xMul => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ], out => [ "in_r4 in_r5" ] },
+ reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ],
+ out => [ "in_r4 in_r5", "flags", "none" ] },
ins => [ "base", "index", "mem", "left", "right" ],
+ outs => [ "res", "flags", "M" ],
am => "source,binary",
emit => '. mul%XXM %binop',
latency => 4,
xMax => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ], out => [ "in_r4 in_r5" ] },
+ reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ],
+ out => [ "in_r4 in_r5", "flags", "none" ] },
ins => [ "base", "index", "mem", "left", "right" ],
+ outs => [ "res", "flags", "M" ],
am => "source,binary",
emit => '. max%XXM %binop',
latency => 2,
xMin => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ], out => [ "in_r4 in_r5" ] },
+ reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ],
+ out => [ "in_r4 in_r5", "flags", "none" ] },
ins => [ "base", "index", "mem", "left", "right" ],
+ outs => [ "res", "flags", "M" ],
am => "source,binary",
emit => '. min%XXM %binop',
latency => 2,
xAnd => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ], out => [ "in_r4 in_r5" ] },
+ reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ],
+ out => [ "in_r4 in_r5", "flags", "none" ] },
ins => [ "base", "index", "mem", "left", "right" ],
+ outs => [ "res", "flags", "M" ],
am => "source,binary",
emit => '. andp%XSD %binop',
latency => 3,
xOr => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ], out => [ "in_r4 in_r5" ] },
+ reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ],
+ out => [ "in_r4 in_r5", "flags", "none" ] },
ins => [ "base", "index", "mem", "left", "right" ],
+ outs => [ "res", "flags", "M" ],
am => "source,binary",
emit => '. orp%XSD %binop',
latency => 3,
xXor => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ], out => [ "in_r4 in_r5" ] },
+ reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ],
+ out => [ "in_r4 in_r5", "flags", "none" ] },
ins => [ "base", "index", "mem", "left", "right" ],
+ outs => [ "res", "flags", "M" ],
am => "source,binary",
emit => '. xorp%XSD %binop',
latency => 3,
xAndNot => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ], out => [ "in_r4 !in_r5" ] },
+ reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ],
+ out => [ "in_r4 !in_r5", "flags", "none" ] },
ins => [ "base", "index", "mem", "left", "right" ],
+ outs => [ "res", "flags", "M" ],
am => "source,binary",
emit => '. andnp%XSD %binop',
latency => 3,
xSub => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ], out => [ "in_r4" ] },
+ reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ],
+ out => [ "in_r4", "flags", "none" ] },
ins => [ "base", "index", "mem", "minuend", "subtrahend" ],
+ outs => [ "res", "flags", "M" ],
am => "source,binary",
emit => '. sub%XXM %binop',
latency => 4,
xDiv => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ], out => [ "in_r4 !in_r5", "none" ] },
+ reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ],
+ out => [ "in_r4 !in_r5", "flags", "none" ] },
ins => [ "base", "index", "mem", "dividend", "divisor" ],
+ outs => [ "res", "flags", "M" ],
am => "source,binary",
- outs => [ "res", "M" ],
emit => '. div%XXM %binop',
latency => 16,
units => [ "SSE" ],
Ucomi => {
irn_flags => "R",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ], out => [ "eflags" ] },
+ reg_req => { in => [ "gp", "gp", "none", "xmm", "xmm" ],
+ out => [ "eflags" ] },
ins => [ "base", "index", "mem", "left", "right" ],
outs => [ "flags" ],
am => "source,binary",
xLoad => {
op_flags => "L|F",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none" ], out => [ "xmm", "none", "none" ] },
+ reg_req => { in => [ "gp", "gp", "none" ],
+ out => [ "xmm", "none", "none", "none" ] },
ins => [ "base", "index", "mem" ],
- outs => [ "res", "M", "X_exc" ],
+ outs => [ "res", "unused", "M", "X_exc" ],
emit => '. mov%XXM %AM, %D0',
attr => "ir_mode *load_mode",
init_attr => "attr->ls_mode = load_mode;",
Conv_I2I => {
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "gp" ], out => [ "gp", "none" ] },
+ reg_req => { in => [ "gp", "gp", "none", "gp" ],
+ out => [ "gp", "none", "none" ] },
ins => [ "base", "index", "mem", "val" ],
- outs => [ "res", "M" ],
+ outs => [ "res", "flags", "M" ],
am => "source,unary",
units => [ "GP" ],
latency => 1,
Conv_I2I8Bit => {
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "eax ebx ecx edx" ], out => [ "gp", "none" ] },
+ reg_req => { in => [ "gp", "gp", "none", "eax ebx ecx edx" ],
+ out => [ "gp", "none", "none" ] },
ins => [ "base", "index", "mem", "val" ],
+ outs => [ "res", "flags", "M" ],
am => "source,unary",
units => [ "GP" ],
latency => 1,
irn_flags => "R",
op_flags => "L|F",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none" ], out => [ "vfp", "none", "none" ] },
+ reg_req => { in => [ "gp", "gp", "none" ],
+ out => [ "vfp", "none", "none", "none" ] },
ins => [ "base", "index", "mem" ],
- outs => [ "res", "M", "X_exc" ],
+ outs => [ "res", "unused", "M", "X_exc" ],
attr => "ir_mode *load_mode",
init_attr => "attr->attr.ls_mode = load_mode;",
latency => 2,
irn_flags => "R",
op_flags => "L|F",
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "vfp" ], out => [ "none", "none" ] },
+ reg_req => { in => [ "gp", "gp", "none", "vfp" ],
+ out => [ "none", "none" ] },
ins => [ "base", "index", "mem", "val" ],
outs => [ "M", "X_exc" ],
attr => "ir_mode *store_mode",
vfild => {
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none" ], out => [ "vfp", "none" ] },
- outs => [ "res", "M" ],
+ reg_req => { in => [ "gp", "gp", "none" ],
+ out => [ "vfp", "none", "none" ] },
+ outs => [ "res", "unused", "M" ],
ins => [ "base", "index", "mem" ],
latency => 4,
units => [ "VFP" ],
switch_max = pn;
}
- if ((unsigned long) (switch_max - switch_min) > 256000) {
- panic("Size of switch %+F bigger than 256000", node);
+ if ((unsigned long) (switch_max - switch_min) > 128000) {
+ panic("Size of switch %+F bigger than 128000", node);
}
if (switch_min != 0) {
case pn_Load_X_except:
/* This Load might raise an exception. Mark it. */
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_xLoad_X_exc);
+ return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_vfld_X_exc);
default:
break;
}
long pos = get_Proj_proj(node);
if (mode == mode_M) {
- pos = arch_irn_get_n_outs(new_pred) + 1;
+ pos = arch_irn_get_n_outs(new_pred)-1;
} else if (mode_is_int(mode) || mode_is_reference(mode)) {
mode = mode_Iu;
} else if (mode_is_float(mode)) {
* |___/
**************************************************/
-/**
- * Return register requirements for a mips node.
- * If the node returns a tuple (mode_T) then the proj's
- * will be asked for this information.
- */
-static const
-arch_register_req_t *mips_get_irn_reg_req(const ir_node *node, int pos)
-{
- long node_pos = pos == -1 ? 0 : pos;
- ir_mode *mode = get_irn_mode(node);
-
- if (is_Block(node) || mode == mode_X || mode == mode_M) {
- return arch_no_register_req;
- }
-
- if (mode == mode_T && pos < 0) {
- return arch_no_register_req;
- }
-
- if (is_Proj(node)) {
- /* in case of a proj, we need to get the correct OUT slot */
- /* of the node corresponding to the proj number */
- if (pos == -1) {
- node_pos = mips_translate_proj_pos(node);
- }
- else {
- node_pos = pos;
- }
-
- node = skip_Proj_const(node);
- }
-
- /* get requirements for our own nodes */
- if (is_mips_irn(node)) {
- const arch_register_req_t *req;
- if (pos >= 0) {
- req = get_mips_in_req(node, pos);
- } else {
- req = get_mips_out_req(node, node_pos);
- }
-
- return req;
- }
-
- /* unknown should be translated by now */
- assert(!is_Unknown(node));
-
- return arch_no_register_req;
-}
-
static arch_irn_class_t mips_classify(const ir_node *irn)
{
- irn = skip_Proj_const(irn);
-
- if (is_cfop(irn)) {
- return arch_irn_class_branch;
- }
-
+ (void) irn;
return 0;
}
/* fill register allocator interface */
static const arch_irn_ops_t mips_irn_ops = {
- mips_get_irn_reg_req,
+ get_mips_in_req,
+ get_mips_out_req,
mips_classify,
mips_get_frame_entity,
mips_set_frame_entity,
* |___/
**************************************************/
-/**
- * Return register requirements for a ppc node.
- * If the node returns a tuple (mode_T) then the proj's
- * will be asked for this information.
- */
-static const arch_register_req_t *ppc32_get_irn_reg_req(const ir_node *irn,
- int pos)
-{
- long node_pos = pos == -1 ? 0 : pos;
- ir_mode *mode = get_irn_mode(irn);
- FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);
-
- if (is_Block(irn) || mode == mode_X || mode == mode_M) {
- DBG((mod, LEVEL_1, "ignoring block, mode_X or mode_M node %+F\n", irn));
- return arch_no_register_req;
- }
-
- if (mode == mode_T && pos < 0) {
- DBG((mod, LEVEL_1, "ignoring request for OUT requirements at %+F", irn));
- return arch_no_register_req;
- }
-
- DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
-
- if (is_Proj(irn)) {
- /* in case of a proj, we need to get the correct OUT slot */
- /* of the node corresponding to the proj number */
- if (pos == -1) {
- node_pos = ppc32_translate_proj_pos(irn);
- } else {
- node_pos = pos;
- }
-
- irn = skip_Proj_const(irn);
-
- DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
- }
-
- /* get requirements for our own nodes */
- if (is_ppc32_irn(irn)) {
- const arch_register_req_t *req;
- if (pos >= 0) {
- req = get_ppc32_in_req(irn, pos);
- } else {
- req = get_ppc32_out_req(irn, node_pos);
- }
-
- DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));
- return req;
- }
-
- /* unknowns should be transformed by now */
- assert(!is_Unknown(irn));
-
- DB((mod, LEVEL_1, "returning NULL for %+F (node not supported)\n", irn));
- return arch_no_register_req;
-}
-
static arch_irn_class_t ppc32_classify(const ir_node *irn)
{
- irn = skip_Proj_const(irn);
-
- if (is_cfop(irn)) {
- return arch_irn_class_branch;
- }
-
+ (void) irn;
return 0;
}
/* fill register allocator interface */
static const arch_irn_ops_t ppc32_irn_ops = {
- ppc32_get_irn_reg_req,
+ get_ppc32_in_req,
+ get_ppc32_out_req,
ppc32_classify,
ppc32_get_frame_entity,
ppc32_set_frame_entity,
ir_op_ops ops;
int cur_opcode;
static int run_once = 0;
- int i;
ENDOFMAIN
if (defined($default_op_attr_type)) {
return;
run_once = 1;
- /* we handle all middleend nodes as well that have no other handler */
- for (i = 0; i <= iro_Last; ++i) {
- ir_op *op = get_irp_opcode(i);
- if (op->ops.be_ops == NULL)
- op->ops.be_ops = be_ops;
- }
-
cur_opcode = get_next_ir_opcodes(iro_$arch\_last);
$arch\_opcode_start = cur_opcode;