return false;
}
+/**
+ * return NoREG or pic_base in case of PIC.
+ * This is necessary as base address for newly created symbols
+ */
+static ir_node *get_symconst_base(void)
+{
+ if (env_cg->birg->main_env->options->pic) {
+ return arch_code_generator_get_pic_base(env_cg);
+ }
+
+ return noreg_GP;
+}
+
/**
* Transforms a Const.
*/
if (mode_is_float(mode)) {
ir_node *res = NULL;
ir_node *load;
+ ir_node *base;
ir_entity *floatent;
if (ia32_cg_config.use_sse2) {
#endif /* CONSTRUCT_SSE_CONST */
floatent = create_float_const_entity(node);
- load = new_bd_ia32_xLoad(dbgi, block, noreg_GP, noreg_GP, nomem, mode);
+ base = get_symconst_base();
+ load = new_bd_ia32_xLoad(dbgi, block, base, noreg_GP, nomem,
+ mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
arch_irn_add_flags(load, arch_irn_flags_rematerializable);
- res = new_r_Proj(block, load, mode_xmm, pn_ia32_xLoad_res);
+ res = new_r_Proj(load, mode_xmm, pn_ia32_xLoad_res);
}
} else {
if (is_Const_null(node)) {
set_ia32_ls_mode(load, mode);
} else {
ir_mode *ls_mode;
+ ir_node *base;
floatent = create_float_const_entity(node);
/* create_float_const_ent is smart and sometimes creates
smaller entities */
ls_mode = get_type_mode(get_entity_type(floatent));
-
- load = new_bd_ia32_vfld(dbgi, block, noreg_GP, noreg_GP, nomem,
+ base = get_symconst_base();
+ load = new_bd_ia32_vfld(dbgi, block, base, noreg_GP, nomem,
ls_mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
arch_irn_add_flags(load, arch_irn_flags_rematerializable);
- res = new_r_Proj(block, load, mode_vfp, pn_ia32_vfld_res);
+ res = new_r_Proj(load, mode_vfp, pn_ia32_vfld_res);
}
}
#ifdef CONSTRUCT_SSE_CONST
ent = new_entity(get_glob_type(), new_id_from_str(ent_name), tp);
set_entity_ld_ident(ent, get_entity_ident(ent));
- set_entity_visibility(ent, visibility_local);
- set_entity_variability(ent, variability_constant);
- set_entity_allocation(ent, allocation_static);
+ add_entity_linkage(ent, IR_LINKAGE_CONSTANT);
+ set_entity_visibility(ent, ir_visibility_local);
if (kct == ia32_ULLBIAS) {
ir_initializer_t *initializer = create_initializer_compound(2);
set_initializer_compound_value(initializer, 0,
- create_initializer_tarval(get_tarval_null(mode)));
+ create_initializer_tarval(get_mode_null(mode)));
set_initializer_compound_value(initializer, 1,
create_initializer_tarval(tv));
ir_node *mem;
ir_node *new_mem;
+ /* floating point immediates */
if (is_Const(node)) {
ir_entity *entity = create_float_const_entity(node);
- addr->base = noreg_GP;
+ addr->base = get_symconst_base();
addr->index = noreg_GP;
addr->mem = nomem;
addr->symconst_ent = entity;
get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode);
}
-/* Skip all Down-Conv's on a given node and return the resulting node. */
+/** Skip all Down-Conv's on a given node and return the resulting node. */
ir_node *ia32_skip_downconv(ir_node *node)
{
while (is_downconv(node))
return node;
}
+static bool is_sameconv(ir_node *node)
+{
+ ir_mode *src_mode;
+ ir_mode *dest_mode;
+
+ if (!is_Conv(node))
+ return 0;
+
+ src_mode = get_irn_mode(get_Conv_op(node));
+ dest_mode = get_irn_mode(node);
+ return
+ ia32_mode_needs_gp_reg(src_mode) &&
+ ia32_mode_needs_gp_reg(dest_mode) &&
+ get_mode_size_bits(dest_mode) == get_mode_size_bits(src_mode);
+}
+
+/** Skip all signedness convs */
+static ir_node *ia32_skip_sameconv(ir_node *node)
+{
+ while (is_sameconv(node))
+ node = get_Conv_op(node);
+
+ return node;
+}
+
static ir_node *create_upconv(ir_node *node, ir_node *orig_node)
{
ir_mode *mode = get_irn_mode(node);
if (op1 != NULL) {
op1 = ia32_skip_downconv(op1);
}
+ } else {
+ op2 = ia32_skip_sameconv(op2);
+ if (op1 != NULL) {
+ op1 = ia32_skip_sameconv(op1);
+ }
}
/* match immediates. firm nodes are normalized: constants are always on the
if (mode != mode_T) {
set_irn_mode(node, mode_T);
- return new_rd_Proj(NULL, get_nodes_block(node), node, mode, pn_ia32_res);
+ return new_rd_Proj(NULL, node, mode, pn_ia32_res);
} else {
return node;
}
*/
static ir_node *gen_Mulh(ir_node *node)
{
- ir_node *block = get_nodes_block(node);
- ir_node *new_block = be_transform_node(block);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *op1 = get_Mulh_left(node);
- ir_node *op2 = get_Mulh_right(node);
- ir_mode *mode = get_irn_mode(node);
- ir_node *new_node;
- ir_node *proj_res_high;
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *op1 = get_Mulh_left(node);
+ ir_node *op2 = get_Mulh_right(node);
+ ir_mode *mode = get_irn_mode(node);
+ ir_node *new_node;
+ ir_node *proj_res_high;
if (get_mode_size_bits(mode) != 32) {
panic("Mulh without 32bit size not supported in ia32 backend (%+F)", node);
if (mode_is_signed(mode)) {
new_node = gen_binop(node, op1, op2, new_bd_ia32_IMul1OP, match_commutative | match_am);
- proj_res_high = new_rd_Proj(dbgi, new_block, new_node, mode_Iu, pn_ia32_IMul1OP_res_high);
+ proj_res_high = new_rd_Proj(dbgi, new_node, mode_Iu, pn_ia32_IMul1OP_res_high);
} else {
new_node = gen_binop(node, op1, op2, new_bd_ia32_Mul, match_commutative | match_am);
- proj_res_high = new_rd_Proj(dbgi, new_block, new_node, mode_Iu, pn_ia32_Mul_res_high);
+ proj_res_high = new_rd_Proj(dbgi, new_node, mode_Iu, pn_ia32_Mul_res_high);
}
return proj_res_high;
}
* several AM nodes... */
ir_node *noreg_xmm = ia32_new_NoReg_xmm(env_cg);
- new_node = new_bd_ia32_xXor(dbgi, block, noreg_GP, noreg_GP,
- nomem, new_op, noreg_xmm);
+ new_node = new_bd_ia32_xXor(dbgi, block, get_symconst_base(),
+ noreg_GP, nomem, new_op, noreg_xmm);
size = get_mode_size_bits(mode);
ent = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
if (ia32_cg_config.use_sse2) {
ir_node *noreg_fp = ia32_new_NoReg_xmm(env_cg);
- new_node = new_bd_ia32_xAnd(dbgi, new_block, noreg_GP, noreg_GP,
- nomem, new_op, noreg_fp);
+ new_node = new_bd_ia32_xAnd(dbgi, new_block, get_symconst_base(),
+ noreg_GP, nomem, new_op, noreg_fp);
size = get_mode_size_bits(mode);
ent = ia32_gen_fp_known_const(size == 32 ? ia32_SABS : ia32_DABS);
}
}
}
- flags = be_transform_node(pred);
+ /* add ia32 compare flags */
+ {
+ ir_node *l = get_Cmp_left(pred);
+ ir_mode *mode = get_irn_mode(l);
+ if (mode_is_float(mode))
+ pnc |= ia32_pn_Cmp_float;
+ else if (! mode_is_signed(mode))
+ pnc |= ia32_pn_Cmp_unsigned;
+ }
*pnc_out = pnc;
- if (mode_is_float(get_irn_mode(get_Cmp_left(pred))))
- *pnc_out |= ia32_pn_Cmp_float;
+ flags = be_transform_node(pred);
return flags;
}
}
ir_node *index;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_mode *mode = get_Load_mode(node);
- ir_mode *res_mode;
ir_node *new_node;
ia32_address_t addr;
if (ia32_cg_config.use_sse2) {
new_node = new_bd_ia32_xLoad(dbgi, block, base, index, new_mem,
mode);
- res_mode = mode_xmm;
} else {
new_node = new_bd_ia32_vfld(dbgi, block, base, index, new_mem,
mode);
- res_mode = mode_vfp;
}
} else {
assert(mode != mode_b);
} else {
new_node = new_bd_ia32_Load(dbgi, block, base, index, new_mem);
}
- res_mode = mode_Iu;
}
set_irn_pinned(new_node, get_irn_pinned(node));
ir_node *mux_true = get_Mux_true(node);
ir_node *mux_false = get_Mux_false(node);
ir_node *cond;
- ir_node *new_mem;
dbg_info *dbgi;
ir_node *block;
ir_node *new_block;
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
new_block = be_transform_node(block);
- new_mem = be_transform_node(mem);
new_node = new_bd_ia32_SetccMem(dbgi, new_block, addr.base,
addr.index, addr.mem, flags, pnc);
set_address(new_node, &addr);
/* Note: fisttp ALWAYS pop the tos. We have to ensure here that the value is copied
if other users exists */
ir_node *vfisttp = new_bd_ia32_vfisttp(dbgi, block, base, index, mem, val);
- ir_node *value = new_r_Proj(block, vfisttp, mode_E, pn_ia32_vfisttp_res);
+ ir_node *value = new_r_Proj(vfisttp, mode_E, pn_ia32_vfisttp_res);
be_new_Keep(block, 1, &value);
- new_node = new_r_Proj(block, vfisttp, mode_M, pn_ia32_vfisttp_M);
+ new_node = new_r_Proj(vfisttp, mode_M, pn_ia32_vfisttp_M);
*fist = vfisttp;
} else {
ir_node *trunc_mode = ia32_new_Fpu_truncate(env_cg);
} else {
sub = new_node;
set_irn_mode(sub, mode_T);
- new_node = new_rd_Proj(NULL, block, sub, mode, pn_ia32_res);
+ new_node = new_rd_Proj(NULL, sub, mode, pn_ia32_res);
}
- eflags = new_rd_Proj(NULL, block, sub, mode_Iu, pn_ia32_Sub_flags);
+ eflags = new_rd_Proj(NULL, sub, mode_Iu, pn_ia32_Sub_flags);
dbgi = get_irn_dbg_info(psi);
sbb = new_bd_ia32_Sbb0(dbgi, block, eflags);
* @param new_mode IN/OUT for the mode of the constants, if NULL
* smallest possible mode will be used
*/
-static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **new_mode) {
+static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **new_mode)
+{
ir_entity *ent;
ir_mode *mode = *new_mode;
ir_type *tp;
ent = new_entity(get_glob_type(), ia32_unique_id(".LC%u"), tp);
set_entity_ld_ident(ent, get_entity_ident(ent));
- set_entity_visibility(ent, visibility_local);
- set_entity_variability(ent, variability_constant);
- set_entity_allocation(ent, allocation_static);
+ set_entity_visibility(ent, ir_visibility_local);
+ add_entity_linkage(ent, IR_LINKAGE_CONSTANT);
initializer = create_initializer_compound(2);
return ent;
}
/**
 * Possible transformations for creating a Setcc.
 */
enum setcc_transform_insn {
	SETCC_TR_ADD,   /**< add an immediate constant (steps[].val) */
	SETCC_TR_ADDxx, /**< Lea of the value with itself: x + x */
	SETCC_TR_LEA,   /**< Lea with scale (steps[].scale) and offset (steps[].val) */
	SETCC_TR_LEAxx, /**< Lea base+index on the same value, plus scale/offset */
	SETCC_TR_SHL,   /**< shift left by steps[].scale */
	SETCC_TR_NEG,   /**< arithmetic negate */
	SETCC_TR_NOT,   /**< bitwise complement */
	SETCC_TR_AND,   /**< bitwise and with immediate mask steps[].val */
	SETCC_TR_SET,   /**< the Setcc instruction itself (produces 0/1) */
	SETCC_TR_SBB,   /**< Sbb0 on the flags (see new_bd_ia32_Sbb0) */
};

typedef struct setcc_transform {
	unsigned num_steps; /**< number of valid entries in steps[] */
	unsigned permutate_cmp_ins; /**< if set, toggle the ins_permuted flag of the compare */
	pn_Cmp pnc; /**< (possibly negated) compare relation the Set tests */
	struct {
		enum setcc_transform_insn transform; /**< which instruction to create */
		long val; /**< immediate value / address-mode offset, if used */
		int scale; /**< address-mode scale / shift amount, if used */
	} steps[4];
} setcc_transform_t;
+
/**
 * Setcc can only handle 0 and 1 result.
 * Find a transformation that creates 0 and 1 from
 * tv_t and tv_f.
 *
 * NOTE: the consumer applies steps[] in REVERSE order
 * (steps[num_steps-1] first), so the SETCC_TR_SET stored last here is
 * the first instruction actually created.
 */
static void find_const_transform(pn_Cmp pnc, tarval *t, tarval *f,
                                 setcc_transform_t *res)
{
	unsigned step = 0;

	res->num_steps = 0;
	res->permutate_cmp_ins = 0;

	/* Normalize so that t is the non-zero (and bigger) of the two values;
	 * swapping t/f is compensated by negating the compare relation. */
	if (tarval_is_null(t)) {
		tarval *tmp = t;
		t = f;
		f = tmp;
		pnc = ia32_get_negated_pnc(pnc);
	} else if (tarval_cmp(t, f) == pn_Cmp_Lt) {
		// now, t is the bigger one
		tarval *tmp = t;
		t = f;
		f = tmp;
		pnc = ia32_get_negated_pnc(pnc);
	}
	res->pnc = pnc;

	/* f != 0: record a final "add f" step and reduce the problem to
	 * producing 0 / (t - f). */
	if (! tarval_is_null(f)) {
		tarval *t_sub = tarval_sub(t, f, NULL);

		t = t_sub;
		res->steps[step].transform = SETCC_TR_ADD;

		if (t == tarval_bad)
			panic("constant subtract failed");
		if (! tarval_is_long(f))
			panic("tarval is not long");

		res->steps[step].val = get_tarval_long(f);
		++step;
		/* f - f == 0; from here on only t matters */
		f = tarval_sub(f, f, NULL);
		assert(tarval_is_null(f));
	}

	/* 0/1: a plain Setcc suffices */
	if (tarval_is_one(t)) {
		res->steps[step].transform = SETCC_TR_SET;
		res->num_steps = ++step;
		return;
	}

	/* 0/-1: negate the Setcc result (remember: reverse execution order,
	 * so SET runs first, NEG second) */
	if (tarval_is_minus_one(t)) {
		res->steps[step].transform = SETCC_TR_NEG;
		++step;
		res->steps[step].transform = SETCC_TR_SET;
		res->num_steps = ++step;
		return;
	}
	if (tarval_is_long(t)) {
		long v = get_tarval_long(t);

		/* preset val so the SHL-vs-LEA decision below sees 0 unless a
		 * preceding ADD step gets folded in (--step) */
		res->steps[step].val = 0;
		switch (v) {
		case 9:
			/* a pending ADD can be folded into the Lea displacement */
			if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD)
				--step;
			res->steps[step].transform = SETCC_TR_LEAxx;
			res->steps[step].scale = 3; /* (a << 3) + a */
			break;
		case 8:
			if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD)
				--step;
			/* no displacement -> plain shift; otherwise a Lea folds it in */
			res->steps[step].transform = res->steps[step].val == 0 ? SETCC_TR_SHL : SETCC_TR_LEA;
			res->steps[step].scale = 3; /* (a << 3) */
			break;
		case 5:
			if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD)
				--step;
			res->steps[step].transform = SETCC_TR_LEAxx;
			res->steps[step].scale = 2; /* (a << 2) + a */
			break;
		case 4:
			if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD)
				--step;
			res->steps[step].transform = res->steps[step].val == 0 ? SETCC_TR_SHL : SETCC_TR_LEA;
			res->steps[step].scale = 2; /* (a << 2) */
			break;
		case 3:
			if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD)
				--step;
			res->steps[step].transform = SETCC_TR_LEAxx;
			res->steps[step].scale = 1; /* (a << 1) + a */
			break;
		case 2:
			if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD)
				--step;
			res->steps[step].transform = res->steps[step].val == 0 ? SETCC_TR_SHL : SETCC_TR_LEA;
			res->steps[step].scale = 1; /* (a << 1) */
			break;
		case 1:
			/* t == 1 should already be caught by tarval_is_one above;
			 * nothing left to do */
			res->num_steps = step;
			return;
		default:
			if (! tarval_is_single_bit(t)) {
				/* 0/v via 0/-1: SET, NEG (gives 0/-1), then AND v */
				res->steps[step].transform = SETCC_TR_AND;
				res->steps[step].val = v;
				++step;
				res->steps[step].transform = SETCC_TR_NEG;
			} else {
				/* 0/2^k via a shift of the 0/1 Setcc result */
				int v = get_tarval_lowest_bit(t);
				assert(v >= 0);

				res->steps[step].transform = SETCC_TR_SHL;
				res->steps[step].scale = v;
			}
		}
		++step;
		res->steps[step].transform = SETCC_TR_SET;
		res->num_steps = ++step;
		return;
	}
	panic("tarval is not long");
}
+
/**
* Transforms a Mux node into some code sequence.
*
}
am.ls_mode = new_mode;
- am.addr.base = noreg_GP;
+ am.addr.base = get_symconst_base();
am.addr.index = new_node;
am.addr.mem = nomem;
am.addr.offset = 0;
load = new_bd_ia32_vfld(dbgi, block, am.addr.base, am.addr.index, am.addr.mem, new_mode);
set_am_attributes(load, &am);
- return new_rd_Proj(NULL, block, load, mode_vfp, pn_ia32_res);
+ return new_rd_Proj(NULL, load, mode_vfp, pn_ia32_res);
}
panic("cannot transform floating point Mux");
if (is_Const(mux_true) && is_Const(mux_false)) {
/* both are const, good */
- if (is_Const_1(mux_true) && is_Const_0(mux_false)) {
- new_node = create_set_32bit(dbgi, new_block, flags, pnc, node);
- } else if (is_Const_0(mux_true) && is_Const_1(mux_false)) {
- pnc = ia32_get_negated_pnc(pnc);
- new_node = create_set_32bit(dbgi, new_block, flags, pnc, node);
- } else {
- /* Not that simple. */
- goto need_cmov;
+ tarval *tv_true = get_Const_tarval(mux_true);
+ tarval *tv_false = get_Const_tarval(mux_false);
+ setcc_transform_t res;
+ int step;
+
+ find_const_transform(pnc, tv_true, tv_false, &res);
+ new_node = node;
+ if (res.permutate_cmp_ins) {
+ ia32_attr_t *attr = get_ia32_attr(flags);
+ attr->data.ins_permuted ^= 1;
+ }
+ for (step = (int)res.num_steps - 1; step >= 0; --step) {
+ ir_node *imm;
+
+ switch (res.steps[step].transform) {
+ case SETCC_TR_ADD:
+ imm = ia32_immediate_from_long(res.steps[step].val);
+ new_node = new_bd_ia32_Add(dbgi, new_block, noreg_GP, noreg_GP, nomem, new_node, imm);
+ break;
+ case SETCC_TR_ADDxx:
+ new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node);
+ break;
+ case SETCC_TR_LEA:
+ new_node = new_bd_ia32_Lea(dbgi, new_block, noreg_GP, new_node);
+ set_ia32_am_scale(new_node, res.steps[step].scale);
+ set_ia32_am_offs_int(new_node, res.steps[step].val);
+ break;
+ case SETCC_TR_LEAxx:
+ new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node);
+ set_ia32_am_scale(new_node, res.steps[step].scale);
+ set_ia32_am_offs_int(new_node, res.steps[step].val);
+ break;
+ case SETCC_TR_SHL:
+ imm = ia32_immediate_from_long(res.steps[step].scale);
+ new_node = new_bd_ia32_Shl(dbgi, new_block, new_node, imm);
+ break;
+ case SETCC_TR_NEG:
+ new_node = new_bd_ia32_Neg(dbgi, new_block, new_node);
+ break;
+ case SETCC_TR_NOT:
+ new_node = new_bd_ia32_Not(dbgi, new_block, new_node);
+ break;
+ case SETCC_TR_AND:
+ imm = ia32_immediate_from_long(res.steps[step].val);
+ new_node = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, new_node, imm);
+ break;
+ case SETCC_TR_SET:
+ new_node = create_set_32bit(dbgi, new_block, flags, res.pnc, new_node);
+ break;
+ case SETCC_TR_SBB:
+ new_node = new_bd_ia32_Sbb0(dbgi, new_block, flags);
+ break;
+ default:
+ panic("unknown setcc transform");
+ }
}
} else {
-need_cmov:
new_node = create_CMov(node, cond, flags, pnc);
}
return new_node;
}
SET_IA32_ORIG_NODE(load, node);
- return new_r_Proj(block, load, mode_Iu, pn_ia32_Load_res);
+ return new_r_Proj(load, mode_Iu, pn_ia32_Load_res);
}
/**
set_ia32_op_type(load, ia32_AddrModeS);
SET_IA32_ORIG_NODE(load, node);
- new_node = new_r_Proj(block, load, mode_E, pn_ia32_vfld_res);
+ new_node = new_r_Proj(load, mode_E, pn_ia32_vfld_res);
return new_node;
}
ia32_address_t *addr = &am.addr;
fild = new_bd_ia32_vfild(dbgi, block, addr->base, addr->index, addr->mem);
- new_node = new_r_Proj(block, fild, mode_vfp, pn_ia32_vfild_res);
+ new_node = new_r_Proj(fild, mode_vfp, pn_ia32_vfild_res);
set_am_attributes(fild, &am);
SET_IA32_ORIG_NODE(fild, node);
set_ia32_op_type(fild, ia32_AddrModeS);
set_ia32_ls_mode(fild, store_mode);
- new_node = new_r_Proj(block, fild, mode_vfp, pn_ia32_vfild_res);
+ new_node = new_r_Proj(fild, mode_vfp, pn_ia32_vfild_res);
return new_node;
}
set_ia32_op_type(fld, ia32_AddrModeS);
set_ia32_use_frame(fld);
- mproj = new_r_Proj(block, fld, mode_M, pn_ia32_vfld_M);
- fld = new_r_Proj(block, fld, mode_vfp, pn_ia32_vfld_res);
+ mproj = new_r_Proj(fld, mode_M, pn_ia32_vfld_M);
+ fld = new_r_Proj(fld, mode_vfp, pn_ia32_vfld_res);
/* create a new barrier */
arity = get_irn_arity(barrier);
new_barrier = new_ir_node(dbgi, irg, block,
get_irn_op(barrier), get_irn_mode(barrier),
arity, in);
- copy_node_attr(barrier, new_barrier);
+ copy_node_attr(irg, barrier, new_barrier);
be_duplicate_deps(barrier, new_barrier);
be_set_transformed_node(barrier, new_barrier);
* and fix this later */
phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node),
get_irn_in(node) + 1);
- copy_node_attr(node, phi);
+ copy_node_attr(irg, node, phi);
be_duplicate_deps(node, phi);
arch_set_out_register_req(phi, 0, req);
if (! is_Proj(res)) {
sub = res;
set_irn_mode(sub, mode_T);
- res = new_rd_Proj(NULL, block, sub, mode_Iu, pn_ia32_res);
+ res = new_rd_Proj(NULL, sub, mode_Iu, pn_ia32_res);
} else {
sub = get_Proj_pred(res);
}
- flags = new_rd_Proj(NULL, block, sub, mode_Iu, pn_ia32_Sub_flags);
+ flags = new_rd_Proj(NULL, sub, mode_Iu, pn_ia32_Sub_flags);
new_node = new_bd_ia32_Jcc(dbgi, block, flags, pn_Cmp_Lt | ia32_pn_Cmp_unsigned);
SET_IA32_ORIG_NODE(new_node, node);
} else {
SET_IA32_ORIG_NODE(fild, node);
- res = new_r_Proj(block, fild, mode_vfp, pn_ia32_vfild_res);
+ res = new_r_Proj(fild, mode_vfp, pn_ia32_vfild_res);
if (! mode_is_signed(get_irn_mode(val_high))) {
ia32_address_mode_t am;
ir_node *count = ia32_create_Immediate(NULL, 0, 31);
ir_node *fadd;
- am.addr.base = noreg_GP;
+ am.addr.base = get_symconst_base();
am.addr.index = new_bd_ia32_Shr(dbgi, block, new_val_high, count);
am.addr.mem = nomem;
am.addr.offset = 0;
set_am_attributes(fadd, &am);
set_irn_mode(fadd, mode_T);
- res = new_rd_Proj(NULL, block, fadd, mode_vfp, pn_ia32_res);
+ res = new_rd_Proj(NULL, fadd, mode_vfp, pn_ia32_res);
}
return res;
}
/**
 * Transform function for nodes that must not appear at this stage.
 * NOTE(review): the trailing "return NULL" was dropped — this assumes
 * panic() never returns; confirm it is declared noreturn.
 */
static ir_node *bad_transform(ir_node *node)
{
	panic("No transform function for %+F available.", node);
}
static ir_node *gen_Proj_l_FloattoLL(ir_node *node)
assert(pn == pn_ia32_l_FloattoLL_res_low);
}
- proj = new_r_Proj(block, load, mode_Iu, pn_ia32_Load_res);
+ proj = new_r_Proj(load, mode_Iu, pn_ia32_Load_res);
return proj;
}
*/
static ir_node *gen_Proj_be_AddSP(ir_node *node)
{
- ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
if (proj == pn_be_AddSP_sp) {
- ir_node *res = new_rd_Proj(dbgi, block, new_pred, mode_Iu,
+ ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
pn_ia32_SubSP_stack);
arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]);
return res;
} else if (proj == pn_be_AddSP_res) {
- return new_rd_Proj(dbgi, block, new_pred, mode_Iu,
+ return new_rd_Proj(dbgi, new_pred, mode_Iu,
pn_ia32_SubSP_addr);
} else if (proj == pn_be_AddSP_M) {
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_SubSP_M);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_SubSP_M);
}
panic("No idea how to transform proj->AddSP");
*/
static ir_node *gen_Proj_be_SubSP(ir_node *node)
{
- ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
if (proj == pn_be_SubSP_sp) {
- ir_node *res = new_rd_Proj(dbgi, block, new_pred, mode_Iu,
+ ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
pn_ia32_AddSP_stack);
arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]);
return res;
} else if (proj == pn_be_SubSP_M) {
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_AddSP_M);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_AddSP_M);
}
panic("No idea how to transform proj->SubSP");
*/
if (is_Load(pred) && proj == pn_Load_M && get_irn_n_edges(pred) > 1) {
ir_node *res;
- ir_node *old_block = get_nodes_block(node);
/* this is needed, because sometimes we have loops that are only
reachable through the ProjM */
be_enqueue_preds(node);
/* do it in 2 steps, to silence firm verifier */
- res = new_rd_Proj(dbgi, old_block, pred, mode_M, pn_Load_M);
+ res = new_rd_Proj(dbgi, pred, mode_M, pn_Load_M);
set_Proj_proj(res, pn_ia32_mem);
return res;
}
if (is_ia32_Load(new_pred)) {
switch (proj) {
case pn_Load_res:
- return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_ia32_Load_res);
+ return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_Load_res);
case pn_Load_M:
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_Load_M);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Load_M);
case pn_Load_X_regular:
return new_rd_Jmp(dbgi, block);
case pn_Load_X_except:
/* This Load might raise an exception. Mark it. */
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_Load_X_exc);
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Load_X_exc);
default:
break;
}
is_ia32_Conv_I2I8Bit(new_pred)) {
set_irn_mode(new_pred, mode_T);
if (proj == pn_Load_res) {
- return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_ia32_res);
+ return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_res);
} else if (proj == pn_Load_M) {
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_mem);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_mem);
}
} else if (is_ia32_xLoad(new_pred)) {
switch (proj) {
case pn_Load_res:
- return new_rd_Proj(dbgi, block, new_pred, mode_xmm, pn_ia32_xLoad_res);
+ return new_rd_Proj(dbgi, new_pred, mode_xmm, pn_ia32_xLoad_res);
case pn_Load_M:
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_xLoad_M);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_xLoad_M);
case pn_Load_X_regular:
return new_rd_Jmp(dbgi, block);
case pn_Load_X_except:
/* This Load might raise an exception. Mark it. */
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_xLoad_X_exc);
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xLoad_X_exc);
default:
break;
}
} else if (is_ia32_vfld(new_pred)) {
switch (proj) {
case pn_Load_res:
- return new_rd_Proj(dbgi, block, new_pred, mode_vfp, pn_ia32_vfld_res);
+ return new_rd_Proj(dbgi, new_pred, mode_vfp, pn_ia32_vfld_res);
case pn_Load_M:
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_vfld_M);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfld_M);
case pn_Load_X_regular:
return new_rd_Jmp(dbgi, block);
case pn_Load_X_except:
/* This Load might raise an exception. Mark it. */
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_vfld_X_exc);
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfld_X_exc);
default:
break;
}
if (proj != pn_Load_M) {
panic("internal error: transformed node not a Load");
}
- return new_rd_Proj(dbgi, block, new_pred, mode_M, 1);
+ return new_rd_Proj(dbgi, new_pred, mode_M, 1);
}
panic("No idea how to transform proj");
case iro_Div:
switch (proj) {
case pn_Div_M:
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_Div_M);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Div_M);
case pn_Div_res:
- return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_ia32_Div_div_res);
+ return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_Div_div_res);
case pn_Div_X_regular:
return new_rd_Jmp(dbgi, block);
case pn_Div_X_except:
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_Div_X_exc);
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_exc);
default:
break;
}
case iro_Mod:
switch (proj) {
case pn_Mod_M:
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_Div_M);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Div_M);
case pn_Mod_res:
- return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_ia32_Div_mod_res);
+ return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_Div_mod_res);
case pn_Mod_X_except:
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_Div_X_exc);
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_exc);
default:
break;
}
case iro_DivMod:
switch (proj) {
case pn_DivMod_M:
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_Div_M);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Div_M);
case pn_DivMod_res_div:
- return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_ia32_Div_div_res);
+ return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_Div_div_res);
case pn_DivMod_res_mod:
- return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_ia32_Div_mod_res);
+ return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_Div_mod_res);
case pn_DivMod_X_regular:
return new_rd_Jmp(dbgi, block);
case pn_DivMod_X_except:
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_Div_X_exc);
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_exc);
default:
break;
}
*/
static ir_node *gen_Proj_CopyB(ir_node *node)
{
- ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
dbg_info *dbgi = get_irn_dbg_info(node);
switch (proj) {
case pn_CopyB_M_regular:
if (is_ia32_CopyB_i(new_pred)) {
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_CopyB_i_M);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_CopyB_i_M);
} else if (is_ia32_CopyB(new_pred)) {
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_CopyB_M);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_CopyB_M);
}
break;
default:
*/
static ir_node *gen_Proj_Quot(ir_node *node)
{
- ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
dbg_info *dbgi = get_irn_dbg_info(node);
switch (proj) {
case pn_Quot_M:
if (is_ia32_xDiv(new_pred)) {
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_xDiv_M);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_xDiv_M);
} else if (is_ia32_vfdiv(new_pred)) {
- return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_vfdiv_M);
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfdiv_M);
}
break;
case pn_Quot_res:
if (is_ia32_xDiv(new_pred)) {
- return new_rd_Proj(dbgi, block, new_pred, mode_xmm, pn_ia32_xDiv_res);
+ return new_rd_Proj(dbgi, new_pred, mode_xmm, pn_ia32_xDiv_res);
} else if (is_ia32_vfdiv(new_pred)) {
- return new_rd_Proj(dbgi, block, new_pred, mode_vfp, pn_ia32_vfdiv_res);
+ return new_rd_Proj(dbgi, new_pred, mode_vfp, pn_ia32_vfdiv_res);
}
break;
case pn_Quot_X_regular:
/**
* Transform Builtin trap
*/
-static ir_node *gen_trap(ir_node *node) {
+static ir_node *gen_trap(ir_node *node)
+{
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *mem = be_transform_node(get_Builtin_mem(node));
/**
* Transform Builtin debugbreak
*/
-static ir_node *gen_debugbreak(ir_node *node) {
+static ir_node *gen_debugbreak(ir_node *node)
+{
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *mem = be_transform_node(get_Builtin_mem(node));
/**
* Transform Builtin return_address
*/
-static ir_node *gen_return_address(ir_node *node) {
+static ir_node *gen_return_address(ir_node *node)
+{
ir_node *param = get_Builtin_param(node, 0);
ir_node *frame = get_Builtin_param(node, 1);
dbg_info *dbgi = get_irn_dbg_info(node);
}
SET_IA32_ORIG_NODE(load, node);
- return new_r_Proj(block, load, mode_Iu, pn_ia32_Load_res);
+ return new_r_Proj(load, mode_Iu, pn_ia32_Load_res);
}
/**
* Transform Builtin frame_address
*/
-static ir_node *gen_frame_address(ir_node *node) {
+static ir_node *gen_frame_address(ir_node *node)
+{
ir_node *param = get_Builtin_param(node, 0);
ir_node *frame = get_Builtin_param(node, 1);
dbg_info *dbgi = get_irn_dbg_info(node);
}
SET_IA32_ORIG_NODE(load, node);
- return new_r_Proj(block, load, mode_Iu, pn_ia32_Load_res);
+ return new_r_Proj(load, mode_Iu, pn_ia32_Load_res);
}
/**
* Transform Builtin frame_address
*/
-static ir_node *gen_prefetch(ir_node *node) {
+static ir_node *gen_prefetch(ir_node *node)
+{
dbg_info *dbgi;
ir_node *ptr, *block, *mem, *base, *index;
ir_node *param, *new_node;
SET_IA32_ORIG_NODE(new_node, node);
be_dep_on_frame(new_node);
- return new_r_Proj(block, new_node, mode_M, pn_ia32_Prefetch_M);
+ return new_r_Proj(new_node, mode_M, pn_ia32_Prefetch_M);
}
/**
/* bsf x */
if (get_irn_mode(real) != mode_T) {
set_irn_mode(real, mode_T);
- bsf = new_r_Proj(block, real, mode_Iu, pn_ia32_res);
+ bsf = new_r_Proj(real, mode_Iu, pn_ia32_res);
}
- flag = new_r_Proj(block, real, mode_b, pn_ia32_flags);
+ flag = new_r_Proj(real, mode_b, pn_ia32_flags);
/* sete */
set = new_bd_ia32_Setcc(dbgi, block, flag, pn_Cmp_Eq);
/**
* Transform builtin popcount
*/
-static ir_node *gen_popcount(ir_node *node) {
+static ir_node *gen_popcount(ir_node *node)
+{
ir_node *param = get_Builtin_param(node, 0);
dbg_info *dbgi = get_irn_dbg_info(node);
/**
* Transform builtin byte swap.
*/
-static ir_node *gen_bswap(ir_node *node) {
+static ir_node *gen_bswap(ir_node *node)
+{
ir_node *param = be_transform_node(get_Builtin_param(node, 0));
dbg_info *dbgi = get_irn_dbg_info(node);
/**
* Transform builtin outport.
*/
-static ir_node *gen_outport(ir_node *node) {
+static ir_node *gen_outport(ir_node *node)
+{
ir_node *port = create_immediate_or_transform(get_Builtin_param(node, 0), 0);
ir_node *oldv = get_Builtin_param(node, 1);
ir_mode *mode = get_irn_mode(oldv);
/**
* Transform builtin inport.
*/
-static ir_node *gen_inport(ir_node *node) {
+static ir_node *gen_inport(ir_node *node)
+{
ir_type *tp = get_Builtin_type(node);
ir_type *rstp = get_method_res_type(tp, 0);
ir_mode *mode = get_type_mode(rstp);
/**
* Transform a builtin inner trampoline
*/
-static ir_node *gen_inner_trampoline(ir_node *node) {
+static ir_node *gen_inner_trampoline(ir_node *node)
+{
ir_node *ptr = get_Builtin_param(node, 0);
ir_node *callee = get_Builtin_param(node, 1);
ir_node *env = be_transform_node(get_Builtin_param(node, 2));
/**
* Transform Builtin node.
*/
-static ir_node *gen_Builtin(ir_node *node) {
+static ir_node *gen_Builtin(ir_node *node)
+{
ir_builtin_kind kind = get_Builtin_kind(node);
switch (kind) {
/**
* Transform Proj(Builtin) node.
*/
-static ir_node *gen_Proj_Builtin(ir_node *proj) {
+static ir_node *gen_Proj_Builtin(ir_node *proj)
+{
ir_node *node = get_Proj_pred(proj);
ir_node *new_node = be_transform_node(node);
ir_builtin_kind kind = get_Builtin_kind(node);
return new_node;
case ir_bk_inport:
if (get_Proj_proj(proj) == pn_Builtin_1_result) {
- return new_r_Proj(get_nodes_block(new_node),
- new_node, get_irn_mode(proj), pn_ia32_Inport_res);
+ return new_r_Proj(new_node, get_irn_mode(proj), pn_ia32_Inport_res);
} else {
assert(get_Proj_proj(proj) == pn_Builtin_M);
- return new_r_Proj(get_nodes_block(new_node),
- new_node, mode_M, pn_ia32_Inport_M);
+ return new_r_Proj(new_node, mode_M, pn_ia32_Inport_M);
}
case ir_bk_inner_trampoline:
if (get_Proj_proj(proj) == pn_Builtin_1_result) {
*/
static ir_node *gen_Proj_be_Call(ir_node *node)
{
- ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *call = get_Proj_pred(node);
ir_node *new_call = be_transform_node(call);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *res;
if (proj == pn_be_Call_M_regular) {
- return new_rd_Proj(dbgi, block, new_call, mode_M, n_ia32_Call_mem);
+ return new_rd_Proj(dbgi, new_call, mode_M, n_ia32_Call_mem);
}
/* transform call modes */
if (mode_is_data(mode)) {
assert(i < n_outs);
}
- res = new_rd_Proj(dbgi, block, new_call, mode, proj);
+ res = new_rd_Proj(dbgi, new_call, mode, proj);
/* TODO arch_set_irn_register() only operates on Projs, need variant with index */
switch (proj) {
*/
static ir_node *gen_Proj_Bound(ir_node *node)
{
- ir_node *new_node, *block;
+ ir_node *new_node;
ir_node *pred = get_Proj_pred(node);
switch (get_Proj_proj(node)) {
return be_transform_node(get_Bound_mem(pred));
case pn_Bound_X_regular:
new_node = be_transform_node(pred);
- block = get_nodes_block(new_node);
- return new_r_Proj(block, new_node, mode_X, pn_ia32_Jcc_true);
+ return new_r_Proj(new_node, mode_X, pn_ia32_Jcc_true);
case pn_Bound_X_except:
new_node = be_transform_node(pred);
- block = get_nodes_block(new_node);
- return new_r_Proj(block, new_node, mode_X, pn_ia32_Jcc_false);
+ return new_r_Proj(new_node, mode_X, pn_ia32_Jcc_false);
case pn_Bound_res:
return be_transform_node(get_Bound_index(pred));
default:
ir_mode *mode = get_irn_mode(node);
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
- ir_node *block = get_nodes_block(new_pred);
long pos = get_Proj_proj(node);
if (mode == mode_M) {
panic("unexpected proj mode at ASM");
}
- return new_r_Proj(block, new_pred, mode, pos);
+ return new_r_Proj(new_pred, mode, pos);
}
/**
ir_mode *mode = get_irn_mode(node);
if (ia32_mode_needs_gp_reg(mode)) {
ir_node *new_pred = be_transform_node(pred);
- ir_node *block = be_transform_node(get_nodes_block(node));
- ir_node *new_proj = new_r_Proj(block, new_pred,
- mode_Iu, get_Proj_proj(node));
+ ir_node *new_proj = new_r_Proj(new_pred, mode_Iu,
+ get_Proj_proj(node));
new_proj->node_nr = node->node_nr;
return new_proj;
}
clear_irp_opcodes_generic_func();
#define GEN(a) { be_transform_func *func = gen_##a; op_##a->ops.generic = (op_func) func; }
-#define BAD(a) op_##a->ops.generic = (op_func)bad_transform
-
- GEN(Add);
- GEN(Sub);
- GEN(Mul);
- GEN(Mulh);
- GEN(And);
- GEN(Or);
- GEN(Eor);
-
- GEN(Shl);
- GEN(Shr);
- GEN(Shrs);
- GEN(Rotl);
-
- GEN(Quot);
-
- GEN(Div);
- GEN(Mod);
- GEN(DivMod);
-
- GEN(Minus);
- GEN(Conv);
- GEN(Abs);
- GEN(Not);
-
- GEN(Load);
- GEN(Store);
- GEN(Cond);
-
- GEN(Cmp);
- GEN(ASM);
- GEN(CopyB);
- GEN(Mux);
- GEN(Proj);
- GEN(Phi);
- GEN(Jmp);
- GEN(IJmp);
- GEN(Bound);
+#define BAD(a) { op_##a->ops.generic = (op_func)bad_transform; }
+
+ GEN(Add)
+ GEN(Sub)
+ GEN(Mul)
+ GEN(Mulh)
+ GEN(And)
+ GEN(Or)
+ GEN(Eor)
+
+ GEN(Shl)
+ GEN(Shr)
+ GEN(Shrs)
+ GEN(Rotl)
+
+ GEN(Quot)
+
+ GEN(Div)
+ GEN(Mod)
+ GEN(DivMod)
+
+ GEN(Minus)
+ GEN(Conv)
+ GEN(Abs)
+ GEN(Not)
+
+ GEN(Load)
+ GEN(Store)
+ GEN(Cond)
+
+ GEN(Cmp)
+ GEN(ASM)
+ GEN(CopyB)
+ GEN(Mux)
+ GEN(Proj)
+ GEN(Phi)
+ GEN(Jmp)
+ GEN(IJmp)
+ GEN(Bound)
/* transform ops from intrinsic lowering */
- GEN(ia32_l_Add);
- GEN(ia32_l_Adc);
- GEN(ia32_l_Mul);
- GEN(ia32_l_IMul);
- GEN(ia32_l_ShlDep);
- GEN(ia32_l_ShrDep);
- GEN(ia32_l_SarDep);
- GEN(ia32_l_ShlD);
- GEN(ia32_l_ShrD);
- GEN(ia32_l_Sub);
- GEN(ia32_l_Sbb);
- GEN(ia32_l_LLtoFloat);
- GEN(ia32_l_FloattoLL);
-
- GEN(Const);
- GEN(SymConst);
- GEN(Unknown);
+ GEN(ia32_l_Add)
+ GEN(ia32_l_Adc)
+ GEN(ia32_l_Mul)
+ GEN(ia32_l_IMul)
+ GEN(ia32_l_ShlDep)
+ GEN(ia32_l_ShrDep)
+ GEN(ia32_l_SarDep)
+ GEN(ia32_l_ShlD)
+ GEN(ia32_l_ShrD)
+ GEN(ia32_l_Sub)
+ GEN(ia32_l_Sbb)
+ GEN(ia32_l_LLtoFloat)
+ GEN(ia32_l_FloattoLL)
+
+ GEN(Const)
+ GEN(SymConst)
+ GEN(Unknown)
/* we should never see these nodes */
- BAD(Raise);
- BAD(Sel);
- BAD(InstOf);
- BAD(Cast);
- BAD(Free);
- BAD(Tuple);
- BAD(Id);
- //BAD(Bad);
- BAD(Confirm);
- BAD(Filter);
- BAD(CallBegin);
- BAD(EndReg);
- BAD(EndExcept);
+ BAD(Raise)
+ BAD(Sel)
+ BAD(InstOf)
+ BAD(Cast)
+ BAD(Free)
+ BAD(Tuple)
+ BAD(Id)
+ //BAD(Bad)
+ BAD(Confirm)
+ BAD(Filter)
+ BAD(CallBegin)
+ BAD(EndReg)
+ BAD(EndExcept)
/* handle builtins */
- GEN(Builtin);
+ GEN(Builtin)
/* handle generic backend nodes */
- GEN(be_FrameAddr);
- GEN(be_Call);
- GEN(be_IncSP);
- GEN(be_Return);
- GEN(be_AddSP);
- GEN(be_SubSP);
- GEN(be_Copy);
+ GEN(be_FrameAddr)
+ GEN(be_Call)
+ GEN(be_IncSP)
+ GEN(be_Return)
+ GEN(be_AddSP)
+ GEN(be_SubSP)
+ GEN(be_Copy)
#undef GEN
#undef BAD
{
ia32_code_gen_t *cg = env_cg;
- cg->unknown_gp = be_pre_transform_node(cg->unknown_gp);
- cg->unknown_vfp = be_pre_transform_node(cg->unknown_vfp);
- cg->unknown_xmm = be_pre_transform_node(cg->unknown_xmm);
cg->noreg_gp = be_pre_transform_node(cg->noreg_gp);
cg->noreg_vfp = be_pre_transform_node(cg->noreg_vfp);
cg->noreg_xmm = be_pre_transform_node(cg->noreg_xmm);
}
block = get_nodes_block(node);
- in[0] = new_r_Proj(block, node, arch_register_class_mode(cls), i);
+ in[0] = new_r_Proj(node, arch_register_class_mode(cls), i);
if (last_keep != NULL) {
be_Keep_add_node(last_keep, cls, in[0]);
} else {
* The ABI requires that the results are in st0, copy them
* to a xmm register.
*/
-static void postprocess_fp_call_results(void) {
+static void postprocess_fp_call_results(void)
+{
int i;
for (i = ARR_LEN(call_list) - 1; i >= 0; --i) {
ir_node *block = get_nodes_block(call);
ir_node *frame = get_irg_frame(current_ir_graph);
ir_node *old_mem = be_get_Proj_for_pn(call, pn_ia32_Call_M);
- ir_node *call_mem = new_r_Proj(block, call, mode_M, pn_ia32_Call_M);
+ ir_node *call_mem = new_r_Proj(call, mode_M, pn_ia32_Call_M);
ir_node *vfst, *xld, *new_mem;
/* store st(0) on stack */
set_ia32_op_type(xld, ia32_AddrModeS);
set_ia32_use_frame(xld);
- new_res = new_r_Proj(block, xld, mode, pn_ia32_xLoad_res);
- new_mem = new_r_Proj(block, xld, mode_M, pn_ia32_xLoad_M);
+ new_res = new_r_Proj(xld, mode, pn_ia32_xLoad_res);
+ new_mem = new_r_Proj(xld, mode_M, pn_ia32_xLoad_M);
if (old_mem != NULL) {
edges_reroute(old_mem, new_mem, current_ir_graph);
call_list = NEW_ARR_F(ir_node *, 0);
call_types = NEW_ARR_F(ir_type *, 0);
- be_transform_graph(cg->birg, ia32_pretransform_node);
+ be_transform_graph(cg->irg, ia32_pretransform_node);
if (ia32_cg_config.use_sse2)
postprocess_fp_call_results();