};
-typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_graph *irg, ir_node *block);
+typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);
static inline ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
create_const_node_func func,
return *place;
block = get_irg_start_block(cg->irg);
- res = func(NULL, cg->irg, block);
+ res = func(NULL, block);
arch_set_irn_register(res, reg);
*place = res;
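/* Note: create_const lazily materializes one such node per irg: the first
 * call builds it in the start block, pins its register and caches it in
 * *place; later calls just return the cached node (a sketch of the elided
 * tail of the function). */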
/* Creates the unique per-irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->noreg_gp, new_rd_ia32_NoReg_GP,
+ return create_const(cg, &cg->noreg_gp, new_bd_ia32_NoReg_GP,
&ia32_gp_regs[REG_GP_NOREG]);
}
ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->noreg_vfp, new_rd_ia32_NoReg_VFP,
+ return create_const(cg, &cg->noreg_vfp, new_bd_ia32_NoReg_VFP,
&ia32_vfp_regs[REG_VFP_NOREG]);
}
ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->noreg_xmm, new_rd_ia32_NoReg_XMM,
+ return create_const(cg, &cg->noreg_xmm, new_bd_ia32_NoReg_XMM,
&ia32_xmm_regs[REG_XMM_NOREG]);
}
ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->unknown_gp, new_rd_ia32_Unknown_GP,
+ return create_const(cg, &cg->unknown_gp, new_bd_ia32_Unknown_GP,
&ia32_gp_regs[REG_GP_UKNWN]);
}
ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->unknown_vfp, new_rd_ia32_Unknown_VFP,
+ return create_const(cg, &cg->unknown_vfp, new_bd_ia32_Unknown_VFP,
&ia32_vfp_regs[REG_VFP_UKNWN]);
}
ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->unknown_xmm, new_rd_ia32_Unknown_XMM,
+ return create_const(cg, &cg->unknown_xmm, new_bd_ia32_Unknown_XMM,
&ia32_xmm_regs[REG_XMM_UKNWN]);
}
ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->fpu_trunc_mode, new_rd_ia32_ChangeCW,
+ return create_const(cg, &cg->fpu_trunc_mode, new_bd_ia32_ChangeCW,
&ia32_fp_cw_regs[REG_FPCW]);
}
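/* Usage sketch: the NoReg nodes serve as placeholders for unused base/index
 * operands in address modes (see the new_bd_ia32_Load calls below), the
 * Unknown nodes stand for undefined values, and Fpu_truncate yields the
 * cached ChangeCW node carrying the truncating FPU rounding mode. */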
}
/* unknowns should have been transformed already */
- assert(!is_Unknown(node));
return arch_no_register_req;
}
-static void ia32_set_irn_reg(ir_node *irn, const arch_register_t *reg)
-{
- int pos = 0;
-
- if (get_irn_mode(irn) == mode_X) {
- return;
- }
-
- if (is_Proj(irn)) {
- pos = get_Proj_proj(irn);
- irn = skip_Proj(irn);
- }
-
- if (is_ia32_irn(irn)) {
- const arch_register_t **slots;
-
- slots = get_ia32_slots(irn);
- slots[pos] = reg;
- } else {
- ia32_set_firm_reg(irn, reg, cur_reg_set);
- }
-}
-
-static const arch_register_t *ia32_get_irn_reg(const ir_node *irn)
-{
- int pos = 0;
-
- if (is_Proj(irn)) {
- if (get_irn_mode(irn) == mode_X) {
- return NULL;
- }
-
- pos = get_Proj_proj(irn);
- irn = skip_Proj_const(irn);
- }
-
- if (is_ia32_irn(irn)) {
- const arch_register_t **slots = get_ia32_slots(irn);
- assert(pos < get_ia32_n_res(irn));
- return slots[pos];
- } else {
- return ia32_get_firm_reg(irn, cur_reg_set);
- }
-}
-
static arch_irn_class_t ia32_classify(const ir_node *irn) {
arch_irn_class_t classification = 0;
return classification;
}
-static arch_irn_flags_t ia32_get_flags(const ir_node *irn) {
- arch_irn_flags_t flags = arch_irn_flags_none;
-
- if (is_Unknown(irn))
- return arch_irn_flags_ignore;
-
- if(is_Proj(irn) && mode_is_datab(get_irn_mode(irn))) {
- ir_node *pred = get_Proj_pred(irn);
-
- if(is_ia32_irn(pred)) {
- flags = get_ia32_out_flags(pred, get_Proj_proj(irn));
- }
-
- irn = pred;
- }
-
- if (is_ia32_irn(irn)) {
- flags |= get_ia32_flags(irn);
- }
-
- return flags;
-}
-
/**
* The IA32 ABI callback object.
*/
return 0;
}
-/**
- * Put all registers which are saved by the prologue/epilogue in a set.
- *
- * @param self The callback object.
- * @param s The result set.
- */
-static void ia32_abi_dont_save_regs(void *self, pset *s)
-{
- ia32_abi_env_t *env = self;
- if(env->flags.try_omit_fp)
- pset_insert_ptr(s, env->aenv->bp);
-}
-
/**
* Generate the routine prologue.
*
const arch_env_t *arch_env = env->aenv;
if (! env->flags.try_omit_fp) {
- ir_graph *irg =env->irg;
+ ir_graph *irg = env->irg;
ir_node *bl = get_irg_start_block(irg);
ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
- ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *push;
- /* ALL nodes representing bp must be set to ignore. */
- be_node_set_flags(get_Proj_pred(curr_bp), BE_OUT_POS(get_Proj_proj(curr_bp)), arch_irn_flags_ignore);
+ /* mark bp register as ignore */
+ be_set_constr_single_reg_out(get_Proj_pred(curr_bp),
+ get_Proj_proj(curr_bp), arch_env->bp, arch_register_req_type_ignore);
/* push ebp */
- push = new_rd_ia32_Push(NULL, irg, bl, noreg, noreg, *mem, curr_bp, curr_sp);
+ push = new_bd_ia32_Push(NULL, bl, noreg, noreg, *mem, curr_bp, curr_sp);
curr_sp = new_r_Proj(irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
*mem = new_r_Proj(irg, bl, push, mode_M, pn_ia32_Push_M);
/* the push must produce SP as its out register */
arch_set_irn_register(curr_sp, arch_env->sp);
- set_ia32_flags(push, arch_irn_flags_ignore);
/* this modifies the stack bias, because we pushed 32bit */
*stack_bias -= 4;
/* move esp to ebp */
- curr_bp = be_new_Copy(arch_env->bp->reg_class, irg, bl, curr_sp);
- be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), arch_env->bp);
- arch_set_irn_register(curr_bp, arch_env->bp);
- be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);
+ curr_bp = be_new_Copy(arch_env->bp->reg_class, irg, bl, curr_sp);
+ be_set_constr_single_reg_out(curr_bp, 0, arch_env->bp,
+ arch_register_req_type_ignore);
/* beware: the copy must be done before any other sp use */
curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
- be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), arch_env->sp);
- arch_set_irn_register(curr_sp, arch_env->sp);
- be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
+ be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
+ arch_register_req_type_produces_sp);
be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
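/* Roughly, the nodes built above correspond to the classic frame setup
 *   push ebp
 *   mov  ebp, esp
 * with the CopyKeep tying the new sp value to the ebp copy so the copy is
 * scheduled before any further sp use (a sketch; the real schedule is
 * decided later). */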
ir_node *leave;
/* leave */
- leave = new_rd_ia32_Leave(NULL, irg, bl, curr_bp);
- set_ia32_flags(leave, arch_irn_flags_ignore);
+ leave = new_bd_ia32_Leave(NULL, bl, curr_bp);
curr_bp = new_r_Proj(irg, bl, leave, mode_bp, pn_ia32_Leave_frame);
curr_sp = new_r_Proj(irg, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
} else {
/* copy ebp to esp */
curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], irg, bl, curr_bp);
arch_set_irn_register(curr_sp, arch_env->sp);
- be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
+ be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
+ arch_register_req_type_ignore);
/* pop ebp */
- pop = new_rd_ia32_Pop(NULL, env->irg, bl, *mem, curr_sp);
- set_ia32_flags(pop, arch_irn_flags_ignore);
+ pop = new_bd_ia32_PopEbp(NULL, bl, *mem, curr_sp);
curr_bp = new_r_Proj(irg, bl, pop, mode_bp, pn_ia32_Pop_res);
curr_sp = new_r_Proj(irg, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
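/* Epilogue sketch for the frame-pointer case: either
 *   leave
 * or the equivalent pair
 *   mov esp, ebp
 *   pop ebp
 * depending on which of the two branches above was taken. */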
 * @return The inverse operation or NULL if the operation is not invertible
*/
static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
- ir_graph *irg;
ir_mode *mode;
ir_mode *irn_mode;
ir_node *block, *noreg, *nomem;
irn);
return NULL;
- irg = get_irn_irg(irn);
block = get_nodes_block(irn);
mode = get_irn_mode(irn);
irn_mode = get_irn_mode(irn);
noreg = get_irn_n(irn, 0);
- nomem = new_r_NoMem(irg);
+ nomem = new_NoMem();
dbg = get_irn_dbg_info(irn);
/* initialize structure */
if (get_ia32_immop_type(irn) == ia32_ImmConst) {
/* we have an add with a const here */
/* invers == add with negated const */
- inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
/* we have an add with a symconst here */
/* invers == sub with const */
- inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += 2;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal add: inverse == sub */
- inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
+ inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
inverse->costs += 2;
}
#endif
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* we have a sub with a const/symconst here */
/* invers == add with this const */
- inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal sub */
if (i == n_ia32_binary_left) {
- inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
+ inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
}
else {
- inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
+ inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
}
inverse->costs += 1;
}
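/* Worked example for the Sub case: if irn computes y = x - c (immediate c),
 * then x = y + c, hence the Add carrying the same Immop attribute; for a
 * plain y = a - b the left operand is recovered as y + b and the right one
 * as a - y, matching the two branches above. */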
#if 0
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* xor with const: inverse = xor */
- inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal xor */
- inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
+ inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
inverse->costs += 1;
}
#endif
break;
case iro_ia32_Not: {
- inverse->nodes[0] = new_rd_ia32_Not(dbg, irg, block, (ir_node*) irn);
+ inverse->nodes[0] = new_bd_ia32_Not(dbg, block, (ir_node*) irn);
inverse->costs += 1;
break;
}
case iro_ia32_Neg: {
- inverse->nodes[0] = new_rd_ia32_Neg(dbg, irg, block, (ir_node*) irn);
+ inverse->nodes[0] = new_bd_ia32_Neg(dbg, block, (ir_node*) irn);
inverse->costs += 1;
break;
}
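/* Not and Neg are involutions (~~x == x, -(-x) == x), so their inverse is
 * simply the same operation applied to the result, at cost 1. */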
ia32_abi_init,
ia32_abi_done,
ia32_abi_get_between_type,
- ia32_abi_dont_save_regs,
ia32_abi_prologue,
ia32_abi_epilogue
};
static const arch_irn_ops_t ia32_irn_ops = {
ia32_get_irn_reg_req,
- ia32_set_irn_reg,
- ia32_get_irn_reg,
ia32_classify,
- ia32_get_flags,
ia32_get_frame_entity,
ia32_set_frame_entity,
ia32_set_frame_offset,
be_dump(cg->irg, "-place", dump_ir_block_graph_sched);
}
-/**
- * Dummy functions for hooks we don't need but which must be filled.
- */
-static void ia32_before_sched(void *self) {
- (void) self;
-}
-
ir_node *turn_back_am(ir_node *node)
{
ir_graph *irg = current_ir_graph;
ir_node *mem = get_irn_n(node, n_ia32_mem);
ir_node *noreg;
- ir_node *load = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);
+ ir_node *load = new_bd_ia32_Load(dbgi, block, base, index, mem);
ir_node *load_res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
ia32_copy_am_attrs(load, node);
if (mode_is_float(spillmode)) {
if (ia32_cg_config.use_sse2)
- new_op = new_rd_ia32_xLoad(dbg, irg, block, ptr, noreg, mem, spillmode);
+ new_op = new_bd_ia32_xLoad(dbg, block, ptr, noreg, mem, spillmode);
else
- new_op = new_rd_ia32_vfld(dbg, irg, block, ptr, noreg, mem, spillmode);
+ new_op = new_bd_ia32_vfld(dbg, block, ptr, noreg, mem, spillmode);
}
else if (get_mode_size_bits(spillmode) == 128) {
/* Reload 128 bit SSE registers */
- new_op = new_rd_ia32_xxLoad(dbg, irg, block, ptr, noreg, mem);
+ new_op = new_bd_ia32_xxLoad(dbg, block, ptr, noreg, mem);
}
else
- new_op = new_rd_ia32_Load(dbg, irg, block, ptr, noreg, mem);
+ new_op = new_bd_ia32_Load(dbg, block, ptr, noreg, mem);
set_ia32_op_type(new_op, ia32_AddrModeS);
set_ia32_ls_mode(new_op, spillmode);
/* copy the register from the old node to the new Load */
reg = arch_get_irn_register(node);
- arch_set_irn_register(new_op, reg);
+ arch_set_irn_register(proj, reg);
- SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));
+ SET_IA32_ORIG_NODE(new_op, node);
exchange(node, proj);
}
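/* Reload sketch: the be_Reload is replaced by a frame load, roughly
 *   mov reg, [frame + entity_offset]
 * with xLoad/vfld/xxLoad substituted for SSE, x87 and 128bit values as
 * selected above. */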
const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
ir_mode *mode = get_spill_mode(spillval);
ir_node *noreg = ia32_new_NoReg_gp(cg);
- ir_node *nomem = new_rd_NoMem(irg);
+ ir_node *nomem = new_NoMem();
ir_node *ptr = get_irg_frame(irg);
ir_node *val = get_irn_n(node, be_pos_Spill_val);
ir_node *store;
if (mode_is_float(mode)) {
if (ia32_cg_config.use_sse2)
- store = new_rd_ia32_xStore(dbg, irg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_xStore(dbg, block, ptr, noreg, nomem, val);
else
- store = new_rd_ia32_vfst(dbg, irg, block, ptr, noreg, nomem, val, mode);
+ store = new_bd_ia32_vfst(dbg, block, ptr, noreg, nomem, val, mode);
} else if (get_mode_size_bits(mode) == 128) {
/* Spill 128 bit SSE registers */
- store = new_rd_ia32_xxStore(dbg, irg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_xxStore(dbg, block, ptr, noreg, nomem, val);
} else if (get_mode_size_bits(mode) == 8) {
- store = new_rd_ia32_Store8Bit(dbg, irg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_Store8Bit(dbg, block, ptr, noreg, nomem, val);
} else {
- store = new_rd_ia32_Store(dbg, irg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_Store(dbg, block, ptr, noreg, nomem, val);
}
set_ia32_op_type(store, ia32_AddrModeD);
set_ia32_frame_ent(store, ent);
set_ia32_use_frame(store);
set_ia32_is_spill(store);
- SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(cg, node));
+ SET_IA32_ORIG_NODE(store, node);
DBG_OPT_SPILL2ST(node, store);
if (sched_point) {
}
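/* Spill sketch: the mirror of the reload above, roughly
 *   mov [frame + entity_offset], reg
 * choosing xStore/vfst/xxStore/Store8Bit by the spill mode. */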
static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) {
- ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_graph *irg = get_irn_irg(node);
ir_node *frame = get_irg_frame(irg);
- ir_node *push = new_rd_ia32_Push(dbg, irg, block, frame, noreg, mem, noreg, sp);
+ ir_node *push = new_bd_ia32_Push(dbg, block, frame, noreg, mem, noreg, sp);
set_ia32_frame_ent(push, ent);
set_ia32_use_frame(push);
}
static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent) {
- ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_graph *irg = get_irn_irg(node);
ir_node *frame = get_irg_frame(irg);
- ir_node *pop = new_rd_ia32_PopMem(dbg, irg, block, frame, noreg, new_NoMem(), sp);
+ ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg, new_NoMem(), sp);
set_ia32_frame_ent(pop, ent);
set_ia32_use_frame(pop);
* push/pop into/from memory cascades. This is possible without using
* any registers.
*/
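/* E.g. swapping two slots a and b becomes (a sketch, ignoring the esp
 * adjustments the pushes introduce):
 *   push dword [esp+a]
 *   push dword [esp+b]
 *   pop  dword [esp+a]
 *   pop  dword [esp+b]
 * built from the PushMem/PopMem nodes created below. */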
-static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node) {
- ir_graph *irg = get_irn_irg(node);
- ir_node *block = get_nodes_block(node);
- ir_node *in[1];
- ir_node *keep;
- int i, arity;
- ir_node *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
+static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node)
+{
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
+ int arity = be_get_MemPerm_entity_arity(node);
+ ir_node **pops = ALLOCAN(ir_node*, arity);
+ ir_node *in[1];
+ ir_node *keep;
+ int i;
const ir_edge_t *edge;
const ir_edge_t *next;
- ir_node **pops;
-
- arity = be_get_MemPerm_entity_arity(node);
- pops = alloca(arity * sizeof(pops[0]));
/* create Pushs */
for(i = 0; i < arity; ++i) {
return get_eip;
block = get_irg_start_block(cg->irg);
- get_eip = new_rd_ia32_GetEIP(NULL, cg->irg, block);
+ get_eip = new_bd_ia32_GetEIP(NULL, block);
cg->get_eip = get_eip;
be_dep_on_frame(get_eip);
ia32_before_abi, /* before abi introduce hook */
ia32_prepare_graph,
NULL, /* spill */
- ia32_before_sched, /* before scheduling hook */
ia32_before_ra, /* before register allocation hook */
ia32_after_ra, /* after register allocation hook */
ia32_finish, /* called before codegen */
/* enter the ISA object into the intrinsic environment */
intrinsic_env.isa = isa;
- ia32_handle_intrinsics();
/* emit asm includes */
n = get_irp_n_asms();
}
}
+/**
+ * Check for Abs or Nabs.
+ */
+static int is_Abs_or_Nabs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f) {
+ ir_node *l, *r;
+ pn_Cmp pnc;
+
+ if (cmp == NULL)
+ return 0;
+
+ /* must be <, <=, >=, > */
+ pnc = get_Proj_proj(sel);
+ if (pnc != pn_Cmp_Ge && pnc != pn_Cmp_Gt &&
+ pnc != pn_Cmp_Le && pnc != pn_Cmp_Lt)
+ return 0;
+
+ l = get_Cmp_left(cmp);
+ r = get_Cmp_right(cmp);
+
+ /* must be x cmp 0 */
+ if ((l != t && l != f) || !is_Const(r) || !is_Const_null(r))
+ return 0;
+
+ if ((!is_Minus(t) || get_Minus_op(t) != f) &&
+ (!is_Minus(f) || get_Minus_op(f) != t))
+ return 0;
+ return 1;
+}
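+/* The accepted shapes are thus, e.g. (sketch):
+ *   x <  0 ? -x :  x      -> Abs
+ *   x <  0 ?  x : -x      -> Nabs
+ * i.e. a comparison of x against 0 selecting between x and Minus(x). */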
+
/**
* Allows or disallows the creation of Psi nodes for the given Phi nodes.
+ *
+ * @param sel A selector of a Cond.
+ * @param phi_list List of Phi nodes about to be converted (linked via get_Phi_next() field)
+ * @param i First data predecessor involved in if conversion
+ * @param j Second data predecessor involved in if conversion
+ *
* @return 1 if allowed, 0 otherwise
*/
static int ia32_is_psi_allowed(ir_node *sel, ir_node *phi_list, int i, int j)
ir_node *phi;
ir_node *cmp = NULL;
- /* we can't handle psis with 64bit compares yet */
+ /* we can't handle Psis with 64bit compares yet */
if (is_Proj(sel)) {
cmp = get_Proj_pred(sel);
if (is_Cmp(cmp)) {
for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
ir_mode *mode = get_irn_mode(phi);
- if (mode_is_float(mode) || get_mode_size_bits(mode) > 32)
+ if (mode_is_float(mode)) {
+ ir_node *t = get_Phi_pred(phi, i);
+ ir_node *f = get_Phi_pred(phi, j);
+
+ if (! is_Abs_or_Nabs(cmp, sel, t, f))
+ return 0;
+ } else if (get_mode_size_bits(mode) > 32)
return 0;
}
}
ir_node *cl, *cr;
pn_Cmp pn;
- /* No cmov, only some special cases */
+ /* No Cmov, only some special cases */
if (cmp == NULL)
return 0;
t = get_Phi_pred(phi, i);
f = get_Phi_pred(phi, j);
- /* no floating point and no 64bit yet */
- if (mode_is_float(mode) || get_mode_size_bits(mode) > 32)
+ if (mode_is_float(mode)) {
+ /* only abs or nabs supported */
+ if (! is_Abs_or_Nabs(cmp, sel, t, f))
+ return 0;
+ } else if (get_mode_size_bits(mode) > 32) {
+ /* no 64bit yet */
return 0;
+ }
if (is_Const(t) && is_Const(f)) {
if ((is_Const_null(t) && is_Const_one(f)) || (is_Const_one(t) && is_Const_null(f))) {
const arch_isa_if_t ia32_isa_if = {
ia32_init,
ia32_done,
+ ia32_handle_intrinsics,
ia32_get_n_reg_class,
ia32_get_reg_class,
ia32_get_reg_class_for_mode,
ia32_is_valid_clobber
};
-void ia32_init_emitter(void);
-void ia32_init_finish(void);
-void ia32_init_optimize(void);
-void ia32_init_transform(void);
-void ia32_init_x87(void);
-
void be_init_arch_ia32(void)
{
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");