* @author Christian Wuerdig
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include "lc_opts.h"
#include "lc_opts_enum.h"
#include "../begnuas.h"
#include "../bestate.h"
#include "../beflags.h"
+#include "../betranshlp.h"
#include "bearch_ia32_t.h"
#ifdef FIRM_GRGEN_BE
#include "ia32_pbqp_transform.h"
+
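+/* the IR transformer used for instruction selection */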
+transformer_t be_transformer = TRANSFORMER_DEFAULT;
#endif
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
ir_mode *mode_fpcw = NULL;
ia32_code_gen_t *ia32_current_cg = NULL;
+/** The current omit-fp state */
+static unsigned ia32_curr_fp_ommitted = 0;
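+/* the ABI "between type" variants and their entities (return address, old bp) */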
+static ir_type *omit_fp_between_type = NULL;
+static ir_type *between_type = NULL;
+static ir_entity *old_bp_ent = NULL;
+static ir_entity *ret_addr_ent = NULL;
+static ir_entity *omit_fp_ret_addr_ent = NULL;
+
/**
* The environment for the intrinsic mapping.
*/
static ia32_intrinsic_env_t intrinsic_env = {
NULL, /* the isa */
NULL, /* the irg, these entities belong to */
- NULL, /* entity for first div operand (move into FPU) */
- NULL, /* entity for second div operand (move into FPU) */
- NULL, /* entity for converts ll -> d */
- NULL, /* entity for converts d -> ll */
NULL, /* entity for __divdi3 library call */
NULL, /* entity for __moddi3 library call */
NULL, /* entity for __udivdi3 library call */
NULL, /* entity for __umoddi3 library call */
- NULL, /* bias value for conversion from float to unsigned 64 */
};
-typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_graph *irg, ir_node *block);
+typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);
-static INLINE ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
+/**
+ * Used to create a Pseudo-Register or Unknown node.
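+ *
+ * The result node is cached in *place: it is created once in the start
+ * block and pinned to the given register.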
+ */
+static inline ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
create_const_node_func func,
const arch_register_t* reg)
{
return *place;
block = get_irg_start_block(cg->irg);
- res = func(NULL, cg->irg, block);
- arch_set_irn_register(cg->arch_env, res, reg);
+ res = func(NULL, block);
+ arch_set_irn_register(res, reg);
*place = res;
- add_irn_dep(get_irg_end(cg->irg), res);
- /* add_irn_dep(get_irg_start(cg->irg), res); */
-
return res;
}
/* Creates the unique per-irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->noreg_gp, new_rd_ia32_NoReg_GP,
+ return create_const(cg, &cg->noreg_gp, new_bd_ia32_NoReg_GP,
&ia32_gp_regs[REG_GP_NOREG]);
}
ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->noreg_vfp, new_rd_ia32_NoReg_VFP,
+ return create_const(cg, &cg->noreg_vfp, new_bd_ia32_NoReg_VFP,
&ia32_vfp_regs[REG_VFP_NOREG]);
}
ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->noreg_xmm, new_rd_ia32_NoReg_XMM,
+ return create_const(cg, &cg->noreg_xmm, new_bd_ia32_NoReg_XMM,
&ia32_xmm_regs[REG_XMM_NOREG]);
}
ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->unknown_gp, new_rd_ia32_Unknown_GP,
+ return create_const(cg, &cg->unknown_gp, new_bd_ia32_Unknown_GP,
&ia32_gp_regs[REG_GP_UKNWN]);
}
ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->unknown_vfp, new_rd_ia32_Unknown_VFP,
+ return create_const(cg, &cg->unknown_vfp, new_bd_ia32_Unknown_VFP,
&ia32_vfp_regs[REG_VFP_UKNWN]);
}
ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->unknown_xmm, new_rd_ia32_Unknown_XMM,
+ return create_const(cg, &cg->unknown_xmm, new_bd_ia32_Unknown_XMM,
&ia32_xmm_regs[REG_XMM_UKNWN]);
}
ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->fpu_trunc_mode, new_rd_ia32_ChangeCW,
+ return create_const(cg, &cg->fpu_trunc_mode, new_bd_ia32_ChangeCW,
&ia32_fp_cw_regs[REG_FPCW]);
}
/**
* Returns the admissible noreg register node for input register pos of node irn.
*/
-ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos) {
- const arch_register_req_t *req;
+static ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos)
+{
+ const arch_register_req_t *req = arch_get_register_req(irn, pos);
- req = arch_get_register_req(cg->arch_env, irn, pos);
assert(req != NULL && "Missing register requirements");
if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
return ia32_new_NoReg_gp(cg);
}
/* unknowns should be transformed already */
- assert(!is_Unknown(node));
return arch_no_register_req;
}
-static void ia32_set_irn_reg(ir_node *irn, const arch_register_t *reg)
-{
- int pos = 0;
-
- if (get_irn_mode(irn) == mode_X) {
- return;
- }
-
- if (is_Proj(irn)) {
- pos = get_Proj_proj(irn);
- irn = skip_Proj(irn);
- }
-
- if (is_ia32_irn(irn)) {
- const arch_register_t **slots;
-
- slots = get_ia32_slots(irn);
- slots[pos] = reg;
- } else {
- ia32_set_firm_reg(irn, reg, cur_reg_set);
- }
-}
-
-static const arch_register_t *ia32_get_irn_reg(const ir_node *irn)
-{
- int pos = 0;
- const arch_register_t *reg = NULL;
-
- if (is_Proj(irn)) {
-
- if (get_irn_mode(irn) == mode_X) {
- return NULL;
- }
-
- pos = get_Proj_proj(irn);
- irn = skip_Proj_const(irn);
- }
-
- if (is_ia32_irn(irn)) {
- const arch_register_t **slots;
- slots = get_ia32_slots(irn);
- assert(pos < get_ia32_n_res(irn));
- reg = slots[pos];
- } else {
- reg = ia32_get_firm_reg(irn, cur_reg_set);
- }
-
- return reg;
-}
-
static arch_irn_class_t ia32_classify(const ir_node *irn) {
- arch_irn_class_t classification = arch_irn_class_normal;
+ arch_irn_class_t classification = 0;
irn = skip_Proj_const(irn);
classification |= arch_irn_class_branch;
if (! is_ia32_irn(irn))
- return classification & ~arch_irn_class_normal;
-
- if (is_ia32_Ld(irn))
- classification |= arch_irn_class_load;
+ return classification;
- if (is_ia32_St(irn))
- classification |= arch_irn_class_store;
-
- if (is_ia32_need_stackent(irn))
+ if (is_ia32_is_reload(irn))
classification |= arch_irn_class_reload;
- return classification;
-}
-
-static arch_irn_flags_t ia32_get_flags(const ir_node *irn) {
- arch_irn_flags_t flags = arch_irn_flags_none;
+ if (is_ia32_is_spill(irn))
+ classification |= arch_irn_class_spill;
- if (is_Unknown(irn))
- return arch_irn_flags_ignore;
-
- if(is_Proj(irn) && mode_is_datab(get_irn_mode(irn))) {
- ir_node *pred = get_Proj_pred(irn);
-
- if(is_ia32_irn(pred)) {
- flags = get_ia32_out_flags(pred, get_Proj_proj(irn));
- }
-
- irn = pred;
- }
-
- if (is_ia32_irn(irn)) {
- flags |= get_ia32_flags(irn);
- }
+ if (is_ia32_is_remat(irn))
+ classification |= arch_irn_class_remat;
- return flags;
+ return classification;
}
/**
static int ia32_get_sp_bias(const ir_node *node)
{
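+	/* a Call may pop its arguments itself (callee cleanup), hence the negative bias */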
+ if (is_ia32_Call(node))
+ return -(int)get_ia32_call_attr_const(node)->pop;
+
if (is_ia32_Push(node))
return 4;
return 0;
}
-/**
- * Put all registers which are saved by the prologue/epilogue in a set.
- *
- * @param self The callback object.
- * @param s The result set.
- */
-static void ia32_abi_dont_save_regs(void *self, pset *s)
-{
- ia32_abi_env_t *env = self;
- if(env->flags.try_omit_fp)
- pset_insert_ptr(s, env->aenv->bp);
-}
-
/**
* Generate the routine prologue.
*
ia32_code_gen_t *cg = ia32_current_cg;
const arch_env_t *arch_env = env->aenv;
+ ia32_curr_fp_ommitted = env->flags.try_omit_fp;
if (! env->flags.try_omit_fp) {
- ir_graph *irg =env->irg;
+ ir_graph *irg = env->irg;
ir_node *bl = get_irg_start_block(irg);
ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
- ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *push;
- /* ALL nodes representing bp must be set to ignore. */
- be_node_set_flags(get_Proj_pred(curr_bp), BE_OUT_POS(get_Proj_proj(curr_bp)), arch_irn_flags_ignore);
+ /* mark bp register as ignore */
+ be_set_constr_single_reg_out(get_Proj_pred(curr_bp),
+ get_Proj_proj(curr_bp), arch_env->bp, arch_register_req_type_ignore);
/* push ebp */
- push = new_rd_ia32_Push(NULL, irg, bl, noreg, noreg, *mem, curr_bp, curr_sp);
+ push = new_bd_ia32_Push(NULL, bl, noreg, noreg, *mem, curr_bp, curr_sp);
curr_sp = new_r_Proj(irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
*mem = new_r_Proj(irg, bl, push, mode_M, pn_ia32_Push_M);
/* the push must have SP out register */
- arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
- set_ia32_flags(push, arch_irn_flags_ignore);
+ arch_set_irn_register(curr_sp, arch_env->sp);
/* this modifies the stack bias, because we pushed 32bit */
*stack_bias -= 4;
/* move esp to ebp */
- curr_bp = be_new_Copy(arch_env->bp->reg_class, irg, bl, curr_sp);
- be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), arch_env->bp);
- arch_set_irn_register(arch_env, curr_bp, arch_env->bp);
- be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);
+ curr_bp = be_new_Copy(arch_env->bp->reg_class, irg, bl, curr_sp);
+ be_set_constr_single_reg_out(curr_bp, 0, arch_env->bp,
+ arch_register_req_type_ignore);
/* beware: the copy must be done before any other sp use */
curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
- be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), arch_env->sp);
- arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
- be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
+ be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
+ arch_register_req_type_produces_sp);
be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
ir_node *leave;
/* leave */
- leave = new_rd_ia32_Leave(NULL, irg, bl, curr_bp);
- set_ia32_flags(leave, arch_irn_flags_ignore);
+ leave = new_bd_ia32_Leave(NULL, bl, curr_bp);
curr_bp = new_r_Proj(irg, bl, leave, mode_bp, pn_ia32_Leave_frame);
curr_sp = new_r_Proj(irg, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
} else {
/* copy ebp to esp */
curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], irg, bl, curr_bp);
- arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
- be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
+ arch_set_irn_register(curr_sp, arch_env->sp);
+ be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
+ arch_register_req_type_ignore);
/* pop ebp */
- pop = new_rd_ia32_Pop(NULL, env->irg, bl, *mem, curr_sp);
- set_ia32_flags(pop, arch_irn_flags_ignore);
+ pop = new_bd_ia32_PopEbp(NULL, bl, *mem, curr_sp);
curr_bp = new_r_Proj(irg, bl, pop, mode_bp, pn_ia32_Pop_res);
curr_sp = new_r_Proj(irg, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
*mem = new_r_Proj(irg, bl, pop, mode_M, pn_ia32_Pop_M);
}
- arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
- arch_set_irn_register(arch_env, curr_bp, arch_env->bp);
+ arch_set_irn_register(curr_sp, arch_env->sp);
+ arch_set_irn_register(curr_bp, arch_env->bp);
}
be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
*/
static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
{
- ia32_abi_env_t *env = xmalloc(sizeof(env[0]));
- be_abi_call_flags_t fl = be_abi_call_get_flags(call);
+ ia32_abi_env_t *env = XMALLOC(ia32_abi_env_t);
+ be_abi_call_flags_t fl = be_abi_call_get_flags(call);
env->flags = fl.bits;
env->irg = irg;
env->aenv = aenv;
}
/**
- * Produces the type which sits between the stack args and the locals on the stack.
- * it will contain the return address and space to store the old base pointer.
- * @return The Firm type modeling the ABI between type.
+ * Build the between type and its entities if not already built.
*/
-static ir_type *ia32_abi_get_between_type(void *self)
-{
+static void ia32_build_between_type(void) {
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
- static ir_type *omit_fp_between_type = NULL;
- static ir_type *between_type = NULL;
-
- ia32_abi_env_t *env = self;
-
if (! between_type) {
- ir_entity *old_bp_ent;
- ir_entity *ret_addr_ent;
- ir_entity *omit_fp_ret_addr_ent;
-
ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_Iu);
ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_Iu);
set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
set_type_state(omit_fp_between_type, layout_fixed);
}
+#undef IDENT
+}
+
+/**
+ * Produces the type which sits between the stack args and the locals on the stack.
+ * It will contain the return address and space to store the old base pointer.
+ * @return The Firm type modeling the ABI between type.
+ */
+static ir_type *ia32_abi_get_between_type(void *self)
+{
+ ia32_abi_env_t *env = self;
+ ia32_build_between_type();
return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
-#undef IDENT
+}
+
+/**
+ * Return the stack entity that contains the return address.
+ */
+ir_entity *ia32_get_return_address_entity(void) {
+ ia32_build_between_type();
+ return ia32_curr_fp_ommitted ? omit_fp_ret_addr_ent : ret_addr_ent;
+}
+
+/**
+ * Return the stack entity that contains the frame address.
+ */
+ir_entity *ia32_get_frame_address_entity(void) {
+ ia32_build_between_type();
+ return ia32_curr_fp_ommitted ? NULL : old_bp_ent;
}
/**
 * @return The inverse operation or NULL if the operation is not invertible
*/
static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
- ir_graph *irg;
ir_mode *mode;
ir_mode *irn_mode;
ir_node *block, *noreg, *nomem;
irn);
return NULL;
- irg = get_irn_irg(irn);
block = get_nodes_block(irn);
mode = get_irn_mode(irn);
irn_mode = get_irn_mode(irn);
noreg = get_irn_n(irn, 0);
- nomem = new_r_NoMem(irg);
+ nomem = new_NoMem();
dbg = get_irn_dbg_info(irn);
/* initialize structure */
if (get_ia32_immop_type(irn) == ia32_ImmConst) {
/* we have an add with a const here */
/* invers == add with negated const */
- inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
/* we have an add with a symconst here */
/* invers == sub with const */
- inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += 2;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal add: inverse == sub */
- inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
+ inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
inverse->costs += 2;
}
#endif
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* we have a sub with a const/symconst here */
/* invers == add with this const */
- inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal sub */
if (i == n_ia32_binary_left) {
- inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
+ inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
}
else {
- inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
+ inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
}
inverse->costs += 1;
}
#if 0
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* xor with const: inverse = xor */
- inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal xor */
- inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
+ inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
inverse->costs += 1;
}
#endif
break;
case iro_ia32_Not: {
- inverse->nodes[0] = new_rd_ia32_Not(dbg, irg, block, (ir_node*) irn);
+ inverse->nodes[0] = new_bd_ia32_Not(dbg, block, (ir_node*) irn);
inverse->costs += 1;
break;
}
case iro_ia32_Neg: {
- inverse->nodes[0] = new_rd_ia32_Neg(dbg, irg, block, (ir_node*) irn);
+ inverse->nodes[0] = new_bd_ia32_Neg(dbg, block, (ir_node*) irn);
inverse->costs += 1;
break;
}
*/
static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
{
- if(mode_is_float(mode)) {
- return mode == spillmode;
- } else {
- return 1;
- }
+ return !mode_is_float(mode) || mode == spillmode;
}
/**
* Check if irn can load its operand at position i from memory (source addressmode).
- * @param self Pointer to irn ops itself
* @param irn The irn to be checked
* @param i The operands position
* @return Non-Zero if operand can be loaded
*/
-static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i) {
- ir_node *op = get_irn_n(irn, i);
- const ir_mode *mode = get_irn_mode(op);
+static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i)
+{
+ ir_node *op = get_irn_n(irn, i);
+ const ir_mode *mode = get_irn_mode(op);
const ir_mode *spillmode = get_spill_mode(op);
- if (
- (i != n_ia32_binary_left && i != n_ia32_binary_right) || /* a "real" operand position must be requested */
- ! is_ia32_irn(irn) || /* must be an ia32 irn */
- get_ia32_am_arity(irn) != ia32_am_binary || /* must be a binary operation TODO is this necessary? */
- get_ia32_op_type(irn) != ia32_Normal || /* must not already be a addressmode irn */
- ! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source addressmode */
- ! ia32_is_spillmode_compatible(mode, spillmode) ||
- is_ia32_use_frame(irn)) /* must not already use frame */
+ if (!is_ia32_irn(irn) || /* must be an ia32 irn */
+ get_ia32_op_type(irn) != ia32_Normal || /* must not already be an addressmode irn */
+ !ia32_is_spillmode_compatible(mode, spillmode) ||
+ is_ia32_use_frame(irn)) /* must not already use frame */
return 0;
- if (i == n_ia32_binary_left) {
- const arch_register_req_t *req;
- if(!is_ia32_commutative(irn))
+ switch (get_ia32_am_support(irn)) {
+ case ia32_am_none:
return 0;
- /* we can't swap left/right for limited registers
- * (As this (currently) breaks constraint handling copies)
- */
- req = get_ia32_in_req(irn, n_ia32_binary_left);
- if (req->type & arch_register_req_type_limited) {
- return 0;
- }
+
+ case ia32_am_unary:
+ if (i != n_ia32_unary_op)
+ return 0;
+ break;
+
+ case ia32_am_binary:
+ switch (i) {
+ case n_ia32_binary_left: {
+ const arch_register_req_t *req;
+ if (!is_ia32_commutative(irn))
+ return 0;
+
+ /* we can't swap left/right for limited registers
+ * (As this (currently) breaks constraint handling copies)
+ */
+ req = get_ia32_in_req(irn, n_ia32_binary_left);
+ if (req->type & arch_register_req_type_limited)
+ return 0;
+ break;
+ }
+
+ case n_ia32_binary_right:
+ break;
+
+ default:
+ return 0;
+ }
+ break;
+
+ default:
+ panic("Unknown AM type");
}
+ /* HACK: must not already use "real" memory.
+ * This can happen for Call and Div */
+ if (!is_NoMem(get_irn_n(irn, n_ia32_mem)))
+ return 0;
+
return 1;
}
ir_mode *load_mode;
ir_mode *dest_op_mode;
- ia32_code_gen_t *cg = ia32_current_cg;
-
assert(ia32_possible_memory_operand(irn, i) && "Cannot perform memory operand change");
- if (i == n_ia32_binary_left) {
- ia32_swap_left_right(irn);
- }
-
set_ia32_op_type(irn, ia32_AddrModeS);
load_mode = get_irn_mode(get_irn_n(irn, i));
set_ia32_use_frame(irn);
set_ia32_need_stackent(irn);
- set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
- set_irn_n(irn, n_ia32_binary_right, ia32_get_admissible_noreg(cg, irn, n_ia32_binary_right));
- set_irn_n(irn, n_ia32_mem, spill);
- set_ia32_is_reload(irn);
-
- /* immediates are only allowed on the right side */
- if (i == n_ia32_binary_left && is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_left))) {
+ if (i == n_ia32_binary_left &&
+ get_ia32_am_support(irn) == ia32_am_binary &&
+ /* immediates are only allowed on the right side */
+ !is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_right))) {
ia32_swap_left_right(irn);
+ i = n_ia32_binary_right;
}
+
+ assert(is_NoMem(get_irn_n(irn, n_ia32_mem)));
+
+ set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
+ set_irn_n(irn, n_ia32_mem, spill);
+ set_irn_n(irn, i, ia32_get_admissible_noreg(ia32_current_cg, irn, i));
+ set_ia32_is_reload(irn);
}
static const be_abi_callbacks_t ia32_abi_callbacks = {
ia32_abi_init,
ia32_abi_done,
ia32_abi_get_between_type,
- ia32_abi_dont_save_regs,
ia32_abi_prologue,
ia32_abi_epilogue
};
static const arch_irn_ops_t ia32_irn_ops = {
ia32_get_irn_reg_req,
- ia32_set_irn_reg,
- ia32_get_irn_reg,
ia32_classify,
- ia32_get_flags,
ia32_get_frame_entity,
ia32_set_frame_entity,
ia32_set_frame_offset,
static void ia32_before_abi(void *self) {
lower_mode_b_config_t lower_mode_b_config = {
mode_Iu, /* lowered mode */
- mode_Bu, /* prefered mode for set */
+ mode_Bu, /* preferred mode for set */
0, /* don't lower direct compares */
};
ia32_code_gen_t *cg = self;
}
}
-transformer_t be_transformer = TRANSFORMER_DEFAULT;
-
/**
* Transforms the standard firm graph into
* an ia32 firm graph
*/
-static void ia32_prepare_graph(void *self) {
- ia32_code_gen_t *cg = self;
+static void ia32_prepare_graph(void *self)
+{
+ ia32_code_gen_t *cg = self;
+ ir_graph *irg = cg->irg;
/* do local optimizations */
- optimize_graph_df(cg->irg);
+ optimize_graph_df(irg);
+
+ /* we have to do cfopt+remove_critical_edges as we can't have Bad-blocks
+ * or critical edges in the backend */
+ optimize_cf(irg);
+ remove_critical_cf_edges(irg);
/* TODO: we often have dead code reachable through out-edges here. So for
* now we rebuild edges (as we need correct user count for code selection)
be_dump(cg->irg, "-pre_transform", dump_ir_block_graph_sched);
switch (be_transformer) {
- case TRANSFORMER_DEFAULT:
- /* transform remaining nodes into assembler instructions */
- ia32_transform_graph(cg);
- break;
+ case TRANSFORMER_DEFAULT:
+ /* transform remaining nodes into assembler instructions */
+ ia32_transform_graph(cg);
+ break;
#ifdef FIRM_GRGEN_BE
- case TRANSFORMER_PBQP:
- case TRANSFORMER_RAND:
- /* transform nodes into assembler instructions by PBQP magic */
- ia32_transform_graph_by_pbqp(cg);
- break;
+ case TRANSFORMER_PBQP:
+ case TRANSFORMER_RAND:
+ /* transform nodes into assembler instructions by PBQP magic */
+ ia32_transform_graph_by_pbqp(cg);
+ break;
#endif
- default: panic("invalid transformer");
+ default:
+ panic("invalid transformer");
}
/* do local optimizations (mainly CSE) */
/* optimize address mode */
ia32_optimize_graph(cg);
- if (cg->dump)
- be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
-
/* do code placement, to optimize the position of constants */
place_code(cg->irg);
be_dump(cg->irg, "-place", dump_ir_block_graph_sched);
}
-/**
- * Dummy functions for hooks we don't need but which must be filled.
- */
-static void ia32_before_sched(void *self) {
- (void) self;
-}
-
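+/**
+ * Turn an address mode node back into a "normal" node: materialize the
+ * folded Load again, rewire the operand and return the Load result.
+ */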
-static void turn_back_am(ir_node *node)
+ir_node *turn_back_am(ir_node *node)
{
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *base = get_irn_n(node, n_ia32_base);
ir_node *index = get_irn_n(node, n_ia32_index);
ir_node *mem = get_irn_n(node, n_ia32_mem);
- ir_node *noreg = ia32_new_NoReg_gp(ia32_current_cg);
- ir_node *load;
- ir_node *load_res;
- ir_node *mem_proj;
- const ir_edge_t *edge;
+ ir_node *noreg;
- load = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);
- load_res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
+ ir_node *load = new_bd_ia32_Load(dbgi, block, base, index, mem);
+ ir_node *load_res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
ia32_copy_am_attrs(load, node);
if (is_ia32_is_reload(node))
set_ia32_is_reload(load);
set_irn_n(node, n_ia32_mem, new_NoMem());
- switch (get_ia32_am_arity(node)) {
+ switch (get_ia32_am_support(node)) {
case ia32_am_unary:
set_irn_n(node, n_ia32_unary_op, load_res);
break;
case ia32_am_binary:
- if (is_ia32_Immediate(get_irn_n(node, n_ia32_Cmp_right))) {
- assert(is_ia32_Cmp(node) || is_ia32_Cmp8Bit(node) ||
- is_ia32_Test(node) || is_ia32_Test8Bit(node));
+ if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
set_irn_n(node, n_ia32_binary_left, load_res);
} else {
set_irn_n(node, n_ia32_binary_right, load_res);
}
break;
- case ia32_am_ternary:
- set_irn_n(node, n_ia32_binary_right, load_res);
- break;
-
- default: break;
+ default:
+ panic("Unknown AM type");
}
- set_irn_n(node, n_ia32_base, noreg);
+ noreg = ia32_new_NoReg_gp(ia32_current_cg);
+ set_irn_n(node, n_ia32_base, noreg);
set_irn_n(node, n_ia32_index, noreg);
set_ia32_am_offs_int(node, 0);
set_ia32_am_sc(node, NULL);
/* rewire mem-proj */
if (get_irn_mode(node) == mode_T) {
- mem_proj = NULL;
+ const ir_edge_t *edge;
foreach_out_edge(node, edge) {
ir_node *out = get_edge_src_irn(edge);
- if(get_irn_mode(out) == mode_M) {
- assert(mem_proj == NULL);
- mem_proj = out;
+ if (get_irn_mode(out) == mode_M) {
+ set_Proj_pred(out, load);
+ set_Proj_proj(out, pn_ia32_Load_M);
+ break;
}
}
-
- if(mem_proj != NULL) {
- set_Proj_pred(mem_proj, load);
- set_Proj_proj(mem_proj, pn_ia32_Load_M);
- }
}
set_ia32_op_type(node, ia32_Normal);
if (sched_is_scheduled(node))
sched_add_before(node, load);
+
+ return load_res;
}
static ir_node *flags_remat(ir_node *node, ir_node *after)
if (mode_is_float(spillmode)) {
if (ia32_cg_config.use_sse2)
- new_op = new_rd_ia32_xLoad(dbg, irg, block, ptr, noreg, mem, spillmode);
+ new_op = new_bd_ia32_xLoad(dbg, block, ptr, noreg, mem, spillmode);
else
- new_op = new_rd_ia32_vfld(dbg, irg, block, ptr, noreg, mem, spillmode);
+ new_op = new_bd_ia32_vfld(dbg, block, ptr, noreg, mem, spillmode);
}
else if (get_mode_size_bits(spillmode) == 128) {
/* Reload 128 bit SSE registers */
- new_op = new_rd_ia32_xxLoad(dbg, irg, block, ptr, noreg, mem);
+ new_op = new_bd_ia32_xxLoad(dbg, block, ptr, noreg, mem);
}
else
- new_op = new_rd_ia32_Load(dbg, irg, block, ptr, noreg, mem);
+ new_op = new_bd_ia32_Load(dbg, block, ptr, noreg, mem);
set_ia32_op_type(new_op, ia32_AddrModeS);
set_ia32_ls_mode(new_op, spillmode);
}
/* copy the register from the old node to the new Load */
- reg = arch_get_irn_register(cg->arch_env, node);
- arch_set_irn_register(cg->arch_env, new_op, reg);
+ reg = arch_get_irn_register(node);
+ arch_set_irn_register(proj, reg);
- SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));
+ SET_IA32_ORIG_NODE(new_op, node);
exchange(node, proj);
}
const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
ir_mode *mode = get_spill_mode(spillval);
ir_node *noreg = ia32_new_NoReg_gp(cg);
- ir_node *nomem = new_rd_NoMem(irg);
+ ir_node *nomem = new_NoMem();
ir_node *ptr = get_irg_frame(irg);
ir_node *val = get_irn_n(node, be_pos_Spill_val);
ir_node *store;
if (mode_is_float(mode)) {
if (ia32_cg_config.use_sse2)
- store = new_rd_ia32_xStore(dbg, irg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_xStore(dbg, block, ptr, noreg, nomem, val);
else
- store = new_rd_ia32_vfst(dbg, irg, block, ptr, noreg, nomem, val, mode);
+ store = new_bd_ia32_vfst(dbg, block, ptr, noreg, nomem, val, mode);
} else if (get_mode_size_bits(mode) == 128) {
/* Spill 128 bit SSE registers */
- store = new_rd_ia32_xxStore(dbg, irg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_xxStore(dbg, block, ptr, noreg, nomem, val);
} else if (get_mode_size_bits(mode) == 8) {
- store = new_rd_ia32_Store8Bit(dbg, irg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_Store8Bit(dbg, block, ptr, noreg, nomem, val);
} else {
- store = new_rd_ia32_Store(dbg, irg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_Store(dbg, block, ptr, noreg, nomem, val);
}
set_ia32_op_type(store, ia32_AddrModeD);
set_ia32_frame_ent(store, ent);
set_ia32_use_frame(store);
set_ia32_is_spill(store);
- SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(cg, node));
+ SET_IA32_ORIG_NODE(store, node);
DBG_OPT_SPILL2ST(node, store);
if (sched_point) {
}
static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) {
- ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_graph *irg = get_irn_irg(node);
ir_node *frame = get_irg_frame(irg);
- ir_node *push = new_rd_ia32_Push(dbg, irg, block, frame, noreg, mem, noreg, sp);
+ ir_node *push = new_bd_ia32_Push(dbg, block, frame, noreg, mem, noreg, sp);
set_ia32_frame_ent(push, ent);
set_ia32_use_frame(push);
set_ia32_op_type(push, ia32_AddrModeS);
set_ia32_ls_mode(push, mode_Is);
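+ /* flag the Push as a spill so ia32_classify() reports arch_irn_class_spill */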
+ set_ia32_is_spill(push);
sched_add_before(schedpoint, push);
return push;
}
static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent) {
- ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_graph *irg = get_irn_irg(node);
ir_node *frame = get_irg_frame(irg);
- ir_node *pop = new_rd_ia32_PopMem(dbg, irg, block, frame, noreg, new_NoMem(), sp);
+ ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg, new_NoMem(), sp);
set_ia32_frame_ent(pop, ent);
set_ia32_use_frame(pop);
set_ia32_op_type(pop, ia32_AddrModeD);
set_ia32_ls_mode(pop, mode_Is);
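+ /* flag the Pop as a reload so ia32_classify() reports arch_irn_class_reload */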
+ set_ia32_is_reload(pop);
sched_add_before(schedpoint, pop);
return pop;
}
-static ir_node* create_spproj(ia32_code_gen_t *cg, ir_node *node, ir_node *pred, int pos) {
+static ir_node* create_spproj(ir_node *node, ir_node *pred, int pos)
+{
ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *sp;
sp = new_rd_Proj(dbg, irg, block, pred, spmode, pos);
- arch_set_irn_register(cg->arch_env, sp, spreg);
+ arch_set_irn_register(sp, spreg);
return sp;
}
* push/pop into/from memory cascades. This is possible without using
* any registers.
*/
-static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node) {
- ir_graph *irg = get_irn_irg(node);
- ir_node *block = get_nodes_block(node);
- ir_node *in[1];
- ir_node *keep;
- int i, arity;
- ir_node *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
+static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node)
+{
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
+ int arity = be_get_MemPerm_entity_arity(node);
+ ir_node **pops = ALLOCAN(ir_node*, arity);
+ ir_node *in[1];
+ ir_node *keep;
+ int i;
const ir_edge_t *edge;
const ir_edge_t *next;
- ir_node **pops;
-
- arity = be_get_MemPerm_entity_arity(node);
- pops = alloca(arity * sizeof(pops[0]));
/* create Pushs */
for(i = 0; i < arity; ++i) {
assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
push = create_push(cg, node, node, sp, mem, inent);
- sp = create_spproj(cg, node, push, pn_ia32_Push_stack);
+ sp = create_spproj(node, push, pn_ia32_Push_stack);
if(entsize == 8) {
/* add another push after the first one */
push = create_push(cg, node, node, sp, mem, inent);
add_ia32_am_offs_int(push, 4);
- sp = create_spproj(cg, node, push, pn_ia32_Push_stack);
+ sp = create_spproj(node, push, pn_ia32_Push_stack);
}
set_irn_n(node, i, new_Bad());
assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
pop = create_pop(cg, node, node, sp, outent);
- sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack);
+ sp = create_spproj(node, pop, pn_ia32_Pop_stack);
if(entsize == 8) {
add_ia32_am_offs_int(pop, 4);
/* add another pop after the first one */
pop = create_pop(cg, node, node, sp, outent);
- sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack);
+ sp = create_spproj(node, pop, pn_ia32_Pop_stack);
}
pops[i] = pop;
*/
static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
{
- be_fec_env_t *env = data;
+ be_fec_env_t *env = data;
+ const ir_mode *mode;
+ int align;
if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
- const ir_mode *mode = get_spill_mode_mode(get_irn_mode(node));
- int align = get_mode_size_bytes(mode);
- be_node_needs_frame_entity(env, node, mode, align);
- } else if(is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL
- && is_ia32_use_frame(node)) {
- if (is_ia32_need_stackent(node) || is_ia32_Load(node)) {
- const ir_mode *mode = get_ia32_ls_mode(node);
- const ia32_attr_t *attr = get_ia32_attr_const(node);
- int align;
-
- if (is_ia32_is_reload(node)) {
- mode = get_spill_mode_mode(mode);
+ mode = get_spill_mode_mode(get_irn_mode(node));
+ align = get_mode_size_bytes(mode);
+ } else if (is_ia32_irn(node) &&
+ get_ia32_frame_ent(node) == NULL &&
+ is_ia32_use_frame(node)) {
+ if (is_ia32_need_stackent(node))
+ goto need_stackent;
+
+ switch (get_ia32_irn_opcode(node)) {
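+ /* nodes explicitly marked need_stackent jump here and are handled like Loads */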
+need_stackent:
+ case iro_ia32_Load: {
+ const ia32_attr_t *attr = get_ia32_attr_const(node);
+
+ if (attr->data.need_32bit_stackent) {
+ mode = mode_Is;
+ } else if (attr->data.need_64bit_stackent) {
+ mode = mode_Ls;
+ } else {
+ mode = get_ia32_ls_mode(node);
+ if (is_ia32_is_reload(node))
+ mode = get_spill_mode_mode(mode);
+ }
+ align = get_mode_size_bytes(mode);
+ break;
}
- if(attr->data.need_64bit_stackent) {
- mode = mode_Ls;
+ case iro_ia32_vfild:
+ case iro_ia32_vfld:
+ case iro_ia32_xLoad: {
+ mode = get_ia32_ls_mode(node);
+ align = 4;
+ break;
}
- if(attr->data.need_32bit_stackent) {
- mode = mode_Is;
+
+ case iro_ia32_FldCW: {
+ /* although 2 bytes would be enough, 4 bytes perform best */
+ mode = mode_Iu;
+ align = 4;
+ break;
}
- align = get_mode_size_bytes(mode);
- be_node_needs_frame_entity(env, node, mode, align);
- } else if (is_ia32_vfild(node) || is_ia32_xLoad(node)
- || is_ia32_vfld(node)) {
- const ir_mode *mode = get_ia32_ls_mode(node);
- int align = 4;
- be_node_needs_frame_entity(env, node, mode, align);
- } else if(is_ia32_FldCW(node)) {
- /* although 2 byte would be enough 4 byte performs best */
- const ir_mode *mode = mode_Iu;
- int align = 4;
- be_node_needs_frame_entity(env, node, mode, align);
- } else {
+
+ default:
#ifndef NDEBUG
- assert(is_ia32_St(node) ||
- is_ia32_xStoreSimple(node) ||
- is_ia32_vfst(node) ||
- is_ia32_vfist(node) ||
- is_ia32_vfisttp(node) ||
- is_ia32_FnstCW(node));
+ panic("unexpected frame user while collecting frame entity nodes");
+
+ case iro_ia32_FnstCW:
+ case iro_ia32_Store8Bit:
+ case iro_ia32_Store:
+ case iro_ia32_fst:
+ case iro_ia32_fstp:
+ case iro_ia32_vfist:
+ case iro_ia32_vfisttp:
+ case iro_ia32_vfst:
+ case iro_ia32_xStore:
+ case iro_ia32_xStoreSimple:
#endif
+ return;
}
+ } else {
+ return;
}
+ be_node_needs_frame_entity(env, node, mode, align);
}
/**
/* we might have to rewrite x87 virtual registers */
if (cg->do_x87_sim) {
- x87_simulate_graph(cg->arch_env, cg->birg);
+ x87_simulate_graph(cg->birg);
}
/* do peephole optimisations */
return get_eip;
block = get_irg_start_block(cg->irg);
- get_eip = new_rd_ia32_GetEIP(NULL, cg->irg, block);
+ get_eip = new_bd_ia32_GetEIP(NULL, block);
cg->get_eip = get_eip;
- add_irn_dep(get_eip, get_irg_frame(cg->irg));
-
+ be_dep_on_frame(get_eip);
return get_eip;
}
ia32_before_abi, /* before abi introduce hook */
ia32_prepare_graph,
NULL, /* spill */
- ia32_before_sched, /* before scheduling hook */
ia32_before_ra, /* before register allocation hook */
ia32_after_ra, /* after register allocation hook */
ia32_finish, /* called before codegen */
*/
static void *ia32_cg_init(be_irg_t *birg) {
ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env;
- ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
+ ia32_code_gen_t *cg = XMALLOCZ(ia32_code_gen_t);
cg->impl = &ia32_code_gen_if;
cg->irg = birg->irg;
cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
cg->isa = isa;
- cg->arch_env = birg->main_env->arch_env;
cg->birg = birg;
cg->blk_sched = NULL;
cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
set_tarval_output_modes();
- isa = xmalloc(sizeof(*isa));
+ isa = XMALLOC(ia32_isa_t);
memcpy(isa, &ia32_isa_template, sizeof(*isa));
if(mode_fpcw == NULL) {
ia32_build_8bit_reg_map_high(isa->regs_8bit_high);
#ifndef NDEBUG
- isa->name_obst = xmalloc(sizeof(*isa->name_obst));
+ isa->name_obst = XMALLOC(struct obstack);
obstack_init(isa->name_obst);
#endif /* NDEBUG */
/* enter the ISA object into the intrinsic environment */
intrinsic_env.isa = isa;
- ia32_handle_intrinsics();
/* emit asm includes */
n = get_irp_n_asms();
call_flags.bits.store_args_sequential = 0;
/* call_flags.bits.try_omit_fp not changed: can handle both settings */
call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
- call_flags.bits.call_has_imm = 1; /* No call immediates, we handle this by ourselves */
+ call_flags.bits.call_has_imm = 0; /* No call immediates, we handle this by ourselves */
/* set parameter passing style */
be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
if (is_ia32_irn(irn)) {
ret = get_ia32_exec_units(irn);
- }
- else if (is_be_node(irn)) {
- if (be_is_Call(irn) || be_is_Return(irn)) {
+ } else if (is_be_node(irn)) {
+ if (be_is_Return(irn)) {
ret = _units_callret;
- }
- else if (be_is_Barrier(irn)) {
+ } else if (be_is_Barrier(irn)) {
ret = _units_dummy;
- }
- else {
- ret = _units_other;
+ } else {
+ ret = _units_other;
}
}
else {
}
/**
- * Allows or disallows the creation of Psi nodes for the given Phi nodes.
+ * Check for Abs or Nabs (-Abs).
+ */
+static int psi_is_Abs_or_Nabs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f) {
+ ir_node *l, *r;
+ pn_Cmp pnc;
+
+ if (cmp == NULL)
+ return 0;
+
+ /* must be <, <=, >=, > */
+ pnc = get_Proj_proj(sel);
+ if (pnc != pn_Cmp_Ge && pnc != pn_Cmp_Gt &&
+ pnc != pn_Cmp_Le && pnc != pn_Cmp_Lt)
+ return 0;
+
+ l = get_Cmp_left(cmp);
+ r = get_Cmp_right(cmp);
+
+ /* must be x cmp 0 */
+ if ((l != t && l != f) || !is_Const(r) || !is_Const_null(r))
+ return 0;
+
+ if ((!is_Minus(t) || get_Minus_op(t) != f) &&
+ (!is_Minus(f) || get_Minus_op(f) != t))
+ return 0;
+ return 1;
+}
+
+/**
+ * Check for Abs only (rejects the Nabs variants).
+ */
+static int psi_is_Abs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f) {
+ ir_node *l, *r;
+ pn_Cmp pnc;
+
+ if (cmp == NULL)
+ return 0;
+
+ /* must be <, <=, >=, > */
+ pnc = get_Proj_proj(sel);
+ if (pnc != pn_Cmp_Ge && pnc != pn_Cmp_Gt &&
+ pnc != pn_Cmp_Le && pnc != pn_Cmp_Lt)
+ return 0;
+
+ l = get_Cmp_left(cmp);
+ r = get_Cmp_right(cmp);
+
+ /* must be x cmp 0 */
+ if ((l != t && l != f) || !is_Const(r) || !is_Const_null(r))
+ return 0;
+
+ if ((!is_Minus(t) || get_Minus_op(t) != f) &&
+ (!is_Minus(f) || get_Minus_op(f) != t))
+ return 0;
+
+ if (pnc & pn_Cmp_Gt) {
+ /* x >= 0 ? -x : x is NABS */
+ if (is_Minus(t))
+ return 0;
+ } else {
+ /* x < 0 ? x : -x is NABS */
+ if (is_Minus(f))
+ return 0;
+ }
+ return 1;
+}
+
+
+/**
+ * Allows or disallows the creation of Mux nodes for the given Phi nodes.
+ *
+ * @param sel A selector of a Cond.
+ * @param phi_list List of Phi nodes about to be converted (linked via get_Phi_next() field)
+ * @param i First data predecessor involved in if conversion
+ * @param j Second data predecessor involved in if conversion
+ *
* @return 1 if allowed, 0 otherwise
*/
-static int ia32_is_psi_allowed(ir_node *sel, ir_node *phi_list, int i, int j)
+static int ia32_is_mux_allowed(ir_node *sel, ir_node *phi_list, int i, int j)
{
ir_node *phi;
- ir_node *cmp = NULL;
+ ir_node *cmp;
+ pn_Cmp pn;
+ ir_node *cl, *cr;
- /* we can't handle psis with 64bit compares yet */
+ /* we can't handle Mux nodes with 64bit compares yet */
if (is_Proj(sel)) {
cmp = get_Proj_pred(sel);
if (is_Cmp(cmp)) {
ir_node *left = get_Cmp_left(cmp);
ir_mode *cmp_mode = get_irn_mode(left);
- if (!mode_is_float(cmp_mode) && get_mode_size_bits(cmp_mode) > 32)
- return 0;
+ if (!mode_is_float(cmp_mode) && get_mode_size_bits(cmp_mode) > 32) {
+ /* 64bit Abs IS supported */
+ for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
+ ir_node *t = get_Phi_pred(phi, i);
+ ir_node *f = get_Phi_pred(phi, j);
+
+ if (! psi_is_Abs(cmp, sel, t, f))
+ return 0;
+ }
+ return 1;
+ }
} else {
- cmp = NULL;
+ /* we do not support nodes without Cmp yet */
+ return 0;
}
+ } else {
+ /* we do not support nodes without Cmp yet */
+ return 0;
}
- if (ia32_cg_config.use_cmov) {
- if (ia32_cg_config.use_sse2 && cmp != NULL) {
- pn_Cmp pn = get_Proj_proj(sel);
- ir_node *cl = get_Cmp_left(cmp);
- ir_node *cr = get_Cmp_right(cmp);
+ pn = get_Proj_proj(sel);
+ cl = get_Cmp_left(cmp);
+ cr = get_Cmp_right(cmp);
+ if (ia32_cg_config.use_cmov) {
+ if (ia32_cg_config.use_sse2) {
/* check the Phi nodes: no 64bit and no floating point cmov */
for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
ir_mode *mode = get_irn_mode(phi);
/* check for Min, Max */
ir_node *t = get_Phi_pred(phi, i);
ir_node *f = get_Phi_pred(phi, j);
- int res = 0;
/* SSE2 supports Min & Max */
if (pn == pn_Cmp_Lt || pn == pn_Cmp_Le || pn == pn_Cmp_Ge || pn == pn_Cmp_Gt) {
if (cl == t && cr == f) {
- /* Psi(a <=/>= b, a, b) => MIN, MAX */
- res = 1;
+ /* Mux(a <=/>= b, a, b) => MIN, MAX */
+ continue;
} else if (cl == f && cr == t) {
- /* Psi(a <=/>= b, b, a) => MAX, MIN */
- res = 1;
+ /* Mux(a <=/>= b, b, a) => MAX, MIN */
+ continue;
}
}
- if (! res)
- return 0;
-
- } else if (get_mode_size_bits(mode) > 32)
return 0;
+ } else if (get_mode_size_bits(mode) > 32) {
+ /* no 64bit cmov */
+ return 0;
+ }
}
} else {
/* check the Phi nodes: no 64bit and no floating point cmov */
for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
ir_mode *mode = get_irn_mode(phi);
- if (mode_is_float(mode) || get_mode_size_bits(mode) > 32)
+ if (mode_is_float(mode)) {
+ ir_node *t = get_Phi_pred(phi, i);
+ ir_node *f = get_Phi_pred(phi, j);
+
+ /* always support Mux(!float, C1, C2) */
+ if (is_Const(t) && is_Const(f) && !mode_is_float(get_irn_mode(cl)))
+ continue;
+ /* only abs or nabs supported */
+ if (! psi_is_Abs_or_Nabs(cmp, sel, t, f))
+ return 0;
+ } else if (get_mode_size_bits(mode) > 32)
return 0;
}
}
return 1;
- } else {
- ir_node *cl, *cr;
- pn_Cmp pn;
-
- /* No cmov, only some special cases */
- if (cmp == NULL)
- return 0;
+ } else { /* No Cmov, only some special cases */
/* Now some supported cases here */
- pn = get_Proj_proj(sel);
- cl = get_Cmp_left(cmp);
- cr = get_Cmp_right(cmp);
-
for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
ir_mode *mode = get_irn_mode(phi);
- int res = 0;
ir_node *t, *f;
t = get_Phi_pred(phi, i);
f = get_Phi_pred(phi, j);
- /* no floating point and no 64bit yet */
- if (mode_is_float(mode) || get_mode_size_bits(mode) > 32)
+ if (mode_is_float(mode)) {
+ /* always support Mux(!float, C1, C2) */
+ if (is_Const(t) && is_Const(f) && !mode_is_float(get_irn_mode(cl)))
+ continue;
+ /* only abs or nabs supported */
+ if (! psi_is_Abs_or_Nabs(cmp, sel, t, f))
+ return 0;
+ } else if (get_mode_size_bits(mode) > 32) {
+ /* no 64bit yet */
return 0;
+ }
if (is_Const(t) && is_Const(f)) {
if ((is_Const_null(t) && is_Const_one(f)) || (is_Const_one(t) && is_Const_null(f))) {
- /* always support Psi(x, C1, C2) */
- res = 1;
+ /* always support Mux(x, C1, C2) */
+ continue;
}
} else if (pn == pn_Cmp_Lt || pn == pn_Cmp_Le || pn == pn_Cmp_Ge || pn == pn_Cmp_Gt) {
- if (0) {
#if 0
- } else if (cl == t && cr == f) {
- /* Psi(a <=/>= b, a, b) => Min, Max */
- res = 1;
- } else if (cl == f && cr == t) {
- /* Psi(a <=/>= b, b, a) => Max, Min */
- res = 1;
+ if (cl == t && cr == f) {
+ /* Mux(a <=/>= b, a, b) => Min, Max */
+ continue;
+ }
+ if (cl == f && cr == t) {
+ /* Mux(a <=/>= b, b, a) => Max, Min */
+ continue;
+ }
#endif
- } else if ((pn & pn_Cmp_Gt) && !mode_is_signed(mode) &&
- is_Const(f) && is_Const_null(f) && is_Sub(t) &&
- get_Sub_left(t) == cl && get_Sub_right(t) == cr) {
- /* Psi(a >=u b, a - b, 0) unsigned Doz */
- res = 1;
- } else if ((pn & pn_Cmp_Lt) && !mode_is_signed(mode) &&
- is_Const(t) && is_Const_null(t) && is_Sub(f) &&
- get_Sub_left(f) == cl && get_Sub_right(f) == cr) {
- /* Psi(a <=u b, 0, a - b) unsigned Doz */
- res = 1;
- } else if (is_Const(cr) && is_Const_null(cr)) {
+ if ((pn & pn_Cmp_Gt) && !mode_is_signed(mode) &&
+ is_Const(f) && is_Const_null(f) && is_Sub(t) &&
+ get_Sub_left(t) == cl && get_Sub_right(t) == cr) {
+ /* Mux(a >=u b, a - b, 0) unsigned Doz */
+ continue;
+ }
+ if ((pn & pn_Cmp_Lt) && !mode_is_signed(mode) &&
+ is_Const(t) && is_Const_null(t) && is_Sub(f) &&
+ get_Sub_left(f) == cl && get_Sub_right(f) == cr) {
+ /* Mux(a <=u b, 0, a - b) unsigned Doz */
+ continue;
+ }
+ if (is_Const(cr) && is_Const_null(cr)) {
if (cl == t && is_Minus(f) && get_Minus_op(f) == cl) {
- /* Psi(a <=/>= 0 ? a : -a) Nabs/Abs */
- res = 1;
+ /* Mux(a <=/>= 0 ? a : -a) Nabs/Abs */
+ continue;
} else if (cl == f && is_Minus(t) && get_Minus_op(t) == cl) {
- /* Psi(a <=/>= 0 ? -a : a) Abs/Nabs */
- res = 1;
+ /* Mux(a <=/>= 0 ? -a : a) Abs/Nabs */
+ continue;
}
}
}
- if (! res)
- return 0;
+ return 0;
}
/* all checks passed */
return 1;
return ia32_get_clobber_register(clobber) != NULL;
}
+/**
+ * Create the trampoline code.
+ */
+static ir_node *ia32_create_trampoline_fkt(ir_node *block, ir_node *mem, ir_node *trampoline, ir_node *env, ir_node *callee)
+{
+ ir_graph *irg = get_Block_irg(block);
+ ir_node *st, *p = trampoline;
+ ir_mode *mode = get_irn_mode(p);
+
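+	/* opcode bytes: 0xb9 = mov ecx, imm32; 0xe9 = jmp rel32 */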
+ /* mov ecx,<env> */
+ st = new_r_Store(irg, block, mem, p, new_Const_long(mode_Bu, 0xb9), 0);
+ mem = new_r_Proj(irg, block, st, mode_M, pn_Store_M);
+ p = new_r_Add(irg, block, p, new_Const_long(mode_Iu, 1), mode);
+ st = new_r_Store(irg, block, mem, p, env, 0);
+ mem = new_r_Proj(irg, block, st, mode_M, pn_Store_M);
+ p = new_r_Add(irg, block, p, new_Const_long(mode_Iu, 4), mode);
+ /* jmp <callee> */
+ st = new_r_Store(irg, block, mem, p, new_Const_long(mode_Bu, 0xe9), 0);
+ mem = new_r_Proj(irg, block, st, mode_M, pn_Store_M);
+ p = new_r_Add(irg, block, p, new_Const_long(mode_Iu, 1), mode);
+ st = new_r_Store(irg, block, mem, p, callee, 0);
+ mem = new_r_Proj(irg, block, st, mode_M, pn_Store_M);
+ p = new_r_Add(irg, block, p, new_Const_long(mode_Iu, 4), mode);
+
+ return mem;
+}
+
/**
* Returns the libFirm configuration parameter for this backend.
*/
static const backend_params *ia32_get_libfirm_params(void) {
static const ir_settings_if_conv_t ifconv = {
- 4, /* maxdepth, doesn't matter for Psi-conversion */
- ia32_is_psi_allowed /* allows or disallows Psi creation for given selector */
+ 4, /* maxdepth, doesn't matter for Mux-conversion */
+ ia32_is_mux_allowed /* allows or disallows Mux creation for given selector */
};
static const ir_settings_arch_dep_t ad = {
1, /* also use subs */
1, /* allow Mulhs */
1, /* allow Mulus */
- 32 /* Mulh allowed up to 32 bit */
+ 32, /* Mulh allowed up to 32 bit */
};
static backend_params p = {
1, /* need dword lowering */
1, /* support inline assembly */
- 0, /* no immediate floating point mode. */
- NULL, /* no additional opcodes */
NULL, /* will be set later */
ia32_create_intrinsic_fkt,
&intrinsic_env, /* context for ia32_create_intrinsic_fkt */
- NULL, /* will be set below */
- NULL /* will be set below */
+ NULL, /* ifconv info will be set below */
+ NULL, /* float arithmetic mode, will be set below */
+ 12, /* size of trampoline code */
+ 4, /* alignment of trampoline code */
+ ia32_create_trampoline_fkt,
+ 4 /* alignment of stack parameter */
};
ia32_setup_cg_config();
p.dep_param = &ad;
p.if_conv_info = &ifconv;
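+	/* without SSE2 the x87 FPU computes in 80bit extended precision */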
+ if (! ia32_cg_config.use_sse2)
+ p.mode_float_arithmetic = mode_E;
return &p;
}
(int*) &be_gas_flavour, gas_items
};
+#ifdef FIRM_GRGEN_BE
static const lc_opt_enum_int_items_t transformer_items[] = {
{ "default", TRANSFORMER_DEFAULT },
-#ifdef FIRM_GRGEN_BE
{ "pbqp", TRANSFORMER_PBQP },
{ "random", TRANSFORMER_RAND },
-#endif
{ NULL, 0 }
};
static lc_opt_enum_int_var_t transformer_var = {
(int*)&be_transformer, transformer_items
};
+#endif
static const lc_opt_table_entry_t ia32_options[] = {
LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
+#ifdef FIRM_GRGEN_BE
LC_OPT_ENT_ENUM_INT("transformer", "the transformer used for code selection", &transformer_var),
+#endif
LC_OPT_ENT_INT("stackalign", "set power of two stack alignment for calls",
&ia32_isa_template.arch_env.stack_alignment),
LC_OPT_LAST
const arch_isa_if_t ia32_isa_if = {
ia32_init,
ia32_done,
+ ia32_handle_intrinsics,
ia32_get_n_reg_class,
ia32_get_reg_class,
ia32_get_reg_class_for_mode,
ia32_is_valid_clobber
};
-void ia32_init_emitter(void);
-void ia32_init_finish(void);
-void ia32_init_optimize(void);
-void ia32_init_transform(void);
-void ia32_init_x87(void);
-
void be_init_arch_ia32(void)
{
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");