#include "gen_ia32_regalloc_if.h"
-#ifdef NDEBUG
-#define SET_IA32_ORIG_NODE(n, o)
-#else
-#define SET_IA32_ORIG_NODE(n, o) set_ia32_orig_node(n, o);
-#endif /* NDEBUG */
-
-
#define SFP_SIGN "0x80000000"
#define DFP_SIGN "0x8000000000000000"
#define SFP_ABS "0x7FFFFFFF"
*
****************************************************************************************************/
+/**
+ * Gets the Proj with number pn from irn.
+ */
+static ir_node *get_proj_for_pn(const ir_node *irn, long pn) {
+	const ir_edge_t *edge;
+	ir_node        *proj;
+	assert(get_irn_mode(irn) == mode_T && "need mode_T");
+
+	/* NOTE(review): assumes every out edge of the mode_T node leads to a
+	   Proj; get_Proj_proj() on a non-Proj user would be invalid — confirm
+	   callers guarantee this. */
+	foreach_out_edge(irn, edge) {
+		proj = get_edge_src_irn(edge);
+
+		if (get_Proj_proj(proj) == pn)
+			return proj;
+	}
+
+	/* no Proj with the requested number exists */
+	return NULL;
+}
+
/* Generates an entity for a known FP const (used for FP Neg + Abs) */
-static const char *gen_fp_known_const(ir_mode *mode, ia32_known_const_t kct) {
+static ident *gen_fp_known_const(ir_mode *mode, ia32_known_const_t kct) {
static const struct {
const char *tp_name;
const char *ent_name;
entity *ent;
tarval *tv;
- ent_name = names[kct].ent_name;
+ ent_name = names[kct].ent_name;
if (! ent_cache[kct]) {
tp_name = names[kct].tp_name;
cnst_str = names[kct].cnst_str;
/* cache the entry */
ent_cache[kct] = ent;
}
- return ent_name;
+
+ return get_entity_ident(ent_cache[kct]);
}
#ifndef NDEBUG
/**
 * Prints the old node name on cg obst and returns a pointer to it.
 *
 * The string is the "%+F" formatting of env->irn grown on the isa's
 * name obstack; the returned pointer is owned by that obstack and must
 * not be freed by the caller.  Only compiled in debug builds.
 */
-const char *get_old_node_name(ia32_transform_env_t *env) {
-	static int name_cnt = 0;
const char *ia32_get_old_node_name(ia32_transform_env_t *env) {
	ia32_isa_t *isa = (ia32_isa_t *)env->cg->arch_env->isa;
	lc_eoprintf(firm_get_arg_env(), isa->name_obst, "%+F", env->irn);
	obstack_1grow(isa->name_obst, 0);
	isa->name_obst_size += obstack_object_size(isa->name_obst);
-	name_cnt++;
-	if (name_cnt % 1024 == 0) {
-		printf("name obst size reached %d bytes after %d nodes\n", isa->name_obst_size, name_cnt);
-	}
	return obstack_finish(isa->name_obst);
}
#endif /* NDEBUG */
dbg_info *dbg = env->dbg;
ir_graph *irg = env->irg;
ir_node *block = env->block;
- firm_dbg_module_t *mod = env->mod;
ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
ir_node *nomem = new_NoMem();
ir_node *expr_op, *imm_op;
+ DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
/* Check if immediate optimization is on and */
/* if it's an operation with immediate. */
}
}
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
set_ia32_res_mode(new_op, mode);
dbg_info *dbg = env->dbg;
ir_graph *irg = env->irg;
ir_node *block = env->block;
- firm_dbg_module_t *mod = env->mod;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
ir_node *expr_op, *imm_op;
tarval *tv;
+ DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
assert(! mode_is_float(mode) && "Shift/Rotate with float not supported");
/* set AM support */
set_ia32_am_support(new_op, ia32_am_Dest);
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
set_ia32_res_mode(new_op, mode);
+ set_ia32_emit_cl(new_op);
return new_rd_Proj(dbg, irg, block, new_op, mode, 0);
}
ir_node *new_op = NULL;
ir_mode *mode = env->mode;
dbg_info *dbg = env->dbg;
- firm_dbg_module_t *mod = env->mod;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
+ DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
new_op = func(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
set_ia32_am_support(new_op, ia32_am_Dest);
}
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
set_ia32_res_mode(new_op, mode);
static ir_node *gen_imm_Add(ia32_transform_env_t *env, ir_node *expr_op, ir_node *const_op) {
ir_node *new_op = NULL;
tarval *tv = get_ia32_Immop_tarval(const_op);
- firm_dbg_module_t *mod = env->mod;
dbg_info *dbg = env->dbg;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *nomem = new_NoMem();
int normal_add = 1;
tarval_classification_t class_tv, class_negtv;
+ DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
/* try to optimize to inc/dec */
if (env->cg->opt.incdec && tv) {
assert((expr_op || imm_op) && "invalid operands");
if (mode_is_float(mode)) {
- return gen_binop(env, op1, op2, new_rd_ia32_fAdd);
+ if (USE_SSE2(env->cg))
+ return gen_binop(env, op1, op2, new_rd_ia32_fAdd);
+ else {
+ env->cg->used_x87 = 1;
+ return gen_binop(env, op1, op2, new_rd_ia32_vfadd);
+ }
}
else {
/* integer ADD */
/* No expr_op means, that we have two const - one symconst and */
/* one tarval or another symconst - because this case is not */
/* covered by constant folding */
+ /* We need to check for: */
+ /* 1) symconst + const -> becomes a LEA */
+ /* 2) symconst + symconst -> becomes a const + LEA as the elf */
+ /* linker doesn't support two symconsts */
+
+ if (get_ia32_op_type(op1) == ia32_SymConst && get_ia32_op_type(op2) == ia32_SymConst) {
+ /* this is the 2nd case */
+ new_op = new_rd_ia32_Lea(dbg, irg, block, op1, noreg, mode);
+ set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
+ set_ia32_am_flavour(new_op, ia32_am_OB);
+ }
+ else {
+ /* this is the 1st case */
+ new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode);
- new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode);
- add_ia32_am_offs(new_op, get_ia32_cnst(op1));
- add_ia32_am_offs(new_op, get_ia32_cnst(op2));
+ if (get_ia32_op_type(op1) == ia32_SymConst) {
+ set_ia32_am_sc(new_op, get_ia32_id_cnst(op1));
+ add_ia32_am_offs(new_op, get_ia32_cnst(op2));
+ }
+ else {
+ add_ia32_am_offs(new_op, get_ia32_cnst(op1));
+ set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
+ }
+ set_ia32_am_flavour(new_op, ia32_am_O);
+ }
/* set AM support */
set_ia32_am_support(new_op, ia32_am_Source);
set_ia32_op_type(new_op, ia32_AddrModeS);
- set_ia32_am_flavour(new_op, ia32_am_O);
/* Lea doesn't need a Proj */
return new_op;
}
}
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
set_ia32_res_mode(new_op, mode);
ir_node *new_op;
if (mode_is_float(env->mode)) {
- new_op = gen_binop(env, op1, op2, new_rd_ia32_fMul);
+ if (USE_SSE2(env->cg))
+ new_op = gen_binop(env, op1, op2, new_rd_ia32_fMul);
+ else {
+ env->cg->used_x87 = 1;
+ new_op = gen_binop(env, op1, op2, new_rd_ia32_vfmul);
+ }
}
else {
new_op = gen_binop(env, op1, op2, new_rd_ia32_Mul);
	/* to be on the safe side */
set_Proj_proj(proj_EAX, pn_EAX);
- if (get_ia32_cnst(mulh)) {
+ if (is_ia32_ImmConst(mulh) || is_ia32_ImmSymConst(mulh)) {
/* Mulh with const cannot have AM */
set_ia32_am_support(mulh, ia32_am_None);
}
* @return The created ia32 And node
*/
static ir_node *gen_And(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
-	if (mode_is_float(env->mode)) {
-		return gen_binop(env, op1, op2, new_rd_ia32_fAnd);
-	}
-	else {
-		return gen_binop(env, op1, op2, new_rd_ia32_And);
-	}
+	/* And is integer-only now; float fAnd is emitted directly where
+	   needed (e.g. by the Abs lowering), never through gen_And */
+	assert (! mode_is_float(env->mode));
+	return gen_binop(env, op1, op2, new_rd_ia32_And);
}
* @return The created ia32 Or node
*/
static ir_node *gen_Or(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
-	if (mode_is_float(env->mode)) {
-		return gen_binop(env, op1, op2, new_rd_ia32_fOr);
-	}
-	else {
-		return gen_binop(env, op1, op2, new_rd_ia32_Or);
-	}
+	/* Or is integer-only now; a float Or is never generated */
+	assert (! mode_is_float(env->mode));
+	return gen_binop(env, op1, op2, new_rd_ia32_Or);
}
* @return The created ia32 Eor node
*/
static ir_node *gen_Eor(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
-	if (mode_is_float(env->mode)) {
-		return gen_binop(env, op1, op2, new_rd_ia32_fEor);
-	}
-	else {
-		return gen_binop(env, op1, op2, new_rd_ia32_Eor);
-	}
+	/* Eor is integer-only now; float fEor is emitted directly where
+	   needed (e.g. by the Minus lowering), never through gen_Eor */
+	assert(! mode_is_float(env->mode));
+	return gen_binop(env, op1, op2, new_rd_ia32_Eor);
}
ir_node *new_op;
if (mode_is_float(env->mode)) {
- new_op = gen_binop(env, op1, op2, new_rd_ia32_fMax);
+ if (USE_SSE2(env->cg))
+ new_op = gen_binop(env, op1, op2, new_rd_ia32_fMax);
+ else {
+ env->cg->used_x87 = 1;
+ assert(0);
+ }
}
else {
new_op = new_rd_ia32_Max(env->dbg, env->irg, env->block, op1, op2, env->mode);
set_ia32_am_support(new_op, ia32_am_None);
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
}
return new_op;
ir_node *new_op;
if (mode_is_float(env->mode)) {
- new_op = gen_binop(env, op1, op2, new_rd_ia32_fMin);
+ if (USE_SSE2(env->cg))
+ new_op = gen_binop(env, op1, op2, new_rd_ia32_fMin);
+ else {
+ env->cg->used_x87 = 1;
+ assert(0);
+ }
}
else {
new_op = new_rd_ia32_Min(env->dbg, env->irg, env->block, op1, op2, env->mode);
set_ia32_am_support(new_op, ia32_am_None);
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
}
return new_op;
static ir_node *gen_imm_Sub(ia32_transform_env_t *env, ir_node *expr_op, ir_node *const_op) {
ir_node *new_op = NULL;
tarval *tv = get_ia32_Immop_tarval(const_op);
- firm_dbg_module_t *mod = env->mod;
dbg_info *dbg = env->dbg;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *nomem = new_NoMem();
int normal_sub = 1;
tarval_classification_t class_tv, class_negtv;
+ DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
/* try to optimize to inc/dec */
if (env->cg->opt.incdec && tv) {
assert((expr_op || imm_op) && "invalid operands");
if (mode_is_float(mode)) {
- return gen_binop(env, op1, op2, new_rd_ia32_fSub);
+ if (USE_SSE2(env->cg))
+ return gen_binop(env, op1, op2, new_rd_ia32_fSub);
+ else {
+ env->cg->used_x87 = 1;
+ return gen_binop(env, op1, op2, new_rd_ia32_vfsub);
+ }
}
else {
/* integer SUB */
/* No expr_op means, that we have two const - one symconst and */
/* one tarval or another symconst - because this case is not */
/* covered by constant folding */
+ /* We need to check for: */
+ /* 1) symconst + const -> becomes a LEA */
+ /* 2) symconst + symconst -> becomes a const + LEA as the elf */
+ /* linker doesn't support two symconsts */
+
+ if (get_ia32_op_type(op1) == ia32_SymConst && get_ia32_op_type(op2) == ia32_SymConst) {
+ /* this is the 2nd case */
+ new_op = new_rd_ia32_Lea(dbg, irg, block, op1, noreg, mode);
+ set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
+ set_ia32_am_sc_sign(new_op);
+ set_ia32_am_flavour(new_op, ia32_am_OB);
+ }
+ else {
+ /* this is the 1st case */
+ new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode);
- new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode);
- add_ia32_am_offs(new_op, get_ia32_cnst(op1));
- sub_ia32_am_offs(new_op, get_ia32_cnst(op2));
+ if (get_ia32_op_type(op1) == ia32_SymConst) {
+ set_ia32_am_sc(new_op, get_ia32_id_cnst(op1));
+ sub_ia32_am_offs(new_op, get_ia32_cnst(op2));
+ }
+ else {
+ add_ia32_am_offs(new_op, get_ia32_cnst(op1));
+ set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
+ set_ia32_am_sc_sign(new_op);
+ }
+ set_ia32_am_flavour(new_op, ia32_am_O);
+ }
/* set AM support */
set_ia32_am_support(new_op, ia32_am_Source);
set_ia32_op_type(new_op, ia32_AddrModeS);
- set_ia32_am_flavour(new_op, ia32_am_O);
/* Lea doesn't need a Proj */
return new_op;
}
}
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
set_ia32_res_mode(new_op, mode);
return new_rd_Proj(dbg, irg, block, new_op, mode, 0);
}
-static ir_node *get_proj_for_pn(const ir_node *irn, long pn) {
- const ir_edge_t *edge;
- ir_node *proj;
- assert(get_irn_mode(irn) == mode_T && "need mode_T");
- foreach_out_edge(irn, edge) {
- proj = get_edge_src_irn(edge);
-
- if (get_Proj_proj(proj) == pn)
- return proj;
- }
-
- return NULL;
-}
/**
* Generates an ia32 DivMod with additional infrastructure for the
be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep);
}
- SET_IA32_ORIG_NODE(res, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
set_ia32_res_mode(res, mode_Is);
*/
static ir_node *gen_Quot(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
-	ir_node *nomem = new_rd_NoMem(env->irg);
	ir_node *new_op;
+	ir_node *nomem = new_rd_NoMem(env->irg);
-	new_op = new_rd_ia32_fDiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem, env->mode);
-	set_ia32_am_support(new_op, ia32_am_Source);
+	if (USE_SSE2(env->cg)) {
-	SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+		/* a constant divisor becomes an immediate attribute, saving a register */
+		if (is_ia32_fConst(op2)) {
+			new_op = new_rd_ia32_fDiv(env->dbg, env->irg, env->block, noreg, noreg, op1, noreg, nomem, mode_T);
+			set_ia32_am_support(new_op, ia32_am_None);
+			set_ia32_Immop_attr(new_op, op2);
+		}
+		else {
+			new_op = new_rd_ia32_fDiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem, mode_T);
+			set_ia32_am_support(new_op, ia32_am_Source);
+		}
+	}
+	else {
+		/* NOTE(review): unlike the other x87 paths, this one does not set
+		   env->cg->used_x87 = 1 — confirm whether that is intentional */
+		new_op = new_rd_ia32_vfdiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem, mode_T);
+		set_ia32_am_support(new_op, ia32_am_Source);
+	}
+	/* NOTE(review): requires env->irn (the original Quot, mode_T) to have a
+	   res Proj — get_proj_for_pn() returns NULL otherwise and
+	   get_irn_mode(NULL) would crash */
+	set_ia32_res_mode(new_op, get_irn_mode(get_proj_for_pn(env->irn, pn_Quot_res)));
+	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
	return new_op;
}
* @return The created ia32 Minus node
*/
static ir_node *gen_Minus(ia32_transform_env_t *env, ir_node *op) {
- const char *name;
+ ident *name;
ir_node *new_op;
ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
int size;
if (mode_is_float(env->mode)) {
- new_op = new_rd_ia32_fEor(env->dbg, env->irg, env->block, noreg_gp, noreg_gp, op, noreg_fp, nomem, mode_T);
+ if (USE_SSE2(env->cg)) {
+ new_op = new_rd_ia32_fEor(env->dbg, env->irg, env->block, noreg_gp, noreg_gp, op, noreg_fp, nomem, mode_T);
- size = get_mode_size_bits(env->mode);
- name = gen_fp_known_const(env->mode, size == 32 ? ia32_SSIGN : ia32_DSIGN);
+ size = get_mode_size_bits(env->mode);
+ name = gen_fp_known_const(env->mode, size == 32 ? ia32_SSIGN : ia32_DSIGN);
- set_ia32_sc(new_op, name);
+ set_ia32_sc(new_op, name);
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
- set_ia32_res_mode(new_op, env->mode);
+ set_ia32_res_mode(new_op, env->mode);
+ set_ia32_immop_type(new_op, ia32_ImmSymConst);
- new_op = new_rd_Proj(env->dbg, env->irg, env->block, new_op, env->mode, 0);
+ new_op = new_rd_Proj(env->dbg, env->irg, env->block, new_op, env->mode, 0);
+ }
+ else {
+ env->cg->used_x87 = 1;
+ new_op = new_rd_ia32_vfchs(env->dbg, env->irg, env->block, op, env->mode);
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+ }
}
else {
new_op = gen_unop(env, op, new_rd_ia32_Minus);
* @return The created ia32 Not node
*/
static ir_node *gen_Not(ia32_transform_env_t *env, ir_node *op) {
-	ir_node *new_op;
-
-	if (mode_is_float(env->mode)) {
-		assert(0);
-	}
-	else {
-		new_op = gen_unop(env, op, new_rd_ia32_Not);
-	}
-
-	return new_op;
+	/* bitwise Not is integer-only; the old code already rejected float
+	   modes with assert(0), so the assert keeps the same contract */
+	assert (! mode_is_float(env->mode));
+	return gen_unop(env, op, new_rd_ia32_Not);
}
ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
ir_node *nomem = new_NoMem();
int size;
- const char *name;
+ ident *name;
if (mode_is_float(mode)) {
- res = new_rd_ia32_fAnd(dbg,irg, block, noreg_gp, noreg_gp, op, noreg_fp, nomem, mode_T);
+ if (USE_SSE2(env->cg)) {
+ res = new_rd_ia32_fAnd(dbg,irg, block, noreg_gp, noreg_gp, op, noreg_fp, nomem, mode_T);
- size = get_mode_size_bits(mode);
- name = gen_fp_known_const(mode, size == 32 ? ia32_SABS : ia32_DABS);
+ size = get_mode_size_bits(mode);
+ name = gen_fp_known_const(mode, size == 32 ? ia32_SABS : ia32_DABS);
- set_ia32_sc(res, name);
+ set_ia32_sc(res, name);
- SET_IA32_ORIG_NODE(res, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
- set_ia32_res_mode(res, mode);
+ set_ia32_res_mode(res, mode);
+ set_ia32_immop_type(res, ia32_ImmSymConst);
- res = new_rd_Proj(dbg, irg, block, res, mode, 0);
+ res = new_rd_Proj(dbg, irg, block, res, mode, 0);
+ }
+ else {
+ env->cg->used_x87 = 1;
+ res = new_rd_ia32_vfabs(dbg, irg, block, op, mode);
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
+ }
}
else {
res = new_rd_ia32_Cdq(dbg, irg, block, op, mode_T);
- SET_IA32_ORIG_NODE(res, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
set_ia32_res_mode(res, mode);
p_eax = new_rd_Proj(dbg, irg, block, res, mode, pn_EAX);
p_edx = new_rd_Proj(dbg, irg, block, res, mode, pn_EDX);
res = new_rd_ia32_Eor(dbg, irg, block, noreg_gp, noreg_gp, p_eax, p_edx, nomem, mode_T);
- SET_IA32_ORIG_NODE(res, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
set_ia32_res_mode(res, mode);
res = new_rd_Proj(dbg, irg, block, res, mode, 0);
res = new_rd_ia32_Sub(dbg, irg, block, noreg_gp, noreg_gp, res, p_edx, nomem, mode_T);
- SET_IA32_ORIG_NODE(res, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
set_ia32_res_mode(res, mode);
res = new_rd_Proj(dbg, irg, block, res, mode, 0);
* @return the created ia32 Load node
*/
static ir_node *gen_Load(ia32_transform_env_t *env) {
-	ir_node *node  = env->irn;
-	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
+	ir_node *node   = env->irn;
+	ir_node *noreg  = ia32_new_NoReg_gp(env->cg);
+	ir_node *ptr    = get_Load_ptr(node);
+	ir_node *lptr   = ptr;
+	ir_mode *mode   = get_Load_mode(node);
+	int     is_imm  = 0;
	ir_node *new_op;
+	ia32_am_flavour_t am_flav = ia32_B;
-	if (mode_is_float(env->mode)) {
-		new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, get_Load_ptr(node), noreg, get_Load_mem(node), env->mode);
+	/* address might be a constant (symconst or absolute address):
+	   load from noreg and attach the address as attribute below */
+	if (is_ia32_Const(ptr)) {
+		lptr   = noreg;
+		is_imm = 1;
+	}
+
+	if (mode_is_float(mode)) {
+		if (USE_SSE2(env->cg))
+			new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node), env->mode);
+		else {
+			env->cg->used_x87 = 1;
+			new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node), env->mode);
+		}
	}
	else {
-		new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, get_Load_ptr(node), noreg, get_Load_mem(node), env->mode);
+		new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node), env->mode);
+	}
+
+	/* base is a constant address: attach it as symconst or offset */
+	if (is_imm) {
+		if (get_ia32_immop_type(ptr) == ia32_ImmSymConst) {
+			set_ia32_am_sc(new_op, get_ia32_id_cnst(ptr));
+		}
+		else {
+			add_ia32_am_offs(new_op, get_ia32_cnst(ptr));
+		}
+
+		am_flav = ia32_O;
	}
	set_ia32_am_support(new_op, ia32_am_Source);
	set_ia32_op_type(new_op, ia32_AddrModeS);
-	set_ia32_am_flavour(new_op, ia32_B);
-	set_ia32_ls_mode(new_op, get_Load_mode(node));
+	set_ia32_am_flavour(new_op, am_flav);
+	/* NOTE(review): ls_mode is the Load mode while the FP loads above are
+	   constructed with env->mode — confirm this difference is intended */
+	set_ia32_ls_mode(new_op, mode);
-	SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
	return new_op;
}
* @return the created ia32 Store node
*/
static ir_node *gen_Store(ia32_transform_env_t *env) {
-	ir_node *node  = env->irn;
-	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
-	ir_node *val   = get_Store_value(node);
-	ir_node *ptr   = get_Store_ptr(node);
-	ir_node *mem   = get_Store_mem(node);
-	ir_mode *mode  = get_irn_mode(val);
-	ir_node *sval  = val;
+	ir_node *node   = env->irn;
+	ir_node *noreg  = ia32_new_NoReg_gp(env->cg);
+	ir_node *val    = get_Store_value(node);
+	ir_node *ptr    = get_Store_ptr(node);
+	ir_node *sptr   = ptr;
+	ir_node *mem    = get_Store_mem(node);
+	ir_mode *mode   = get_irn_mode(val);
+	ir_node *sval   = val;
+	int     is_imm  = 0;
	ir_node *new_op;
+	ia32_am_flavour_t am_flav = ia32_B;
+	ia32_immop_type_t immop   = ia32_ImmNone;
+
+	if (! mode_is_float(mode)) {
+		/* in case of storing a constant (tarval or symconst) -> make it an attribute */
+		if (is_ia32_Cnst(val)) {
+			switch (get_ia32_op_type(val)) {
+				case ia32_Const:
+					immop = ia32_ImmConst;
+					break;
+				case ia32_SymConst:
+					immop = ia32_ImmSymConst;
+					break;
+				default:
+					assert(0 && "unsupported Const type");
+			}
+			sval = noreg;
+		}
+	}
-	/* in case of storing a const (but not a symconst) -> make it an attribute */
-	if (is_ia32_Const(val) && get_ia32_op_type(val) == ia32_Const) {
-		sval = noreg;
+	/* address might be a constant (symconst or absolute address) */
+	if (is_ia32_Const(ptr)) {
+		sptr   = noreg;
+		/* FIX: must be 1, not 0 — otherwise the address was replaced by
+		   noreg but never re-attached as an AM attribute below, producing
+		   a store to a garbage address; mirrors gen_Load */
+		is_imm = 1;
	}
	if (mode_is_float(mode)) {
-		new_op = new_rd_ia32_fStore(env->dbg, env->irg, env->block, ptr, noreg, sval, mem, mode_T);
+		if (USE_SSE2(env->cg))
+			new_op = new_rd_ia32_fStore(env->dbg, env->irg, env->block, sptr, noreg, sval, mem, mode_T);
+		else {
+			env->cg->used_x87 = 1;
+			new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, sptr, noreg, sval, mem, mode_T);
+		}
	}
	else if (get_mode_size_bits(mode) == 8) {
-		new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, sval, mem, mode_T);
+		new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, sptr, noreg, sval, mem, mode_T);
	}
	else {
-		new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, sval, mem, mode_T);
+		/* FIX: use sptr (noreg when the address is constant), consistent
+		   with the float and 8-bit branches above */
+		new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, sptr, noreg, sval, mem, mode_T);
	}
	/* stored const is an attribute (saves a register) */
-	if (is_ia32_Const(val) && get_ia32_op_type(val) == ia32_Const) {
+	if (! mode_is_float(mode) && is_ia32_Cnst(val)) {
		set_ia32_Immop_attr(new_op, val);
	}
+	/* base is a constant address: attach it as symconst or offset */
+	if (is_imm) {
+		if (get_ia32_immop_type(ptr) == ia32_ImmSymConst) {
+			set_ia32_am_sc(new_op, get_ia32_id_cnst(ptr));
+		}
+		else {
+			add_ia32_am_offs(new_op, get_ia32_cnst(ptr));
+		}
+
+		am_flav = ia32_O;
+	}
+
	set_ia32_am_support(new_op, ia32_am_Dest);
	set_ia32_op_type(new_op, ia32_AddrModeD);
-	set_ia32_am_flavour(new_op, ia32_B);
+	set_ia32_am_flavour(new_op, am_flav);
	set_ia32_ls_mode(new_op, get_irn_mode(val));
+	set_ia32_immop_type(new_op, immop);
-	SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
	return new_op;
}
expr = get_expr_op(cmp_a, cmp_b);
if (cnst && expr) {
- if (mode_is_int(get_irn_mode(expr))) {
+ pn_Cmp pnc = get_Proj_proj(sel);
+
+ if ((pnc == pn_Cmp_Eq || pnc == pn_Cmp_Lg) && mode_is_int(get_irn_mode(expr))) {
if (classify_tarval(get_ia32_Immop_tarval(cnst)) == TV_CLASSIFY_NULL) {
- /* a Cmp A, 0 */
- ir_node *op1 = expr;
- ir_node *op2 = expr;
- ir_node *and = skip_Proj(expr);
- char *cnst = NULL;
+ /* a Cmp A =/!= 0 */
+ ir_node *op1 = expr;
+ ir_node *op2 = expr;
+ ir_node *and = skip_Proj(expr);
+ const char *cnst = NULL;
/* check, if expr is an only once used And operation */
if (get_irn_n_edges(expr) == 1 && is_ia32_And(and)) {
op1 = get_irn_n(and, 2);
op2 = get_irn_n(and, 3);
- cnst = get_ia32_cnst(and);
+ cnst = (is_ia32_ImmConst(and) || is_ia32_ImmSymConst(and)) ? get_ia32_cnst(and) : NULL;
}
res = new_rd_ia32_TestJmp(dbg, irg, block, op1, op2, mode_T);
set_ia32_pncode(res, get_Proj_proj(sel));
+ set_ia32_res_mode(res, get_irn_mode(op1));
if (cnst) {
copy_ia32_Immop_attr(res, and);
}
- SET_IA32_ORIG_NODE(res, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
return res;
}
}
- res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem, mode_T);
+
+ if (mode_is_float(get_irn_mode(expr))) {
+ if (USE_SSE2(env->cg))
+ res = new_rd_ia32_fCondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem, mode_T);
+ else {
+ env->cg->used_x87 = 1;
+ assert(0);
+ }
+ }
+ else {
+ res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem, mode_T);
+ }
set_ia32_Immop_attr(res, cnst);
+ set_ia32_res_mode(res, get_irn_mode(expr));
}
else {
- res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem, mode_T);
+ if (mode_is_float(get_irn_mode(cmp_a))) {
+ if (USE_SSE2(env->cg))
+ res = new_rd_ia32_fCondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem, mode_T);
+ else {
+ env->cg->used_x87 = 1;
+ assert(0);
+ }
+ }
+ else {
+ res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem, mode_T);
+ }
+ set_ia32_res_mode(res, get_irn_mode(cmp_a));
}
set_ia32_pncode(res, get_Proj_proj(sel));
else {
res = new_rd_ia32_SwitchJmp(dbg, irg, block, sel, mode_T);
set_ia32_pncode(res, get_Cond_defaultProj(node));
+ set_ia32_res_mode(res, get_irn_mode(sel));
}
- SET_IA32_ORIG_NODE(res, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
return res;
}
else {
res = new_rd_ia32_CopyB_i(dbg, irg, block, dst, src, mem, mode);
set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is));
+ set_ia32_immop_type(res, ia32_ImmConst);
}
- SET_IA32_ORIG_NODE(res, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
return res;
}
ir_node *new_op = new_rd_ia32_CMov(env->dbg, env->irg, env->block, \
get_Mux_sel(node), get_Mux_false(node), get_Mux_true(node), env->mode);
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
return new_op;
}
*
* INT -> INT
* ============
- * 1) n bit -> m bit n < m (upscale)
- * always ignored
+ * 1) n bit -> m bit n > m (downscale)
+ * a) target is signed: movsx
+ * b) target is unsigned: and with lower bits set
* 2) n bit -> m bit n == m (sign change)
* always ignored
- * 3) n bit -> m bit n > m (downscale)
- * a) Un -> Um = AND Un, (1 << m) - 1
- * b) Sn -> Um same as a)
- * c) Un -> Sm same as a)
- * d) Sn -> Sm = ASHL Sn, (n - m); ASHR Sn, (n - m)
+ * 3) n bit -> m bit n < m (upscale)
+ * a) source is signed: movsx
+ * b) source is unsigned: and with lower bits set
*
* INT -> FLOAT
* ==============
* FLOAT -> FLOAT
* ================
* SSE(1/2) convert from float or double to double or float (cvtss/sd2sd/ss)
+ * x87 is mode_E internally, conversions happen only at load and store
+ * in non-strict semantic
*/
-static ir_node *gen_int_downscale_conv(ia32_transform_env_t *env, ir_node *op,
- ir_mode *src_mode, ir_mode *tgt_mode)
-{
- int n = get_mode_size_bits(src_mode);
- int m = get_mode_size_bits(tgt_mode);
- dbg_info *dbg = env->dbg;
- ir_graph *irg = env->irg;
- ir_node *block = env->block;
- ir_node *noreg = ia32_new_NoReg_gp(env->cg);
- ir_node *nomem = new_rd_NoMem(irg);
- ir_node *new_op, *proj;
-
- assert(n > m && "downscale expected");
-
- if (mode_is_signed(src_mode) && mode_is_signed(tgt_mode)) {
- /* ASHL Sn, n - m */
- new_op = new_rd_ia32_Shl(dbg, irg, block, noreg, noreg, op, noreg, nomem, mode_T);
- proj = new_rd_Proj(dbg, irg, block, new_op, src_mode, 0);
- set_ia32_Immop_tarval(new_op, new_tarval_from_long(n - m, mode_Is));
- set_ia32_am_support(new_op, ia32_am_Source);
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
-
- /* ASHR Sn, n - m */
- new_op = new_rd_ia32_Shrs(dbg, irg, block, noreg, noreg, proj, noreg, nomem, mode_T);
- set_ia32_Immop_tarval(new_op, new_tarval_from_long(n - m, mode_Is));
- }
- else {
- new_op = new_rd_ia32_And(dbg, irg, block, noreg, noreg, op, noreg, nomem, mode_T);
- set_ia32_Immop_tarval(new_op, new_tarval_from_long((1 << m) - 1, mode_Is));
- }
-
- return new_op;
-}
+//static ir_node *gen_int_downscale_conv(ia32_transform_env_t *env, ir_node *op,
+// ir_mode *src_mode, ir_mode *tgt_mode)
+//{
+// int n = get_mode_size_bits(src_mode);
+// int m = get_mode_size_bits(tgt_mode);
+// dbg_info *dbg = env->dbg;
+// ir_graph *irg = env->irg;
+// ir_node *block = env->block;
+// ir_node *noreg = ia32_new_NoReg_gp(env->cg);
+// ir_node *nomem = new_rd_NoMem(irg);
+// ir_node *new_op, *proj;
+// assert(n > m && "downscale expected");
+// if (mode_is_signed(src_mode) && mode_is_signed(tgt_mode)) {
+// /* ASHL Sn, n - m */
+// new_op = new_rd_ia32_Shl(dbg, irg, block, noreg, noreg, op, noreg, nomem, mode_T);
+// proj = new_rd_Proj(dbg, irg, block, new_op, src_mode, 0);
+// set_ia32_Immop_tarval(new_op, new_tarval_from_long(n - m, mode_Is));
+// set_ia32_am_support(new_op, ia32_am_Source);
+// SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+// /* ASHR Sn, n - m */
+// new_op = new_rd_ia32_Shrs(dbg, irg, block, noreg, noreg, proj, noreg, nomem, mode_T);
+// set_ia32_Immop_tarval(new_op, new_tarval_from_long(n - m, mode_Is));
+// }
+// else {
+// new_op = new_rd_ia32_And(dbg, irg, block, noreg, noreg, op, noreg, nomem, mode_T);
+// set_ia32_Immop_tarval(new_op, new_tarval_from_long((1 << m) - 1, mode_Is));
+// }
+// return new_op;
+//}
/**
* Transforms a Conv node.
ir_graph *irg = env->irg;
ir_mode *src_mode = get_irn_mode(op);
ir_mode *tgt_mode = env->mode;
+ int src_bits = get_mode_size_bits(src_mode);
+ int tgt_bits = get_mode_size_bits(tgt_mode);
ir_node *block = env->block;
ir_node *new_op = NULL;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_rd_NoMem(irg);
- firm_dbg_module_t *mod = env->mod;
ir_node *proj;
+ DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
if (src_mode == tgt_mode) {
/* this can happen when changing mode_P to mode_Is */
/* we convert from float ... */
if (mode_is_float(tgt_mode)) {
/* ... to float */
- DB((mod, LEVEL_1, "create Conv(float, float) ..."));
- new_op = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
+ if (USE_SSE2(env->cg)) {
+ DB((mod, LEVEL_1, "create Conv(float, float) ..."));
+ new_op = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
+ }
+ else {
+ DB((mod, LEVEL_1, "killed Conv(float, float) ..."));
+ edges_reroute(env->irn, op, irg);
+ }
}
else {
/* ... to int */
DB((mod, LEVEL_1, "create Conv(float, int) ..."));
new_op = new_rd_ia32_Conv_FP2I(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
/* if target mode is not int: add an additional downscale convert */
- if (get_mode_size_bits(tgt_mode) < 32) {
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ if (tgt_bits < 32) {
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
set_ia32_res_mode(new_op, tgt_mode);
set_ia32_am_support(new_op, ia32_am_Source);
proj = new_rd_Proj(dbg, irg, block, new_op, mode_Is, 0);
- new_op = gen_int_downscale_conv(env, proj, src_mode, tgt_mode);
+
+ if (tgt_bits == 8 || src_bits == 8) {
+ new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, proj, nomem, mode_T);
+ }
+ else {
+ new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, proj, nomem, mode_T);
+ }
}
}
}
}
else {
/* ... to int */
- if (get_mode_size_bits(src_mode) <= get_mode_size_bits(tgt_mode)) {
- DB((mod, LEVEL_1, "omitting upscale Conv(%+F, %+F) ...", src_mode, tgt_mode));
+ if (get_mode_size_bits(src_mode) == tgt_bits) {
+ DB((mod, LEVEL_1, "omitting equal size Conv(%+F, %+F) ...", src_mode, tgt_mode));
edges_reroute(env->irn, op, irg);
}
else {
- DB((mod, LEVEL_1, "create downscale Conv(%+F, %+F) ...", src_mode, tgt_mode));
- new_op = gen_int_downscale_conv(env, op, src_mode, tgt_mode);
+ DB((mod, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode));
+ if (tgt_bits == 8 || src_bits == 8) {
+ new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
+ }
+ else {
+ new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
+ }
}
}
}
if (new_op) {
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
set_ia32_res_mode(new_op, tgt_mode);
set_ia32_am_support(new_op, ia32_am_Source);
entity *ent = be_get_frame_entity(node);
ir_mode *mode = env->mode;
+// /* If the StackParam has only one user -> */
+// /* put it in the Block where the user resides */
+// if (get_irn_n_edges(node) == 1) {
+// env->block = get_nodes_block(get_edge_src_irn(get_irn_out_edge_first(node)));
+// }
+
if (mode_is_float(mode)) {
- new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+ if (USE_SSE2(env->cg))
+ new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+ else {
+ env->cg->used_x87 = 1;
+ new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+ }
}
else {
new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
set_ia32_am_flavour(new_op, ia32_B);
set_ia32_ls_mode(new_op, mode);
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
return new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, 0);
}
set_ia32_frame_ent(new_op, be_get_frame_entity(node));
set_ia32_am_support(new_op, ia32_am_Full);
set_ia32_use_frame(new_op);
+ set_ia32_immop_type(new_op, ia32_ImmConst);
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
return new_rd_Proj(env->dbg, env->irg, env->block, new_op, env->mode, 0);
}
ir_mode *mode = get_type_mode(get_entity_type(ent));
if (mode_is_float(mode)) {
- new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+ if (USE_SSE2(env->cg))
+ new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+ else {
+ env->cg->used_x87 = 1;
+ new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+ }
}
else {
new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
set_ia32_am_flavour(new_op, ia32_B);
set_ia32_ls_mode(new_op, mode);
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
return new_op;
}
ir_mode *mode = get_irn_mode(val);
if (mode_is_float(mode)) {
- new_op = new_rd_ia32_fStore(env->dbg, env->irg, env->block, ptr, noreg, val, mem, mode_T);
+ if (USE_SSE2(env->cg))
+ new_op = new_rd_ia32_fStore(env->dbg, env->irg, env->block, ptr, noreg, val, mem, mode_T);
+ else {
+ env->cg->used_x87 = 1;
+ new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, mem, mode_T);
+ }
}
else if (get_mode_size_bits(mode) == 8) {
new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, mem, mode_T);
set_ia32_am_flavour(new_op, ia32_B);
set_ia32_ls_mode(new_op, mode);
- SET_IA32_ORIG_NODE(new_op, get_old_node_name(env));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
return new_op;
}
tenv.dbg = get_irn_dbg_info(irn);
tenv.irg = cg->irg;
tenv.irn = irn;
- tenv.mod = cg->mod;
+ DEBUG_ONLY(tenv.mod = cg->mod;)
tenv.mode = get_ia32_res_mode(irn);
tenv.cg = cg;
set_ia32_am_support(res, ia32_am_Full);
}
- SET_IA32_ORIG_NODE(res, get_old_node_name(&tenv));
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(&tenv));
/* copy register */
slots = get_ia32_slots(res);
slots[0] = in2_reg;
}
}
+/**
+ * Transforms a LEA into an Add if possible
+ * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION.
+ */
+/**
+ * Transforms a LEA into an Add if possible.
+ * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION
+ * (it inspects the allocated registers to decide whether the
+ * destination register matches one of the source registers).
+ *
+ * @param irn  the node to check/transform (ignored unless it is an ia32 Lea)
+ * @param cg   the ia32 code generator environment
+ */
+void ia32_transform_lea_to_add(ir_node *irn, ia32_code_gen_t *cg) {
+	ia32_am_flavour_t am_flav;
+	int imm = 0;
+	ir_node *res = NULL;
+	ir_node *nomem, *noreg, *base, *index, *op1, *op2;
+	char *offs;
+	ia32_transform_env_t tenv;
+	const arch_register_t *out_reg, *base_reg, *index_reg;
+
+	/* must be a LEA */
+	if (! is_ia32_Lea(irn))
+		return;
+
+	am_flav = get_ia32_am_flavour(irn);
+
+	/* only some LEAs can be transformed to an Add */
+	if (am_flav != ia32_am_B && am_flav != ia32_am_OB && am_flav != ia32_am_OI && am_flav != ia32_am_BI)
+		return;
+
+	noreg = ia32_new_NoReg_gp(cg);
+	nomem = new_rd_NoMem(cg->irg);
+	op1   = noreg;
+	op2   = noreg;
+	base  = get_irn_n(irn, 0);
+	index = get_irn_n(irn, 1);
+
+	offs = get_ia32_am_offs(irn);
+
+	/* offset has an explicit sign -> we need to skip + */
+	if (offs && offs[0] == '+')
+		offs++;
+
+	out_reg   = arch_get_irn_register(cg->arch_env, irn);
+	base_reg  = arch_get_irn_register(cg->arch_env, base);
+	index_reg = arch_get_irn_register(cg->arch_env, index);
+
+	tenv.block = get_nodes_block(irn);
+	tenv.dbg   = get_irn_dbg_info(irn);
+	tenv.irg   = cg->irg;
+	tenv.irn   = irn;
+	DEBUG_ONLY(tenv.mod = cg->mod;)
+	tenv.mode  = get_irn_mode(irn);
+	tenv.cg    = cg;
+
+	/* decide which operand becomes the Add input (and whether we carry the
+	   immediate offset); the out register must coincide with that input. */
+	switch (am_flav) {
+		case ia32_am_B:
+			/* out register must be same as base register */
+			if (! REGS_ARE_EQUAL(out_reg, base_reg))
+				return;
+
+			op1 = base;
+			break;
+		case ia32_am_OB:
+			/* out register must be same as base register */
+			if (! REGS_ARE_EQUAL(out_reg, base_reg))
+				return;
+
+			op1 = base;
+			imm = 1;
+			break;
+		case ia32_am_OI:
+			/* out register must be same as index register */
+			if (! REGS_ARE_EQUAL(out_reg, index_reg))
+				return;
+
+			op1 = index;
+			imm = 1;
+			break;
+		case ia32_am_BI:
+			/* out register must be same as one in register */
+			if (REGS_ARE_EQUAL(out_reg, base_reg)) {
+				op1 = base;
+				op2 = index;
+			}
+			else if (REGS_ARE_EQUAL(out_reg, index_reg)) {
+				op1 = index;
+				op2 = base;
+			}
+			else {
+				/* in registers are different from out -> no Add possible */
+				return;
+			}
+			break;
+		default:
+			/* unreachable: am_flav was filtered above */
+			break;
+	}
+
+	res = new_rd_ia32_Add(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, op1, op2, nomem, mode_T);
+	arch_set_irn_register(cg->arch_env, res, out_reg);
+	set_ia32_op_type(res, ia32_Normal);
+
+	if (imm) {
+		set_ia32_cnst(res, offs);
+		set_ia32_immop_type(res, ia32_ImmConst);
+	}
+
+	SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(&tenv));
+
+	/* add Add to schedule */
+	sched_add_before(irn, res);
+
+	res = new_rd_Proj(tenv.dbg, tenv.irg, tenv.block, res, tenv.mode, 0);
+
+	/* add result Proj to schedule */
+	sched_add_before(irn, res);
+
+	/* remove the old LEA */
+	sched_remove(irn);
+
+	/* exchange the Add and the LEA */
+	exchange(irn, res);
+}
+
/**
* Transforms the given firm node (and maybe some other related nodes)
* into one or more assembler nodes.
tenv.dbg = get_irn_dbg_info(node);
tenv.irg = current_ir_graph;
tenv.irn = node;
- tenv.mod = cgenv->mod;
+ DEBUG_ONLY(tenv.mod = cgenv->mod;)
tenv.mode = get_irn_mode(node);
tenv.cg = cgenv;