+/**
+ * This file implements the IR transformation from firm into
+ * ia32-Firm.
+ *
+ * $Id$
+ */
+
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
+#include "irargs_t.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "irop_t.h"
#include "irprog_t.h"
+#include "iredges_t.h"
#include "irgmod.h"
-#include "iredges.h"
#include "irvrfy.h"
#include "ircons.h"
#include "dbginfo.h"
#include "debug.h"
#include "../benode_t.h"
+#include "../besched.h"
+
#include "bearch_ia32_t.h"
#include "ia32_nodes_attr.h"
#include "../arch/archop.h" /* we need this for Min and Max nodes */
#include "ia32_transform.h"
#include "ia32_new_nodes.h"
+#include "ia32_map_regs.h"
#include "gen_ia32_regalloc_if.h"
ir_node *op, ir_node *mem, ir_mode *mode);
/**
 * Tags for the globally known FP constants used to implement
 * FP Neg (sign-bit mask) and FP Abs (abs mask) for single and
 * double precision. ia32_known_const_max is the number of
 * known constants and is used as an array size below.
 */
typedef enum {
	ia32_SSIGN, ia32_DSIGN, ia32_SABS, ia32_DABS, ia32_known_const_max
} ia32_known_const_t;
/****************************************************************************************************
*
****************************************************************************************************/
-struct tv_ent {
- entity *ent;
- tarval *tv;
-};
+/**
+ * Gets the Proj with number pn from irn.
+ */
+static ir_node *get_proj_for_pn(const ir_node *irn, long pn) {
+ const ir_edge_t *edge;
+ ir_node *proj;
+ assert(get_irn_mode(irn) == mode_T && "need mode_T");
-/* Compares two (entity, tarval) combinations */
-static int cmp_tv_ent(const void *a, const void *b, size_t len) {
- const struct tv_ent *e1 = a;
- const struct tv_ent *e2 = b;
+ foreach_out_edge(irn, edge) {
+ proj = get_edge_src_irn(edge);
- return !(e1->tv == e2->tv);
+ if (get_Proj_proj(proj) == pn)
+ return proj;
+ }
+
+ return NULL;
}
/* Generates an entity for a known FP const (used for FP Neg + Abs) */
-static char *gen_fp_known_const(ir_mode *mode, ia32_known_const_t kct) {
- static set *const_set = NULL;
- struct tv_ent key;
- struct tv_ent *entry;
- char *tp_name;
- char *ent_name;
- char *cnst_str;
+static ident *gen_fp_known_const(ir_mode *mode, ia32_known_const_t kct) {
+ static const struct {
+ const char *tp_name;
+ const char *ent_name;
+ const char *cnst_str;
+ } names [ia32_known_const_max] = {
+ { TP_SFP_SIGN, ENT_SFP_SIGN, SFP_SIGN }, /* ia32_SSIGN */
+ { TP_DFP_SIGN, ENT_DFP_SIGN, DFP_SIGN }, /* ia32_DSIGN */
+ { TP_SFP_ABS, ENT_SFP_ABS, SFP_ABS }, /* ia32_SABS */
+ { TP_DFP_ABS, ENT_DFP_ABS, DFP_ABS } /* ia32_DABS */
+ };
+ static struct entity *ent_cache[ia32_known_const_max];
+
+ const char *tp_name, *ent_name, *cnst_str;
ir_type *tp;
ir_node *cnst;
ir_graph *rem;
entity *ent;
+ tarval *tv;
- if (! const_set) {
- const_set = new_set(cmp_tv_ent, 10);
- }
-
- switch (kct) {
- case ia32_SSIGN:
- tp_name = TP_SFP_SIGN;
- ent_name = ENT_SFP_SIGN;
- cnst_str = SFP_SIGN;
- break;
- case ia32_DSIGN:
- tp_name = TP_DFP_SIGN;
- ent_name = ENT_DFP_SIGN;
- cnst_str = DFP_SIGN;
- break;
- case ia32_SABS:
- tp_name = TP_SFP_ABS;
- ent_name = ENT_SFP_ABS;
- cnst_str = SFP_ABS;
- break;
- case ia32_DABS:
- tp_name = TP_DFP_ABS;
- ent_name = ENT_DFP_ABS;
- cnst_str = DFP_ABS;
- break;
- }
-
-
- key.tv = new_tarval_from_str(cnst_str, strlen(cnst_str), mode);
- key.ent = NULL;
+ ent_name = names[kct].ent_name;
+ if (! ent_cache[kct]) {
+ tp_name = names[kct].tp_name;
+ cnst_str = names[kct].cnst_str;
- entry = set_insert(const_set, &key, sizeof(key), HASH_PTR(key.tv));
-
- if (! entry->ent) {
+ tv = new_tarval_from_str(cnst_str, strlen(cnst_str), mode);
tp = new_type_primitive(new_id_from_str(tp_name), mode);
ent = new_entity(get_glob_type(), new_id_from_str(ent_name), tp);
const code irg */
rem = current_ir_graph;
current_ir_graph = get_const_code_irg();
- cnst = new_Const(mode, key.tv);
+ cnst = new_Const(mode, tv);
current_ir_graph = rem;
set_atomic_ent_value(ent, cnst);
- /* set the entry for hashmap */
- entry->ent = ent;
+ /* cache the entry */
+ ent_cache[kct] = ent;
}
- return ent_name;
+ return get_entity_ident(ent_cache[kct]);
}
+#ifndef NDEBUG
+/**
+ * Prints the old node name on cg obst and returns a pointer to it.
+ */
+const char *ia32_get_old_node_name(ia32_transform_env_t *env) {
+ ia32_isa_t *isa = (ia32_isa_t *)env->cg->arch_env->isa;
+ lc_eoprintf(firm_get_arg_env(), isa->name_obst, "%+F", env->irn);
+ obstack_1grow(isa->name_obst, 0);
+ isa->name_obst_size += obstack_object_size(isa->name_obst);
+ return obstack_finish(isa->name_obst);
+}
+#endif /* NDEBUG */
/* determine if one operator is an Imm */
static ir_node *get_immediate_op(ir_node *op1, ir_node *op2) {
dbg_info *dbg = env->dbg;
ir_graph *irg = env->irg;
ir_node *block = env->block;
- firm_dbg_module_t *mod = env->mod;
ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
ir_node *nomem = new_NoMem();
ir_node *expr_op, *imm_op;
-
+ DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
/* Check if immediate optimization is on and */
/* if it's an operation with immediate. */
if (mode_is_float(mode)) {
/* floating point operations */
if (imm_op) {
+ DB((mod, LEVEL_1, "FP with immediate ..."));
new_op = func(dbg, irg, block, noreg_gp, noreg_gp, expr_op, noreg_fp, nomem, mode_T);
set_ia32_Immop_attr(new_op, imm_op);
set_ia32_am_support(new_op, ia32_am_None);
}
else {
+ DB((mod, LEVEL_1, "FP binop ..."));
new_op = func(dbg, irg, block, noreg_gp, noreg_gp, op1, op2, nomem, mode_T);
set_ia32_am_support(new_op, ia32_am_Source);
}
/* integer operations */
if (imm_op) {
/* This is expr + const */
+ DB((mod, LEVEL_1, "INT with immediate ..."));
new_op = func(dbg, irg, block, noreg_gp, noreg_gp, expr_op, noreg_gp, nomem, mode_T);
set_ia32_Immop_attr(new_op, imm_op);
set_ia32_am_support(new_op, ia32_am_Dest);
}
else {
+ DB((mod, LEVEL_1, "INT binop ..."));
/* This is a normal operation */
new_op = func(dbg, irg, block, noreg_gp, noreg_gp, op1, op2, nomem, mode_T);
}
}
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+
+ set_ia32_res_mode(new_op, mode);
+
if (is_op_commutative(get_irn_op(env->irn))) {
set_ia32_commutative(new_op);
}
dbg_info *dbg = env->dbg;
ir_graph *irg = env->irg;
ir_node *block = env->block;
- firm_dbg_module_t *mod = env->mod;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
ir_node *expr_op, *imm_op;
tarval *tv;
+ DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
assert(! mode_is_float(mode) && "Shift/Rotate with float not supported");
/* integer operations */
if (imm_op) {
/* This is shift/rot with const */
+ DB((mod, LEVEL_1, "Shift/Rot with immediate ..."));
new_op = func(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem, mode_T);
set_ia32_Immop_attr(new_op, imm_op);
}
else {
/* This is a normal shift/rot */
+ DB((mod, LEVEL_1, "Shift/Rot binop ..."));
new_op = func(dbg, irg, block, noreg, noreg, op1, op2, nomem, mode_T);
}
/* set AM support */
set_ia32_am_support(new_op, ia32_am_Dest);
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+
+ set_ia32_res_mode(new_op, mode);
+ set_ia32_emit_cl(new_op);
+
return new_rd_Proj(dbg, irg, block, new_op, mode, 0);
}
ir_node *block = env->block;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
+ DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
new_op = func(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
if (mode_is_float(mode)) {
+ DB((mod, LEVEL_1, "FP unop ..."));
/* floating point operations don't support implicit store */
set_ia32_am_support(new_op, ia32_am_None);
}
else {
+ DB((mod, LEVEL_1, "INT unop ..."));
set_ia32_am_support(new_op, ia32_am_Dest);
}
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+
+ set_ia32_res_mode(new_op, mode);
+
return new_rd_Proj(dbg, irg, block, new_op, mode, 0);
}
static ir_node *gen_imm_Add(ia32_transform_env_t *env, ir_node *expr_op, ir_node *const_op) {
ir_node *new_op = NULL;
tarval *tv = get_ia32_Immop_tarval(const_op);
- firm_dbg_module_t *mod = env->mod;
dbg_info *dbg = env->dbg;
- ir_mode *mode = env->mode;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
int normal_add = 1;
tarval_classification_t class_tv, class_negtv;
+ DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
/* try to optimize to inc/dec */
if (env->cg->opt.incdec && tv) {
assert((expr_op || imm_op) && "invalid operands");
if (mode_is_float(mode)) {
- return gen_binop(env, op1, op2, new_rd_ia32_fAdd);
+ if (USE_SSE2(env->cg))
+ return gen_binop(env, op1, op2, new_rd_ia32_fAdd);
+ else {
+ env->cg->used_x87 = 1;
+ return gen_binop(env, op1, op2, new_rd_ia32_vfadd);
+ }
}
else {
/* integer ADD */
/* No expr_op means, that we have two const - one symconst and */
/* one tarval or another symconst - because this case is not */
/* covered by constant folding */
-
- new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode);
- add_ia32_am_offs(new_op, get_ia32_cnst(op1));
- add_ia32_am_offs(new_op, get_ia32_cnst(op2));
+ /* We need to check for: */
+ /* 1) symconst + const -> becomes a LEA */
+ /* 2) symconst + symconst -> becomes a const + LEA as the elf */
+ /* linker doesn't support two symconsts */
+
+ if (get_ia32_op_type(op1) == ia32_SymConst && get_ia32_op_type(op2) == ia32_SymConst) {
+ /* this is the 2nd case */
+ new_op = new_rd_ia32_Lea(dbg, irg, block, op1, noreg, mode);
+ set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
+ set_ia32_am_flavour(new_op, ia32_am_OB);
+ }
+ else {
+ /* this is the 1st case */
+ new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode);
+
+ if (get_ia32_op_type(op1) == ia32_SymConst) {
+ set_ia32_am_sc(new_op, get_ia32_id_cnst(op1));
+ add_ia32_am_offs(new_op, get_ia32_cnst(op2));
+ }
+ else {
+ add_ia32_am_offs(new_op, get_ia32_cnst(op1));
+ set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
+ }
+ set_ia32_am_flavour(new_op, ia32_am_O);
+ }
/* set AM support */
set_ia32_am_support(new_op, ia32_am_Source);
set_ia32_op_type(new_op, ia32_AddrModeS);
- set_ia32_am_flavour(new_op, ia32_am_O);
/* Lea doesn't need a Proj */
return new_op;
}
}
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+
+ set_ia32_res_mode(new_op, mode);
+
return new_rd_Proj(dbg, irg, block, new_op, mode, 0);
}
ir_node *new_op;
if (mode_is_float(env->mode)) {
- new_op = gen_binop(env, op1, op2, new_rd_ia32_fMul);
+ if (USE_SSE2(env->cg))
+ new_op = gen_binop(env, op1, op2, new_rd_ia32_fMul);
+ else {
+ env->cg->used_x87 = 1;
+ new_op = gen_binop(env, op1, op2, new_rd_ia32_vfmul);
+ }
}
else {
new_op = gen_binop(env, op1, op2, new_rd_ia32_Mul);
ir_node *proj_EAX, *proj_EDX, *mulh;
ir_node *in[1];
- assert(mode_is_float(env->mode) && "Mulh with float not supported");
+ assert(!mode_is_float(env->mode) && "Mulh with float not supported");
proj_EAX = gen_binop(env, op1, op2, new_rd_ia32_Mulh);
mulh = get_Proj_pred(proj_EAX);
proj_EDX = new_rd_Proj(env->dbg, env->irg, env->block, mulh, env->mode, pn_EDX);
	/* to be on the safe side */
set_Proj_proj(proj_EAX, pn_EAX);
- if (get_ia32_cnst(mulh)) {
+ if (is_ia32_ImmConst(mulh) || is_ia32_ImmSymConst(mulh)) {
/* Mulh with const cannot have AM */
set_ia32_am_support(mulh, ia32_am_None);
}
* @return The created ia32 And node
*/
static ir_node *gen_And(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
	/* And is integer-only here; float cases must not reach this point */
	assert(! mode_is_float(env->mode));
	return gen_binop(env, op1, op2, new_rd_ia32_And);
}
* @return The created ia32 Or node
*/
static ir_node *gen_Or(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
	/* Or is integer-only here; float cases must not reach this point */
	assert(! mode_is_float(env->mode));
	return gen_binop(env, op1, op2, new_rd_ia32_Or);
}
* @return The created ia32 Eor node
*/
static ir_node *gen_Eor(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
	/* Eor (xor) is integer-only here; float cases must not reach this point */
	assert(! mode_is_float(env->mode));
	return gen_binop(env, op1, op2, new_rd_ia32_Eor);
}
ir_node *new_op;
if (mode_is_float(env->mode)) {
- new_op = gen_binop(env, op1, op2, new_rd_ia32_fMax);
+ if (USE_SSE2(env->cg))
+ new_op = gen_binop(env, op1, op2, new_rd_ia32_fMax);
+ else {
+ env->cg->used_x87 = 1;
+ assert(0);
+ }
}
else {
new_op = new_rd_ia32_Max(env->dbg, env->irg, env->block, op1, op2, env->mode);
set_ia32_am_support(new_op, ia32_am_None);
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
}
return new_op;
ir_node *new_op;
if (mode_is_float(env->mode)) {
- new_op = gen_binop(env, op1, op2, new_rd_ia32_fMin);
+ if (USE_SSE2(env->cg))
+ new_op = gen_binop(env, op1, op2, new_rd_ia32_fMin);
+ else {
+ env->cg->used_x87 = 1;
+ assert(0);
+ }
}
else {
new_op = new_rd_ia32_Min(env->dbg, env->irg, env->block, op1, op2, env->mode);
set_ia32_am_support(new_op, ia32_am_None);
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
}
return new_op;
static ir_node *gen_imm_Sub(ia32_transform_env_t *env, ir_node *expr_op, ir_node *const_op) {
ir_node *new_op = NULL;
tarval *tv = get_ia32_Immop_tarval(const_op);
- firm_dbg_module_t *mod = env->mod;
dbg_info *dbg = env->dbg;
- ir_mode *mode = env->mode;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
int normal_sub = 1;
tarval_classification_t class_tv, class_negtv;
+ DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
/* try to optimize to inc/dec */
if (env->cg->opt.incdec && tv) {
assert((expr_op || imm_op) && "invalid operands");
if (mode_is_float(mode)) {
- return gen_binop(env, op1, op2, new_rd_ia32_fSub);
+ if (USE_SSE2(env->cg))
+ return gen_binop(env, op1, op2, new_rd_ia32_fSub);
+ else {
+ env->cg->used_x87 = 1;
+ return gen_binop(env, op1, op2, new_rd_ia32_vfsub);
+ }
}
else {
/* integer SUB */
/* No expr_op means, that we have two const - one symconst and */
/* one tarval or another symconst - because this case is not */
/* covered by constant folding */
-
- new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode);
- add_ia32_am_offs(new_op, get_ia32_cnst(op1));
- sub_ia32_am_offs(new_op, get_ia32_cnst(op2));
+ /* We need to check for: */
+ /* 1) symconst + const -> becomes a LEA */
+ /* 2) symconst + symconst -> becomes a const + LEA as the elf */
+ /* linker doesn't support two symconsts */
+
+ if (get_ia32_op_type(op1) == ia32_SymConst && get_ia32_op_type(op2) == ia32_SymConst) {
+ /* this is the 2nd case */
+ new_op = new_rd_ia32_Lea(dbg, irg, block, op1, noreg, mode);
+ set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
+ set_ia32_am_sc_sign(new_op);
+ set_ia32_am_flavour(new_op, ia32_am_OB);
+ }
+ else {
+ /* this is the 1st case */
+ new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode);
+
+ if (get_ia32_op_type(op1) == ia32_SymConst) {
+ set_ia32_am_sc(new_op, get_ia32_id_cnst(op1));
+ sub_ia32_am_offs(new_op, get_ia32_cnst(op2));
+ }
+ else {
+ add_ia32_am_offs(new_op, get_ia32_cnst(op1));
+ set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
+ set_ia32_am_sc_sign(new_op);
+ }
+ set_ia32_am_flavour(new_op, ia32_am_O);
+ }
/* set AM support */
set_ia32_am_support(new_op, ia32_am_Source);
set_ia32_op_type(new_op, ia32_AddrModeS);
- set_ia32_am_flavour(new_op, ia32_am_O);
/* Lea doesn't need a Proj */
return new_op;
}
}
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+
+ set_ia32_res_mode(new_op, mode);
+
return new_rd_Proj(dbg, irg, block, new_op, mode, 0);
}
switch (dm_flav) {
case flavour_Div:
- mem = get_Div_mem(irn);
+ mem = get_Div_mem(irn);
+ mode = get_irn_mode(get_proj_for_pn(irn, pn_Div_res));
break;
case flavour_Mod:
- mem = get_Mod_mem(irn);
+ mem = get_Mod_mem(irn);
+ mode = get_irn_mode(get_proj_for_pn(irn, pn_Mod_res));
break;
case flavour_DivMod:
- mem = get_DivMod_mem(irn);
+ mem = get_DivMod_mem(irn);
+ mode = get_irn_mode(get_proj_for_pn(irn, pn_DivMod_res_div));
break;
default:
assert(0);
set_ia32_Immop_tarval(edx_node, get_tarval_null(mode_Iu));
}
- res = new_rd_ia32_DivMod(dbg, irg, block, dividend, divisor, edx_node, mem, mode);
+ res = new_rd_ia32_DivMod(dbg, irg, block, dividend, divisor, edx_node, mem, mode_T);
set_ia32_flavour(res, dm_flav);
set_ia32_n_res(res, 2);
be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep);
}
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
+
+ set_ia32_res_mode(res, mode_Is);
+
return res;
}
*/
static ir_node *gen_Quot(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_rd_NoMem(env->irg);
	ir_node *new_op;

	if (USE_SSE2(env->cg)) {
		if (is_ia32_fConst(op2)) {
			/* constant divisor: fold it into an immediate attribute,
			   so address mode cannot be used for it */
			new_op = new_rd_ia32_fDiv(env->dbg, env->irg, env->block, noreg, noreg, op1, noreg, nomem, mode_T);
			set_ia32_am_support(new_op, ia32_am_None);
			set_ia32_Immop_attr(new_op, op2);
		}
		else {
			new_op = new_rd_ia32_fDiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem, mode_T);
			set_ia32_am_support(new_op, ia32_am_Source);
		}
	}
	else {
		/* fall back to the x87 virtual divide */
		env->cg->used_x87 = 1;
		new_op = new_rd_ia32_vfdiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem, mode_T);
		set_ia32_am_support(new_op, ia32_am_Source);
	}

	/* result mode is taken from the Quot's result Proj */
	set_ia32_res_mode(new_op, get_irn_mode(get_proj_for_pn(env->irn, pn_Quot_res)));

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));

	/* NOTE(review): the mode_T node itself is returned without creating a
	   result Proj — presumably the caller rewires the old Projs; confirm. */
	return new_op;
}
-/**
- * Transforms a Conv node.
- *
- * @param env The transformation environment
- * @param op The operator
- * @return The created ia32 Conv node
- */
-static ir_node *gen_Conv(ia32_transform_env_t *env, ir_node *op) {
- return new_rd_ia32_Conv(env->dbg, env->irg, env->block, op, env->mode);
-}
-
-
-
/**
* Transforms a Minus node.
*
* @return The created ia32 Minus node
*/
static ir_node *gen_Minus(ia32_transform_env_t *env, ir_node *op) {
- char *name;
+ ident *name;
ir_node *new_op;
ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
int size;
if (mode_is_float(env->mode)) {
- new_op = new_rd_ia32_fEor(env->dbg, env->irg, env->block, noreg_gp, noreg_gp, op, noreg_fp, nomem, mode_T);
+ if (USE_SSE2(env->cg)) {
+ new_op = new_rd_ia32_fEor(env->dbg, env->irg, env->block, noreg_gp, noreg_gp, op, noreg_fp, nomem, mode_T);
+
+ size = get_mode_size_bits(env->mode);
+ name = gen_fp_known_const(env->mode, size == 32 ? ia32_SSIGN : ia32_DSIGN);
+
+ set_ia32_sc(new_op, name);
- size = get_mode_size_bits(env->mode);
- name = gen_fp_known_const(env->mode, size == 32 ? ia32_SSIGN : ia32_DSIGN);
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
- set_ia32_sc(new_op, name);
+ set_ia32_res_mode(new_op, env->mode);
+ set_ia32_immop_type(new_op, ia32_ImmSymConst);
- new_op = new_rd_Proj(env->dbg, env->irg, env->block, new_op, env->mode, 0);
+ new_op = new_rd_Proj(env->dbg, env->irg, env->block, new_op, env->mode, 0);
+ }
+ else {
+ env->cg->used_x87 = 1;
+ new_op = new_rd_ia32_vfchs(env->dbg, env->irg, env->block, op, env->mode);
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+ }
}
else {
new_op = gen_unop(env, op, new_rd_ia32_Minus);
* @return The created ia32 Not node
*/
static ir_node *gen_Not(ia32_transform_env_t *env, ir_node *op) {
	/* bitwise Not is integer-only; float negation is handled in gen_Minus */
	assert(! mode_is_float(env->mode));
	return gen_unop(env, op, new_rd_ia32_Not);
}
ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
ir_node *nomem = new_NoMem();
int size;
- char *name;
+ ident *name;
if (mode_is_float(mode)) {
- res = new_rd_ia32_fAnd(dbg,irg, block, noreg_gp, noreg_gp, op, noreg_fp, nomem, mode_T);
+ if (USE_SSE2(env->cg)) {
+ res = new_rd_ia32_fAnd(dbg,irg, block, noreg_gp, noreg_gp, op, noreg_fp, nomem, mode_T);
- size = get_mode_size_bits(mode);
- name = gen_fp_known_const(mode, size == 32 ? ia32_SABS : ia32_DABS);
+ size = get_mode_size_bits(mode);
+ name = gen_fp_known_const(mode, size == 32 ? ia32_SABS : ia32_DABS);
- set_ia32_sc(res, name);
+ set_ia32_sc(res, name);
- res = new_rd_Proj(dbg, irg, block, res, mode, 0);
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
+
+ set_ia32_res_mode(res, mode);
+ set_ia32_immop_type(res, ia32_ImmSymConst);
+
+ res = new_rd_Proj(dbg, irg, block, res, mode, 0);
+ }
+ else {
+ env->cg->used_x87 = 1;
+ res = new_rd_ia32_vfabs(dbg, irg, block, op, mode);
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
+ }
}
else {
res = new_rd_ia32_Cdq(dbg, irg, block, op, mode_T);
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
+ set_ia32_res_mode(res, mode);
+
p_eax = new_rd_Proj(dbg, irg, block, res, mode, pn_EAX);
p_edx = new_rd_Proj(dbg, irg, block, res, mode, pn_EDX);
+
res = new_rd_ia32_Eor(dbg, irg, block, noreg_gp, noreg_gp, p_eax, p_edx, nomem, mode_T);
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
+ set_ia32_res_mode(res, mode);
+
res = new_rd_Proj(dbg, irg, block, res, mode, 0);
+
res = new_rd_ia32_Sub(dbg, irg, block, noreg_gp, noreg_gp, res, p_edx, nomem, mode_T);
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
+ set_ia32_res_mode(res, mode);
+
res = new_rd_Proj(dbg, irg, block, res, mode, 0);
}
* @return the created ia32 Load node
*/
static ir_node *gen_Load(ia32_transform_env_t *env) {
	ir_node *node  = env->irn;
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *ptr   = get_Load_ptr(node);
	ir_node *lptr  = ptr;
	ir_mode *mode  = get_Load_mode(node);
	int      is_imm = 0;
	ir_node *new_op;
	ia32_am_flavour_t am_flav = ia32_B;

	/* address might be a constant (symconst or absolute address):
	   then it goes into the address mode attributes and no base
	   register is consumed */
	if (is_ia32_Const(ptr)) {
		lptr   = noreg;
		is_imm = 1;
	}

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node), env->mode);
		else {
			env->cg->used_x87 = 1;
			new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node), env->mode);
		}
	}
	else {
		new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node), env->mode);
	}

	/* base is a constant address: transfer it into the AM attributes */
	if (is_imm) {
		if (get_ia32_immop_type(ptr) == ia32_ImmSymConst) {
			set_ia32_am_sc(new_op, get_ia32_id_cnst(ptr));
		}
		else {
			add_ia32_am_offs(new_op, get_ia32_cnst(ptr));
		}

		am_flav = ia32_O;
	}

	set_ia32_am_support(new_op, ia32_am_Source);
	set_ia32_op_type(new_op, ia32_AddrModeS);
	set_ia32_am_flavour(new_op, am_flav);
	set_ia32_ls_mode(new_op, mode);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));

	return new_op;
}
* @return the created ia32 Store node
*/
static ir_node *gen_Store(ia32_transform_env_t *env) {
	ir_node *node  = env->irn;
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *val   = get_Store_value(node);
	ir_node *ptr   = get_Store_ptr(node);
	ir_node *sptr  = ptr;
	ir_node *mem   = get_Store_mem(node);
	ir_mode *mode  = get_irn_mode(val);
	ir_node *sval  = val;
	int      is_imm = 0;
	ir_node *new_op;
	ia32_am_flavour_t am_flav = ia32_B;
	ia32_immop_type_t immop   = ia32_ImmNone;

	if (! mode_is_float(mode)) {
		/* in case of storing a const (but not a symconst) -> make it an attribute */
		if (is_ia32_Cnst(val)) {
			switch (get_ia32_op_type(val)) {
			case ia32_Const:
				immop = ia32_ImmConst;
				break;
			case ia32_SymConst:
				immop = ia32_ImmSymConst;
				break;
			default:
				assert(0 && "unsupported Const type");
			}
			sval = noreg;
		}
	}

	/* address might be a constant (symconst or absolute address) */
	if (is_ia32_Const(ptr)) {
		sptr   = noreg;
		/* BUGFIX: was 0 — with is_imm left 0 the constant address was
		   replaced by noreg but never folded into the AM attributes,
		   losing the store address entirely (cf. gen_Load) */
		is_imm = 1;
	}

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = new_rd_ia32_fStore(env->dbg, env->irg, env->block, sptr, noreg, sval, mem, mode_T);
		else {
			env->cg->used_x87 = 1;
			new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, sptr, noreg, sval, mem, mode_T);
		}
	}
	else if (get_mode_size_bits(mode) == 8) {
		new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, sptr, noreg, sval, mem, mode_T);
	}
	else {
		/* BUGFIX: use sptr (was ptr), consistent with the other branches */
		new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, sptr, noreg, sval, mem, mode_T);
	}

	/* stored const is an attribute (saves a register) */
	if (! mode_is_float(mode) && is_ia32_Cnst(val)) {
		set_ia32_Immop_attr(new_op, val);
	}

	/* base is a constant address: transfer it into the AM attributes */
	if (is_imm) {
		if (get_ia32_immop_type(ptr) == ia32_ImmSymConst) {
			set_ia32_am_sc(new_op, get_ia32_id_cnst(ptr));
		}
		else {
			add_ia32_am_offs(new_op, get_ia32_cnst(ptr));
		}

		am_flav = ia32_O;
	}

	set_ia32_am_support(new_op, ia32_am_Dest);
	set_ia32_op_type(new_op, ia32_AddrModeD);
	set_ia32_am_flavour(new_op, am_flav);
	set_ia32_ls_mode(new_op, get_irn_mode(val));
	set_ia32_immop_type(new_op, immop);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));

	return new_op;
}
/**
- * Transforms a Cond -> Proj[b] -> Cmp into a CondJmp or CondJmp_i
+ * Transforms a Cond -> Proj[b] -> Cmp into a CondJmp, CondJmp_i or TestJmp
*
* @param env The transformation environment
* @return The transformed node.
ir_node *res = NULL;
ir_node *pred = NULL;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
- ir_node *nomem = new_NoMem();
ir_node *cmp_a, *cmp_b, *cnst, *expr;
if (is_Proj(sel) && sel_mode == mode_b) {
+ ir_node *nomem = new_NoMem();
+
pred = get_Proj_pred(sel);
/* get both compare operators */
expr = get_expr_op(cmp_a, cmp_b);
if (cnst && expr) {
- res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem, mode_T);
+ pn_Cmp pnc = get_Proj_proj(sel);
+
+ if ((pnc == pn_Cmp_Eq || pnc == pn_Cmp_Lg) && mode_is_int(get_irn_mode(expr))) {
+ if (classify_tarval(get_ia32_Immop_tarval(cnst)) == TV_CLASSIFY_NULL) {
+ /* a Cmp A =/!= 0 */
+ ir_node *op1 = expr;
+ ir_node *op2 = expr;
+ ir_node *and = skip_Proj(expr);
+ const char *cnst = NULL;
+
+ /* check, if expr is an only once used And operation */
+ if (get_irn_n_edges(expr) == 1 && is_ia32_And(and)) {
+ op1 = get_irn_n(and, 2);
+ op2 = get_irn_n(and, 3);
+
+ cnst = (is_ia32_ImmConst(and) || is_ia32_ImmSymConst(and)) ? get_ia32_cnst(and) : NULL;
+ }
+ res = new_rd_ia32_TestJmp(dbg, irg, block, op1, op2, mode_T);
+ set_ia32_pncode(res, get_Proj_proj(sel));
+ set_ia32_res_mode(res, get_irn_mode(op1));
+
+ if (cnst) {
+ copy_ia32_Immop_attr(res, and);
+ }
+
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
+ return res;
+ }
+ }
+
+ if (mode_is_float(get_irn_mode(expr))) {
+ if (USE_SSE2(env->cg))
+ res = new_rd_ia32_fCondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem, mode_T);
+ else {
+ env->cg->used_x87 = 1;
+ assert(0);
+ }
+ }
+ else {
+ res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem, mode_T);
+ }
set_ia32_Immop_attr(res, cnst);
+ set_ia32_res_mode(res, get_irn_mode(expr));
}
else {
- res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem, mode_T);
+ if (mode_is_float(get_irn_mode(cmp_a))) {
+ if (USE_SSE2(env->cg))
+ res = new_rd_ia32_fCondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem, mode_T);
+ else {
+ env->cg->used_x87 = 1;
+ assert(0);
+ }
+ }
+ else {
+ res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem, mode_T);
+ }
+ set_ia32_res_mode(res, get_irn_mode(cmp_a));
}
set_ia32_pncode(res, get_Proj_proj(sel));
else {
res = new_rd_ia32_SwitchJmp(dbg, irg, block, sel, mode_T);
set_ia32_pncode(res, get_Cond_defaultProj(node));
+ set_ia32_res_mode(res, get_irn_mode(sel));
}
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
return res;
}
ir_node *src = get_CopyB_src(node);
ir_node *dst = get_CopyB_dst(node);
ir_node *mem = get_CopyB_mem(node);
- ir_node *noreg = ia32_new_NoReg_gp(env->cg);
int size = get_type_size_bytes(get_CopyB_type(node));
int rem;
else {
res = new_rd_ia32_CopyB_i(dbg, irg, block, dst, src, mem, mode);
set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is));
+ set_ia32_immop_type(res, ia32_ImmConst);
}
+ SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
+
return res;
}
* @return The transformed node.
*/
static ir_node *gen_Mux(ia32_transform_env_t *env) {
-	ir_node *node = env->irn;
-
-	return new_rd_ia32_CMov(env->dbg, env->irg, env->block,
+	ir_node *node   = env->irn;
+	/* a Mux maps directly onto a conditional move; the call simply
+	   continues on the next (shared context) line, so no line
+	   continuation backslash is needed here */
+	ir_node *new_op = new_rd_ia32_CMov(env->dbg, env->irg, env->block,
 		get_Mux_sel(node), get_Mux_false(node), get_Mux_true(node), env->mode);
+
+	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+
+	return new_op;
+}
+
+
+/**
+ * Following conversion rules apply:
+ *
+ * INT -> INT
+ * ============
+ *  1) n bit -> m bit    n > m (downscale)
+ *     a) target is signed:    movsx
+ *     b) target is unsigned:  and with lower bits set
+ *  2) n bit -> m bit    n == m   (sign change)
+ *     always ignored
+ *  3) n bit -> m bit    n < m (upscale)
+ *     a) source is signed:    movsx
+ *     b) source is unsigned:  and with lower bits set
+ *
+ * INT -> FLOAT
+ * ==============
+ * SSE(1/2) convert to float or double (cvtsi2ss/sd)
+ *
+ * FLOAT -> INT
+ * ==============
+ * SSE(1/2) convert from float or double to 32bit int (cvtss/sd2si)
+ * if target mode < 32bit: additional INT -> INT conversion (see above)
+ *
+ * FLOAT -> FLOAT
+ * ================
+ * SSE(1/2) convert from float or double to double or float (cvtss/sd2sd/ss)
+ * x87 is mode_E internally, conversions happen only at load and store
+ * in non-strict semantic
+ */
+
+//static ir_node *gen_int_downscale_conv(ia32_transform_env_t *env, ir_node *op,
+// ir_mode *src_mode, ir_mode *tgt_mode)
+//{
+// int n = get_mode_size_bits(src_mode);
+// int m = get_mode_size_bits(tgt_mode);
+// dbg_info *dbg = env->dbg;
+// ir_graph *irg = env->irg;
+// ir_node *block = env->block;
+// ir_node *noreg = ia32_new_NoReg_gp(env->cg);
+// ir_node *nomem = new_rd_NoMem(irg);
+// ir_node *new_op, *proj;
+// assert(n > m && "downscale expected");
+// if (mode_is_signed(src_mode) && mode_is_signed(tgt_mode)) {
+// /* ASHL Sn, n - m */
+// new_op = new_rd_ia32_Shl(dbg, irg, block, noreg, noreg, op, noreg, nomem, mode_T);
+// proj = new_rd_Proj(dbg, irg, block, new_op, src_mode, 0);
+// set_ia32_Immop_tarval(new_op, new_tarval_from_long(n - m, mode_Is));
+// set_ia32_am_support(new_op, ia32_am_Source);
+// SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+// /* ASHR Sn, n - m */
+// new_op = new_rd_ia32_Shrs(dbg, irg, block, noreg, noreg, proj, noreg, nomem, mode_T);
+// set_ia32_Immop_tarval(new_op, new_tarval_from_long(n - m, mode_Is));
+// }
+// else {
+// new_op = new_rd_ia32_And(dbg, irg, block, noreg, noreg, op, noreg, nomem, mode_T);
+// set_ia32_Immop_tarval(new_op, new_tarval_from_long((1 << m) - 1, mode_Is));
+// }
+// return new_op;
+//}
+
+/**
+ * Transforms a Conv node.
+ *
+ * @param env The transformation environment
+ * @param op The operator
+ * @return The created ia32 Conv node
+ */
+static ir_node *gen_Conv(ia32_transform_env_t *env, ir_node *op) {
+	dbg_info *dbg      = env->dbg;
+	ir_graph *irg      = env->irg;
+	ir_mode  *src_mode = get_irn_mode(op);
+	ir_mode  *tgt_mode = env->mode;
+	int      src_bits  = get_mode_size_bits(src_mode);
+	int      tgt_bits  = get_mode_size_bits(tgt_mode);
+	ir_node  *block    = env->block;
+	ir_node  *new_op   = NULL;
+	ir_node  *noreg    = ia32_new_NoReg_gp(env->cg);
+	ir_node  *nomem    = new_rd_NoMem(irg);
+	ir_node  *proj;
+	DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
+
+	/* NOTE(review): when the conversion is superfluous the old Conv node is
+	   bypassed via edges_reroute() and this function returns NULL --
+	   callers must be prepared to handle a NULL result. */
+	if (src_mode == tgt_mode) {
+		/* this can happen when changing mode_P to mode_Is */
+		DB((mod, LEVEL_1, "killed Conv(mode, mode) ..."));
+		edges_reroute(env->irn, op, irg);
+	}
+	else if (mode_is_float(src_mode)) {
+		/* we convert from float ... */
+		if (mode_is_float(tgt_mode)) {
+			/* ... to float */
+			if (USE_SSE2(env->cg)) {
+				DB((mod, LEVEL_1, "create Conv(float, float) ..."));
+				new_op = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
+			}
+			else {
+				/* x87 is mode_E internally, so float->float needs no node */
+				DB((mod, LEVEL_1, "killed Conv(float, float) ..."));
+				edges_reroute(env->irn, op, irg);
+			}
+		}
+		else {
+			/* ... to int */
+			DB((mod, LEVEL_1, "create Conv(float, int) ..."));
+			new_op = new_rd_ia32_Conv_FP2I(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
+			/* if target mode is not int: add an additional downscale convert */
+			if (tgt_bits < 32) {
+				/* finalize the FP2I node here; the common postprocessing at
+				   the end of this function then applies to the extra I2I node */
+				SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+				set_ia32_res_mode(new_op, tgt_mode);
+				set_ia32_am_support(new_op, ia32_am_Source);
+
+				proj = new_rd_Proj(dbg, irg, block, new_op, mode_Is, 0);
+
+				if (tgt_bits == 8 || src_bits == 8) {
+					new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, proj, nomem, mode_T);
+				}
+				else {
+					new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, proj, nomem, mode_T);
+				}
+			}
+		}
+	}
+	else {
+		/* we convert from int ... */
+		if (mode_is_float(tgt_mode)) {
+			/* ... to float */
+			DB((mod, LEVEL_1, "create Conv(int, float) ..."));
+			new_op = new_rd_ia32_Conv_I2FP(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
+		}
+		else {
+			/* ... to int */
+			if (get_mode_size_bits(src_mode) == tgt_bits) {
+				DB((mod, LEVEL_1, "omitting equal size Conv(%+F, %+F) ...", src_mode, tgt_mode));
+				edges_reroute(env->irn, op, irg);
+			}
+			else {
+				/* NOTE(review): this DB format string has no %+F placeholders
+				   for the two extra mode arguments -- confirm intended output */
+				DB((mod, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode));
+				if (tgt_bits == 8 || src_bits == 8) {
+					new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
+				}
+				else {
+					new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
+				}
+			}
+		}
+	}
+
+	/* common postprocessing for every newly created conversion node:
+	   attach origin debug info, result mode, AM support and return the
+	   result Proj (pn 0) */
+	if (new_op) {
+		SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+		set_ia32_res_mode(new_op, tgt_mode);
+
+		set_ia32_am_support(new_op, ia32_am_Source);
+
+		new_op = new_rd_Proj(dbg, irg, block, new_op, tgt_mode, 0);
+	}
+
+	return new_op;
}
*
********************************************/
+/**
+ * Transforms a StackParam into an ia32 Load (SSE2: fLoad, x87: vfld)
+ * from the parameter's frame entity.
+ *
+ * @param env   The transformation environment
+ * @return the result Proj (pn 0) of the created Load
+ */
+static ir_node *gen_StackParam(ia32_transform_env_t *env) {
+	ir_node *new_op = NULL;
+	ir_node *node   = env->irn;
+	ir_node *noreg  = ia32_new_NoReg_gp(env->cg);
+	ir_node *mem    = new_rd_NoMem(env->irg);
+	ir_node *ptr    = get_irn_n(node, 0);
+	entity  *ent    = be_get_frame_entity(node);
+	ir_mode *mode   = env->mode;
+
+//	/* If the StackParam has only one user ->            */
+//	/* put it in the Block where the user resides        */
+//	if (get_irn_n_edges(node) == 1) {
+//		env->block = get_nodes_block(get_edge_src_irn(get_irn_out_edge_first(node)));
+//	}
+
+	if (mode_is_float(mode)) {
+		if (USE_SSE2(env->cg))
+			new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+		else {
+			env->cg->used_x87 = 1;
+			new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+		}
+	}
+	else {
+		new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+	}
+
+	/* the Load addresses the parameter through its frame entity */
+	set_ia32_frame_ent(new_op, ent);
+	set_ia32_use_frame(new_op);
+
+	set_ia32_am_support(new_op, ia32_am_Source);
+	set_ia32_op_type(new_op, ia32_AddrModeS);
+	set_ia32_am_flavour(new_op, ia32_B);
+	set_ia32_ls_mode(new_op, mode);
+
+	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
+
+	return new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, 0);
+}
+
+/**
+ * Transforms a FrameAddr into an ia32 Add.
+ */
static ir_node *gen_FrameAddr(ia32_transform_env_t *env) {
ir_node *new_op = NULL;
ir_node *node = env->irn;
set_ia32_frame_ent(new_op, be_get_frame_entity(node));
set_ia32_am_support(new_op, ia32_am_Full);
set_ia32_use_frame(new_op);
+ set_ia32_immop_type(new_op, ia32_ImmConst);
+
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
return new_rd_Proj(env->dbg, env->irg, env->block, new_op, env->mode, 0);
}
+/**
+ * Transforms a FrameLoad into an ia32 Load.
+ */
static ir_node *gen_FrameLoad(ia32_transform_env_t *env) {
	ir_node *new_op = NULL;
+	ir_node *node   = env->irn;
+	ir_node *noreg  = ia32_new_NoReg_gp(env->cg);
+	ir_node *mem    = get_irn_n(node, 0);
+	ir_node *ptr    = get_irn_n(node, 1);
+	entity  *ent    = be_get_frame_entity(node);
+	/* the load mode is taken from the frame entity's type */
+	ir_mode *mode   = get_type_mode(get_entity_type(ent));
+
+	if (mode_is_float(mode)) {
+		if (USE_SSE2(env->cg))
+			new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+		else {
+			env->cg->used_x87 = 1;
+			new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+		}
+	}
+	else {
+		new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
+	}
+
+	set_ia32_frame_ent(new_op, ent);
+	set_ia32_use_frame(new_op);
+
+	set_ia32_am_support(new_op, ia32_am_Source);
+	set_ia32_op_type(new_op, ia32_AddrModeS);
+	set_ia32_am_flavour(new_op, ia32_B);
+	set_ia32_ls_mode(new_op, mode);
+
+	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
	return new_op;
}
+
+/**
+ * Transforms a FrameStore into an ia32 Store.
+ */
static ir_node *gen_FrameStore(ia32_transform_env_t *env) {
	ir_node *new_op = NULL;
+	ir_node *node   = env->irn;
+	ir_node *noreg  = ia32_new_NoReg_gp(env->cg);
+	ir_node *mem    = get_irn_n(node, 0);
+	ir_node *ptr    = get_irn_n(node, 1);
+	ir_node *val    = get_irn_n(node, 2);
+	entity  *ent    = be_get_frame_entity(node);
+	/* the store mode is taken from the stored value */
+	ir_mode *mode   = get_irn_mode(val);
+
+	if (mode_is_float(mode)) {
+		if (USE_SSE2(env->cg))
+			new_op = new_rd_ia32_fStore(env->dbg, env->irg, env->block, ptr, noreg, val, mem, mode_T);
+		else {
+			env->cg->used_x87 = 1;
+			new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, mem, mode_T);
+		}
+	}
+	else if (get_mode_size_bits(mode) == 8) {
+		/* 8 bit stores need the special Store8Bit node */
+		new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, mem, mode_T);
+	}
+	else {
+		new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, mem, mode_T);
+	}
+
+	set_ia32_frame_ent(new_op, ent);
+	set_ia32_use_frame(new_op);
+
+	set_ia32_am_support(new_op, ia32_am_Dest);
+	set_ia32_op_type(new_op, ia32_AddrModeD);
+	set_ia32_am_flavour(new_op, ia32_B);
+	set_ia32_ls_mode(new_op, mode);
+
+	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
	return new_op;
}
*
*********************************************************/
+/**
+ * Transforms a Sub or fSub into a Neg followed by an Add iff OUT_REG == SRC2_REG.
+ * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION.
+ */
+void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) {
+	ia32_transform_env_t tenv;
+	ir_node *in1, *in2, *noreg, *nomem, *res;
+	const arch_register_t *in1_reg, *in2_reg, *out_reg, **slots;
+
+	/* Return if AM node or not a Sub or fSub */
+	if (get_ia32_op_type(irn) != ia32_Normal || !(is_ia32_Sub(irn) || is_ia32_fSub(irn)))
+		return;
+
+	noreg = ia32_new_NoReg_gp(cg);
+	nomem = new_rd_NoMem(cg->irg);
+	/* NOTE(review): ins 2 and 3 are assumed to be the real operands
+	   (ins 0/1 being the unused address mode base/index) -- confirm
+	   against the Sub node's input layout */
+	in1 = get_irn_n(irn, 2);
+	in2 = get_irn_n(irn, 3);
+	in1_reg = arch_get_irn_register(cg->arch_env, in1);
+	in2_reg = arch_get_irn_register(cg->arch_env, in2);
+	out_reg = get_ia32_out_reg(irn, 0);
+
+	tenv.block = get_nodes_block(irn);
+	tenv.dbg = get_irn_dbg_info(irn);
+	tenv.irg = cg->irg;
+	tenv.irn = irn;
+	DEBUG_ONLY(tenv.mod = cg->mod;)
+	tenv.mode = get_ia32_res_mode(irn);
+	tenv.cg = cg;
+
+	/* in case of sub and OUT == SRC2 we can transform the sequence into neg src2 -- add */
+	if (REGS_ARE_EQUAL(out_reg, in2_reg)) {
+		/* generate the neg src2 */
+		res = gen_Minus(&tenv, in2);
+		arch_set_irn_register(cg->arch_env, res, in2_reg);
+
+		/* add to schedule */
+		sched_add_before(irn, res);
+
+		/* generate the add */
+		if (mode_is_float(tenv.mode)) {
+			res = new_rd_ia32_fAdd(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, res, in1, nomem, mode_T);
+			set_ia32_am_support(res, ia32_am_Source);
+		}
+		else {
+			res = new_rd_ia32_Add(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, res, in1, nomem, mode_T);
+			set_ia32_am_support(res, ia32_am_Full);
+		}
+
+		SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(&tenv));
+		/* copy register: the Add must produce its result in the Sub's out register */
+		slots = get_ia32_slots(res);
+		slots[0] = in2_reg;
+
+		/* add to schedule */
+		sched_add_before(irn, res);
+
+		/* remove the old sub */
+		sched_remove(irn);
+
+		/* exchange the add and the sub */
+		exchange(irn, res);
+	}
+}
+
+/**
+ * Transforms a LEA into an Add if possible.
+ * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION.
+ */
+void ia32_transform_lea_to_add(ir_node *irn, ia32_code_gen_t *cg) {
+	ia32_am_flavour_t am_flav;
+	int imm = 0;
+	ir_node *res = NULL;
+	ir_node *nomem, *noreg, *base, *index, *op1, *op2;
+	char *offs;
+	ia32_transform_env_t tenv;
+	const arch_register_t *out_reg, *base_reg, *index_reg;
+
+	/* must be a LEA */
+	if (! is_ia32_Lea(irn))
+		return;
+
+	am_flav = get_ia32_am_flavour(irn);
+
+	/* only some LEAs can be transformed to an Add */
+	if (am_flav != ia32_am_B && am_flav != ia32_am_OB && am_flav != ia32_am_OI && am_flav != ia32_am_BI)
+		return;
+
+	noreg = ia32_new_NoReg_gp(cg);
+	nomem = new_rd_NoMem(cg->irg);
+	op1   = noreg;
+	op2   = noreg;
+	base  = get_irn_n(irn, 0);
+	index = get_irn_n(irn, 1);
+
+	offs = get_ia32_am_offs(irn);
+
+	/* offset has an explicit sign -> we need to skip + */
+	if (offs && offs[0] == '+')
+		offs++;
+
+	out_reg   = arch_get_irn_register(cg->arch_env, irn);
+	base_reg  = arch_get_irn_register(cg->arch_env, base);
+	index_reg = arch_get_irn_register(cg->arch_env, index);
+
+	tenv.block = get_nodes_block(irn);
+	tenv.dbg   = get_irn_dbg_info(irn);
+	tenv.irg   = cg->irg;
+	tenv.irn   = irn;
+	DEBUG_ONLY(tenv.mod = cg->mod;)
+	tenv.mode  = get_irn_mode(irn);
+	tenv.cg    = cg;
+
+	/* use the cached flavour instead of querying the node again */
+	switch(am_flav) {
+		case ia32_am_B:
+			/* out register must be same as base register */
+			if (! REGS_ARE_EQUAL(out_reg, base_reg))
+				return;
+
+			op1 = base;
+			break;
+		case ia32_am_OB:
+			/* out register must be same as base register */
+			if (! REGS_ARE_EQUAL(out_reg, base_reg))
+				return;
+
+			op1 = base;
+			imm = 1;
+			break;
+		case ia32_am_OI:
+			/* out register must be same as index register */
+			if (! REGS_ARE_EQUAL(out_reg, index_reg))
+				return;
+
+			op1 = index;
+			imm = 1;
+			break;
+		case ia32_am_BI:
+			/* out register must be same as one in register */
+			if (REGS_ARE_EQUAL(out_reg, base_reg)) {
+				op1 = base;
+				op2 = index;
+			}
+			else if (REGS_ARE_EQUAL(out_reg, index_reg)) {
+				op1 = index;
+				op2 = base;
+			}
+			else {
+				/* in registers are different from out -> no Add possible */
+				return;
+			}
+			/* explicit break: previously fell through into default */
+			break;
+		default:
+			break;
+	}
+
+	res = new_rd_ia32_Add(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, op1, op2, nomem, mode_T);
+	arch_set_irn_register(cg->arch_env, res, out_reg);
+	set_ia32_op_type(res, ia32_Normal);
+
+	if (imm) {
+		set_ia32_cnst(res, offs);
+		set_ia32_immop_type(res, ia32_ImmConst);
+	}
+
+	SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(&tenv));
+
+	/* add Add to schedule */
+	sched_add_before(irn, res);
+
+	res = new_rd_Proj(tenv.dbg, tenv.irg, tenv.block, res, tenv.mode, 0);
+
+	/* add result Proj to schedule */
+	sched_add_before(irn, res);
+
+	/* remove the old LEA */
+	sched_remove(irn);
+
+	/* exchange the Add and the LEA */
+	exchange(irn, res);
+}
+
/**
* Transforms the given firm node (and maybe some other related nodes)
* into one or more assembler nodes.
*/
void ia32_transform_node(ir_node *node, void *env) {
ia32_code_gen_t *cgenv = (ia32_code_gen_t *)env;
- opcode code = get_irn_opcode(node);
+ opcode code;
ir_node *asm_node = NULL;
ia32_transform_env_t tenv;
tenv.dbg = get_irn_dbg_info(node);
tenv.irg = current_ir_graph;
tenv.irn = node;
- tenv.mod = cgenv->mod;
+ DEBUG_ONLY(tenv.mod = cgenv->mod;)
tenv.mode = get_irn_mode(node);
tenv.cg = cgenv;
DBG((tenv.mod, LEVEL_1, "check %+F ... ", node));
+ code = get_irn_opcode(node);
switch (code) {
BINOP(Add);
BINOP(Sub);
BINOP(Shl);
BINOP(Shr);
BINOP(Shrs);
+ BINOP(Rot);
BINOP(Quot);
BE_GEN(FrameAddr);
BE_GEN(FrameLoad);
BE_GEN(FrameStore);
+ BE_GEN(StackParam);
break;
bad:
fprintf(stderr, "Not implemented: %s\n", get_irn_opname(node));