X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_transform.c;h=bbe8d6711f642268aba97072c9560429bf2fb3f2;hb=e09239ef2a54d46ebac3f24e2afe811c7ead727f;hp=9b96e571873f5d4ac6818ad4d6ef65db05d5cc1b;hpb=76dffc7b0e07793810e0449b9feb6d3e4e57de6b;p=libfirm diff --git a/ir/be/ia32/ia32_transform.c b/ir/be/ia32/ia32_transform.c index 9b96e5718..25b0da86c 100644 --- a/ir/be/ia32/ia32_transform.c +++ b/ir/be/ia32/ia32_transform.c @@ -1,9 +1,29 @@ -/** - * This file implements the IR transformation from firm into ia32-Firm. - * @author Christian Wuerdig - * $Id$ +/* + * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved. + * + * This file is part of libFirm. + * + * This file may be distributed and/or modified under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation and appearing in the file LICENSE.GPL included in the + * packaging of this file. + * + * Licensees holding valid libFirm Professional Edition licenses may use + * this file in accordance with the libFirm Commercial License. + * Agreement provided with the Software. + * + * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE + * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE. */ +/** + * @file + * @brief This file implements the IR transformation from firm into + * ia32-Firm. + * @author Christian Wuerdig, Matthias Braun + * @version $Id$ + */ #ifdef HAVE_CONFIG_H #include "config.h" #endif @@ -21,17 +41,20 @@ #include "irgmod.h" #include "irvrfy.h" #include "ircons.h" -#include "dbginfo.h" +#include "irgwalk.h" #include "irprintf.h" #include "debug.h" #include "irdom.h" -#include "type.h" -#include "entity.h" -#include "archop.h" /* we need this for Min and Max nodes */ +#include "archop.h" +#include "error.h" +#include "height.h" #include "../benode_t.h" #include "../besched.h" #include "../beabi.h" +#include "../beutil.h" +#include "../beirg_t.h" +#include "../betranshlp.h" #include "bearch_ia32_t.h" #include "ia32_nodes_attr.h" @@ -41,35 +64,60 @@ #include "ia32_dbg_stat.h" #include "ia32_optimize.h" #include "ia32_util.h" +#include "ia32_address_mode.h" #include "gen_ia32_regalloc_if.h" -#define SFP_SIGN "0x80000000" -#define DFP_SIGN "0x8000000000000000" -#define SFP_ABS "0x7FFFFFFF" -#define DFP_ABS "0x7FFFFFFFFFFFFFFF" +#define SFP_SIGN "0x80000000" +#define DFP_SIGN "0x8000000000000000" +#define SFP_ABS "0x7FFFFFFF" +#define DFP_ABS "0x7FFFFFFFFFFFFFFF" +#define DFP_INTMAX "9223372036854775807" #define TP_SFP_SIGN "ia32_sfp_sign" #define TP_DFP_SIGN "ia32_dfp_sign" #define TP_SFP_ABS "ia32_sfp_abs" #define TP_DFP_ABS "ia32_dfp_abs" +#define TP_INT_MAX "ia32_int_max" #define ENT_SFP_SIGN "IA32_SFP_SIGN" #define ENT_DFP_SIGN "IA32_DFP_SIGN" #define ENT_SFP_ABS "IA32_SFP_ABS" #define ENT_DFP_ABS "IA32_DFP_ABS" +#define ENT_INT_MAX "IA32_INT_MAX" + +#define mode_vfp (ia32_reg_classes[CLASS_ia32_vfp].mode) +#define mode_xmm (ia32_reg_classes[CLASS_ia32_xmm].mode) + +DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) + +/** hold the current code generator during transformation */ +static ia32_code_gen_t *env_cg = NULL; +static ir_node *initial_fpcw = NULL; +static heights_t *heights = NULL; extern ir_op *get_op_Mulh(void); -typedef ir_node *construct_binop_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \ - ir_node *op1, ir_node *op2, ir_node *mem); +typedef ir_node *construct_binop_func(dbg_info *db, ir_graph *irg, + ir_node *block, ir_node *base, ir_node 
*index, ir_node *mem, + ir_node *op1, ir_node *op2); -typedef ir_node *construct_unop_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \ - ir_node *op, ir_node *mem); +typedef ir_node *construct_shift_func(dbg_info *db, ir_graph *irg, + ir_node *block, ir_node *op1, ir_node *op2); -typedef enum { - ia32_SSIGN, ia32_DSIGN, ia32_SABS, ia32_DABS, ia32_known_const_max -} ia32_known_const_t; +typedef ir_node *construct_binop_dest_func(dbg_info *db, ir_graph *irg, + ir_node *block, ir_node *base, ir_node *index, ir_node *mem, + ir_node *op); + +typedef ir_node *construct_unop_dest_func(dbg_info *db, ir_graph *irg, + ir_node *block, ir_node *base, ir_node *index, ir_node *mem); + +typedef ir_node *construct_binop_float_func(dbg_info *db, ir_graph *irg, + ir_node *block, ir_node *base, ir_node *index, ir_node *mem, + ir_node *op1, ir_node *op2, ir_node *fpcw); + +typedef ir_node *construct_unop_func(dbg_info *db, ir_graph *irg, + ir_node *block, ir_node *op); /**************************************************************************************************** * _ _ __ _ _ @@ -81,112 +129,270 @@ typedef enum { * ****************************************************************************************************/ +static ir_node *try_create_Immediate(ir_node *node, + char immediate_constraint_type); + +static ir_node *create_immediate_or_transform(ir_node *node, + char immediate_constraint_type); + +static ir_node *create_I2I_Conv(ir_mode *src_mode, ir_mode *tgt_mode, + dbg_info *dbgi, ir_node *block, + ir_node *op, ir_node *orig_node); + /** - * Returns 1 if irn is a Const representing 0, 0 otherwise + * Return true if a mode can be stored in the GP register set */ -static INLINE int is_ia32_Const_0(ir_node *irn) { - return (is_ia32_irn(irn) && get_ia32_op_type(irn) == ia32_Const) ? - classify_tarval(get_ia32_Immop_tarval(irn)) == TV_CLASSIFY_NULL : 0; +static INLINE int mode_needs_gp_reg(ir_mode *mode) { + if(mode == mode_fpcw) + return 0; + if(get_mode_size_bits(mode) > 32) + return 0; + return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b; } /** - * Returns 1 if irn is a Const representing 1, 0 otherwise + * creates a unique ident by adding a number to a tag + * + * @param tag the tag string, must contain a %d if a number + * should be added */ -static INLINE int is_ia32_Const_1(ir_node *irn) { - return (is_ia32_irn(irn) && get_ia32_op_type(irn) == ia32_Const) ? - classify_tarval(get_ia32_Immop_tarval(irn)) == TV_CLASSIFY_ONE : 0; +static ident *unique_id(const char *tag) +{ + static unsigned id = 0; + char str[256]; + + snprintf(str, sizeof(str), tag, ++id); + return new_id_from_str(str); } /** - * Returns the Proj representing the UNKNOWN register for given mode. + * Get a primitive type for a mode. */ -static ir_node *be_get_unknown_for_mode(ia32_code_gen_t *cg, ir_mode *mode) { - be_abi_irg_t *babi = cg->birg->abi; - const arch_register_t *unknwn_reg = NULL; +static ir_type *get_prim_type(pmap *types, ir_mode *mode) +{ + pmap_entry *e = pmap_find(types, mode); + ir_type *res; - if (mode_is_float(mode)) { - unknwn_reg = USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_UKNWN] : &ia32_vfp_regs[REG_VFP_UKNWN]; - } - else { - unknwn_reg = &ia32_gp_regs[REG_GP_UKNWN]; + if (! 
e) { + char buf[64]; + snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode)); + res = new_type_primitive(new_id_from_str(buf), mode); + set_type_alignment_bytes(res, 16); + pmap_insert(types, mode, res); } - - return be_abi_get_callee_save_irn(babi, unknwn_reg); + else + res = e->value; + return res; } /** - * Gets the Proj with number pn from irn. + * Get an atomic entity that is initialized with a tarval */ -static ir_node *get_proj_for_pn(const ir_node *irn, long pn) { - const ir_edge_t *edge; - ir_node *proj; - assert(get_irn_mode(irn) == mode_T && "need mode_T"); +static ir_entity *ia32_get_entity_for_tv(ia32_isa_t *isa, ir_node *cnst) +{ + tarval *tv = get_Const_tarval(cnst); + pmap_entry *e = pmap_find(isa->tv_ent, tv); + ir_entity *res; + ir_graph *rem; + + if (! e) { + ir_mode *mode = get_irn_mode(cnst); + ir_type *tp = get_Const_type(cnst); + if (tp == firm_unknown_type) + tp = get_prim_type(isa->types, mode); + + res = new_entity(get_glob_type(), unique_id(".LC%u"), tp); + + set_entity_ld_ident(res, get_entity_ident(res)); + set_entity_visibility(res, visibility_local); + set_entity_variability(res, variability_constant); + set_entity_allocation(res, allocation_static); - foreach_out_edge(irn, edge) { - proj = get_edge_src_irn(edge); + /* we create a new entity here: It's initialization must resist on the + const code irg */ + rem = current_ir_graph; + current_ir_graph = get_const_code_irg(); + set_atomic_ent_value(res, new_Const_type(tv, tp)); + current_ir_graph = rem; - if (get_Proj_proj(proj) == pn) - return proj; + pmap_insert(isa->tv_ent, tv, res); + } else { + res = e->value; } - return NULL; + return res; +} + +static int is_Const_0(ir_node *node) { + return is_Const(node) && is_Const_null(node); +} + +static int is_Const_1(ir_node *node) { + return is_Const(node) && is_Const_one(node); +} + +static int is_Const_Minus_1(ir_node *node) { + return is_Const(node) && is_Const_all_one(node); } /** - * SSE convert of an integer node into a floating point node. + * Transforms a Const. 
*/ -static ir_node *gen_sse_conv_int2float(ia32_code_gen_t *cg, dbg_info *dbg, ir_graph *irg, ir_node *block, - ir_node *in, ir_node *old_node, ir_mode *tgt_mode) -{ - ir_node *noreg = ia32_new_NoReg_gp(cg); - ir_node *nomem = new_rd_NoMem(irg); +static ir_node *gen_Const(ir_node *node) { + ir_graph *irg = current_ir_graph; + ir_node *old_block = get_nodes_block(node); + ir_node *block = be_transform_node(old_block); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + + if (mode_is_float(mode)) { + ir_node *res = NULL; + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *load; + ir_entity *floatent; + + if (USE_SSE2(env_cg)) { + if (is_Const_null(node)) { + load = new_rd_ia32_xZero(dbgi, irg, block); + set_ia32_ls_mode(load, mode); + res = load; + } else { + floatent = ia32_get_entity_for_tv(env_cg->isa, node); + + load = new_rd_ia32_xLoad(dbgi, irg, block, noreg, noreg, nomem, + mode); + set_ia32_op_type(load, ia32_AddrModeS); + set_ia32_am_sc(load, floatent); + set_ia32_flags(load, get_ia32_flags(load) | arch_irn_flags_rematerializable); + res = new_r_Proj(irg, block, load, mode_xmm, pn_ia32_xLoad_res); + } + } else { + if (is_Const_null(node)) { + load = new_rd_ia32_vfldz(dbgi, irg, block); + res = load; + } else if (is_Const_one(node)) { + load = new_rd_ia32_vfld1(dbgi, irg, block); + res = load; + } else { + floatent = ia32_get_entity_for_tv(env_cg->isa, node); + + load = new_rd_ia32_vfld(dbgi, irg, block, noreg, noreg, nomem, mode); + set_ia32_op_type(load, ia32_AddrModeS); + set_ia32_am_sc(load, floatent); + set_ia32_flags(load, get_ia32_flags(load) | arch_irn_flags_rematerializable); + res = new_r_Proj(irg, block, load, mode_vfp, pn_ia32_vfld_res); + } + set_ia32_ls_mode(load, mode); + } + + SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env_cg, node)); + + /* Const Nodes before the initial IncSP are a bad idea, because + * they could be spilled and we have no SP ready at that point yet. + * So add a dependency to the initial frame pointer calculation to + * avoid that situation. + */ + if (get_irg_start_block(irg) == block) { + add_irn_dep(load, get_irg_frame(irg)); + } + + SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env_cg, node)); + return res; + } else { + ir_node *cnst; + tarval *tv = get_Const_tarval(node); + long val; + + tv = tarval_convert_to(tv, mode_Iu); + + if(tv == get_tarval_bad() || tv == get_tarval_undefined() + || tv == NULL) { + panic("couldn't convert constant tarval (%+F)", node); + } + val = get_tarval_long(tv); + + cnst = new_rd_ia32_Const(dbgi, irg, block, NULL, 0, val); + SET_IA32_ORIG_NODE(cnst, ia32_get_old_node_name(env_cg, node)); + if(val == 0) { + set_ia32_flags(cnst, + get_ia32_flags(cnst) | arch_irn_flags_modify_flags); + } - ir_node *conv = new_rd_ia32_Conv_I2FP(dbg, irg, block, noreg, noreg, in, nomem); - set_ia32_src_mode(conv, get_irn_mode(in)); - set_ia32_tgt_mode(conv, tgt_mode); - set_ia32_am_support(conv, ia32_am_Source); - SET_IA32_ORIG_NODE(conv, ia32_get_old_node_name(cg, old_node)); + /* see above */ + if (get_irg_start_block(irg) == block) { + add_irn_dep(cnst, get_irg_frame(irg)); + } - return new_rd_Proj(dbg, irg, block, conv, tgt_mode, pn_ia32_Conv_I2FP_res); + return cnst; + } } /** -* SSE convert of an float node into a double node. 
-*/ -static ir_node *gen_sse_conv_f2d(ia32_code_gen_t *cg, dbg_info *dbg, ir_graph *irg, ir_node *block, - ir_node *in, ir_node *old_node) -{ - ir_node *noreg = ia32_new_NoReg_gp(cg); - ir_node *nomem = new_rd_NoMem(irg); + * Transforms a SymConst. + */ +static ir_node *gen_SymConst(ir_node *node) { + ir_graph *irg = current_ir_graph; + ir_node *old_block = get_nodes_block(node); + ir_node *block = be_transform_node(old_block); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_node *cnst; + + if (mode_is_float(mode)) { + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + + if (USE_SSE2(env_cg)) + cnst = new_rd_ia32_xLoad(dbgi, irg, block, noreg, noreg, nomem, mode_E); + else + cnst = new_rd_ia32_vfld(dbgi, irg, block, noreg, noreg, nomem, mode_E); + set_ia32_am_sc(cnst, get_SymConst_entity(node)); + set_ia32_use_frame(cnst); + } else { + ir_entity *entity; + + if(get_SymConst_kind(node) != symconst_addr_ent) { + panic("backend only support symconst_addr_ent (at %+F)", node); + } + entity = get_SymConst_entity(node); + cnst = new_rd_ia32_Const(dbgi, irg, block, entity, 0, 0); + } + + /* Const Nodes before the initial IncSP are a bad idea, because + * they could be spilled and we have no SP ready at that point yet + */ + if (get_irg_start_block(irg) == block) { + add_irn_dep(cnst, get_irg_frame(irg)); + } - ir_node *conv = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, in, nomem); - set_ia32_src_mode(conv, mode_F); - set_ia32_tgt_mode(conv, mode_D); - set_ia32_am_support(conv, ia32_am_Source); - SET_IA32_ORIG_NODE(conv, ia32_get_old_node_name(cg, old_node)); + SET_IA32_ORIG_NODE(cnst, ia32_get_old_node_name(env_cg, node)); - return new_rd_Proj(dbg, irg, block, conv, mode_D, pn_ia32_Conv_FP2FP_res); + return cnst; } /* Generates an entity for a known FP const (used for FP Neg + Abs) */ -static ident *gen_fp_known_const(ia32_known_const_t kct) { +ir_entity *ia32_gen_fp_known_const(ia32_known_const_t kct) { static const struct { const char *tp_name; const char *ent_name; const char *cnst_str; + char mode; + char align; } names [ia32_known_const_max] = { - { TP_SFP_SIGN, ENT_SFP_SIGN, SFP_SIGN }, /* ia32_SSIGN */ - { TP_DFP_SIGN, ENT_DFP_SIGN, DFP_SIGN }, /* ia32_DSIGN */ - { TP_SFP_ABS, ENT_SFP_ABS, SFP_ABS }, /* ia32_SABS */ - { TP_DFP_ABS, ENT_DFP_ABS, DFP_ABS } /* ia32_DABS */ + { TP_SFP_SIGN, ENT_SFP_SIGN, SFP_SIGN, 0, 16 }, /* ia32_SSIGN */ + { TP_DFP_SIGN, ENT_DFP_SIGN, DFP_SIGN, 1, 16 }, /* ia32_DSIGN */ + { TP_SFP_ABS, ENT_SFP_ABS, SFP_ABS, 0, 16 }, /* ia32_SABS */ + { TP_DFP_ABS, ENT_DFP_ABS, DFP_ABS, 1, 16 }, /* ia32_DABS */ + { TP_INT_MAX, ENT_INT_MAX, DFP_INTMAX, 2, 4 } /* ia32_INTMAX */ }; - static struct entity *ent_cache[ia32_known_const_max]; + static ir_entity *ent_cache[ia32_known_const_max]; const char *tp_name, *ent_name, *cnst_str; ir_type *tp; ir_node *cnst; ir_graph *rem; - entity *ent; + ir_entity *ent; tarval *tv; ir_mode *mode; @@ -195,9 +401,16 @@ static ident *gen_fp_known_const(ia32_known_const_t kct) { tp_name = names[kct].tp_name; cnst_str = names[kct].cnst_str; - mode = kct == ia32_SSIGN || kct == ia32_SABS ? 
mode_Iu : mode_Lu; + switch (names[kct].mode) { + case 0: mode = mode_Iu; break; + case 1: mode = mode_Lu; break; + default: mode = mode_F; break; + } tv = new_tarval_from_str(cnst_str, strlen(cnst_str), mode); tp = new_type_primitive(new_id_from_str(tp_name), mode); + /* set the specified alignment */ + set_type_alignment_bytes(tp, names[kct].align); + ent = new_entity(get_glob_type(), new_id_from_str(ent_name), tp); set_entity_ld_ident(ent, get_entity_ident(ent)); @@ -218,7 +431,7 @@ static ident *gen_fp_known_const(ia32_known_const_t kct) { ent_cache[kct] = ent; } - return get_entity_ident(ent_cache[kct]); + return ent_cache[kct]; } #ifndef NDEBUG @@ -230,452 +443,564 @@ const char *ia32_get_old_node_name(ia32_code_gen_t *cg, ir_node *irn) { lc_eoprintf(firm_get_arg_env(), isa->name_obst, "%+F", irn); obstack_1grow(isa->name_obst, 0); - isa->name_obst_size += obstack_object_size(isa->name_obst); return obstack_finish(isa->name_obst); } #endif /* NDEBUG */ -/* determine if one operator is an Imm */ -static ir_node *get_immediate_op(ir_node *op1, ir_node *op2) { - if (op1) - return is_ia32_Cnst(op1) ? op1 : (is_ia32_Cnst(op2) ? op2 : NULL); - else return is_ia32_Cnst(op2) ? op2 : NULL; -} +int use_source_address_mode(ir_node *block, ir_node *node, ir_node *other) +{ + ir_mode *mode; + ir_node *load; + long pn; + + if(!is_Proj(node)) + return 0; + load = get_Proj_pred(node); + pn = get_Proj_proj(node); + if(!is_Load(load) || pn != pn_Load_res) + return 0; + if(get_nodes_block(load) != block) + return 0; + /* we only use address mode if we're the only user of the load */ + if(get_irn_n_edges(node) > 1) + return 0; -/* determine if one operator is not an Imm */ -static ir_node *get_expr_op(ir_node *op1, ir_node *op2) { - return !is_ia32_Cnst(op1) ? op1 : (!is_ia32_Cnst(op2) ? op2 : NULL); -} + mode = get_irn_mode(node); + if(!mode_needs_gp_reg(mode)) + return 0; + if(other != NULL && get_Load_mode(load) != get_irn_mode(other)) + return 0; + /* don't do AM if other node inputs depend on the load (via mem-proj) */ + if(other != NULL && get_nodes_block(other) == block + && heights_reachable_in_block(heights, other, load)) + return 0; -/** - * Construct a standard binary operation, set AM and immediate if required. - * - * @param env The transformation environment - * @param op1 The first operand - * @param op2 The second operand - * @param func The node constructor function - * @return The constructed ia32 node. - */ -static ir_node *gen_binop(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, construct_binop_func *func) { - ir_node *new_op = NULL; - ir_mode *mode = env->mode; - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg); - ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg); - ir_node *nomem = new_NoMem(); - int is_mul = 0; - ir_node *expr_op, *imm_op; - DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) + return 1; +} - /* Check if immediate optimization is on and */ - /* if it's an operation with immediate. */ - /* Mul/MulS/Mulh don't support immediates */ - if (! 
(env->cg->opt & IA32_OPT_IMMOPS) || - func == new_rd_ia32_Mul || - func == new_rd_ia32_Mulh || - func == new_rd_ia32_MulS) - { - expr_op = op1; - imm_op = NULL; - /* immediate operations are requested, but we are here: it a mul */ - if (env->cg->opt & IA32_OPT_IMMOPS) - is_mul = 1; - } - else if (is_op_commutative(get_irn_op(env->irn))) { - imm_op = get_immediate_op(op1, op2); - expr_op = get_expr_op(op1, op2); +typedef struct ia32_address_mode_t ia32_address_mode_t; +struct ia32_address_mode_t { + ia32_address_t addr; + ir_mode *ls_mode; + ir_node *mem_proj; + ia32_op_type_t op_type; + ir_node *new_op1; + ir_node *new_op2; + int commutative; + int flipped; +}; + +static void build_address(ia32_address_mode_t *am, ir_node *node) +{ + ia32_address_t *addr = &am->addr; + ir_node *load = get_Proj_pred(node); + ir_node *ptr = get_Load_ptr(load); + ir_node *mem = get_Load_mem(load); + ir_node *new_mem = be_transform_node(mem); + ir_node *base; + ir_node *index; + + am->ls_mode = get_Load_mode(load); + am->mem_proj = be_get_Proj_for_pn(load, pn_Load_M); + + /* construct load address */ + ia32_create_address_mode(addr, ptr, 0); + base = addr->base; + index = addr->index; + + if(base == NULL) { + base = ia32_new_NoReg_gp(env_cg); + } else { + base = be_transform_node(base); } - else { - imm_op = get_immediate_op(NULL, op2); - expr_op = get_expr_op(op1, op2); + + if(index == NULL) { + index = ia32_new_NoReg_gp(env_cg); + } else { + index = be_transform_node(index); } - assert((expr_op || imm_op) && "invalid operands"); + addr->base = base; + addr->index = index; + addr->mem = new_mem; +} - if (!expr_op) { - /* We have two consts here: not yet supported */ - imm_op = NULL; - } +static void set_address(ir_node *node, ia32_address_t *addr) +{ + set_ia32_am_scale(node, addr->scale); + set_ia32_am_sc(node, addr->symconst_ent); + set_ia32_am_offs_int(node, addr->offset); + if(addr->symconst_sign) + set_ia32_am_sc_sign(node); + if(addr->use_frame) + set_ia32_use_frame(node); + set_ia32_frame_ent(node, addr->frame_entity); +} - if (mode_is_float(mode)) { - /* floating point operations */ - if (imm_op) { - DB((mod, LEVEL_1, "FP with immediate ...")); - new_op = func(dbg, irg, block, noreg_gp, noreg_gp, expr_op, noreg_fp, nomem); - set_ia32_Immop_attr(new_op, imm_op); - set_ia32_am_support(new_op, ia32_am_None); - } - else { - DB((mod, LEVEL_1, "FP binop ...")); - new_op = func(dbg, irg, block, noreg_gp, noreg_gp, op1, op2, nomem); - set_ia32_am_support(new_op, ia32_am_Source); - } - set_ia32_ls_mode(new_op, mode); - } - else { - /* integer operations */ - if (imm_op) { - /* This is expr + const */ - DB((mod, LEVEL_1, "INT with immediate ...")); - new_op = func(dbg, irg, block, noreg_gp, noreg_gp, expr_op, noreg_gp, nomem); - set_ia32_Immop_attr(new_op, imm_op); +static void set_am_attributes(ir_node *node, ia32_address_mode_t *am) +{ + set_address(node, &am->addr); - /* set AM support */ - set_ia32_am_support(new_op, ia32_am_Dest); - } - else { - DB((mod, LEVEL_1, "INT binop ...")); - /* This is a normal operation */ - new_op = func(dbg, irg, block, noreg_gp, noreg_gp, op1, op2, nomem); + set_ia32_op_type(node, am->op_type); + set_ia32_ls_mode(node, am->ls_mode); + if(am->commutative) + set_ia32_commutative(node); +} - /* set AM support */ - set_ia32_am_support(new_op, ia32_am_Full); +typedef enum { + match_commutative = 1 << 0, + match_am_and_immediates = 1 << 1, + match_no_am = 1 << 2, + match_8_16_bit_am = 1 << 3, + match_no_immediate = 1 << 4 +} match_flags_t; + +static void 
match_arguments(ia32_address_mode_t *am, ir_node *block, + ir_node *op1, ir_node *op2, match_flags_t flags) +{ + ia32_address_t *addr = &am->addr; + ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); + ir_node *new_op1; + ir_node *new_op2; + int use_am; + int commutative; + int use_am_and_immediates; + int use_immediate; + + memset(am, 0, sizeof(am[0])); + + commutative = (flags & match_commutative) != 0; + use_am_and_immediates = (flags & match_am_and_immediates) != 0; + use_am = ! (flags & match_no_am); + use_immediate = !(flags & match_no_immediate); + + assert(op2 != NULL); + assert(!commutative || op1 != NULL); + + if(!(flags & match_8_16_bit_am) + && op1 != NULL + && get_mode_size_bits(get_irn_mode(op1)) < 32) + use_am = 0; + + new_op2 = (use_immediate ? try_create_Immediate(op2, 0) : NULL); + if(new_op2 == NULL && use_am && use_source_address_mode(block, op2, op1)) { + build_address(am, op2); + new_op1 = (op1 == NULL ? NULL : be_transform_node(op1)); + new_op2 = noreg_gp; + am->op_type = ia32_AddrModeS; + } else if(commutative && (new_op2 == NULL || use_am_and_immediates) && + use_am && use_source_address_mode(block, op1, op2)) { + build_address(am, op1); + if(new_op2 != NULL) { + new_op1 = noreg_gp; + } else { + new_op1 = be_transform_node(op2); + new_op2 = noreg_gp; + am->flipped = 1; } - - /* Muls can only have AM source */ - if (is_mul) - set_ia32_am_support(new_op, ia32_am_Source); + am->op_type = ia32_AddrModeS; + } else { + new_op1 = (op1 == NULL ? NULL : be_transform_node(op1)); + if(new_op2 == NULL) + new_op2 = be_transform_node(op2); + am->op_type = ia32_Normal; } + if(addr->base == NULL) + addr->base = noreg_gp; + if(addr->index == NULL) + addr->index = noreg_gp; + if(addr->mem == NULL) + addr->mem = new_NoMem(); + + am->new_op1 = new_op1; + am->new_op2 = new_op2; + am->commutative = commutative; +} - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); +static ir_node *fix_mem_proj(ir_node *node, ia32_address_mode_t *am) +{ + ir_graph *irg = current_ir_graph; + ir_mode *mode; + ir_node *load; - set_ia32_res_mode(new_op, mode); + if(am->mem_proj == NULL) + return node; - if (is_op_commutative(get_irn_op(env->irn))) { - set_ia32_commutative(new_op); - } + /* we have to create a mode_T so the old MemProj can attach to us */ + mode = get_irn_mode(node); + load = get_Proj_pred(am->mem_proj); - return new_rd_Proj(dbg, irg, block, new_op, mode, 0); -} + mark_irn_visited(load); + be_set_transformed_node(load, node); + if(mode != mode_T) { + set_irn_mode(node, mode_T); + return new_rd_Proj(NULL, irg, get_nodes_block(node), node, mode, pn_ia32_res); + } else { + return node; + } +} +/** + * Construct a standard binary operation, set AM and immediate if required. + * + * @param op1 The first operand + * @param op2 The second operand + * @param func The node constructor function + * @return The constructed ia32 node. 
+ */ +static ir_node *gen_binop(ir_node *node, ir_node *op1, ir_node *op2, + construct_binop_func *func, int commutative) +{ + ir_node *src_block = get_nodes_block(node); + ir_node *block = be_transform_node(src_block); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *new_node; + ia32_address_mode_t am; + ia32_address_t *addr = &am.addr; + match_flags_t flags = 0; + + if(commutative) + flags |= match_commutative; + + match_arguments(&am, src_block, op1, op2, flags); + + new_node = func(dbgi, irg, block, addr->base, addr->index, addr->mem, + am.new_op1, am.new_op2); + set_am_attributes(new_node, &am); + /* we can't use source address mode anymore when using immediates */ + if(is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2)) + set_ia32_am_support(new_node, ia32_am_None, ia32_am_arity_none); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); + + new_node = fix_mem_proj(new_node, &am); + + return new_node; +} /** - * Construct a shift/rotate binary operation, sets AM and immediate if required. + * Construct a standard binary operation, set AM and immediate if required. * - * @param env The transformation environment * @param op1 The first operand * @param op2 The second operand * @param func The node constructor function * @return The constructed ia32 node. */ -static ir_node *gen_shift_binop(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, construct_binop_func *func) { - ir_node *new_op = NULL; - ir_mode *mode = env->mode; - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *expr_op, *imm_op; - tarval *tv; - DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) +static ir_node *gen_binop_sse_float(ir_node *node, ir_node *op1, ir_node *op2, + construct_binop_func *func) +{ + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *new_op1 = be_transform_node(op1); + ir_node *new_op2 = be_transform_node(op2); + ir_node *new_node = NULL; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_graph *irg = current_ir_graph; + ir_mode *mode = get_irn_mode(node); + ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); - assert(! mode_is_float(mode) && "Shift/Rotate with float not supported"); + new_node = func(dbgi, irg, block, noreg_gp, noreg_gp, nomem, new_op1, + new_op2); + if (is_op_commutative(get_irn_op(node))) { + set_ia32_commutative(new_node); + } + set_ia32_ls_mode(new_node, mode); - /* Check if immediate optimization is on and */ - /* if it's an operation with immediate. */ - imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? 
get_immediate_op(NULL, op2) : NULL; - expr_op = get_expr_op(op1, op2); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - assert((expr_op || imm_op) && "invalid operands"); + return new_node; +} - if (!expr_op) { - /* We have two consts here: not yet supported */ - imm_op = NULL; - } +static ir_node *get_fpcw(void) +{ + ir_node *fpcw; + if(initial_fpcw != NULL) + return initial_fpcw; - /* Limit imm_op within range imm8 */ - if (imm_op) { - tv = get_ia32_Immop_tarval(imm_op); + fpcw = be_abi_get_ignore_irn(env_cg->birg->abi, + &ia32_fp_cw_regs[REG_FPCW]); + initial_fpcw = be_transform_node(fpcw); - if (tv) { - tv = tarval_mod(tv, new_tarval_from_long(32, mode_Iu)); - set_ia32_Immop_tarval(imm_op, tv); - } - else { - imm_op = NULL; - } - } + return initial_fpcw; +} - /* integer operations */ - if (imm_op) { - /* This is shift/rot with const */ - DB((mod, LEVEL_1, "Shift/Rot with immediate ...")); +/** + * Construct a standard binary operation, set AM and immediate if required. + * + * @param op1 The first operand + * @param op2 The second operand + * @param func The node constructor function + * @return The constructed ia32 node. + */ +static ir_node *gen_binop_x87_float(ir_node *node, ir_node *op1, ir_node *op2, + construct_binop_float_func *func) +{ + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *new_op1 = be_transform_node(op1); + ir_node *new_op2 = be_transform_node(op2); + ir_node *new_node = NULL; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_graph *irg = current_ir_graph; + ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); - new_op = func(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem); - set_ia32_Immop_attr(new_op, imm_op); - } - else { - /* This is a normal shift/rot */ - DB((mod, LEVEL_1, "Shift/Rot binop ...")); - new_op = func(dbg, irg, block, noreg, noreg, op1, op2, nomem); + new_node = func(dbgi, irg, block, noreg_gp, noreg_gp, nomem, new_op1, new_op2, + get_fpcw()); + if (is_op_commutative(get_irn_op(node))) { + set_ia32_commutative(new_node); } - /* set AM support */ - set_ia32_am_support(new_op, ia32_am_Dest); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + return new_node; +} - set_ia32_res_mode(new_op, mode); - set_ia32_emit_cl(new_op); +/** + * Construct a shift/rotate binary operation, sets AM and immediate if required. + * + * @param op1 The first operand + * @param op2 The second operand + * @param func The node constructor function + * @return The constructed ia32 node. + */ +static ir_node *gen_shift_binop(ir_node *node, ir_node *op1, ir_node *op2, + construct_shift_func *func) +{ + dbg_info *dbgi = get_irn_dbg_info(node); + ir_graph *irg = current_ir_graph; + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *new_op1 = be_transform_node(op1); + ir_node *new_op2 = create_immediate_or_transform(op2, 0); + ir_node *res; + + assert(! 
mode_is_float(get_irn_mode(node)) + && "Shift/Rotate with float not supported"); + + res = func(dbgi, irg, new_block, new_op1, new_op2); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + + /* lowered shift instruction may have a dependency operand, handle it here */ + if (get_irn_arity(node) == 3) { + /* we have a dependency */ + ir_node *new_dep = be_transform_node(get_irn_n(node, 2)); + add_irn_dep(res, new_dep); + } - return new_rd_Proj(dbg, irg, block, new_op, mode, 0); + return res; } /** * Construct a standard unary operation, set AM and immediate if required. * - * @param env The transformation environment * @param op The operand * @param func The node constructor function * @return The constructed ia32 node. */ -static ir_node *gen_unop(ia32_transform_env_t *env, ir_node *op, construct_unop_func *func) { - ir_node *new_op = NULL; - ir_mode *mode = env->mode; - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) - - new_op = func(dbg, irg, block, noreg, noreg, op, nomem); - - if (mode_is_float(mode)) { - DB((mod, LEVEL_1, "FP unop ...")); - /* floating point operations don't support implicit store */ - set_ia32_am_support(new_op, ia32_am_None); - } - else { - DB((mod, LEVEL_1, "INT unop ...")); - set_ia32_am_support(new_op, ia32_am_Dest); - } +static ir_node *gen_unop(ir_node *node, ir_node *op, construct_unop_func *func) +{ + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *new_op = be_transform_node(op); + ir_node *new_node = NULL; + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + new_node = func(dbgi, irg, block, new_op); - set_ia32_res_mode(new_op, mode); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); - return new_rd_Proj(dbg, irg, block, new_op, mode, 0); + return new_node; } - - -/** - * Creates an ia32 Add with immediate. - * - * @param env The transformation environment - * @param expr_op The expression operator - * @param const_op The constant - * @return the created ia32 Add node - */ -static ir_node *gen_imm_Add(ia32_transform_env_t *env, ir_node *expr_op, ir_node *const_op) { - ir_node *new_op = NULL; - tarval *tv = get_ia32_Immop_tarval(const_op); - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - int normal_add = 1; - tarval_classification_t class_tv, class_negtv; - DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) - - /* try to optimize to inc/dec */ - if ((env->cg->opt & IA32_OPT_INCDEC) && (get_ia32_op_type(const_op) == ia32_Const)) { - /* optimize tarvals */ - class_tv = classify_tarval(tv); - class_negtv = classify_tarval(tarval_neg(tv)); - - if (class_tv == TV_CLASSIFY_ONE) { /* + 1 == INC */ - DB((env->mod, LEVEL_2, "Add(1) to Inc ... ")); - new_op = new_rd_ia32_Inc(dbg, irg, block, noreg, noreg, expr_op, nomem); - normal_add = 0; - } - else if (class_tv == TV_CLASSIFY_ALL_ONE || class_negtv == TV_CLASSIFY_ONE) { /* + (-1) == DEC */ - DB((mod, LEVEL_2, "Add(-1) to Dec ... 
")); - new_op = new_rd_ia32_Dec(dbg, irg, block, noreg, noreg, expr_op, nomem); - normal_add = 0; - } +static ir_node *create_lea_from_address(dbg_info *dbgi, ir_node *block, + ia32_address_t *addr) +{ + ir_graph *irg = current_ir_graph; + ir_node *base = addr->base; + ir_node *index = addr->index; + ir_node *res; + + if(base == NULL) { + base = ia32_new_NoReg_gp(env_cg); + } else { + base = be_transform_node(base); } - if (normal_add) { - new_op = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem); - set_ia32_Immop_attr(new_op, const_op); - set_ia32_commutative(new_op); + if(index == NULL) { + index = ia32_new_NoReg_gp(env_cg); + } else { + index = be_transform_node(index); } - return new_op; + res = new_rd_ia32_Lea(dbgi, irg, block, base, index); + set_address(res, addr); + + return res; +} + +static int am_has_immediates(const ia32_address_t *addr) +{ + return addr->offset != 0 || addr->symconst_ent != NULL + || addr->frame_entity || addr->use_frame; } /** * Creates an ia32 Add. * - * @param env The transformation environment * @return the created ia32 Add node */ -static ir_node *gen_Add(ia32_transform_env_t *env) { - ir_node *new_op = NULL; - dbg_info *dbg = env->dbg; - ir_mode *mode = env->mode; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *expr_op, *imm_op; - ir_node *op1 = get_Add_left(env->irn); - ir_node *op2 = get_Add_right(env->irn); - - /* Check if immediate optimization is on and */ - /* if it's an operation with immediate. */ - imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(op1, op2) : NULL; - expr_op = get_expr_op(op1, op2); - - assert((expr_op || imm_op) && "invalid operands"); +static ir_node *gen_Add(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *op1 = get_Add_left(node); + ir_node *op2 = get_Add_right(node); + ir_node *new_op; + ir_node *new_op1; + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *src_block = get_nodes_block(node); + ir_node *add_immediate_op; + ia32_address_t addr; + ia32_address_mode_t am; if (mode_is_float(mode)) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) - return gen_binop(env, op1, op2, new_rd_ia32_xAdd); + if (USE_SSE2(env_cg)) + return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xAdd); else - return gen_binop(env, op1, op2, new_rd_ia32_vfadd); - } - else { - /* integer ADD */ - if (!expr_op) { - /* No expr_op means, that we have two const - one symconst and */ - /* one tarval or another symconst - because this case is not */ - /* covered by constant folding */ - /* We need to check for: */ - /* 1) symconst + const -> becomes a LEA */ - /* 2) symconst + symconst -> becomes a const + LEA as the elf */ - /* linker doesn't support two symconsts */ - - if (get_ia32_op_type(op1) == ia32_SymConst && get_ia32_op_type(op2) == ia32_SymConst) { - /* this is the 2nd case */ - new_op = new_rd_ia32_Lea(dbg, irg, block, op1, noreg, mode); - set_ia32_am_sc(new_op, get_ia32_id_cnst(op2)); - set_ia32_am_flavour(new_op, ia32_am_OB); - - DBG_OPT_LEA3(op1, op2, env->irn, new_op); - } - else { - /* this is the 1st case */ - new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode); - - DBG_OPT_LEA3(op1, op2, env->irn, new_op); - - if (get_ia32_op_type(op1) == ia32_SymConst) { - set_ia32_am_sc(new_op, get_ia32_id_cnst(op1)); - add_ia32_am_offs(new_op, 
get_ia32_cnst(op2)); - } - else { - add_ia32_am_offs(new_op, get_ia32_cnst(op1)); - set_ia32_am_sc(new_op, get_ia32_id_cnst(op2)); - } - set_ia32_am_flavour(new_op, ia32_am_O); - } + return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfadd); + } - /* set AM support */ - set_ia32_am_support(new_op, ia32_am_Source); - set_ia32_op_type(new_op, ia32_AddrModeS); + /** + * Rules for an Add: + * 0. Immediate Trees (example Add(Symconst, Const) -> Const) + * 1. Add with immediate -> Lea + * 2. Add with possible source address mode -> Add + * 3. Otherwise -> Lea + */ + memset(&addr, 0, sizeof(addr)); + ia32_create_address_mode(&addr, node, 1); + add_immediate_op = NULL; + /* a constant? */ + if(addr.base == NULL && addr.index == NULL) { + new_op = new_rd_ia32_Const(dbgi, irg, block, addr.symconst_ent, + addr.symconst_sign, addr.offset); + add_irn_dep(new_op, get_irg_frame(irg)); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); + return new_op; + } + /* add with immediate? */ + if(addr.index == NULL) { + add_immediate_op = addr.base; + } else if(addr.base == NULL && addr.scale == 0) { + add_immediate_op = addr.index; + } - /* Lea doesn't need a Proj */ - return new_op; + if(add_immediate_op != NULL) { + if(!am_has_immediates(&addr)) { +#ifdef DEBUG_libfirm + ir_fprintf(stderr, "Optimisation warning Add x,0 (%+F) found\n", + node); +#endif + return be_transform_node(add_immediate_op); } - else if (imm_op) { - /* This is expr + const */ - new_op = gen_imm_Add(env, expr_op, imm_op); - /* set AM support */ - set_ia32_am_support(new_op, ia32_am_Dest); - } - else { - /* This is a normal add */ - new_op = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, op1, op2, nomem); + new_op = create_lea_from_address(dbgi, block, &addr); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); + return new_op; + } - /* set AM support */ - set_ia32_am_support(new_op, ia32_am_Full); - set_ia32_commutative(new_op); - } + /* test if we can use source address mode */ + memset(&am, 0, sizeof(am)); + new_op1 = NULL; + if(use_source_address_mode(src_block, op2, op1)) { + build_address(&am, op2); + new_op1 = be_transform_node(op1); + } else if(use_source_address_mode(src_block, op1, op2)) { + build_address(&am, op1); + new_op1 = be_transform_node(op2); } + /* construct an Add with source address mode */ + if(new_op1 != NULL) { + ia32_address_t *am_addr = &am.addr; + new_op = new_rd_ia32_Add(dbgi, irg, block, am_addr->base, am_addr->index, + am_addr->mem, new_op1, noreg); + set_address(new_op, am_addr); + set_ia32_op_type(new_op, ia32_AddrModeS); + set_ia32_ls_mode(new_op, am.ls_mode); + set_ia32_commutative(new_op); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + new_op = fix_mem_proj(new_op, &am); - set_ia32_res_mode(new_op, mode); + return new_op; + } - return new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_Add_res); + /* otherwise construct a lea */ + new_op = create_lea_from_address(dbgi, block, &addr); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); + return new_op; } - - /** * Creates an ia32 Mul. 
* - * @param env The transformation environment * @return the created ia32 Mul node */ -static ir_node *gen_Mul(ia32_transform_env_t *env) { - ir_node *op1 = get_Mul_left(env->irn); - ir_node *op2 = get_Mul_right(env->irn); - ir_node *new_op; +static ir_node *gen_Mul(ir_node *node) { + ir_node *op1 = get_Mul_left(node); + ir_node *op2 = get_Mul_right(node); + ir_mode *mode = get_irn_mode(node); - if (mode_is_float(env->mode)) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) - new_op = gen_binop(env, op1, op2, new_rd_ia32_xMul); + if (mode_is_float(mode)) { + if (USE_SSE2(env_cg)) + return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xMul); else - new_op = gen_binop(env, op1, op2, new_rd_ia32_vfmul); - } - else { - new_op = gen_binop(env, op1, op2, new_rd_ia32_Mul); + return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfmul); } - return new_op; + /* + for the lower 32bit of the result it doesn't matter whether we use + signed or unsigned multiplication so we use IMul as it has fewer + constraints + */ + return gen_binop(node, op1, op2, new_rd_ia32_IMul, 1); } - - /** * Creates an ia32 Mulh. * Note: Mul produces a 64Bit result and Mulh returns the upper 32 bit of * this result while Mul returns the lower 32 bit. * - * @param env The transformation environment * @return the created ia32 Mulh node */ -static ir_node *gen_Mulh(ia32_transform_env_t *env) { - ir_node *op1 = get_irn_n(env->irn, 0); - ir_node *op2 = get_irn_n(env->irn, 1); - ir_node *proj_EAX, *proj_EDX, *mulh; - ir_node *in[1]; - - assert(!mode_is_float(env->mode) && "Mulh with float not supported"); - proj_EAX = gen_binop(env, op1, op2, new_rd_ia32_Mulh); - mulh = get_Proj_pred(proj_EAX); - proj_EDX = new_rd_Proj(env->dbg, env->irg, env->block, mulh, env->mode, pn_EDX); - - /* to be on the save side */ - set_Proj_proj(proj_EAX, pn_EAX); - - if (is_ia32_ImmConst(mulh) || is_ia32_ImmSymConst(mulh)) { - /* Mulh with const cannot have AM */ - set_ia32_am_support(mulh, ia32_am_None); - } - else { - /* Mulh cannot have AM for destination */ - set_ia32_am_support(mulh, ia32_am_Source); +static ir_node *gen_Mulh(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *op1 = get_irn_n(node, 0); + ir_node *new_op1 = be_transform_node(op1); + ir_node *op2 = get_irn_n(node, 1); + ir_node *new_op2 = be_transform_node(op2); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_mode *mode = get_irn_mode(node); + ir_node *proj_EDX, *res; + + assert(!mode_is_float(mode) && "Mulh with float not supported"); + if (mode_is_signed(mode)) { + res = new_rd_ia32_IMul1OP(dbgi, irg, block, noreg, noreg, new_NoMem(), + new_op1, new_op2); + } else { + res = new_rd_ia32_Mul(dbgi, irg, block, noreg, noreg, new_NoMem(), new_op1, + new_op2); } - in[0] = proj_EAX; + set_ia32_commutative(res); - /* keep EAX */ - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], env->irg, env->block, 1, in); + proj_EDX = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_EDX); return proj_EDX; } @@ -685,15 +1010,37 @@ static ir_node *gen_Mulh(ia32_transform_env_t *env) { /** * Creates an ia32 And. * - * @param env The transformation environment * @return The created ia32 And node */ -static ir_node *gen_And(ia32_transform_env_t *env) { - ir_node *op1 = get_And_left(env->irn); - ir_node *op2 = get_And_right(env->irn); +static ir_node *gen_And(ir_node *node) { + ir_node *op1 = get_And_left(node); + ir_node *op2 = get_And_right(node); + assert(! 
mode_is_float(get_irn_mode(node))); + + /* is it a zero extension? */ + if (is_Const(op2)) { + tarval *tv = get_Const_tarval(op2); + long v = get_tarval_long(tv); + + if (v == 0xFF || v == 0xFFFF) { + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_mode *src_mode; + ir_node *res; + + if(v == 0xFF) { + src_mode = mode_Bu; + } else { + assert(v == 0xFFFF); + src_mode = mode_Hu; + } + res = create_I2I_Conv(src_mode, mode_Iu, dbgi, block, op1, node); - assert (! mode_is_float(env->mode)); - return gen_binop(env, op1, op2, new_rd_ia32_And); + return res; + } + } + + return gen_binop(node, op1, op2, new_rd_ia32_And, 1); } @@ -701,15 +1048,14 @@ static ir_node *gen_And(ia32_transform_env_t *env) { /** * Creates an ia32 Or. * - * @param env The transformation environment * @return The created ia32 Or node */ -static ir_node *gen_Or(ia32_transform_env_t *env) { - ir_node *op1 = get_Or_left(env->irn); - ir_node *op2 = get_Or_right(env->irn); +static ir_node *gen_Or(ir_node *node) { + ir_node *op1 = get_Or_left(node); + ir_node *op2 = get_Or_right(node); - assert (! mode_is_float(env->mode)); - return gen_binop(env, op1, op2, new_rd_ia32_Or); + assert (! mode_is_float(get_irn_mode(node))); + return gen_binop(node, op1, op2, new_rd_ia32_Or, 1); } @@ -717,310 +1063,119 @@ static ir_node *gen_Or(ia32_transform_env_t *env) { /** * Creates an ia32 Eor. * - * @param env The transformation environment * @return The created ia32 Eor node */ -static ir_node *gen_Eor(ia32_transform_env_t *env) { - ir_node *op1 = get_Eor_left(env->irn); - ir_node *op2 = get_Eor_right(env->irn); +static ir_node *gen_Eor(ir_node *node) { + ir_node *op1 = get_Eor_left(node); + ir_node *op2 = get_Eor_right(node); - assert(! mode_is_float(env->mode)); - return gen_binop(env, op1, op2, new_rd_ia32_Eor); + assert(! mode_is_float(get_irn_mode(node))); + return gen_binop(node, op1, op2, new_rd_ia32_Xor, 1); } - /** - * Creates an ia32 Max. + * Creates an ia32 Sub. * - * @param env The transformation environment - * @return the created ia32 Max node + * @return The created ia32 Sub node */ -static ir_node *gen_Max(ia32_transform_env_t *env) { - ir_node *op1 = get_irn_n(env->irn, 0); - ir_node *op2 = get_irn_n(env->irn, 1); - ir_node *new_op; +static ir_node *gen_Sub(ir_node *node) { + ir_node *op1 = get_Sub_left(node); + ir_node *op2 = get_Sub_right(node); + ir_mode *mode = get_irn_mode(node); - if (mode_is_float(env->mode)) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) - new_op = gen_binop(env, op1, op2, new_rd_ia32_xMax); - else { - assert(0); - } + if (mode_is_float(mode)) { + if (USE_SSE2(env_cg)) + return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xSub); + else + return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfsub); } - else { - new_op = new_rd_ia32_Max(env->dbg, env->irg, env->block, op1, op2, env->mode); - set_ia32_am_support(new_op, ia32_am_None); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + + if(is_Const(op2)) { + ir_fprintf(stderr, "Optimisation warning: found sub with const (%+F)\n", + node); } - return new_op; + return gen_binop(node, op1, op2, new_rd_ia32_Sub, 0); } /** - * Creates an ia32 Min. + * Generates an ia32 DivMod with additional infrastructure for the + * register allocator if needed. 
* - * @param env The transformation environment - * @return the created ia32 Min node + * @param dividend -no comment- :) + * @param divisor -no comment- :) + * @param dm_flav flavour_Div/Mod/DivMod + * @return The created ia32 DivMod node */ -static ir_node *gen_Min(ia32_transform_env_t *env) { - ir_node *op1 = get_irn_n(env->irn, 0); - ir_node *op2 = get_irn_n(env->irn, 1); - ir_node *new_op; - - if (mode_is_float(env->mode)) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) - new_op = gen_binop(env, op1, op2, new_rd_ia32_xMin); - else { - assert(0); - } - } - else { - new_op = new_rd_ia32_Min(env->dbg, env->irg, env->block, op1, op2, env->mode); - set_ia32_am_support(new_op, ia32_am_None); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); - } - - return new_op; -} - - - -/** - * Creates an ia32 Sub with immediate. - * - * @param env The transformation environment - * @param expr_op The first operator - * @param const_op The constant operator - * @return The created ia32 Sub node - */ -static ir_node *gen_imm_Sub(ia32_transform_env_t *env, ir_node *expr_op, ir_node *const_op) { - ir_node *new_op = NULL; - tarval *tv = get_ia32_Immop_tarval(const_op); - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - int normal_sub = 1; - tarval_classification_t class_tv, class_negtv; - DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) - - /* try to optimize to inc/dec */ - if ((env->cg->opt & IA32_OPT_INCDEC) && tv) { - /* optimize tarvals */ - class_tv = classify_tarval(tv); - class_negtv = classify_tarval(tarval_neg(tv)); - - if (class_tv == TV_CLASSIFY_ONE) { /* - 1 == DEC */ - DB((mod, LEVEL_2, "Sub(1) to Dec ... ")); - new_op = new_rd_ia32_Dec(dbg, irg, block, noreg, noreg, expr_op, nomem); - normal_sub = 0; - } - else if (class_negtv == TV_CLASSIFY_ONE) { /* - (-1) == Sub */ - DB((mod, LEVEL_2, "Sub(-1) to Inc ... ")); - new_op = new_rd_ia32_Inc(dbg, irg, block, noreg, noreg, expr_op, nomem); - normal_sub = 0; - } - } - - if (normal_sub) { - new_op = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem); - set_ia32_Immop_attr(new_op, const_op); - } - - return new_op; -} - -/** - * Creates an ia32 Sub. - * - * @param env The transformation environment - * @return The created ia32 Sub node - */ -static ir_node *gen_Sub(ia32_transform_env_t *env) { - ir_node *new_op = NULL; - dbg_info *dbg = env->dbg; - ir_mode *mode = env->mode; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *op1 = get_Sub_left(env->irn); - ir_node *op2 = get_Sub_right(env->irn); - ir_node *expr_op, *imm_op; - - /* Check if immediate optimization is on and */ - /* if it's an operation with immediate. */ - imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(NULL, op2) : NULL; - expr_op = get_expr_op(op1, op2); - - assert((expr_op || imm_op) && "invalid operands"); - - if (mode_is_float(mode)) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) - return gen_binop(env, op1, op2, new_rd_ia32_xSub); - else - return gen_binop(env, op1, op2, new_rd_ia32_vfsub); - } - else { - /* integer SUB */ - if (! 
expr_op) { - /* No expr_op means, that we have two const - one symconst and */ - /* one tarval or another symconst - because this case is not */ - /* covered by constant folding */ - /* We need to check for: */ - /* 1) symconst - const -> becomes a LEA */ - /* 2) symconst - symconst -> becomes a const - LEA as the elf */ - /* linker doesn't support two symconsts */ - - if (get_ia32_op_type(op1) == ia32_SymConst && get_ia32_op_type(op2) == ia32_SymConst) { - /* this is the 2nd case */ - new_op = new_rd_ia32_Lea(dbg, irg, block, op1, noreg, mode); - set_ia32_am_sc(new_op, get_ia32_id_cnst(op2)); - set_ia32_am_sc_sign(new_op); - set_ia32_am_flavour(new_op, ia32_am_OB); - - DBG_OPT_LEA3(op1, op2, env->irn, new_op); - } - else { - /* this is the 1st case */ - new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode); - - DBG_OPT_LEA3(op1, op2, env->irn, new_op); - - if (get_ia32_op_type(op1) == ia32_SymConst) { - set_ia32_am_sc(new_op, get_ia32_id_cnst(op1)); - sub_ia32_am_offs(new_op, get_ia32_cnst(op2)); - } - else { - add_ia32_am_offs(new_op, get_ia32_cnst(op1)); - set_ia32_am_sc(new_op, get_ia32_id_cnst(op2)); - set_ia32_am_sc_sign(new_op); - } - set_ia32_am_flavour(new_op, ia32_am_O); - } - - /* set AM support */ - set_ia32_am_support(new_op, ia32_am_Source); - set_ia32_op_type(new_op, ia32_AddrModeS); - - /* Lea doesn't need a Proj */ - return new_op; - } - else if (imm_op) { - /* This is expr - const */ - new_op = gen_imm_Sub(env, expr_op, imm_op); - - /* set AM support */ - set_ia32_am_support(new_op, ia32_am_Dest); - } - else { - /* This is a normal sub */ - new_op = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, op1, op2, nomem); - - /* set AM support */ - set_ia32_am_support(new_op, ia32_am_Full); - } - } - - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); - - set_ia32_res_mode(new_op, mode); - - return new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_Sub_res); -} - - - -/** - * Generates an ia32 DivMod with additional infrastructure for the - * register allocator if needed. 
- * - * @param env The transformation environment - * @param dividend -no comment- :) - * @param divisor -no comment- :) - * @param dm_flav flavour_Div/Mod/DivMod - * @return The created ia32 DivMod node - */ -static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *dividend, ir_node *divisor, ia32_op_flavour_t dm_flav) { - ir_node *res, *proj; - ir_node *edx_node, *cltd; - ir_node *in_keep[1]; - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_mode *mode = env->mode; - ir_node *irn = env->irn; - ir_node *mem; - int n; - +static ir_node *generate_DivMod(ir_node *node, ir_node *dividend, + ir_node *divisor, ia32_op_flavour_t dm_flav) +{ + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *new_dividend = be_transform_node(dividend); + ir_node *new_divisor = be_transform_node(divisor); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *res, *proj_div, *proj_mod; + ir_node *sign_extension; + ir_node *mem, *new_mem; + int has_exc; + + proj_div = proj_mod = NULL; + has_exc = 0; switch (dm_flav) { case flavour_Div: - mem = get_Div_mem(irn); - mode = get_irn_mode(get_proj_for_pn(irn, pn_Div_res)); + mem = get_Div_mem(node); + mode = get_Div_resmode(node); + proj_div = be_get_Proj_for_pn(node, pn_Div_res); + has_exc = be_get_Proj_for_pn(node, pn_Div_X_except) != NULL; break; case flavour_Mod: - mem = get_Mod_mem(irn); - mode = get_irn_mode(get_proj_for_pn(irn, pn_Mod_res)); + mem = get_Mod_mem(node); + mode = get_Mod_resmode(node); + proj_mod = be_get_Proj_for_pn(node, pn_Mod_res); + has_exc = be_get_Proj_for_pn(node, pn_Mod_X_except) != NULL; break; case flavour_DivMod: - mem = get_DivMod_mem(irn); - mode = get_irn_mode(get_proj_for_pn(irn, pn_DivMod_res_div)); + mem = get_DivMod_mem(node); + mode = get_DivMod_resmode(node); + proj_div = be_get_Proj_for_pn(node, pn_DivMod_res_div); + proj_mod = be_get_Proj_for_pn(node, pn_DivMod_res_mod); + has_exc = be_get_Proj_for_pn(node, pn_DivMod_X_except) != NULL; break; default: - assert(0); + panic("invalid divmod flavour!"); } + new_mem = be_transform_node(mem); if (mode_is_signed(mode)) { /* in signed mode, we need to sign extend the dividend */ - cltd = new_rd_ia32_Cdq(dbg, irg, block, dividend); - dividend = new_rd_Proj(dbg, irg, block, cltd, mode_Is, pn_ia32_Cdq_EAX); - edx_node = new_rd_Proj(dbg, irg, block, cltd, mode_Is, pn_ia32_Cdq_EDX); + ir_node *produceval = new_rd_ia32_ProduceVal(dbgi, irg, block); + add_irn_dep(produceval, get_irg_frame(irg)); + sign_extension = new_rd_ia32_Cltd(dbgi, irg, block, new_dividend, + produceval); + } else { + sign_extension = new_rd_ia32_Const(dbgi, irg, block, NULL, 0, 0); + set_ia32_flags(sign_extension, get_ia32_flags(sign_extension) | arch_irn_flags_modify_flags); + add_irn_dep(sign_extension, get_irg_frame(irg)); } - else { - edx_node = new_rd_ia32_Const(dbg, irg, block, get_irg_no_mem(irg), mode_Iu); - set_ia32_Const_type(edx_node, ia32_Const); - set_ia32_Immop_tarval(edx_node, get_tarval_null(mode_Iu)); - } - - res = new_rd_ia32_DivMod(dbg, irg, block, dividend, divisor, edx_node, mem, dm_flav); - - set_ia32_n_res(res, 2); - - /* Only one proj is used -> We must add a second proj and */ - /* connect this one to a Keep node to eat up the second */ - /* destroyed register. 
*/ - n = get_irn_n_edges(irn); - proj = NULL; - if (n == 2) - proj = ia32_get_proj_for_mode(irn, mode_M); - - /* in case of two projs, one must be the memory proj */ - if (n == 1 || (n == 2 && proj)) { - proj = ia32_get_res_proj(irn); - assert(proj && "Result proj expected"); - - if (get_irn_op(irn) == op_Div) { - set_Proj_proj(proj, pn_DivMod_res_div); - in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode, pn_DivMod_res_mod); - } - else { - set_Proj_proj(proj, pn_DivMod_res_mod); - in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode, pn_DivMod_res_div); - } - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep); + if (mode_is_signed(mode)) { + res = new_rd_ia32_IDiv(dbgi, irg, block, noreg, noreg, new_mem, + new_dividend, sign_extension, new_divisor, dm_flav); + } else { + res = new_rd_ia32_Div(dbgi, irg, block, noreg, noreg, new_mem, new_dividend, + sign_extension, new_divisor, dm_flav); } - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); + set_ia32_exc_label(res, has_exc); + set_irn_pinned(res, get_irn_pinned(node)); - set_ia32_res_mode(res, mode); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); return res; } @@ -1029,26 +1184,27 @@ static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *dividend, ir /** * Wrapper for generate_DivMod. Sets flavour_Mod. * - * @param env The transformation environment */ -static ir_node *gen_Mod(ia32_transform_env_t *env) { - return generate_DivMod(env, get_Mod_left(env->irn), get_Mod_right(env->irn), flavour_Mod); +static ir_node *gen_Mod(ir_node *node) { + return generate_DivMod(node, get_Mod_left(node), + get_Mod_right(node), flavour_Mod); } /** * Wrapper for generate_DivMod. Sets flavour_Div. * - * @param env The transformation environment */ -static ir_node *gen_Div(ia32_transform_env_t *env) { - return generate_DivMod(env, get_Div_left(env->irn), get_Div_right(env->irn), flavour_Div); +static ir_node *gen_Div(ir_node *node) { + return generate_DivMod(node, get_Div_left(node), + get_Div_right(node), flavour_Div); } /** * Wrapper for generate_DivMod. Sets flavour_DivMod. */ -static ir_node *gen_DivMod(ia32_transform_env_t *env) { - return generate_DivMod(env, get_DivMod_left(env->irn), get_DivMod_right(env->irn), flavour_DivMod); +static ir_node *gen_DivMod(ir_node *node) { + return generate_DivMod(node, get_DivMod_left(node), + get_DivMod_right(node), flavour_DivMod); } @@ -1056,48 +1212,63 @@ static ir_node *gen_DivMod(ia32_transform_env_t *env) { /** * Creates an ia32 floating Div. 
* - * @param env The transformation environment * @return The created ia32 xDiv node */ -static ir_node *gen_Quot(ia32_transform_env_t *env) { - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *new_op; - ir_node *nomem = new_rd_NoMem(env->irg); - ir_node *op1 = get_Quot_left(env->irn); - ir_node *op2 = get_Quot_right(env->irn); - - FP_USED(env->cg); - if (USE_SSE2(env->cg)) { - if (is_ia32_xConst(op2)) { - new_op = new_rd_ia32_xDiv(env->dbg, env->irg, env->block, noreg, noreg, op1, noreg, nomem); - set_ia32_am_support(new_op, ia32_am_None); - set_ia32_Immop_attr(new_op, op2); - } - else { - new_op = new_rd_ia32_xDiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem); - set_ia32_am_support(new_op, ia32_am_Source); - } - } - else { - new_op = new_rd_ia32_vfdiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem); - set_ia32_am_support(new_op, ia32_am_Source); +static ir_node *gen_Quot(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *op1 = get_Quot_left(node); + ir_node *new_op1 = be_transform_node(op1); + ir_node *op2 = get_Quot_right(node); + ir_node *new_op2 = be_transform_node(op2); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_rd_NoMem(current_ir_graph); + ir_node *new_op; + + if (USE_SSE2(env_cg)) { + ir_mode *mode = get_irn_mode(op1); + new_op = new_rd_ia32_xDiv(dbgi, irg, block, noreg, noreg, nomem, new_op1, + new_op2); + set_ia32_ls_mode(new_op, mode); + } else { + new_op = new_rd_ia32_vfdiv(dbgi, irg, block, noreg, noreg, nomem, new_op1, + new_op2, get_fpcw()); } - set_ia32_res_mode(new_op, get_irn_mode(get_proj_for_pn(env->irn, pn_Quot_res))); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); - + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); return new_op; } - /** * Creates an ia32 Shl. * - * @param env The transformation environment * @return The created ia32 Shl node */ -static ir_node *gen_Shl(ia32_transform_env_t *env) { - return gen_shift_binop(env, get_Shl_left(env->irn), get_Shl_right(env->irn), new_rd_ia32_Shl); +static ir_node *gen_Shl(ir_node *node) { + ir_node *right = get_Shl_right(node); + + /* test whether we can build a lea */ + if(is_Const(right)) { + tarval *tv = get_Const_tarval(right); + if(tarval_is_long(tv)) { + long val = get_tarval_long(tv); + if(val >= 0 && val <= 3) { + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *base = ia32_new_NoReg_gp(env_cg); + ir_node *index = be_transform_node(get_Shl_left(node)); + ir_node *res = new_rd_ia32_Lea(dbgi, irg, block, base, index); + set_ia32_am_scale(res, val); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + return res; + } + } + } + + return gen_shift_binop(node, get_Shl_left(node), get_Shl_right(node), + new_rd_ia32_Shl); } @@ -1105,23 +1276,72 @@ static ir_node *gen_Shl(ia32_transform_env_t *env) { /** * Creates an ia32 Shr. * - * @param env The transformation environment * @return The created ia32 Shr node */ -static ir_node *gen_Shr(ia32_transform_env_t *env) { - return gen_shift_binop(env, get_Shr_left(env->irn), get_Shr_right(env->irn), new_rd_ia32_Shr); +static ir_node *gen_Shr(ir_node *node) { + return gen_shift_binop(node, get_Shr_left(node), + get_Shr_right(node), new_rd_ia32_Shr); } /** - * Creates an ia32 Shrs. + * Creates an ia32 Sar. 
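+ * A shift right by 31 (mode_Is) is recognised as a sign extension and
+ * becomes a Cltd; (x << 16|24) >>s 16|24 becomes an 8/16 bit Conv.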
* - * @param env The transformation environment * @return The created ia32 Shrs node */ -static ir_node *gen_Shrs(ia32_transform_env_t *env) { - return gen_shift_binop(env, get_Shrs_left(env->irn), get_Shrs_right(env->irn), new_rd_ia32_Shrs); +static ir_node *gen_Shrs(ir_node *node) { + ir_node *left = get_Shrs_left(node); + ir_node *right = get_Shrs_right(node); + ir_mode *mode = get_irn_mode(node); + if(is_Const(right) && mode == mode_Is) { + tarval *tv = get_Const_tarval(right); + long val = get_tarval_long(tv); + if(val == 31) { + /* this is a sign extension */ + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *op = left; + ir_node *new_op = be_transform_node(op); + ir_node *pval = new_rd_ia32_ProduceVal(dbgi, irg, block); + add_irn_dep(pval, get_irg_frame(irg)); + + return new_rd_ia32_Cltd(dbgi, irg, block, new_op, pval); + } + } + + /* 8 or 16 bit sign extension? */ + if(is_Const(right) && is_Shl(left) && mode == mode_Is) { + ir_node *shl_left = get_Shl_left(left); + ir_node *shl_right = get_Shl_right(left); + if(is_Const(shl_right)) { + tarval *tv1 = get_Const_tarval(right); + tarval *tv2 = get_Const_tarval(shl_right); + if(tv1 == tv2 && tarval_is_long(tv1)) { + long val = get_tarval_long(tv1); + if(val == 16 || val == 24) { + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_mode *src_mode; + ir_node *res; + + if(val == 24) { + src_mode = mode_Bs; + } else { + assert(val == 16); + src_mode = mode_Hs; + } + res = create_I2I_Conv(src_mode, mode_Is, dbgi, block, + shl_left, node); + + return res; + } + } + } + } + + return gen_shift_binop(node, left, right, new_rd_ia32_Sar); } @@ -1129,13 +1349,13 @@ static ir_node *gen_Shrs(ia32_transform_env_t *env) { /** * Creates an ia32 RotL. * - * @param env The transformation environment * @param op1 The first operator * @param op2 The second operator * @return The created ia32 RotL node */ -static ir_node *gen_RotL(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) { - return gen_shift_binop(env, op1, op2, new_rd_ia32_RotL); +static ir_node *gen_RotL(ir_node *node, + ir_node *op1, ir_node *op2) { + return gen_shift_binop(node, op1, op2, new_rd_ia32_Rol); } @@ -1145,13 +1365,13 @@ static ir_node *gen_RotL(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) * NOTE: There is no RotR with immediate because this would always be a RotL * "imm-mode_size_bits" which can be pre-calculated. * - * @param env The transformation environment * @param op1 The first operator * @param op2 The second operator * @return The created ia32 RotR node */ -static ir_node *gen_RotR(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) { - return gen_shift_binop(env, op1, op2, new_rd_ia32_RotR); +static ir_node *gen_RotR(ir_node *node, ir_node *op1, + ir_node *op2) { + return gen_shift_binop(node, op1, op2, new_rd_ia32_Ror); } @@ -1159,43 +1379,38 @@ static ir_node *gen_RotR(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) /** * Creates an ia32 RotR or RotL (depending on the found pattern). 
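+ * A rotate left by (mode_size_bits - x) is turned into a rotate right
+ * by x; everything else stays a RotL.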
* - * @param env The transformation environment * @return The created ia32 RotL or RotR node */ -static ir_node *gen_Rot(ia32_transform_env_t *env) { +static ir_node *gen_Rot(ir_node *node) { ir_node *rotate = NULL; - ir_node *op1 = get_Rot_left(env->irn); - ir_node *op2 = get_Rot_right(env->irn); + ir_node *op1 = get_Rot_left(node); + ir_node *op2 = get_Rot_right(node); /* Firm has only Rot (which is a RotL), so we are looking for a right (op2) operand "-e+mode_size_bits" (it's an already modified "mode_size_bits-e", that means we can create a RotR instead of an Add and a RotL */ - if (is_Proj(op2)) { - ir_node *pred = get_Proj_pred(op2); - - if (is_ia32_Add(pred)) { - ir_node *pred_pred = get_irn_n(pred, 2); - tarval *tv = get_ia32_Immop_tarval(pred); - long bits = get_mode_size_bits(env->mode); - - if (is_Proj(pred_pred)) { - pred_pred = get_Proj_pred(pred_pred); - } - - if (is_ia32_Minus(pred_pred) && - tarval_is_long(tv) && - get_tarval_long(tv) == bits) + if (get_irn_op(op2) == op_Add) { + ir_node *add = op2; + ir_node *left = get_Add_left(add); + ir_node *right = get_Add_right(add); + if (is_Const(right)) { + tarval *tv = get_Const_tarval(right); + ir_mode *mode = get_irn_mode(node); + long bits = get_mode_size_bits(mode); + + if (get_irn_op(left) == op_Minus && + tarval_is_long(tv) && + get_tarval_long(tv) == bits) { - DB((env->mod, LEVEL_1, "RotL into RotR ... ")); - rotate = gen_RotR(env, op1, get_irn_n(pred_pred, 2)); + DB((dbg, LEVEL_1, "RotL into RotR ... ")); + rotate = gen_RotR(node, op1, get_Minus_op(left)); } - } } - if (!rotate) { - rotate = gen_RotL(env, op1, op2); + if (rotate == NULL) { + rotate = gen_RotL(node, op1, op2); } return rotate; @@ -1206,69 +1421,86 @@ static ir_node *gen_Rot(ia32_transform_env_t *env) { /** * Transforms a Minus node. * - * @param env The transformation environment - * @param op The Minus operand * @return The created ia32 Minus node */ -ir_node *gen_Minus_ex(ia32_transform_env_t *env, ir_node *op) { - ident *name; - ir_node *new_op; - int size; - - if (mode_is_float(env->mode)) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) { - ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg); - ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg); - ir_node *nomem = new_rd_NoMem(env->irg); - - new_op = new_rd_ia32_xEor(env->dbg, env->irg, env->block, noreg_gp, noreg_gp, op, noreg_fp, nomem); - - size = get_mode_size_bits(env->mode); - name = gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN); +static ir_node *gen_Minus(ir_node *node) +{ + ir_node *op = get_Minus_op(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_entity *ent; + ir_node *res; + int size; - set_ia32_am_sc(new_op, name); + if (mode_is_float(mode)) { + ir_node *new_op = be_transform_node(op); + if (USE_SSE2(env_cg)) { + ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); + ir_node *noreg_fp = ia32_new_NoReg_fp(env_cg); + ir_node *nomem = new_rd_NoMem(irg); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + res = new_rd_ia32_xXor(dbgi, irg, block, noreg_gp, noreg_gp, nomem, + new_op, noreg_fp); - set_ia32_res_mode(new_op, env->mode); - set_ia32_op_type(new_op, ia32_AddrModeS); - set_ia32_ls_mode(new_op, env->mode); + size = get_mode_size_bits(mode); + ent = ia32_gen_fp_known_const(size == 32 ? 
ia32_SSIGN : ia32_DSIGN); - new_op = new_rd_Proj(env->dbg, env->irg, env->block, new_op, env->mode, pn_ia32_xEor_res); - } - else { - new_op = new_rd_ia32_vfchs(env->dbg, env->irg, env->block, op, env->mode); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + set_ia32_am_sc(res, ent); + set_ia32_op_type(res, ia32_AddrModeS); + set_ia32_ls_mode(res, mode); + } else { + res = new_rd_ia32_vfchs(dbgi, irg, block, new_op); } - } - else { - new_op = gen_unop(env, op, new_rd_ia32_Minus); + } else { + res = gen_unop(node, op, new_rd_ia32_Neg); } - return new_op; + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + + return res; } -/** - * Transforms a Minus node. - * - * @param env The transformation environment - * @return The created ia32 Minus node - */ -static ir_node *gen_Minus(ia32_transform_env_t *env) { - return gen_Minus_ex(env, get_Minus_op(env->irn)); +static ir_node *create_Immediate_from_int(int val) +{ + ir_graph *irg = current_ir_graph; + ir_node *start_block = get_irg_start_block(irg); + ir_node *immediate = new_rd_ia32_Immediate(NULL, irg, start_block, NULL, 0, val); + arch_set_irn_register(env_cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]); + + return immediate; } +static ir_node *gen_bin_Not(ir_node *node) +{ + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *op = get_Not_op(node); + ir_node *new_op = be_transform_node(op); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *one = create_Immediate_from_int(1); + + return new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, nomem, new_op, one); +} /** * Transforms a Not node. * - * @param env The transformation environment * @return The created ia32 Not node */ -static ir_node *gen_Not(ia32_transform_env_t *env) { - assert (! mode_is_float(env->mode)); - return gen_unop(env, get_Not_op(env->irn), new_rd_ia32_Not); +static ir_node *gen_Not(ir_node *node) { + ir_node *op = get_Not_op(node); + ir_mode *mode = get_irn_mode(node); + + if(mode == mode_b) { + return gen_bin_Not(node); + } + + assert (! mode_is_float(get_irn_mode(node))); + return gen_unop(node, op, new_rd_ia32_Not); } @@ -1276,357 +1508,540 @@ static ir_node *gen_Not(ia32_transform_env_t *env) { /** * Transforms an Abs node. 
* - * @param env The transformation environment * @return The created ia32 Abs node */ -static ir_node *gen_Abs(ia32_transform_env_t *env) { - ir_node *res, *p_eax, *p_edx; - dbg_info *dbg = env->dbg; - ir_mode *mode = env->mode; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg); - ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *op = get_Abs_op(env->irn); +static ir_node *gen_Abs(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *op = get_Abs_op(node); + ir_node *new_op = be_transform_node(op); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); + ir_node *noreg_fp = ia32_new_NoReg_fp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *res; int size; - ident *name; + ir_entity *ent; if (mode_is_float(mode)) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) { - res = new_rd_ia32_xAnd(dbg,irg, block, noreg_gp, noreg_gp, op, noreg_fp, nomem); + if (USE_SSE2(env_cg)) { + res = new_rd_ia32_xAnd(dbgi,irg, block, noreg_gp, noreg_gp, nomem, new_op, noreg_fp); - size = get_mode_size_bits(mode); - name = gen_fp_known_const(size == 32 ? ia32_SABS : ia32_DABS); + size = get_mode_size_bits(mode); + ent = ia32_gen_fp_known_const(size == 32 ? ia32_SABS : ia32_DABS); - set_ia32_am_sc(res, name); + set_ia32_am_sc(res, ent); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); - set_ia32_res_mode(res, mode); set_ia32_op_type(res, ia32_AddrModeS); - set_ia32_ls_mode(res, env->mode); - - res = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_xAnd_res); + set_ia32_ls_mode(res, mode); } else { - res = new_rd_ia32_vfabs(dbg, irg, block, op, mode); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); + res = new_rd_ia32_vfabs(dbgi, irg, block, new_op); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); } - } - else { - res = new_rd_ia32_Cdq(dbg, irg, block, op); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); - set_ia32_res_mode(res, mode); - - p_eax = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_Cdq_EAX); - p_edx = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_Cdq_EDX); - - res = new_rd_ia32_Eor(dbg, irg, block, noreg_gp, noreg_gp, p_eax, p_edx, nomem); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); - set_ia32_res_mode(res, mode); - - res = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_Eor_res); - - res = new_rd_ia32_Sub(dbg, irg, block, noreg_gp, noreg_gp, res, p_edx, nomem); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); - set_ia32_res_mode(res, mode); - - res = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_Sub_res); + } else { + ir_node *xor; + ir_node *pval = new_rd_ia32_ProduceVal(dbgi, irg, block); + ir_node *sign_extension = new_rd_ia32_Cltd(dbgi, irg, block, new_op, + pval); + + add_irn_dep(pval, get_irg_frame(irg)); + SET_IA32_ORIG_NODE(sign_extension, + ia32_get_old_node_name(env_cg, node)); + + xor = new_rd_ia32_Xor(dbgi, irg, block, noreg_gp, noreg_gp, nomem, new_op, + sign_extension); + SET_IA32_ORIG_NODE(xor, ia32_get_old_node_name(env_cg, node)); + + res = new_rd_ia32_Sub(dbgi, irg, block, noreg_gp, noreg_gp, nomem, xor, + sign_extension); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); } return res; } - - /** * Transforms a Load. 
* - * @param env The transformation environment * @return the created ia32 Load node */ -static ir_node *gen_Load(ia32_transform_env_t *env) { - ir_node *node = env->irn; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *ptr = get_Load_ptr(node); - ir_node *lptr = ptr; - ir_mode *mode = get_Load_mode(node); - int is_imm = 0; - ir_node *new_op; - ia32_am_flavour_t am_flav = ia32_am_B; - - /* address might be a constant (symconst or absolute address) */ - if (is_ia32_Const(ptr)) { - lptr = noreg; - is_imm = 1; +static ir_node *gen_Load(ir_node *node) { + ir_node *old_block = get_nodes_block(node); + ir_node *block = be_transform_node(old_block); + ir_node *ptr = get_Load_ptr(node); + ir_node *mem = get_Load_mem(node); + ir_node *new_mem = be_transform_node(mem); + ir_node *base; + ir_node *index; + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_mode *mode = get_Load_mode(node); + ir_mode *res_mode; + ir_node *new_op; + ia32_address_t addr; + + /* construct load address */ + memset(&addr, 0, sizeof(addr)); + ia32_create_address_mode(&addr, ptr, 0); + base = addr.base; + index = addr.index; + + if(base == NULL) { + base = noreg; + } else { + base = be_transform_node(base); } - if (mode_is_float(mode)) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) - new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node)); - else - new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node)); - } - else { - new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node)); + if(index == NULL) { + index = noreg; + } else { + index = be_transform_node(index); } - /* base is an constant address */ - if (is_imm) { - if (get_ia32_op_type(ptr) == ia32_SymConst) { - set_ia32_am_sc(new_op, get_ia32_id_cnst(ptr)); - am_flav = ia32_am_N; + if (mode_is_float(mode)) { + if (USE_SSE2(env_cg)) { + new_op = new_rd_ia32_xLoad(dbgi, irg, block, base, index, new_mem, + mode); + res_mode = mode_xmm; + } else { + new_op = new_rd_ia32_vfld(dbgi, irg, block, base, index, new_mem, + mode); + res_mode = mode_vfp; } - else { - add_ia32_am_offs(new_op, get_ia32_cnst(ptr)); - am_flav = ia32_am_O; + } else { + if(mode == mode_b) + mode = mode_Iu; + + /* create a conv node with address mode for smaller modes */ + if(get_mode_size_bits(mode) < 32) { + new_op = new_rd_ia32_Conv_I2I(dbgi, irg, block, base, index, + new_mem, noreg, mode); + } else { + new_op = new_rd_ia32_Load(dbgi, irg, block, base, index, new_mem); } + res_mode = mode_Iu; } - set_ia32_am_support(new_op, ia32_am_Source); + set_irn_pinned(new_op, get_irn_pinned(node)); set_ia32_op_type(new_op, ia32_AddrModeS); - set_ia32_am_flavour(new_op, am_flav); set_ia32_ls_mode(new_op, mode); + set_address(new_op, &addr); - /* - check for special case: the loaded value might not be used (optimized, volatile, ...) - we add a Proj + Keep for volatile loads and ignore all other cases - */ - if (! 
get_proj_for_pn(node, pn_Load_res) && get_Load_volatility(node) == volatility_is_volatile) { - /* add a result proj and a Keep to produce a pseudo use */ - ir_node *proj = new_r_Proj(env->irg, env->block, new_op, mode, pn_ia32_Load_res); - be_new_Keep(arch_get_irn_reg_class(env->cg->arch_env, proj, -1), env->irg, env->block, 1, &proj); + /* make sure we are scheduled behind the initial IncSP/Barrier + * to avoid spills being placed before it + */ + if (block == get_irg_start_block(irg)) { + add_irn_dep(new_op, get_irg_frame(irg)); } - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + set_ia32_exc_label(new_op, be_get_Proj_for_pn(node, pn_Load_X_except) != NULL); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); return new_op; } +static int use_dest_am(ir_node *block, ir_node *node, ir_node *mem, + ir_node *ptr, ir_mode *mode, ir_node *other) +{ + ir_node *load; + if(!is_Proj(node)) + return 0; -/** - * Transforms a Store. - * - * @param env The transformation environment - * @return the created ia32 Store node - */ -static ir_node *gen_Store(ia32_transform_env_t *env) { - ir_node *node = env->irn; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *val = get_Store_value(node); - ir_node *ptr = get_Store_ptr(node); - ir_node *sptr = ptr; - ir_node *mem = get_Store_mem(node); - ir_mode *mode = get_irn_mode(val); - ir_node *sval = val; - int is_imm = 0; - ir_node *new_op; - ia32_am_flavour_t am_flav = ia32_am_B; - ia32_immop_type_t immop = ia32_ImmNone; + /* we only use address mode if we're the only user of the load */ + if(get_irn_n_edges(node) > 1) + return 0; - if (! mode_is_float(mode)) { - /* in case of storing a const (but not a symconst) -> make it an attribute */ - if (is_ia32_Cnst(val)) { - switch (get_ia32_op_type(val)) { - case ia32_Const: - immop = ia32_ImmConst; - break; - case ia32_SymConst: - immop = ia32_ImmSymConst; - break; - default: - assert(0 && "unsupported Const type"); - } - sval = noreg; - } - } + load = get_Proj_pred(node); + if(!is_Load(load)) + return 0; + if(get_nodes_block(load) != block) + return 0; + + /* Store should be attached to the load */ + if(!is_Proj(mem) || get_Proj_pred(mem) != load) + return 0; + /* store should have the same pointer as the load */ + if(get_Load_ptr(load) != ptr) + return 0; - /* address might be a constant (symconst or absolute address) */ - if (is_ia32_Const(ptr)) { - sptr = noreg; - is_imm = 1; + /* don't do AM if other node inputs depend on the load (via mem-proj) */ + if(other != NULL && get_nodes_block(other) == block + && heights_reachable_in_block(heights, other, load)) + return 0; + + assert(get_Load_mode(load) == mode); + + return 1; +} + +static ir_node *dest_am_binop(ir_node *node, ir_node *op1, ir_node *op2, + ir_node *mem, ir_node *ptr, ir_mode *mode, + construct_binop_dest_func *func, + construct_binop_dest_func *func8bit, + int commutative) +{ + ir_node *src_block = get_nodes_block(node); + ir_node *block; + ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi; + ir_node *new_node; + ir_node *new_op; + ia32_address_mode_t am; + ia32_address_t *addr = &am.addr; + memset(&am, 0, sizeof(am)); + + if(use_dest_am(src_block, op1, mem, ptr, mode, op2)) { + build_address(&am, op1); + new_op = create_immediate_or_transform(op2, 0); + } else if(commutative && use_dest_am(src_block, op2, mem, ptr, mode, op1)) { + build_address(&am, op2); + new_op = create_immediate_or_transform(op1, 0); + } else { + return NULL; } - if 
(mode_is_float(mode)) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) - new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, sptr, noreg, sval, mem); - else - new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, sptr, noreg, sval, mem); + if(addr->base == NULL) + addr->base = noreg_gp; + if(addr->index == NULL) + addr->index = noreg_gp; + if(addr->mem == NULL) + addr->mem = new_NoMem(); + + dbgi = get_irn_dbg_info(node); + block = be_transform_node(src_block); + if(get_mode_size_bits(mode) == 8) { + new_node = func8bit(dbgi, irg, block, addr->base, addr->index, + addr->mem, new_op); + } else { + new_node = func(dbgi, irg, block, addr->base, addr->index, addr->mem, + new_op); } - else if (get_mode_size_bits(mode) == 8) { - new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, sptr, noreg, sval, mem); + set_address(new_node, addr); + set_ia32_op_type(new_node, ia32_AddrModeD); + set_ia32_ls_mode(new_node, mode); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); + + return new_node; +} + +static ir_node *dest_am_unop(ir_node *node, ir_node *op, ir_node *mem, + ir_node *ptr, ir_mode *mode, + construct_unop_dest_func *func) +{ + ir_node *src_block = get_nodes_block(node); + ir_node *block; + ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi; + ir_node *new_node; + ia32_address_mode_t am; + ia32_address_t *addr = &am.addr; + memset(&am, 0, sizeof(am)); + + if(!use_dest_am(src_block, op, mem, ptr, mode, NULL)) + return NULL; + + build_address(&am, op); + + if(addr->base == NULL) + addr->base = noreg_gp; + if(addr->index == NULL) + addr->index = noreg_gp; + if(addr->mem == NULL) + addr->mem = new_NoMem(); + + dbgi = get_irn_dbg_info(node); + block = be_transform_node(src_block); + new_node = func(dbgi, irg, block, addr->base, addr->index, addr->mem); + set_address(new_node, addr); + set_ia32_op_type(new_node, ia32_AddrModeD); + set_ia32_ls_mode(new_node, mode); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); + + return new_node; +} + +static ir_node *try_create_dest_am(ir_node *node) { + ir_node *val = get_Store_value(node); + ir_node *mem = get_Store_mem(node); + ir_node *ptr = get_Store_ptr(node); + ir_mode *mode = get_irn_mode(val); + ir_node *op1; + ir_node *op2; + ir_node *new_node; + + /* handle only GP modes for now... 
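+	   (floating point stores keep going through the regular gen_Store path)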
*/ + if(!mode_needs_gp_reg(mode)) + return NULL; + + /* store must be the only user of the val node */ + if(get_irn_n_edges(val) > 1) + return NULL; + + switch(get_irn_opcode(val)) { + case iro_Add: + op1 = get_Add_left(val); + op2 = get_Add_right(val); + if(is_Const_1(op2)) { + new_node = dest_am_unop(val, op1, mem, ptr, mode, + new_rd_ia32_IncMem); + break; + } else if(is_Const_Minus_1(op2)) { + new_node = dest_am_unop(val, op1, mem, ptr, mode, + new_rd_ia32_DecMem); + break; + } + new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, + new_rd_ia32_AddMem, new_rd_ia32_AddMem8Bit, 1); + break; + case iro_Sub: + op1 = get_Sub_left(val); + op2 = get_Sub_right(val); + if(is_Const(op2)) { + ir_fprintf(stderr, "Optimisation warning: not-normalize sub ,C" + "found\n"); + } + new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, + new_rd_ia32_SubMem, new_rd_ia32_SubMem8Bit, 0); + break; + case iro_And: + op1 = get_And_left(val); + op2 = get_And_right(val); + new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, + new_rd_ia32_AndMem, new_rd_ia32_AndMem8Bit, 1); + break; + case iro_Or: + op1 = get_Or_left(val); + op2 = get_Or_right(val); + new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, + new_rd_ia32_OrMem, new_rd_ia32_OrMem8Bit, 1); + break; + case iro_Eor: + op1 = get_Eor_left(val); + op2 = get_Eor_right(val); + new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, + new_rd_ia32_XorMem, new_rd_ia32_XorMem8Bit, 1); + break; + case iro_Shl: + op1 = get_Shl_left(val); + op2 = get_Shl_right(val); + new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, + new_rd_ia32_ShlMem, new_rd_ia32_ShlMem, 0); + break; + case iro_Shr: + op1 = get_Shr_left(val); + op2 = get_Shr_right(val); + new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, + new_rd_ia32_ShrMem, new_rd_ia32_ShrMem, 0); + break; + case iro_Shrs: + op1 = get_Shrs_left(val); + op2 = get_Shrs_right(val); + new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, + new_rd_ia32_SarMem, new_rd_ia32_SarMem, 0); + break; + case iro_Rot: + op1 = get_Rot_left(val); + op2 = get_Rot_right(val); + new_node = dest_am_binop(val, op1, op2, mem, ptr, mode, + new_rd_ia32_RolMem, new_rd_ia32_RolMem, 0); + break; + /* TODO: match ROR patterns... */ + case iro_Minus: + op1 = get_Minus_op(val); + new_node = dest_am_unop(val, op1, mem, ptr, mode, new_rd_ia32_NegMem); + break; + case iro_Not: + /* should be lowered already */ + assert(mode != mode_b); + op1 = get_Not_op(val); + new_node = dest_am_unop(val, op1, mem, ptr, mode, new_rd_ia32_NotMem); + break; + default: + return NULL; } - else { - new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, sptr, noreg, sval, mem); + + return new_node; +} + +/** + * Transforms a Store. 
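+ * Destination address mode (try_create_dest_am) is attempted first; if it
+ * does not match, a plain ia32 Store/Store8Bit is built.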
+ * + * @return the created ia32 Store node + */ +static ir_node *gen_Store(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *ptr = get_Store_ptr(node); + ir_node *base; + ir_node *index; + ir_node *val = get_Store_value(node); + ir_node *new_val; + ir_node *mem = get_Store_mem(node); + ir_node *new_mem = be_transform_node(mem); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_mode *mode = get_irn_mode(val); + ir_node *new_op; + ia32_address_t addr; + + /* check for destination address mode */ + new_op = try_create_dest_am(node); + if(new_op != NULL) + return new_op; + + /* construct store address */ + memset(&addr, 0, sizeof(addr)); + ia32_create_address_mode(&addr, ptr, 0); + base = addr.base; + index = addr.index; + + if(base == NULL) { + base = noreg; + } else { + base = be_transform_node(base); } - /* stored const is an attribute (saves a register) */ - if (! mode_is_float(mode) && is_ia32_Cnst(val)) { - set_ia32_Immop_attr(new_op, val); + if(index == NULL) { + index = noreg; + } else { + index = be_transform_node(index); } - /* base is an constant address */ - if (is_imm) { - if (get_ia32_op_type(ptr) == ia32_SymConst) { - set_ia32_am_sc(new_op, get_ia32_id_cnst(ptr)); - am_flav = ia32_am_N; + if (mode_is_float(mode)) { + new_val = be_transform_node(val); + if (USE_SSE2(env_cg)) { + new_op = new_rd_ia32_xStore(dbgi, irg, block, base, index, new_mem, + new_val); + } else { + new_op = new_rd_ia32_vfst(dbgi, irg, block, base, index, new_mem, new_val, + mode); } - else { - add_ia32_am_offs(new_op, get_ia32_cnst(ptr)); - am_flav = ia32_am_O; + } else { + new_val = create_immediate_or_transform(val, 0); + if(mode == mode_b) + mode = mode_Iu; + + if (get_mode_size_bits(mode) == 8) { + new_op = new_rd_ia32_Store8Bit(dbgi, irg, block, base, index, new_mem, + new_val); + } else { + new_op = new_rd_ia32_Store(dbgi, irg, block, base, index, new_mem, + new_val); } } - set_ia32_am_support(new_op, ia32_am_Dest); + set_irn_pinned(new_op, get_irn_pinned(node)); set_ia32_op_type(new_op, ia32_AddrModeD); - set_ia32_am_flavour(new_op, am_flav); set_ia32_ls_mode(new_op, mode); - set_ia32_immop_type(new_op, immop); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + set_ia32_exc_label(new_op, be_get_Proj_for_pn(node, pn_Store_X_except) != NULL); + set_address(new_op, &addr); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); return new_op; } +static ir_node *create_Switch(ir_node *node) +{ + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *sel = get_Cond_selector(node); + ir_node *new_sel = be_transform_node(sel); + ir_node *res; + int switch_min = INT_MAX; + const ir_edge_t *edge; + assert(get_mode_size_bits(get_irn_mode(sel)) == 32); -/** - * Transforms a Cond -> Proj[b] -> Cmp into a CondJmp, CondJmp_i or TestJmp - * - * @param env The transformation environment - * @return The transformed node. 
- */ -static ir_node *gen_Cond(ia32_transform_env_t *env) { - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *node = env->irn; - ir_node *sel = get_Cond_selector(node); - ir_mode *sel_mode = get_irn_mode(sel); - ir_node *res = NULL; - ir_node *pred = NULL; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *cmp_a, *cmp_b, *cnst, *expr; - - if (is_Proj(sel) && sel_mode == mode_b) { - ir_node *nomem = new_NoMem(); - pn_Cmp pnc = get_Proj_proj(sel); - - pred = get_Proj_pred(sel); + /* determine the smallest switch case value */ + foreach_out_edge(node, edge) { + ir_node *proj = get_edge_src_irn(edge); + int pn = get_Proj_proj(proj); + if(pn < switch_min) + switch_min = pn; + } - /* get both compare operators */ - cmp_a = get_Cmp_left(pred); - cmp_b = get_Cmp_right(pred); + if (switch_min != 0) { + ir_node *noreg = ia32_new_NoReg_gp(env_cg); - /* check if we can use a CondJmp with immediate */ - cnst = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(cmp_a, cmp_b) : NULL; - expr = get_expr_op(cmp_a, cmp_b); + /* if smallest switch case is not 0 we need an additional sub */ + new_sel = new_rd_ia32_Lea(dbgi, irg, block, new_sel, noreg); + add_ia32_am_offs_int(new_sel, -switch_min); + set_ia32_op_type(new_sel, ia32_AddrModeS); - if (cnst && expr) { - /* immop has to be the right operand, we might need to flip pnc */ - if(cnst != cmp_b) { - pnc = get_inversed_pnc(pnc); - } + SET_IA32_ORIG_NODE(new_sel, ia32_get_old_node_name(env_cg, node)); + } - if ((pnc == pn_Cmp_Eq || pnc == pn_Cmp_Lg) && mode_is_int(get_irn_mode(expr))) { - if (get_ia32_op_type(cnst) == ia32_Const && - classify_tarval(get_ia32_Immop_tarval(cnst)) == TV_CLASSIFY_NULL) - { - /* a Cmp A =/!= 0 */ - ir_node *op1 = expr; - ir_node *op2 = expr; - ir_node *and = skip_Proj(expr); - const char *cnst = NULL; - - /* check, if expr is an only once used And operation */ - if (get_irn_n_edges(expr) == 1 && is_ia32_And(and)) { - op1 = get_irn_n(and, 2); - op2 = get_irn_n(and, 3); - - cnst = (is_ia32_ImmConst(and) || is_ia32_ImmSymConst(and)) ? 
get_ia32_cnst(and) : NULL; - } - res = new_rd_ia32_TestJmp(dbg, irg, block, op1, op2); - set_ia32_pncode(res, pnc); - set_ia32_res_mode(res, get_irn_mode(op1)); + res = new_rd_ia32_SwitchJmp(dbgi, irg, block, new_sel); + set_ia32_pncode(res, get_Cond_defaultProj(node)); - if (cnst) { - copy_ia32_Immop_attr(res, and); - } + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); - return res; - } - } + return res; +} - if (mode_is_float(get_irn_mode(expr))) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) - res = new_rd_ia32_xCondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem); - else { - assert(0); - } - } - else { - res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem); - } - set_ia32_Immop_attr(res, cnst); - set_ia32_res_mode(res, get_irn_mode(expr)); - } - else { - if (mode_is_float(get_irn_mode(cmp_a))) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) - res = new_rd_ia32_xCondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); - else { - ir_node *proj_eax; - res = new_rd_ia32_vfCondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); - proj_eax = new_r_Proj(irg, block, res, mode_Is, pn_ia32_vfCondJmp_temp_reg_eax); - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, &proj_eax); - } - } - else { - res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); - set_ia32_commutative(res); - } - set_ia32_res_mode(res, get_irn_mode(cmp_a)); +static ir_node *get_flags_node(ir_node *node, pn_Cmp *pnc_out) +{ + ir_graph *irg = current_ir_graph; + ir_node *flags; + ir_node *new_op; + ir_node *noreg; + ir_node *nomem; + ir_node *new_block; + dbg_info *dbgi; + + /* we have a Cmp as input */ + if(is_Proj(node)) { + ir_node *pred = get_Proj_pred(node); + if(is_Cmp(pred)) { + flags = be_transform_node(pred); + *pnc_out = get_Proj_proj(node); + return flags; } - - set_ia32_pncode(res, pnc); - //set_ia32_am_support(res, ia32_am_Source); } - else { - /* determine the smallest switch case value */ - int switch_min = INT_MAX; - const ir_edge_t *edge; - char buf[64]; - - foreach_out_edge(node, edge) { - int pn = get_Proj_proj(get_edge_src_irn(edge)); - switch_min = pn < switch_min ? pn : switch_min; - } - if (switch_min) { - /* if smallest switch case is not 0 we need an additional sub */ - snprintf(buf, sizeof(buf), "%d", switch_min); - res = new_rd_ia32_Lea(dbg, irg, block, sel, noreg, mode_Is); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); - sub_ia32_am_offs(res, buf); - set_ia32_am_flavour(res, ia32_am_OB); - set_ia32_am_support(res, ia32_am_Source); - set_ia32_op_type(res, ia32_AddrModeS); - } + /* a mode_b value, we have to compare it against 0 */ + dbgi = get_irn_dbg_info(node); + new_block = be_transform_node(get_nodes_block(node)); + new_op = be_transform_node(node); + noreg = ia32_new_NoReg_gp(env_cg); + nomem = new_NoMem(); + flags = new_rd_ia32_Test(dbgi, irg, new_block, noreg, noreg, nomem, + new_op, new_op, 0, 0); + *pnc_out = pn_Cmp_Lg; + return flags; +} - res = new_rd_ia32_SwitchJmp(dbg, irg, block, switch_min ? 
res : sel, mode_T); - set_ia32_pncode(res, get_Cond_defaultProj(node)); - set_ia32_res_mode(res, get_irn_mode(sel)); +static ir_node *gen_Cond(ir_node *node) { + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *sel = get_Cond_selector(node); + ir_mode *sel_mode = get_irn_mode(sel); + ir_node *res; + ir_node *flags = NULL; + pn_Cmp pnc; + + if (sel_mode != mode_b) { + return create_Switch(node); } - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); + /* we get flags from a cmp */ + flags = get_flags_node(sel, &pnc); + + res = new_rd_ia32_Jcc(dbgi, irg, new_block, flags, pnc); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + return res; } @@ -1635,23 +2050,21 @@ static ir_node *gen_Cond(ia32_transform_env_t *env) { /** * Transforms a CopyB node. * - * @param env The transformation environment * @return The transformed node. */ -static ir_node *gen_CopyB(ia32_transform_env_t *env) { - ir_node *res = NULL; - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *node = env->irn; +static ir_node *gen_CopyB(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *src = get_CopyB_src(node); + ir_node *new_src = be_transform_node(src); ir_node *dst = get_CopyB_dst(node); + ir_node *new_dst = be_transform_node(dst); ir_node *mem = get_CopyB_mem(node); + ir_node *new_mem = be_transform_node(mem); + ir_node *res = NULL; + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); int size = get_type_size_bytes(get_CopyB_type(node)); - ir_mode *dst_mode = get_irn_mode(dst); - ir_mode *src_mode = get_irn_mode(src); int rem; - ir_node *in[3], *tmp; /* If we have to copy more than 32 bytes, we use REP MOVSx and */ /* then we need the size explicitly in ECX. 
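+	   (the remaining 0-3 bytes are encoded in the pncode field of the CopyB node)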
*/ @@ -1659,875 +2072,1354 @@ static ir_node *gen_CopyB(ia32_transform_env_t *env) { rem = size & 0x3; /* size % 4 */ size >>= 2; - res = new_rd_ia32_Const(dbg, irg, block, get_irg_no_mem(irg), mode_Is); - set_ia32_op_type(res, ia32_Const); - set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is)); + res = new_rd_ia32_Const(dbgi, irg, block, NULL, 0, size); + if(size == 0) { + ir_fprintf(stderr, "Optimisation warning copyb %+F with size <4\n", + node); + set_ia32_flags(res, get_ia32_flags(res) | arch_irn_flags_modify_flags); + } + add_irn_dep(res, get_irg_frame(irg)); + + res = new_rd_ia32_CopyB(dbgi, irg, block, new_dst, new_src, res, new_mem); + /* we misuse the pncode field for the copyb size */ + set_ia32_pncode(res, rem); + } else { + res = new_rd_ia32_CopyB_i(dbgi, irg, block, new_dst, new_src, new_mem); + set_ia32_pncode(res, size); + } - res = new_rd_ia32_CopyB(dbg, irg, block, dst, src, res, mem); - set_ia32_Immop_tarval(res, new_tarval_from_long(rem, mode_Is)); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); - /* ok: now attach Proj's because rep movsd will destroy esi, edi and ecx */ - in[0] = new_r_Proj(irg, block, res, dst_mode, pn_ia32_CopyB_DST); - in[1] = new_r_Proj(irg, block, res, src_mode, pn_ia32_CopyB_SRC); - in[2] = new_r_Proj(irg, block, res, mode_Is, pn_ia32_CopyB_CNT); - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 3, in); + return res; +} + +static ir_node *gen_be_Copy(ir_node *node) +{ + ir_node *result = be_duplicate_node(node); + ir_mode *mode = get_irn_mode(result); - tmp = ia32_get_proj_for_mode(node, mode_M); - set_Proj_proj(tmp, pn_ia32_CopyB_M); + if (mode_needs_gp_reg(mode)) { + set_irn_mode(result, mode_Iu); } - else { - res = new_rd_ia32_CopyB_i(dbg, irg, block, dst, src, mem); - set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is)); - set_ia32_immop_type(res, ia32_ImmConst); - /* ok: now attach Proj's because movsd will destroy esi and edi */ - in[0] = new_r_Proj(irg, block, res, dst_mode, pn_ia32_CopyB_i_DST); - in[1] = new_r_Proj(irg, block, res, src_mode, pn_ia32_CopyB_i_SRC); - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 2, in); + return result; +} - tmp = ia32_get_proj_for_mode(node, mode_M); - set_Proj_proj(tmp, pn_ia32_CopyB_i_M); - } +/** + * helper function: checks wether all Cmp projs are Lg or Eq which is needed + * to fold an and into a test node + */ +static int can_fold_test_and(ir_node *node) +{ + const ir_edge_t *edge; - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); + /** we can only have eq and lg projs */ + foreach_out_edge(node, edge) { + ir_node *proj = get_edge_src_irn(edge); + pn_Cmp pnc = get_Proj_proj(proj); + if(pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) + return 0; + } - return res; + return 1; } +static ir_node *try_create_Test(ir_node *node) +{ + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *cmp_left = get_Cmp_left(node); + ir_node *cmp_right = get_Cmp_right(node); + ir_mode *mode; + ir_node *left; + ir_node *right; + ir_node *res; + ia32_address_mode_t am; + ia32_address_t *addr = &am.addr; + int cmp_unsigned; + + /* can we use a test instruction? 
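+	   only when comparing against 0; an And may be folded in if all Cmp users are Eq/Lg projs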
*/ + if(!is_Const_0(cmp_right)) + return NULL; + if(is_And(cmp_left) && get_irn_n_edges(cmp_left) == 1 && + can_fold_test_and(node)) { + ir_node *and_left = get_And_left(cmp_left); + ir_node *and_right = get_And_right(cmp_left); + + mode = get_irn_mode(and_left); + left = and_left; + right = and_right; + } else { + mode = get_irn_mode(cmp_left); + left = cmp_left; + right = cmp_left; + } -/** - * Transforms a Mux node into CMov. - * - * @param env The transformation environment - * @return The transformed node. - */ -static ir_node *gen_Mux(ia32_transform_env_t *env) { -#if 0 - ir_node *node = env->irn; - ir_node *new_op = new_rd_ia32_CMov(env->dbg, env->irg, env->block, \ - get_Mux_sel(node), get_Mux_false(node), get_Mux_true(node), env->mode); + assert(get_mode_size_bits(mode) <= 32); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + match_arguments(&am, block, left, right, match_commutative | + match_8_16_bit_am | match_am_and_immediates); - return new_op; -#endif - return NULL; + cmp_unsigned = !mode_is_signed(mode); + if(get_mode_size_bits(mode) == 8) { + res = new_rd_ia32_Test8Bit(dbgi, irg, new_block, addr->base, + addr->index, addr->mem, am.new_op1, + am.new_op2, am.flipped, cmp_unsigned); + } else { + res = new_rd_ia32_Test(dbgi, irg, new_block, addr->base, addr->index, + addr->mem, am.new_op1, am.new_op2, am.flipped, + cmp_unsigned); + } + set_am_attributes(res, &am); + assert(mode != NULL); + set_ia32_ls_mode(res, mode); + + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + + res = fix_mem_proj(res, &am); + return res; } -typedef ir_node *cmov_func_t(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *cmp_a, ir_node *cmp_b, \ - ir_node *psi_true, ir_node *psi_default, ir_mode *mode); +static ir_node *create_Fucom(ir_node *node) +{ + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *left = get_Cmp_left(node); + ir_node *new_left = be_transform_node(left); + ir_node *right = get_Cmp_right(node); + ir_node *new_right = be_transform_node(right); + ir_node *res; -/** - * Transforms a Psi node into CMov. - * - * @param env The transformation environment - * @return The transformed node. 
- */ -static ir_node *gen_Psi(ia32_transform_env_t *env) { - ia32_code_gen_t *cg = env->cg; - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_mode *mode = env->mode; - ir_node *block = env->block; - ir_node *node = env->irn; - ir_node *cmp_proj = get_Mux_sel(node); - ir_node *psi_true = get_Psi_val(node, 0); - ir_node *psi_default = get_Psi_default(node); - ir_node *noreg = ia32_new_NoReg_gp(cg); - ir_node *nomem = new_rd_NoMem(irg); - ir_node *cmp, *cmp_a, *cmp_b, *and1, *and2, *new_op = NULL; - int pnc; + res = new_rd_ia32_vFucomFnstsw(dbgi, irg, new_block, new_left, new_right, + 0); + set_ia32_commutative(res); - assert(get_irn_mode(cmp_proj) == mode_b && "Condition for Psi must have mode_b"); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); - cmp = get_Proj_pred(cmp_proj); - cmp_a = get_Cmp_left(cmp); - cmp_b = get_Cmp_right(cmp); - pnc = get_Proj_proj(cmp_proj); + res = new_rd_ia32_Sahf(dbgi, irg, new_block, res); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); - if (mode_is_float(mode)) { - /* floating point psi */ - FP_USED(cg); - - /* 1st case: compare operands are float too */ - if (USE_SSE2(cg)) { - /* psi(cmp(a, b), t, f) can be done as: */ - /* tmp = cmp a, b */ - /* tmp2 = t and tmp */ - /* tmp3 = f and not tmp */ - /* res = tmp2 or tmp3 */ - - /* in case the compare operands are int, we move them into xmm register */ - if (! mode_is_float(get_irn_mode(cmp_a))) { - cmp_a = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_a, node, mode_D); - cmp_b = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_b, node, mode_D); - - pnc |= 8; /* transform integer compare to fp compare */ - } + return res; +} - new_op = new_rd_ia32_xCmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); - set_ia32_pncode(new_op, pnc); - set_ia32_am_support(new_op, ia32_am_Source); - set_ia32_res_mode(new_op, mode); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); - new_op = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_xCmp_res); - - and1 = new_rd_ia32_xAnd(dbg, irg, block, noreg, noreg, psi_true, new_op, nomem); - set_ia32_am_support(and1, ia32_am_None); - set_ia32_res_mode(and1, mode); - set_ia32_commutative(and1); - SET_IA32_ORIG_NODE(and1, ia32_get_old_node_name(cg, node)); - and1 = new_rd_Proj(dbg, irg, block, and1, mode, pn_ia32_xAnd_res); - - and2 = new_rd_ia32_xAndNot(dbg, irg, block, noreg, noreg, new_op, psi_default, nomem); - set_ia32_am_support(and2, ia32_am_None); - set_ia32_res_mode(and2, mode); - set_ia32_commutative(and2); - SET_IA32_ORIG_NODE(and2, ia32_get_old_node_name(cg, node)); - and2 = new_rd_Proj(dbg, irg, block, and2, mode, pn_ia32_xAndNot_res); - - new_op = new_rd_ia32_xOr(dbg, irg, block, noreg, noreg, and1, and2, nomem); - set_ia32_am_support(new_op, ia32_am_None); - set_ia32_res_mode(new_op, mode); - set_ia32_commutative(new_op); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); - new_op = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_xOr_res); - } - else { - /* x87 FPU */ - new_op = new_rd_ia32_vfCMov(dbg, irg, block, cmp_a, cmp_b, psi_true, psi_default, mode); - set_ia32_pncode(new_op, pnc); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); +static ir_node *create_Ucomi(ir_node *node) +{ + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *left = get_Cmp_left(node); + ir_node *new_left = be_transform_node(left); + ir_node *right = get_Cmp_right(node); + 
ir_node *new_right = be_transform_node(right); + ir_mode *mode = get_irn_mode(left); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *res; + + res = new_rd_ia32_Ucomi(dbgi, irg, new_block, noreg, noreg, nomem, new_left, + new_right, 0); + set_ia32_commutative(res); + set_ia32_ls_mode(res, mode); + + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + + return res; +} + +static ir_node *gen_Cmp(ir_node *node) +{ + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *left = get_Cmp_left(node); + ir_node *right = get_Cmp_right(node); + ir_mode *cmp_mode = get_irn_mode(left); + ir_node *res; + ia32_address_mode_t am; + ia32_address_t *addr = &am.addr; + int cmp_unsigned; + + if(mode_is_float(cmp_mode)) { + if (USE_SSE2(env_cg)) { + return create_Ucomi(node); + } else { + return create_Fucom(node); } } - else { - /* integer psi */ - construct_binop_func *set_func = NULL; - cmov_func_t *cmov_func = NULL; - if (mode_is_float(get_irn_mode(cmp_a))) { - /* 1st case: compare operands are floats */ - FP_USED(cg); + assert(mode_needs_gp_reg(cmp_mode)); - if (USE_SSE2(cg)) { - /* SSE FPU */ - set_func = new_rd_ia32_xCmpSet; - cmov_func = new_rd_ia32_xCmpCMov; - } - else { - /* x87 FPU */ - set_func = new_rd_ia32_vfCmpSet; - cmov_func = new_rd_ia32_vfCmpCMov; - } + /* we prefer the Test instruction where possible except cases where + * we can use SourceAM */ + if(!use_source_address_mode(block, left, right) && + !use_source_address_mode(block, right, left)) { + res = try_create_Test(node); + if(res != NULL) + return res; + } - pnc &= 7; /* fp compare -> int compare */ - } - else { - /* 2nd case: compare operand are integer too */ - set_func = new_rd_ia32_CmpSet; - cmov_func = new_rd_ia32_CmpCMov; - } + match_arguments(&am, block, left, right, + match_commutative | match_8_16_bit_am | + match_am_and_immediates); + + cmp_unsigned = !mode_is_signed(get_irn_mode(left)); + if(get_mode_size_bits(cmp_mode) == 8) { + res = new_rd_ia32_Cmp8Bit(dbgi, irg, new_block, addr->base, addr->index, + addr->mem, am.new_op1, am.new_op2, + am.flipped, cmp_unsigned); + } else { + res = new_rd_ia32_Cmp(dbgi, irg, new_block, addr->base, addr->index, + addr->mem, am.new_op1, am.new_op2, am.flipped, + cmp_unsigned); + } + set_am_attributes(res, &am); + assert(cmp_mode != NULL); + set_ia32_ls_mode(res, cmp_mode); - /* create the nodes */ + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); - /* check for special case first: And/Or -- Cmp with 0 -- Psi */ - if (is_ia32_Const_0(cmp_b) && is_Proj(cmp_a) && (is_ia32_And(get_Proj_pred(cmp_a)) || is_ia32_Or(get_Proj_pred(cmp_a)))) { - if (is_ia32_Const_1(psi_true) && is_ia32_Const_0(psi_default)) { - /* first case for SETcc: default is 0, set to 1 iff condition is true */ - new_op = new_rd_ia32_PsiCondSet(dbg, irg, block, cmp_a, mode); - set_ia32_pncode(new_op, pnc); - } - else if (is_ia32_Const_0(psi_true) && is_ia32_Const_1(psi_default)) { - /* second case for SETcc: default is 1, set to 0 iff condition is true: */ - /* we invert condition and set default to 0 */ - new_op = new_rd_ia32_PsiCondSet(dbg, irg, block, cmp_a, mode); - set_ia32_pncode(new_op, get_inversed_pnc(pnc)); - } - else { - /* otherwise: use CMOVcc */ - new_op = new_rd_ia32_PsiCondCMov(dbg, irg, block, cmp_a, psi_true, psi_default, mode); - set_ia32_pncode(new_op, pnc); - } + res = fix_mem_proj(res, &am); - 
SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); - } - else { - env->irn = cmp; - if (is_ia32_Const_1(psi_true) && is_ia32_Const_0(psi_default)) { - /* first case for SETcc: default is 0, set to 1 iff condition is true */ - new_op = gen_binop(env, cmp_a, cmp_b, set_func); - set_ia32_pncode(get_Proj_pred(new_op), pnc); - set_ia32_am_support(get_Proj_pred(new_op), ia32_am_Source); - } - else if (is_ia32_Const_0(psi_true) && is_ia32_Const_1(psi_default)) { - /* second case for SETcc: default is 1, set to 0 iff condition is true: */ - /* we invert condition and set default to 0 */ - new_op = gen_binop(env, cmp_a, cmp_b, set_func); - set_ia32_pncode(get_Proj_pred(new_op), get_inversed_pnc(pnc)); - set_ia32_am_support(get_Proj_pred(new_op), ia32_am_Source); - } - else { - /* otherwise: use CMOVcc */ - new_op = cmov_func(dbg, irg, block, cmp_a, cmp_b, psi_true, psi_default, mode); - set_ia32_pncode(new_op, pnc); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); - } - } - } + return res; +} - return new_op; +static ir_node *create_CMov(ir_node *node, ir_node *new_flags, pn_Cmp pnc) +{ + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *val_true = get_Psi_val(node, 0); + ir_node *new_val_true = be_transform_node(val_true); + ir_node *val_false = get_Psi_default(node); + ir_node *new_val_false = be_transform_node(val_false); + ir_mode *mode = get_irn_mode(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *res; + + assert(mode_needs_gp_reg(mode)); + + res = new_rd_ia32_CMov(dbgi, irg, new_block, noreg, noreg, nomem, + new_val_false, new_val_true, new_flags, pnc); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + + return res; } + +static ir_node *create_set_32bit(dbg_info *dbgi, ir_node *new_block, + ir_node *flags, pn_Cmp pnc, ir_node *orig_node) +{ + ir_graph *irg = current_ir_graph; + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *res; + + res = new_rd_ia32_Set(dbgi, irg, new_block, flags, pnc); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, orig_node)); + res = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, new_block, noreg, noreg, + nomem, res, mode_Bu); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, orig_node)); + + return res; +} + /** - * Following conversion rules apply: - * - * INT -> INT - * ============ - * 1) n bit -> m bit n > m (downscale) - * a) target is signed: movsx - * b) target is unsigned: and with lower bits sets - * 2) n bit -> m bit n == m (sign change) - * always ignored - * 3) n bit -> m bit n < m (upscale) - * a) source is signed: movsx - * b) source is unsigned: and with lower bits sets - * - * INT -> FLOAT - * ============== - * SSE(1/2) convert to float or double (cvtsi2ss/sd) - * - * FLOAT -> INT - * ============== - * SSE(1/2) convert from float or double to 32bit int (cvtss/sd2si) - * if target mode < 32bit: additional INT -> INT conversion (see above) + * Transforms a Psi node into CMov. * - * FLOAT -> FLOAT - * ================ - * SSE(1/2) convert from float or double to double or float (cvtss/sd2sd/ss) - * x87 is mode_E internally, conversions happen only at load and store - * in non-strict semantic + * @return The transformed node. 
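+ * (Psi(c, 1, 0) and Psi(c, 0, 1) are turned into Set instructions, all
+ * other cases into a CMov.)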
*/ +static ir_node *gen_Psi(ir_node *node) +{ + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *psi_true = get_Psi_val(node, 0); + ir_node *psi_default = get_Psi_default(node); + ir_node *cond = get_Psi_cond(node, 0); + ir_node *flags = NULL; + ir_node *res; + ir_mode *cmp_mode; + pn_Cmp pnc; + + assert(get_Psi_n_conds(node) == 1); + assert(get_irn_mode(cond) == mode_b); + assert(mode_needs_gp_reg(get_irn_mode(node))); + + flags = get_flags_node(cond, &pnc); + + if(is_Const_1(psi_true) && is_Const_0(psi_default)) { + res = create_set_32bit(dbgi, new_block, flags, pnc, node); + } else if(is_Const_0(psi_true) && is_Const_1(psi_default)) { + pnc = get_negated_pnc(pnc, cmp_mode); + res = create_set_32bit(dbgi, new_block, flags, pnc, node); + } else { + res = create_CMov(node, flags, pnc); + } + return res; +} + /** * Create a conversion from x87 state register to general purpose. */ -static ir_node *gen_x87_fp_to_gp(ia32_transform_env_t *env, ir_mode *tgt_mode) { - ia32_code_gen_t *cg = env->cg; - entity *ent = cg->fp_to_gp; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *op = get_Conv_op(env->irn); - ir_node *fist, *mem, *load; - - if (! ent) { - int size = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_vfp].mode); - ent = cg->fp_to_gp = - frame_alloc_area(get_irg_frame_type(env->irg), size, 16, 0); - } +static ir_node *gen_x87_fp_to_gp(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *op = get_Conv_op(node); + ir_node *new_op = be_transform_node(op); + ia32_code_gen_t *cg = env_cg; + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(cg); + ir_node *trunc_mode = ia32_new_Fpu_truncate(cg); + ir_mode *mode = get_irn_mode(node); + ir_node *fist, *load; /* do a fist */ - fist = new_rd_ia32_vfist(env->dbg, irg, block, get_irg_frame(irg), noreg, op, get_irg_no_mem(irg)); + fist = new_rd_ia32_vfist(dbgi, irg, block, get_irg_frame(irg), noreg, + new_NoMem(), new_op, trunc_mode); - set_ia32_frame_ent(fist, ent); + set_irn_pinned(fist, op_pin_state_floats); set_ia32_use_frame(fist); - set_ia32_am_support(fist, ia32_am_Dest); set_ia32_op_type(fist, ia32_AddrModeD); - set_ia32_am_flavour(fist, ia32_B); - set_ia32_ls_mode(fist, mode_F); - mem = new_r_Proj(irg, block, fist, mode_M, pn_ia32_vfist_M); + assert(get_mode_size_bits(mode) <= 32); + /* exception we can only store signed 32 bit integers, so for unsigned + we store a 64bit (signed) integer and load the lower bits */ + if(get_mode_size_bits(mode) == 32 && !mode_is_signed(mode)) { + set_ia32_ls_mode(fist, mode_Ls); + } else { + set_ia32_ls_mode(fist, mode_Is); + } + SET_IA32_ORIG_NODE(fist, ia32_get_old_node_name(cg, node)); /* do a Load */ - load = new_rd_ia32_Load(env->dbg, irg, block, get_irg_frame(irg), noreg, mem); + load = new_rd_ia32_Load(dbgi, irg, block, get_irg_frame(irg), noreg, fist); - set_ia32_frame_ent(load, ent); + set_irn_pinned(load, op_pin_state_floats); set_ia32_use_frame(load); - set_ia32_am_support(load, ia32_am_Source); set_ia32_op_type(load, ia32_AddrModeS); - set_ia32_am_flavour(load, ia32_B); - set_ia32_ls_mode(load, tgt_mode); + set_ia32_ls_mode(load, mode_Is); + if(get_ia32_ls_mode(fist) == mode_Ls) { + ia32_attr_t *attr = get_ia32_attr(load); + attr->data.need_64bit_stackent = 1; + } else { + ia32_attr_t *attr = get_ia32_attr(load); + 
attr->data.need_32bit_stackent = 1; + } + SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(cg, node)); - return new_r_Proj(irg, block, load, tgt_mode, pn_ia32_Load_res); + return new_r_Proj(irg, block, load, mode_Iu, pn_ia32_Load_res); } /** - * Create a conversion from x87 state register to general purpose. + * Creates a x87 strict Conv by placing a Sore and a Load */ -static ir_node *gen_x87_gp_to_fp(ia32_transform_env_t *env, ir_mode *src_mode) { - ia32_code_gen_t *cg = env->cg; - entity *ent = cg->gp_to_fp; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = get_irg_no_mem(irg); - ir_node *op = get_Conv_op(env->irn); - ir_node *fild, *store, *mem; - int src_bits; +static ir_node *gen_x87_strict_conv(ir_mode *tgt_mode, ir_node *node) +{ + ir_node *block = get_nodes_block(node); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *frame = get_irg_frame(irg); + ir_node *store, *load; + ir_node *res; - if (! ent) { - int size = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode); - ent = cg->gp_to_fp = - frame_alloc_area(get_irg_frame_type(env->irg), size, size, 0); + store = new_rd_ia32_vfst(dbgi, irg, block, frame, noreg, nomem, node, + tgt_mode); + set_ia32_use_frame(store); + set_ia32_op_type(store, ia32_AddrModeD); + SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(env_cg, node)); + + load = new_rd_ia32_vfld(dbgi, irg, block, frame, noreg, store, + tgt_mode); + set_ia32_use_frame(load); + set_ia32_op_type(load, ia32_AddrModeS); + SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env_cg, node)); + + res = new_r_Proj(irg, block, load, mode_E, pn_ia32_vfld_res); + return res; +} + +/** + * Create a conversion from general purpose to x87 register + */ +static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) { + ir_node *src_block = get_nodes_block(node); + ir_node *block = be_transform_node(src_block); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *op = get_Conv_op(node); + ir_node *new_op; + ir_node *noreg; + ir_node *nomem; + ir_mode *mode; + ir_mode *store_mode; + ir_node *fild; + ir_node *store; + ir_node *res; + int src_bits; + + /* fild can use source AM if the operand is a signed 32bit integer */ + if (src_mode == mode_Is) { + ia32_address_mode_t am; + + match_arguments(&am, src_block, NULL, op, match_no_immediate); + if (am.op_type == ia32_AddrModeS) { + ia32_address_t *addr = &am.addr; + + fild = new_rd_ia32_vfild(dbgi, irg, block, addr->base, addr->index, addr->mem); + res = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res); + + set_am_attributes(fild, &am); + SET_IA32_ORIG_NODE(fild, ia32_get_old_node_name(env_cg, node)); + + fix_mem_proj(fild, &am); + + return res; + } + new_op = am.new_op2; + } else { + new_op = be_transform_node(op); } - /* first convert to 32 bit */ + noreg = ia32_new_NoReg_gp(env_cg); + nomem = new_NoMem(); + mode = get_irn_mode(op); + + /* first convert to 32 bit signed if necessary */ src_bits = get_mode_size_bits(src_mode); if (src_bits == 8) { - op = new_rd_ia32_Conv_I2I8Bit(env->dbg, irg, block, noreg, noreg, op, nomem); - op = new_r_Proj(irg, block, op, mode_Is, 0); - } - else if (src_bits < 32) { - op = new_rd_ia32_Conv_I2I(env->dbg, irg, block, noreg, noreg, op, nomem); - op = new_r_Proj(irg, block, op, mode_Is, 0); + new_op = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, block, noreg, noreg, nomem, + new_op, 
src_mode); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); + mode = mode_Is; + } else if (src_bits < 32) { + new_op = new_rd_ia32_Conv_I2I(dbgi, irg, block, noreg, noreg, nomem, + new_op, src_mode); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); + mode = mode_Is; } + assert(get_mode_size_bits(mode) == 32); + /* do a store */ - store = new_rd_ia32_Store(env->dbg, irg, block, get_irg_frame(irg), noreg, op, nomem); + store = new_rd_ia32_Store(dbgi, irg, block, get_irg_frame(irg), noreg, nomem, + new_op); - set_ia32_frame_ent(store, ent); set_ia32_use_frame(store); - - set_ia32_am_support(store, ia32_am_Dest); set_ia32_op_type(store, ia32_AddrModeD); - set_ia32_am_flavour(store, ia32_B); - set_ia32_ls_mode(store, mode_Is); - - mem = new_r_Proj(irg, block, store, mode_M, 0); + set_ia32_ls_mode(store, mode_Iu); + + /* exception for 32bit unsigned, do a 64bit spill+load */ + if(!mode_is_signed(mode)) { + ir_node *in[2]; + /* store a zero */ + ir_node *zero_const = create_Immediate_from_int(0); + + ir_node *zero_store = new_rd_ia32_Store(dbgi, irg, block, + get_irg_frame(irg), noreg, nomem, + zero_const); + + set_ia32_use_frame(zero_store); + set_ia32_op_type(zero_store, ia32_AddrModeD); + add_ia32_am_offs_int(zero_store, 4); + set_ia32_ls_mode(zero_store, mode_Iu); + + in[0] = zero_store; + in[1] = store; + + store = new_rd_Sync(dbgi, irg, block, 2, in); + store_mode = mode_Ls; + } else { + store_mode = mode_Is; + } /* do a fild */ - fild = new_rd_ia32_vfild(env->dbg, irg, block, get_irg_frame(irg), noreg, mem); + fild = new_rd_ia32_vfild(dbgi, irg, block, get_irg_frame(irg), noreg, store); - set_ia32_frame_ent(fild, ent); set_ia32_use_frame(fild); - set_ia32_am_support(fild, ia32_am_Source); set_ia32_op_type(fild, ia32_AddrModeS); - set_ia32_am_flavour(fild, ia32_B); - set_ia32_ls_mode(fild, mode_F); + set_ia32_ls_mode(fild, store_mode); + + res = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res); - return new_r_Proj(irg, block, fild, mode_F, 0); + return res; +} + +/** + * Crete a conversion from one integer mode into another one + */ +static ir_node *create_I2I_Conv(ir_mode *src_mode, ir_mode *tgt_mode, + dbg_info *dbgi, ir_node *block, ir_node *op, + ir_node *node) +{ + ir_graph *irg = current_ir_graph; + int src_bits = get_mode_size_bits(src_mode); + int tgt_bits = get_mode_size_bits(tgt_mode); + ir_node *new_block = be_transform_node(block); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *new_op; + ir_node *res; + ir_mode *smaller_mode; + int smaller_bits; + ia32_address_mode_t am; + ia32_address_t *addr = &am.addr; + + if (src_bits < tgt_bits) { + smaller_mode = src_mode; + smaller_bits = src_bits; + } else { + smaller_mode = tgt_mode; + smaller_bits = tgt_bits; + } + + memset(&am, 0, sizeof(am)); + if(use_source_address_mode(block, op, NULL)) { + build_address(&am, op); + new_op = noreg; + am.op_type = ia32_AddrModeS; + } else { + new_op = be_transform_node(op); + am.op_type = ia32_Normal; + } + if(addr->base == NULL) + addr->base = noreg; + if(addr->index == NULL) + addr->index = noreg; + if(addr->mem == NULL) + addr->mem = new_NoMem(); + + DB((dbg, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode)); + if (smaller_bits == 8) { + res = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, new_block, addr->base, + addr->index, addr->mem, new_op, + smaller_mode); + } else { + res = new_rd_ia32_Conv_I2I(dbgi, irg, new_block, addr->base, + addr->index, addr->mem, new_op, + smaller_mode); + } + + set_am_attributes(res, &am); + 
set_ia32_ls_mode(res, smaller_mode); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + res = fix_mem_proj(res, &am); + + return res; } /** * Transforms a Conv node. * - * @param env The transformation environment * @return The created ia32 Conv node */ -static ir_node *gen_Conv(ia32_transform_env_t *env) { - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_node *op = get_Conv_op(env->irn); - ir_mode *src_mode = get_irn_mode(op); - ir_mode *tgt_mode = env->mode; - int src_bits = get_mode_size_bits(src_mode); - int tgt_bits = get_mode_size_bits(tgt_mode); - int pn = -1; - int kill = 0; - ir_node *block = env->block; - ir_node *new_op = NULL; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_rd_NoMem(irg); - ir_node *proj; - DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) +static ir_node *gen_Conv(ir_node *node) { + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + ir_node *op = get_Conv_op(node); + ir_node *new_op = NULL; + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *src_mode = get_irn_mode(op); + ir_mode *tgt_mode = get_irn_mode(node); + int src_bits = get_mode_size_bits(src_mode); + int tgt_bits = get_mode_size_bits(tgt_mode); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_rd_NoMem(irg); + ir_node *res = NULL; + + if (src_mode == mode_b) { + assert(mode_is_int(tgt_mode)); + /* nothing to do, we already model bools as 0/1 ints */ + return be_transform_node(op); + } if (src_mode == tgt_mode) { - /* this can happen when changing mode_P to mode_Is */ - DB((mod, LEVEL_1, "killed Conv(mode, mode) ...")); - edges_reroute(env->irn, op, irg); + if (get_Conv_strict(node)) { + if (USE_SSE2(env_cg)) { + /* when we are in SSE mode, we can kill all strict no-op conversion */ + return be_transform_node(op); + } + } else { + /* this should be optimized already, but who knows... */ + DEBUG_ONLY(ir_fprintf(stderr, "Debug warning: conv %+F is pointless\n", node)); + DB((dbg, LEVEL_1, "killed Conv(mode, mode) ...")); + return be_transform_node(op); + } } - else if (mode_is_float(src_mode)) { + + if (mode_is_float(src_mode)) { + new_op = be_transform_node(op); /* we convert from float ... */ if (mode_is_float(tgt_mode)) { - /* ... to float */ - if (USE_SSE2(env->cg)) { - DB((mod, LEVEL_1, "create Conv(float, float) ...")); - new_op = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, op, nomem); - pn = pn_ia32_Conv_FP2FP_res; - } - else { - DB((mod, LEVEL_1, "killed Conv(float, float) ...")); - /* - remark: we create a intermediate conv here, so modes will be spread correctly - these convs will be killed later - */ - new_op = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, op, nomem); - pn = pn_ia32_Conv_FP2FP_res; - kill = 1; + if(src_mode == mode_E && tgt_mode == mode_D + && !get_Conv_strict(node)) { + DB((dbg, LEVEL_1, "killed Conv(mode, mode) ...")); + return new_op; } - } - else { - /* ... 
to int */ - DB((mod, LEVEL_1, "create Conv(float, int) ...")); - if (USE_SSE2(env->cg)) { - new_op = new_rd_ia32_Conv_FP2I(dbg, irg, block, noreg, noreg, op, nomem); - pn = pn_ia32_Conv_FP2I_res; - } - else - return gen_x87_fp_to_gp(env, tgt_mode); - - /* if target mode is not int: add an additional downscale convert */ - if (tgt_bits < 32) { - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); - set_ia32_am_support(new_op, ia32_am_Source); - set_ia32_tgt_mode(new_op, tgt_mode); - set_ia32_src_mode(new_op, src_mode); - - proj = new_rd_Proj(dbg, irg, block, new_op, mode_Is, pn_ia32_Conv_FP2I_res); - if (tgt_bits == 8 || src_bits == 8) { - new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, proj, nomem); - pn = pn_ia32_Conv_I2I8Bit_res; - } - else { - new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, proj, nomem); - pn = pn_ia32_Conv_I2I_res; + /* ... to float */ + if (USE_SSE2(env_cg)) { + DB((dbg, LEVEL_1, "create Conv(float, float) ...")); + res = new_rd_ia32_Conv_FP2FP(dbgi, irg, new_block, noreg, noreg, + nomem, new_op); + set_ia32_ls_mode(res, tgt_mode); + } else { + if(get_Conv_strict(node)) { + res = gen_x87_strict_conv(tgt_mode, new_op); + SET_IA32_ORIG_NODE(get_Proj_pred(res), ia32_get_old_node_name(env_cg, node)); + return res; } + DB((dbg, LEVEL_1, "killed Conv(float, float) ...")); + return new_op; + } + } else { + /* ... to int */ + DB((dbg, LEVEL_1, "create Conv(float, int) ...")); + if (USE_SSE2(env_cg)) { + res = new_rd_ia32_Conv_FP2I(dbgi, irg, new_block, noreg, noreg, + nomem, new_op); + set_ia32_ls_mode(res, src_mode); + } else { + return gen_x87_fp_to_gp(node); } } - } - else { + } else { /* we convert from int ... */ if (mode_is_float(tgt_mode)) { - FP_USED(env->cg); /* ... to float */ - DB((mod, LEVEL_1, "create Conv(int, float) ...")); - if (USE_SSE2(env->cg)) { - new_op = new_rd_ia32_Conv_I2FP(dbg, irg, block, noreg, noreg, op, nomem); - pn = pn_ia32_Conv_I2FP_res; - } - else - return gen_x87_gp_to_fp(env, src_mode); - } - else { - /* ... 
to int */ - if (get_mode_size_bits(src_mode) == tgt_bits) { - DB((mod, LEVEL_1, "omitting equal size Conv(%+F, %+F) ...", src_mode, tgt_mode)); - /* - remark: we create a intermediate conv here, so modes will be spread correctly - these convs will be killed later - */ - new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, op, nomem); - pn = pn_ia32_Conv_I2I_res; - kill = 1; - } - else { - DB((mod, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode)); - if (tgt_bits == 8 || src_bits == 8) { - new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, op, nomem); - pn = pn_ia32_Conv_I2I8Bit_res; - } - else { - new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, op, nomem); - pn = pn_ia32_Conv_I2I_res; + DB((dbg, LEVEL_1, "create Conv(int, float) ...")); + if (USE_SSE2(env_cg)) { + new_op = be_transform_node(op); + res = new_rd_ia32_Conv_I2FP(dbgi, irg, new_block, noreg, noreg, + nomem, new_op); + set_ia32_ls_mode(res, tgt_mode); + } else { + res = gen_x87_gp_to_fp(node, src_mode); + if(get_Conv_strict(node)) { + res = gen_x87_strict_conv(tgt_mode, res); + SET_IA32_ORIG_NODE(get_Proj_pred(res), + ia32_get_old_node_name(env_cg, node)); } + return res; + } + } else if(tgt_mode == mode_b) { + /* mode_b lowering already took care that we only have 0/1 values */ + DB((dbg, LEVEL_1, "omitting unnecessary Conv(%+F, %+F) ...", + src_mode, tgt_mode)); + return be_transform_node(op); + } else { + /* to int */ + if (src_bits == tgt_bits) { + DB((dbg, LEVEL_1, "omitting unnecessary Conv(%+F, %+F) ...", + src_mode, tgt_mode)); + return be_transform_node(op); } + + res = create_I2I_Conv(src_mode, tgt_mode, dbgi, block, op, node); + return res; } } - if (new_op) { - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); - set_ia32_tgt_mode(new_op, tgt_mode); - set_ia32_src_mode(new_op, src_mode); + return res; +} - set_ia32_am_support(new_op, ia32_am_Source); +static int check_immediate_constraint(long val, char immediate_constraint_type) +{ + switch (immediate_constraint_type) { + case 0: + return 1; + case 'I': + return val >= 0 && val <= 32; + case 'J': + return val >= 0 && val <= 63; + case 'K': + return val >= -128 && val <= 127; + case 'L': + return val == 0xff || val == 0xffff; + case 'M': + return val >= 0 && val <= 3; + case 'N': + return val >= 0 && val <= 255; + case 'O': + return val >= 0 && val <= 127; + default: + break; + } + panic("Invalid immediate constraint found"); + return 0; +} - new_op = new_rd_Proj(dbg, irg, block, new_op, tgt_mode, pn); +static ir_node *try_create_Immediate(ir_node *node, + char immediate_constraint_type) +{ + int minus = 0; + tarval *offset = NULL; + int offset_sign = 0; + long val = 0; + ir_entity *symconst_ent = NULL; + int symconst_sign = 0; + ir_mode *mode; + ir_node *cnst = NULL; + ir_node *symconst = NULL; + ir_node *res; + ir_graph *irg; + dbg_info *dbgi; + ir_node *block; - if (kill) - nodeset_insert(env->cg->kill_conv, new_op); + mode = get_irn_mode(node); + if(!mode_is_int(mode) && !mode_is_reference(mode)) { + return NULL; } - return new_op; -} + if(is_Minus(node)) { + minus = 1; + node = get_Minus_op(node); + } + if(is_Const(node)) { + cnst = node; + symconst = NULL; + offset_sign = minus; + } else if(is_SymConst(node)) { + cnst = NULL; + symconst = node; + symconst_sign = minus; + } else if(is_Add(node)) { + ir_node *left = get_Add_left(node); + ir_node *right = get_Add_right(node); + if(is_Const(left) && is_SymConst(right)) { + cnst = left; + symconst = right; + symconst_sign = minus; + offset_sign = minus; 
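+			/* an enclosing Minus negates both the constant offset and the symconst */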
+ } else if(is_SymConst(left) && is_Const(right)) { + cnst = right; + symconst = left; + symconst_sign = minus; + offset_sign = minus; + } + } else if(is_Sub(node)) { + ir_node *left = get_Sub_left(node); + ir_node *right = get_Sub_right(node); + if(is_Const(left) && is_SymConst(right)) { + cnst = left; + symconst = right; + symconst_sign = !minus; + offset_sign = minus; + } else if(is_SymConst(left) && is_Const(right)) { + cnst = right; + symconst = left; + symconst_sign = minus; + offset_sign = !minus; + } + } else { + return NULL; + } + if(cnst != NULL) { + offset = get_Const_tarval(cnst); + if(tarval_is_long(offset)) { + val = get_tarval_long(offset); + } else { + ir_fprintf(stderr, "Optimisation Warning: tarval from %+F is not a " + "long?\n", cnst); + return NULL; + } -/******************************************** - * _ _ - * | | | | - * | |__ ___ _ __ ___ __| | ___ ___ - * | '_ \ / _ \ '_ \ / _ \ / _` |/ _ \/ __| - * | |_) | __/ | | | (_) | (_| | __/\__ \ - * |_.__/ \___|_| |_|\___/ \__,_|\___||___/ - * - ********************************************/ + if(!check_immediate_constraint(val, immediate_constraint_type)) + return NULL; + } + if(symconst != NULL) { + if(immediate_constraint_type != 0) { + /* we need full 32bits for symconsts */ + return NULL; + } -static ir_node *gen_be_StackParam(ia32_transform_env_t *env) { - ir_node *new_op = NULL; - ir_node *node = env->irn; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *mem = new_rd_NoMem(env->irg); - ir_node *ptr = get_irn_n(node, 0); - entity *ent = arch_get_frame_entity(env->cg->arch_env, node); - ir_mode *mode = env->mode; + /* unfortunately the assembler/linker doesn't support -symconst */ + if(symconst_sign) + return NULL; - if (mode_is_float(mode)) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) - new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem); - else - new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem); - } - else { - new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem); + if(get_SymConst_kind(symconst) != symconst_addr_ent) + return NULL; + symconst_ent = get_SymConst_entity(symconst); } + if(cnst == NULL && symconst == NULL) + return NULL; - set_ia32_frame_ent(new_op, ent); - set_ia32_use_frame(new_op); - - set_ia32_am_support(new_op, ia32_am_Source); - set_ia32_op_type(new_op, ia32_AddrModeS); - set_ia32_am_flavour(new_op, ia32_B); - set_ia32_ls_mode(new_op, mode); - set_ia32_flags(new_op, get_ia32_flags(new_op) | arch_irn_flags_rematerializable); + if(offset_sign && offset != NULL) { + offset = tarval_neg(offset); + } - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + irg = current_ir_graph; + dbgi = get_irn_dbg_info(node); + block = get_irg_start_block(irg); + res = new_rd_ia32_Immediate(dbgi, irg, block, symconst_ent, + symconst_sign, val); + arch_set_irn_register(env_cg->arch_env, res, &ia32_gp_regs[REG_GP_NOREG]); - return new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_ia32_Load_res); + return res; } -/** - * Transforms a FrameAddr into an ia32 Add. 
- */ -static ir_node *gen_be_FrameAddr(ia32_transform_env_t *env) { - ir_node *new_op = NULL; - ir_node *node = env->irn; - ir_node *op = get_irn_n(node, 0); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_rd_NoMem(env->irg); - - new_op = new_rd_ia32_Add(env->dbg, env->irg, env->block, noreg, noreg, op, noreg, nomem); - set_ia32_frame_ent(new_op, arch_get_frame_entity(env->cg->arch_env, node)); - set_ia32_am_support(new_op, ia32_am_Full); - set_ia32_use_frame(new_op); - set_ia32_immop_type(new_op, ia32_ImmConst); - set_ia32_commutative(new_op); +static ir_node *create_immediate_or_transform(ir_node *node, + char immediate_constraint_type) +{ + ir_node *new_node = try_create_Immediate(node, immediate_constraint_type); + if (new_node == NULL) { + new_node = be_transform_node(node); + } + return new_node; +} - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); +typedef struct constraint_t constraint_t; +struct constraint_t { + int is_in; + int n_outs; + const arch_register_req_t **out_reqs; - return new_rd_Proj(env->dbg, env->irg, env->block, new_op, env->mode, pn_ia32_Add_res); -} + const arch_register_req_t *req; + unsigned immediate_possible; + char immediate_type; +}; -/** - * Transforms a FrameLoad into an ia32 Load. - */ -static ir_node *gen_be_FrameLoad(ia32_transform_env_t *env) { - ir_node *new_op = NULL; - ir_node *node = env->irn; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *mem = get_irn_n(node, 0); - ir_node *ptr = get_irn_n(node, 1); - entity *ent = arch_get_frame_entity(env->cg->arch_env, node); - ir_mode *mode = get_type_mode(get_entity_type(ent)); +void parse_asm_constraint(int pos, constraint_t *constraint, const char *c) +{ + int immediate_possible = 0; + char immediate_type = 0; + unsigned limited = 0; + const arch_register_class_t *cls = NULL; + ir_graph *irg = current_ir_graph; + struct obstack *obst = get_irg_obstack(irg); + arch_register_req_t *req; + unsigned *limited_ptr; + int p; + int same_as = -1; + + /* TODO: replace all the asserts with nice error messages */ + + printf("Constraint: %s\n", c); + + while(*c != 0) { + switch(*c) { + case ' ': + case '\t': + case '\n': + break; - if (mode_is_float(mode)) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) - new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem); - else - new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem); - } - else - new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem); + case 'a': + assert(cls == NULL || + (cls == &ia32_reg_classes[CLASS_ia32_gp] && limited != 0)); + cls = &ia32_reg_classes[CLASS_ia32_gp]; + limited |= 1 << REG_EAX; + break; + case 'b': + assert(cls == NULL || + (cls == &ia32_reg_classes[CLASS_ia32_gp] && limited != 0)); + cls = &ia32_reg_classes[CLASS_ia32_gp]; + limited |= 1 << REG_EBX; + break; + case 'c': + assert(cls == NULL || + (cls == &ia32_reg_classes[CLASS_ia32_gp] && limited != 0)); + cls = &ia32_reg_classes[CLASS_ia32_gp]; + limited |= 1 << REG_ECX; + break; + case 'd': + assert(cls == NULL || + (cls == &ia32_reg_classes[CLASS_ia32_gp] && limited != 0)); + cls = &ia32_reg_classes[CLASS_ia32_gp]; + limited |= 1 << REG_EDX; + break; + case 'D': + assert(cls == NULL || + (cls == &ia32_reg_classes[CLASS_ia32_gp] && limited != 0)); + cls = &ia32_reg_classes[CLASS_ia32_gp]; + limited |= 1 << REG_EDI; + break; + case 'S': + assert(cls == NULL || + (cls == &ia32_reg_classes[CLASS_ia32_gp] && limited != 0)); + cls = &ia32_reg_classes[CLASS_ia32_gp]; + limited |= 
1 << REG_ESI; + break; + case 'Q': + case 'q': /* q means lower part of the regs only, this makes no + * difference to Q for us (we only assigne whole registers) */ + assert(cls == NULL || + (cls == &ia32_reg_classes[CLASS_ia32_gp] && limited != 0)); + cls = &ia32_reg_classes[CLASS_ia32_gp]; + limited |= 1 << REG_EAX | 1 << REG_EBX | 1 << REG_ECX | + 1 << REG_EDX; + break; + case 'A': + assert(cls == NULL || + (cls == &ia32_reg_classes[CLASS_ia32_gp] && limited != 0)); + cls = &ia32_reg_classes[CLASS_ia32_gp]; + limited |= 1 << REG_EAX | 1 << REG_EDX; + break; + case 'l': + assert(cls == NULL || + (cls == &ia32_reg_classes[CLASS_ia32_gp] && limited != 0)); + cls = &ia32_reg_classes[CLASS_ia32_gp]; + limited |= 1 << REG_EAX | 1 << REG_EBX | 1 << REG_ECX | + 1 << REG_EDX | 1 << REG_ESI | 1 << REG_EDI | + 1 << REG_EBP; + break; - set_ia32_frame_ent(new_op, ent); - set_ia32_use_frame(new_op); + case 'R': + case 'r': + case 'p': + assert(cls == NULL); + cls = &ia32_reg_classes[CLASS_ia32_gp]; + break; - set_ia32_am_support(new_op, ia32_am_Source); - set_ia32_op_type(new_op, ia32_AddrModeS); - set_ia32_am_flavour(new_op, ia32_B); - set_ia32_ls_mode(new_op, mode); + case 'f': + case 't': + case 'u': + /* TODO: mark values so the x87 simulator knows about t and u */ + assert(cls == NULL); + cls = &ia32_reg_classes[CLASS_ia32_vfp]; + break; - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + case 'Y': + case 'x': + assert(cls == NULL); + /* TODO: check that sse2 is supported */ + cls = &ia32_reg_classes[CLASS_ia32_xmm]; + break; - return new_op; -} + case 'I': + case 'J': + case 'K': + case 'L': + case 'M': + case 'N': + case 'O': + assert(!immediate_possible); + immediate_possible = 1; + immediate_type = *c; + break; + case 'n': + case 'i': + assert(!immediate_possible); + immediate_possible = 1; + break; + case 'g': + assert(!immediate_possible && cls == NULL); + immediate_possible = 1; + cls = &ia32_reg_classes[CLASS_ia32_gp]; + break; -/** - * Transforms a FrameStore into an ia32 Store. 
- */ -static ir_node *gen_be_FrameStore(ia32_transform_env_t *env) { - ir_node *new_op = NULL; - ir_node *node = env->irn; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *mem = get_irn_n(node, 0); - ir_node *ptr = get_irn_n(node, 1); - ir_node *val = get_irn_n(node, 2); - entity *ent = arch_get_frame_entity(env->cg->arch_env, node); - ir_mode *mode = get_irn_mode(val); + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + assert(constraint->is_in && "can only specify same constraint " + "on input"); + + sscanf(c, "%d%n", &same_as, &p); + if(same_as >= 0) { + c += p; + continue; + } + break; - if (mode_is_float(mode)) { - FP_USED(env->cg); - if (USE_SSE2(env->cg)) - new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, mem); - else - new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, mem); + case 'E': /* no float consts yet */ + case 'F': /* no float consts yet */ + case 's': /* makes no sense on x86 */ + case 'X': /* we can't support that in firm */ + case 'm': + case 'o': + case 'V': + case '<': /* no autodecrement on x86 */ + case '>': /* no autoincrement on x86 */ + case 'C': /* sse constant not supported yet */ + case 'G': /* 80387 constant not supported yet */ + case 'y': /* we don't support mmx registers yet */ + case 'Z': /* not available in 32 bit mode */ + case 'e': /* not available in 32 bit mode */ + panic("unsupported asm constraint '%c' found in (%+F)", + *c, current_ir_graph); + break; + default: + panic("unknown asm constraint '%c' found in (%+F)", *c, + current_ir_graph); + break; + } + ++c; } - else if (get_mode_size_bits(mode) == 8) { - new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, mem); + + if(same_as >= 0) { + const arch_register_req_t *other_constr; + + assert(cls == NULL && "same as and register constraint not supported"); + assert(!immediate_possible && "same as and immediate constraint not " + "supported"); + assert(same_as < constraint->n_outs && "wrong constraint number in " + "same_as constraint"); + + other_constr = constraint->out_reqs[same_as]; + + req = obstack_alloc(obst, sizeof(req[0])); + req->cls = other_constr->cls; + req->type = arch_register_req_type_should_be_same; + req->limited = NULL; + req->other_same[0] = pos; + req->other_same[1] = -1; + req->other_different = -1; + + /* switch constraints. This is because in firm we have same_as + * constraints on the output constraints while in the gcc asm syntax + * they are specified on the input constraints */ + constraint->req = other_constr; + constraint->out_reqs[same_as] = req; + constraint->immediate_possible = 0; + return; } - else { - new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, mem); + + if(immediate_possible && cls == NULL) { + cls = &ia32_reg_classes[CLASS_ia32_gp]; } + assert(!immediate_possible || cls == &ia32_reg_classes[CLASS_ia32_gp]); + assert(cls != NULL); - set_ia32_frame_ent(new_op, ent); - set_ia32_use_frame(new_op); + if(immediate_possible) { + assert(constraint->is_in + && "imeediates make no sense for output constraints"); + } + /* todo: check types (no float input on 'r' constrained in and such... 
*/ - set_ia32_am_support(new_op, ia32_am_Dest); - set_ia32_op_type(new_op, ia32_AddrModeD); - set_ia32_am_flavour(new_op, ia32_B); - set_ia32_ls_mode(new_op, mode); + if(limited != 0) { + req = obstack_alloc(obst, sizeof(req[0]) + sizeof(unsigned)); + limited_ptr = (unsigned*) (req+1); + } else { + req = obstack_alloc(obst, sizeof(req[0])); + } + memset(req, 0, sizeof(req[0])); + + if(limited != 0) { + req->type = arch_register_req_type_limited; + *limited_ptr = limited; + req->limited = limited_ptr; + } else { + req->type = arch_register_req_type_normal; + } + req->cls = cls; - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + constraint->req = req; + constraint->immediate_possible = immediate_possible; + constraint->immediate_type = immediate_type; +} - return new_op; +static void parse_clobber(ir_node *node, int pos, constraint_t *constraint, + const char *c) +{ + (void) node; + (void) pos; + (void) constraint; + (void) c; + panic("Clobbers not supported yet"); } /** - * In case SSE is used we need to copy the result from FPU TOS. + * generates code for a ASM node */ -static ir_node *gen_be_Call(ia32_transform_env_t *env) { - ir_node *call_res = get_proj_for_pn(env->irn, pn_be_Call_first_res); - ir_node *call_mem = get_proj_for_pn(env->irn, pn_be_Call_M_regular); - ir_mode *mode; +static ir_node *gen_ASM(ir_node *node) +{ + int i, arity; + ir_graph *irg = current_ir_graph; + ir_node *block = be_transform_node(get_nodes_block(node)); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node **in; + ir_node *res; + int out_arity; + int n_outs; + int n_clobbers; + void *generic_attr; + ia32_asm_attr_t *attr; + const arch_register_req_t **out_reqs; + const arch_register_req_t **in_reqs; + struct obstack *obst; + constraint_t parsed_constraint; + + /* transform inputs */ + arity = get_irn_arity(node); + in = alloca(arity * sizeof(in[0])); + memset(in, 0, arity * sizeof(in[0])); + + n_outs = get_ASM_n_output_constraints(node); + n_clobbers = get_ASM_n_clobbers(node); + out_arity = n_outs + n_clobbers; + + /* construct register constraints */ + obst = get_irg_obstack(irg); + out_reqs = obstack_alloc(obst, out_arity * sizeof(out_reqs[0])); + parsed_constraint.out_reqs = out_reqs; + parsed_constraint.n_outs = n_outs; + parsed_constraint.is_in = 0; + for(i = 0; i < out_arity; ++i) { + const char *c; + + if(i < n_outs) { + const ir_asm_constraint *constraint; + constraint = & get_ASM_output_constraints(node) [i]; + c = get_id_str(constraint->constraint); + parse_asm_constraint(i, &parsed_constraint, c); + } else { + ident *glob_id = get_ASM_clobbers(node) [i - n_outs]; + c = get_id_str(glob_id); + parse_clobber(node, i, &parsed_constraint, c); + } + out_reqs[i] = parsed_constraint.req; + } - if (! call_res || ! 
USE_SSE2(env->cg)) - return NULL; + in_reqs = obstack_alloc(obst, arity * sizeof(in_reqs[0])); + parsed_constraint.is_in = 1; + for(i = 0; i < arity; ++i) { + const ir_asm_constraint *constraint; + ident *constr_id; + const char *c; + + constraint = & get_ASM_input_constraints(node) [i]; + constr_id = constraint->constraint; + c = get_id_str(constr_id); + parse_asm_constraint(i, &parsed_constraint, c); + in_reqs[i] = parsed_constraint.req; + + if(parsed_constraint.immediate_possible) { + ir_node *pred = get_irn_n(node, i); + char imm_type = parsed_constraint.immediate_type; + ir_node *immediate = try_create_Immediate(pred, imm_type); + + if(immediate != NULL) { + in[i] = immediate; + } + } + } - mode = get_irn_mode(call_res); + /* transform inputs */ + for(i = 0; i < arity; ++i) { + ir_node *pred; + ir_node *transformed; - /* in case there is no memory output: create one to serialize the copy FPU -> SSE */ - if (! call_mem) - call_mem = new_r_Proj(env->irg, env->block, env->irn, mode_M, pn_be_Call_M_regular); + if(in[i] != NULL) + continue; - if (mode_is_float(mode)) { - /* store st(0) onto stack */ - ir_node *frame = get_irg_frame(env->irg); - ir_node *fstp = new_rd_ia32_GetST0(env->dbg, env->irg, env->block, frame, call_mem); - ir_node *mproj = new_r_Proj(env->irg, env->block, fstp, mode_M, pn_ia32_GetST0_M); - entity *ent = frame_alloc_area(get_irg_frame_type(env->irg), get_mode_size_bytes(mode), 16, 0); - ir_node *sse_load, *p, *bad, *keep; - ir_node **in_keep; - int keep_arity, i; - - set_ia32_ls_mode(fstp, mode); - set_ia32_op_type(fstp, ia32_AddrModeD); - set_ia32_use_frame(fstp); - set_ia32_frame_ent(fstp, ent); - set_ia32_am_flavour(fstp, ia32_B); - set_ia32_am_support(fstp, ia32_am_Dest); + pred = get_irn_n(node, i); + transformed = be_transform_node(pred); + in[i] = transformed; + } - /* load into SSE register */ - sse_load = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, frame, ia32_new_NoReg_gp(env->cg), mproj); - set_ia32_ls_mode(sse_load, mode); - set_ia32_op_type(sse_load, ia32_AddrModeS); - set_ia32_use_frame(sse_load); - set_ia32_frame_ent(sse_load, ent); - set_ia32_am_flavour(sse_load, ia32_B); - set_ia32_am_support(sse_load, ia32_am_Source); - sse_load = new_r_Proj(env->irg, env->block, sse_load, mode, pn_ia32_xLoad_res); - - /* reroute all users of the result proj to the sse load */ - edges_reroute(call_res, sse_load, env->irg); - - /* now: create new Keep whith all former ins and one additional in - the result Proj */ - - /* get a Proj representing a caller save register */ - p = get_proj_for_pn(env->irn, pn_be_Call_first_res + 1); - assert(is_Proj(p) && "Proj expected."); - - /* user of the the proj is the Keep */ - p = get_edge_src_irn(get_irn_out_edge_first(p)); - assert(be_is_Keep(p) && "Keep expected."); - - /* copy in array of the old keep and set the result proj as additional in */ - keep_arity = get_irn_arity(p) + 1; - NEW_ARR_A(ir_node *, in_keep, keep_arity); - in_keep[keep_arity - 1] = call_res; - for (i = 0; i < keep_arity - 1; ++i) - in_keep[i] = get_irn_n(p, i); - - /* create new keep and set the in class requirements properly */ - keep = be_new_Keep(NULL, env->irg, env->block, keep_arity, in_keep); - for(i = 0; i < keep_arity; ++i) { - const arch_register_class_t *cls = arch_get_irn_reg_class(env->cg->arch_env, in_keep[i], -1); - be_node_set_reg_class(keep, i, cls); - } + res = new_rd_ia32_Asm(dbgi, irg, block, arity, in, out_arity); - /* kill the old keep */ - bad = get_irg_bad(env->irg); - for (i = 0; i < keep_arity - 1; i++) - set_irn_n(p, i, 
bad); - } + generic_attr = get_irn_generic_attr(res); + attr = CAST_IA32_ATTR(ia32_asm_attr_t, generic_attr); + attr->asm_text = get_ASM_text(node); + set_ia32_out_req_all(res, out_reqs); + set_ia32_in_req_all(res, in_reqs); - return NULL; + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + + return res; } +/******************************************** + * _ _ + * | | | | + * | |__ ___ _ __ ___ __| | ___ ___ + * | '_ \ / _ \ '_ \ / _ \ / _` |/ _ \/ __| + * | |_) | __/ | | | (_) | (_| | __/\__ \ + * |_.__/ \___|_| |_|\___/ \__,_|\___||___/ + * + ********************************************/ + /** - * In case SSE is used we need to copy the result from XMM0 to FPU TOS before return. + * Transforms a FrameAddr into an ia32 Add. */ -static ir_node *gen_be_Return(ia32_transform_env_t *env) { - ir_node *ret_val = get_irn_n(env->irn, be_pos_Return_val); - ir_node *ret_mem = get_irn_n(env->irn, be_pos_Return_mem); - entity *ent = get_irg_entity(get_irn_irg(ret_val)); - ir_type *tp = get_entity_type(ent); +static ir_node *gen_be_FrameAddr(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *op = be_get_FrameAddr_frame(node); + ir_node *new_op = be_transform_node(op); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *res; - if (be_Return_get_n_rets(env->irn) < 1 || ! ret_val || ! USE_SSE2(env->cg)) - return NULL; + res = new_rd_ia32_Lea(dbgi, irg, block, new_op, noreg); + set_ia32_frame_ent(res, arch_get_frame_entity(env_cg->arch_env, node)); + set_ia32_use_frame(res); - if (get_method_n_ress(tp) == 1) { - ir_type *res_type = get_method_res_type(tp, 0); - ir_mode *mode; - - if (is_Primitive_type(res_type)) { - mode = get_type_mode(res_type); - if (mode_is_float(mode)) { - ir_node *frame; - entity *ent; - ir_node *sse_store, *fld, *mproj, *barrier; - int pn_ret_val = get_Proj_proj(ret_val); - int pn_ret_mem = get_Proj_proj(ret_mem); - - /* get the Barrier */ - barrier = get_Proj_pred(ret_val); - - /* get result input of the Barrier */ - ret_val = get_irn_n(barrier, pn_ret_val); - - /* get memory input of the Barrier */ - ret_mem = get_irn_n(barrier, pn_ret_mem); - - frame = get_irg_frame(env->irg); - ent = frame_alloc_area(get_irg_frame_type(env->irg), get_mode_size_bytes(mode), 16, 0); - - /* store xmm0 onto stack */ - sse_store = new_rd_ia32_xStoreSimple(env->dbg, env->irg, env->block, frame, ret_val, ret_mem); - set_ia32_ls_mode(sse_store, mode); - set_ia32_op_type(sse_store, ia32_AddrModeD); - set_ia32_use_frame(sse_store); - set_ia32_frame_ent(sse_store, ent); - set_ia32_am_flavour(sse_store, ia32_B); - set_ia32_am_support(sse_store, ia32_am_Dest); - sse_store = new_r_Proj(env->irg, env->block, sse_store, mode_M, pn_ia32_xStore_M); - - /* load into st0 */ - fld = new_rd_ia32_SetST0(env->dbg, env->irg, env->block, frame, sse_store); - set_ia32_ls_mode(fld, mode); - set_ia32_op_type(fld, ia32_AddrModeS); - set_ia32_use_frame(fld); - set_ia32_frame_ent(fld, ent); - set_ia32_am_flavour(fld, ia32_B); - set_ia32_am_support(fld, ia32_am_Source); - mproj = new_r_Proj(env->irg, env->block, fld, mode_M, pn_ia32_SetST0_M); - fld = new_r_Proj(env->irg, env->block, fld, mode, pn_ia32_SetST0_res); - arch_set_irn_register(env->cg->arch_env, fld, &ia32_st_regs[REG_ST0]); - - /* set new return value */ - set_irn_n(barrier, pn_ret_val, fld); - set_irn_n(barrier, pn_ret_mem, mproj); - } - } - } + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); - return NULL; + 
return res; } /** - * Transform a be_AddSP into an ia32_AddSP. Eat up const sizes. + * In case SSE is used we need to copy the result from XMM0 to FPU TOS before return. */ -static ir_node *gen_be_AddSP(ia32_transform_env_t *env) { - ir_node *new_op; - const ir_edge_t *edge; - ir_node *sz = get_irn_n(env->irn, be_pos_AddSP_size); - ir_node *sp = get_irn_n(env->irn, be_pos_AddSP_old_sp); +static ir_node *gen_be_Return(ir_node *node) { + ir_graph *irg = current_ir_graph; + ir_node *ret_val = get_irn_n(node, be_pos_Return_val); + ir_node *ret_mem = get_irn_n(node, be_pos_Return_mem); + ir_entity *ent = get_irg_entity(irg); + ir_type *tp = get_entity_type(ent); + dbg_info *dbgi; + ir_node *block; + ir_type *res_type; + ir_mode *mode; + ir_node *frame, *sse_store, *fld, *mproj, *barrier; + ir_node *new_barrier, *new_ret_val, *new_ret_mem; + ir_node *noreg; + ir_node **in; + int pn_ret_val, pn_ret_mem, arity, i; + + assert(ret_val != NULL); + if (be_Return_get_n_rets(node) < 1 || ! USE_SSE2(env_cg)) { + return be_duplicate_node(node); + } - new_op = new_rd_ia32_AddSP(env->dbg, env->irg, env->block, sp, sz); + res_type = get_method_res_type(tp, 0); - if (is_ia32_Const(sz)) { - set_ia32_Immop_attr(new_op, sz); - set_irn_n(new_op, 1, ia32_new_NoReg_gp(env->cg)); + if (! is_Primitive_type(res_type)) { + return be_duplicate_node(node); } - else if (is_ia32_Load(sz) && get_ia32_am_flavour(sz) == ia32_O) { - set_ia32_immop_type(new_op, ia32_ImmSymConst); - set_ia32_op_type(new_op, ia32_AddrModeS); - set_ia32_am_sc(new_op, get_ia32_am_sc(sz)); - add_ia32_am_offs(new_op, get_ia32_am_offs(sz)); - set_irn_n(new_op, 1, ia32_new_NoReg_gp(env->cg)); + + mode = get_type_mode(res_type); + if (! mode_is_float(mode)) { + return be_duplicate_node(node); } - /* fix proj nums */ - foreach_out_edge(env->irn, edge) { - ir_node *proj = get_edge_src_irn(edge); + assert(get_method_n_ress(tp) == 1); - assert(is_Proj(proj)); + pn_ret_val = get_Proj_proj(ret_val); + pn_ret_mem = get_Proj_proj(ret_mem); - if (get_Proj_proj(proj) == pn_be_AddSP_res) { - /* the node is not yet exchanged: we need to set the register manually */ - ia32_attr_t *attr = get_ia32_attr(new_op); - attr->slots[pn_ia32_AddSP_stack] = &ia32_gp_regs[REG_ESP]; - set_Proj_proj(proj, pn_ia32_AddSP_stack); - } - else if (get_Proj_proj(proj) == pn_be_AddSP_M) { - set_Proj_proj(proj, pn_ia32_AddSP_M); - } - else { - assert(0); + /* get the Barrier */ + barrier = get_Proj_pred(ret_val); + + /* get result input of the Barrier */ + ret_val = get_irn_n(barrier, pn_ret_val); + new_ret_val = be_transform_node(ret_val); + + /* get memory input of the Barrier */ + ret_mem = get_irn_n(barrier, pn_ret_mem); + new_ret_mem = be_transform_node(ret_mem); + + frame = get_irg_frame(irg); + + dbgi = get_irn_dbg_info(barrier); + block = be_transform_node(get_nodes_block(barrier)); + + noreg = ia32_new_NoReg_gp(env_cg); + + /* store xmm0 onto stack */ + sse_store = new_rd_ia32_xStoreSimple(dbgi, irg, block, frame, noreg, + new_ret_mem, new_ret_val); + set_ia32_ls_mode(sse_store, mode); + set_ia32_op_type(sse_store, ia32_AddrModeD); + set_ia32_use_frame(sse_store); + + /* load into x87 register */ + fld = new_rd_ia32_vfld(dbgi, irg, block, frame, noreg, sse_store, mode); + set_ia32_op_type(fld, ia32_AddrModeS); + set_ia32_use_frame(fld); + + mproj = new_r_Proj(irg, block, fld, mode_M, pn_ia32_vfld_M); + fld = new_r_Proj(irg, block, fld, mode_vfp, pn_ia32_vfld_res); + + /* create a new barrier */ + arity = get_irn_arity(barrier); + in = alloca(arity * sizeof(in[0])); + for (i = 0; 
i < arity; ++i) { + ir_node *new_in; + + if (i == pn_ret_val) { + new_in = fld; + } else if (i == pn_ret_mem) { + new_in = mproj; + } else { + ir_node *in = get_irn_n(barrier, i); + new_in = be_transform_node(in); } + in[i] = new_in; } - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + new_barrier = new_ir_node(dbgi, irg, block, + get_irn_op(barrier), get_irn_mode(barrier), + arity, in); + copy_node_attr(barrier, new_barrier); + be_duplicate_deps(barrier, new_barrier); + be_set_transformed_node(barrier, new_barrier); + mark_irn_visited(barrier); - return new_op; + /* transform normally */ + return be_duplicate_node(node); } /** - * Transform a be_SubSP into an ia32_SubSP. Eat up const sizes. + * Transform a be_AddSP into an ia32_AddSP. Eat up const sizes. */ -static ir_node *gen_be_SubSP(ia32_transform_env_t *env) { - ir_node *new_op; - const ir_edge_t *edge; - ir_node *sz = get_irn_n(env->irn, be_pos_SubSP_size); - ir_node *sp = get_irn_n(env->irn, be_pos_SubSP_old_sp); +static ir_node *gen_be_AddSP(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *sz = get_irn_n(node, be_pos_AddSP_size); + ir_node *new_sz; + ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp); + ir_node *new_sp = be_transform_node(sp); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *new_op; - new_op = new_rd_ia32_SubSP(env->dbg, env->irg, env->block, sp, sz); + new_sz = create_immediate_or_transform(sz, 0); - if (is_ia32_Const(sz)) { - set_ia32_Immop_attr(new_op, sz); - set_irn_n(new_op, 1, ia32_new_NoReg_gp(env->cg)); - } - else if (is_ia32_Load(sz) && get_ia32_am_flavour(sz) == ia32_O) { - set_ia32_immop_type(new_op, ia32_ImmSymConst); - set_ia32_op_type(new_op, ia32_AddrModeS); - set_ia32_am_sc(new_op, get_ia32_am_sc(sz)); - add_ia32_am_offs(new_op, get_ia32_am_offs(sz)); - set_irn_n(new_op, 1, ia32_new_NoReg_gp(env->cg)); - } + /* ia32 stack grows in reverse direction, make a SubSP */ + new_op = new_rd_ia32_SubSP(dbgi, irg, block, noreg, noreg, nomem, new_sp, + new_sz); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); - /* fix proj nums */ - foreach_out_edge(env->irn, edge) { - ir_node *proj = get_edge_src_irn(edge); + return new_op; +} - assert(is_Proj(proj)); +/** + * Transform a be_SubSP into an ia32_SubSP. Eat up const sizes. 
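+ * (The ia32 stack grows downwards, so the be_SubSP is actually implemented
+ * with an ia32 AddSP, mirroring gen_be_AddSP above.)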
+ */ +static ir_node *gen_be_SubSP(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *sz = get_irn_n(node, be_pos_SubSP_size); + ir_node *new_sz; + ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp); + ir_node *new_sp = be_transform_node(sp); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *new_op; - if (get_Proj_proj(proj) == pn_be_SubSP_res) { - /* the node is not yet exchanged: we need to set the register manually */ - ia32_attr_t *attr = get_ia32_attr(new_op); - attr->slots[pn_ia32_SubSP_stack] = &ia32_gp_regs[REG_ESP]; - set_Proj_proj(proj, pn_ia32_SubSP_stack); - } - else if (get_Proj_proj(proj) == pn_be_SubSP_M) { - set_Proj_proj(proj, pn_ia32_SubSP_M); - } - else { - assert(0); - } - } + new_sz = create_immediate_or_transform(sz, 0); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + /* ia32 stack grows in reverse direction, make an AddSP */ + new_op = new_rd_ia32_AddSP(dbgi, irg, block, noreg, noreg, nomem, new_sp, + new_sz); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); return new_op; } @@ -2537,26 +3429,73 @@ static ir_node *gen_be_SubSP(ia32_transform_env_t *env) { * as this is not done during register allocation because Unknown * is an "ignore" node. */ -static ir_node *gen_Unknown(ia32_transform_env_t *env) { - ir_mode *mode = env->mode; - ir_node *irn = env->irn; +static ir_node *gen_Unknown(ir_node *node) { + ir_mode *mode = get_irn_mode(node); if (mode_is_float(mode)) { - if (USE_SSE2(env->cg)) - arch_set_irn_register(env->cg->arch_env, irn, &ia32_xmm_regs[REG_XMM_UKNWN]); - else - arch_set_irn_register(env->cg->arch_env, irn, &ia32_vfp_regs[REG_VFP_UKNWN]); - } - else if (mode_is_int(mode) || mode_is_reference(mode) || mode_is_character(mode)) { - arch_set_irn_register(env->cg->arch_env, irn, &ia32_gp_regs[REG_GP_UKNWN]); - } - else { + if (USE_SSE2(env_cg)) { + return ia32_new_Unknown_xmm(env_cg); + } else { + /* Unknown nodes are buggy in x87 sim, use zero for now... 
*/ + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_irg_start_block(irg); + return new_rd_ia32_vfldz(dbgi, irg, block); + } + } else if (mode_needs_gp_reg(mode)) { + return ia32_new_Unknown_gp(env_cg); + } else { assert(0 && "unsupported Unknown-Mode"); } return NULL; } +/** + * Change some phi modes + */ +static ir_node *gen_Phi(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_node *phi; + + if(mode_needs_gp_reg(mode)) { + /* we shouldn't have any 64bit stuff around anymore */ + assert(get_mode_size_bits(mode) <= 32); + /* all integer operations are on 32bit registers now */ + mode = mode_Iu; + } else if(mode_is_float(mode)) { + if (USE_SSE2(env_cg)) { + mode = mode_xmm; + } else { + mode = mode_vfp; + } + } + + /* phi nodes allow loops, so we use the old arguments for now + * and fix this later */ + phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), + get_irn_in(node) + 1); + copy_node_attr(node, phi); + be_duplicate_deps(node, phi); + + be_set_transformed_node(node, phi); + be_enqueue_preds(node); + + return phi; +} + +/** + * Transform IJmp + */ +static ir_node *gen_IJmp(ir_node *node) { + /* TODO: support AM */ + return gen_unop(node, get_IJmp_target(node), new_rd_ia32_IJmp); +} + + /********************************************************************** * _ _ _ * | | | | | | @@ -2578,168 +3517,351 @@ typedef ir_node *construct_store_func(dbg_info *db, ir_graph *irg, ir_node *bloc /** * Transforms a lowered Load into a "real" one. */ -static ir_node *gen_lowered_Load(ia32_transform_env_t *env, construct_load_func func, char fp_unit) { - ir_node *node = env->irn; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_mode *mode = get_ia32_ls_mode(node); - ir_node *new_op; - char *am_offs; - ia32_am_flavour_t am_flav = ia32_B; +static ir_node *gen_lowered_Load(ir_node *node, construct_load_func func) +{ + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *ptr = get_irn_n(node, 0); + ir_node *new_ptr = be_transform_node(ptr); + ir_node *mem = get_irn_n(node, 1); + ir_node *new_mem = be_transform_node(mem); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_ia32_ls_mode(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *new_op; + + new_op = func(dbgi, irg, block, new_ptr, noreg, new_mem); - /* - Could be that we have SSE2 unit, but due to 64Bit Div/Conv - lowering we have x87 nodes, so we need to enforce simulation. 
- */ - if (mode_is_float(mode)) { - FP_USED(env->cg); - if (fp_unit == fp_x87) - FORCE_x87(env->cg); + set_ia32_op_type(new_op, ia32_AddrModeS); + set_ia32_am_offs_int(new_op, get_ia32_am_offs_int(node)); + set_ia32_am_scale(new_op, get_ia32_am_scale(node)); + set_ia32_am_sc(new_op, get_ia32_am_sc(node)); + if (is_ia32_am_sc_sign(node)) + set_ia32_am_sc_sign(new_op); + set_ia32_ls_mode(new_op, mode); + if (is_ia32_use_frame(node)) { + set_ia32_frame_ent(new_op, get_ia32_frame_ent(node)); + set_ia32_use_frame(new_op); } - new_op = func(env->dbg, env->irg, env->block, get_irn_n(node, 0), noreg, get_irn_n(node, 1)); - am_offs = get_ia32_am_offs(node); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); - if (am_offs) { - am_flav |= ia32_O; - add_ia32_am_offs(new_op, am_offs); - } + return new_op; +} - set_ia32_am_support(new_op, ia32_am_Source); - set_ia32_op_type(new_op, ia32_AddrModeS); - set_ia32_am_flavour(new_op, am_flav); +/** + * Transforms a lowered Store into a "real" one. + */ +static ir_node *gen_lowered_Store(ir_node *node, construct_store_func func) +{ + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *ptr = get_irn_n(node, 0); + ir_node *new_ptr = be_transform_node(ptr); + ir_node *val = get_irn_n(node, 1); + ir_node *new_val = be_transform_node(val); + ir_node *mem = get_irn_n(node, 2); + ir_node *new_mem = be_transform_node(mem); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_mode *mode = get_ia32_ls_mode(node); + ir_node *new_op; + long am_offs; + + new_op = func(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); + + am_offs = get_ia32_am_offs_int(node); + add_ia32_am_offs_int(new_op, am_offs); + + set_ia32_op_type(new_op, ia32_AddrModeD); set_ia32_ls_mode(new_op, mode); set_ia32_frame_ent(new_op, get_ia32_frame_ent(node)); set_ia32_use_frame(new_op); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); return new_op; } + +/** + * Transforms an ia32_l_XXX into a "real" XXX node + * + * @param node The node to transform + * @return the created ia32 XXX node + */ +#define GEN_LOWERED_OP(op) \ + static ir_node *gen_ia32_l_##op(ir_node *node) { \ + return gen_binop(node, get_binop_left(node), \ + get_binop_right(node), new_rd_ia32_##op,0); \ + } + +#define GEN_LOWERED_x87_OP(op) \ + static ir_node *gen_ia32_l_##op(ir_node *node) { \ + ir_node *new_op; \ + new_op = gen_binop_x87_float(node, get_binop_left(node), \ + get_binop_right(node), new_rd_ia32_##op); \ + return new_op; \ + } + +#define GEN_LOWERED_SHIFT_OP(l_op, op) \ + static ir_node *gen_ia32_##l_op(ir_node *node) { \ + return gen_shift_binop(node, get_irn_n(node, 0), \ + get_irn_n(node, 1), new_rd_ia32_##op); \ + } + +GEN_LOWERED_x87_OP(vfprem) +GEN_LOWERED_x87_OP(vfmul) +GEN_LOWERED_x87_OP(vfsub) +GEN_LOWERED_SHIFT_OP(l_ShlDep, Shl) +GEN_LOWERED_SHIFT_OP(l_ShrDep, Shr) +GEN_LOWERED_SHIFT_OP(l_Sar, Sar) +GEN_LOWERED_SHIFT_OP(l_SarDep, Sar) + +static ir_node *gen_ia32_l_Add(ir_node *node) { + ir_node *left = get_irn_n(node, n_ia32_l_Add_left); + ir_node *right = get_irn_n(node, n_ia32_l_Add_right); + ir_node *lowered = gen_binop(node, left, right, new_rd_ia32_Add, 1); + + if(is_Proj(lowered)) { + lowered = get_Proj_pred(lowered); + } else { + assert(is_ia32_Add(lowered)); + set_irn_mode(lowered, mode_T); + } + + return lowered; +} + +static ir_node *gen_ia32_l_Adc(ir_node *node) { + ir_node *src_block = 
get_nodes_block(node); + ir_node *block = be_transform_node(src_block); + ir_node *op1 = get_irn_n(node, n_ia32_l_Adc_left); + ir_node *op2 = get_irn_n(node, n_ia32_l_Adc_right); + ir_node *flags = get_irn_n(node, n_ia32_l_Adc_eflags); + ir_node *new_flags = be_transform_node(flags); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *new_node; + ia32_address_mode_t am; + ia32_address_t *addr = &am.addr; + + match_arguments(&am, src_block, op1, op2, match_commutative); + + new_node = new_rd_ia32_Adc(dbgi, irg, block, addr->base, addr->index, + addr->mem, am.new_op1, am.new_op2, new_flags); + set_am_attributes(new_node, &am); + /* we can't use source address mode anymore when using immediates */ + if(is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2)) + set_ia32_am_support(new_node, ia32_am_None, ia32_am_arity_none); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); + + new_node = fix_mem_proj(new_node, &am); + + return new_node; +} + +/** + * Transforms an ia32_l_Neg into a "real" ia32_Neg node + * + * @param node The node to transform + * @return the created ia32 Neg node + */ +static ir_node *gen_ia32_l_Neg(ir_node *node) { + return gen_unop(node, get_unop_op(node), new_rd_ia32_Neg); +} + +/** + * Transforms an ia32_l_vfild into a "real" ia32_vfild node + * + * @param node The node to transform + * @return the created ia32 vfild node + */ +static ir_node *gen_ia32_l_vfild(ir_node *node) { + return gen_lowered_Load(node, new_rd_ia32_vfild); +} + /** -* Transforms a lowered Store into a "real" one. -*/ -static ir_node *gen_lowered_Store(ia32_transform_env_t *env, construct_store_func func, char fp_unit) { - ir_node *node = env->irn; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_mode *mode = get_ia32_ls_mode(node); - ir_node *new_op; - char *am_offs; - ia32_am_flavour_t am_flav = ia32_B; - - /* - Could be that we have SSE2 unit, but due to 64Bit Div/Conv - lowering we have x87 nodes, so we need to enforce simulation. - */ - if (mode_is_float(mode)) { - FP_USED(env->cg); - if (fp_unit == fp_x87) - FORCE_x87(env->cg); - } + * Transforms an ia32_l_Load into a "real" ia32_Load node + * + * @param node The node to transform + * @return the created ia32 Load node + */ +static ir_node *gen_ia32_l_Load(ir_node *node) { + return gen_lowered_Load(node, new_rd_ia32_Load); +} - new_op = func(env->dbg, env->irg, env->block, get_irn_n(node, 0), noreg, get_irn_n(node, 1), get_irn_n(node, 2)); +/** + * Transforms an ia32_l_Store into a "real" ia32_Store node + * + * @param node The node to transform + * @return the created ia32 Store node + */ +static ir_node *gen_ia32_l_Store(ir_node *node) { + return gen_lowered_Store(node, new_rd_ia32_Store); +} - if ((am_offs = get_ia32_am_offs(node)) != NULL) { - am_flav |= ia32_O; - add_ia32_am_offs(new_op, am_offs); - } +/** + * Transforms a l_vfist into a "real" vfist node. 
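+ * The store uses the truncating FPU control word produced by
+ * ia32_new_Fpu_truncate, which is passed as the last operand of the vfist.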
+ * + * @param node The node to transform + * @return the created ia32 vfist node + */ +static ir_node *gen_ia32_l_vfist(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *ptr = get_irn_n(node, 0); + ir_node *new_ptr = be_transform_node(ptr); + ir_node *val = get_irn_n(node, 1); + ir_node *new_val = be_transform_node(val); + ir_node *mem = get_irn_n(node, 2); + ir_node *new_mem = be_transform_node(mem); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_mode *mode = get_ia32_ls_mode(node); + ir_node *trunc_mode = ia32_new_Fpu_truncate(env_cg); + ir_node *new_op; + long am_offs; + + new_op = new_rd_ia32_vfist(dbgi, irg, block, new_ptr, noreg, new_mem, + new_val, trunc_mode); + + am_offs = get_ia32_am_offs_int(node); + add_ia32_am_offs_int(new_op, am_offs); - set_ia32_am_support(new_op, ia32_am_Dest); set_ia32_op_type(new_op, ia32_AddrModeD); - set_ia32_am_flavour(new_op, am_flav); set_ia32_ls_mode(new_op, mode); set_ia32_frame_ent(new_op, get_ia32_frame_ent(node)); set_ia32_use_frame(new_op); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); return new_op; } - /** - * Transforms an ia32_l_XXX into a "real" XXX node + * Transforms a l_vfdiv into a "real" vfdiv node. * * @param env The transformation environment - * @return the created ia32 XXX node + * @return the created ia32 vfdiv node */ -#define GEN_LOWERED_OP(op) \ - static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \ - if (mode_is_float(env->mode)) \ - FP_USED(env->cg); \ - return gen_binop(env, get_binop_left(env->irn), get_binop_right(env->irn), new_rd_ia32_##op); \ - } - -#define GEN_LOWERED_x87_OP(op) \ - static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \ - ir_node *new_op; \ - FORCE_x87(env->cg); \ - new_op = gen_binop(env, get_binop_left(env->irn), get_binop_right(env->irn), new_rd_ia32_##op); \ - set_ia32_am_support(get_Proj_pred(new_op), ia32_am_None); \ - return new_op; \ - } - -#define GEN_LOWERED_UNOP(op) \ - static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \ - return gen_unop(env, get_unop_op(env->irn), new_rd_ia32_##op); \ - } - -#define GEN_LOWERED_SHIFT_OP(op) \ - static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \ - return gen_shift_binop(env, get_binop_left(env->irn), get_binop_right(env->irn), new_rd_ia32_##op); \ - } - -#define GEN_LOWERED_LOAD(op, fp_unit) \ - static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \ - return gen_lowered_Load(env, new_rd_ia32_##op, fp_unit); \ - } - -#define GEN_LOWERED_STORE(op, fp_unit) \ - static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \ - return gen_lowered_Store(env, new_rd_ia32_##op, fp_unit); \ +static ir_node *gen_ia32_l_vfdiv(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *left = get_binop_left(node); + ir_node *new_left = be_transform_node(left); + ir_node *right = get_binop_right(node); + ir_node *new_right = be_transform_node(right); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *fpcw = get_fpcw(); + ir_node *vfdiv; + + vfdiv = new_rd_ia32_vfdiv(dbgi, irg, block, noreg, noreg, new_NoMem(), + new_left, new_right, fpcw); + clear_ia32_commutative(vfdiv); + + SET_IA32_ORIG_NODE(vfdiv, ia32_get_old_node_name(env_cg, node)); + + return vfdiv; } -GEN_LOWERED_OP(AddC) 
-GEN_LOWERED_OP(Add)
-GEN_LOWERED_OP(SubC)
-GEN_LOWERED_OP(Sub)
-GEN_LOWERED_OP(Mul)
-GEN_LOWERED_OP(Eor)
-GEN_LOWERED_x87_OP(vfdiv)
-GEN_LOWERED_x87_OP(vfmul)
-GEN_LOWERED_x87_OP(vfsub)
+/**
+ * Transforms an ia32_l_Mul into a "real" ia32 Mul node.
+ *
+ * @param node The node to transform
+ * @return the created ia32 Mul node
+ */
+static ir_node *gen_ia32_l_Mul(ir_node *node) {
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *left = get_binop_left(node);
+ ir_node *new_left = be_transform_node(left);
+ ir_node *right = get_binop_right(node);
+ ir_node *new_right = be_transform_node(right);
+ ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+ ir_graph *irg = current_ir_graph;
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ /* l_Mul is already a mode_T node, so we create the Mul in the normal way */
+ /* and then skip the result Proj, because all needed Projs are already there. */
+ ir_node *muls = new_rd_ia32_Mul(dbgi, irg, block, noreg, noreg, new_NoMem(),
+ new_left, new_right);
+ clear_ia32_commutative(muls);
-GEN_LOWERED_UNOP(Minus)
+ SET_IA32_ORIG_NODE(muls, ia32_get_old_node_name(env_cg, node));
-GEN_LOWERED_LOAD(vfild, fp_x87)
-GEN_LOWERED_LOAD(Load, fp_none)
-GEN_LOWERED_STORE(vfist, fp_x87)
-GEN_LOWERED_STORE(Store, fp_none)
+ return muls;
+}
 /**
- * Transforms a l_MulS into a "real" MulS node.
+ * Transforms an ia32_l_IMul into a "real" ia32 IMul1OP node.
 *
 * @param env The transformation environment
- * @return the created ia32 MulS node
+ * @return the created ia32 IMul1OP node
 */
-static ir_node *gen_ia32_l_MulS(ia32_transform_env_t *env) {
-
- /* l_MulS is already a mode_T node, so we create the MulS in the normal way */
+static ir_node *gen_ia32_l_IMul(ir_node *node) {
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *left = get_binop_left(node);
+ ir_node *new_left = be_transform_node(left);
+ ir_node *right = get_binop_right(node);
+ ir_node *new_right = be_transform_node(right);
+ ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+ ir_graph *irg = current_ir_graph;
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ /* l_IMul is already a mode_T node, so we create the IMul1OP in the normal way */
+ /* and then skip the result Proj, because all needed Projs are already there. 
*/ + ir_node *muls = new_rd_ia32_IMul1OP(dbgi, irg, block, noreg, noreg, + new_NoMem(), new_left, new_right); + clear_ia32_commutative(muls); - ir_node *new_op = gen_binop(env, get_binop_left(env->irn), get_binop_right(env->irn), new_rd_ia32_MulS); - ir_node *muls = get_Proj_pred(new_op); - - /* MulS cannot have AM for destination */ - if (get_ia32_am_support(muls) != ia32_am_None) - set_ia32_am_support(muls, ia32_am_Source); + SET_IA32_ORIG_NODE(muls, ia32_get_old_node_name(env_cg, node)); return muls; } -GEN_LOWERED_SHIFT_OP(Shl) -GEN_LOWERED_SHIFT_OP(Shr) -GEN_LOWERED_SHIFT_OP(Shrs) +static ir_node *gen_ia32_l_Sub(ir_node *node) { + ir_node *left = get_irn_n(node, n_ia32_l_Sub_left); + ir_node *right = get_irn_n(node, n_ia32_l_Sub_right); + ir_node *lowered = gen_binop(node, left, right, new_rd_ia32_Sub, 0); + + if(is_Proj(lowered)) { + lowered = get_Proj_pred(lowered); + } else { + assert(is_ia32_Sub(lowered)); + set_irn_mode(lowered, mode_T); + } + + return lowered; +} + +static ir_node *gen_ia32_l_Sbb(ir_node *node) { + ir_node *src_block = get_nodes_block(node); + ir_node *block = be_transform_node(src_block); + ir_node *op1 = get_irn_n(node, n_ia32_l_Sbb_left); + ir_node *op2 = get_irn_n(node, n_ia32_l_Sbb_right); + ir_node *flags = get_irn_n(node, n_ia32_l_Sbb_eflags); + ir_node *new_flags = be_transform_node(flags); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *new_node; + ia32_address_mode_t am; + ia32_address_t *addr = &am.addr; + + match_arguments(&am, src_block, op1, op2, match_commutative); + + new_node = new_rd_ia32_Sbb(dbgi, irg, block, addr->base, addr->index, + addr->mem, am.new_op1, am.new_op2, new_flags); + set_am_attributes(new_node, &am); + /* we can't use source address mode anymore when using immediates */ + if(is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2)) + set_ia32_am_support(new_node, ia32_am_None, ia32_am_arity_none); + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); + + new_node = fix_mem_proj(new_node, &am); + + return new_node; +} /** * Transforms a l_ShlD/l_ShrD into a ShlD/ShrD. Those nodes have 3 data inputs: @@ -2748,115 +3870,79 @@ GEN_LOWERED_SHIFT_OP(Shrs) * op3 - shift count * Only op3 can be an immediate. */ -static ir_node *gen_lowered_64bit_shifts(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, ir_node *count) { - ir_node *new_op = NULL; - ir_mode *mode = env->mode; - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *imm_op; - tarval *tv; - DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) - - assert(! mode_is_float(mode) && "Shift/Rotate with float not supported"); - - /* Check if immediate optimization is on and */ - /* if it's an operation with immediate. */ - imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? 
get_immediate_op(NULL, count) : NULL; - - /* Limit imm_op within range imm8 */ - if (imm_op) { - tv = get_ia32_Immop_tarval(imm_op); - - if (tv) { - tv = tarval_mod(tv, new_tarval_from_long(32, mode_Iu)); - set_ia32_Immop_tarval(imm_op, tv); - } - else { - imm_op = NULL; - } - } - - /* integer operations */ - if (imm_op) { - /* This is ShiftD with const */ - DB((mod, LEVEL_1, "ShiftD with immediate ...")); - - if (is_ia32_l_ShlD(env->irn)) - new_op = new_rd_ia32_ShlD(dbg, irg, block, noreg, noreg, op1, op2, noreg, nomem); - else - new_op = new_rd_ia32_ShrD(dbg, irg, block, noreg, noreg, op1, op2, noreg, nomem); - set_ia32_Immop_attr(new_op, imm_op); - } - else { - /* This is a normal ShiftD */ - DB((mod, LEVEL_1, "ShiftD binop ...")); - if (is_ia32_l_ShlD(env->irn)) - new_op = new_rd_ia32_ShlD(dbg, irg, block, noreg, noreg, op1, op2, count, nomem); - else - new_op = new_rd_ia32_ShrD(dbg, irg, block, noreg, noreg, op1, op2, count, nomem); - } - - /* set AM support */ - set_ia32_am_support(new_op, ia32_am_Dest); - - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); +static ir_node *gen_lowered_64bit_shifts(ir_node *node, ir_node *op1, + ir_node *op2, ir_node *count) +{ + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *new_op = NULL; + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *new_op1 = be_transform_node(op1); + ir_node *new_op2 = be_transform_node(op2); + ir_node *new_count = create_immediate_or_transform(count, 'I'); + + /* TODO proper AM support */ + + if (is_ia32_l_ShlD(node)) + new_op = new_rd_ia32_ShlD(dbgi, irg, block, new_op1, new_op2, new_count); + else + new_op = new_rd_ia32_ShrD(dbgi, irg, block, new_op1, new_op2, new_count); - set_ia32_res_mode(new_op, mode); - set_ia32_emit_cl(new_op); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); - return new_rd_Proj(dbg, irg, block, new_op, mode, 0); + return new_op; } -static ir_node *gen_ia32_l_ShlD(ia32_transform_env_t *env) { - return gen_lowered_64bit_shifts(env, get_irn_n(env->irn, 0), get_irn_n(env->irn, 1), get_irn_n(env->irn, 2)); +static ir_node *gen_ia32_l_ShlD(ir_node *node) { + return gen_lowered_64bit_shifts(node, get_irn_n(node, 0), + get_irn_n(node, 1), get_irn_n(node, 2)); } -static ir_node *gen_ia32_l_ShrD(ia32_transform_env_t *env) { - return gen_lowered_64bit_shifts(env, get_irn_n(env->irn, 0), get_irn_n(env->irn, 1), get_irn_n(env->irn, 2)); +static ir_node *gen_ia32_l_ShrD(ir_node *node) { + return gen_lowered_64bit_shifts(node, get_irn_n(node, 0), + get_irn_n(node, 1), get_irn_n(node, 2)); } /** * In case SSE Unit is used, the node is transformed into a vfst + xLoad. 
*/ -static ir_node *gen_ia32_l_X87toSSE(ia32_transform_env_t *env) { - ia32_code_gen_t *cg = env->cg; - ir_node *res = NULL; - ir_node *ptr = get_irn_n(env->irn, 0); - ir_node *val = get_irn_n(env->irn, 1); - ir_node *mem = get_irn_n(env->irn, 2); +static ir_node *gen_ia32_l_X87toSSE(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *val = get_irn_n(node, 1); + ir_node *new_val = be_transform_node(val); + ia32_code_gen_t *cg = env_cg; + ir_node *res = NULL; + ir_graph *irg = current_ir_graph; + dbg_info *dbgi; + ir_node *noreg, *new_ptr, *new_mem; + ir_node *ptr, *mem; if (USE_SSE2(cg)) { - ir_node *noreg = ia32_new_NoReg_gp(cg); - - /* Store x87 -> MEM */ - res = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, mem); - set_ia32_frame_ent(res, get_ia32_frame_ent(env->irn)); - set_ia32_use_frame(res); - set_ia32_ls_mode(res, get_ia32_ls_mode(env->irn)); - set_ia32_am_support(res, ia32_am_Dest); - set_ia32_am_flavour(res, ia32_B); - res = new_rd_Proj(env->dbg, env->irg, env->block, res, mode_M, pn_ia32_vfst_M); - - /* Load MEM -> SSE */ - res = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, res); - set_ia32_frame_ent(res, get_ia32_frame_ent(env->irn)); - set_ia32_use_frame(res); - set_ia32_ls_mode(res, get_ia32_ls_mode(env->irn)); - set_ia32_am_support(res, ia32_am_Source); - set_ia32_am_flavour(res, ia32_B); - res = new_rd_Proj(env->dbg, env->irg, env->block, res, get_ia32_ls_mode(env->irn), pn_ia32_xLoad_res); + return new_val; } - else { - /* SSE unit is not used -> skip this node. */ - int i; - edges_reroute(env->irn, val, env->irg); - for (i = get_irn_arity(env->irn) - 1; i >= 0; i--) - set_irn_n(env->irn, i, get_irg_bad(env->irg)); - } + mem = get_irn_n(node, 2); + new_mem = be_transform_node(mem); + ptr = get_irn_n(node, 0); + new_ptr = be_transform_node(ptr); + noreg = ia32_new_NoReg_gp(cg); + dbgi = get_irn_dbg_info(node); + + /* Store x87 -> MEM */ + res = new_rd_ia32_vfst(dbgi, irg, block, new_ptr, noreg, new_mem, new_val, + get_ia32_ls_mode(node)); + set_ia32_frame_ent(res, get_ia32_frame_ent(node)); + set_ia32_use_frame(res); + set_ia32_ls_mode(res, get_ia32_ls_mode(node)); + set_ia32_op_type(res, ia32_AddrModeD); + + /* Load MEM -> SSE */ + res = new_rd_ia32_xLoad(dbgi, irg, block, new_ptr, noreg, res, + get_ia32_ls_mode(node)); + set_ia32_frame_ent(res, get_ia32_frame_ent(node)); + set_ia32_use_frame(res); + set_ia32_op_type(res, ia32_AddrModeS); + res = new_rd_Proj(dbgi, irg, block, res, mode_xmm, pn_ia32_xLoad_res); return res; } @@ -2864,42 +3950,57 @@ static ir_node *gen_ia32_l_X87toSSE(ia32_transform_env_t *env) { /** * In case SSE Unit is used, the node is transformed into a xStore + vfld. */ -static ir_node *gen_ia32_l_SSEtoX87(ia32_transform_env_t *env) { - ia32_code_gen_t *cg = env->cg; - ir_node *res = NULL; - ir_node *ptr = get_irn_n(env->irn, 0); - ir_node *val = get_irn_n(env->irn, 1); - ir_node *mem = get_irn_n(env->irn, 2); - - if (USE_SSE2(cg)) { - ir_node *noreg = ia32_new_NoReg_gp(cg); +static ir_node *gen_ia32_l_SSEtoX87(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *val = get_irn_n(node, 1); + ir_node *new_val = be_transform_node(val); + ia32_code_gen_t *cg = env_cg; + ir_graph *irg = current_ir_graph; + ir_node *res = NULL; + ir_entity *fent = get_ia32_frame_ent(node); + ir_mode *lsmode = get_ia32_ls_mode(node); + int offs = 0; + ir_node *noreg, *new_ptr, *new_mem; + ir_node *ptr, *mem; + dbg_info *dbgi; + + if (! 
USE_SSE2(cg)) { + /* SSE unit is not used -> skip this node. */ + return new_val; + } - /* Store SSE -> MEM */ - res = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, mem); - set_ia32_frame_ent(res, get_ia32_frame_ent(env->irn)); - set_ia32_use_frame(res); - set_ia32_ls_mode(res, get_ia32_ls_mode(env->irn)); - set_ia32_am_support(res, ia32_am_Dest); - set_ia32_am_flavour(res, ia32_B); - res = new_rd_Proj(env->dbg, env->irg, env->block, res, mode_M, pn_ia32_xStore_M); - - /* Load MEM -> x87 */ - res = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem); - set_ia32_frame_ent(res, get_ia32_frame_ent(env->irn)); + ptr = get_irn_n(node, 0); + new_ptr = be_transform_node(ptr); + mem = get_irn_n(node, 2); + new_mem = be_transform_node(mem); + noreg = ia32_new_NoReg_gp(cg); + dbgi = get_irn_dbg_info(node); + + /* Store SSE -> MEM */ + if (is_ia32_xLoad(skip_Proj(new_val))) { + ir_node *ld = skip_Proj(new_val); + + /* we can vfld the value directly into the fpu */ + fent = get_ia32_frame_ent(ld); + ptr = get_irn_n(ld, 0); + offs = get_ia32_am_offs_int(ld); + } else { + res = new_rd_ia32_xStore(dbgi, irg, block, new_ptr, noreg, new_mem, + new_val); + set_ia32_frame_ent(res, fent); set_ia32_use_frame(res); - set_ia32_ls_mode(res, get_ia32_ls_mode(env->irn)); - set_ia32_am_support(res, ia32_am_Source); - set_ia32_am_flavour(res, ia32_B); - res = new_rd_Proj(env->dbg, env->irg, env->block, res, get_ia32_ls_mode(env->irn), pn_ia32_vfld_res); + set_ia32_ls_mode(res, lsmode); + set_ia32_op_type(res, ia32_AddrModeD); + mem = res; } - else { - /* SSE unit is not used -> skip this node. */ - int i; - edges_reroute(env->irn, val, env->irg); - for (i = get_irn_arity(env->irn) - 1; i >= 0; i--) - set_irn_n(env->irn, i, get_irg_bad(env->irg)); - } + /* Load MEM -> x87 */ + res = new_rd_ia32_vfld(dbgi, irg, block, new_ptr, noreg, new_mem, lsmode); + set_ia32_frame_ent(res, fent); + set_ia32_use_frame(res); + add_ia32_am_offs_int(res, offs); + set_ia32_op_type(res, ia32_AddrModeS); + res = new_rd_Proj(dbgi, irg, block, res, mode_vfp, pn_ia32_vfld_res); return res; } @@ -2917,24 +4018,496 @@ static ir_node *gen_ia32_l_SSEtoX87(ia32_transform_env_t *env) { /** * the BAD transformer. */ -static ir_node *bad_transform(ia32_transform_env_t *env) { - ir_fprintf(stderr, "Not implemented: %+F\n", env->irn); - assert(0); +static ir_node *bad_transform(ir_node *node) { + panic("No transform function for %+F available.\n", node); return NULL; } +/** + * Transform the Projs of an AddSP. + */ +static ir_node *gen_Proj_be_AddSP(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = be_transform_node(pred); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + long proj = get_Proj_proj(node); + + if (proj == pn_be_AddSP_sp) { + ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, + pn_ia32_SubSP_stack); + arch_set_irn_register(env_cg->arch_env, res, &ia32_gp_regs[REG_ESP]); + return res; + } else if(proj == pn_be_AddSP_res) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, + pn_ia32_SubSP_addr); + } else if (proj == pn_be_AddSP_M) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_SubSP_M); + } + + assert(0); + return new_rd_Unknown(irg, get_irn_mode(node)); +} + +/** + * Transform the Projs of a SubSP. 
+ */ +static ir_node *gen_Proj_be_SubSP(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = be_transform_node(pred); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + long proj = get_Proj_proj(node); + + if (proj == pn_be_SubSP_sp) { + ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, + pn_ia32_AddSP_stack); + arch_set_irn_register(env_cg->arch_env, res, &ia32_gp_regs[REG_ESP]); + return res; + } else if (proj == pn_be_SubSP_M) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_AddSP_M); + } + + assert(0); + return new_rd_Unknown(irg, get_irn_mode(node)); +} + +/** + * Transform and renumber the Projs from a Load. + */ +static ir_node *gen_Proj_Load(ir_node *node) { + ir_node *new_pred; + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + long proj = get_Proj_proj(node); + + + /* loads might be part of source address mode matches, so we don't + transform the ProjMs yet (with the exception of loads whose result is + not used) + */ + if (is_Load(pred) && proj == pn_Load_M && get_irn_n_edges(pred) > 1) { + ir_node *res; + + assert(pn_ia32_Load_M == 1); /* convention: mem-result of Source-AM + nodes is 1 */ + /* this is needed, because sometimes we have loops that are only + reachable through the ProjM */ + be_enqueue_preds(node); + /* do it in 2 steps, to silence firm verifier */ + res = new_rd_Proj(dbgi, irg, block, pred, mode_M, pn_Load_M); + set_Proj_proj(res, pn_ia32_Load_M); + return res; + } + + /* renumber the proj */ + new_pred = be_transform_node(pred); + if (is_ia32_Load(new_pred)) { + if (proj == pn_Load_res) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, + pn_ia32_Load_res); + } else if (proj == pn_Load_M) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, + pn_ia32_Load_M); + } + } else if(is_ia32_Conv_I2I(new_pred)) { + set_irn_mode(new_pred, mode_T); + if (proj == pn_Load_res) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_res); + } else if (proj == pn_Load_M) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_mem); + } + } else if (is_ia32_xLoad(new_pred)) { + if (proj == pn_Load_res) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_xmm, + pn_ia32_xLoad_res); + } else if (proj == pn_Load_M) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, + pn_ia32_xLoad_M); + } + } else if (is_ia32_vfld(new_pred)) { + if (proj == pn_Load_res) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_vfp, + pn_ia32_vfld_res); + } else if (proj == pn_Load_M) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, + pn_ia32_vfld_M); + } + } else { + /* can happen for ProJMs when source address mode happened for the + node */ + + /* however it should not be the result proj, as that would mean the + load had multiple users and should not have been used for + SourceAM */ + if(proj != pn_Load_M) { + panic("internal error: transformed node not a Load"); + } + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, 1); + } + + assert(0); + return new_rd_Unknown(irg, get_irn_mode(node)); +} + +/** + * Transform and renumber the Projs from a DivMod like instruction. 
+ */ +static ir_node *gen_Proj_DivMod(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = be_transform_node(pred); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + long proj = get_Proj_proj(node); + + assert(is_ia32_Div(new_pred) || is_ia32_IDiv(new_pred)); + + switch (get_irn_opcode(pred)) { + case iro_Div: + switch (proj) { + case pn_Div_M: + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M); + case pn_Div_res: + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res); + default: + break; + } + break; + case iro_Mod: + switch (proj) { + case pn_Mod_M: + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M); + case pn_Mod_res: + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res); + default: + break; + } + break; + case iro_DivMod: + switch (proj) { + case pn_DivMod_M: + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M); + case pn_DivMod_res_div: + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res); + case pn_DivMod_res_mod: + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res); + default: + break; + } + break; + default: + break; + } + + assert(0); + return new_rd_Unknown(irg, mode); +} + +/** + * Transform and renumber the Projs from a CopyB. + */ +static ir_node *gen_Proj_CopyB(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = be_transform_node(pred); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + long proj = get_Proj_proj(node); + + switch(proj) { + case pn_CopyB_M_regular: + if (is_ia32_CopyB_i(new_pred)) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_CopyB_i_M); + } else if (is_ia32_CopyB(new_pred)) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_CopyB_M); + } + break; + default: + break; + } + + assert(0); + return new_rd_Unknown(irg, mode); +} + +/** + * Transform and renumber the Projs from a vfdiv. + */ +static ir_node *gen_Proj_l_vfdiv(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = be_transform_node(pred); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + long proj = get_Proj_proj(node); + + switch (proj) { + case pn_ia32_l_vfdiv_M: + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_vfdiv_M); + case pn_ia32_l_vfdiv_res: + return new_rd_Proj(dbgi, irg, block, new_pred, mode_vfp, pn_ia32_vfdiv_res); + default: + assert(0); + } + + return new_rd_Unknown(irg, mode); +} + +/** + * Transform and renumber the Projs from a Quot. 
+ */ +static ir_node *gen_Proj_Quot(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = be_transform_node(pred); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + long proj = get_Proj_proj(node); + + switch(proj) { + case pn_Quot_M: + if (is_ia32_xDiv(new_pred)) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_xDiv_M); + } else if (is_ia32_vfdiv(new_pred)) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_vfdiv_M); + } + break; + case pn_Quot_res: + if (is_ia32_xDiv(new_pred)) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_xmm, pn_ia32_xDiv_res); + } else if (is_ia32_vfdiv(new_pred)) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_vfp, pn_ia32_vfdiv_res); + } + break; + default: + break; + } + + assert(0); + return new_rd_Unknown(irg, mode); +} + +/** + * Transform the Thread Local Storage Proj. + */ +static ir_node *gen_Proj_tls(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = NULL; + ir_node *res = new_rd_ia32_LdTls(dbgi, irg, block, mode_Iu); + + return res; +} + +static ir_node *gen_be_Call(ir_node *node) { + ir_node *res = be_duplicate_node(node); + be_node_add_flags(res, -1, arch_irn_flags_modify_flags); + + return res; +} + +static ir_node *gen_be_IncSP(ir_node *node) { + ir_node *res = be_duplicate_node(node); + be_node_add_flags(res, -1, arch_irn_flags_modify_flags); + + return res; +} + +/** + * Transform the Projs from a be_Call. + */ +static ir_node *gen_Proj_be_Call(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *call = get_Proj_pred(node); + ir_node *new_call = be_transform_node(call); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_type *method_type = be_Call_get_type(call); + int n_res = get_method_n_ress(method_type); + long proj = get_Proj_proj(node); + ir_mode *mode = get_irn_mode(node); + ir_node *sse_load; + const arch_register_class_t *cls; + + /* The following is kinda tricky: If we're using SSE, then we have to + * move the result value of the call in floating point registers to an + * xmm register, we therefore construct a GetST0 -> xLoad sequence + * after the call, we have to make sure to correctly make the + * MemProj and the result Proj use these 2 nodes + */ + if (proj == pn_be_Call_M_regular) { + // get new node for result, are we doing the sse load/store hack? 
+ ir_node *call_res = be_get_Proj_for_pn(call, pn_be_Call_first_res);
+ ir_node *call_res_new;
+ ir_node *call_res_pred = NULL;
+
+ if (call_res != NULL) {
+ call_res_new = be_transform_node(call_res);
+ call_res_pred = get_Proj_pred(call_res_new);
+ }
+
+ if (call_res_pred == NULL || be_is_Call(call_res_pred)) {
+ return new_rd_Proj(dbgi, irg, block, new_call, mode_M,
+ pn_be_Call_M_regular);
+ } else {
+ assert(is_ia32_xLoad(call_res_pred));
+ return new_rd_Proj(dbgi, irg, block, call_res_pred, mode_M,
+ pn_ia32_xLoad_M);
+ }
+ }
+ if (USE_SSE2(env_cg) && proj >= pn_be_Call_first_res
+ && proj < (pn_be_Call_first_res + n_res)
+ && mode_is_float(mode)) {
+ ir_node *fstp;
+ ir_node *frame = get_irg_frame(irg);
+ ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+ //ir_node *p;
+ ir_node *call_mem = be_get_Proj_for_pn(call, pn_be_Call_M_regular);
+ ir_node *call_res;
+
+ /* in case there is no memory output: create one to serialize the copy
+ FPU -> SSE */
+ call_mem = new_rd_Proj(dbgi, irg, block, new_call, mode_M,
+ pn_be_Call_M_regular);
+ call_res = new_rd_Proj(dbgi, irg, block, new_call, mode,
+ pn_be_Call_first_res);
+
+ /* store st(0) onto stack */
+ fstp = new_rd_ia32_vfst(dbgi, irg, block, frame, noreg, call_mem,
+ call_res, mode);
+ set_ia32_op_type(fstp, ia32_AddrModeD);
+ set_ia32_use_frame(fstp);
+
+ /* load into SSE register */
+ sse_load = new_rd_ia32_xLoad(dbgi, irg, block, frame, noreg, fstp,
+ mode);
+ set_ia32_op_type(sse_load, ia32_AddrModeS);
+ set_ia32_use_frame(sse_load);
+
+ sse_load = new_rd_Proj(dbgi, irg, block, sse_load, mode_xmm,
+ pn_ia32_xLoad_res);
+
+ return sse_load;
+ }
+
+ /* transform call modes */
+ if (mode_is_data(mode)) {
+ cls = arch_get_irn_reg_class(env_cg->arch_env, node, -1);
+ mode = cls->mode;
+ }
+
+ return new_rd_Proj(dbgi, irg, block, new_call, mode, proj);
+}
+
+/**
+ * Transform the Projs from a Cmp.
+ */
+static ir_node *gen_Proj_Cmp(ir_node *node)
+{
+ /* normally Cmps are processed when looking at Cond nodes, but this case
+ * can happen in complicated Psi conditions */
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+ ir_node *cmp = get_Proj_pred(node);
+ ir_node *new_cmp = be_transform_node(cmp);
+ long pnc = get_Proj_proj(node);
+ ir_node *res;
+
+ res = create_set_32bit(dbgi, new_block, new_cmp, pnc, node);
+
+ return res;
+}
+
+/**
+ * Transform and potentially renumber Proj nodes. 
+ */ +static ir_node *gen_Proj(ir_node *node) { + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *pred = get_Proj_pred(node); + long proj = get_Proj_proj(node); + + if (is_Store(pred)) { + if (proj == pn_Store_M) { + return be_transform_node(pred); + } else { + assert(0); + return new_r_Bad(irg); + } + } else if (is_Load(pred)) { + return gen_Proj_Load(node); + } else if (is_Div(pred) || is_Mod(pred) || is_DivMod(pred)) { + return gen_Proj_DivMod(node); + } else if (is_CopyB(pred)) { + return gen_Proj_CopyB(node); + } else if (is_Quot(pred)) { + return gen_Proj_Quot(node); + } else if (is_ia32_l_vfdiv(pred)) { + return gen_Proj_l_vfdiv(node); + } else if (be_is_SubSP(pred)) { + return gen_Proj_be_SubSP(node); + } else if (be_is_AddSP(pred)) { + return gen_Proj_be_AddSP(node); + } else if (be_is_Call(pred)) { + return gen_Proj_be_Call(node); + } else if (is_Cmp(pred)) { + return gen_Proj_Cmp(node); + } else if (get_irn_op(pred) == op_Start) { + if (proj == pn_Start_X_initial_exec) { + ir_node *block = get_nodes_block(pred); + ir_node *jump; + + /* we exchange the ProjX with a jump */ + block = be_transform_node(block); + jump = new_rd_Jmp(dbgi, irg, block); + return jump; + } + if (node == be_get_old_anchor(anchor_tls)) { + return gen_Proj_tls(node); + } +#ifdef FIRM_EXT_GRS + } else if(!is_ia32_irn(pred)) { // Quick hack for SIMD optimization +#else + } else { +#endif + ir_node *new_pred = be_transform_node(pred); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_mode *mode = get_irn_mode(node); + if (mode_needs_gp_reg(mode)) { + ir_node *new_proj = new_r_Proj(irg, block, new_pred, mode_Iu, + get_Proj_proj(node)); +#ifdef DEBUG_libfirm + new_proj->node_nr = node->node_nr; +#endif + return new_proj; + } + } + + return be_duplicate_node(node); +} + /** * Enters all transform functions into the generic pointer */ -void ia32_register_transformers(void) { - ir_op *op_Max, *op_Min, *op_Mulh; +static void register_transformers(void) +{ + ir_op *op_Mulh; /* first clear the generic function pointer for all ops */ clear_irp_opcodes_generic_func(); -#define GEN(a) op_##a->ops.generic = (op_func)gen_##a +#define GEN(a) { be_transform_func *func = gen_##a; op_##a->ops.generic = (op_func) func; } #define BAD(a) op_##a->ops.generic = (op_func)bad_transform -#define IGN(a) GEN(Add); GEN(Sub); @@ -2963,25 +4536,31 @@ void ia32_register_transformers(void) { GEN(Store); GEN(Cond); + GEN(Cmp); + GEN(ASM); GEN(CopyB); - GEN(Mux); + BAD(Mux); GEN(Psi); + GEN(Proj); + GEN(Phi); + GEN(IJmp); /* transform ops from intrinsic lowering */ GEN(ia32_l_Add); - GEN(ia32_l_AddC); - GEN(ia32_l_Sub); - GEN(ia32_l_SubC); - GEN(ia32_l_Minus); + GEN(ia32_l_Adc); + GEN(ia32_l_Neg); GEN(ia32_l_Mul); - GEN(ia32_l_Eor); - GEN(ia32_l_MulS); - GEN(ia32_l_Shl); - GEN(ia32_l_Shr); - GEN(ia32_l_Shrs); + GEN(ia32_l_IMul); + GEN(ia32_l_ShlDep); + GEN(ia32_l_ShrDep); + GEN(ia32_l_Sar); + GEN(ia32_l_SarDep); GEN(ia32_l_ShlD); GEN(ia32_l_ShrD); + GEN(ia32_l_Sub); + GEN(ia32_l_Sbb); GEN(ia32_l_vfdiv); + GEN(ia32_l_vfprem); GEN(ia32_l_vfmul); GEN(ia32_l_vfsub); GEN(ia32_l_vfild); @@ -2991,23 +4570,9 @@ void ia32_register_transformers(void) { GEN(ia32_l_X87toSSE); GEN(ia32_l_SSEtoX87); - IGN(Call); - IGN(Alloc); - - IGN(Proj); - IGN(Block); - IGN(Start); - IGN(End); - IGN(NoMem); - IGN(Phi); - IGN(IJmp); - IGN(Break); - IGN(Cmp); - - /* constant transformation happens earlier */ - IGN(Const); - IGN(SymConst); - IGN(Sync); + GEN(Const); + GEN(SymConst); + GEN(Unknown); /* we should never 
see these nodes */ BAD(Raise); @@ -3017,7 +4582,7 @@ void ia32_register_transformers(void) { BAD(Free); BAD(Tuple); BAD(Id); - BAD(Bad); + //BAD(Bad); BAD(Confirm); BAD(Filter); BAD(CallBegin); @@ -3027,219 +4592,132 @@ void ia32_register_transformers(void) { /* handle generic backend nodes */ GEN(be_FrameAddr); GEN(be_Call); + GEN(be_IncSP); GEN(be_Return); - GEN(be_FrameLoad); - GEN(be_FrameStore); - GEN(be_StackParam); GEN(be_AddSP); GEN(be_SubSP); + GEN(be_Copy); - /* set the register for all Unknown nodes */ - GEN(Unknown); - - op_Max = get_op_Max(); - if (op_Max) - GEN(Max); - op_Min = get_op_Min(); - if (op_Min) - GEN(Min); op_Mulh = get_op_Mulh(); if (op_Mulh) GEN(Mulh); #undef GEN #undef BAD -#undef IGN } -typedef ir_node *(transform_func)(ia32_transform_env_t *env); - /** - * Transforms the given firm node (and maybe some other related nodes) - * into one or more assembler nodes. - * - * @param node the firm node - * @param env the debug module + * Pre-transform all unknown and noreg nodes. */ -void ia32_transform_node(ir_node *node, void *env) { - ia32_code_gen_t *cg = (ia32_code_gen_t *)env; - ir_op *op = get_irn_op(node); - ir_node *asm_node = NULL; - int i; - - if (is_Block(node)) - return; - - /* link arguments pointing to Unknown to the UNKNOWN Proj */ - for (i = get_irn_arity(node) - 1; i >= 0; i--) { - if (is_Unknown(get_irn_n(node, i))) - set_irn_n(node, i, be_get_unknown_for_mode(cg, get_irn_mode(get_irn_n(node, i)))); - } - - DBG((cg->mod, LEVEL_1, "check %+F ... ", node)); - if (op->ops.generic) { - ia32_transform_env_t tenv; - transform_func *transform = (transform_func *)op->ops.generic; - - tenv.block = get_nodes_block(node); - tenv.dbg = get_irn_dbg_info(node); - tenv.irg = current_ir_graph; - tenv.irn = node; - tenv.mode = get_irn_mode(node); - tenv.cg = cg; - DEBUG_ONLY(tenv.mod = cg->mod;) - - asm_node = (*transform)(&tenv); - } - - /* exchange nodes if a new one was generated */ - if (asm_node) { - exchange(node, asm_node); - DB((cg->mod, LEVEL_1, "created node %+F[%p]\n", asm_node, asm_node)); - } - else { - DB((cg->mod, LEVEL_1, "ignored\n")); - } +static void ia32_pretransform_node(void *arch_cg) { + ia32_code_gen_t *cg = arch_cg; + + cg->unknown_gp = be_pre_transform_node(cg->unknown_gp); + cg->unknown_vfp = be_pre_transform_node(cg->unknown_vfp); + cg->unknown_xmm = be_pre_transform_node(cg->unknown_xmm); + cg->noreg_gp = be_pre_transform_node(cg->noreg_gp); + cg->noreg_vfp = be_pre_transform_node(cg->noreg_vfp); + cg->noreg_xmm = be_pre_transform_node(cg->noreg_xmm); + get_fpcw(); } /** - * Transforms a psi condition. + * Walker, checks if all ia32 nodes producing more than one result have + * its Projs, other wise creates new projs and keep them using a be_Keep node. 
*/ -static void transform_psi_cond(ir_node *cond, ir_mode *mode, ia32_code_gen_t *cg) { - int i; +static void add_missing_keep_walker(ir_node *node, void *data) +{ + int n_outs, i; + unsigned found_projs = 0; + const ir_edge_t *edge; + ir_mode *mode = get_irn_mode(node); + ir_node *last_keep; + (void) data; + if(mode != mode_T) + return; + if(!is_ia32_irn(node)) + return; - /* if the mode is target mode, we have already seen this part of the tree */ - if (get_irn_mode(cond) == mode) + n_outs = get_ia32_n_res(node); + if(n_outs <= 0) + return; + if(is_ia32_SwitchJmp(node)) return; - assert(get_irn_mode(cond) == mode_b && "logical operator for condition must be mode_b"); - - set_irn_mode(cond, mode); - - for (i = get_irn_arity(cond) - 1; i >= 0; i--) { - ir_node *in = get_irn_n(cond, i); - - /* if in is a compare: transform into Set/xCmp */ - if (is_Proj(in)) { - ir_node *new_op = NULL; - ir_node *cmp = get_Proj_pred(in); - ir_node *cmp_a = get_Cmp_left(cmp); - ir_node *cmp_b = get_Cmp_right(cmp); - dbg_info *dbg = get_irn_dbg_info(cmp); - ir_graph *irg = get_irn_irg(cmp); - ir_node *block = get_nodes_block(cmp); - ir_node *noreg = ia32_new_NoReg_gp(cg); - ir_node *nomem = new_rd_NoMem(irg); - int pnc = get_Proj_proj(in); - - /* this is a compare */ - if (mode_is_float(mode)) { - /* Psi is float, we need a floating point compare */ - - if (USE_SSE2(cg)) { - ir_mode *m = get_irn_mode(cmp_a); - /* SSE FPU */ - if (! mode_is_float(m)) { - cmp_a = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_a, cmp_a, mode); - cmp_b = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_b, cmp_b, mode); - } - else if (m == mode_F) { - /* we convert cmp values always to double, to get correct bitmask with cmpsd */ - cmp_a = gen_sse_conv_f2d(cg, dbg, irg, block, cmp_a, cmp_a); - cmp_b = gen_sse_conv_f2d(cg, dbg, irg, block, cmp_b, cmp_b); - } + assert(n_outs < (int) sizeof(unsigned) * 8); + foreach_out_edge(node, edge) { + ir_node *proj = get_edge_src_irn(edge); + int pn = get_Proj_proj(proj); - new_op = new_rd_ia32_xCmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); - set_ia32_pncode(new_op, pnc); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, cmp)); - } - else { - /* x87 FPU */ - assert(0); - } - } - else { - /* integer Psi */ - ia32_transform_env_t tenv; - construct_binop_func *set_func = NULL; - - if (mode_is_float(get_irn_mode(cmp_a))) { - /* 1st case: compare operands are floats */ - FP_USED(cg); - - if (USE_SSE2(cg)) { - /* SSE FPU */ - set_func = new_rd_ia32_xCmpSet; - } - else { - /* x87 FPU */ - set_func = new_rd_ia32_vfCmpSet; - } + assert(get_irn_mode(proj) == mode_M || pn < n_outs); + found_projs |= 1 << pn; + } - pnc &= 7; /* fp compare -> int compare */ - } - else { - /* 2nd case: compare operand are integer too */ - set_func = new_rd_ia32_CmpSet; - } - tenv.block = block; - tenv.cg = cg; - tenv.dbg = dbg; - tenv.irg = irg; - tenv.irn = cmp; - tenv.mode = mode; - tenv.mod = cg->mod; - - new_op = gen_binop(&tenv, cmp_a, cmp_b, set_func); - set_ia32_pncode(get_Proj_pred(new_op), pnc); - set_ia32_am_support(get_Proj_pred(new_op), ia32_am_Source); - } + /* are keeps missing? 
*/ + last_keep = NULL; + for(i = 0; i < n_outs; ++i) { + ir_node *block; + ir_node *in[1]; + const arch_register_req_t *req; + const arch_register_class_t *class; - /* the the new compare as in */ - set_irn_n(cond, i, new_op); + if(found_projs & (1 << i)) { + continue; } - else { - /* another complex condition */ - transform_psi_cond(in, mode, cg); + + req = get_ia32_out_req(node, i); + class = req->cls; + if(class == NULL) { + continue; + } + if(class == &ia32_reg_classes[CLASS_ia32_flags]) { + continue; + } + + block = get_nodes_block(node); + in[0] = new_r_Proj(current_ir_graph, block, node, + arch_register_class_mode(class), i); + if(last_keep != NULL) { + be_Keep_add_node(last_keep, class, in[0]); + } else { + last_keep = be_new_Keep(class, current_ir_graph, block, 1, in); + if(sched_is_scheduled(node)) { + sched_add_after(node, last_keep); + } } } } /** - * The Psi selector can be a tree of compares combined with "And"s and "Or"s. - * We create a Set node, respectively a xCmp in case the Psi is a float, for each - * compare, which causes the compare result to be stores in a register. The - * "And"s and "Or"s are transformed later, we just have to set their mode right. + * Adds missing keeps to nodes. Adds missing Proj nodes for unused outputs + * and keeps them. */ -void ia32_transform_psi_cond_tree(ir_node *node, void *env) { - ia32_code_gen_t *cg = env; - ir_node *psi_sel, *new_cmp, *block; - ir_graph *irg; - ir_mode *mode; - - /* check for Psi */ - if (get_irn_opcode(node) != iro_Psi) - return; - - psi_sel = get_Psi_cond(node, 0); - - /* if psi_cond is a cmp: do nothing, this case is covered by gen_Psi */ - if (is_Proj(psi_sel)) - return; +void ia32_add_missing_keeps(ia32_code_gen_t *cg) +{ + ir_graph *irg = be_get_birg_irg(cg->birg); + irg_walk_graph(irg, add_missing_keep_walker, NULL, NULL); +} - mode = get_irn_mode(node); +/* do the transformation */ +void ia32_transform_graph(ia32_code_gen_t *cg) { + ir_graph *irg = cg->irg; - transform_psi_cond(psi_sel, mode, cg); + register_transformers(); + env_cg = cg; + initial_fpcw = NULL; - irg = get_irn_irg(node); - block = get_nodes_block(node); + heights = heights_new(irg); + calculate_non_address_mode_nodes(irg); - /* we need to compare the evaluated condition tree with 0 */ + be_transform_graph(cg->birg, ia32_pretransform_node, cg); - /* BEWARE: new_r_Const_long works for floating point as well */ - new_cmp = new_r_Cmp(irg, block, psi_sel, new_r_Const_long(irg, block, mode, 0)); - new_cmp = new_r_Proj(irg, block, new_cmp, mode_b, mode_is_float(mode) ? pn_Cmp_Ne : pn_Cmp_Gt | pn_Cmp_Lt); + free_non_address_mode_nodes(); + heights_free(heights); + heights = NULL; +} - set_Psi_cond(node, 0, new_cmp); +void ia32_init_transform(void) +{ + FIRM_DBG_REGISTER(dbg, "firm.be.ia32.transform"); }