X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fbearch_ia32.c;h=4f02a245a94c70be08d79afb121ef822c05ed787;hb=03a8e4c11d91d07dcfe8fd0174a24d685e74a9d3;hp=dfe974f906daa2eadb57611da90a8475159e0f6a;hpb=1d8939e3335834eb5fa6366a58ae8c31ee1abe92;p=libfirm
diff --git a/ir/be/ia32/bearch_ia32.c b/ir/be/ia32/bearch_ia32.c
index dfe974f90..4f02a245a 100644
--- a/ir/be/ia32/bearch_ia32.c
+++ b/ir/be/ia32/bearch_ia32.c
@@ -1,6 +1,6 @@
 /**
  * This is the main ia32 firm backend driver.
- *
+ * @author Christian Wuerdig
  * $Id$
  */
 
@@ -452,6 +452,127 @@ static ir_type *ia32_abi_get_between_type(void *self)
 	return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
 }
 
+/**
+ * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
+ *
+ * @param irn      The original operation
+ * @param i        Index of the argument we want the inverse operation to yield
+ * @param inverse  The struct to be filled with the resulting inverse op
+ * @param obst     The obstack to use for allocation of the returned nodes array
+ * @return The inverse operation or NULL if the operation is not invertible
+ */
+static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
+	ir_graph *irg;
+	ir_mode  *mode;
+	ir_node  *block, *noreg, *nomem;
+	int       pnc;
+
+	/* we cannot invert non-ia32 irns */
+	if (! is_ia32_irn(irn))
+		return NULL;
+
+	/* operand must always be a real operand (not base, index or mem) */
+	if (i != 2 && i != 3)
+		return NULL;
+
+	/* we don't invert address mode operations */
+	if (get_ia32_op_type(irn) != ia32_Normal)
+		return NULL;
+
+	irg   = get_irn_irg(irn);
+	block = get_nodes_block(irn);
+	mode  = get_ia32_res_mode(irn);
+	noreg = get_irn_n(irn, 0);
+	nomem = new_r_NoMem(irg);
+
+	/* initialize structure */
+	inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
+	inverse->costs = 0;
+	inverse->n     = 2;
+
+	switch (get_ia32_irn_opcode(irn)) {
+		case iro_ia32_Add:
+			if (get_ia32_immop_type(irn) == ia32_ImmConst) {
+				/* we have an add with a const here */
+				/* inverse == add with negated const */
+				inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
+				pnc = pn_ia32_Add_res;
+				inverse->costs += 1;
+				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
+				set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
+				set_ia32_commutative(inverse->nodes[0]);
+			}
+			else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
+				/* we have an add with a symconst here */
+				/* inverse == sub with const */
+				inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
+				pnc = pn_ia32_Sub_res;
+				inverse->costs += 5;
+				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
+			}
+			else {
+				/* normal add: inverse == sub */
+				inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i ^ 1), nomem);
+				pnc = pn_ia32_Sub_res;
+				inverse->costs += 5;
+			}
+			break;
+		case iro_ia32_Sub:
+			if (get_ia32_immop_type(irn) != ia32_ImmNone) {
+				/* we have a sub with a const/symconst here */
+				/* inverse == add with this const */
+				inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
+				pnc = pn_ia32_Add_res;
+				inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
+				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
+			}
+			else {
+				/* normal sub */
+				if (i == 2) {
+					inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, 3), nomem);
+				}
+				else {
+					inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), (ir_node *)irn, nomem);
+				}
+				pnc = pn_ia32_Sub_res;
+				inverse->costs += 1;
+			}
+			break;
+		case iro_ia32_Eor:
+			if (get_ia32_immop_type(irn) != ia32_ImmNone) {
+				/* xor with const: inverse = xor */
+				inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
+				pnc = pn_ia32_Eor_res;
+				inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
+				copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
+			}
+			else {
+				/* normal xor */
+				inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
+				pnc = pn_ia32_Eor_res;
+				inverse->costs += 1;
+			}
+			break;
+		case iro_ia32_Not:
+			inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), nomem);
+			pnc = pn_ia32_Not_res;
+			inverse->costs += 1;
+			break;
+		case iro_ia32_Minus:
+			inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), nomem);
+			pnc = pn_ia32_Minus_res;
+			inverse->costs += 1;
+			break;
+		default:
+			/* inverse operation not supported */
+			return NULL;
+	}
+
+	inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);
+
+	return inverse;
+}
+
 static const be_abi_callbacks_t ia32_abi_callbacks = {
 	ia32_abi_init,
 	free,
@@ -470,7 +591,8 @@ static const arch_irn_ops_if_t ia32_irn_ops_if = {
 	ia32_classify,
 	ia32_get_flags,
 	ia32_get_frame_entity,
-	ia32_set_stack_bias
+	ia32_set_stack_bias,
+	ia32_get_inverse
 };
 
 ia32_irn_ops_t ia32_irn_ops = {
@@ -497,19 +619,36 @@ ia32_irn_ops_t ia32_irn_ops = {
  */
 static void ia32_prepare_graph(void *self) {
 	ia32_code_gen_t *cg = self;
+	dom_front_info_t *dom;
 	DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)
 
 	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
+
+	/* 1st: transform constants and psi condition trees */
+	irg_walk_blkwise_graph(cg->irg, ia32_place_consts_set_modes, ia32_transform_psi_cond_tree, cg);
+
+	/* 2nd: transform all remaining nodes */
 	ia32_register_transformers();
-	irg_walk_blkwise_graph(cg->irg, ia32_place_consts_set_modes, ia32_transform_node, cg);
+	dom = be_compute_dominance_frontiers(cg->irg);
+	irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
+	be_free_dominance_frontiers(dom);
 	be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
 
+	/* 3rd: optimize address mode */
 	FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
 	ia32_optimize_addressmode(cg);
 	be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
 	DEBUG_ONLY(cg->mod = old_mod;)
 }
 
+static INLINE int need_constraint_copy(ir_node *irn) {
+	return \
+		! is_ia32_Lea(irn)          && \
+		! is_ia32_Conv_I2I(irn)     && \
+		! is_ia32_Conv_I2I8Bit(irn) && \
+		! is_ia32_CmpCMov(irn)      && \
+		! is_ia32_CmpSet(irn);
+}
 
 /**
  * Insert copies for all ia32 nodes where the should_be_same requirement
@@ -535,8 +674,7 @@ static void ia32_finish_node(ir_node *irn, void *env) {
 	block = get_nodes_block(irn);
 
 	/* check all OUT requirements, if there is a should_be_same */
-	if ((op_tp == ia32_Normal || op_tp == ia32_AddrModeS) &&
-		! is_ia32_Lea(irn) && ! is_ia32_Conv_I2I(irn) && ! is_ia32_Conv_I2I8Bit(irn))
+	if ((op_tp == ia32_Normal || op_tp == ia32_AddrModeS) && need_constraint_copy(irn))
 	{
 		for (i = 0; i < n_res; i++) {
 			if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
@@ -587,11 +725,14 @@ insert_copy:
 			}
 		}
 
-		/* If we have a CondJmp with immediate, we need to                */
+		/* If we have a CondJmp/CmpSet/xCmpSet with immediate, we need to */
 		/* check if it's the right operand, otherwise we have             */
 		/* to change it, as CMP doesn't support immediate as              */
 		/* left operands.                                                 */
-		if (is_ia32_CondJmp(irn) && (is_ia32_ImmConst(irn) || is_ia32_ImmSymConst(irn)) && op_tp == ia32_AddrModeS) {
+		if ((is_ia32_CondJmp(irn) || is_ia32_CmpSet(irn) || is_ia32_xCmpSet(irn)) &&
+			(is_ia32_ImmConst(irn) || is_ia32_ImmSymConst(irn)) &&
+			op_tp == ia32_AddrModeS)
+		{
 			set_ia32_op_type(irn, ia32_AddrModeD);
 			set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
 		}
@@ -600,7 +741,7 @@ insert_copy:
 		ia32_transform_sub_to_neg_add(irn, cg);
 
 		/* transform a LEA into an Add if possible */
-		//ia32_transform_lea_to_add(irn, cg);
+		ia32_transform_lea_to_add(irn, cg);
 	}
 end:
@@ -997,6 +1138,8 @@ static void *ia32_init(FILE *file_handle) {
 	isa->name_obst_size = 0;
 #endif /* NDEBUG */
 
+	ia32_handle_intrinsics();
+
 	ia32_switch_section(NULL, NO_SECTION);
 	fprintf(isa->out, "\t.intel_syntax\n");
 	inited = 1;
@@ -1210,6 +1353,30 @@ static int ia32_get_reg_class_alignment(const void *self, const arch_register_cl
 	return bytes;
 }
 
+/**
+ * Returns the libFirm configuration parameters for this backend.
+ */
+static const backend_params *ia32_get_libfirm_params(void) {
+	static const arch_dep_params_t ad = {
+		1,  /* also use subs */
+		4,  /* maximum shifts */
+		31, /* maximum shift amount */
+
+		1,  /* allow Mulhs */
+		1,  /* allow Mulus */
+		32  /* Mulh allowed up to 32 bit */
+	};
+	static backend_params p = {
+		NULL, /* no additional opcodes */
+		NULL, /* will be set later */
+		1,    /* need dword lowering */
+		ia32_create_intrinsic_fkt,
+		NULL, /* context for ia32_create_intrinsic_fkt */
+	};
+
+	p.dep_param = &ad;
+	return &p;
+}
 #ifdef WITH_LIBCORE
 
 /* instruction set architectures. */
@@ -1262,7 +1429,7 @@ static const lc_opt_enum_int_items_t gas_items[] = {
 };
 
 static lc_opt_enum_int_var_t gas_var = {
-	&asm_flavour, gas_items
+	(int *)&asm_flavour, gas_items
 };
 
 static const lc_opt_table_entry_t ia32_options[] = {
@@ -1312,6 +1479,7 @@ const arch_isa_if_t ia32_isa_if = {
 	ia32_get_code_generator_if,
 	ia32_get_list_sched_selector,
 	ia32_get_reg_class_alignment,
+	ia32_get_libfirm_params,
 #ifdef WITH_LIBCORE
 	ia32_register_options
 #endif
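
Editor's addendum (not part of the patch): a minimal sketch of how the inverse-operation hook introduced above might be exercised. It assumes the surrounding bearch_ia32.c context, so the static ia32_get_inverse() and the arch_inverse_t fields nodes/n/costs shown in the hunk are visible, plus <stdio.h> and an obstack header set up the way the rest of this file already uses obstacks; the helper name dump_inverse_info() and its printf output are invented for the example.

/* Illustrative only: ask the new hook whether operand `i` of an ia32 node
 * can be recomputed, and report the estimated cost.  Assumed to live in
 * bearch_ia32.c below ia32_get_inverse(). */
static void dump_inverse_info(const ir_node *irn, int i) {
	struct obstack obst;
	arch_inverse_t inverse;

	obstack_init(&obst);

	/* self is not used by ia32_get_inverse() above, so NULL is fine here */
	if (ia32_get_inverse(NULL, irn, i, &inverse, &obst) != NULL) {
		/* nodes[0] is the recomputing operation, nodes[1] the Proj for its result */
		printf("operand %d: invertible with %d node(s), estimated cost %d\n",
		       i, inverse.n, inverse.costs);
	}
	else {
		printf("operand %d: not invertible\n", i);
	}

	/* frees only the nodes array that ia32_get_inverse() allocated on the obstack */
	obstack_free(&obst, NULL);
}

Note that the hook also constructs real ia32 nodes (and a Proj) in the graph of irn as a side effect, so an actual caller behind the arch_irn_ops_if_t interface would presumably either wire those nodes into the schedule or discard them; only the two-entry nodes array itself lives on the caller-supplied obstack.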