X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Farm%2Fbearch_arm.c;h=e4a350e1808604b7a179f28d6942148b5cee990b;hb=78a7ccdf4f405f1bd41e82f60b6e996a75f631fb;hp=f39dfa62c3c273b1975a4f37aa484fcdfe1f216a;hpb=a39a1c37b67b86361c089e416883812d64eab6fa;p=libfirm diff --git a/ir/be/arm/bearch_arm.c b/ir/be/arm/bearch_arm.c index f39dfa62c..e4a350e18 100644 --- a/ir/be/arm/bearch_arm.c +++ b/ir/be/arm/bearch_arm.c @@ -36,7 +36,8 @@ #include "irprintf.h" #include "ircons.h" #include "irgmod.h" -#include "lower_intrinsics.h" +#include "irgopt.h" +#include "lowering.h" #include "bitset.h" #include "debug.h" @@ -45,12 +46,13 @@ #include "../benode_t.h" #include "../belower.h" #include "../besched_t.h" -#include "../be.h" +#include "be.h" #include "../beabi.h" #include "../bemachine.h" #include "../beilpsched.h" #include "../bemodule.h" #include "../beirg_t.h" +#include "../bespillslots.h" #include "../begnuas.h" #include "bearch_arm_t.h" @@ -84,36 +86,30 @@ static set *cur_reg_set = NULL; */ static const arch_register_req_t *arm_get_irn_reg_req(const void *self, const ir_node *node, - int pos) { + int pos) +{ long node_pos = pos == -1 ? 0 : pos; ir_mode *mode = get_irn_mode(node); - FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE); + (void) self; - if (is_Block(node) || mode == mode_X || mode == mode_M) { - DBG((mod, LEVEL_1, "ignoring mode_T, mode_M node %+F\n", node)); + if (is_Block(node) || mode == mode_X) { return arch_no_register_req; } if (mode == mode_T && pos < 0) { - DBG((mod, LEVEL_1, "ignoring request for OUT requirements at %+F\n", node)); return arch_no_register_req; } - DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, node)); - if (is_Proj(node)) { - /* in case of a proj, we need to get the correct OUT slot */ - /* of the node corresponding to the proj number */ - if (pos == -1) { - node_pos = arm_translate_proj_pos(node); - } - else { - node_pos = pos; - } + if(mode == mode_M) + return arch_no_register_req; - node = skip_Proj_const(node); + if(pos >= 0) { + return arch_no_register_req; + } - DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", node, node_pos)); + node_pos = (pos == -1) ? 
get_Proj_proj(node) : pos; + node = skip_Proj_const(node); } /* get requirements for our own nodes */ @@ -125,27 +121,26 @@ arch_register_req_t *arm_get_irn_reg_req(const void *self, const ir_node *node, req = get_arm_out_req(node, node_pos); } - DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", node, pos)); return req; } - /* unknown should be tranformed by now */ + /* unknown should be transformed by now */ assert(!is_Unknown(node)); - DB((mod, LEVEL_1, "returning NULL for %+F (node not supported)\n", node)); - return arch_no_register_req; } -static void arm_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) { +static void arm_set_irn_reg(const void *self, ir_node *irn, + const arch_register_t *reg) +{ int pos = 0; + (void) self; - if (is_Proj(irn)) { - - if (get_irn_mode(irn) == mode_X) { - return; - } + if (get_irn_mode(irn) == mode_X) { + return; + } - pos = arm_translate_proj_pos(irn); + if (is_Proj(irn)) { + pos = get_Proj_proj(irn); irn = skip_Proj(irn); } @@ -161,9 +156,12 @@ static void arm_set_irn_reg(const void *self, ir_node *irn, const arch_register_ } } -static const arch_register_t *arm_get_irn_reg(const void *self, const ir_node *irn) { +static const arch_register_t *arm_get_irn_reg(const void *self, + const ir_node *irn) +{ int pos = 0; const arch_register_t *reg = NULL; + (void) self; if (is_Proj(irn)) { @@ -171,7 +169,7 @@ static const arch_register_t *arm_get_irn_reg(const void *self, const ir_node *i return NULL; } - pos = arm_translate_proj_pos(irn); + pos = get_Proj_proj(irn); irn = skip_Proj_const(irn); } @@ -187,7 +185,9 @@ static const arch_register_t *arm_get_irn_reg(const void *self, const ir_node *i return reg; } -static arch_irn_class_t arm_classify(const void *self, const ir_node *irn) { +static arch_irn_class_t arm_classify(const void *self, const ir_node *irn) +{ + (void) self; irn = skip_Proj_const(irn); if (is_cfop(irn)) { @@ -200,7 +200,9 @@ static arch_irn_class_t arm_classify(const void *self, const ir_node *irn) { return 0; } -static arch_irn_flags_t arm_get_flags(const void *self, const ir_node *irn) { +static arch_irn_flags_t arm_get_flags(const void *self, const ir_node *irn) +{ + (void) self; irn = skip_Proj_const(irn); if (is_arm_irn(irn)) { @@ -213,12 +215,19 @@ static arch_irn_flags_t arm_get_flags(const void *self, const ir_node *irn) { return 0; } -static ir_entity *arm_get_frame_entity(const void *self, const ir_node *irn) { +static ir_entity *arm_get_frame_entity(const void *self, const ir_node *irn) +{ + (void) self; + (void) irn; /* TODO: return the entity assigned to the frame */ return NULL; } -static void arm_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) { +static void arm_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) +{ + (void) self; + (void) irn; + (void) ent; /* TODO: set the entity assigned to the frame */ } @@ -226,11 +235,18 @@ static void arm_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) * This function is called by the generic backend to correct offsets for * nodes accessing the stack. 
*/ -static void arm_set_stack_bias(const void *self, ir_node *irn, int bias) { +static void arm_set_stack_bias(const void *self, ir_node *irn, int bias) +{ + (void) self; + (void) irn; + (void) bias; /* TODO: correct offset if irn accesses the stack */ } -static int arm_get_sp_bias(const void *self, const ir_node *irn) { +static int arm_get_sp_bias(const void *self, const ir_node *irn) +{ + (void) self; + (void) irn; return 0; } @@ -277,16 +293,28 @@ arm_irn_ops_t arm_irn_ops = { static void arm_prepare_graph(void *self) { arm_code_gen_t *cg = self; - arm_register_transformers(); - irg_walk_blkwise_graph(cg->irg, arm_move_consts, arm_transform_node, cg); -} + /* transform nodes into assembler instructions */ + arm_transform_graph(cg); + /* do local optimizations (mainly CSE) */ + local_optimize_graph(cg->irg); + if (cg->dump) + be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched); + + /* do code placement, to optimize the position of constants */ + place_code(cg->irg); + + if (cg->dump) + be_dump(cg->irg, "-place", dump_ir_block_graph_sched); +} /** * Called immediately before emit phase. */ -static void arm_finish_irg(void *self) { +static void arm_finish_irg(void *self) +{ + (void) self; /* TODO: - fix offsets for nodes accessing stack - ... */ @@ -296,14 +324,27 @@ static void arm_finish_irg(void *self) { /** * These are some hooks which must be filled but are probably not needed. */ -static void arm_before_sched(void *self) { +static void arm_before_sched(void *self) +{ + (void) self; /* Some stuff you need to do after scheduling but before register allocation */ } -static void arm_before_ra(void *self) { +static void arm_before_ra(void *self) +{ + (void) self; /* Some stuff you need to do immediately after register allocation */ } +/** + * We transform Spill and Reload here. This needs to be done before + * stack biasing otherwise we would miss the corrected offset for these nodes. + */ +static void arm_after_ra(void *self) +{ + arm_code_gen_t *cg = self; + be_coalesce_spillslots(cg->birg); +} /** * Emits the code, closes the output file and frees @@ -311,9 +352,8 @@ static void arm_before_ra(void *self) { */ static void arm_emit_and_done(void *self) { arm_code_gen_t *cg = self; - ir_graph *irg = cg->irg; + ir_graph *irg = cg->irg; - dump_ir_block_graph_sched(irg, "-arm-finished"); arm_gen_routine(cg, irg); cur_reg_set = NULL; @@ -376,7 +416,10 @@ static ir_node *convert_dbl_to_int(ir_node *bl, ir_node *arg, ir_node *mem, * 1.) A constant: simply move * 2.) 
A load: simply load */ -static ir_node *convert_sng_to_int(ir_node *bl, ir_node *arg) { +static ir_node *convert_sng_to_int(ir_node *bl, ir_node *arg) +{ + (void) bl; + if (is_Const(arg)) { tarval *tv = get_Const_tarval(arg); unsigned v; @@ -405,7 +448,7 @@ static ir_node *convert_sng_to_int(ir_node *bl, ir_node *arg) { static void handle_calls(ir_node *call, void *env) { arm_code_gen_t *cg = env; - int i, j, n, size, idx, flag, n_param, n_res; + int i, j, n, size, idx, flag, n_param, n_res, first_variadic; ir_type *mtp, *new_mtd, *new_tp[5]; ir_node *new_in[5], **in; ir_node *bl; @@ -484,7 +527,9 @@ static void handle_calls(ir_node *call, void *env) set_method_res_type(new_mtd, i, get_method_res_type(mtp, i)); set_method_calling_convention(new_mtd, get_method_calling_convention(mtp)); - set_method_first_variadic_param_index(new_mtd, get_method_first_variadic_param_index(mtp)); + first_variadic = get_method_first_variadic_param_index(mtp); + if (first_variadic >= 0) + set_method_first_variadic_param_index(new_mtd, first_variadic); if (is_lowered_type(mtp)) { mtp = get_associated_type(mtp); @@ -516,6 +561,7 @@ static void arm_before_abi(void *self) { irg_walk_graph(cg->irg, NULL, handle_calls, cg); } +/* forward */ static void *arm_cg_init(be_irg_t *birg); static const arch_code_generator_if_t arm_code_gen_if = { @@ -525,7 +571,7 @@ static const arch_code_generator_if_t arm_code_gen_if = { NULL, /* spill */ arm_before_sched, /* before scheduling hook */ arm_before_ra, /* before register allocation hook */ - NULL, /* after register allocation */ + arm_after_ra, arm_finish_irg, arm_emit_and_done, }; @@ -544,14 +590,17 @@ static void *arm_cg_init(be_irg_t *birg) { } cg = xmalloc(sizeof(*cg)); - cg->impl = &arm_code_gen_if; - cg->irg = birg->irg; - cg->reg_set = new_set(arm_cmp_irn_reg_assoc, 1024); - cg->arch_env = birg->main_env->arch_env; - cg->isa = isa; - cg->birg = birg; - cg->int_tp = int_tp; - cg->have_fp = 0; + cg->impl = &arm_code_gen_if; + cg->irg = birg->irg; + cg->reg_set = new_set(arm_cmp_irn_reg_assoc, 1024); + cg->arch_env = birg->main_env->arch_env; + cg->isa = isa; + cg->birg = birg; + cg->int_tp = int_tp; + cg->have_fp_insn = 0; + cg->unknown_gp = NULL; + cg->unknown_fpa = NULL; + cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0; FIRM_DBG_REGISTER(cg->mod, "firm.be.arm.cg"); @@ -572,114 +621,122 @@ static void *arm_cg_init(be_irg_t *birg) { * to runtime calls. */ static void arm_handle_intrinsics(void) { - ir_type *tp, *int_tp, *uint_tp; - i_record records[8]; - int n_records = 0; + ir_type *tp, *int_tp, *uint_tp; + i_record records[8]; + int n_records = 0; #define ID(x) new_id_from_chars(x, sizeof(x)-1) - int_tp = new_type_primitive(ID("int"), mode_Is); - uint_tp = new_type_primitive(ID("uint"), mode_Iu); + int_tp = new_type_primitive(ID("int"), mode_Is); + uint_tp = new_type_primitive(ID("uint"), mode_Iu); /* ARM has neither a signed div instruction ... 
*/ - { - runtime_rt rt_Div; - i_instr_record *map_Div = &records[n_records++].i_instr; - - tp = new_type_method(ID("rt_iDiv"), 2, 1); - set_method_param_type(tp, 0, int_tp); - set_method_param_type(tp, 1, int_tp); - set_method_res_type(tp, 0, int_tp); - - rt_Div.ent = new_entity(get_glob_type(), ID("__divsi3"), tp); - rt_Div.mode = mode_T; - rt_Div.mem_proj_nr = pn_Div_M; - rt_Div.exc_proj_nr = pn_Div_X_except; - rt_Div.exc_mem_proj_nr = pn_Div_M; - rt_Div.res_proj_nr = pn_Div_res; - - set_entity_visibility(rt_Div.ent, visibility_external_allocated); - - map_Div->kind = INTRINSIC_INSTR; - map_Div->op = op_Div; - map_Div->i_mapper = (i_mapper_func)i_mapper_RuntimeCall; - map_Div->ctx = &rt_Div; - } - /* ... nor a signed div instruction ... */ - { - runtime_rt rt_Div; - i_instr_record *map_Div = &records[n_records++].i_instr; - - tp = new_type_method(ID("rt_uDiv"), 2, 1); - set_method_param_type(tp, 0, uint_tp); - set_method_param_type(tp, 1, uint_tp); - set_method_res_type(tp, 0, uint_tp); - - rt_Div.ent = new_entity(get_glob_type(), ID("__udivsi3"), tp); - rt_Div.mode = mode_T; - rt_Div.mem_proj_nr = pn_Div_M; - rt_Div.exc_proj_nr = pn_Div_X_except; - rt_Div.exc_mem_proj_nr = pn_Div_M; - rt_Div.res_proj_nr = pn_Div_res; - - set_entity_visibility(rt_Div.ent, visibility_external_allocated); - - map_Div->kind = INTRINSIC_INSTR; - map_Div->op = op_Div; - map_Div->i_mapper = (i_mapper_func)i_mapper_RuntimeCall; - map_Div->ctx = &rt_Div; - } + { + runtime_rt rt_Div; + i_instr_record *map_Div = &records[n_records++].i_instr; + + tp = new_type_method(ID("rt_iDiv"), 2, 1); + set_method_param_type(tp, 0, int_tp); + set_method_param_type(tp, 1, int_tp); + set_method_res_type(tp, 0, int_tp); + + rt_Div.ent = new_entity(get_glob_type(), ID("__divsi3"), tp); + rt_Div.mode = mode_T; + rt_Div.res_mode = mode_Is; + rt_Div.mem_proj_nr = pn_Div_M; + rt_Div.regular_proj_nr = pn_Div_X_regular; + rt_Div.exc_proj_nr = pn_Div_X_except; + rt_Div.exc_mem_proj_nr = pn_Div_M; + rt_Div.res_proj_nr = pn_Div_res; + + set_entity_visibility(rt_Div.ent, visibility_external_allocated); + + map_Div->kind = INTRINSIC_INSTR; + map_Div->op = op_Div; + map_Div->i_mapper = (i_mapper_func)i_mapper_RuntimeCall; + map_Div->ctx = &rt_Div; + } + /* ... nor an unsigned div instruction ... */ + { + runtime_rt rt_Div; + i_instr_record *map_Div = &records[n_records++].i_instr; + + tp = new_type_method(ID("rt_uDiv"), 2, 1); + set_method_param_type(tp, 0, uint_tp); + set_method_param_type(tp, 1, uint_tp); + set_method_res_type(tp, 0, uint_tp); + + rt_Div.ent = new_entity(get_glob_type(), ID("__udivsi3"), tp); + rt_Div.mode = mode_T; + rt_Div.res_mode = mode_Iu; + rt_Div.mem_proj_nr = pn_Div_M; + rt_Div.regular_proj_nr = pn_Div_X_regular; + rt_Div.exc_proj_nr = pn_Div_X_except; + rt_Div.exc_mem_proj_nr = pn_Div_M; + rt_Div.res_proj_nr = pn_Div_res; + + set_entity_visibility(rt_Div.ent, visibility_external_allocated); + + map_Div->kind = INTRINSIC_INSTR; + map_Div->op = op_Div; + map_Div->i_mapper = (i_mapper_func)i_mapper_RuntimeCall; + map_Div->ctx = &rt_Div; + } /* ... nor a signed mod instruction ... 
*/ - { - runtime_rt rt_Mod; - i_instr_record *map_Mod = &records[n_records++].i_instr; - - tp = new_type_method(ID("rt_iMod"), 2, 1); - set_method_param_type(tp, 0, int_tp); - set_method_param_type(tp, 1, int_tp); - set_method_res_type(tp, 0, int_tp); - - rt_Mod.ent = new_entity(get_glob_type(), ID("__modsi3"), tp); - rt_Mod.mode = mode_T; - rt_Mod.mem_proj_nr = pn_Mod_M; - rt_Mod.exc_proj_nr = pn_Mod_X_except; - rt_Mod.exc_mem_proj_nr = pn_Mod_M; - rt_Mod.res_proj_nr = pn_Mod_res; - - set_entity_visibility(rt_Mod.ent, visibility_external_allocated); - - map_Mod->kind = INTRINSIC_INSTR; - map_Mod->op = op_Mod; - map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall; - map_Mod->ctx = &rt_Mod; - } - /* ... nor a unsigned mod. */ - { - runtime_rt rt_Mod; - i_instr_record *map_Mod = &records[n_records++].i_instr; - - tp = new_type_method(ID("rt_uMod"), 2, 1); - set_method_param_type(tp, 0, uint_tp); - set_method_param_type(tp, 1, uint_tp); - set_method_res_type(tp, 0, uint_tp); - - rt_Mod.ent = new_entity(get_glob_type(), ID("__umodsi3"), tp); - rt_Mod.mode = mode_T; - rt_Mod.mem_proj_nr = pn_Mod_M; - rt_Mod.exc_proj_nr = pn_Mod_X_except; - rt_Mod.exc_mem_proj_nr = pn_Mod_M; - rt_Mod.res_proj_nr = pn_Mod_res; - - set_entity_visibility(rt_Mod.ent, visibility_external_allocated); - - map_Mod->kind = INTRINSIC_INSTR; - map_Mod->op = op_Mod; - map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall; - map_Mod->ctx = &rt_Mod; - } - - if (n_records > 0) - lower_intrinsics(records, n_records); + { + runtime_rt rt_Mod; + i_instr_record *map_Mod = &records[n_records++].i_instr; + + tp = new_type_method(ID("rt_iMod"), 2, 1); + set_method_param_type(tp, 0, int_tp); + set_method_param_type(tp, 1, int_tp); + set_method_res_type(tp, 0, int_tp); + + rt_Mod.ent = new_entity(get_glob_type(), ID("__modsi3"), tp); + rt_Mod.mode = mode_T; + rt_Mod.res_mode = mode_Is; + rt_Mod.mem_proj_nr = pn_Mod_M; + rt_Mod.regular_proj_nr = pn_Mod_X_regular; + rt_Mod.exc_proj_nr = pn_Mod_X_except; + rt_Mod.exc_mem_proj_nr = pn_Mod_M; + rt_Mod.res_proj_nr = pn_Mod_res; + + set_entity_visibility(rt_Mod.ent, visibility_external_allocated); + + map_Mod->kind = INTRINSIC_INSTR; + map_Mod->op = op_Mod; + map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall; + map_Mod->ctx = &rt_Mod; + } + /* ... nor an unsigned mod. 
*/ + { + runtime_rt rt_Mod; + i_instr_record *map_Mod = &records[n_records++].i_instr; + + tp = new_type_method(ID("rt_uMod"), 2, 1); + set_method_param_type(tp, 0, uint_tp); + set_method_param_type(tp, 1, uint_tp); + set_method_res_type(tp, 0, uint_tp); + + rt_Mod.ent = new_entity(get_glob_type(), ID("__umodsi3"), tp); + rt_Mod.mode = mode_T; + rt_Mod.res_mode = mode_Iu; + rt_Mod.mem_proj_nr = pn_Mod_M; + rt_Mod.regular_proj_nr = pn_Mod_X_regular; + rt_Mod.exc_proj_nr = pn_Mod_X_except; + rt_Mod.exc_mem_proj_nr = pn_Mod_M; + rt_Mod.res_proj_nr = pn_Mod_res; + + set_entity_visibility(rt_Mod.ent, visibility_external_allocated); + + map_Mod->kind = INTRINSIC_INSTR; + map_Mod->op = op_Mod; + map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall; + map_Mod->ctx = &rt_Mod; + } + + if (n_records > 0) + lower_intrinsics(records, n_records); } /***************************************************************** @@ -699,11 +756,13 @@ static arm_isa_t arm_isa_template = { &arm_gp_regs[REG_R11], /* base pointer */ -1, /* stack direction */ NULL, /* main environment */ + 7, /* spill costs */ + 5, /* reload costs */ }, 0, /* use generic register names instead of SP, LR, PC */ ARM_FPU_ARCH_FPE, /* FPU architecture */ NULL, /* current code generator */ - { NULL, }, /* emitter environment */ + NULL_EMITTER, /* emitter environment */ }; /** @@ -719,12 +778,13 @@ static void *arm_init(FILE *file_handle) { isa = xmalloc(sizeof(*isa)); memcpy(isa, &arm_isa_template, sizeof(*isa)); - arm_register_init(isa); + arm_register_init(); isa->cg = NULL; be_emit_init_env(&isa->emit, file_handle); arm_create_opcodes(); + arm_register_copy_attr_func(); arm_handle_intrinsics(); /* we mark referenced global entities, so we can only emit those which @@ -761,13 +821,16 @@ static void arm_done(void *self) { static int arm_get_n_reg_class(const void *self) { const arm_isa_t *isa = self; - return isa->cg->have_fp ? 2 : 1; + /* ARGH! is called BEFORE transform */ + return 2; + return isa->cg->have_fp_insn ? 2 : 1; } /** * Return the register class with requested index. */ static const arch_register_class_t *arm_get_reg_class(const void *self, int i) { + (void) self; return i == 0 ? &arm_reg_classes[CLASS_arm_gp] : &arm_reg_classes[CLASS_arm_fpa]; } @@ -928,6 +991,7 @@ static void arm_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_m curr_pc = be_new_Copy(&arm_reg_classes[CLASS_arm_gp], env->irg, bl, curr_lr ); arch_set_irn_register(env->arch_env, curr_pc, &arm_gp_regs[REG_PC]); be_set_constr_single_reg(curr_pc, BE_OUT_POS(0), &arm_gp_regs[REG_PC] ); + be_node_set_flags(curr_pc, BE_OUT_POS(0), arch_irn_flags_ignore); } else { ir_node *sub12_node; ir_node *load_node; @@ -999,14 +1063,34 @@ void arm_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi be_abi_call_param_stack(abi, i, 4, 0, 0); } - /* default: return value is in R0 resp. 
F0 */ - assert(get_method_n_ress(method_type) < 2); - if (get_method_n_ress(method_type) > 0) { + /* set return registers */ + n = get_method_n_ress(method_type); + + assert(n <= 2 && "more than two results not supported"); + + /* In case of 64bit returns, we will have two 32bit values */ + if (n == 2) { + tp = get_method_res_type(method_type, 0); + mode = get_type_mode(tp); + + assert(!mode_is_float(mode) && "two FP results not supported"); + + tp = get_method_res_type(method_type, 1); + mode = get_type_mode(tp); + + assert(!mode_is_float(mode) && "mixed INT, FP results not supported"); + + be_abi_call_res_reg(abi, 0, &arm_gp_regs[REG_R0]); + be_abi_call_res_reg(abi, 1, &arm_gp_regs[REG_R1]); + } else if (n == 1) { + const arch_register_t *reg; + tp = get_method_res_type(method_type, 0); + assert(is_atomic_type(tp)); mode = get_type_mode(tp); - be_abi_call_res_reg(abi, 0, - mode_is_float(mode) ? &arm_fpa_regs[REG_F0] : &arm_gp_regs[REG_R0]); + reg = mode_is_float(mode) ? &arm_fpa_regs[REG_F0] : &arm_gp_regs[REG_R0]; + be_abi_call_res_reg(abi, 0, reg); } } @@ -1023,7 +1107,10 @@ const arch_irn_handler_t *arm_get_irn_handler(const void *self) { } int arm_to_appear_in_schedule(void *block_env, const ir_node *irn) { - return is_arm_irn(irn); + if(!is_arm_irn(irn)) + return -1; + + return 1; } /** @@ -1052,8 +1139,8 @@ static const ilp_sched_selector_t *arm_get_ilp_sched_selector(const void *self) * Returns the necessary byte alignment for storing a register of given class. */ static int arm_get_reg_class_alignment(const void *self, const arch_register_class_t *cls) { - ir_mode *mode = arch_register_class_mode(cls); - return get_mode_size_bytes(mode); + /* ARM is a 32 bit CPU, no need for other alignment */ + return 4; } static const be_execution_unit_t ***arm_get_allowed_execution_units(const void *self, const ir_node *irn) { @@ -1081,18 +1168,20 @@ static ir_graph **arm_get_irg_list(const void *self, ir_graph ***irg_list) { static const backend_params *arm_get_libfirm_params(void) { static arch_dep_params_t ad = { 1, /* allow subs */ - 0, /* Muls are fast enough on ARM */ - 31, /* shift would be ok */ + 1, /* Muls are fast enough on ARM but ... */ + 1, /* ... one shift would be possible better */ 0, /* SMUL is needed, only in Arch M*/ 0, /* UMUL is needed, only in Arch M */ 32, /* SMUL & UMUL available for 32 bit */ }; static backend_params p = { + 1, /* need dword lowering */ + 0, /* don't support inline assembler yet */ NULL, /* no additional opcodes */ NULL, /* will be set later */ - 1, /* need dword lowering */ NULL, /* but yet no creator function */ NULL, /* context for create_intrinsic_fkt */ + NULL, /* no if conversion settings */ }; p.dep_param = &ad; @@ -1146,6 +1235,8 @@ void be_init_arch_arm(void) lc_opt_add_table(arm_grp, arm_options); be_register_isa_if("arm", &arm_isa_if); + + arm_init_transform(); } BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_arm);
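
For orientation: the new intrinsic records in this patch lower Div and Mod nodes to the usual libgcc helper routines, and the method types built in those hunks (two int/uint parameters, one result of the matching mode) assume the standard C signatures sketched below. This sketch is illustrative only and is not part of the applied patch.

	int          __divsi3(int a, int b);                      /* signed 32-bit division,  bound by rt_iDiv */
	unsigned int __udivsi3(unsigned int a, unsigned int b);   /* unsigned 32-bit division, bound by rt_uDiv */
	int          __modsi3(int a, int b);                      /* signed 32-bit modulo,    bound by rt_iMod */
	unsigned int __umodsi3(unsigned int a, unsigned int b);   /* unsigned 32-bit modulo,  bound by rt_uMod */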