X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Flower%2Flower_hl.c;h=70f44f4de25652cbdc13e2bd58f2cf412623d69b;hb=269be5738826952369aef86eaab261f23ba6485d;hp=b59794d35891fe0211cc47e2212aa7af114cc289;hpb=2807124a2e9c4a4bd1e67c854ce2332b6e2c1ecc;p=libfirm

diff --git a/ir/lower/lower_hl.c b/ir/lower/lower_hl.c
index b59794d35..70f44f4de 100644
--- a/ir/lower/lower_hl.c
+++ b/ir/lower/lower_hl.c
@@ -41,7 +41,8 @@
 /**
  * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
  */
-static void lower_sel(ir_node *sel) {
+static void lower_sel(ir_node *sel)
+{
 	ir_graph  *irg = current_ir_graph;
 	ir_entity *ent;
 	ir_node   *newn, *cnst, *index, *ptr, *bl;
@@ -212,14 +213,12 @@ static void lower_sel(ir_node *sel) {
 #else /* normal code */
 		newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), add, ent_mode, 0);
 #endif
-		newn = new_r_Proj(bl, newn, ent_mode, pn_Load_res);
+		newn = new_r_Proj(newn, ent_mode, pn_Load_res);

 	} else if (get_entity_owner(ent) != get_glob_type()) {
 		int offset;

 		/* replace Sel by add(obj, const(ent.offset)) */
-		assert(!(get_entity_allocation(ent) == allocation_static &&
-			(get_entity_n_overwrites(ent) == 0 && get_entity_n_overwrittenby(ent) == 0)));
 		newn   = get_Sel_ptr(sel);
 		offset = get_entity_offset(ent);
 		if (offset != 0) {
@@ -243,7 +242,8 @@ static void lower_sel(ir_node *sel) {
 /**
  * Lower a all possible SymConst nodes.
  */
-static void lower_symconst(ir_node *symc) {
+static void lower_symconst(ir_node *symc)
+{
 	ir_node   *newn;
 	ir_type   *tp;
 	ir_entity *ent;
@@ -277,9 +277,6 @@ static void lower_symconst(ir_node *symc) {
 		hook_lower(symc);
 		exchange(symc, newn);
 		break;
-	case symconst_addr_name:
-		/* do not rewrite - pass info to back end */
-		break;
 	case symconst_addr_ent:
 		/* leave */
 		break;
@@ -317,7 +314,8 @@
  *
  * @param size  the size on bits
  */
-static int is_integral_size(int size) {
+static int is_integral_size(int size)
+{
 	/* must be a 2^n */
 	if (size & (size-1))
 		return 0;
@@ -331,7 +329,8 @@
  * @param proj  the Proj(result) node
  * @param load  the Load node
  */
-static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
+static void lower_bitfields_loads(ir_node *proj, ir_node *load)
+{
 	ir_node *sel = get_Load_ptr(load);
 	ir_node *block, *n_proj, *res, *ptr;
 	ir_entity *ent;
@@ -384,7 +383,7 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
 	/* create new proj, switch off CSE or we may get the old one back */
 	old_cse = get_opt_cse();
 	set_opt_cse(0);
-	res = n_proj = new_r_Proj(block, load, mode, pn_Load_res);
+	res = n_proj = new_r_Proj(load, mode, pn_Load_res);
 	set_opt_cse(old_cse);

 	if (mode_is_signed(mode)) { /* signed */
@@ -417,7 +416,8 @@
  *
  * @todo: It adds a load which may produce an exception!
  */
-static void lower_bitfields_stores(ir_node *store) {
+static void lower_bitfields_stores(ir_node *store)
+{
 	ir_node   *sel = get_Store_ptr(store);
 	ir_node   *ptr, *value;
 	ir_entity *ent;
@@ -473,9 +473,9 @@ static void lower_bitfields_stores(ir_node *store) {

 	if (neg_mask) {
 		/* there are some bits, normal case */
-		irn  = new_r_Load( block, mem, ptr, mode, 0);
-		mem  = new_r_Proj( block, irn, mode_M, pn_Load_M);
-		irn  = new_r_Proj( block, irn, mode, pn_Load_res);
+		irn  = new_r_Load(block, mem, ptr, mode, 0);
+		mem  = new_r_Proj(irn, mode_M, pn_Load_M);
+		irn  = new_r_Proj(irn, mode, pn_Load_res);

 		irn = new_r_And(block, irn, new_Const_long(mode, neg_mask), mode);

@@ -496,7 +496,8 @@ static void lower_bitfields_stores(ir_node *store) {
 /**
  * Lowers unaligned Loads.
  */
-static void lower_unaligned_Load(ir_node *load) {
+static void lower_unaligned_Load(ir_node *load)
+{
 	(void) load;
 	/* NYI */
 }
@@ -504,7 +505,8 @@ static void lower_unaligned_Load(ir_node *load) {
 /**
  * Lowers unaligned Stores
  */
-static void lower_unaligned_Store(ir_node *store) {
+static void lower_unaligned_Store(ir_node *store)
+{
 	(void) store;
 	/* NYI */
 }
@@ -590,14 +592,16 @@ struct pass_t {
 /**
  * Wrapper for running lower_highlevel_graph() as an ir_graph pass.
  */
-static int lower_highlevel_graph_wrapper(ir_graph *irg, void *context) {
+static int lower_highlevel_graph_wrapper(ir_graph *irg, void *context)
+{
 	struct pass_t *pass = context;

 	lower_highlevel_graph(irg, pass->lower_bitfields);
 	return 0;
 }  /* lower_highlevel_graph_wrapper */

-ir_graph_pass_t *lower_highlevel_graph_pass(const char *name, int lower_bitfields) {
+ir_graph_pass_t *lower_highlevel_graph_pass(const char *name, int lower_bitfields)
+{
 	struct pass_t *pass = XMALLOCZ(struct pass_t);

 	pass->lower_bitfields = lower_bitfields;
@@ -608,11 +612,13 @@ ir_graph_pass_t *lower_highlevel_graph_pass(const char *name, int lower_bitfield
 /*
  * does the same as lower_highlevel() for all nodes on the const code irg
  */
-void lower_const_code(void) {
+void lower_const_code(void)
+{
 	walk_const_code(NULL, lower_irnode, NULL);
 }  /* lower_const_code */

-ir_prog_pass_t *lower_const_code_pass(const char *name) {
+ir_prog_pass_t *lower_const_code_pass(const char *name)
+{
 	return def_prog_pass(name ? name : "lower_const_code", lower_const_code);
 }

@@ -621,7 +627,8 @@ ir_prog_pass_t *lower_const_code_pass(const char *name) {
  * Replace Sel nodes by address computation.  Also resolves array access.
  * Handle Bitfields by added And/Or calculations.
  */
-void lower_highlevel(int lower_bitfields) {
+void lower_highlevel(int lower_bitfields)
+{
 	int i, n;

 	n = get_irp_n_irgs();