X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Flower%2Flower_hl.c;h=fd508afca6eff6b72c8451cd63d156a114d972bd;hb=3e889332cb054e5cee1a12bba6dd0209121100cf;hp=28f4d5049ebd68b0e3403e8faf7393c15cf6b79c;hpb=f81ab0197cf9d101d6b17f2b87cd85b6eaa8a529;p=libfirm

diff --git a/ir/lower/lower_hl.c b/ir/lower/lower_hl.c
index 28f4d5049..fd508afca 100644
--- a/ir/lower/lower_hl.c
+++ b/ir/lower/lower_hl.c
@@ -35,18 +35,21 @@
 #include "irhooks.h"
 #include "irgmod.h"
 #include "irgwalk.h"
+#include "irtools.h"
+#include "irpass_t.h"
 
 /**
  * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
  */
-static void lower_sel(ir_node *sel) {
-	ir_graph *irg = current_ir_graph;
-	ir_entity *ent;
-	ir_node *newn, *cnst, *index, *ptr, *bl;
-	tarval *tv;
-	ir_mode *basemode, *mode, *mode_Int;
-	ir_type *basetyp, *owner;
-	dbg_info *dbg;
+static void lower_sel(ir_node *sel)
+{
+	ir_graph *irg = current_ir_graph;
+	ir_entity *ent;
+	ir_node *newn, *cnst, *index, *ptr, *bl;
+	ir_tarval *tv;
+	ir_mode *basemode, *mode, *mode_Int;
+	ir_type *basetyp, *owner;
+	dbg_info *dbg;
 
 	assert(is_Sel(sel));
 
@@ -59,10 +62,10 @@ static void lower_sel(ir_node *sel) {
 	owner = get_entity_owner(ent);
 
 	/*
-	 * Cannot handle value param entities here.
+	 * Cannot handle value param entities or frame type entities here.
 	 * Must be lowered by the backend.
 	 */
-	if (is_value_param_type(owner))
+	if (is_value_param_type(owner) || is_frame_type(owner))
 		return;
 
 	dbg = get_irn_dbg_info(sel);
@@ -77,8 +80,8 @@ static void lower_sel(ir_node *sel) {
 		sym.entity_p = ent;
 		bl = get_nodes_block(sel);
 
-		cnst = new_rd_SymConst(dbg, irg, bl, mode, sym, symconst_addr_ent);
-		newn = new_rd_Add(dbg, irg, bl, ptr, cnst, mode);
+		cnst = new_rd_SymConst(dbg, irg, mode, sym, symconst_addr_ent);
+		newn = new_rd_Add(dbg, bl, ptr, cnst, mode);
 	} else {
 		/* not TLS */
 
@@ -119,7 +122,7 @@ static void lower_sel(ir_node *sel) {
 
 			/* Size of the array element */
 			tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
-			last_size = new_rd_Const(dbg, irg, mode_Int, tv);
+			last_size = new_rd_Const(dbg, irg, tv);
 
 			/*
 			 * We compute the offset part of dimension d_i recursively
@@ -141,12 +144,12 @@ static void lower_sel(ir_node *sel) {
 
 				assert(irg == current_ir_graph);
 				if (! is_Unknown(lb))
-					lb = new_rd_Conv(dbg, irg, bl, copy_const_value(get_irn_dbg_info(sel), lb), mode_Int);
+					lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
 				else
 					lb = NULL;
 
 				if (! is_Unknown(ub))
-					ub = new_rd_Conv(dbg, irg, bl, copy_const_value(get_irn_dbg_info(sel), ub), mode_Int);
+					ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
 				else
 					ub = NULL;
 
@@ -159,36 +162,36 @@ static void lower_sel(ir_node *sel) {
 					assert(ub != NULL && "upper bound has to be set in multi-dim array");
 
 					/* Elements in one Dimension */
-					elms = new_rd_Sub(dbg, irg, bl, ub, lb, mode_Int);
+					elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
 				}
 
-				ind = new_rd_Conv(dbg, irg, bl, get_Sel_index(sel, dim), mode_Int);
+				ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);
 
 				/*
 				 * Normalize index, id lower bound is set, also assume
 				 * lower bound == 0
 				 */
 				if (lb != NULL)
-					ind = new_rd_Sub(dbg, irg, bl, ind, lb, mode_Int);
+					ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);
 
-				n = new_rd_Mul(dbg, irg, bl, ind, last_size, mode_Int);
+				n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);
 
 				/*
 				 * see comment above.
 				 */
 				if (i > 0)
-					last_size = new_rd_Mul(dbg, irg, bl, last_size, elms, mode_Int);
+					last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);
 
-				newn = new_rd_Add(dbg, irg, bl, newn, n, mode);
+				newn = new_rd_Add(dbg, bl, newn, n, mode);
 			}
 		} else {
 			/* no array type */
-			ir_mode *idx_mode = get_irn_mode(index);
-			tarval *tv = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);
+			ir_mode *idx_mode = get_irn_mode(index);
+			ir_tarval *tv = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);
 
-			newn = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel),
-				new_rd_Mul(dbg, irg, bl, index,
-				new_r_Const(irg, idx_mode, tv),
+			newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
+				new_rd_Mul(dbg, bl, index,
+				new_r_Const(irg, tv),
 				idx_mode),
 				mode);
 		}
@@ -201,35 +204,33 @@ static void lower_sel(ir_node *sel) {
 			/* We need an additional load when accessing methods from a dispatch table. */
 			tv = new_tarval_from_long(get_entity_offset(ent), mode_Int);
-			cnst = new_rd_Const(dbg, irg, mode_Int, tv);
-			add = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel), cnst, mode);
+			cnst = new_rd_Const(dbg, irg, tv);
+			add = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
 #ifdef DO_CACHEOPT
 			/* cacheopt version */
-			newn = new_rd_Load(dbg, irg, bl, get_Sel_mem(sel), sel, ent_mode);
+			newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), sel, ent_mode, cons_none);
 			cacheopt_map_addrs_register_node(newn);
 			set_Load_ptr(newn, add);
#else
 			/* normal code */
-			newn = new_rd_Load(dbg, irg, bl, get_Sel_mem(sel), add, ent_mode);
+			newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), add, ent_mode, cons_none);
 #endif
-			newn = new_r_Proj(irg, bl, newn, ent_mode, pn_Load_res);
+			newn = new_r_Proj(newn, ent_mode, pn_Load_res);
 		} else if (get_entity_owner(ent) != get_glob_type()) {
 			int offset;
 			/* replace Sel by add(obj, const(ent.offset)) */
-			assert(!(get_entity_allocation(ent) == allocation_static &&
-				(get_entity_n_overwrites(ent) == 0 && get_entity_n_overwrittenby(ent) == 0)));
 			newn = get_Sel_ptr(sel);
 			offset = get_entity_offset(ent);
 			if (offset != 0) {
 				ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode);
 
 				tv = new_tarval_from_long(offset, mode_UInt);
-				cnst = new_r_Const(irg, mode_UInt, tv);
-				newn = new_rd_Add(dbg, irg, bl, newn, cnst, mode);
+				cnst = new_r_Const(irg, tv);
+				newn = new_rd_Add(dbg, bl, newn, cnst, mode);
 			}
 		} else {
 			/* global_type */
-			newn = new_rd_SymConst_addr_ent(NULL, current_ir_graph, mode, ent, firm_unknown_type);
+			newn = new_rd_SymConst_addr_ent(NULL, irg, mode, ent);
 		}
 	}
 
 	/* run the hooks */
@@ -241,13 +242,15 @@ static void lower_sel(ir_node *sel) {
 /**
  * Lower a all possible SymConst nodes.
  */
-static void lower_symconst(ir_node *symc) {
+static void lower_symconst(ir_node *symc)
+{
 	ir_node *newn;
 	ir_type *tp;
 	ir_entity *ent;
-	tarval *tv;
+	ir_tarval *tv;
 	ir_enum_const *ec;
 	ir_mode *mode;
+	ir_graph *irg;
 
 	switch (get_SymConst_kind(symc)) {
 	case symconst_type_tag:
@@ -255,11 +258,11 @@ static void lower_symconst(ir_node *symc) {
 		break;
 	case symconst_type_size:
 		/* rewrite the SymConst node by a Const node */
+		irg = get_irn_irg(symc);
 		tp = get_SymConst_type(symc);
 		assert(get_type_state(tp) == layout_fixed);
 		mode = get_irn_mode(symc);
-		tv = new_tarval_from_long(get_type_size_bytes(tp), mode);
-		newn = new_Const(get_irn_mode(symc), tv);
+		newn = new_r_Const_long(irg, mode, get_type_size_bytes(tp));
 		assert(newn);
 		/* run the hooks */
 		hook_lower(symc);
@@ -267,29 +270,26 @@ static void lower_symconst(ir_node *symc) {
 		break;
 	case symconst_type_align:
 		/* rewrite the SymConst node by a Const node */
+		irg = get_irn_irg(symc);
 		tp = get_SymConst_type(symc);
 		assert(get_type_state(tp) == layout_fixed);
 		mode = get_irn_mode(symc);
-		tv = new_tarval_from_long(get_type_alignment_bytes(tp), mode);
-		newn = new_Const(mode, tv);
+		newn = new_r_Const_long(irg, mode, get_type_alignment_bytes(tp));
 		assert(newn);
 		/* run the hooks */
 		hook_lower(symc);
 		exchange(symc, newn);
 		break;
-	case symconst_addr_name:
-		/* do not rewrite - pass info to back end */
-		break;
 	case symconst_addr_ent:
 		/* leave */
 		break;
 	case symconst_ofs_ent:
 		/* rewrite the SymConst node by a Const node */
+		irg = get_irn_irg(symc);
 		ent = get_SymConst_entity(symc);
 		assert(get_type_state(get_entity_type(ent)) == layout_fixed);
 		mode = get_irn_mode(symc);
-		tv = new_tarval_from_long(get_entity_offset(ent), mode);
-		newn = new_Const(mode, tv);
+		newn = new_r_Const_long(irg, mode, get_entity_offset(ent));
 		assert(newn);
 		/* run the hooks */
 		hook_lower(symc);
@@ -297,18 +297,16 @@ static void lower_symconst(ir_node *symc) {
 		break;
 	case symconst_enum_const:
 		/* rewrite the SymConst node by a Const node */
+		irg = get_irn_irg(symc);
 		ec = get_SymConst_enum(symc);
 		assert(get_type_state(get_enumeration_owner(ec)) == layout_fixed);
 		tv = get_enumeration_value(ec);
-		newn = new_Const(get_irn_mode(symc), tv);
+		newn = new_r_Const(irg, tv);
 		assert(newn);
 		/* run the hooks */
 		hook_lower(symc);
 		exchange(symc, newn);
 		break;
-	case symconst_label:
-		/* leave */
-		break;
 
 	default:
 		assert(!"unknown SymConst kind");
@@ -321,7 +319,8 @@
  *
  * @param size the size on bits
  */
-static int is_integral_size(int size) {
+static int is_integral_size(int size)
+{
 	/* must be a 2^n */
 	if (size & (size-1))
 		return 0;
@@ -335,9 +334,11 @@
  * @param proj the Proj(result) node
  * @param load the Load node
  */
-static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
+static void lower_bitfields_loads(ir_node *proj, ir_node *load)
+{
 	ir_node *sel = get_Load_ptr(load);
 	ir_node *block, *n_proj, *res, *ptr;
+	ir_graph *irg;
 	ir_entity *ent;
 	ir_type *bf_type;
 	ir_mode *bf_mode, *mode;
@@ -377,9 +378,10 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
 	 */
 
 	/* abandon bitfield sel */
+	irg = get_irn_irg(sel);
 	ptr = get_Sel_ptr(sel);
 	db = get_irn_dbg_info(sel);
-	ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
+	ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr));
 
 	set_Load_ptr(load, ptr);
 	set_Load_mode(load, mode);
@@ -388,7 +390,7 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
 	/* create new proj, switch off CSE or we may get the old one back */
 	old_cse = get_opt_cse();
 	set_opt_cse(0);
-	res = n_proj = new_r_Proj(current_ir_graph, block, load, mode, pn_Load_res);
+	res = n_proj = new_r_Proj(load, mode, pn_Load_res);
 	set_opt_cse(old_cse);
 
 	if (mode_is_signed(mode)) { /* signed */
@@ -396,24 +398,20 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
 		int shift_count_down = bits - bf_bits;
 
 		if (shift_count_up) {
-			res = new_r_Shl(current_ir_graph, block, res,
-				new_Const(mode_Iu, new_tarval_from_long(shift_count_up, mode_Iu)), mode);
+			res = new_r_Shl(block, res, new_r_Const_long(irg, mode_Iu, shift_count_up), mode);
 		}
 		if (shift_count_down) {
-			res = new_r_Shrs(current_ir_graph, block, res,
-				new_Const(mode_Iu, new_tarval_from_long(shift_count_down, mode_Iu)), mode);
+			res = new_r_Shrs(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode);
 		}
 	} else { /* unsigned */
 		int shift_count_down = bit_offset;
 		unsigned mask = ((unsigned)-1) >> (bits - bf_bits);
 
 		if (shift_count_down) {
-			res = new_r_Shr(current_ir_graph, block, res,
-				new_Const(mode_Iu, new_tarval_from_long(shift_count_down, mode_Iu)), mode);
+			res = new_r_Shr(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode);
 		}
 		if (bits != bf_bits) {
-			res = new_r_And(current_ir_graph, block, res,
-				new_Const(mode, new_tarval_from_long(mask, mode)), mode);
+			res = new_r_And(block, res, new_r_Const_long(irg, mode, mask), mode);
 		}
 	}
 
@@ -425,13 +423,15 @@
  *
 * @todo: It adds a load which may produce an exception!
  */
-static void lower_bitfields_stores(ir_node *store) {
+static void lower_bitfields_stores(ir_node *store)
+{
 	ir_node *sel = get_Store_ptr(store);
 	ir_node *ptr, *value;
 	ir_entity *ent;
 	ir_type *bf_type;
 	ir_mode *bf_mode, *mode;
 	ir_node *mem, *irn, *block;
+	ir_graph *irg;
 	unsigned mask, neg_mask;
 	int bf_bits, bits_mask, offset, bit_offset;
 	dbg_info *db;
@@ -475,28 +475,26 @@ static void lower_bitfields_stores(ir_node *store) {
 	neg_mask = ~mask;
 
 	/* abandon bitfield sel */
+	irg = get_irn_irg(sel);
 	ptr = get_Sel_ptr(sel);
 	db = get_irn_dbg_info(sel);
-	ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
+	ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr));
 
 	if (neg_mask) {
 		/* there are some bits, normal case */
-		irn = new_r_Load(current_ir_graph, block, mem, ptr, mode);
-		mem = new_r_Proj(current_ir_graph, block, irn, mode_M, pn_Load_M);
-		irn = new_r_Proj(current_ir_graph, block, irn, mode, pn_Load_res);
+		irn = new_r_Load(block, mem, ptr, mode, cons_none);
+		mem = new_r_Proj(irn, mode_M, pn_Load_M);
+		irn = new_r_Proj(irn, mode, pn_Load_res);
 
-		irn = new_r_And(current_ir_graph, block, irn,
-			new_Const(mode, new_tarval_from_long(neg_mask, mode)), mode);
+		irn = new_r_And(block, irn, new_r_Const_long(irg, mode, neg_mask), mode);
 
 		if (bit_offset > 0) {
-			value = new_r_Shl(current_ir_graph, block, value,
-				new_Const(mode_Iu, new_tarval_from_long(bit_offset, mode_Iu)), mode);
+			value = new_r_Shl(block, value, new_r_Const_long(irg, mode_Iu, bit_offset), mode);
 		}
 
-		value = new_r_And(current_ir_graph, block, value,
-			new_Const(mode, new_tarval_from_long(mask, mode)), mode);
+		value = new_r_And(block, value, new_r_Const_long(irg, mode, mask), mode);
 
-		value = new_r_Or(current_ir_graph, block, value, irn, mode);
+		value = new_r_Or(block, value, irn, mode);
 	}
 
 	set_Store_mem(store, mem);
@@ -507,7 +505,8 @@ static void lower_bitfields_stores(ir_node *store) {
 /**
  * Lowers unaligned Loads.
  */
-static void lower_unaligned_Load(ir_node *load) {
+static void lower_unaligned_Load(ir_node *load)
+{
 	(void) load;
 	/* NYI */
 }
@@ -515,7 +514,8 @@ static void lower_unaligned_Load(ir_node *load) {
 /**
  * Lowers unaligned Stores
  */
-static void lower_unaligned_Store(ir_node *store) {
+static void lower_unaligned_Store(ir_node *store)
+{
 	(void) store;
 	/* NYI */
 }
@@ -523,7 +523,8 @@ static void lower_unaligned_Store(ir_node *store) {
 /**
  * lowers IR-nodes, called from walker
  */
-static void lower_irnode(ir_node *irn, void *env) {
+static void lower_irnode(ir_node *irn, void *env)
+{
 	(void) env;
 	switch (get_irn_opcode(irn)) {
 	case iro_Sel:
@@ -546,12 +547,13 @@ static void lower_irnode(ir_node *irn, void *env) {
 	default:
 		break;
 	}
-} /* lower_irnode */
+}
 
 /**
  * Walker: lowers IR-nodes for bitfield access
 */
-static void lower_bf_access(ir_node *irn, void *env) {
+static void lower_bf_access(ir_node *irn, void *env)
+{
 	(void) env;
 	switch (get_irn_opcode(irn)) {
 	case iro_Proj:
@@ -570,15 +572,15 @@ static void lower_bf_access(ir_node *irn, void *env) {
 	default:
 		break;
 	}
-} /* lower_bf_access */
+}
 
 /*
  * Replaces SymConsts by a real constant if possible.
  * Replace Sel nodes by address computation. Also resolves array access.
  * Handle Bitfields by added And/Or calculations.
 */
-void lower_highlevel_graph(ir_graph *irg, int lower_bitfields) {
-
+void lower_highlevel_graph(ir_graph *irg, int lower_bitfields)
+{
 	if (lower_bitfields) {
 		/* First step: lower bitfield access: must be run as long as Sels still
 		 * exists. */
@@ -587,21 +589,55 @@ void lower_highlevel_graph(ir_graph *irg, int lower_bitfields) {
 
 	/* Finally: lower SymConst-Size and Sel nodes, Casts, unaligned Load/Stores. */
 	irg_walk_graph(irg, NULL, lower_irnode, NULL);
-} /* lower_highlevel_graph */
+
+	set_irg_outs_inconsistent(irg);
+}
+
+typedef struct pass_t {
+	ir_graph_pass_t pass;
+	int lower_bitfields;
+} pass_t;
+
+/**
+ * Wrapper for running lower_highlevel_graph() as an ir_graph pass.
+ */
+static int lower_highlevel_graph_wrapper(ir_graph *irg, void *context)
+{
+	pass_t *pass = (pass_t*)context;
+
+	lower_highlevel_graph(irg, pass->lower_bitfields);
+	return 0;
+} /* lower_highlevel_graph_wrapper */
+
+ir_graph_pass_t *lower_highlevel_graph_pass(const char *name, int lower_bitfields)
+{
+	pass_t *pass = XMALLOCZ(pass_t);
+
+	pass->lower_bitfields = lower_bitfields;
+	return def_graph_pass_constructor(
+		&pass->pass, name ? name : "lower_hl", lower_highlevel_graph_wrapper);
+} /* lower_highlevel_graph_pass */
 
 /*
  * does the same as lower_highlevel() for all nodes on the const code irg
 */
-void lower_const_code(void) {
+void lower_const_code(void)
+{
 	walk_const_code(NULL, lower_irnode, NULL);
 } /* lower_const_code */
 
+ir_prog_pass_t *lower_const_code_pass(const char *name)
+{
+	return def_prog_pass(name ? name : "lower_const_code", lower_const_code);
+}
+
 /*
  * Replaces SymConsts by a real constant if possible.
  * Replace Sel nodes by address computation. Also resolves array access.
  * Handle Bitfields by added And/Or calculations.
 */
-void lower_highlevel(int lower_bitfields) {
+void lower_highlevel(int lower_bitfields)
+{
 	int i, n;
 
 	n = get_irp_n_irgs();
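
For reference, a minimal sketch (not part of the patch) of how the lowering entry
points touched above are typically driven. It uses only functions visible in this
diff plus the irp accessors get_irp_n_irgs()/get_irp_irg(); the include names are
assumptions based on the usual libfirm layout.

#include "irprog.h"   /* assumed: declares get_irp_n_irgs() and get_irp_irg() */
#include "lowering.h" /* assumed: declares the lowering entry points */

/* Hypothetical driver: run the per-graph lowering over every graph in the
 * program, then lower the const-code irg as well, mirroring what the
 * (truncated) lower_highlevel() hunk above does. */
static void run_lower_highlevel(int lower_bitfields)
{
	int i, n = get_irp_n_irgs();

	for (i = 0; i < n; ++i)
		lower_highlevel_graph(get_irp_irg(i), lower_bitfields);

	/* SymConsts and Sels also occur in the const-code irg */
	lower_const_code();
}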