X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Flower%2Flower_hl.c;h=cffbf597bc9fe2a530bbd29cea0e8c3563a49a6e;hb=ed0e4ce9a109277e1a63f48f2aca7750f473bc6a;hp=70f44f4de25652cbdc13e2bd58f2cf412623d69b;hpb=ca21c59ea00ff05918de26952e91ac39f1589e01;p=libfirm

diff --git a/ir/lower/lower_hl.c b/ir/lower/lower_hl.c
index 70f44f4de..cffbf597b 100644
--- a/ir/lower/lower_hl.c
+++ b/ir/lower/lower_hl.c
@@ -43,201 +43,161 @@
  */
 static void lower_sel(ir_node *sel)
 {
-	ir_graph *irg = current_ir_graph;
-	ir_entity *ent;
-	ir_node *newn, *cnst, *index, *ptr, *bl;
-	tarval *tv;
-	ir_mode *basemode, *mode, *mode_Int;
-	ir_type *basetyp, *owner;
-	dbg_info *dbg;
-
-	assert(is_Sel(sel));
-
-	/* Do not lower frame type/global offset table access: must be lowered by the backend. */
-	ptr = get_Sel_ptr(sel);
-	if (ptr == get_irg_frame(current_ir_graph))
-		return;
-
-	ent = get_Sel_entity(sel);
-	owner = get_entity_owner(ent);
-
-	/*
-	 * Cannot handle value param entities or frame type entities here.
-	 * Must be lowered by the backend.
-	 */
-	if (is_value_param_type(owner) || is_frame_type(owner))
+	ir_graph *irg = get_irn_irg(sel);
+	ir_entity *ent = get_Sel_entity(sel);
+	ir_type *owner = get_entity_owner(ent);
+	dbg_info *dbg = get_irn_dbg_info(sel);
+	ir_mode *mode = get_irn_mode(sel);
+	ir_node *bl = get_nodes_block(sel);
+	ir_node *newn;
+
+	/* we can only replace Sels when the layout of the owner type is decided. */
+	if (get_type_state(owner) != layout_fixed)
 		return;
 
-	dbg = get_irn_dbg_info(sel);
-	mode = get_irn_mode(sel);
-
-	mode_Int = get_reference_mode_signed_eq(mode);
-
-	/* TLS access, must be handled by the linker */
-	if (get_tls_type() == owner) {
-		symconst_symbol sym;
+	if (0 < get_Sel_n_indexs(sel)) {
+		/* an Array access */
+		ir_type *basetyp = get_entity_type(ent);
+		ir_mode *basemode;
+		ir_node *index;
+		if (is_Primitive_type(basetyp))
+			basemode = get_type_mode(basetyp);
+		else
+			basemode = mode_P_data;
+
+		assert(basemode && "no mode for lowering Sel");
+		assert((get_mode_size_bits(basemode) % 8 == 0) && "cannot deal with unorthodox modes");
+		index = get_Sel_index(sel, 0);
+
+		if (is_Array_type(owner)) {
+			ir_type *arr_ty = owner;
+			size_t dims = get_array_n_dimensions(arr_ty);
+			size_t *map = ALLOCAN(size_t, dims);
+			ir_mode *mode_Int = get_reference_mode_signed_eq(mode);
+			ir_tarval *tv;
+			ir_node *last_size;
+			size_t i;
+
+			assert(dims == (size_t)get_Sel_n_indexs(sel)
+			       && "array dimension must match number of indices of Sel node");
+
+			for (i = 0; i < dims; i++) {
+				size_t order = get_array_order(arr_ty, i);
+
+				assert(order < dims &&
+				       "order of a dimension must be smaller than the array's dimension count");
+				map[order] = i;
+			}
+			newn = get_Sel_ptr(sel);
+
+			/* Size of the array element */
+			tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
+			last_size = new_rd_Const(dbg, irg, tv);
+
+			/*
+			 * We compute the offset part of dimension d_i recursively
+			 * with the offset part of dimension d_{i-1}
+			 *
+			 * off_0 = sizeof(array_element_type);
+			 * off_i = (u_i - l_i) * off_{i-1} ; i >= 1
+			 *
+			 * where u_i is the upper bound of the current dimension
+			 * and l_i the lower bound of the current dimension.
+			 */
+			for (i = dims; i > 0;) {
+				size_t dim = map[--i];
+				ir_node *lb, *ub, *elms, *n, *ind;
+
+				elms = NULL;
+				lb = get_array_lower_bound(arr_ty, dim);
+				ub = get_array_upper_bound(arr_ty, dim);
+
+				assert(irg == current_ir_graph);
+				if (! is_Unknown(lb))
+					lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
+				else
+					lb = NULL;
+
+				if (! is_Unknown(ub))
+					ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
+				else
+					ub = NULL;
 
-		sym.entity_p = ent;
-		bl = get_nodes_block(sel);
+				/*
+				 * If the array has more than one dimension, lower and upper
+				 * bounds have to be set in the non-last dimension.
+				 */
+				if (i > 0) {
+					assert(lb != NULL && "lower bound has to be set in multi-dim array");
+					assert(ub != NULL && "upper bound has to be set in multi-dim array");
 
-		cnst = new_rd_SymConst(dbg, irg, mode, sym, symconst_addr_ent);
-		newn = new_rd_Add(dbg, bl, ptr, cnst, mode);
-	} else {
-		/* not TLS */
+					/* Elements in one Dimension */
+					elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
+				}
 
-		assert(get_type_state(get_entity_owner(ent)) == layout_fixed);
-		assert(get_type_state(get_entity_type(ent)) == layout_fixed);
+				ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);
 
-		bl = get_nodes_block(sel);
-		if (0 < get_Sel_n_indexs(sel)) {
-			/* an Array access */
-			basetyp = get_entity_type(ent);
-			if (is_Primitive_type(basetyp))
-				basemode = get_type_mode(basetyp);
-			else
-				basemode = mode_P_data;
-
-			assert(basemode && "no mode for lowering Sel");
-			assert((get_mode_size_bits(basemode) % 8 == 0) && "can not deal with unorthodox modes");
-			index = get_Sel_index(sel, 0);
-
-			if (is_Array_type(owner)) {
-				ir_type *arr_ty = owner;
-				int dims = get_array_n_dimensions(arr_ty);
-				int *map = ALLOCAN(int, dims);
-				ir_node *last_size;
-				int i;
-
-				assert(dims == get_Sel_n_indexs(sel)
-					&& "array dimension must match number of indices of Sel node");
-
-				for (i = 0; i < dims; i++) {
-					int order = get_array_order(arr_ty, i);
-
-					assert(order < dims &&
-						"order of a dimension must be smaller than the arrays dim");
-					map[order] = i;
-				}
-				newn = get_Sel_ptr(sel);
+				/*
+				 * Normalize the index: if a lower bound is set, subtract it;
+				 * otherwise assume a lower bound of 0.
+				 */
+				if (lb != NULL)
+					ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);
 
-				/* Size of the array element */
-				tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
-				last_size = new_rd_Const(dbg, irg, tv);
+				n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);
 
 				/*
-				 * We compute the offset part of dimension d_i recursively
-				 * with the the offset part of dimension d_{i-1}
-				 *
-				 * off_0 = sizeof(array_element_type);
-				 * off_i = (u_i - l_i) * off_{i-1} ; i >= 1
-				 *
-				 * whereas u_i is the upper bound of the current dimension
-				 * and l_i the lower bound of the current dimension.
+				 * see comment above.
 				 */
-				for (i = dims - 1; i >= 0; i--) {
-					int dim = map[i];
-					ir_node *lb, *ub, *elms, *n, *ind;
-
-					elms = NULL;
-					lb = get_array_lower_bound(arr_ty, dim);
-					ub = get_array_upper_bound(arr_ty, dim);
-
-					assert(irg == current_ir_graph);
-					if (! is_Unknown(lb))
-						lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb), mode_Int);
-					else
-						lb = NULL;
-
-					if (! is_Unknown(ub))
-						ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub), mode_Int);
-					else
-						ub = NULL;
-
-					/*
-					 * If the array has more than one dimension, lower and upper
-					 * bounds have to be set in the non-last dimension.
- */ - if (i > 0) { - assert(lb != NULL && "lower bound has to be set in multi-dim array"); - assert(ub != NULL && "upper bound has to be set in multi-dim array"); - - /* Elements in one Dimension */ - elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int); - } - - ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int); - - /* - * Normalize index, id lower bound is set, also assume - * lower bound == 0 - */ - if (lb != NULL) - ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int); - - n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int); - - /* - * see comment above. - */ - if (i > 0) - last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int); - - newn = new_rd_Add(dbg, bl, newn, n, mode); - } - } else { - /* no array type */ - ir_mode *idx_mode = get_irn_mode(index); - tarval *tv = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode); - - newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel), - new_rd_Mul(dbg, bl, index, - new_r_Const(irg, tv), - idx_mode), - mode); - } - } else if (is_Method_type(get_entity_type(ent)) && - is_Class_type(owner) && - (owner != get_glob_type()) && - (!is_frame_type(owner))) { - ir_node *add; - ir_mode *ent_mode = get_type_mode(get_entity_type(ent)); - - /* We need an additional load when accessing methods from a dispatch table. */ - tv = new_tarval_from_long(get_entity_offset(ent), mode_Int); - cnst = new_rd_Const(dbg, irg, tv); - add = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode); -#ifdef DO_CACHEOPT /* cacheopt version */ - newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), sel, ent_mode, 0); - cacheopt_map_addrs_register_node(newn); - set_Load_ptr(newn, add); -#else /* normal code */ - newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), add, ent_mode, 0); -#endif - newn = new_r_Proj(newn, ent_mode, pn_Load_res); - - } else if (get_entity_owner(ent) != get_glob_type()) { - int offset; - - /* replace Sel by add(obj, const(ent.offset)) */ - newn = get_Sel_ptr(sel); - offset = get_entity_offset(ent); - if (offset != 0) { - ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode); - - tv = new_tarval_from_long(offset, mode_UInt); - cnst = new_r_Const(irg, tv); - newn = new_rd_Add(dbg, bl, newn, cnst, mode); + if (i > 0) + last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int); + + newn = new_rd_Add(dbg, bl, newn, n, mode); } } else { - /* global_type */ - newn = new_rd_SymConst_addr_ent(NULL, irg, mode, ent, firm_unknown_type); + /* no array type */ + ir_mode *idx_mode = get_irn_mode(index); + ir_tarval *tv = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode); + + newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel), + new_rd_Mul(dbg, bl, index, + new_r_Const(irg, tv), + idx_mode), + mode); + } + } else if (is_Method_type(get_entity_type(ent)) && is_Class_type(owner)) { + /* We need an additional load when accessing methods from a dispatch + * table. + * Matze TODO: Is this really still used? At least liboo does its own + * lowering of Method-Sels... 
+ */ + ir_mode *ent_mode = get_type_mode(get_entity_type(ent)); + int offset = get_entity_offset(ent); + ir_mode *mode_Int = get_reference_mode_signed_eq(mode); + ir_tarval *tv = new_tarval_from_long(offset, mode_Int); + ir_node *cnst = new_rd_Const(dbg, irg, tv); + ir_node *add = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode); + ir_node *mem = get_Sel_mem(sel); + newn = new_rd_Load(dbg, bl, mem, add, ent_mode, cons_none); + newn = new_r_Proj(newn, ent_mode, pn_Load_res); + } else { + int offset = get_entity_offset(ent); + + /* replace Sel by add(obj, const(ent.offset)) */ + newn = get_Sel_ptr(sel); + if (offset != 0) { + ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode); + ir_tarval *tv = new_tarval_from_long(offset, mode_UInt); + ir_node *cnst = new_r_Const(irg, tv); + newn = new_rd_Add(dbg, bl, newn, cnst, mode); } } + /* run the hooks */ hook_lower(sel); exchange(sel, newn); -} /* lower_sel */ +} /** * Lower a all possible SymConst nodes. @@ -247,9 +207,10 @@ static void lower_symconst(ir_node *symc) ir_node *newn; ir_type *tp; ir_entity *ent; - tarval *tv; + ir_tarval *tv; ir_enum_const *ec; ir_mode *mode; + ir_graph *irg; switch (get_SymConst_kind(symc)) { case symconst_type_tag: @@ -257,10 +218,11 @@ static void lower_symconst(ir_node *symc) break; case symconst_type_size: /* rewrite the SymConst node by a Const node */ + irg = get_irn_irg(symc); tp = get_SymConst_type(symc); assert(get_type_state(tp) == layout_fixed); mode = get_irn_mode(symc); - newn = new_Const_long(mode, get_type_size_bytes(tp)); + newn = new_r_Const_long(irg, mode, get_type_size_bytes(tp)); assert(newn); /* run the hooks */ hook_lower(symc); @@ -268,10 +230,11 @@ static void lower_symconst(ir_node *symc) break; case symconst_type_align: /* rewrite the SymConst node by a Const node */ + irg = get_irn_irg(symc); tp = get_SymConst_type(symc); assert(get_type_state(tp) == layout_fixed); mode = get_irn_mode(symc); - newn = new_Const_long(mode, get_type_alignment_bytes(tp)); + newn = new_r_Const_long(irg, mode, get_type_alignment_bytes(tp)); assert(newn); /* run the hooks */ hook_lower(symc); @@ -282,10 +245,11 @@ static void lower_symconst(ir_node *symc) break; case symconst_ofs_ent: /* rewrite the SymConst node by a Const node */ + irg = get_irn_irg(symc); ent = get_SymConst_entity(symc); assert(get_type_state(get_entity_type(ent)) == layout_fixed); mode = get_irn_mode(symc); - newn = new_Const_long(mode, get_entity_offset(ent)); + newn = new_r_Const_long(irg, mode, get_entity_offset(ent)); assert(newn); /* run the hooks */ hook_lower(symc); @@ -293,10 +257,11 @@ static void lower_symconst(ir_node *symc) break; case symconst_enum_const: /* rewrite the SymConst node by a Const node */ + irg = get_irn_irg(symc); ec = get_SymConst_enum(symc); assert(get_type_state(get_enumeration_owner(ec)) == layout_fixed); tv = get_enumeration_value(ec); - newn = new_Const(tv); + newn = new_r_Const(irg, tv); assert(newn); /* run the hooks */ hook_lower(symc); @@ -333,6 +298,7 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) { ir_node *sel = get_Load_ptr(load); ir_node *block, *n_proj, *res, *ptr; + ir_graph *irg; ir_entity *ent; ir_type *bf_type; ir_mode *bf_mode, *mode; @@ -372,9 +338,10 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) */ /* abandon bitfield sel */ + irg = get_irn_irg(sel); ptr = get_Sel_ptr(sel); db = get_irn_dbg_info(sel); - ptr = new_rd_Add(db, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr)); + ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, 
mode_Is, offset), get_irn_mode(ptr)); set_Load_ptr(load, ptr); set_Load_mode(load, mode); @@ -391,20 +358,20 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) int shift_count_down = bits - bf_bits; if (shift_count_up) { - res = new_r_Shl(block, res, new_Const_long(mode_Iu, shift_count_up), mode); + res = new_r_Shl(block, res, new_r_Const_long(irg, mode_Iu, shift_count_up), mode); } if (shift_count_down) { - res = new_r_Shrs(block, res, new_Const_long(mode_Iu, shift_count_down), mode); + res = new_r_Shrs(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode); } } else { /* unsigned */ int shift_count_down = bit_offset; unsigned mask = ((unsigned)-1) >> (bits - bf_bits); if (shift_count_down) { - res = new_r_Shr(block, res, new_Const_long(mode_Iu, shift_count_down), mode); + res = new_r_Shr(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode); } if (bits != bf_bits) { - res = new_r_And(block, res, new_Const_long(mode, mask), mode); + res = new_r_And(block, res, new_r_Const_long(irg, mode, mask), mode); } } @@ -424,6 +391,7 @@ static void lower_bitfields_stores(ir_node *store) ir_type *bf_type; ir_mode *bf_mode, *mode; ir_node *mem, *irn, *block; + ir_graph *irg; unsigned mask, neg_mask; int bf_bits, bits_mask, offset, bit_offset; dbg_info *db; @@ -467,23 +435,24 @@ static void lower_bitfields_stores(ir_node *store) neg_mask = ~mask; /* abandon bitfield sel */ + irg = get_irn_irg(sel); ptr = get_Sel_ptr(sel); db = get_irn_dbg_info(sel); - ptr = new_rd_Add(db, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr)); + ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr)); if (neg_mask) { /* there are some bits, normal case */ - irn = new_r_Load(block, mem, ptr, mode, 0); + irn = new_r_Load(block, mem, ptr, mode, cons_none); mem = new_r_Proj(irn, mode_M, pn_Load_M); irn = new_r_Proj(irn, mode, pn_Load_res); - irn = new_r_And(block, irn, new_Const_long(mode, neg_mask), mode); + irn = new_r_And(block, irn, new_r_Const_long(irg, mode, neg_mask), mode); if (bit_offset > 0) { - value = new_r_Shl(block, value, new_Const_long(mode_Iu, bit_offset), mode); + value = new_r_Shl(block, value, new_r_Const_long(irg, mode_Iu, bit_offset), mode); } - value = new_r_And(block, value, new_Const_long(mode, mask), mode); + value = new_r_And(block, value, new_r_Const_long(irg, mode, mask), mode); value = new_r_Or(block, value, irn, mode); } @@ -493,24 +462,6 @@ static void lower_bitfields_stores(ir_node *store) set_Store_ptr(store, ptr); } /* lower_bitfields_stores */ -/** - * Lowers unaligned Loads. - */ -static void lower_unaligned_Load(ir_node *load) -{ - (void) load; - /* NYI */ -} - -/** - * Lowers unaligned Stores - */ -static void lower_unaligned_Store(ir_node *store) -{ - (void) store; - /* NYI */ -} - /** * lowers IR-nodes, called from walker */ @@ -524,14 +475,6 @@ static void lower_irnode(ir_node *irn, void *env) case iro_SymConst: lower_symconst(irn); break; - case iro_Load: - if (env != NULL && get_Load_align(irn) == align_non_aligned) - lower_unaligned_Load(irn); - break; - case iro_Store: - if (env != NULL && get_Store_align(irn) == align_non_aligned) - lower_unaligned_Store(irn); - break; case iro_Cast: exchange(irn, get_Cast_op(irn)); break; @@ -580,21 +523,19 @@ void lower_highlevel_graph(ir_graph *irg, int lower_bitfields) /* Finally: lower SymConst-Size and Sel nodes, Casts, unaligned Load/Stores. 
*/ irg_walk_graph(irg, NULL, lower_irnode, NULL); - - set_irg_outs_inconsistent(irg); } -struct pass_t { +typedef struct pass_t { ir_graph_pass_t pass; int lower_bitfields; -}; +} pass_t; /** * Wrapper for running lower_highlevel_graph() as an ir_graph pass. */ static int lower_highlevel_graph_wrapper(ir_graph *irg, void *context) { - struct pass_t *pass = context; + pass_t *pass = (pass_t*)context; lower_highlevel_graph(irg, pass->lower_bitfields); return 0; @@ -602,7 +543,7 @@ static int lower_highlevel_graph_wrapper(ir_graph *irg, void *context) ir_graph_pass_t *lower_highlevel_graph_pass(const char *name, int lower_bitfields) { - struct pass_t *pass = XMALLOCZ(struct pass_t); + pass_t *pass = XMALLOCZ(pass_t); pass->lower_bitfields = lower_bitfields; return def_graph_pass_constructor( @@ -629,7 +570,7 @@ ir_prog_pass_t *lower_const_code_pass(const char *name) */ void lower_highlevel(int lower_bitfields) { - int i, n; + size_t i, n; n = get_irp_n_irgs(); for (i = 0; i < n; ++i) {
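
For reference, the address arithmetic that the rewritten array branch of lower_sel() emits as Const/Conv/Sub/Mul/Add nodes can be followed on plain integers. The sketch below is illustrative only and not part of the patch: the helper name array_byte_offset is made up, and it assumes (as the elms = ub - lb computation in the patch suggests) that upper bounds are exclusive element counts.

#include <stddef.h>
#include <stdio.h>

/* Minimal sketch of the lowering's offset recurrence:
 *   off_0 = sizeof(array_element_type)
 *   off_i = (u_i - l_i) * off_{i-1}   ; i >= 1
 * walking from the innermost dimension outwards, like the
 * for (i = dims; i > 0;) loop in the patched lower_sel().
 */
static long array_byte_offset(long elem_size, size_t dims,
                              const long *lb, const long *ub,
                              const long *idx)
{
	long off = 0;
	long last_size = elem_size;  /* off_0 = sizeof(array_element_type) */
	size_t i;

	for (i = dims; i > 0;) {
		--i;
		long ind = idx[i] - lb[i];  /* normalize the index by the lower bound */
		off += ind * last_size;     /* n = ind * off_{i-1} */
		if (i > 0)                  /* off_i = (u_i - l_i) * off_{i-1} */
			last_size *= ub[i] - lb[i];
	}
	return off;
}

int main(void)
{
	/* int a[4][5], access a[2][3]: offset = (2 * 5 + 3) * sizeof(int) */
	const long lb[2] = { 0, 0 }, ub[2] = { 4, 5 }, idx[2] = { 2, 3 };
	printf("%ld\n", array_byte_offset((long)sizeof(int), 2, lb, ub, idx));
	return 0;
}

The real code performs the same computation on IR rather than on integers: each step builds a Sub for the bound normalization, a Mul against the accumulated element size, and an Add onto the base pointer, visiting dimensions through the map[] order table and skipping the Sub when the lower bound is Unknown.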