X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Flower%2Flower_hl.c;h=f1263f8f1942481e13d8dc9c79d61215931fd316;hb=f600524c363556c785b85c0dff8792d04f73e252;hp=f30e76dbe02cab126a87ee0a18d7e003391ee891;hpb=300c3b362f0625521b30e6dc378139f129d9dc9f;p=libfirm

diff --git a/ir/lower/lower_hl.c b/ir/lower/lower_hl.c
index f30e76dbe..f1263f8f1 100644
--- a/ir/lower/lower_hl.c
+++ b/ir/lower/lower_hl.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
  *
  * This file is part of libFirm.
  *
@@ -52,13 +52,14 @@ static void lower_sel(ir_node *sel) {
 
 	assert(is_Sel(sel));
 
+	/* Do not lower frame type/global offset table access: must be lowered by the backend. */
+	ptr = get_Sel_ptr(sel);
+	if (ptr == get_irg_frame(current_ir_graph))
+		return;
+
 	ent = get_Sel_entity(sel);
 	owner = get_entity_owner(ent);
-	/* Do not lower frame type access: must be lowered by the backend. */
-	if (is_frame_type(owner))
-		return;
-
 	/*
 	 * Cannot handle value param entities here.
 	 * Must be lowered by the backend.
 	 */
@@ -66,7 +67,6 @@ static void lower_sel(ir_node *sel) {
 	if (is_value_param_type(owner))
 		return;
 
-	ptr = get_Sel_ptr(sel);
 	dbg = get_irn_dbg_info(sel);
 	mode = get_irn_mode(sel);
 
@@ -79,7 +79,7 @@ static void lower_sel(ir_node *sel) {
 		sym.entity_p = ent;
 		bl = get_nodes_block(sel);
 
-		cnst = new_rd_SymConst(dbg, irg, bl, sym, symconst_addr_ent);
+		cnst = new_rd_SymConst(dbg, irg, bl, mode, sym, symconst_addr_ent);
 		newn = new_rd_Add(dbg, irg, bl, ptr, cnst, mode);
 	} else {
 		/* not TLS */
@@ -97,7 +97,7 @@ static void lower_sel(ir_node *sel) {
 			basemode = mode_P_data;
 
 		assert(basemode && "no mode for lowering Sel");
-		assert((get_mode_size_bytes(basemode) != -1) && "can not deal with unorthodox modes");
+		assert((get_mode_size_bits(basemode) % 8 == 0) && "can not deal with unorthodox modes");
 		index = get_Sel_index(sel, 0);
 
 		if (is_Array_type(owner)) {
@@ -142,12 +142,12 @@ static void lower_sel(ir_node *sel) {
 				ub = get_array_upper_bound(arr_ty, dim);
 				assert(irg == current_ir_graph);
 
-				if (get_irn_op(lb) != op_Unknown)
+				if (! is_Unknown(lb))
 					lb = new_rd_Conv(dbg, irg, bl, copy_const_value(get_irn_dbg_info(sel), lb), mode_Int);
 				else
 					lb = NULL;
 
-				if (get_irn_op(ub) != op_Unknown)
+				if (! is_Unknown(ub))
 					ub = new_rd_Conv(dbg, irg, bl, copy_const_value(get_irn_dbg_info(sel), ub), mode_Int);
 				else
 					ub = NULL;
@@ -157,8 +157,8 @@ static void lower_sel(ir_node *sel) {
 				 * bounds have to be set in the non-last dimension.
 				 */
 				if (i > 0) {
-					assert(lb && "lower bound has to be set in multi-dim array");
-					assert(lb && "upper bound has to be set in multi-dim array");
+					assert(lb != NULL && "lower bound has to be set in multi-dim array");
+					assert(ub != NULL && "upper bound has to be set in multi-dim array");
 
 					/* Elements in one Dimension */
 					elms = new_rd_Sub(dbg, irg, bl, ub, lb, mode_Int);
@@ -170,7 +170,7 @@ static void lower_sel(ir_node *sel) {
 					 * Normalize index, id lower bound is set, also assume
 					 * lower bound == 0
 					 */
-					if (lb)
+					if (lb != NULL)
 						ind = new_rd_Sub(dbg, irg, bl, ind, lb, mode_Int);
 
 					n = new_rd_Mul(dbg, irg, bl, ind, last_size, mode_Int);
@@ -195,9 +195,9 @@ static void lower_sel(ir_node *sel) {
 			          mode);
 		}
 	} else if (is_Method_type(get_entity_type(ent)) &&
-	          is_Class_type(owner) &&
-	          (owner != get_glob_type()) &&
-	          (!is_frame_type(owner))) {
+	           is_Class_type(owner) &&
+	           (owner != get_glob_type()) &&
+	           (!is_frame_type(owner))) {
 		ir_node *add;
 		ir_mode *ent_mode = get_type_mode(get_entity_type(ent));
 
@@ -214,16 +214,24 @@ static void lower_sel(ir_node *sel) {
 #endif
 			newn = new_r_Proj(irg, bl, newn, ent_mode, pn_Load_res);
 
-		} else if (get_entity_owner(ent) != get_glob_type()) {
+		} else if (get_entity_owner(ent) != get_glob_type()) {
+			int offset;
+
 			/* replace Sel by add(obj, const(ent.offset)) */
 			assert(!(get_entity_allocation(ent) == allocation_static &&
 				(get_entity_n_overwrites(ent) == 0 && get_entity_n_overwrittenby(ent) == 0)));
-			tv = new_tarval_from_long(get_entity_offset(ent), mode_Int);
-			cnst = new_r_Const(irg, get_irg_start_block(irg), mode_Int, tv);
-			newn = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel), cnst, mode);
+			newn = get_Sel_ptr(sel);
+			offset = get_entity_offset(ent);
+			if (offset != 0) {
+				ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode);
+
+				tv = new_tarval_from_long(offset, mode_UInt);
+				cnst = new_r_Const(irg, get_irg_start_block(irg), mode_UInt, tv);
+				newn = new_rd_Add(dbg, irg, bl, newn, cnst, mode);
+			}
 		} else {
 			/* global_type */
-			newn = new_rd_SymConst_addr_ent(NULL, current_ir_graph, ent, firm_unknown_type);
+			newn = new_rd_SymConst_addr_ent(NULL, current_ir_graph, mode, ent, firm_unknown_type);
 		}
 	}
 	/* run the hooks */
@@ -352,6 +360,10 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
 	ent = get_Sel_entity(sel);
 	bf_type = get_entity_type(ent);
 
+	/* must be a bitfield type */
+	if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
+		return;
+
 	/* We have a bitfield access, if either a bit offset is given, or the size is not integral.
 	 */
 	bf_mode = get_type_mode(bf_type);
@@ -368,7 +380,6 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
 
 	bits   = get_mode_size_bits(mode);
 	offset = get_entity_offset(ent);
-	bit_offset += 8 * offset;
 
 	/*
 	 * ok, here we are: now convert the Proj_mode_bf(Load) into And(Shr(Proj_mode(Load)) for unsigned
@@ -378,6 +389,7 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
 	/* abandon bitfield sel */
 	ptr = get_Sel_ptr(sel);
 	db  = get_irn_dbg_info(sel);
+	ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
 
 	set_Load_ptr(load, ptr);
 	set_Load_mode(load, mode);
@@ -441,6 +453,10 @@ static void lower_bitfields_stores(ir_node *store) {
 	ent = get_Sel_entity(sel);
 	bf_type = get_entity_type(ent);
 
+	/* must be a bitfield type */
+	if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
+		return;
+
 	/* We have a bitfield access, if either a bit offset is given, or the size is not integral.
 	 */
 	bf_mode = get_type_mode(bf_type);
@@ -462,7 +478,6 @@ static void lower_bitfields_stores(ir_node *store) {
 	 */
 	mem    = get_Store_mem(store);
 	offset = get_entity_offset(ent);
-	bit_offset += 8 * offset;
 
 	bits_mask = get_mode_size_bits(mode) - bf_bits;
 	mask = ((unsigned)-1) >> bits_mask;
@@ -472,6 +487,7 @@ static void lower_bitfields_stores(ir_node *store) {
 	/* abandon bitfield sel */
 	ptr = get_Sel_ptr(sel);
 	db  = get_irn_dbg_info(sel);
+	ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
 
 	if (neg_mask) {
 		/* there are some bits, normal case */
@@ -498,6 +514,22 @@ static void lower_bitfields_stores(ir_node *store) {
 	set_Store_ptr(store, ptr);
 } /* lower_bitfields_stores */
 
+/**
+ * Lowers unaligned Loads.
+ */
+static void lower_unaligned_Load(ir_node *load) {
+	(void) load;
+	/* NYI */
+}
+
+/**
+ * Lowers unaligned Stores
+ */
+static void lower_unaligned_Store(ir_node *store) {
+	(void) store;
+	/* NYI */
+}
+
 /**
  * lowers IR-nodes, called from walker
  */
@@ -510,6 +542,17 @@ static void lower_irnode(ir_node *irn, void *env) {
 	case iro_SymConst:
 		lower_symconst(irn);
 		break;
+	case iro_Load:
+		if (env != NULL && get_Load_align(irn) == align_non_aligned)
+			lower_unaligned_Load(irn);
+		break;
+	case iro_Store:
+		if (env != NULL && get_Store_align(irn) == align_non_aligned)
+			lower_unaligned_Store(irn);
+		break;
+	case iro_Cast:
+		exchange(irn, get_Cast_op(irn));
+		break;
 	default:
 		break;
 	}
@@ -545,19 +588,37 @@ static void lower_bf_access(ir_node *irn, void *env) {
  * Replace Sel nodes by address computation. Also resolves array access.
  * Handle Bitfields by added And/Or calculations.
  */
-void lower_highlevel(void) {
+void lower_highlevel_graph(ir_graph *irg, int lower_bitfields) {
+
+	if (lower_bitfields) {
+		/* First step: lower bitfield access: must be run as long as Sels still
+		 * exists. */
+		irg_walk_graph(irg, NULL, lower_bf_access, NULL);
+	}
+
+	/* Finally: lower SymConst-Size and Sel nodes, Casts, unaligned Load/Stores. */
+	irg_walk_graph(irg, NULL, lower_irnode, NULL);
+} /* lower_highlevel_graph */
+
+/*
+ * does the same as lower_highlevel() for all nodes on the const code irg
+ */
+void lower_const_code(void) {
+	walk_const_code(NULL, lower_irnode, NULL);
+} /* lower_const_code */
+
+/*
+ * Replaces SymConsts by a real constant if possible.
+ * Replace Sel nodes by address computation. Also resolves array access.
+ * Handle Bitfields by added And/Or calculations.
+ */
+void lower_highlevel(int lower_bitfields) {
 	int i, n;
 
 	n = get_irp_n_irgs();
 	for (i = 0; i < n; ++i) {
 		ir_graph *irg = get_irp_irg(i);
-
-		/* First step: lower bitfield access: must be run as long as Sels still exists. */
-		irg_walk_graph(irg, NULL, lower_bf_access, NULL);
-
-		/* Finally: lower SymConst-Size and Sel nodes. */
-		irg_walk_graph(irg, NULL, lower_irnode, NULL);
-
-		set_irg_phase_low(irg);
+		lower_highlevel_graph(irg, lower_bitfields);
 	}
+	lower_const_code();
 } /* lower_highlevel */
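
Usage sketch for the new entry points (illustrative, not part of the patch): lower_highlevel() now takes a lower_bitfields flag, per-graph lowering is exposed as lower_highlevel_graph(), and lower_const_code() lowers the nodes kept on the const-code irg. A minimal caller could look like the following, assuming the declarations are reachable through libFirm's umbrella header <libfirm/firm.h>; the header choice and the wrapper name lower_all_graphs are assumptions for the sketch, while get_irp_n_irgs(), get_irp_irg(), lower_highlevel_graph() and lower_const_code() are taken from the diff above.

/* Hypothetical driver; mirrors what the new lower_highlevel() does per the
 * diff above.  Header and wrapper name are assumptions made for this sketch. */
#include <libfirm/firm.h>

static void lower_all_graphs(void)
{
	int i, n = get_irp_n_irgs();

	/* Lower Sel, SymConst-Size and Cast nodes (and, because the flag is
	 * non-zero, bitfield Loads/Stores) in every graph of the program. */
	for (i = 0; i < n; ++i)
		lower_highlevel_graph(get_irp_irg(i), /* lower_bitfields = */ 1);

	/* Nodes hanging off the const-code irg need the same treatment. */
	lower_const_code();
}

Passing 0 as lower_bitfields skips the lower_bf_access walk, mirroring the if (lower_bitfields) guard added in lower_highlevel_graph().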