X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Flower%2Flower_hl.c;h=38d1e4b168be9745c274f570f312acbbe3b0f3aa;hb=eb0672129c35f41c7d1e9efdeb4835585947d33b;hp=09eb96068cacc8a022d6f4b716f575f879c47d82;hpb=5fa28fab4163f46880424becee219225e0edcb06;p=libfirm

diff --git a/ir/lower/lower_hl.c b/ir/lower/lower_hl.c
index 09eb96068..38d1e4b16 100644
--- a/ir/lower/lower_hl.c
+++ b/ir/lower/lower_hl.c
@@ -352,6 +352,10 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
 	ent     = get_Sel_entity(sel);
 	bf_type = get_entity_type(ent);
 
+	/* must be a bitfield type */
+	if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
+		return;
+
 	/* We have a bitfield access if either a bit offset is given or the size is not integral. */
 	bf_mode = get_type_mode(bf_type);
 
@@ -363,9 +367,11 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
 	bf_bits    = get_mode_size_bits(bf_mode);
 	bit_offset = get_entity_offset_bits_remainder(ent);
 
+	if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_Load_mode(load))
+		return;
+
 	bits   = get_mode_size_bits(mode);
 	offset = get_entity_offset(ent);
-	bit_offset += 8 * offset;
 
 	/*
 	 * ok, here we are: now convert the Proj_mode_bf(Load) into And(Shr(Proj_mode(Load)) for unsigned
@@ -375,6 +381,7 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
 	/* abandon bitfield sel */
 	ptr = get_Sel_ptr(sel);
 	db  = get_irn_dbg_info(sel);
+	ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
 
 	set_Load_ptr(load, ptr);
 	set_Load_mode(load, mode);
@@ -438,6 +445,10 @@ static void lower_bitfields_stores(ir_node *store) {
 	ent     = get_Sel_entity(sel);
 	bf_type = get_entity_type(ent);
 
+	/* must be a bitfield type */
+	if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
+		return;
+
 	/* We have a bitfield access if either a bit offset is given or the size is not integral. */
 	bf_mode = get_type_mode(bf_type);
 
@@ -451,12 +462,14 @@ static void lower_bitfields_stores(ir_node *store) {
 	bf_bits    = get_mode_size_bits(bf_mode);
 	bit_offset = get_entity_offset_bits_remainder(ent);
 
+	if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_irn_mode(value))
+		return;
+
 	/*
 	 * ok, here we are: now convert the Store(Sel(), value) into Or(And(Load(Sel),c), And(Value,c))
 	 */
 	mem    = get_Store_mem(store);
 	offset = get_entity_offset(ent);
-	bit_offset += 8 * offset;
 
 	bits_mask = get_mode_size_bits(mode) - bf_bits;
 	mask = ((unsigned)-1) >> bits_mask;
@@ -466,6 +479,7 @@ static void lower_bitfields_stores(ir_node *store) {
 	/* abandon bitfield sel */
 	ptr = get_Sel_ptr(sel);
 	db  = get_irn_dbg_info(sel);
+	ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
 
 	if (neg_mask) {
 		/* there are some bits, normal case */
@@ -492,6 +506,20 @@ static void lower_bitfields_stores(ir_node *store) {
 	set_Store_ptr(store, ptr);
 }  /* lower_bitfields_stores */
 
+/**
+ * Lowers unaligned Loads.
+ */
+static void lower_unaligned_Load(ir_node *load) {
+	/* NYI: not yet implemented */
+}
+
+/**
+ * Lowers unaligned Stores.
+ */
+static void lower_unaligned_Store(ir_node *store) {
+	/* NYI: not yet implemented */
+}
+
 /**
  * lowers IR-nodes, called from walker
  */
@@ -504,6 +532,14 @@ static void lower_irnode(ir_node *irn, void *env) {
 	case iro_SymConst:
 		lower_symconst(irn);
 		break;
+	case iro_Load:
+		if (env != NULL && get_Load_align(irn) == align_non_aligned)
+			lower_unaligned_Load(irn);
+		break;
+	case iro_Store:
+		if (env != NULL && get_Store_align(irn) == align_non_aligned)
+			lower_unaligned_Store(irn);
+		break;
 	default:
 		break;
 	}
@@ -549,7 +585,7 @@ void lower_highlevel(void) {
 		/* First step: lower bitfield access; must be run as long as Sels still exist. */
 		irg_walk_graph(irg, NULL, lower_bf_access, NULL);
 
-		/* Finally: lower SymConst-Size and Sel nodes. */
+		/* Finally: lower SymConst-Size and Sel nodes, and unaligned Loads/Stores. */
 		irg_walk_graph(irg, NULL, lower_irnode, NULL);
 
 		set_irg_phase_low(irg);
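
A note on what the bitfield hunks above do: the removed "bit_offset += 8 * offset;" lines and the added new_rd_Add(...) calls move the entity's byte offset out of the bit arithmetic and into an explicit address computation, and the new early exits skip the lowering entirely when the access is already a plain, aligned load or store. The node shapes named in the source comments, And(Shr(Proj_mode(Load))) for an unsigned load and Or(And(Load(Sel),c), And(Value,c)) for a store, are ordinary shift-and-mask arithmetic on the container word. The standalone C sketch below models that arithmetic only; it is not libfirm API, the helper names are invented, a 32-bit container word is assumed, and the signed case assumes an arithmetic right shift, which the C standard leaves implementation-defined.

#include <stdio.h>

/* Illustrative model of the lowered IR, not libfirm code.
 * Assumes a 32-bit container word. */

/* Unsigned bitfield load: And(Shr(word, bit_offset), mask).
 * The mask is built as in the patch: ((unsigned)-1) >> bits_mask,
 * with bits_mask = container size - field size. */
static unsigned bf_load_unsigned(unsigned word, unsigned bit_offset, unsigned bf_bits)
{
    unsigned mask = ((unsigned)-1) >> (32 - bf_bits);
    return (word >> bit_offset) & mask;
}

/* Signed bitfield load, modeled as a Shl that pushes the field's top bit
 * to bit 31, then an arithmetic Shr that sign-extends while shifting back. */
static int bf_load_signed(unsigned word, unsigned bit_offset, unsigned bf_bits)
{
    unsigned up = 32 - (bit_offset + bf_bits);
    return (int)(word << up) >> (32 - bf_bits);  /* implementation-defined but common */
}

/* Bitfield store: Or(And(old, ~c), And(Shl(value, bit_offset), c)),
 * where c is the field mask shifted into place; the two And masks are
 * complements, matching the Or(And(...), And(...)) shape in the comments. */
static unsigned bf_store(unsigned word, unsigned value, unsigned bit_offset, unsigned bf_bits)
{
    unsigned c = (((unsigned)-1) >> (32 - bf_bits)) << bit_offset;
    return (word & ~c) | ((value << bit_offset) & c);
}

int main(void)
{
    unsigned word = 0xFFFFFFFFu;

    word = bf_store(word, 5, 3, 4);               /* 4-bit field at bit offset 3 */
    printf("%u\n", bf_load_unsigned(word, 3, 4)); /* prints 5 */

    word = bf_store(word, (unsigned)-3, 3, 4);    /* store a negative value */
    printf("%d\n", bf_load_signed(word, 3, 4));   /* prints -3 */
    return 0;
}

Seen this way, the purpose of the new early exits is visible: when bit_offset is 0, bf_bits is an integral size, and the field's mode matches the access mode, the shift and mask steps degenerate to the identity, so the Load or Store can be left untouched.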