/* run the hooks */
hook_lower(symc);
exchange(symc, newn);
- break;
+ break;
+ case symconst_label:
+ /* leave */
+ break;
default:
assert(!"unknown SymConst kind");
ent = get_Sel_entity(sel);
bf_type = get_entity_type(ent);
+ /* must be a bitfield type */
+ if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
+ return;
+
/* We have a bitfield access, if either a bit offset is given, or
the size is not integral. */
bf_mode = get_type_mode(bf_type);
block = get_nodes_block(proj);
bf_bits = get_mode_size_bits(bf_mode);
bit_offset = get_entity_offset_bits_remainder(ent);
- if (bit_offset == 0 && is_integral_size(bf_bits)) {
- if (mode != bf_mode) {
- /* we have an integral size and can replace the load by a load
- of a smaller mode */
- set_Load_mode(load, bf_mode);
- db = get_irn_dbg_info(load);
- res = new_rd_Proj(get_irn_dbg_info(proj), current_ir_graph, block, load, bf_mode, pn_Load_res);
- res = new_rd_Conv(db, current_ir_graph, block, res, mode);
-
- exchange(proj, res);
- }
+
+ if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_Load_mode(load))
return;
- }
bits = get_mode_size_bits(mode);
offset = get_entity_offset(ent);
ent = get_Sel_entity(sel);
bf_type = get_entity_type(ent);
+ /* must be a bitfield type */
+ if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
+ return;
+
/* We have a bitfield access, if either a bit offset is given, or
the size is not integral. */
bf_mode = get_type_mode(bf_type);
if (! bf_mode)
return;
+ value = get_Store_value(store);
+ mode = get_irn_mode(value);
+ block = get_nodes_block(store);
+
bf_bits = get_mode_size_bits(bf_mode);
bit_offset = get_entity_offset_bits_remainder(ent);
- if (bit_offset == 0 && is_integral_size(bf_bits))
- return;
- value = get_Store_value(store);
- mode = get_irn_mode(value);
+ if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_irn_mode(value))
+ return;
/*
* ok, here we are: now convert the Store(Sel(), value) into Or(And(Load(Sel),c), And(Value,c))
*/
mem = get_Store_mem(store);
- block = get_nodes_block(store);
- bit_offset = get_entity_offset_bits_remainder(ent);
offset = get_entity_offset(ent);
bits_mask = get_mode_size_bits(mode) - bf_bits;
set_Store_ptr(store, ptr);
} /* lower_bitfields_stores */
+/**
+ * Lowers unaligned Loads. Placeholder: currently a no-op stub.
+ */
+static void lower_unaligned_Load(ir_node *load) {
+  /* NYI: splitting a non-aligned Load into aligned partial loads */
+}
+
+/**
+ * Lowers unaligned Stores. Placeholder: currently a no-op stub.
+ */
+static void lower_unaligned_Store(ir_node *store) {
+  /* NYI: splitting a non-aligned Store into aligned partial stores */
+}
+
/**
* lowers IR-nodes, called from walker
*/
case iro_SymConst:
lower_symconst(irn);
break;
+ case iro_Load:
+ if (env != NULL && get_Load_align(irn) == align_non_aligned)
+ lower_unaligned_Load(irn);
+ break;
+ case iro_Store:
+ if (env != NULL && get_Store_align(irn) == align_non_aligned)
+ lower_unaligned_Store(irn);
+ break;
default:
break;
}
/* First step: lower bitfield access: must be run as long as Sels still exists. */
irg_walk_graph(irg, NULL, lower_bf_access, NULL);
- /* Finally: lower SymConst-Size and Sel nodes. */
+ /* Finally: lower SymConst-Size and Sel nodes, unaligned Load/Stores. */
irg_walk_graph(irg, NULL, lower_irnode, NULL);
set_irg_phase_low(irg);