/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
#include "irgwalk.h"
/**
- * Lower a Sel node. Do not touch Sels accessing entities on teh frame type.
+ * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
*/
static void lower_sel(ir_node *sel) {
ir_graph *irg = current_ir_graph;
sym.entity_p = ent;
bl = get_nodes_block(sel);
- cnst = new_rd_SymConst(dbg, irg, bl, sym, symconst_addr_ent);
+ cnst = new_rd_SymConst(dbg, irg, bl, mode, sym, symconst_addr_ent);
newn = new_rd_Add(dbg, irg, bl, ptr, cnst, mode);
} else {
/* not TLS */
basemode = mode_P_data;
assert(basemode && "no mode for lowering Sel");
- assert((get_mode_size_bytes(basemode) != -1) && "can not deal with unorthodox modes");
+ assert((get_mode_size_bits(basemode) % 8 == 0) && "can not deal with unorthodox modes");
index = get_Sel_index(sel, 0);
if (is_Array_type(owner)) {
mode);
}
} else if (is_Method_type(get_entity_type(ent)) &&
- is_Class_type(owner) &&
- (owner != get_glob_type()) &&
- (!is_frame_type(owner))) {
+ is_Class_type(owner) &&
+ (owner != get_glob_type()) &&
+ (!is_frame_type(owner))) {
ir_node *add;
ir_mode *ent_mode = get_type_mode(get_entity_type(ent));
newn = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel), cnst, mode);
} else {
/* global_type */
- newn = new_rd_SymConst_addr_ent(NULL, current_ir_graph, ent, firm_unknown_type);
+ newn = new_rd_SymConst_addr_ent(NULL, current_ir_graph, mode, ent, firm_unknown_type);
}
}
/* run the hooks */
/* run the hooks */
hook_lower(symc);
exchange(symc, newn);
- break;
+ break;
+ case symconst_label:
+ /* leave */
+ break;
default:
assert(!"unknown SymConst kind");
ent = get_Sel_entity(sel);
bf_type = get_entity_type(ent);
+ /* must be a bitfield type */
+ if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
+ return;
+
/* We have a bitfield access, if either a bit offset is given, or
the size is not integral. */
bf_mode = get_type_mode(bf_type);
if (! bf_mode)
return;
+ mode = get_irn_mode(proj);
+ block = get_nodes_block(proj);
bf_bits = get_mode_size_bits(bf_mode);
bit_offset = get_entity_offset_bits_remainder(ent);
- if (bit_offset == 0 && is_integral_size(bf_bits))
+
+ if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_Load_mode(load))
return;
- mode = get_irn_mode(proj);
bits = get_mode_size_bits(mode);
offset = get_entity_offset(ent);
- block = get_nodes_block(proj);
/*
* ok, here we are: now convert the Proj_mode_bf(Load) into And(Shr(Proj_mode(Load)) for unsigned
ent = get_Sel_entity(sel);
bf_type = get_entity_type(ent);
+ /* must be a bitfield type */
+ if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
+ return;
+
/* We have a bitfield access, if either a bit offset is given, or
the size is not integral. */
bf_mode = get_type_mode(bf_type);
if (! bf_mode)
return;
+ value = get_Store_value(store);
+ mode = get_irn_mode(value);
+ block = get_nodes_block(store);
+
bf_bits = get_mode_size_bits(bf_mode);
bit_offset = get_entity_offset_bits_remainder(ent);
- if (bit_offset == 0 && is_integral_size(bf_bits))
- return;
- value = get_Store_value(store);
- mode = get_irn_mode(value);
+ if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_irn_mode(value))
+ return;
/*
* ok, here we are: now convert the Store(Sel(), value) into Or(And(Load(Sel),c), And(Value,c))
*/
mem = get_Store_mem(store);
- block = get_nodes_block(store);
- bit_offset = get_entity_offset_bits_remainder(ent);
offset = get_entity_offset(ent);
bits_mask = get_mode_size_bits(mode) - bf_bits;
set_Store_ptr(store, ptr);
} /* lower_bitfields_stores */
+/**
+ * Lowers unaligned Loads.
+ */
+static void lower_unaligned_Load(ir_node *load) {
+ (void) load;
+ /* NYI */
+}
+
+/**
+ * Lowers unaligned Stores.
+ */
+static void lower_unaligned_Store(ir_node *store) {
+ (void) store;
+ /* NYI */
+}
+
/**
* lowers IR-nodes, called from walker
*/
case iro_SymConst:
lower_symconst(irn);
break;
+ case iro_Load:
+ if (env != NULL && get_Load_align(irn) == align_non_aligned)
+ lower_unaligned_Load(irn);
+ break;
+ case iro_Store:
+ if (env != NULL && get_Store_align(irn) == align_non_aligned)
+ lower_unaligned_Store(irn);
+ break;
+ case iro_Cast:
+ exchange(irn, get_Cast_op(irn));
+ break;
default:
break;
}
* Replace Sel nodes by address computation. Also resolves array access.
* Handle Bitfields by added And/Or calculations.
*/
-void lower_highlevel(void) {
+void lower_highlevel_graph(ir_graph *irg, int lower_bitfields) {
+
+ if(lower_bitfields) {
+ /* First step: lower bitfield access: must be run as long as Sels still
+	 * exist. */
+ irg_walk_graph(irg, NULL, lower_bf_access, NULL);
+ }
+
+ /* Finally: lower SymConst-Size and Sel nodes, Casts, unaligned Load/Stores. */
+ irg_walk_graph(irg, NULL, lower_irnode, NULL);
+ set_irg_phase_low(irg);
+} /* lower_highlevel_graph */
+
+/*
+ * Replaces SymConsts by a real constant if possible.
+ * Replace Sel nodes by address computation. Also resolves array access.
+ * Handle Bitfields by added And/Or calculations.
+ */
+void lower_highlevel(int lower_bitfields) {
int i, n;
n = get_irp_n_irgs();
for (i = 0; i < n; ++i) {
ir_graph *irg = get_irp_irg(i);
-
- /* First step: lower bitfield access: must be run as long as Sels still exists. */
- irg_walk_graph(irg, lower_bf_access, NULL, NULL);
-
- /* Finally: lower SymConst-Size and Sel nodes. */
- irg_walk_graph(irg, lower_irnode, NULL, NULL);
-
- set_irg_phase_low(irg);
+ lower_highlevel_graph(irg, lower_bitfields);
}
} /* lower_highlevel */