/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
* @author Boris Boesler, Goetz Lindenmaier, Michael Beck
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include "lowering.h"
#include "irmode_t.h"
assert(is_Sel(sel));
+ /* Do not lower frame type/global offset table access: must be lowered by the backend. */
+ ptr = get_Sel_ptr(sel);
+ if (ptr == get_irg_frame(current_ir_graph))
+ return;
+
ent = get_Sel_entity(sel);
owner = get_entity_owner(ent);
- /* Do not lower frame type access: must be lowered by the backend. */
- if (is_frame_type(owner))
- return;
-
/*
- * Cannot handle value param entities here.
+ * Cannot handle value param entities or frame type entities here.
* Must be lowered by the backend.
*/
- if (is_value_param_type(owner))
+ if (is_value_param_type(owner) || is_frame_type(owner))
return;
- ptr = get_Sel_ptr(sel);
dbg = get_irn_dbg_info(sel);
mode = get_irn_mode(sel);
sym.entity_p = ent;
bl = get_nodes_block(sel);
- cnst = new_rd_SymConst(dbg, irg, bl, sym, symconst_addr_ent);
- newn = new_rd_Add(dbg, irg, bl, ptr, cnst, mode);
+ cnst = new_rd_SymConst(dbg, irg, mode, sym, symconst_addr_ent);
+ newn = new_rd_Add(dbg, bl, ptr, cnst, mode);
} else {
/* not TLS */
basemode = mode_P_data;
assert(basemode && "no mode for lowering Sel");
- assert((get_mode_size_bytes(basemode) != -1) && "can not deal with unorthodox modes");
+ assert((get_mode_size_bits(basemode) % 8 == 0) && "can not deal with unorthodox modes");
index = get_Sel_index(sel, 0);
if (is_Array_type(owner)) {
- ir_node *last_size;
ir_type *arr_ty = owner;
- int dims = get_array_n_dimensions(arr_ty);
- int *map = alloca(sizeof(int) * dims);
- int i;
+ int dims = get_array_n_dimensions(arr_ty);
+ int *map = ALLOCAN(int, dims);
+ ir_node *last_size;
+ int i;
assert(dims == get_Sel_n_indexs(sel)
&& "array dimension must match number of indices of Sel node");
/* Size of the array element */
tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
- last_size = new_rd_Const(dbg, irg, get_irg_start_block(irg), mode_Int, tv);
+ last_size = new_rd_Const(dbg, irg, tv);
/*
* We compute the offset part of dimension d_i recursively
ub = get_array_upper_bound(arr_ty, dim);
assert(irg == current_ir_graph);
- if (get_irn_op(lb) != op_Unknown)
- lb = new_rd_Conv(dbg, irg, bl, copy_const_value(get_irn_dbg_info(sel), lb), mode_Int);
+ if (! is_Unknown(lb))
+ lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb), mode_Int);
else
lb = NULL;
- if (get_irn_op(ub) != op_Unknown)
- ub = new_rd_Conv(dbg, irg, bl, copy_const_value(get_irn_dbg_info(sel), ub), mode_Int);
+ if (! is_Unknown(ub))
+ ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub), mode_Int);
else
ub = NULL;
* bounds have to be set in the non-last dimension.
*/
if (i > 0) {
- assert(lb && "lower bound has to be set in multi-dim array");
- assert(lb && "upper bound has to be set in multi-dim array");
+ assert(lb != NULL && "lower bound has to be set in multi-dim array");
+ assert(ub != NULL && "upper bound has to be set in multi-dim array");
/* Elements in one Dimension */
- elms = new_rd_Sub(dbg, irg, bl, ub, lb, mode_Int);
+ elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
}
- ind = new_rd_Conv(dbg, irg, bl, get_Sel_index(sel, dim), mode_Int);
+ ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);
/*
 * Normalize the index: if a lower bound is set, subtract it,
 * else assume a lower bound of 0.
*/
- if (lb)
- ind = new_rd_Sub(dbg, irg, bl, ind, lb, mode_Int);
+ if (lb != NULL)
+ ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);
- n = new_rd_Mul(dbg, irg, bl, ind, last_size, mode_Int);
+ n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);
/*
* see comment above.
*/
if (i > 0)
- last_size = new_rd_Mul(dbg, irg, bl, last_size, elms, mode_Int);
+ last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);
- newn = new_rd_Add(dbg, irg, bl, newn, n, mode);
+ newn = new_rd_Add(dbg, bl, newn, n, mode);
}
} else {
/* no array type */
ir_mode *idx_mode = get_irn_mode(index);
tarval *tv = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);
- newn = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel),
- new_rd_Mul(dbg, irg, bl, index,
- new_r_Const(irg, get_irg_start_block(irg), idx_mode, tv),
+ newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
+ new_rd_Mul(dbg, bl, index,
+ new_r_Const(irg, tv),
idx_mode),
mode);
}
} else if (is_Method_type(get_entity_type(ent)) &&
- is_Class_type(owner) &&
- (owner != get_glob_type()) &&
- (!is_frame_type(owner))) {
+ is_Class_type(owner) &&
+ (owner != get_glob_type()) &&
+ (!is_frame_type(owner))) {
ir_node *add;
ir_mode *ent_mode = get_type_mode(get_entity_type(ent));
/* We need an additional load when accessing methods from a dispatch table. */
tv = new_tarval_from_long(get_entity_offset(ent), mode_Int);
- cnst = new_rd_Const(dbg, irg, get_irg_start_block(irg), mode_Int, tv);
- add = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel), cnst, mode);
+ cnst = new_rd_Const(dbg, irg, tv);
+ add = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
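+ /* add holds the address of the dispatch table entry; the Load below fetches the method address from it. */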
#ifdef DO_CACHEOPT /* cacheopt version */
- newn = new_rd_Load(dbg, irg, bl, get_Sel_mem(sel), sel, ent_mode);
+ newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), sel, ent_mode, 0);
cacheopt_map_addrs_register_node(newn);
set_Load_ptr(newn, add);
#else /* normal code */
- newn = new_rd_Load(dbg, irg, bl, get_Sel_mem(sel), add, ent_mode);
+ newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), add, ent_mode, 0);
#endif
- newn = new_r_Proj(irg, bl, newn, ent_mode, pn_Load_res);
+ newn = new_r_Proj(bl, newn, ent_mode, pn_Load_res);
+
+ } else if (get_entity_owner(ent) != get_glob_type()) {
+ int offset;
- } else if (get_entity_owner(ent) != get_glob_type()) {
/* replace Sel by add(obj, const(ent.offset)) */
assert(!(get_entity_allocation(ent) == allocation_static &&
(get_entity_n_overwrites(ent) == 0 && get_entity_n_overwrittenby(ent) == 0)));
- tv = new_tarval_from_long(get_entity_offset(ent), mode_Int);
- cnst = new_r_Const(irg, get_irg_start_block(irg), mode_Int, tv);
- newn = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel), cnst, mode);
+ newn = get_Sel_ptr(sel);
+ offset = get_entity_offset(ent);
+ if (offset != 0) {
+ ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode);
+
+ tv = new_tarval_from_long(offset, mode_UInt);
+ cnst = new_r_Const(irg, tv);
+ newn = new_rd_Add(dbg, bl, newn, cnst, mode);
+ }
} else {
/* global_type */
- newn = new_rd_SymConst_addr_ent(NULL, current_ir_graph, ent, firm_unknown_type);
+ newn = new_rd_SymConst_addr_ent(NULL, irg, mode, ent, firm_unknown_type);
}
}
/* run the hooks */
tp = get_SymConst_type(symc);
assert(get_type_state(tp) == layout_fixed);
mode = get_irn_mode(symc);
- tv = new_tarval_from_long(get_type_size_bytes(tp), mode);
- newn = new_r_Const(current_ir_graph,
- get_irg_start_block(current_ir_graph),
- get_irn_mode(symc), tv);
+ newn = new_Const_long(mode, get_type_size_bytes(tp));
assert(newn);
/* run the hooks */
hook_lower(symc);
tp = get_SymConst_type(symc);
assert(get_type_state(tp) == layout_fixed);
mode = get_irn_mode(symc);
- tv = new_tarval_from_long(get_type_alignment_bytes(tp), mode);
- newn = new_r_Const(current_ir_graph,
- get_irg_start_block(current_ir_graph),
- mode, tv);
+ newn = new_Const_long(mode, get_type_alignment_bytes(tp));
assert(newn);
/* run the hooks */
hook_lower(symc);
ent = get_SymConst_entity(symc);
assert(get_type_state(get_entity_type(ent)) == layout_fixed);
mode = get_irn_mode(symc);
- tv = new_tarval_from_long(get_entity_offset(ent), mode);
- newn = new_r_Const(current_ir_graph,
- get_irg_start_block(current_ir_graph),
- mode, tv);
+ newn = new_Const_long(mode, get_entity_offset(ent));
assert(newn);
/* run the hooks */
hook_lower(symc);
ec = get_SymConst_enum(symc);
assert(get_type_state(get_enumeration_owner(ec)) == layout_fixed);
tv = get_enumeration_value(ec);
- newn = new_r_Const(current_ir_graph,
- get_irg_start_block(current_ir_graph),
- get_irn_mode(symc), tv);
+ newn = new_Const(tv);
assert(newn);
/* run the hooks */
hook_lower(symc);
exchange(symc, newn);
break;
- case symconst_label:
- /* leave */
- break;
default:
assert(!"unknown SymConst kind");
int offset, bit_offset, bits, bf_bits, old_cse;
dbg_info *db;
- if (get_irn_op(sel) != op_Sel)
+ if (!is_Sel(sel))
return;
ent = get_Sel_entity(sel);
bf_type = get_entity_type(ent);
+ /* must be a bitfield type */
+ if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
+ return;
+
/* We have a bitfield access if either a bit offset is given or
   the size is not integral. */
bf_mode = get_type_mode(bf_type);
block = get_nodes_block(proj);
bf_bits = get_mode_size_bits(bf_mode);
bit_offset = get_entity_offset_bits_remainder(ent);
- if (bit_offset == 0 && is_integral_size(bf_bits)) {
- if (mode != bf_mode) {
- /* we have an integral size and can replace the load by a load
- of a smaller mode */
- set_Load_mode(load, bf_mode);
- db = get_irn_dbg_info(load);
- res = new_rd_Proj(get_irn_dbg_info(proj), current_ir_graph, block, load, bf_mode, pn_Load_res);
- res = new_rd_Conv(db, current_ir_graph, block, res, mode);
-
- exchange(proj, res);
- }
+
+ if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_Load_mode(load))
return;
- }
bits = get_mode_size_bits(mode);
offset = get_entity_offset(ent);
/* abandon bitfield sel */
ptr = get_Sel_ptr(sel);
db = get_irn_dbg_info(sel);
- ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
+ ptr = new_rd_Add(db, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
set_Load_ptr(load, ptr);
set_Load_mode(load, mode);
/* create new proj, switch off CSE or we may get the old one back */
old_cse = get_opt_cse();
set_opt_cse(0);
- res = n_proj = new_r_Proj(current_ir_graph, block, load, mode, pn_Load_res);
+ res = n_proj = new_r_Proj(block, load, mode, pn_Load_res);
set_opt_cse(old_cse);
if (mode_is_signed(mode)) { /* signed */
int shift_count_down = bits - bf_bits;
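+ /* Shift the field up until its topmost bit is the word's sign bit, then shift arithmetically back down to sign-extend the value. */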
if (shift_count_up) {
- res = new_r_Shl(current_ir_graph, block, res,
- new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(shift_count_up, mode_Iu)), mode);
+ res = new_r_Shl(block, res, new_Const_long(mode_Iu, shift_count_up), mode);
}
if (shift_count_down) {
- res = new_r_Shrs(current_ir_graph, block, res,
- new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(shift_count_down, mode_Iu)), mode);
+ res = new_r_Shrs(block, res, new_Const_long(mode_Iu, shift_count_down), mode);
}
} else { /* unsigned */
int shift_count_down = bit_offset;
unsigned mask = ((unsigned)-1) >> (bits - bf_bits);
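+ /* Shift the field down to bit 0, then mask away all bits above the field width. */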
if (shift_count_down) {
- res = new_r_Shr(current_ir_graph, block, res,
- new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(shift_count_down, mode_Iu)), mode);
+ res = new_r_Shr(block, res, new_Const_long(mode_Iu, shift_count_down), mode);
}
if (bits != bf_bits) {
- res = new_r_And(current_ir_graph, block, res,
- new_r_Const(current_ir_graph, block, mode, new_tarval_from_long(mask, mode)), mode);
+ res = new_r_And(block, res, new_Const_long(mode, mask), mode);
}
}
dbg_info *db;
/* check bitfield access */
- if (get_irn_op(sel) != op_Sel)
+ if (!is_Sel(sel))
return;
ent = get_Sel_entity(sel);
bf_type = get_entity_type(ent);
+ /* must be a bitfield type */
+ if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
+ return;
+
/* We have a bitfield access if either a bit offset is given or
   the size is not integral. */
bf_mode = get_type_mode(bf_type);
if (! bf_mode)
return;
+ value = get_Store_value(store);
+ mode = get_irn_mode(value);
+ block = get_nodes_block(store);
+
bf_bits = get_mode_size_bits(bf_mode);
bit_offset = get_entity_offset_bits_remainder(ent);
- if (bit_offset == 0 && is_integral_size(bf_bits))
- return;
- value = get_Store_value(store);
- mode = get_irn_mode(value);
+ if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_irn_mode(value))
+ return;
/*
 * Now convert the Store(Sel(), value) into Store(Or(And(Load(Sel), neg_mask), And(value << bit_offset, mask))).
*/
mem = get_Store_mem(store);
- block = get_nodes_block(store);
- bit_offset = get_entity_offset_bits_remainder(ent);
offset = get_entity_offset(ent);
bits_mask = get_mode_size_bits(mode) - bf_bits;
/* abandon bitfield sel */
ptr = get_Sel_ptr(sel);
db = get_irn_dbg_info(sel);
- ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
+ ptr = new_rd_Add(db, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
if (neg_mask) {
/* there are some bits, normal case */
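+ /* Read-modify-write: load the containing word, clear the field's bits, shift the new value into position, mask it and OR it in. */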
- irn = new_r_Load(current_ir_graph, block, mem, ptr, mode);
- mem = new_r_Proj(current_ir_graph, block, irn, mode_M, pn_Load_M);
- irn = new_r_Proj(current_ir_graph, block, irn, mode, pn_Load_res);
+ irn = new_r_Load(block, mem, ptr, mode, 0);
+ mem = new_r_Proj(block, irn, mode_M, pn_Load_M);
+ irn = new_r_Proj(block, irn, mode, pn_Load_res);
- irn = new_r_And(current_ir_graph, block, irn,
- new_r_Const(current_ir_graph, block, mode, new_tarval_from_long(neg_mask, mode)), mode);
+ irn = new_r_And(block, irn, new_Const_long(mode, neg_mask), mode);
if (bit_offset > 0) {
- value = new_r_Shl(current_ir_graph, block, value,
- new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(bit_offset, mode_Iu)), mode);
+ value = new_r_Shl(block, value, new_Const_long(mode_Iu, bit_offset), mode);
}
- value = new_r_And(current_ir_graph, block, value,
- new_r_Const(current_ir_graph, block, mode, new_tarval_from_long(mask, mode)), mode);
+ value = new_r_And(block, value, new_Const_long(mode, mask), mode);
- value = new_r_Or(current_ir_graph, block, value, irn, mode);
+ value = new_r_Or(block, value, irn, mode);
}
set_Store_mem(store, mem);
set_Store_ptr(store, ptr);
} /* lower_bitfields_stores */
+/**
+ * Lowers unaligned Loads.
+ */
+static void lower_unaligned_Load(ir_node *load) {
+ (void) load;
+ /* NYI */
+}
+
+/**
+ * Lowers unaligned Stores.
+ */
+static void lower_unaligned_Store(ir_node *store) {
+ (void) store;
+ /* NYI */
+}
+
/**
* lowers IR-nodes, called from walker
*/
case iro_SymConst:
lower_symconst(irn);
break;
+ case iro_Load:
+ if (env != NULL && get_Load_align(irn) == align_non_aligned)
+ lower_unaligned_Load(irn);
+ break;
+ case iro_Store:
+ if (env != NULL && get_Store_align(irn) == align_non_aligned)
+ lower_unaligned_Store(irn);
+ break;
+ case iro_Cast:
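+ /* Casts are only type annotations without machine semantics, so simply replace them by their operand. */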
+ exchange(irn, get_Cast_op(irn));
+ break;
default:
break;
}
{
long proj = get_Proj_proj(irn);
ir_node *pred = get_Proj_pred(irn);
- ir_op *op = get_irn_op(pred);
- if ((proj == pn_Load_res) && (op == op_Load))
+ if (proj == pn_Load_res && is_Load(pred))
lower_bitfields_loads(irn, pred);
break;
}
 * Replaces Sel nodes by address computation. Also resolves array accesses.
 * Handles bitfields by adding And/Or calculations.
*/
-void lower_highlevel(void) {
+void lower_highlevel_graph(ir_graph *irg, int lower_bitfields) {
+
+ if (lower_bitfields) {
+ /* First step: lower bitfield accesses; this must be run while Sels
+  * still exist. */
+ irg_walk_graph(irg, NULL, lower_bf_access, NULL);
+ }
+
+ /* Finally: lower SymConst-Size and Sel nodes, Casts, unaligned Load/Stores. */
+ irg_walk_graph(irg, NULL, lower_irnode, NULL);
+} /* lower_highlevel_graph */
+
+/*
+ * does the same as lower_highlevel() for all nodes on the const code irg
+ */
+void lower_const_code(void) {
+ walk_const_code(NULL, lower_irnode, NULL);
+} /* lower_const_code */
+
+/*
+ * Replaces SymConsts by a real constant if possible.
+ * Replaces Sel nodes by address computation. Also resolves array accesses.
+ * Handles bitfields by adding And/Or calculations.
+ */
+void lower_highlevel(int lower_bitfields) {
int i, n;
n = get_irp_n_irgs();
for (i = 0; i < n; ++i) {
ir_graph *irg = get_irp_irg(i);
-
- /* First step: lower bitfield access: must be run as long as Sels still exists. */
- irg_walk_graph(irg, NULL, lower_bf_access, NULL);
-
- /* Finally: lower SymConst-Size and Sel nodes. */
- irg_walk_graph(irg, NULL, lower_irnode, NULL);
-
- set_irg_phase_low(irg);
+ lower_highlevel_graph(irg, lower_bitfields);
}
+ lower_const_code();
} /* lower_highlevel */