#include "irhooks.h"
#include "irgmod.h"
#include "irgwalk.h"
+#include "irtools.h"
+#include "irpass_t.h"
/**
* Lower a Sel node. Do not touch Sels accessing entities on the frame type.
*/
-static void lower_sel(ir_node *sel) {
- ir_graph *irg = current_ir_graph;
- ir_entity *ent;
- ir_node *newn, *cnst, *index, *ptr, *bl;
- tarval *tv;
- ir_mode *basemode, *mode, *mode_Int;
- ir_type *basetyp, *owner;
- dbg_info *dbg;
+static void lower_sel(ir_node *sel)
+{
+ ir_graph *irg = current_ir_graph;
+ ir_entity *ent;
+ ir_node *newn, *cnst, *index, *ptr, *bl;
+ ir_tarval *tv;
+ ir_mode *basemode, *mode, *mode_Int;
+ ir_type *basetyp, *owner;
+ dbg_info *dbg;
assert(is_Sel(sel));
assert(irg == current_ir_graph);
+/* NOTE(review): diff fragment -- the declarations/initialization of lb, ub,
+ * dbg, bl, index etc. and most of the array-bounds handling are elided
+ * context; claims below are limited to the visible lines. */
+/* Convert the (copied) array bounds to the index mode; Unknown bounds map
+ * to NULL.  copy_const_value() now also takes the target block. */
if (! is_Unknown(lb))
- lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb), mode_Int);
+ lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
else
lb = NULL;
if (! is_Unknown(ub))
- ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub), mode_Int);
+ ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
else
ub = NULL;
}
} else {
/* no array type */
- ir_mode *idx_mode = get_irn_mode(index);
- tarval *tv = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);
+ ir_mode *idx_mode = get_irn_mode(index);
+ ir_tarval *tv = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);
+/* address = ptr + index * element_size */
newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
new_rd_Mul(dbg, bl, index,
cnst = new_rd_Const(dbg, irg, tv);
add = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
#ifdef DO_CACHEOPT /* cacheopt version */
- newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), sel, ent_mode, 0);
+ newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), sel, ent_mode, cons_none);
cacheopt_map_addrs_register_node(newn);
set_Load_ptr(newn, add);
#else /* normal code */
- newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), add, ent_mode, 0);
+ newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), add, ent_mode, cons_none);
#endif
+/* new_r_Proj no longer takes an explicit block argument */
- newn = new_r_Proj(bl, newn, ent_mode, pn_Load_res);
+ newn = new_r_Proj(newn, ent_mode, pn_Load_res);
} else if (get_entity_owner(ent) != get_glob_type()) {
int offset;
/* replace Sel by add(obj, const(ent.offset)) */
- assert(!(get_entity_allocation(ent) == allocation_static &&
- (get_entity_n_overwrites(ent) == 0 && get_entity_n_overwrittenby(ent) == 0)));
newn = get_Sel_ptr(sel);
offset = get_entity_offset(ent);
if (offset != 0) {
}
} else {
/* global_type */
+/* new_rd_SymConst_addr_ent dropped its trailing type argument */
- newn = new_rd_SymConst_addr_ent(NULL, irg, mode, ent, firm_unknown_type);
+ newn = new_rd_SymConst_addr_ent(NULL, irg, mode, ent);
}
}
/* run the hooks */
/**
 * Lower all possible SymConst nodes.
 */
-static void lower_symconst(ir_node *symc) {
+static void lower_symconst(ir_node *symc)
+{
ir_node *newn;
ir_type *tp;
ir_entity *ent;
- tarval *tv;
+ ir_tarval *tv;
ir_enum_const *ec;
ir_mode *mode;
+ ir_graph *irg;
+/* NOTE(review): diff fragment -- some case bodies (e.g. the exchange() call
+ * of most cases and the default branch) are elided context. */
+/* Replace each foldable SymConst kind by an equivalent Const node. */
switch (get_SymConst_kind(symc)) {
case symconst_type_tag:
break;
case symconst_type_size:
/* rewrite the SymConst node by a Const node */
+ irg = get_irn_irg(symc);
tp = get_SymConst_type(symc);
assert(get_type_state(tp) == layout_fixed);
mode = get_irn_mode(symc);
- newn = new_Const_long(mode, get_type_size_bytes(tp));
+ newn = new_r_Const_long(irg, mode, get_type_size_bytes(tp));
assert(newn);
/* run the hooks */
hook_lower(symc);
break;
case symconst_type_align:
/* rewrite the SymConst node by a Const node */
+ irg = get_irn_irg(symc);
tp = get_SymConst_type(symc);
assert(get_type_state(tp) == layout_fixed);
mode = get_irn_mode(symc);
- newn = new_Const_long(mode, get_type_alignment_bytes(tp));
+ newn = new_r_Const_long(irg, mode, get_type_alignment_bytes(tp));
assert(newn);
/* run the hooks */
hook_lower(symc);
exchange(symc, newn);
break;
+/* symconst_addr_name was removed from the API -- presumably no longer
+ * reaches this point; TODO confirm against the ir spec. */
- case symconst_addr_name:
- /* do not rewrite - pass info to back end */
- break;
case symconst_addr_ent:
/* leave */
break;
case symconst_ofs_ent:
/* rewrite the SymConst node by a Const node */
+ irg = get_irn_irg(symc);
ent = get_SymConst_entity(symc);
assert(get_type_state(get_entity_type(ent)) == layout_fixed);
mode = get_irn_mode(symc);
- newn = new_Const_long(mode, get_entity_offset(ent));
+ newn = new_r_Const_long(irg, mode, get_entity_offset(ent));
assert(newn);
/* run the hooks */
hook_lower(symc);
break;
case symconst_enum_const:
/* rewrite the SymConst node by a Const node */
+ irg = get_irn_irg(symc);
ec = get_SymConst_enum(symc);
assert(get_type_state(get_enumeration_owner(ec)) == layout_fixed);
tv = get_enumeration_value(ec);
- newn = new_Const(tv);
+ newn = new_r_Const(irg, tv);
assert(newn);
/* run the hooks */
hook_lower(symc);
*
* @param size the size in bits
*/
-static int is_integral_size(int size) {
+static int is_integral_size(int size)
+{
/* must be a power of two (2^n), i.e. at most one bit set */
if (size & (size-1))
return 0;
* @param proj the Proj(result) node
* @param load the Load node
*/
-static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
+static void lower_bitfields_loads(ir_node *proj, ir_node *load)
+{
ir_node *sel = get_Load_ptr(load);
ir_node *block, *n_proj, *res, *ptr;
+ ir_graph *irg;
ir_entity *ent;
ir_type *bf_type;
ir_mode *bf_mode, *mode;
+/* NOTE(review): diff fragment -- the computation of offset, bit_offset,
+ * bits and bf_bits is elided context. */
*/
/* abandon bitfield sel */
+ irg = get_irn_irg(sel);
ptr = get_Sel_ptr(sel);
db = get_irn_dbg_info(sel);
+/* rewrite the Load to fetch the whole container word at the byte offset */
- ptr = new_rd_Add(db, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
+ ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr));
set_Load_ptr(load, ptr);
set_Load_mode(load, mode);
/* create new proj, switch off CSE or we may get the old one back */
old_cse = get_opt_cse();
set_opt_cse(0);
- res = n_proj = new_r_Proj(block, load, mode, pn_Load_res);
+ res = n_proj = new_r_Proj(load, mode, pn_Load_res);
set_opt_cse(old_cse);
+/* signed bitfield: Shl moves the field to the most significant bits, the
+ * arithmetic Shrs moves it back and sign-extends it */
if (mode_is_signed(mode)) { /* signed */
int shift_count_down = bits - bf_bits;
if (shift_count_up) {
- res = new_r_Shl(block, res, new_Const_long(mode_Iu, shift_count_up), mode);
+ res = new_r_Shl(block, res, new_r_Const_long(irg, mode_Iu, shift_count_up), mode);
}
if (shift_count_down) {
- res = new_r_Shrs(block, res, new_Const_long(mode_Iu, shift_count_down), mode);
+ res = new_r_Shrs(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode);
}
+/* unsigned bitfield: logical Shr to the low bits, then And-mask the rest */
} else { /* unsigned */
int shift_count_down = bit_offset;
unsigned mask = ((unsigned)-1) >> (bits - bf_bits);
if (shift_count_down) {
- res = new_r_Shr(block, res, new_Const_long(mode_Iu, shift_count_down), mode);
+ res = new_r_Shr(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode);
}
if (bits != bf_bits) {
- res = new_r_And(block, res, new_Const_long(mode, mask), mode);
+ res = new_r_And(block, res, new_r_Const_long(irg, mode, mask), mode);
}
}
*
* @todo: It adds a load which may produce an exception!
*/
-static void lower_bitfields_stores(ir_node *store) {
+static void lower_bitfields_stores(ir_node *store)
+{
ir_node *sel = get_Store_ptr(store);
ir_node *ptr, *value;
ir_entity *ent;
ir_type *bf_type;
ir_mode *bf_mode, *mode;
ir_node *mem, *irn, *block;
+ ir_graph *irg;
unsigned mask, neg_mask;
int bf_bits, bits_mask, offset, bit_offset;
dbg_info *db;
+/* NOTE(review): diff fragment -- computation of mask/offset and the final
+ * Store rewrite are elided context. */
neg_mask = ~mask;
/* abandon bitfield sel */
+ irg = get_irn_irg(sel);
ptr = get_Sel_ptr(sel);
db = get_irn_dbg_info(sel);
- ptr = new_rd_Add(db, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
+ ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr));
+/* read-modify-write: load the container word, clear the bitfield's bits
+ * (And with neg_mask), merge in the shifted+masked new value via Or */
if (neg_mask) {
/* there are some bits, normal case */
- irn = new_r_Load( block, mem, ptr, mode, 0);
- mem = new_r_Proj( block, irn, mode_M, pn_Load_M);
- irn = new_r_Proj( block, irn, mode, pn_Load_res);
+ irn = new_r_Load(block, mem, ptr, mode, cons_none);
+ mem = new_r_Proj(irn, mode_M, pn_Load_M);
+ irn = new_r_Proj(irn, mode, pn_Load_res);
- irn = new_r_And(block, irn, new_Const_long(mode, neg_mask), mode);
+ irn = new_r_And(block, irn, new_r_Const_long(irg, mode, neg_mask), mode);
if (bit_offset > 0) {
- value = new_r_Shl(block, value, new_Const_long(mode_Iu, bit_offset), mode);
+ value = new_r_Shl(block, value, new_r_Const_long(irg, mode_Iu, bit_offset), mode);
}
- value = new_r_And(block, value, new_Const_long(mode, mask), mode);
+ value = new_r_And(block, value, new_r_Const_long(irg, mode, mask), mode);
value = new_r_Or(block, value, irn, mode);
}
/**
* Lowers unaligned Loads.
*/
-static void lower_unaligned_Load(ir_node *load) {
+static void lower_unaligned_Load(ir_node *load)
+{
(void) load;
/* NYI: unaligned Loads are currently left untouched */
}
/**
* Lowers unaligned Stores
*/
-static void lower_unaligned_Store(ir_node *store) {
+static void lower_unaligned_Store(ir_node *store)
+{
(void) store;
/* NYI: unaligned Stores are currently left untouched */
}
/**
* lowers IR-nodes, called from walker
*/
-static void lower_irnode(ir_node *irn, void *env) {
+static void lower_irnode(ir_node *irn, void *env)
+{
(void) env;
+/* NOTE(review): diff fragment -- the per-opcode dispatch bodies (e.g. the
+ * call to lower_sel for iro_Sel) are elided context. */
switch (get_irn_opcode(irn)) {
case iro_Sel:
default:
break;
}
-} /* lower_irnode */
+}
/**
* Walker: lowers IR-nodes for bitfield access
*/
-static void lower_bf_access(ir_node *irn, void *env) {
+static void lower_bf_access(ir_node *irn, void *env)
+{
(void) env;
+/* NOTE(review): diff fragment -- the per-opcode dispatch bodies (e.g. the
+ * bitfield Load/Store handling) are elided context. */
switch (get_irn_opcode(irn)) {
case iro_Proj:
default:
break;
}
-} /* lower_bf_access */
+}
/*
* Replaces SymConsts by a real constant if possible.
* Replace Sel nodes by address computation. Also resolves array access.
* Handle Bitfields by added And/Or calculations.
*/
-void lower_highlevel_graph(ir_graph *irg, int lower_bitfields) {
-
+void lower_highlevel_graph(ir_graph *irg, int lower_bitfields)
+{
if (lower_bitfields) {
/* First step: lower bitfield access: must be run as long as Sels still
* exist. */
/* Finally: lower SymConst-Size and Sel nodes, Casts, unaligned Load/Stores. */
irg_walk_graph(irg, NULL, lower_irnode, NULL);
-} /* lower_highlevel_graph */
+
+ /* node replacements performed by the lowering invalidate the out edges */
+ set_irg_outs_inconsistent(irg);
+}
+
+/** Context for running lower_highlevel_graph() as an ir_graph pass. */
+typedef struct pass_t {
+ ir_graph_pass_t pass; /**< inherited pass data; passed to the constructor, the wrapper casts the context back to pass_t */
+ int lower_bitfields; /**< non-zero: also lower bitfield accesses */
+} pass_t;
+
+/**
+ * Wrapper for running lower_highlevel_graph() as an ir_graph pass.
+ */
+static int lower_highlevel_graph_wrapper(ir_graph *irg, void *context)
+{
+ /* context is the pass_t allocated by lower_highlevel_graph_pass() */
+ pass_t *pass = (pass_t*)context;
+
+ lower_highlevel_graph(irg, pass->lower_bitfields);
+ return 0;
+}
+
+/**
+ * Creates an ir_graph pass for lower_highlevel_graph().
+ *
+ * @param name             the name of this pass, or NULL for "lower_hl"
+ * @param lower_bitfields  non-zero: lower bitfield access too
+ *
+ * @return the newly created ir_graph pass (presumably owned by the pass
+ *         manager afterwards -- TODO confirm)
+ */
+ir_graph_pass_t *lower_highlevel_graph_pass(const char *name, int lower_bitfields)
+{
+ pass_t *pass = XMALLOCZ(pass_t);
+
+ pass->lower_bitfields = lower_bitfields;
+ return def_graph_pass_constructor(
+  &pass->pass, name ? name : "lower_hl", lower_highlevel_graph_wrapper);
+}
/*
* does the same as lower_highlevel() for all nodes on the const code irg
*/
-void lower_const_code(void) {
+void lower_const_code(void)
+{
walk_const_code(NULL, lower_irnode, NULL);
-} /* lower_const_code */
+}
+/**
+ * Creates an ir_prog pass for lower_const_code().
+ *
+ * @param name  the name of this pass, or NULL for "lower_const_code"
+ */
+ir_prog_pass_t *lower_const_code_pass(const char *name)
+{
+ const char *pass_name = (name != NULL) ? name : "lower_const_code";
+
+ /* lower_const_code() needs no extra context, a plain prog pass suffices */
+ return def_prog_pass(pass_name, lower_const_code);
+}
+
/*
* Replaces SymConsts by a real constant if possible.
* Replace Sel nodes by address computation. Also resolves array access.
* Handle Bitfields by added And/Or calculations.
*/
-void lower_highlevel(int lower_bitfields) {
- int i, n;
+void lower_highlevel(int lower_bitfields)
+{
+ /* size_t to match the irg count, avoiding signed/unsigned comparison */
+ size_t i, n;
n = get_irp_n_irgs();
for (i = 0; i < n; ++i) {