{
type *tp = unknown_type;
/* removing this somehow causes errors in jack. */
- if (tarval_is_entity(con))
- tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con)));
-
return new_rd_Const_type (db, irg, block, mode, con, tp);
}
assert(res);
assert(get_Proj_pred(res));
- assert(get_nodes_Block(get_Proj_pred(res)));
+ assert(get_nodes_block(get_Proj_pred(res)));
res = optimize_node(res);
{
ir_node *res;
+ assert(is_atomic_type(to_tp));
+
res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
res->attr.cast.totype = to_tp;
res = optimize_node(res);
res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
- assert(is_method_type(tp));
+ assert((get_unknown_type() == tp) || is_method_type(tp));
set_Call_type(res, tp);
- res->attr.call.callee_arr = NULL;
+ res->attr.call.exc.pin_state = op_pin_state_pinned;
+ res->attr.call.callee_arr = NULL;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
INLINE ir_node *
new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr)
+ ir_node *store, ir_node *adr, ir_mode *mode)
{
ir_node *in[2];
ir_node *res;
in[0] = store;
in[1] = adr;
res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
+ res->attr.load.exc.pin_state = op_pin_state_pinned;
+ res->attr.load.load_mode = mode;
+ res->attr.load.volatility = volatility_non_volatile;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
in[1] = adr;
in[2] = val;
res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
+ res->attr.store.exc.pin_state = op_pin_state_pinned;
+ res->attr.store.volatility = volatility_non_volatile;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
in[0] = store;
in[1] = size;
res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
- res->attr.a.where = where;
- res->attr.a.type = alloc_type;
+ res->attr.a.exc.pin_state = op_pin_state_pinned;
+ res->attr.a.where = where;
+ res->attr.a.type = alloc_type;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
INLINE ir_node *
new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
- symconst_kind symkind, type *tp)
-{
+ symconst_kind symkind, type *tp) {
ir_node *res;
ir_mode *mode;
mode = mode_P_mach;
else
mode = mode_Iu;
+
res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
res->attr.i.num = symkind;
return res;
}
+/**
+ * Constructs a SymConst holding the address of entity @p symbol.
+ * Placed in the start block of @p irg; @p tp is the (pointer) type of the constant.
+ * NOTE(review): the (type *) cast relies on symconst_symbol being a union whose
+ * first member is type * — confirm against the symconst_symbol declaration.
+ */
+ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
+  symconst_symbol sym = {(type *)symbol};
+  return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
+}
+
+/**
+ * Constructs a SymConst for a yet-unresolved name (ident @p symbol), e.g. an
+ * external symbol to be bound by the linker. Placed in the start block of @p irg.
+ * NOTE(review): (type *) cast assumes symconst_symbol's first union member is
+ * type * — confirm against the symconst_symbol declaration.
+ */
+ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
+  symconst_symbol sym = {(type *)symbol};
+  return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
+}
+
+/**
+ * Constructs a SymConst representing the runtime type tag of type @p symbol.
+ * Placed in the start block of @p irg; @p tp is the type of the constant itself.
+ */
+ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
+  symconst_symbol sym = {symbol};
+  return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
+}
+
+/**
+ * Constructs a SymConst representing the size of type @p symbol.
+ * Placed in the start block of @p irg; @p tp is the type of the constant itself.
+ */
+ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
+  symconst_symbol sym = {symbol};
+  return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
+}
+
INLINE ir_node *
new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
{
assert(res);
assert(get_Proj_pred(res));
- assert(get_nodes_Block(get_Proj_pred(res)));
+ assert(get_nodes_block(get_Proj_pred(res)));
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return new_rd_Phi(NULL, irg, block, arity, in, mode);
}
INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr) {
- return new_rd_Load(NULL, irg, block, store, adr);
+ ir_node *store, ir_node *adr, ir_mode *mode) {
+ return new_rd_Load(NULL, irg, block, store, adr, mode);
}
INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
ir_node *store, ir_node *adr, ir_node *val) {
res = new_rd_Block(db, current_ir_graph, arity, in);
/* Create and initialize array for Phi-node construction. */
- res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
- current_ir_graph->n_loc);
- memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
+ if (get_irg_phase_state(current_ir_graph) == phase_building) {
+ res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
+ current_ir_graph->n_loc);
+ memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
+ }
for (i = arity-1; i >= 0; i--)
if (get_irn_op(in[i]) == op_Unknown) {
Call Graph: ( A ---> B == A "calls" B)
- get_value mature_block
+ get_value mature_immBlock
| |
| |
| |
res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
- res->stack = NEW_ARR_F (ir_node *, 1);
+ res->stack = NEW_ARR_F (ir_node *, 0);
res->pos = 0;
return res;
The call order
    get_value    (makes Phi0, puts it into graph_arr)
set_value (overwrites Phi0 in graph_arr)
- mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
+ mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
the proper value.)
fails. */
if (!block->attr.block.graph_arr[pos]) {
/* The block is not mature, we don't know how many in's are needed. A Phi
with zero predecessors is created. Such a Phi node is called Phi0
node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
- to the list of Phi0 nodes in this block to be matured by mature_block
+ to the list of Phi0 nodes in this block to be matured by mature_immBlock
later.
     The Phi0 has to remember the pos of its internal value.  If the real
Phi is computed, pos is used to update the array with the local
return arr;
}
+/**
+ * returns the frag_arr from a node
+ */
static INLINE ir_node **
get_frag_arr (ir_node *n) {
- if (get_irn_op(n) == op_Call) {
- return n->attr.call.frag_arr;
- } else if (get_irn_op(n) == op_Alloc) {
- return n->attr.a.frag_arr;
- } else {
- return n->attr.frag_arr;
+ switch (get_irn_opcode(n)) {
+ case iro_Call:
+ return n->attr.call.exc.frag_arr;
+ case iro_Alloc:
+ return n->attr.a.exc.frag_arr;
+ case iro_Load:
+ return n->attr.load.exc.frag_arr;
+ case iro_Store:
+ return n->attr.store.exc.frag_arr;
+ default:
+ return n->attr.except.frag_arr;
}
}
optimization possibilities.
The Phi0 node either is allocated in this function, or it comes from
a former call to get_r_value_internal. In this case we may not yet
- exchange phi0, as this is done in mature_block. */
+ exchange phi0, as this is done in mature_immBlock. */
if (!phi0) {
phi0_all = block->attr.block.graph_arr[pos];
if (!((get_irn_op(phi0_all) == op_Phi) &&
/* The block is not mature, we don't know how many in's are needed. A Phi
with zero predecessors is created. Such a Phi node is called Phi0
node. The Phi0 is then added to the list of Phi0 nodes in this block
- to be matured by mature_block later.
+ to be matured by mature_immBlock later.
     The Phi0 has to remember the pos of its internal value.  If the real
Phi is computed, pos is used to update the array with the local
values. */
/** Finalize a Block node, when all control flows are known. */
/** Acceptable parameters are only Block nodes. */
void
-mature_block (ir_node *block)
+mature_immBlock (ir_node *block)
{
int ins;
ir_node *res;
res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
+ res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Quot, &res->attr.frag_arr); /* Could be optimized away. */
+ allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
#endif
return res;
ir_node *res;
res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
+ res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_DivMod, &res->attr.frag_arr); /* Could be optimized away. */
+ allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
#endif
return res;
ir_node *res;
res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
+ res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Div, &res->attr.frag_arr); /* Could be optimized away. */
+ allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
#endif
return res;
ir_node *res;
res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
memop, op1, op2);
+ res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Mod, &res->attr.frag_arr); /* Could be optimized away. */
+ allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
#endif
return res;
res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Call, &res->attr.call.frag_arr); /* Could be optimized away. */
+ allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
#endif
return res;
}
ir_node *
-new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
+new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
{
ir_node *res;
res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
- store, addr);
+ store, addr, mode);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Load, &res->attr.frag_arr); /* Could be optimized away. */
+ allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
#endif
return res;
res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
store, addr, val);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Store, &res->attr.frag_arr); /* Could be optimized away. */
+ allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
#endif
return res;
res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Alloc, &res->attr.a.frag_arr); /* Could be optimized away. */
+ allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
#endif
return res;
/* add an edge to a jmp/control flow node */
void
-add_in_edge (ir_node *block, ir_node *jmp)
+add_immBlock_pred (ir_node *block, ir_node *jmp)
{
if (block->attr.block.matured) {
assert(0 && "Error: Block already matured!\n");
/* changing the current block */
void
-switch_block (ir_node *target)
+set_cur_block (ir_node *target)
{
current_ir_graph->current_block = target;
}
/** Useful access routines **/
/* Returns the current block of the current graph. To set the current
- block use switch_block(). */
+ block use set_cur_block. */
ir_node *get_cur_block() {
return get_irg_current_block(current_ir_graph);
}
ir_node *new_Const (ir_mode *mode, tarval *con) {
return new_d_Const(NULL, mode, con);
}
+
+/**
+ * Constructs a Const of type @p tp with value @p con; the mode is derived
+ * from the type via get_type_mode(), so @p tp must have an associated mode
+ * (i.e. be an atomic type). No debug info is attached.
+ */
+ir_node *new_Const_type(tarval *con, type *tp) {
+  return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
+}
+
ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
return new_d_SymConst(NULL, value, kind);
}
ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
return new_d_Phi(NULL, arity, in, mode);
}
-ir_node *new_Load (ir_node *store, ir_node *addr) {
- return new_d_Load(NULL, store, addr);
+ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
+ return new_d_Load(NULL, store, addr, mode);
}
ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
return new_d_Store(NULL, store, addr, val);