int r_arity;
ir_graph *irg = current_ir_graph;
- r_arity = arity+2;
+ r_arity = arity + 2;
NEW_ARR_A(ir_node *, r_in, r_arity);
r_in[0] = store;
r_in[1] = callee;
return res;
} /* new_bd_Call */
+/**
+ * Constructs a Builtin node: a call-like node for a compiler-known
+ * intrinsic operation.
+ *
+ * The store is placed as operand 0 and the arity data operands are
+ * copied behind it; the node produces mode_T, is pinned, and records
+ * the builtin kind and its method type as attributes.
+ *
+ * @param db     debug info, may be NULL
+ * @param block  the IR block the new node belongs to
+ * @param store  the current memory state
+ * @param kind   which builtin this node represents
+ * @param arity  number of data operands in @p in
+ * @param in     the data operands
+ * @param tp     method type of the builtin; asserted to be a Method
+ *               type or get_unknown_type()
+ */
+static ir_node *
+new_bd_Builtin(dbg_info *db, ir_node *block, ir_node *store,
+ ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
+ ir_node **r_in;
+ ir_node *res;
+ int r_arity;
+ ir_graph *irg = current_ir_graph;
+
+ r_arity = arity + 1;
+ NEW_ARR_A(ir_node *, r_in, r_arity);
+ r_in[0] = store;
+ memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
+
+ res = new_ir_node(db, irg, block, op_Builtin, mode_T, r_arity, r_in);
+
+ assert((get_unknown_type() == tp) || is_Method_type(tp));
+ res->attr.builtin.exc.pin_state = op_pin_state_pinned;
+ res->attr.builtin.kind = kind;
+ res->attr.builtin.builtin_tp = tp;
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
+ return res;
+} /* new_bd_Builtin */
+
static ir_node *
new_bd_Return(dbg_info *db, ir_node *block,
ir_node *store, int arity, ir_node **in) {
static ir_node *
new_bd_Load(dbg_info *db, ir_node *block,
- ir_node *store, ir_node *adr, ir_mode *mode) {
+ ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
ir_node *in[2];
ir_node *res;
ir_graph *irg = current_ir_graph;
in[0] = store;
in[1] = adr;
res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
- res->attr.load.exc.pin_state = op_pin_state_pinned;
+ res->attr.load.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
res->attr.load.load_mode = mode;
- res->attr.load.volatility = volatility_non_volatile;
- res->attr.load.aligned = align_is_aligned;
+ res->attr.load.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
+ res->attr.load.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
static ir_node *
new_bd_Store(dbg_info *db, ir_node *block,
- ir_node *store, ir_node *adr, ir_node *val) {
+ ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
ir_node *in[3];
ir_node *res;
ir_graph *irg = current_ir_graph;
in[1] = adr;
in[2] = val;
res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
- res->attr.store.exc.pin_state = op_pin_state_pinned;
- res->attr.store.volatility = volatility_non_volatile;
- res->attr.store.aligned = align_is_aligned;
+ res->attr.store.exc.pin_state = flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned;
+ res->attr.store.volatility = flags & cons_volatile ? volatility_is_volatile : volatility_non_volatile;
+ res->attr.store.aligned = flags & cons_unaligned ? align_non_aligned : align_is_aligned;
+
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
int n_outs, ir_asm_constraint *outputs, int n_clobber, ident *clobber[], ident *asm_text) {
ir_node *res;
ir_graph *irg = current_ir_graph;
- (void) clobber;
res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
res->attr.assem.pin_state = op_pin_state_pinned;
return res;
} /* new_rd_Call */
+/**
+ * Constructs a Builtin node in graph @p irg.
+ *
+ * Temporarily switches current_ir_graph to @p irg because the
+ * block-level constructor (new_bd_Builtin) reads the global, then
+ * restores the previous graph before returning.
+ */
+ir_node *
+new_rd_Builtin(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
+ ir_builtin_kind kind, int arity, ir_node **in, ir_type *tp) {
+ ir_node *res;
+ ir_graph *rem = current_ir_graph;
+
+ current_ir_graph = irg;
+ res = new_bd_Builtin(db, block, store, kind, arity, in, tp);
+ current_ir_graph = rem;
+
+ return res;
+} /* new_rd_Builtin */
+
ir_node *
new_rd_Return(dbg_info *db, ir_graph *irg, ir_node *block,
ir_node *store, int arity, ir_node **in) {
ir_node *
new_rd_Load(dbg_info *db, ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr, ir_mode *mode) {
+ ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
ir_node *res;
ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
- res = new_bd_Load(db, block, store, adr, mode);
+ res = new_bd_Load(db, block, store, adr, mode, flags);
current_ir_graph = rem;
return res;
ir_node *
new_rd_Store(dbg_info *db, ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr, ir_node *val) {
+ ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
ir_node *res;
ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
- res = new_bd_Store(db, block, store, adr, val);
+ res = new_bd_Store(db, block, store, adr, val, flags);
current_ir_graph = rem;
return res;
ir_type *tp) {
return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
}
+/* Convenience wrapper: Builtin node without debug info (db = NULL). */
+ir_node *new_r_Builtin(ir_graph *irg, ir_node *block, ir_node *store,
+ ir_builtin_kind kind, int arity, ir_node **in,
+ ir_type *tp) {
+ return new_rd_Builtin(NULL, irg, block, store, kind, arity, in, tp);
+}
#ifdef USE_ORIGINAL
ir_node *new_r_Add(ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Phi(NULL, irg, block, arity, in, mode);
}
ir_node *new_r_Load(ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr, ir_mode *mode) {
- return new_rd_Load(NULL, irg, block, store, adr, mode);
+ ir_node *store, ir_node *adr, ir_mode *mode, ir_cons_flags flags) {
+ return new_rd_Load(NULL, irg, block, store, adr, mode, flags);
}
ir_node *new_r_Store(ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr, ir_node *val) {
- return new_rd_Store(NULL, irg, block, store, adr, val);
+ ir_node *store, ir_node *adr, ir_node *val, ir_cons_flags flags) {
+ return new_rd_Store(NULL, irg, block, store, adr, val, flags);
}
ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
}
if (!has_unknown) res = optimize_node(res);
- current_ir_graph->current_block = res;
IRN_VRFY_IRG(res, current_ir_graph);
/* This loop checks whether the Phi has more than one predecessor.
If so, it is a real Phi node and we break the loop. Else the
Phi node merges the same definition on several paths and therefore
- is not needed. Don't consider Bad nodes! */
+ is not needed.
+ Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
known = res;
for (i = ins - 1; i >= 0; --i) {
assert(in[i]);
if (phi0 && in[i] == phi0)
in[i] = res;
- if (in[i] == res || in[i] == known || is_Bad(in[i]))
+ if (in[i] == res || in[i] == known)
continue;
if (known == res)
exchange phi0, as this is done in mature_immBlock(). */
if (phi0 == NULL) {
phi0_all = block->attr.block.graph_arr[pos];
- if (!(is_Phi(phi0_all) &&
- (get_irn_arity(phi0_all) == 0) &&
- (get_nodes_block(phi0_all) == block)))
+ if (! is_Phi0(phi0_all) ||
+ get_irn_arity(phi0_all) != 0 ||
+ get_nodes_block(phi0_all) != block)
phi0_all = NULL;
} else {
phi0_all = phi0;
ir_node *
new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
ir_node *res;
- (void) db;
assert(arg->op == op_Cond);
arg->attr.cond.kind = fragmentary;
arg->attr.cond.default_proj = max_proj;
- res = new_Proj(arg, mode_X, max_proj);
+ res = new_d_Proj(db, arg, mode_X, max_proj);
return res;
} /* new_d_defaultProj */
return res;
} /* new_d_Call */
+/**
+ * Constructs a Builtin node in the current block of current_ir_graph
+ * (graph-building interface with debug info).
+ */
+ir_node *
+new_d_Builtin(dbg_info *db, ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
+ ir_type *tp) {
+ return new_bd_Builtin(db, current_ir_graph->current_block, store, kind, arity, in, tp);
+} /* new_d_Builtin */
+
ir_node *
new_d_Return(dbg_info *db, ir_node* store, int arity, ir_node **in) {
return new_bd_Return(db, current_ir_graph->current_block,
} /* new_d_Return */
ir_node *
-new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode) {
+new_d_Load(dbg_info *db, ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
ir_node *res;
res = new_bd_Load(db, current_ir_graph->current_block,
- store, addr, mode);
+ store, addr, mode, flags);
#if PRECISE_EXC_CONTEXT
firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
#endif
} /* new_d_Load */
ir_node *
-new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val) {
+new_d_Store(dbg_info *db, ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
ir_node *res;
res = new_bd_Store(db, current_ir_graph->current_block,
- store, addr, val);
+ store, addr, val, flags);
#if PRECISE_EXC_CONTEXT
firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
#endif
assert(get_irg_phase_state(current_ir_graph) == phase_building);
/* creates a new dynamic in-array as length of in is -1 */
res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
- current_ir_graph->current_block = res;
/* macroblock head */
res->in[0] = res;
inc_irg_visited(irg);
(void) db;
+ assert(pos >= 0);
+
return get_r_value_internal(irg->current_block, pos + 1, mode);
} /* get_d_value */
set_value(int pos, ir_node *value) {
ir_graph *irg = current_ir_graph;
assert(get_irg_phase_state(irg) == phase_building);
+ assert(pos >= 0);
assert(pos+1 < irg->n_loc);
assert(is_ir_node(value));
irg->current_block->attr.block.graph_arr[pos + 1] = value;
ir_type *tp) {
return new_d_Call(NULL, store, callee, arity, in, tp);
}
+/* Convenience wrapper: Builtin in the current block, no debug info. */
+ir_node *new_Builtin(ir_node *store, ir_builtin_kind kind, int arity, ir_node **in,
+ ir_type *tp) {
+ return new_d_Builtin(NULL, store, kind, arity, in, tp);
+}
#ifdef USE_ORIGINAL
ir_node *new_Add(ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_d_Add(NULL, op1, op2, mode);
ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
return new_d_Phi(NULL, arity, in, mode);
}
-ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode) {
- return new_d_Load(NULL, store, addr, mode);
+ir_node *new_Load(ir_node *store, ir_node *addr, ir_mode *mode, ir_cons_flags flags) {
+ return new_d_Load(NULL, store, addr, mode, flags);
}
-ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val) {
- return new_d_Store(NULL, store, addr, val);
+ir_node *new_Store(ir_node *store, ir_node *addr, ir_node *val, ir_cons_flags flags) {
+ return new_d_Store(NULL, store, addr, val, flags);
}
ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
ir_where_alloc where) {