- * @param *irg The IR graph the node belongs to.
* @param *block The IR block the node belongs to.
*/
-FIRM_API ir_node *new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block);
+FIRM_API ir_node *new_rd_Start(dbg_info *db, ir_node *block);
/** Constructor for a End node.
*
- * @param *irg The IR graph the node belongs to.
* @param *block The IR block the node belongs to.
*/
-FIRM_API ir_node *new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block);
+FIRM_API ir_node *new_rd_End(dbg_info *db, ir_node *block);
/** Constructor for a Jmp node.
*
/** Constructor for a Start node.
*
- * @param *irg The IR graph the node belongs to.
* @param *block The IR block the node belongs to.
*/
-FIRM_API ir_node *new_r_Start(ir_graph *irg, ir_node *block);
+FIRM_API ir_node *new_r_Start(ir_node *block);
/** Constructor for a End node.
*
- * @param *irg The IR graph the node belongs to.
* @param *block The IR block the node belongs to.
*/
-FIRM_API ir_node *new_r_End(ir_graph *irg, ir_node *block);
+FIRM_API ir_node *new_r_End(ir_node *block);
/** Constructor for a Jmp node.
*
/**
* Copies a Firm subgraph that complies to the restrictions for
- * constant expressions to current_block in current_ir_graph.
+ * constant expressions to block.
*
* @param dbg debug info for all newly created nodes
* @param n the node
- * Set current_ir_graph to get_const_code_irg() to generate a constant
- * expression.
+ * To create a constant expression, pass a block of the ConstCode irg
+ * (see get_const_code_irg()) as to_block.
*/
-FIRM_API ir_node *copy_const_value(dbg_info *dbg, ir_node *n);
+FIRM_API ir_node *copy_const_value(dbg_info *dbg, ir_node *n, ir_node *to_block);
/* Set has no effect for existent entities of type method. */
FIRM_API ir_node *get_atomic_ent_value(ir_entity *ent);
* So we could replace the Sel node by a SymConst.
* This method must exists.
*/
- set_irg_current_block(current_ir_graph, get_nodes_block(node));
assert(get_entity_irg(arr[0]) != NULL);
- new_node = copy_const_value(get_irn_dbg_info(node), get_atomic_ent_value(arr[0]));
+ new_node = copy_const_value(get_irn_dbg_info(node), get_atomic_ent_value(arr[0]), get_nodes_block(node));
DBG_OPT_POLY(node, new_node);
exchange(node, new_node);
}
* We can replace the input with true/false.
*/
if (con == NULL) {
- con = new_Const(pnc == pn_Cond_true ? tarval_b_true : tarval_b_false);
+ ir_graph *irg = get_irn_irg(block);
+ con = new_r_Const(irg, pnc == pn_Cond_true ? tarval_b_true : tarval_b_false);
}
old = get_irn_n(user, pos);
set_irn_n(user, pos, con);
NEW_ARR_A(ir_node *, in, n);
/* ok, ALL predecessors are either dominated by block OR other block */
if (c_b == NULL) {
- ir_node *c_true = new_Const(tarval_b_true);
- ir_node *c_false = new_Const(tarval_b_false);
+ ir_graph *irg = get_irn_irg(block);
+ ir_node *c_true = new_r_Const(irg, tarval_b_true);
+ ir_node *c_false = new_r_Const(irg, tarval_b_false);
env->num_consts += 2;
if (pnc == pn_Cond_true) {
c_b = c_true;
* We can replace the input with a Confirm(ptr, !=, NULL).
*/
if (c == NULL) {
- ir_mode *mode = get_irn_mode(ptr);
- c = new_Const(get_mode_null(mode));
-
+ ir_mode *mode = get_irn_mode(ptr);
+ ir_graph *irg = get_irn_irg(block);
+ c = new_r_Const(irg, get_mode_null(mode));
c = new_r_Confirm(block, ptr, c, pn_Cmp_Lg);
}
assert(is_Block(bl));
if (get_Block_dom_depth(bl) == -1) {
/* This block is not reachable from Start */
- return new_Bad();
+ ir_graph *irg = get_irn_irg(bl);
+ return new_r_Bad(irg);
}
return get_dom_info(bl)->idom;
}
assert(is_Block(bl));
if (get_Block_postdom_depth(bl) == -1) {
/* This block is not reachable from Start */
- return new_Bad();
+ ir_graph *irg = get_irn_irg(bl);
+ return new_r_Bad(irg);
}
return get_pdom_info(bl)->idom;
}
ir_node *block = get_nodes_block(node);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *ptr = get_irg_frame(irg);
- ir_node *mem = new_NoMem();
+ ir_node *mem = new_r_NoMem(irg);
ir_node *val = get_irn_n(node, be_pos_Spill_val);
//ir_mode *mode = get_irn_mode(val);
ir_entity *entity = be_get_frame_entity(node);
* registers... */
ir_graph *irg = current_ir_graph;
ir_node *stack = get_irg_frame(irg);
- ir_node *nomem = new_NoMem();
+ ir_node *nomem = new_r_NoMem(irg);
ir_node *str0 = new_bd_arm_Str(dbgi, block, stack, node0, nomem, mode_gp,
NULL, 0, 0, true);
ir_node *str1 = new_bd_arm_Str(dbgi, block, stack, node1, nomem, mode_gp,
ldf = new_bd_arm_Ldf(dbgi, block, stack, sync, mode_D, NULL, 0, 0, true);
set_irn_pinned(ldf, op_pin_state_floats);
- return new_Proj(ldf, mode_fp, pn_arm_Ldf_res);
+ return new_r_Proj(ldf, mode_fp, pn_arm_Ldf_res);
}
static ir_node *int_to_float(dbg_info *dbgi, ir_node *block, ir_node *node)
{
ir_graph *irg = current_ir_graph;
ir_node *stack = get_irg_frame(irg);
- ir_node *nomem = new_NoMem();
+ ir_node *nomem = new_r_NoMem(irg);
ir_node *str = new_bd_arm_Str(dbgi, block, stack, node, nomem, mode_gp,
NULL, 0, 0, true);
ir_node *ldf;
ldf = new_bd_arm_Ldf(dbgi, block, stack, str, mode_F, NULL, 0, 0, true);
set_irn_pinned(ldf, op_pin_state_floats);
- return new_Proj(ldf, mode_fp, pn_arm_Ldf_res);
+ return new_r_Proj(ldf, mode_fp, pn_arm_Ldf_res);
}
static ir_node *float_to_int(dbg_info *dbgi, ir_node *block, ir_node *node)
{
ir_graph *irg = current_ir_graph;
ir_node *stack = get_irg_frame(irg);
- ir_node *nomem = new_NoMem();
+ ir_node *nomem = new_r_NoMem(irg);
ir_node *stf = new_bd_arm_Stf(dbgi, block, stack, node, nomem, mode_F,
NULL, 0, 0, true);
ir_node *ldr;
ldr = new_bd_arm_Ldr(dbgi, block, stack, stf, mode_gp, NULL, 0, 0, true);
set_irn_pinned(ldr, op_pin_state_floats);
- return new_Proj(ldr, mode_gp, pn_arm_Ldr_res);
+ return new_r_Proj(ldr, mode_gp, pn_arm_Ldr_res);
}
static void double_to_ints(dbg_info *dbgi, ir_node *block, ir_node *node,
{
ir_graph *irg = current_ir_graph;
ir_node *stack = get_irg_frame(irg);
- ir_node *nomem = new_NoMem();
+ ir_node *nomem = new_r_NoMem(irg);
ir_node *stf = new_bd_arm_Stf(dbgi, block, stack, node, nomem, mode_D,
NULL, 0, 0, true);
ir_node *ldr0, *ldr1;
ldr1 = new_bd_arm_Ldr(dbgi, block, stack, stf, mode_gp, NULL, 0, 4, true);
set_irn_pinned(ldr1, op_pin_state_floats);
- *out_value0 = new_Proj(ldr0, mode_gp, pn_arm_Ldr_res);
- *out_value1 = new_Proj(ldr1, mode_gp, pn_arm_Ldr_res);
+ *out_value0 = new_r_Proj(ldr0, mode_gp, pn_arm_Ldr_res);
+ *out_value1 = new_r_Proj(ldr1, mode_gp, pn_arm_Ldr_res);
}
static ir_node *gen_CopyB(ir_node *node)
return be_prolog_get_reg_value(abihelper, sp_reg);
case pn_Start_P_tls:
- return new_Bad();
+ return new_r_Bad(get_irn_irg(node));
case pn_Start_max:
break;
ir_node *ldr = new_bd_arm_Ldr(NULL, new_block, fp, mem,
mode_gp, param->entity,
0, 0, true);
- value1 = new_Proj(ldr, mode_gp, pn_arm_Ldr_res);
+ value1 = new_r_Proj(ldr, mode_gp, pn_arm_Ldr_res);
}
/* convert integer value to float */
ir_node *block = get_nodes_block(node);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *ptr = get_irn_n(node, be_pos_Spill_frame);
- ir_node *mem = new_NoMem();
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *mem = new_r_NoMem(irg);
ir_node *val = get_irn_n(node, be_pos_Spill_val);
ir_mode *mode = get_irn_mode(val);
ir_entity *entity = be_get_frame_entity(node);
/* Insert a store for primitive arguments. */
if (is_atomic_type(param_type)) {
ir_node *store;
- ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
+ ir_node *mem_input = do_seq ? curr_mem : new_r_NoMem(irg);
store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
mem = new_r_Proj(store, mode_M, pn_Store_M);
} else {
ir_mode *mode = get_type_mode(param_type);
ir_mode *load_mode = arg->load_mode;
- ir_node *load = new_r_Load(start_bl, new_NoMem(), addr, load_mode, cons_floats);
+ ir_node *load = new_r_Load(start_bl, new_r_NoMem(irg), addr, load_mode, cons_floats);
repl = new_r_Proj(load, load_mode, pn_Load_res);
if (mode != load_mode) {
/* we need an extra indirection for global data outside our current
module. The loads are always safe and can therefore float
and need no memory input */
- load = new_r_Load(block, new_NoMem(), add, mode, cons_floats);
+ load = new_r_Load(block, new_r_NoMem(irg), add, mode, cons_floats);
load_res = new_r_Proj(load, mode, pn_Load_res);
set_irn_n(node, i, load_res);
/* kill the Reload */
if (get_irn_n_edges(irn) == 0) {
+ ir_graph *irg = get_irn_irg(irn);
sched_remove(irn);
- set_irn_n(irn, be_pos_Reload_mem, new_Bad());
- set_irn_n(irn, be_pos_Reload_frame, new_Bad());
+ set_irn_n(irn, be_pos_Reload_mem, new_r_Bad(irg));
+ set_irn_n(irn, be_pos_Reload_frame, new_r_Bad(irg));
}
}
}
}
-static void new_Phi_copy_attr(ir_graph *irg, const ir_node *old_node,
+static void new_phi_copy_attr(ir_graph *irg, const ir_node *old_node,
ir_node *new_node)
{
backend_info_t *old_info = be_get_info(old_node);
panic("double initialization of be_info");
old_phi_copy_attr = op_Phi->ops.copy_attr;
- op_Phi->ops.copy_attr = new_Phi_copy_attr;
+ op_Phi->ops.copy_attr = new_phi_copy_attr;
initialized = true;
/* phis have register and register requirements now which we want to dump */
if (!initialized)
panic("be_info_free called without prior init");
- assert(op_Phi->ops.copy_attr == new_Phi_copy_attr);
+ assert(op_Phi->ops.copy_attr == new_phi_copy_attr);
op_Phi->ops.copy_attr = old_phi_copy_attr;
initialized = false;
ir_node *pred;
ir_node *succ_block;
ir_node *jump = NULL;
+ ir_graph *irg = get_irn_irg(block);
if (irn_visited_else_mark(block))
return;
panic("Unexpected node %+F in block %+F with empty schedule", node, block);
}
- set_Block_cfgpred(block, 0, new_Bad());
+ set_Block_cfgpred(block, 0, new_r_Bad(irg));
kill_node(jump);
blocks_removed = 1;
const arch_register_class_t *cls)
{
const arch_register_req_t *req;
- struct obstack *obst = be_get_be_obst(get_irn_irg(block));
+ ir_graph *irg = get_irn_irg(block);
+ struct obstack *obst = be_get_be_obst(irg);
backend_info_t *info;
int i;
static void kill_node_and_preds(ir_node *node)
{
+ ir_graph *irg = get_irn_irg(node);
int arity, i;
arity = get_irn_arity(node);
for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(node, i);
- set_irn_n(node, i, new_Bad());
+ set_irn_n(node, i, new_r_Bad(irg));
if (get_irn_n_edges(pred) != 0)
continue;
be_set_MemPerm_in_entity(mempermnode, i, entry->in);
be_set_MemPerm_out_entity(mempermnode, i, entry->out);
set_irg_current_block(irg, memperm->block);
- proj = new_Proj(mempermnode, get_irn_mode(arg), i);
+ proj = new_r_Proj(mempermnode, get_irn_mode(arg), i);
set_irn_n(entry->node, entry->pos, proj);
}
* and simply always available. */
if (!sched_is_scheduled(insn)) {
/* override spillinfos or create a new one */
- spillinfo->spills->spill = new_NoMem();
+ ir_graph *irg = get_irn_irg(to_spill);
+ spillinfo->spills->spill = new_r_NoMem(irg);
DB((dbg, LEVEL_1, "don't spill %+F use NoMem\n", to_spill));
return;
}
static int is_value_available(spill_env_t *env, const ir_node *arg,
const ir_node *reloader)
{
- if (is_Unknown(arg) || arg == new_NoMem())
+ if (is_Unknown(arg) || is_NoMem(arg))
return 1;
if (be_is_Spill(skip_Proj_const(arg)))
* predecessor (of a PhiM) but this test might match other things too...
*/
if (!sched_is_scheduled(insn)) {
+ ir_graph *irg = get_irn_irg(to_spill);
/* override spillinfos or create a new one */
spill_t *spill = OALLOC(&env->obst, spill_t);
spill->after = NULL;
spill->next = NULL;
- spill->spill = new_NoMem();
+ spill->spill = new_r_NoMem(irg);
spillinfo->spills = spill;
spillinfo->spill_costs = 0;
/* Hack: some places in the code ask the Anchor for its register
requirements */
- new_anchor = new_Anchor(irg);
+ new_anchor = new_r_Anchor(irg);
info = be_get_info(new_anchor);
info->out_infos = NEW_ARR_D(reg_out_info_t, obst, 1);
memset(info->out_infos, 0, 1 * sizeof(info->out_infos[0]));
mode = get_irn_mode(irn);
irn_mode = get_irn_mode(irn);
noreg = get_irn_n(irn, 0);
- nomem = new_NoMem();
+ nomem = new_r_NoMem(irg);
dbg = get_irn_dbg_info(irn);
/* initialize structure */
ir_node *turn_back_am(ir_node *node)
{
dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_graph *irg = get_irn_irg(node);
ir_node *block = get_nodes_block(node);
ir_node *base = get_irn_n(node, n_ia32_base);
ir_node *index = get_irn_n(node, n_ia32_index);
ia32_copy_am_attrs(load, node);
if (is_ia32_is_reload(node))
set_ia32_is_reload(load);
- set_irn_n(node, n_ia32_mem, new_NoMem());
+ set_irn_n(node, n_ia32_mem, new_r_NoMem(irg));
switch (get_ia32_am_support(node)) {
case ia32_am_unary:
const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
ir_mode *mode = get_spill_mode(spillval);
ir_node *noreg = ia32_new_NoReg_gp(irg);
- ir_node *nomem = new_NoMem();
+ ir_node *nomem = new_r_NoMem(irg);
ir_node *ptr = get_irg_frame(irg);
ir_node *val = get_irn_n(node, be_pos_Spill_val);
ir_node *store;
ir_node *noreg = ia32_new_NoReg_gp(irg);
ir_node *frame = get_irg_frame(irg);
- ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg, new_NoMem(), sp);
+ ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg, new_r_NoMem(irg), sp);
set_ia32_frame_ent(pop, ent);
set_ia32_use_frame(pop);
sp = create_spproj(node, push, pn_ia32_Push_stack);
}
- set_irn_n(node, i, new_Bad());
+ set_irn_n(node, i, new_r_Bad(irg));
}
/* create pops */
/* remove memperm */
arity = get_irn_arity(node);
for (i = 0; i < arity; ++i) {
- set_irn_n(node, i, new_Bad());
+ set_irn_n(node, i, new_r_Bad(irg));
}
sched_remove(node);
}
*/
static ir_node *ia32_create_trampoline_fkt(ir_node *block, ir_node *mem, ir_node *trampoline, ir_node *env, ir_node *callee)
{
- ir_node *st, *p = trampoline;
- ir_mode *mode = get_irn_mode(p);
+ ir_graph *irg = get_irn_irg(block);
+ ir_node *p = trampoline;
+ ir_mode *mode = get_irn_mode(p);
+ ir_node *st;
/* mov ecx,<env> */
- st = new_r_Store(block, mem, p, new_Const_long(mode_Bu, 0xb9), 0);
+ st = new_r_Store(block, mem, p, new_r_Const_long(irg, mode_Bu, 0xb9), 0);
mem = new_r_Proj(st, mode_M, pn_Store_M);
- p = new_r_Add(block, p, new_Const_long(mode_Iu, 1), mode);
+ p = new_r_Add(block, p, new_r_Const_long(irg, mode_Iu, 1), mode);
st = new_r_Store(block, mem, p, env, 0);
mem = new_r_Proj(st, mode_M, pn_Store_M);
- p = new_r_Add(block, p, new_Const_long(mode_Iu, 4), mode);
+ p = new_r_Add(block, p, new_r_Const_long(irg, mode_Iu, 4), mode);
/* jmp <callee> */
- st = new_r_Store(block, mem, p, new_Const_long(mode_Bu, 0xe9), 0);
+ st = new_r_Store(block, mem, p, new_r_Const_long(irg, mode_Bu, 0xe9), 0);
mem = new_r_Proj(st, mode_M, pn_Store_M);
- p = new_r_Add(block, p, new_Const_long(mode_Iu, 1), mode);
+ p = new_r_Add(block, p, new_r_Const_long(irg, mode_Iu, 1), mode);
st = new_r_Store(block, mem, p, callee, 0);
mem = new_r_Proj(st, mode_M, pn_Store_M);
- p = new_r_Add(block, p, new_Const_long(mode_Iu, 4), mode);
+ p = new_r_Add(block, p, new_r_Const_long(irg, mode_Iu, 4), mode);
return mem;
}
irg = get_irn_irg(irn);
noreg = ia32_new_NoReg_gp(irg);
noreg_fp = ia32_new_NoReg_xmm(irg);
- nomem = new_NoMem();
+ nomem = new_r_NoMem(irg);
in1 = get_irn_n(irn, n_ia32_binary_left);
in2 = get_irn_n(irn, n_ia32_binary_right);
in1_reg = arch_get_irn_register(in1);
ir_graph *irg = get_irn_irg(state);
ir_node *block = get_nodes_block(state);
ir_node *noreg = ia32_new_NoReg_gp(irg);
- ir_node *nomem = new_NoMem();
+ ir_node *nomem = new_r_NoMem(irg);
ir_node *frame = get_irg_frame(irg);
spill = new_bd_ia32_FnstCW(NULL, block, frame, noreg, nomem, state);
static ir_node *create_fldcw_ent(ir_node *block, ir_entity *entity)
{
ir_graph *irg = get_irn_irg(block);
- ir_node *nomem = new_NoMem();
+ ir_node *nomem = new_r_NoMem(irg);
ir_node *noreg = ia32_new_NoReg_gp(irg);
ir_node *reload;
sched_add_before(before, reload);
} else {
ir_mode *lsmode = ia32_reg_classes[CLASS_ia32_fp_cw].mode;
- ir_node *nomem = new_NoMem();
+ ir_node *nomem = new_r_NoMem(irg);
ir_node *cwstore, *load, *load_res, *or, *store, *fldcw;
ir_node *or_const;
*/
/* TODO: give a hint to the backend somehow to not create a cltd here... */
- sign = new_rd_Shrs(dbg, block, a_h, new_Const_long(l_mode, 31), h_mode);
+ sign = new_rd_Shrs(dbg, block, a_h, new_r_Const_long(irg, l_mode, 31), h_mode);
sign_l = new_rd_Conv(dbg, block, sign, l_mode);
sub_l = new_rd_Eor(dbg, block, a_l, sign_l, l_mode);
sub_h = new_rd_Eor(dbg, block, a_h, sign, h_mode);
/* convert from float to signed 64bit */
ir_mode *flt_mode = get_irn_mode(a_f);
tarval *flt_tv = new_tarval_from_str("9223372036854775808", 19, flt_mode);
- ir_node *flt_corr = new_Const(flt_tv);
+ ir_node *flt_corr = new_r_Const(irg, flt_tv);
ir_node *lower_blk = block;
ir_node *upper_blk;
ir_node *cmp, *proj, *cond, *blk, *int_phi, *flt_phi;
set_irn_in(lower_blk, 2, in);
/* create to Phis */
- in[0] = new_Const(get_mode_null(h_res_mode));
- in[1] = new_Const_long(h_res_mode, 0x80000000);
+ in[0] = new_r_Const(irg, get_mode_null(h_res_mode));
+ in[1] = new_r_Const_long(irg, h_res_mode, 0x80000000);
int_phi = new_r_Phi(lower_blk, 2, in, h_res_mode);
ir_node *val = ia32_new_NoReg_gp(cg);
ir_node *noreg = ia32_new_NoReg_gp(cg);
- ir_node *nomem = new_NoMem();
+ ir_graph *irg = get_irn_irg(block);
+ ir_node *nomem = new_r_NoMem(irg);
ir_node *push = new_bd_ia32_Push(dbgi, block, noreg, noreg, nomem, val, stack);
sched_add_before(schedpoint, push);
const arch_register_t *reg)
{
const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
+ ir_graph *irg = get_irn_irg(block);
ir_node *pop;
ir_node *keep;
ir_node *val;
ir_node *in[1];
- pop = new_bd_ia32_Pop(dbgi, block, new_NoMem(), stack);
+ pop = new_bd_ia32_Pop(dbgi, block, new_r_NoMem(irg), stack);
stack = new_r_Proj(pop, mode_Iu, pn_ia32_Pop_stack);
arch_set_irn_register(stack, esp);
block = get_nodes_block(node);
irg = get_irn_irg(node);
noreg = ia32_new_NoReg_gp(irg);
- nomem = new_NoMem();
+ nomem = new_r_NoMem(irg);
res = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, op1, op2);
arch_set_irn_register(res, out_reg);
set_ia32_commutative(res);
block = get_nodes_block(node);
irg = get_irn_irg(node);
noreg = ia32_new_NoReg_gp(irg);
- nomem = new_NoMem();
+ nomem = new_r_NoMem(irg);
res = new_bd_ia32_Shl(dbgi, block, op1, op2);
arch_set_irn_register(res, out_reg);
goto exchange;
x87_patch_insn(n, op_p);
} else {
ir_node *vfld, *mem, *block, *rproj, *mproj;
- ir_graph *irg;
+ ir_graph *irg = get_irn_irg(n);
/* stack full here: need fstp + load */
x87_pop(state);
x87_patch_insn(n, op_p);
block = get_nodes_block(n);
- vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), new_NoMem(), get_ia32_ls_mode(n));
+ vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), new_r_NoMem(irg), get_ia32_ls_mode(n));
/* copy all attributes */
set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
arch_set_irn_register(rproj, op2);
/* reroute all former users of the store memory to the load memory */
- irg = get_irn_irg(n);
edges_reroute(mem, mproj, irg);
/* set the memory input of the load to the store memory */
set_irn_n(vfld, n_ia32_vfld_mem, mem);
ir_node *block = get_nodes_block(node);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *ptr = get_irn_n(node, be_pos_Spill_frame);
- ir_node *mem = new_NoMem();
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *mem = new_r_NoMem(irg);
ir_node *val = get_irn_n(node, be_pos_Spill_val);
ir_mode *mode = get_irn_mode(val);
ir_entity *entity = be_get_frame_entity(node);
tarval *tv = get_Const_tarval(node);
ir_entity *entity = create_float_const_entity(tv);
ir_node *hi = new_bd_sparc_SetHi(dbgi, block, entity, 0);
- ir_node *mem = new_NoMem();
+ ir_node *mem = new_r_NoMem(get_irn_irg(block));
ir_node *new_op
= create_ldf(dbgi, block, hi, mem, mode, entity, 0, false);
- ir_node *proj = new_Proj(new_op, mode, pn_sparc_Ldf_res);
+ ir_node *proj = new_r_Proj(new_op, mode, pn_sparc_Ldf_res);
be_dep_on_frame(hi);
set_irn_pinned(new_op, op_pin_state_floats);
/* scale index */
index = new_bd_sparc_Sll_imm(dbgi, block, new_selector, NULL, 2);
/* load from jumptable */
- load = new_bd_sparc_Ld_reg(dbgi, block, table_address, index, new_NoMem(),
+ load = new_bd_sparc_Ld_reg(dbgi, block, table_address, index,
+ new_r_NoMem(get_irn_irg(block)),
mode_gp);
address = new_r_Proj(load, mode_gp, pn_sparc_Ld_res);
{
ir_graph *irg = current_ir_graph;
ir_node *sp = get_irg_frame(irg);
- ir_node *nomem = new_NoMem();
+ ir_node *nomem = new_r_NoMem(irg);
ir_node *st = new_bd_sparc_St_imm(dbgi, block, value0, sp, nomem,
mode_gp, NULL, 0, true);
ir_mode *mode;
ldf = create_ldf(dbgi, block, sp, mem, mode, NULL, 0, true);
set_irn_pinned(ldf, op_pin_state_floats);
- return new_Proj(ldf, mode, pn_sparc_Ldf_res);
+ return new_r_Proj(ldf, mode, pn_sparc_Ldf_res);
}
static void bitcast_float_to_int(dbg_info *dbgi, ir_node *block,
{
ir_graph *irg = current_ir_graph;
ir_node *stack = get_irg_frame(irg);
- ir_node *nomem = new_NoMem();
+ ir_node *nomem = new_r_NoMem(irg);
ir_node *stf = create_stf(dbgi, block, node, stack, nomem, float_mode,
NULL, 0, true);
int bits = get_mode_size_bits(float_mode);
ld = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp, NULL, 0, true);
set_irn_pinned(ld, op_pin_state_floats);
- result[0] = new_Proj(ld, mode_gp, pn_sparc_Ld_res);
+ result[0] = new_r_Proj(ld, mode_gp, pn_sparc_Ld_res);
if (bits == 64) {
ir_node *ld2 = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp,
NULL, 4, true);
set_irn_pinned(ld, op_pin_state_floats);
- result[1] = new_Proj(ld2, mode_gp, pn_sparc_Ld_res);
+ result[1] = new_r_Proj(ld2, mode_gp, pn_sparc_Ld_res);
arch_irn_add_flags(ld, sparc_arch_irn_flag_needs_64bit_spillslot);
arch_irn_add_flags(ld2, sparc_arch_irn_flag_needs_64bit_spillslot);
case pn_Start_P_frame_base:
return be_prolog_get_reg_value(abihelper, fp_reg);
case pn_Start_P_tls:
- return new_Bad();
+ return new_r_Bad(get_irn_irg(node));
case pn_Start_max:
break;
}
ir_node *ld = new_bd_sparc_Ld_imm(NULL, new_block, fp, mem,
mode_gp, param->entity,
0, true);
- value1 = new_Proj(ld, mode_gp, pn_sparc_Ld_res);
+ value1 = new_r_Proj(ld, mode_gp, pn_sparc_Ld_res);
}
/* convert integer value to float */
instruction *root; /**< the root of the instruction tree */
ir_node *op; /**< the operand that is multiplied */
ir_node *blk; /**< the block where the new graph is built */
+ ir_graph *irg;
dbg_info *dbg; /**< the debug info for the new graph. */
ir_mode *shf_mode; /**< the (unsigned) mode for the shift constants */
int fail; /**< set to 1 if the instruction sequence fails the constraints */
static ir_node *build_graph(mul_env *env, instruction *inst)
{
ir_node *l, *r, *c;
+ ir_graph *irg = env->irg;
if (inst->irn)
return inst->irn;
case LEA:
l = build_graph(env, inst->in[0]);
r = build_graph(env, inst->in[1]);
- c = new_Const_long(env->shf_mode, inst->shift_count);
+ c = new_r_Const_long(irg, env->shf_mode, inst->shift_count);
r = new_rd_Shl(env->dbg, env->blk, r, c, env->mode);
return inst->irn = new_rd_Add(env->dbg, env->blk, l, r, env->mode);
case SHIFT:
l = build_graph(env, inst->in[0]);
- c = new_Const_long(env->shf_mode, inst->shift_count);
+ c = new_r_Const_long(irg, env->shf_mode, inst->shift_count);
return inst->irn = new_rd_Shl(env->dbg, env->blk, l, c, env->mode);
case SUB:
l = build_graph(env, inst->in[0]);
r = build_graph(env, inst->in[1]);
return inst->irn = new_rd_Add(env->dbg, env->blk, l, r, env->mode);
case ZERO:
- return inst->irn = new_Const(get_mode_null(env->mode));
+ return inst->irn = new_r_Const(irg, get_mode_null(env->mode));
default:
panic("Unsupported instruction kind");
}
env.fail = 0;
env.n_shift = env.params->maximum_shifts;
env.evaluate = env.params->evaluate != NULL ? env.params->evaluate : default_evaluate;
+ env.irg = get_irn_irg(irn);
R = value_to_condensed(&env, tv, &r);
inst = decompose_mul(&env, R, r, tv);
return div;
if (mode_is_signed(mode)) {
+ ir_graph *irg = get_irn_irg(div);
struct ms mag = magic(tv);
/* generate the Mulh instruction */
- c = new_Const(mag.M);
+ c = new_r_Const(irg, mag.M);
q = new_rd_Mulh(dbg, block, n, c, mode);
/* do we need an Add or Sub */
/* Do we need the shift */
if (mag.s > 0) {
- c = new_Const_long(mode_Iu, mag.s);
+ c = new_r_Const_long(irg, mode_Iu, mag.s);
q = new_rd_Shrs(dbg, block, q, c, mode);
}
/* final */
- c = new_Const_long(mode_Iu, bits - 1);
+ c = new_r_Const_long(irg, mode_Iu, bits - 1);
t = new_rd_Shr(dbg, block, q, c, mode);
q = new_rd_Add(dbg, block, q, t, mode);
} else {
struct mu mag = magicu(tv);
ir_node *c;
+ ir_graph *irg = get_irn_irg(div);
/* generate the Mulh instruction */
- c = new_Const(mag.M);
+ c = new_r_Const(irg, mag.M);
q = new_rd_Mulh(dbg, block, n, c, mode);
if (mag.need_add) {
/* use the GM scheme */
t = new_rd_Sub(dbg, block, n, q, mode);
- c = new_Const(get_mode_one(mode_Iu));
+ c = new_r_Const(irg, get_mode_one(mode_Iu));
t = new_rd_Shr(dbg, block, t, c, mode);
t = new_rd_Add(dbg, block, t, q, mode);
- c = new_Const_long(mode_Iu, mag.s - 1);
+ c = new_r_Const_long(irg, mode_Iu, mag.s - 1);
q = new_rd_Shr(dbg, block, t, c, mode);
} else {
/* use the default scheme */
q = new_rd_Add(dbg, block, q, n, mode);
}
} else if (mag.s > 0) { /* default scheme, shift needed */
- c = new_Const_long(mode_Iu, mag.s);
+ c = new_r_Const_long(irg, mode_Iu, mag.s);
q = new_rd_Shr(dbg, block, q, c, mode);
}
}
}
if (k >= 0) { /* division by 2^k or -2^k */
+ ir_graph *irg = get_irn_irg(irn);
if (mode_is_signed(mode)) {
ir_node *k_node;
ir_node *curr = left;
/* create the correction code for signed values only if there might be a remainder */
if (! get_Div_no_remainder(irn)) {
if (k != 1) {
- k_node = new_Const_long(mode_Iu, k - 1);
+ k_node = new_r_Const_long(irg, mode_Iu, k - 1);
curr = new_rd_Shrs(dbg, block, left, k_node, mode);
}
- k_node = new_Const_long(mode_Iu, bits - k);
+ k_node = new_r_Const_long(irg, mode_Iu, bits - k);
curr = new_rd_Shr(dbg, block, curr, k_node, mode);
curr = new_rd_Add(dbg, block, left, curr, mode);
k_node = left;
}
- k_node = new_Const_long(mode_Iu, k);
+ k_node = new_r_Const_long(irg, mode_Iu, k);
res = new_rd_Shrs(dbg, block, curr, k_node, mode);
if (n_flag) { /* negate the result */
ir_node *k_node;
- k_node = new_Const(get_mode_null(mode));
+ k_node = new_r_Const(irg, get_mode_null(mode));
res = new_rd_Sub(dbg, block, k_node, res, mode);
}
} else { /* unsigned case */
ir_node *k_node;
- k_node = new_Const_long(mode_Iu, k);
+ k_node = new_r_Const_long(irg, mode_Iu, k);
res = new_rd_Shr(dbg, block, left, k_node, mode);
}
} else {
}
if (k >= 0) {
+ ir_graph *irg = get_irn_irg(irn);
/* division by 2^k or -2^k:
* we use "modulus" here, so x % y == x % -y that's why is no difference between the case 2^k and -2^k
*/
ir_node *curr = left;
if (k != 1) {
- k_node = new_Const_long(mode_Iu, k - 1);
+ k_node = new_r_Const_long(irg, mode_Iu, k - 1);
curr = new_rd_Shrs(dbg, block, left, k_node, mode);
}
- k_node = new_Const_long(mode_Iu, bits - k);
+ k_node = new_r_Const_long(irg, mode_Iu, bits - k);
curr = new_rd_Shr(dbg, block, curr, k_node, mode);
curr = new_rd_Add(dbg, block, left, curr, mode);
- k_node = new_Const_long(mode, (-1) << k);
+ /* -(2^k) mask; shift unsigned to avoid UB of left-shifting a negative value */
+ k_node = new_r_Const_long(irg, mode, (long)(~0UL << k));
curr = new_rd_And(dbg, block, curr, k_node, mode);
res = new_rd_Sub(dbg, block, left, curr, mode);
} else { /* unsigned case */
ir_node *k_node;
- k_node = new_Const_long(mode, (1 << k) - 1);
+ k_node = new_r_Const_long(irg, mode, (1 << k) - 1);
res = new_rd_And(dbg, block, left, k_node, mode);
}
} else {
}
if (k >= 0) { /* division by 2^k or -2^k */
+ ir_graph *irg = get_irn_irg(irn);
if (mode_is_signed(mode)) {
ir_node *k_node, *c_k;
ir_node *curr = left;
if (k != 1) {
- k_node = new_Const_long(mode_Iu, k - 1);
+ k_node = new_r_Const_long(irg, mode_Iu, k - 1);
curr = new_rd_Shrs(dbg, block, left, k_node, mode);
}
- k_node = new_Const_long(mode_Iu, bits - k);
+ k_node = new_r_Const_long(irg, mode_Iu, bits - k);
curr = new_rd_Shr(dbg, block, curr, k_node, mode);
curr = new_rd_Add(dbg, block, left, curr, mode);
- c_k = new_Const_long(mode_Iu, k);
+ c_k = new_r_Const_long(irg, mode_Iu, k);
*div = new_rd_Shrs(dbg, block, curr, c_k, mode);
if (n_flag) { /* negate the div result */
- ir_node *k_node;
-
- k_node = new_Const(get_mode_null(mode));
+ ir_node *k_node = new_r_Const(irg, get_mode_null(mode));
*div = new_rd_Sub(dbg, block, k_node, *div, mode);
}
- k_node = new_Const_long(mode, (-1) << k);
+ /* -(2^k) mask; shift unsigned to avoid UB of left-shifting a negative value */
+ k_node = new_r_Const_long(irg, mode, (long)(~0UL << k));
curr = new_rd_And(dbg, block, curr, k_node, mode);
*mod = new_rd_Sub(dbg, block, left, curr, mode);
} else { /* unsigned case */
- ir_node *k_node;
-
- k_node = new_Const_long(mode_Iu, k);
+ ir_node *k_node = new_r_Const_long(irg, mode_Iu, k);
*div = new_rd_Shr(dbg, block, left, k_node, mode);
- k_node = new_Const_long(mode, (1 << k) - 1);
+ k_node = new_r_Const_long(irg, mode, (1 << k) - 1);
*mod = new_rd_And(dbg, block, left, k_node, mode);
}
} else {
IRN_VERIFY_IRG(res, irg);
return res;
-} /* new_bd_Start */
+}
static ir_node *new_bd_End(dbg_info *db, ir_node *block)
{
IRN_VERIFY_IRG(res, irg);
return res;
-} /* new_bd_End */
+}
/**
* Creates a Phi node with all predecessors. Calling this constructor
static ir_node *new_bd_Const(dbg_info *db, tarval *con)
{
ir_graph *irg = current_ir_graph;
-
return new_rd_Const_type(db, irg, con, firm_unknown_type);
} /* new_bd_Const */
static ir_node *new_bd_Const_long(dbg_info *db, ir_mode *mode, long value)
{
ir_graph *irg = current_ir_graph;
-
return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
} /* new_bd_Const_long */
/* private interfaces, for professional use only */
/* --------------------------------------------- */
-ir_node *new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
+ir_node *new_rd_Start(dbg_info *db, ir_node *block)
{
- ir_graph *rem = current_ir_graph;
- ir_node *res;
-
- current_ir_graph = irg;
- res = new_bd_Start(db, block);
- current_ir_graph = rem;
-
- return res;
-} /* new_rd_Start */
+ return new_bd_Start(db, block);
+}
-ir_node *new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
+ir_node *new_rd_End(dbg_info *db, ir_node *block)
{
- ir_node *res;
- ir_graph *rem = current_ir_graph;
-
- current_ir_graph = irg;
- res = new_bd_End(db, block);
- current_ir_graph = rem;
-
- return res;
-} /* new_rd_End */
+ return new_bd_End(db, block);
+}
/* Creates a Phi node with all predecessors. Calling this constructor
is only allowed if the corresponding block is mature. */
return res;
} /* new_rd_ASM */
-ir_node *new_r_Start(ir_graph *irg, ir_node *block)
+ir_node *new_r_Start(ir_node *block)
{
- return new_rd_Start(NULL, irg, block);
+ return new_rd_Start(NULL, block);
}
-ir_node *new_r_End(ir_graph *irg, ir_node *block)
+ir_node *new_r_End(ir_node *block)
{
- return new_rd_End(NULL, irg, block);
+ return new_rd_End(NULL, block);
}
ir_node *new_r_Const(ir_graph *irg, tarval *con)
{
{
ir_node *res;
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
op_Start, mode_T, 0, NULL);
ir_node *new_d_End(dbg_info *db)
{
ir_node *res;
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
op_End, mode_X, -1, NULL);
res = optimize_node(res);
res = known;
} else {
/* A undefined value, e.g., in unreachable code. */
- res = new_Bad();
+ res = new_r_Bad(irg);
}
} else {
res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
*/
static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
{
+ ir_graph *irg = current_ir_graph;
ir_node *prevBlock, *res, *phi0, *phi0_all;
int i;
Else we may not set graph_arr as there a later value is remembered. */
phi0 = NULL;
if (block->attr.block.graph_arr[pos] == NULL) {
- ir_graph *irg = current_ir_graph;
if (block == get_irg_start_block(irg)) {
/* Collapsing to Bad tarvals is no good idea.
block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
set_cur_block(rem);
} else {
- block->attr.block.graph_arr[pos] = new_Unknown(mode);
+ block->attr.block.graph_arr[pos] = new_r_Unknown(irg, mode);
}
return block->attr.block.graph_arr[pos];
} else {
if (is_Bad(prevCfOp)) {
/* In case a Cond has been optimized we would get right to the start block
with an invalid definition. */
- nin[i-1] = new_Bad();
+ nin[i-1] = new_r_Bad(irg);
continue;
}
prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
if (!is_Bad(prevBlock)) {
nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
} else {
- nin[i-1] = new_Bad();
+ nin[i-1] = new_r_Bad(irg);
}
}
ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
{
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
} /* new_d_Phi */
ir_node *new_d_Const(dbg_info *db, tarval *con)
{
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
return new_bd_Const(db, con);
} /* new_d_Const */
ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
{
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
return new_bd_Const_long(db, mode, value);
} /* new_d_Const_long */
ir_node *new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp)
{
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
return new_bd_Const_type(db, con, tp);
} /* new_d_Const_type */
{
ir_node *res;
assert(is_Cond(arg));
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
arg->attr.cond.default_proj = max_proj;
res = new_d_Proj(db, arg, mode_X, max_proj);
return res;
/* GL: objptr was called frame before. Frame was a bad choice for the name
as the operand could as well be a pointer to a dynamic object. */
{
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
return new_bd_Sel(db, current_ir_graph->current_block,
store, objptr, 0, NULL, ent);
} /* new_d_simpleSel */
ir_node *new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp)
{
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
value, kind, tp);
} /* new_d_SymConst_type */
ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind)
{
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
value, kind, firm_unknown_type);
} /* new_d_SymConst */
ir_node *new_d_Sync(dbg_info *db, int arity, ir_node *in[])
{
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
} /* new_d_Sync */
int n_outs, ir_asm_constraint *outputs, int n_clobber,
ident *clobber[], ident *text)
{
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
} /* new_d_ASM */
}
ir_node *new_Bad(void)
{
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
return get_irg_bad(current_ir_graph);
}
ir_node *new_NoMem(void)
{
+ assert(get_irg_phase_state(current_ir_graph) == phase_building);
return get_irg_no_mem(current_ir_graph);
}
ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
}
/* create a new anchor node */
-ir_node *new_Anchor(ir_graph *irg)
+ir_node *new_r_Anchor(ir_graph *irg)
{
ir_node *in[anchor_last];
ir_node *res;
/**
* Creates a new Anchor node.
*/
-ir_node *new_Anchor(ir_graph *irg);
+ir_node *new_r_Anchor(ir_graph *irg);
/**
* Allocate a frag array for a node if the current graph state is phase_building.
res->frame_type = new_type_frame();
/* the Anchor node must be created first */
- res->anchor = new_Anchor(res);
+ res->anchor = new_r_Anchor(res);
/*-- Nodes needed in every graph --*/
set_irg_end_block (res, new_immBlock());
- set_cur_block(get_irg_end_block(res));
- end = new_End();
- set_irg_end (res, end);
+ end = new_r_End(get_irg_end_block(res));
+ set_irg_end(res, end);
start_block = new_immBlock();
- set_cur_block(start_block);
set_irg_start_block(res, start_block);
bad = new_ir_node(NULL, res, start_block, op_Bad, mode_T, 0, NULL);
bad->attr.irg.irg = res;
set_irg_bad (res, bad);
set_irg_no_mem (res, new_ir_node(NULL, res, start_block, op_NoMem, mode_M, 0, NULL));
- start = new_Start();
+ start = new_r_Start(start_block);
set_irg_start (res, start);
/* Proj results of start node */
- projX = new_Proj(start, mode_X, pn_Start_X_initial_exec);
+ projX = new_r_Proj(start, mode_X, pn_Start_X_initial_exec);
set_irg_initial_exec (res, projX);
- set_irg_frame (res, new_Proj(start, mode_P_data, pn_Start_P_frame_base));
- set_irg_tls (res, new_Proj(start, mode_P_data, pn_Start_P_tls));
- set_irg_args (res, new_Proj(start, mode_T, pn_Start_T_args));
- initial_mem = new_Proj(start, mode_M, pn_Start_M);
+ set_irg_frame (res, new_r_Proj(start, mode_P_data, pn_Start_P_frame_base));
+ set_irg_tls (res, new_r_Proj(start, mode_P_data, pn_Start_P_tls));
+ set_irg_args (res, new_r_Proj(start, mode_T, pn_Start_T_args));
+ initial_mem = new_r_Proj(start, mode_M, pn_Start_M);
set_irg_initial_mem(res, initial_mem);
+ set_cur_block(start_block);
set_store(initial_mem);
res->index = get_irp_new_irg_idx();
res->graph_nr = get_irp_new_node_nr();
#endif
- mature_immBlock(res->current_block);
+ mature_immBlock(start_block);
/*-- Make a block to start with --*/
first_block = new_immBlock();
res->frame_type = NULL;
/* the Anchor node must be created first */
- res->anchor = new_Anchor(res);
+ res->anchor = new_r_Anchor(res);
/* -- The end block -- */
end_block = new_immBlock();
set_irg_end_block(res, end_block);
- set_cur_block(end_block);
- end = new_End();
- set_irg_end (res, end);
+ end = new_r_End(end_block);
+ set_irg_end(res, end);
mature_immBlock(end_block);
/* -- The start block -- */
- start_block = new_immBlock();
+ start_block = new_immBlock();
set_cur_block(start_block);
set_irg_start_block(res, start_block);
bad = new_ir_node(NULL, res, start_block, op_Bad, mode_T, 0, NULL);
set_irg_bad(res, bad);
no_mem = new_ir_node(NULL, res, start_block, op_NoMem, mode_M, 0, NULL);
set_irg_no_mem(res, no_mem);
- start = new_Start();
+ start = new_r_Start(start_block);
set_irg_start(res, start);
/* Proj results of start node */
- set_irg_initial_mem(res, new_Proj(start, mode_M, pn_Start_M));
- projX = new_Proj(start, mode_X, pn_Start_X_initial_exec);
+ set_irg_initial_mem(res, new_r_Proj(start, mode_M, pn_Start_M));
+ projX = new_r_Proj(start, mode_X, pn_Start_X_initial_exec);
mature_immBlock(start_block);
body_block = new_immBlock();
int line;
ir_type **fixedtypes;
struct obstack obst;
+ ir_graph *irg;
} io_env_t;
typedef enum typetag_t
{
ir_node *node = get_node_or_null(env, nodenr);
if (node == NULL) {
- node = new_Dummy(mode_X);
+ node = new_r_Dummy(env->irg, mode_X);
set_id(env, nodenr, node);
}
return node;
/** Parses the whole type graph. */
static int parse_typegraph(io_env_t *env)
{
+ ir_graph *old_irg = env->irg;
keyword_t kwkind;
EXPECT('{');
- current_ir_graph = get_const_code_irg();
+ env->irg = get_const_code_irg();
/* parse all types first */
while (true) {
break;
}
}
+ env->irg = old_irg;
return 1;
}
const char *nodename;
ir_node *node, *newnode;
- current_ir_graph = irg;
+ env->irg = irg;
EXPECT('{');
case iro_End:
{
ir_node *newendblock = get_node(env, preds[0]);
- newnode = get_irg_end(current_ir_graph);
+ newnode = get_irg_end(irg);
exchange(get_nodes_block(newnode), newendblock);
for (i = 0; i < numpreds - 1; i++)
add_irn_n(newnode, prednodes[i]);
case iro_Start:
{
ir_node *newstartblock = get_node(env, preds[0]);
- newnode = get_irg_start(current_ir_graph);
+ newnode = get_irg_start(irg);
exchange(get_nodes_block(newnode), newstartblock);
break;
}
goto endloop;
}
- newnode = new_Block(numpreds - 1, prednodes);
+ newnode = new_r_Block(irg, numpreds - 1, prednodes);
break;
}
case iro_Anchor:
- newnode = current_ir_graph->anchor;
+ newnode = irg->anchor;
for (i = 0; i < numpreds - 1; i++)
set_irn_n(newnode, i, prednodes[i]);
set_irn_n(newnode, -1, get_node(env, preds[0]));
long entnr = read_long(env);
union symconst_symbol sym;
sym.entity_p = get_entity(env, entnr);
- newnode = new_SymConst(mode_P, sym, symconst_addr_ent);
+ newnode = new_r_SymConst(irg, mode_P, sym, symconst_addr_ent);
break;
}
get_irn_dbg_info(n),
block,
a,
- new_Const_long(mode, 2),
+ new_r_Const_long(irg, mode, 2),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_A);
return n;
}
if (op == b) {
/* ~x + x = -1 */
- n = new_Const(get_mode_minus_one(mode));
+ n = new_r_Const(irg, get_mode_minus_one(mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X);
return n;
}
if (op == a) {
/* x + ~x = -1 */
- n = new_Const(get_mode_minus_one(mode));
+ n = new_r_Const(irg, get_mode_minus_one(mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X);
return n;
}
ir_node *mb = get_Mul_right(a);
if (ma == b) {
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_nodes_block(n);
+ ir_graph *irg = get_irn_irg(n);
n = new_rd_Mul(
get_irn_dbg_info(n),
blk,
get_irn_dbg_info(n),
blk,
mb,
- new_Const(get_mode_one(mode)),
+ new_r_Const(irg, get_mode_one(mode)),
mode),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
return n;
} else if (mb == b) {
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_nodes_block(n);
+ ir_graph *irg = get_irn_irg(n);
n = new_rd_Mul(
get_irn_dbg_info(n),
blk,
get_irn_dbg_info(n),
blk,
ma,
- new_Const(get_mode_one(mode)),
+ new_r_Const(irg, get_mode_one(mode)),
mode),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
tv = tarval_add(tv, get_mode_one(mode));
if (tv != tarval_bad) {
- ir_node *blk = get_nodes_block(n);
- ir_node *c = new_Const(tv);
+ ir_node *blk = get_nodes_block(n);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *c = new_r_Const(irg, tv);
n = new_rd_Add(get_irn_dbg_info(n), blk, get_Not_op(b), c, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_C_NOT_X);
return n;
value = n;
if (a == b && value_not_zero(a, &dummy)) {
+ ir_graph *irg = get_irn_irg(n);
/* BEWARE: we can optimize a/a to 1 only if this cannot cause a exception */
- value = new_Const(get_mode_one(mode));
+ value = new_r_Const(irg, get_mode_one(mode));
DBG_OPT_CSTEVAL(n, value);
goto make_tuple;
} else {
if (value != n) {
ir_node *mem, *blk;
+ ir_graph *irg;
make_tuple:
/* Turn Div into a tuple (mem, jmp, bad, value) */
mem = get_Div_mem(n);
blk = get_nodes_block(n);
+ irg = get_irn_irg(blk);
/* skip a potential Pin */
mem = skip_Pin(mem);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Div_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Div_X_except, new_r_Bad(irg));
set_Tuple_pred(n, pn_Div_res, value);
}
return n;
ir_mode *mode = get_Mod_resmode(n);
ir_node *a = get_Mod_left(n);
ir_node *b = get_Mod_right(n);
+ ir_graph *irg;
ir_node *value;
tarval *tv;
value = n;
tv = value_of(n);
+ irg = get_irn_irg(n);
if (tv != tarval_bad) {
- value = new_Const(tv);
+ value = new_r_Const(irg, tv);
DBG_OPT_CSTEVAL(n, value);
goto make_tuple;
if (a == b && value_not_zero(a, &dummy)) {
/* BEWARE: we can optimize a%a to 0 only if this cannot cause a exception */
- value = new_Const(get_mode_null(mode));
+ value = new_r_Const(irg, get_mode_null(mode));
DBG_OPT_CSTEVAL(n, value);
goto make_tuple;
} else {
if (tv == get_mode_minus_one(mode)) {
/* a % -1 = 0 */
- value = new_Const(get_mode_null(mode));
+ value = new_r_Const(irg, get_mode_null(mode));
DBG_OPT_CSTEVAL(n, value);
goto make_tuple;
}
if (value != n) {
ir_node *mem, *blk;
+ ir_graph *irg;
make_tuple:
/* Turn Mod into a tuple (mem, jmp, bad, value) */
mem = get_Mod_mem(n);
blk = get_nodes_block(n);
+ irg = get_irn_irg(blk);
/* skip a potential Pin */
mem = skip_Pin(mem);
turn_into_tuple(n, pn_Mod_max);
set_Tuple_pred(n, pn_Mod_M, mem);
set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Mod_X_except, new_r_Bad(irg));
set_Tuple_pred(n, pn_Mod_res, value);
}
return n;
ir_node *b = get_DivMod_right(n);
ir_mode *mode = get_DivMod_resmode(n);
ir_node *va, *vb;
+ ir_graph *irg = get_irn_irg(n);
tarval *ta, *tb;
int evaluated = 0;
if (tb != tarval_bad) {
if (tb == get_mode_one(get_tarval_mode(tb))) {
va = a;
- vb = new_Const(get_mode_null(mode));
+ vb = new_r_Const(irg, get_mode_null(mode));
DBG_OPT_CSTEVAL(n, vb);
goto make_tuple;
} else if (ta != tarval_bad) {
Jmp for X result!? */
resb = tarval_mod(ta, tb);
if (resb == tarval_bad) return n; /* Causes exception! */
- va = new_Const(resa);
- vb = new_Const(resb);
+ va = new_r_Const(irg, resa);
+ vb = new_r_Const(irg, resb);
DBG_OPT_CSTEVAL(n, va);
DBG_OPT_CSTEVAL(n, vb);
goto make_tuple;
} else if (mode_is_signed(mode) && tb == get_mode_minus_one(mode)) {
va = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), a, mode);
- vb = new_Const(get_mode_null(mode));
+ vb = new_r_Const(irg, get_mode_null(mode));
DBG_OPT_CSTEVAL(n, va);
DBG_OPT_CSTEVAL(n, vb);
goto make_tuple;
} else if (a == b) {
if (value_not_zero(a, &dummy)) {
/* a/a && a != 0 */
- va = new_Const(get_mode_one(mode));
- vb = new_Const(get_mode_null(mode));
+ va = new_r_Const(irg, get_mode_one(mode));
+ vb = new_r_Const(irg, get_mode_null(mode));
DBG_OPT_CSTEVAL(n, va);
DBG_OPT_CSTEVAL(n, vb);
goto make_tuple;
turn_into_tuple(n, pn_DivMod_max);
set_Tuple_pred(n, pn_DivMod_M, mem);
set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_DivMod_X_except, new_r_Bad(irg)); /*no exception*/
set_Tuple_pred(n, pn_DivMod_res_div, va);
set_Tuple_pred(n, pn_DivMod_res_mod, vb);
}
if (tv != tarval_bad &&
(tarval_ieee754_get_exact() || (get_irg_fp_model(get_irn_irg(n)) & fp_strict_algebraic) == 0)) {
ir_node *blk = get_nodes_block(n);
- ir_node *c = new_Const(tv);
+ ir_graph *irg = get_irn_irg(blk);
+ ir_node *c = new_r_Const(irg, tv);
ir_node *a = get_Quot_left(n);
ir_node *m = new_rd_Mul(get_irn_dbg_info(n), blk, a, c, mode);
ir_node *mem = get_Quot_mem(n);
turn_into_tuple(n, pn_Quot_max);
set_Tuple_pred(n, pn_Quot_M, mem);
set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Quot_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Quot_X_except, new_r_Bad(irg));
set_Tuple_pred(n, pn_Quot_res, m);
DBG_OPT_ALGSIM1(oldn, a, b, m, FS_OPT_FP_INV_MUL);
}
jmp = new_r_Jmp(blk);
turn_into_tuple(n, pn_Cond_max);
if (ta == tarval_b_true) {
- set_Tuple_pred(n, pn_Cond_false, new_Bad());
+ set_Tuple_pred(n, pn_Cond_false, new_r_Bad(irg));
set_Tuple_pred(n, pn_Cond_true, jmp);
} else {
set_Tuple_pred(n, pn_Cond_false, jmp);
- set_Tuple_pred(n, pn_Cond_true, new_Bad());
+ set_Tuple_pred(n, pn_Cond_true, new_r_Bad(irg));
}
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(irg), blk);
ir_node *op = get_Not_op(a);
tarval *tv = get_mode_one(mode);
ir_node *blk = get_nodes_block(n);
- ir_node *c = new_Const(tv);
+ ir_graph *irg = get_irn_irg(blk);
+ ir_node *c = new_r_Const(irg, tv);
n = new_rd_Add(get_irn_dbg_info(n), blk, op, c, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_NOT);
return n;
if (tv != tarval_bad) {
tv = tarval_neg(tv);
if (tv != tarval_bad) {
- ir_node *cnst = new_Const(tv);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *cnst = new_r_Const(irg, tv);
dbg_info *dbg = get_irn_dbg_info(a);
ir_node *block = get_nodes_block(a);
n = new_rd_Mul(dbg, block, mul_l, cnst, mode);
case pn_Div_X_regular:
return new_r_Jmp(get_nodes_block(div));
- case pn_Div_X_except:
+ case pn_Div_X_except: {
+ ir_graph *irg = get_irn_irg(proj);
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
- return new_Bad();
+ return new_r_Bad(irg);
+ }
case pn_Div_M: {
ir_graph *irg = get_irn_irg(proj);
case pn_Mod_X_regular:
return new_r_Jmp(get_irn_n(mod, -1));
- case pn_Mod_X_except:
+ case pn_Mod_X_except: {
+ ir_graph *irg = get_irn_irg(proj);
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
- return new_Bad();
+ return new_r_Bad(irg);
+ }
case pn_Mod_M: {
ir_graph *irg = get_irn_irg(proj);
case pn_Mod_res:
if (get_Mod_left(mod) == b) {
/* a % a = 0 if a != 0 */
- ir_mode *mode = get_irn_mode(proj);
- ir_node *res = new_Const(get_mode_null(mode));
+ ir_graph *irg = get_irn_irg(proj);
+ ir_mode *mode = get_irn_mode(proj);
+ ir_node *res = new_r_Const(irg, get_mode_null(mode));
DBG_OPT_CSTEVAL(mod, res);
return res;
case pn_DivMod_X_regular:
return new_r_Jmp(get_nodes_block(divmod));
- case pn_DivMod_X_except:
+ case pn_DivMod_X_except: {
/* we found an exception handler, remove it */
+ ir_graph *irg = get_irn_irg(proj);
DBG_OPT_EXC_REM(proj);
- return new_Bad();
+ return new_r_Bad(irg);
+ }
case pn_DivMod_M: {
ir_graph *irg = get_irn_irg(proj);
case pn_DivMod_res_mod:
if (get_DivMod_left(divmod) == b) {
/* a % a = 0 if a != 0 */
- ir_mode *mode = get_irn_mode(proj);
- ir_node *res = new_Const(get_mode_null(mode));
+ ir_graph *irg = get_irn_irg(proj);
+ ir_mode *mode = get_irn_mode(proj);
+ ir_node *res = new_r_Const(irg, get_mode_null(mode));
DBG_OPT_CSTEVAL(divmod, res);
return res;
/**
* Create a 0 constant of given mode.
*/
-static ir_node *create_zero_const(ir_mode *mode)
+static ir_node *create_zero_const(ir_graph *irg, ir_mode *mode)
{
- tarval *tv = get_mode_null(mode);
- ir_node *cnst = new_Const(tv);
+ tarval *tv = get_mode_null(mode);
+ ir_node *cnst = new_r_Const(irg, tv);
return cnst;
}
*/
static ir_node *transform_node_Proj_Cmp(ir_node *proj)
{
- ir_node *n = get_Proj_pred(proj);
- ir_node *left = get_Cmp_left(n);
- ir_node *right = get_Cmp_right(n);
+ ir_node *n = get_Proj_pred(proj);
+ ir_node *left = get_Cmp_left(n);
+ ir_node *right = get_Cmp_right(n);
tarval *tv = NULL;
int changed = 0;
ir_mode *mode = NULL;
/* we can evaluate some cases directly */
switch (proj_nr) {
- case pn_Cmp_False:
- return new_Const(get_tarval_b_false());
- case pn_Cmp_True:
- return new_Const(get_tarval_b_true());
+ case pn_Cmp_False: {
+ ir_graph *irg = get_irn_irg(proj);
+ return new_r_Const(irg, get_tarval_b_false());
+ }
+ case pn_Cmp_True: {
+ ir_graph *irg = get_irn_irg(proj);
+ return new_r_Const(irg, get_tarval_b_true());
+ }
case pn_Cmp_Leg:
- if (!mode_is_float(get_irn_mode(left)))
- return new_Const(get_tarval_b_true());
+ if (!mode_is_float(get_irn_mode(left))) {
+ ir_graph *irg = get_irn_irg(proj);
+ return new_r_Const(irg, get_tarval_b_true());
+ }
break;
default:
break;
lr = tmp;
}
if (ll == right) {
+ ir_graph *irg = get_irn_irg(n);
left = lr;
- right = create_zero_const(get_irn_mode(left));
+ right = create_zero_const(irg, get_irn_mode(left));
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
}
rr = tmp;
}
if (rl == left) {
+ ir_graph *irg = get_irn_irg(n);
left = rr;
- right = create_zero_const(get_irn_mode(left));
+ right = create_zero_const(irg, get_irn_mode(left));
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
}
ir_node *llr = get_Shr_right(ll);
if (is_Const(llr)) {
dbg_info *dbg = get_irn_dbg_info(left);
+ ir_graph *irg = get_irn_irg(left);
tarval *c1 = get_Const_tarval(llr);
tarval *c2 = get_Const_tarval(lr);
tarval *mask = tarval_shl(c2, c1);
tarval *value = tarval_shl(c3, c1);
- left = new_rd_And(dbg, block, get_Shr_left(ll), new_Const(mask), mode);
- right = new_Const(value);
+ left = new_rd_And(dbg, block, get_Shr_left(ll), new_r_Const(irg, mask), mode);
+ right = new_r_Const(irg, value);
changed |= 1;
}
}
tarval *mask = tarval_and(get_Const_tarval(c1), tv);
if (mask != tv) {
/* TODO: move to constant evaluation */
+ ir_graph *irg = get_irn_irg(n);
tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
- c1 = new_Const(tv);
+ c1 = new_r_Const(irg, tv);
DBG_OPT_CSTEVAL(proj, c1);
return c1;
}
*/
if (! tarval_is_null(get_Const_tarval(c1))) {
/* TODO: move to constant evaluation */
+ ir_graph *irg = get_irn_irg(n);
tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
- c1 = new_Const(tv);
+ c1 = new_r_Const(irg, tv);
DBG_OPT_CSTEVAL(proj, c1);
return c1;
}
*/
c1 = get_Shl_right(left);
if (is_Const(c1)) {
+ ir_graph *irg = get_irn_irg(c1);
tarval *tv1 = get_Const_tarval(c1);
ir_mode *mode = get_irn_mode(left);
tarval *minus1 = get_mode_all_one(mode);
if (tarval_and(tv, cmask) != tv) {
/* condition not met */
tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
- c1 = new_Const(tv);
+ c1 = new_r_Const(irg, tv);
DBG_OPT_CSTEVAL(proj, c1);
return c1;
}
sl = get_Shl_left(left);
blk = get_nodes_block(n);
- left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_Const(amask), mode);
+ left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_r_Const(irg, amask), mode);
tv = tarval_shr(tv, tv1);
changed |= 2;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
*/
c1 = get_Shr_right(left);
if (is_Const(c1)) {
+ ir_graph *irg = get_irn_irg(c1);
tarval *tv1 = get_Const_tarval(c1);
ir_mode *mode = get_irn_mode(left);
tarval *minus1 = get_mode_all_one(mode);
if (tarval_and(tv, cmask) != tv) {
/* condition not met */
tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
- c1 = new_Const(tv);
+ c1 = new_r_Const(irg, tv);
DBG_OPT_CSTEVAL(proj, c1);
return c1;
}
sl = get_Shr_left(left);
blk = get_nodes_block(n);
- left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_Const(amask), mode);
+ left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_r_Const(irg, amask), mode);
tv = tarval_shl(tv, tv1);
changed |= 2;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
*/
c1 = get_Shrs_right(left);
if (is_Const(c1)) {
+ ir_graph *irg = get_irn_irg(c1);
tarval *tv1 = get_Const_tarval(c1);
ir_mode *mode = get_irn_mode(left);
tarval *minus1 = get_mode_all_one(mode);
if (!tarval_is_all_one(cond) && !tarval_is_null(cond)) {
/* condition not met */
tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
- c1 = new_Const(tv);
+ c1 = new_r_Const(irg, tv);
DBG_OPT_CSTEVAL(proj, c1);
return c1;
}
sl = get_Shrs_left(left);
blk = get_nodes_block(n);
- left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_Const(amask), mode);
+ left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_r_Const(irg, amask), mode);
tv = tarval_shl(tv, tv1);
changed |= 2;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
} /* tarval != bad */
}
- if (changed & 2) /* need a new Const */
- right = new_Const(tv);
+ if (changed & 2) { /* need a new Const */
+ ir_graph *irg = get_irn_irg(n);
+ right = new_r_Const(irg, tv);
+ }
if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) && is_Const(right) && is_Const_null(right) && is_Proj(left)) {
ir_node *op = get_Proj_pred(left);
/* special case: (x % 2^n) CMP 0 ==> x & (2^n-1) CMP 0 */
ir_node *v = get_binop_left(op);
ir_node *blk = get_irn_n(op, -1);
+ ir_graph *irg = get_irn_irg(op);
ir_mode *mode = get_irn_mode(v);
tv = tarval_sub(tv, get_mode_one(mode), NULL);
- left = new_rd_And(get_irn_dbg_info(op), blk, v, new_Const(tv), mode);
+ left = new_rd_And(get_irn_dbg_info(op), blk, v, new_r_Const(irg, tv), mode);
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_MOD_TO_AND);
}
/* Beware of Phi0 */
if (n > 0) {
ir_node *pred = get_irn_n(phi, 0);
- ir_node *bound, *new_Phi, *block, **in;
+ ir_node *bound, *new_phi, *block, **in;
pn_Cmp pnc;
if (! is_Confirm(pred))
}
/* move the Confirm nodes "behind" the Phi */
block = get_irn_n(phi, -1);
- new_Phi = new_r_Phi(block, n, in, get_irn_mode(phi));
- return new_r_Confirm(block, new_Phi, bound, pnc);
+ new_phi = new_r_Phi(block, n, in, get_irn_mode(phi));
+ return new_r_Confirm(block, new_phi, bound, pnc);
}
}
return phi;
tarval *tv1, *tv2, *tv3, *tv4, *tv;
for (;;) {
+ ir_graph *irg;
get_comm_Binop_Ops(or, &and, &c1);
if (!is_Const(c1) || !is_And(and))
return or;
/* ok, all conditions met */
block = get_irn_n(or, -1);
+ irg = get_irn_irg(block);
- new_and = new_r_And(block, value, new_Const(tarval_and(tv4, tv2)), mode);
+ new_and = new_r_And(block, value, new_r_Const(irg, tarval_and(tv4, tv2)), mode);
- new_const = new_Const(tarval_or(tv3, tv1));
+ new_const = new_r_Const(irg, tarval_or(tv3, tv1));
set_Or_left(or, new_and);
set_Or_right(or, new_const);
ir_mode *mode;
tarval *tv1, *tv2, *res;
ir_node *in[2], *irn, *block;
+ ir_graph *irg;
left = get_binop_left(n);
res = tarval_add(tv1, tv2);
mode = get_irn_mode(n);
+ irg = get_irn_irg(n);
/* beware: a simple replacement works only, if res < modulo shift */
if (!is_Rotl(n)) {
ir_node *block = get_nodes_block(n);
dbg_info *dbgi = get_irn_dbg_info(n);
ir_mode *smode = get_irn_mode(right);
- ir_node *cnst = new_Const_long(smode, get_mode_size_bits(mode) - 1);
+ ir_node *cnst = new_r_Const_long(irg, smode, get_mode_size_bits(mode) - 1);
return new_rd_Shrs(dbgi, block, get_binop_left(left), cnst, mode);
}
- return new_Const(get_mode_null(mode));
+ return new_r_Const(irg, get_mode_null(mode));
}
}
} else {
block = get_nodes_block(n);
in[0] = get_binop_left(left);
- in[1] = new_Const(res);
+ in[1] = new_r_Const(irg, res);
irn = new_ir_node(NULL, get_Block_irg(block), block, get_irn_op(n), mode, 2, in);
ir_op *op_left;
ir_node *block;
dbg_info *dbgi;
+ ir_graph *irg;
ir_node *new_shift;
ir_node *new_bitop;
ir_node *new_const;
}
assert(get_tarval_mode(tv_shift) == mode);
- new_const = new_Const(tv_shift);
+ irg = get_irn_irg(n);
+ new_const = new_r_Const(irg, tv_shift);
if (op_left == op_And) {
new_bitop = new_rd_And(dbgi, block, new_shift, new_const, mode);
tarval *tv_shr;
tarval *tv_shift;
tarval *tv_mask;
+ ir_graph *irg;
pn_Cmp pnc;
int need_shrs = 0;
assert(get_tarval_mode(tv_mask) == mode);
block = get_nodes_block(n);
+ irg = get_irn_irg(block);
dbgi = get_irn_dbg_info(n);
pnc = tarval_cmp(tv_shl, tv_shr);
if (pnc == pn_Cmp_Lt || pnc == pn_Cmp_Eq) {
tv_shift = tarval_sub(tv_shr, tv_shl, NULL);
- new_const = new_Const(tv_shift);
+ new_const = new_r_Const(irg, tv_shift);
if (need_shrs) {
new_shift = new_rd_Shrs(dbgi, block, x, new_const, mode);
} else {
} else {
assert(pnc == pn_Cmp_Gt);
tv_shift = tarval_sub(tv_shl, tv_shr, NULL);
- new_const = new_Const(tv_shift);
+ new_const = new_r_Const(irg, tv_shift);
new_shift = new_rd_Shl(dbgi, block, x, new_const, mode);
}
- new_const = new_Const(tv_mask);
+ new_const = new_r_Const(irg, tv_mask);
new_and = new_rd_And(dbgi, block, new_shift, new_const, mode);
return new_and;
/* remove garbage blocks by looking at control flow that leaves the block
and replacing the control flow by Bad. */
if (get_irn_mode(node) == mode_X) {
- ir_node *block = get_nodes_block(skip_Proj(node));
+ ir_node *block = get_nodes_block(skip_Proj(node));
+ ir_graph *irg = get_irn_irg(block);
/* Don't optimize nodes in immature blocks. */
if (!get_Block_matured(block))
if (is_Block(block)) {
if (is_Block_dead(block)) {
/* control flow from dead block is dead */
- return new_Bad();
+ return new_r_Bad(irg);
}
for (i = get_irn_arity(block) - 1; i >= 0; --i) {
* but can be found by irg_walk()!
*/
set_Block_dead(block);
- return new_Bad();
+ return new_r_Bad(irg);
}
}
}
/* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
blocks predecessors is dead. */
if (op != op_Block && op != op_Phi && op != op_Tuple) {
+ ir_graph *irg = get_irn_irg(node);
irn_arity = get_irn_arity(node);
/*
*/
if (is_irn_pinned_in_irg(node) &&
is_Block_dead(get_nodes_block(skip_Proj(node))))
- return new_Bad();
+ return new_r_Bad(irg);
for (i = 0; i < irn_arity; i++) {
ir_node *pred = get_irn_n(node, i);
if (is_Bad(pred))
- return new_Bad();
+ return new_r_Bad(irg);
#if 0
/* Propagating Unknowns here seems to be a bad idea, because
sometimes we need a node as a input and did not want that
However, it might be useful to move this into a later phase
(if you think that optimizing such code is useful). */
if (is_Unknown(pred) && mode_is_data(get_irn_mode(node)))
- return new_Unknown(get_irn_mode(node));
+ return new_r_Unknown(irg, get_irn_mode(node));
#endif
}
}
for (i = 0; i < irn_arity; i++) {
if (!is_Bad(get_irn_n(node, i))) break;
}
- if (i == irn_arity) node = new_Bad();
+ if (i == irn_arity) node = new_r_Bad(irg);
}
#endif
return node;
/* evaluation was successful -- replace the node. */
irg_kill_node(irg, n);
- nw = new_Const(tv);
+ nw = new_r_Const(irg, tv);
if (old_tp && get_type_mode(old_tp) == get_tarval_mode(tv))
set_Const_type(nw, old_tp);
if (tv != tarval_bad) {
/* evaluation was successful -- replace the node. */
ir_type *old_tp = get_irn_type(n);
+ ir_graph *irg = get_irn_irg(n);
int i, arity = get_irn_arity(n);
/*
for (i = 0; i < arity && !old_tp; ++i)
old_tp = get_irn_type(get_irn_n(n, i));
- n = new_Const(tv);
+ n = new_r_Const(irg, tv);
if (old_tp && get_type_mode(old_tp) == get_tarval_mode(tv))
set_Const_type(n, old_tp);
int filename_len = strlen(filename)+1;
ident *cur_ident;
unsigned align_l, align_n, size;
- ir_graph *rem;
block_id_walker_data_t wd;
symconst_symbol sym;
}
for (n = get_irp_n_irgs() - 1; n >= 0; --n) {
- ir_graph *irg = get_irp_irg(n);
- int i;
- ir_node *endbb = get_irg_end_block(irg);
- fix_env env;
-
- set_current_ir_graph(irg);
+ ir_graph *irg = get_irp_irg(n);
+ ir_node *endbb = get_irg_end_block(irg);
+ int i;
+ fix_env env;
/* generate a symbolic constant pointing to the count array */
sym.entity_p = bblock_counts;
wd.symconst = new_r_SymConst(irg, mode_P_data, sym, symconst_addr_ent);
irg_block_walk_graph(irg, block_id_walker, NULL, &wd);
- env.end_block = get_irg_end_block(irg);
+ env.end_block = get_irg_end_block(irg);
irg_block_walk_graph(irg, fix_ssa, NULL, &env);
for (i = get_Block_n_cfgpreds(endbb) - 1; i >= 0; --i) {
ir_node *node = skip_Proj(get_Block_cfgpred(endbb, i));
set_array_entity_values(bblock_id, tarval_array, n_blocks);
if (flags & profile_with_locations) {
+ ir_graph *irg = get_const_code_irg();
/* build the initializer for the locations */
- rem = current_ir_graph;
- current_ir_graph = get_const_code_irg();
ent = get_array_element_entity(loc_type);
set_entity_linkage(ent_locations, IR_LINKAGE_CONSTANT);
for (i = 0; i < n_blocks; ++i) {
set_compound_graph_path_node(path, 0, ent);
set_compound_graph_path_node(path, 1, loc_lineno);
tv = new_tarval_from_long(wd.locs[i].lineno, mode_Iu);
- add_compound_ent_value_w_path(ent_locations, new_Const(tv), path);
+ add_compound_ent_value_w_path(ent_locations, new_r_Const(irg, tv), path);
/* name */
path = new_compound_graph_path(loc_type, 2);
set_compound_graph_path_node(path, 1, loc_name);
if (wd.locs[i].fname) {
sym.entity_p = wd.locs[i].fname;
- n = new_SymConst(mode_P_data, sym, symconst_addr_ent);
+ n = new_r_SymConst(irg, mode_P_data, sym, symconst_addr_ent);
} else {
- n = new_Const(get_mode_null(mode_P_data));
+ n = new_r_Const(irg, get_mode_null(mode_P_data));
}
add_compound_ent_value_w_path(ent_locations, n, path);
}
pmap_destroy(wd.fname_map);
- current_ir_graph = rem;
}
return gen_initializer_irg(ent_filename, bblock_id, bblock_counts, n_blocks);
}
ir_node *right = get_Rotl_right(node);
ir_node *left, *shl, *shr, *or, *block, *sub, *c;
ir_mode *omode, *rmode;
+ ir_graph *irg;
dbg_info *dbg;
optimization_state_t state;
}
/* replace the Rotl(x,y) by an Or(Shl(x,y), Shr(x,64-y)) and lower those */
+ irg = get_irn_irg(node);
dbg = get_irn_dbg_info(node);
omode = get_irn_mode(node);
left = get_Rotl_left(node);
block = get_nodes_block(node);
shl = new_rd_Shl(dbg, block, left, right, omode);
rmode = get_irn_mode(right);
- c = new_Const_long(rmode, get_mode_size_bits(omode));
+ c = new_r_Const_long(irg, rmode, get_mode_size_bits(omode));
sub = new_rd_Sub(dbg, block, c, right, rmode);
shr = new_rd_Shr(dbg, block, left, sub, omode);
ir_node *low = new_r_Conv(block, lentry->low_word, mode);
ir_node *high = new_r_Conv(block, lentry->high_word, mode);
ir_node *or = new_rd_Or(dbg, block, low, high, mode);
- ir_node *cmp = new_rd_Cmp(dbg, block, or, new_Const_long(mode, 0));
+ ir_node *cmp = new_rd_Cmp(dbg, block, or, new_r_Const_long(irg, mode, 0));
ir_node *proj = new_r_Proj(cmp, mode_b, pnc);
set_Cond_selector(node, proj);
if (mode_is_signed(imode)) {
int c = get_mode_size_bits(low_signed) - 1;
- ir_node *cnst = new_Const_long(low_unsigned, c);
+ ir_node *cnst = new_r_Const_long(irg, low_unsigned, c);
if (get_irn_mode(op) != low_signed)
op = new_rd_Conv(dbg, block, op, low_signed);
entry->high_word = new_rd_Shrs(dbg, block, op, cnst,
low_signed);
} else {
- entry->high_word = new_Const(get_mode_null(low_signed));
+ entry->high_word = new_r_Const(irg, get_mode_null(low_signed));
}
}
} else if (imode == mode_b) {
entry->low_word = new_rd_Conv(dbg, block, op, low_unsigned);
- entry->high_word = new_Const(get_mode_null(low_signed));
+ entry->high_word = new_r_Const(irg, get_mode_null(low_signed));
} else {
ir_node *irn, *call;
ir_type *mtp = get_conv_type(imode, omode, env);
assert(irg == current_ir_graph);
if (! is_Unknown(lb))
- lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb), mode_Int);
+ lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
else
lb = NULL;
if (! is_Unknown(ub))
- ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub), mode_Int);
+ ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
else
ub = NULL;
tarval *tv;
ir_enum_const *ec;
ir_mode *mode;
+ ir_graph *irg;
switch (get_SymConst_kind(symc)) {
case symconst_type_tag:
break;
case symconst_type_size:
/* rewrite the SymConst node by a Const node */
+ irg = get_irn_irg(symc);
tp = get_SymConst_type(symc);
assert(get_type_state(tp) == layout_fixed);
mode = get_irn_mode(symc);
- newn = new_Const_long(mode, get_type_size_bytes(tp));
+ newn = new_r_Const_long(irg, mode, get_type_size_bytes(tp));
assert(newn);
/* run the hooks */
hook_lower(symc);
break;
case symconst_type_align:
/* rewrite the SymConst node by a Const node */
+ irg = get_irn_irg(symc);
tp = get_SymConst_type(symc);
assert(get_type_state(tp) == layout_fixed);
mode = get_irn_mode(symc);
- newn = new_Const_long(mode, get_type_alignment_bytes(tp));
+ newn = new_r_Const_long(irg, mode, get_type_alignment_bytes(tp));
assert(newn);
/* run the hooks */
hook_lower(symc);
break;
case symconst_ofs_ent:
/* rewrite the SymConst node by a Const node */
+ irg = get_irn_irg(symc);
ent = get_SymConst_entity(symc);
assert(get_type_state(get_entity_type(ent)) == layout_fixed);
mode = get_irn_mode(symc);
- newn = new_Const_long(mode, get_entity_offset(ent));
+ newn = new_r_Const_long(irg, mode, get_entity_offset(ent));
assert(newn);
/* run the hooks */
hook_lower(symc);
break;
case symconst_enum_const:
/* rewrite the SymConst node by a Const node */
+ irg = get_irn_irg(symc);
ec = get_SymConst_enum(symc);
assert(get_type_state(get_enumeration_owner(ec)) == layout_fixed);
tv = get_enumeration_value(ec);
- newn = new_Const(tv);
+ newn = new_r_Const(irg, tv);
assert(newn);
/* run the hooks */
hook_lower(symc);
{
ir_node *sel = get_Load_ptr(load);
ir_node *block, *n_proj, *res, *ptr;
+ ir_graph *irg;
ir_entity *ent;
ir_type *bf_type;
ir_mode *bf_mode, *mode;
*/
/* abandon bitfield sel */
+ irg = get_irn_irg(sel);
ptr = get_Sel_ptr(sel);
db = get_irn_dbg_info(sel);
- ptr = new_rd_Add(db, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
+ ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr));
set_Load_ptr(load, ptr);
set_Load_mode(load, mode);
int shift_count_down = bits - bf_bits;
if (shift_count_up) {
- res = new_r_Shl(block, res, new_Const_long(mode_Iu, shift_count_up), mode);
+ res = new_r_Shl(block, res, new_r_Const_long(irg, mode_Iu, shift_count_up), mode);
}
if (shift_count_down) {
- res = new_r_Shrs(block, res, new_Const_long(mode_Iu, shift_count_down), mode);
+ res = new_r_Shrs(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode);
}
} else { /* unsigned */
int shift_count_down = bit_offset;
unsigned mask = ((unsigned)-1) >> (bits - bf_bits);
if (shift_count_down) {
- res = new_r_Shr(block, res, new_Const_long(mode_Iu, shift_count_down), mode);
+ res = new_r_Shr(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode);
}
if (bits != bf_bits) {
- res = new_r_And(block, res, new_Const_long(mode, mask), mode);
+ res = new_r_And(block, res, new_r_Const_long(irg, mode, mask), mode);
}
}
ir_type *bf_type;
ir_mode *bf_mode, *mode;
ir_node *mem, *irn, *block;
+ ir_graph *irg;
unsigned mask, neg_mask;
int bf_bits, bits_mask, offset, bit_offset;
dbg_info *db;
neg_mask = ~mask;
/* abandon bitfield sel */
+ irg = get_irn_irg(sel);
ptr = get_Sel_ptr(sel);
db = get_irn_dbg_info(sel);
- ptr = new_rd_Add(db, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
+ ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr));
if (neg_mask) {
/* there are some bits, normal case */
mem = new_r_Proj(irn, mode_M, pn_Load_M);
irn = new_r_Proj(irn, mode, pn_Load_res);
- irn = new_r_And(block, irn, new_Const_long(mode, neg_mask), mode);
+ irn = new_r_And(block, irn, new_r_Const_long(irg, mode, neg_mask), mode);
if (bit_offset > 0) {
- value = new_r_Shl(block, value, new_Const_long(mode_Iu, bit_offset), mode);
+ value = new_r_Shl(block, value, new_r_Const_long(irg, mode_Iu, bit_offset), mode);
}
- value = new_r_And(block, value, new_Const_long(mode, mask), mode);
+ value = new_r_And(block, value, new_r_Const_long(irg, mode, mask), mode);
value = new_r_Or(block, value, irn, mode);
}
*/
static void replace_call(ir_node *irn, ir_node *call, ir_node *mem, ir_node *reg_jmp, ir_node *exc_jmp)
{
- ir_node *block = get_nodes_block(call);
+ ir_node *block = get_nodes_block(call);
+ ir_graph *irg = get_irn_irg(block);
if (reg_jmp == NULL) {
set_opt_cse(0);
reg_jmp = new_r_Jmp(block);
set_opt_cse(old_cse);
- exc_jmp = new_Bad();
+ exc_jmp = new_r_Bad(irg);
}
irn = new_r_Tuple(block, 1, &irn);
set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
set_Tuple_pred(call, pn_Call_T_result, irn);
- set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
+ set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
} /* replace_call */
/* A mapper for the integer abs. */
int i_mapper_pow(ir_node *call, void *ctx)
{
dbg_info *dbg;
- ir_node *mem;
- ir_node *left = get_Call_param(call, 0);
- ir_node *right = get_Call_param(call, 1);
- ir_node *block = get_nodes_block(call);
- ir_node *irn, *reg_jmp = NULL, *exc_jmp = NULL;
+ ir_node *mem;
+ ir_node *left = get_Call_param(call, 0);
+ ir_node *right = get_Call_param(call, 1);
+ ir_node *block = get_nodes_block(call);
+ ir_graph *irg = get_irn_irg(block);
+ ir_node *irn, *reg_jmp = NULL, *exc_jmp = NULL;
(void) ctx;
if (is_Const(left) && is_Const_one(left)) {
if (tarval_is_null(tv)) {
/* pow(x, 0.0) = 1.0 */
ir_mode *mode = get_tarval_mode(tv);
- irn = new_Const(get_mode_one(mode));
+ irn = new_r_Const(irg, get_mode_one(mode));
} else if (tarval_is_one(tv)) {
/* pow(x, 1.0) = x */
irn = left;
ir_mode *mode = get_irn_mode(left);
ir_node *quot;
- irn = new_Const(get_mode_one(mode));
+ irn = new_r_Const(irg, get_mode_one(mode));
quot = new_rd_Quot(dbg, block, mem, irn, left, mode, op_pin_state_pinned);
mem = new_r_Proj(quot, mode_M, pn_Quot_M);
irn = new_r_Proj(quot, mode, pn_Quot_res);
if (is_Const(val) && is_Const_null(val)) {
/* exp(0.0) = 1.0 */
+ ir_graph *irg = get_irn_irg(val);
ir_mode *mode = get_irn_mode(val);
- ir_node *irn = new_Const(get_mode_one(mode));
+ ir_node *irn = new_r_Const(irg, get_mode_one(mode));
ir_node *mem = get_Call_mem(call);
DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_EXP);
replace_call(irn, call, mem, NULL, NULL);
if (is_Const(val) && is_Const_one(val)) {
/* acos(1.0) = 0.0 */
- ir_mode *mode = get_irn_mode(val);
- ir_node *irn = new_Const(get_mode_null(mode));
- ir_node *mem = get_Call_mem(call);
+ ir_graph *irg = get_irn_irg(val);
+ ir_mode *mode = get_irn_mode(val);
+ ir_node *irn = new_r_Const(irg, get_mode_null(mode));
+ ir_node *mem = get_Call_mem(call);
DBG_OPT_ALGSIM0(call, irn, reason);
replace_call(irn, call, mem, NULL, NULL);
return 1;
if (is_Const(val) && is_Const_null(val)) {
/* f(0.0) = 1.0 */
+ ir_graph *irg = get_irn_irg(val);
ir_mode *mode = get_irn_mode(val);
- ir_node *irn = new_Const(get_mode_one(mode));
+ ir_node *irn = new_r_Const(irg, get_mode_one(mode));
ir_node *mem = get_Call_mem(call);
DBG_OPT_ALGSIM0(call, irn, reason);
replace_call(irn, call, mem, NULL, NULL);
* @return a Const node containing the strlen() result or NULL
* if the evaluation fails
*/
-static ir_node *eval_strlen(ir_entity *ent, ir_type *res_tp)
+static ir_node *eval_strlen(ir_graph *irg, ir_entity *ent, ir_type *res_tp)
{
ir_type *tp = get_entity_type(ent);
ir_mode *mode;
}
if (len >= 0) {
tarval *tv = new_tarval_from_long(len, get_type_mode(res_tp));
- return new_Const_type(tv, res_tp);
+ return new_r_Const_type(irg, tv, res_tp);
}
return NULL;
}
ir_initializer_t *val = get_initializer_compound_value(initializer, i);
if (initializer_val_is_null(val)) {
tarval *tv = new_tarval_from_long(i, get_type_mode(res_tp));
- return new_Const_type(tv, res_tp);
+ return new_r_Const_type(irg, tv, res_tp);
}
}
ir_node *irn;
tp = get_method_res_type(tp, 0);
- irn = eval_strlen(ent, tp);
+ irn = eval_strlen(get_irn_irg(call), ent, tp);
if (irn) {
ir_node *mem = get_Call_mem(call);
* @return a Const node containing the strcmp() result or NULL
* if the evaluation fails
*/
-static ir_node *eval_strcmp(ir_entity *left, ir_entity *right, ir_type *res_tp)
+static ir_node *eval_strcmp(ir_graph *irg, ir_entity *left, ir_entity *right,
+ ir_type *res_tp)
{
ir_type *tp;
ir_mode *mode;
if (i < n) {
/* we found an end */
tarval *tv = new_tarval_from_long(res, get_type_mode(res_tp));
- return new_Const_type(tv, res_tp);
+ return new_r_Const_type(irg, tv, res_tp);
}
return NULL;
}
if (left == right) {
/* a strcmp(s, s) ==> 0 */
+ ir_graph *irg = get_irn_irg(call);
ir_node *mem = get_Call_mem(call);
ir_mode *mode = get_type_mode(res_tp);
- irn = new_Const(get_mode_null(mode));
+ irn = new_r_Const(irg, get_mode_null(mode));
DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_STRCMP);
replace_call(irn, call, mem, NULL, NULL);
return 1;
if (ent_l != NULL && ent_r != NULL) {
/* both entities are const, try to evaluate */
- irn = eval_strcmp(ent_l, ent_r, res_tp);
+ irn = eval_strcmp(get_irn_irg(call), ent_l, ent_r, res_tp);
} else if (ent_l != NULL) {
if (is_empty_string(ent_l)) {
/* s strcmp("", s) ==> -(*s)*/
if (left == right || (is_Const(len) && is_Const_null(len))) {
/* a strncmp(s, s, len) ==> 0 OR
a strncmp(a, b, 0) ==> 0 */
+ ir_graph *irg = get_irn_irg(call);
ir_node *mem = get_Call_mem(call);
ir_node *adr = get_Call_ptr(call);
ir_entity *ent = get_SymConst_entity(adr);
ir_type *res_tp = get_method_res_type(call_tp, 0);
ir_mode *mode = get_type_mode(res_tp);
- irn = new_Const(get_mode_null(mode));
+ irn = new_r_Const(irg, get_mode_null(mode));
DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_STRNCMP);
replace_call(irn, call, mem, NULL, NULL);
return 1;
if (left == right || (is_Const(len) && is_Const_null(len))) {
/* a memcmp(s, s, len) ==> 0 OR
a memcmp(a, b, 0) ==> 0 */
+ ir_graph *irg = get_irn_irg(call);
ir_node *mem = get_Call_mem(call);
ir_node *adr = get_Call_ptr(call);
ir_entity *ent = get_SymConst_entity(adr);
ir_type *res_tp = get_method_res_type(call_tp, 0);
ir_mode *mode = get_type_mode(res_tp);
- irn = new_Const(get_mode_null(mode));
+ irn = new_r_Const(irg, get_mode_null(mode));
DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_STRNCMP);
replace_call(irn, call, mem, NULL, NULL);
return 1;
if (numcases == 0) {
/* zero cases: "goto default;" */
- ARR_APP1(ir_node*, env->defusers, new_Jmp());
+ ARR_APP1(ir_node*, env->defusers, new_r_Jmp(block));
} else if (numcases == 1) {
/* only one case: "if (sel == val) goto target else goto default;" */
ir_node *val = new_r_Const_long(irg, cmp_mode, curcases[0].value);
tarval * tv_lo = cpair->tv_lo;
tarval * tv_hi = cpair->tv_hi;
ir_mode * mode = cpair->lo_mode;
+ ir_graph * irg = get_irn_irg(cmp_lo);
if (pnc_lo == pn_Cmp_Eq && pnc_hi == pn_Cmp_Eq &&
tarval_is_null(tv_lo) && tarval_is_null(tv_hi) &&
hil = get_Cmp_left(cmp_hi);
hil = new_r_Conv(dst_block, hil, mode);
p = new_r_And(dst_block, lol, hil, mode);
- c = new_Const(tv_lo);
+ c = new_r_Const(irg, tv_lo);
cmp = new_r_Cmp(dst_block, p, c);
p = new_r_Proj(cmp, mode_b, pn_Cmp_Eq);
return p;
if ((pnc_lo == pn_Cmp_Lt || pnc_lo == pn_Cmp_Le || pnc_lo == pn_Cmp_Eq) &&
(pnc_hi == pn_Cmp_Eq || pnc_hi == pn_Cmp_Ge || pnc_hi == pn_Cmp_Gt)) {
/* x <|<=|== lo && x ==|>=|> hi ==> false */
- ir_node *const t = new_Const(tarval_b_false);
+ ir_node *const t = new_r_Const(irg, tarval_b_false);
return t;
} else if ((pnc_lo == pn_Cmp_Lt || pnc_lo == pn_Cmp_Le || pnc_lo == pn_Cmp_Eq) &&
(pnc_hi == pn_Cmp_Lt || pnc_hi == pn_Cmp_Le || pnc_hi == pn_Cmp_Lg)) {
return p;
} else if (pnc_hi == pn_Cmp_Lt) {
/* x > c && x < c + 1 ==> false */
- ir_node *const t = new_Const(tarval_b_false);
+ ir_node *const t = new_r_Const(irg, tarval_b_false);
return t;
} else if (pnc_hi == pn_Cmp_Le) {
/* x > c && x <= c + 1 ==> x != c + 1 */
if (tv_lo == tarval_bad || tv_hi == tarval_bad)
return NULL;
}
- c = new_Const(tv_lo);
+ c = new_r_Const(irg, tv_lo);
sub = new_r_Sub(block, x, c, mode);
- subc = new_r_Sub(block, new_Const(tv_hi), c, mode);
+ subc = new_r_Sub(block, new_r_Const(irg, tv_hi), c, mode);
cmp = new_r_Cmp(block, sub, subc);
p = new_r_Proj(cmp, mode_b, pnc_hi);
return p;
tarval * tv_lo = cpair->tv_lo;
tarval * tv_hi = cpair->tv_hi;
ir_mode * mode = cpair->lo_mode;
+ ir_graph * irg = get_irn_irg(cmp_lo);
if (pnc_lo == pn_Cmp_Lg && pnc_hi == pn_Cmp_Lg &&
tarval_is_null(tv_lo) && tarval_is_null(tv_hi) &&
hil = get_Cmp_left(cmp_hi);
hil = new_r_Conv(dst_block, hil, mode);
p = new_r_Or(dst_block, lol, hil, mode);
- c = new_Const(tv_lo);
+ c = new_r_Const(irg, tv_lo);
cmp = new_r_Cmp(dst_block, p, c);
p = new_r_Proj(cmp, mode_b, pn_Cmp_Lg);
return p;
if ((pnc_lo == pn_Cmp_Ge || pnc_lo == pn_Cmp_Gt || pnc_lo == pn_Cmp_Lg) &&
(pnc_hi == pn_Cmp_Lt || pnc_hi == pn_Cmp_Le || pnc_hi == pn_Cmp_Lg)) {
/* x >=|>|!= lo | x <|<=|!= hi ==> true */
- ir_node *const t = new_Const(tarval_b_true);
+ ir_node *const t = new_r_Const(irg, tarval_b_true);
return t;
} else if ((pnc_lo == pn_Cmp_Lt || pnc_lo == pn_Cmp_Le || pnc_lo == pn_Cmp_Eq) &&
(pnc_hi == pn_Cmp_Lt || pnc_hi == pn_Cmp_Le || pnc_hi == pn_Cmp_Lg)) {
return p;
} else if (pnc_hi == pn_Cmp_Ge) {
/* x <= c || x >= c + 1 ==> true */
- ir_node *const t = new_Const(tarval_b_true);
+ ir_node *const t = new_r_Const(irg, tarval_b_true);
return t;
} else if (pnc_hi == pn_Cmp_Gt) {
/* x <= c || x > c + 1 ==> x != c + 1 */
if (tv_lo == tarval_bad || tv_hi == tarval_bad)
return NULL;
}
- c = new_Const(tv_lo);
+ c = new_r_Const(irg, tv_lo);
sub = new_r_Sub(block, x, c, mode);
- subc = new_r_Sub(block, new_Const(tv_hi), c, mode);
+ subc = new_r_Sub(block, new_r_Const(irg, tv_hi), c, mode);
cmp = new_r_Cmp(block, sub, subc);
p = new_r_Proj(cmp, mode_b, pnc_hi);
return p;
ir_node *cond_j = skip_Proj(pred_j);
if (cond_j == cond_i) {
- ir_node *jmp = new_r_Jmp(get_nodes_block(cond_i));
+ ir_graph *irg = get_irn_irg(bl);
+ ir_node *jmp = new_r_Jmp(get_nodes_block(cond_i));
set_irn_n(bl, i, jmp);
- set_irn_n(bl, j, new_Bad());
+ set_irn_n(bl, j, new_r_Bad(irg));
DBG_OPT_IFSIM2(cond_i, jmp);
changed = 1;
* prevented, so just set it's cf to Bad.
*/
if (is_Block_dead(new_block)) {
- exchange(node, new_Bad());
+ ir_graph *irg = get_irn_irg(node);
+ exchange(node, new_r_Bad(irg));
env->changed = 1;
}
}
ir_node *pred_bl = get_nodes_block(skip_Proj(skipped));
if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
+ ir_graph *irg = get_irn_irg(block);
set_Block_dead(pred_bl);
- exchange(pred_X, new_Bad());
+ exchange(pred_X, new_r_Bad(irg));
*changed = 1;
} else if (skipped != pred_X) {
set_Block_cfgpred(block, i, skipped);
if (get_Block_idom(b) != predb) {
/* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/
- exchange(phi, new_Bad());
+ ir_graph *irg = get_irn_irg(b);
+ exchange(phi, new_r_Bad(irg));
} else {
/* predb is the direct dominator of b. There might be uses of the Phi nodes from
predb in further block, so move this phi from the predecessor into the block b */
in[n_preds++] = pred_X;
}
/* Remove block as it might be kept alive. */
- exchange(pred, b/*new_Bad()*/);
+ exchange(pred, b/*new_r_Bad(irg)*/);
} else {
/* case 3: */
in[n_preds++] = get_Block_cfgpred(b, i);
if (tv != tarval_bad) {
/* we have a constant switch */
- long num = get_tarval_long(tv);
- long def_num = get_Cond_default_proj(cond);
+ long num = get_tarval_long(tv);
+ long def_num = get_Cond_default_proj(cond);
+ ir_graph *irg = get_irn_irg(cond);
if (def_num == get_Proj_proj(proj1)) {
/* first one is the defProj */
if (num == get_Proj_proj(proj2)) {
jmp = new_r_Jmp(blk);
exchange(proj2, jmp);
- exchange(proj1, new_Bad());
+ exchange(proj1, new_r_Bad(irg));
return 1;
}
} else if (def_num == get_Proj_proj(proj2)) {
if (num == get_Proj_proj(proj1)) {
jmp = new_r_Jmp(blk);
exchange(proj1, jmp);
- exchange(proj2, new_Bad());
+ exchange(proj2, new_r_Bad(irg));
return 1;
}
} else {
if (num == get_Proj_proj(proj1)) {
jmp = new_r_Jmp(blk);
exchange(proj1, jmp);
- exchange(proj2, new_Bad());
+ exchange(proj2, new_r_Bad(irg));
return 1;
} else if (num == get_Proj_proj(proj2)) {
jmp = new_r_Jmp(blk);
exchange(proj2, jmp);
- exchange(proj1, new_Bad());
+ exchange(proj1, new_r_Bad(irg));
return 1;
}
}
if (is_Block(ka)) {
/* do NOT keep dead blocks */
if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
- set_End_keepalive(end, i, new_Bad());
+ set_End_keepalive(end, i, new_r_Bad(irg));
changed = 1;
}
} else {
if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
/* do NOT keep nodes in dead blocks */
- set_End_keepalive(end, i, new_Bad());
+ set_End_keepalive(end, i, new_r_Bad(irg));
changed = 1;
}
}
if (is_Block(ka)) {
/* do NOT keep dead blocks */
if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
- set_End_keepalive(end, i, new_Bad());
+ set_End_keepalive(end, i, new_r_Bad(irg));
changed = 1;
}
} else {
if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
/* do NOT keep nodes in dead blocks */
- set_End_keepalive(end, i, new_Bad());
+ set_End_keepalive(end, i, new_r_Bad(irg));
changed = 1;
}
}
if (is_tarval(node->type.tv) && tarval_is_constant(node->type.tv)) {
/* this Phi is replaced by a constant */
tarval *tv = node->type.tv;
- ir_node *c = new_Const(tv);
+ ir_node *c = new_r_Const(current_ir_graph, tv);
set_irn_node(c, node);
node->node = c;
*/
if (! is_Const(irn) && get_irn_mode(irn) != mode_T) {
/* can be replaced by a constant */
- ir_node *c = new_Const(tv);
+ ir_node *c = new_r_Const(current_ir_graph, tv);
set_irn_node(c, node);
node->node = c;
DB((dbg, LEVEL_1, "%+F is replaced by %+F\n", irn, c));
static ir_node *conv_transform(ir_node *node, ir_mode *dest_mode)
{
ir_mode *mode = get_irn_mode(node);
+ ir_graph *irg = get_irn_irg(node);
size_t arity;
size_t conv_arity;
size_t i;
ir_node *new_node;
- ir_graph *irg;
ir_node **ins;
if (mode == dest_mode)
if (tv == tarval_bad) {
return place_conv(node, dest_mode);
} else {
- return new_Const(tv);
+ return new_r_Const(irg, tv);
}
}
// We want to create a new node with the right mode
arity = get_irn_arity(node);
- irg = get_irn_irg(node);
ins = ALLOCAN(ir_node *, arity);
// The shift count does not participate in the conv optimisation
return new_node;
}
-/* TODO, backends (at least ia32) can't handle it at the moment,
- and it's probably not more efficient on most archs */
-#if 0
-static void try_optimize_cmp(ir_node *node)
-{
- ir_node *left = get_Cmp_left(node);
- ir_node *right = get_Cmp_right(node);
- ir_node *conv = NULL;
-
- if (is_downconv
-}
-#endif
-
static void conv_opt_walker(ir_node *node, void *data)
{
ir_node *transformed;
int costs;
bool *changed = data;
-#if 0
- if (is_Cmp(node)) {
- try_optimize_cmp(node);
- return;
- }
-#endif
-
if (!is_Conv(node))
return;
ir_mode* const m = get_irn_mode(irn);
ir_node* n;
if (mode_is_intb(m)) {
- n = new_Const(z);
+ ir_graph *irg = get_irn_irg(irn);
+ n = new_r_Const(irg, z);
} else if (m == mode_X) {
ir_node* const block = get_nodes_block(irn);
ir_graph* const irg = get_Block_irg(block);
/* ignore bad blocks. */
if (is_Bad(pred_blk)) {
- in[pos] = new_Bad();
+ ir_graph *irg = get_irn_irg(pred_blk);
+ in[pos] = new_r_Bad(irg);
continue;
}
/* produce a shift to adjust offset delta */
if (delta > 0) {
ir_node *cnst;
+ ir_graph *irg = get_irn_irg(load);
/* FIXME: only true for little endian */
- cnst = new_Const_long(mode_Iu, delta * 8);
+ cnst = new_r_Const_long(irg, mode_Iu, delta * 8);
store_value = new_r_Shr(get_nodes_block(load),
store_value, cnst, store_mode);
}
res = 0;
/* no exception */
if (info->projs[pn_Load_X_except]) {
- exchange( info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange( info->projs[pn_Load_X_except], new_r_Bad(irg));
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
/* no exception */
if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
*/
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
{
- ir_mode *c_mode = get_irn_mode(c);
- ir_mode *l_mode = get_Load_mode(load);
- ir_node *res = NULL;
+ ir_mode *c_mode = get_irn_mode(c);
+ ir_mode *l_mode = get_Load_mode(load);
+ ir_node *block = get_nodes_block(load);
+ dbg_info *dbgi = get_irn_dbg_info(load);
+ ir_node *res = copy_const_value(dbgi, c, block);
if (c_mode != l_mode) {
/* check, if the mode matches OR can be easily converted info */
if (is_reinterpret_cast(c_mode, l_mode)) {
- /* we can safely cast */
- dbg_info *dbg = get_irn_dbg_info(load);
- ir_node *block = get_nodes_block(load);
-
/* copy the value from the const code irg and cast it */
- res = copy_const_value(dbg, c);
- res = new_rd_Conv(dbg, block, res, l_mode);
+ res = new_rd_Conv(dbgi, block, res, l_mode);
}
- } else {
- /* copy the value from the const code irg */
- res = copy_const_value(get_irn_dbg_info(load), c);
+ return NULL;
}
return res;
-} /* can_replace_load_by_const */
+}
/**
* optimize a Load
/* no exception, clear the info field as it might be checked later again */
if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
info->projs[pn_Load_X_except] = NULL;
res |= CF_CHANGED;
}
if (value != NULL) {
/* we completely replace the load by this value */
if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
info->projs[pn_Load_X_except] = NULL;
res |= CF_CHANGED;
}
/* Prevents creation of phi that would be bad anyway.
* Dead and bad blocks. */
if (get_irn_arity(block) < 1 || is_Bad(block)) {
+ ir_graph *irg = get_irn_irg(block);
DB((dbg, LEVEL_5, "ssa bad %N\n", block));
- return new_Bad();
+ return new_r_Bad(irg);
}
if (block == ssa_second_def_block && !first) {
/* create a new Phi */
NEW_ARR_A(ir_node*, in, n_cfgpreds);
for (i = 0; i < n_cfgpreds; ++i)
- in[i] = new_Unknown(mode);
+ in[i] = new_r_Unknown(irg, mode);
phi = new_r_Phi(block, n_cfgpreds, in, mode);
/* Important: always keep block phi list up to date. */
ir_node **phis;
ir_node *phi, *next;
ir_node *head_cp = get_inversion_copy(loop_head);
+ ir_graph *irg = get_irn_irg(head_cp);
int arity = get_irn_arity(head_cp);
int backedges = get_backedge_n(head_cp, 0);
int new_arity = arity - backedges;
ins[pos++] = get_irn_n(head_cp, i);
}
- new_head = new_Block(new_arity, ins);
+ new_head = new_r_Block(irg, new_arity, ins);
phis = NEW_ARR_F(ir_node *, 0);
ir_node **ins;
ir_node *phi, *next;
ir_node **phis;
+ ir_graph *irg = get_irn_irg(loop_head);
int arity = get_irn_arity(loop_head);
int backedges = get_backedge_n(loop_head, 0);
int new_arity = backedges;
ins[pos++] = get_irn_n(loop_head, i);
}
- new_head = new_Block(new_arity, ins);
+ new_head = new_r_Block(irg, new_arity, ins);
phis = NEW_ARR_F(ir_node *, 0);
{
ir_node *ins[2];
ir_node *phi;
- ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, 0);
+ ir_node *proj = new_r_Proj(loop_info.duff_cond, mode_X, 0);
ir_node *head_pred = get_irn_n(loop_head, loop_info.be_src_pos);
ir_node *loop_condition = get_unroll_copy(head_pred, unroll_nr - 1);
* and one from the previous unrolled loop. */
ir_node *ins[2];
/* Calculate corresponding projection of mod result for this copy c */
- ir_node *proj = new_Proj(loop_info.duff_cond, mode_X, unroll_nr - c - 1);
+ ir_node *proj = new_r_Proj(loop_info.duff_cond, mode_X, unroll_nr - c - 1);
ins[0] = new_jmp;
ins[1] = proj;
static ir_node *clone_block_sans_bes(ir_node *node, ir_node *be_block)
{
ir_node **ins;
+ ir_graph *irg = get_irn_irg(node);
int arity = get_irn_arity(node);
int i, c = 0;
}
}
- return new_Block(c, ins);
+ return new_r_Block(irg, c, ins);
}
/* Creates blocks for duffs device, using previously obtained
{
ir_mode *mode;
+ ir_graph *irg = get_irn_irg(loop_head);
ir_node *block1, *count_block, *duff_block;
ir_node *ems, *ems_divmod, *ems_mod_proj, *cmp_null,
*cmp_proj, *ems_mode_cond, *x_true, *x_false, *const_null;
ir_node *cmp_bad_count, *good_count, *bad_count, *count_phi, *bad_count_neg;
mode = get_irn_mode(loop_info.end_val);
- const_null = new_Const(get_mode_null(mode));
+ const_null = new_r_Const(irg, get_mode_null(mode));
/* TODO naming
* 1. Calculate first approach to count.
get_irn_mode(loop_info.end_val));
ems_divmod = new_r_DivMod(block1,
- new_NoMem(),
+ new_r_NoMem(irg),
ems,
loop_info.step,
mode,
ems_mod_proj = new_r_Proj(ems_divmod, mode, pn_DivMod_res_mod);
cmp_null = new_r_Cmp(block1, ems_mod_proj, const_null);
cmp_proj = new_r_Proj(cmp_null, mode, pn_Cmp_Eq);
- ems_mode_cond = new_Cond(cmp_proj);
+ ems_mode_cond = new_r_Cond(block1, cmp_proj);
/* ems % step == 0 */
- x_true = new_Proj(ems_mode_cond, mode_X, pn_Cond_true);
+ x_true = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_true);
/* ems % step != 0 */
- x_false = new_Proj(ems_mode_cond, mode_X, pn_Cond_false);
+ x_false = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_false);
/* 2. Second block.
ins[0] = x_true;
ins[1] = x_false;
- count_block = new_Block(2, ins);
+ count_block = new_r_Block(irg, 2, ins);
/* Increase loop-taken-count depending on the loop condition
* uses the latest iv to compare to. */
if (loop_info.latest_value == 1) {
/* ems % step == 0 : +0 */
- true_val = new_Const(get_mode_null(mode));
+ true_val = new_r_Const(irg, get_mode_null(mode));
/* ems % step != 0 : +1 */
- false_val = new_Const(get_mode_one(mode));
+ false_val = new_r_Const(irg, get_mode_one(mode));
} else {
tarval *tv_two = new_tarval_from_long(2, mode);
/* ems % step == 0 : +1 */
- true_val = new_Const(get_mode_one(mode));
+ true_val = new_r_Const(irg, get_mode_one(mode));
/* ems % step != 0 : +2 */
- false_val = new_Const(tv_two);
+ false_val = new_r_Const(irg, tv_two);
}
ins[0] = true_val;
count = new_r_Proj(ems_divmod, mode, pn_DivMod_res_div);
/* (end - start) / step + correction */
- count = new_Add(count, correction, mode);
+ count = new_r_Add(count_block, count, correction, mode);
cmp_bad_count = new_r_Cmp(count_block, count, const_null);
bad_count_neg = new_r_Proj(cmp_bad_count, mode_X, pn_Cmp_Gt);
}
- bad_count_neg = new_Cond(bad_count_neg);
- good_count = new_Proj(bad_count_neg, mode_X, pn_Cond_true);
- bad_count = new_Proj(ems_mode_cond, mode_X, pn_Cond_false);
+ bad_count_neg = new_r_Cond(count_block, bad_count_neg);
+ good_count = new_r_Proj(bad_count_neg, mode_X, pn_Cond_true);
+ bad_count = new_r_Proj(ems_mode_cond, mode_X, pn_Cond_false);
/* 3. Duff Block
* Contains module to decide which loop to start from. */
ins[0] = good_count;
ins[1] = bad_count;
- duff_block = new_Block(2, ins);
+ duff_block = new_r_Block(irg, 2, ins);
/* Matze: I commented this line out because I was in the process of
* removing the Abs node. I don't understand that line at all anyway
ins[0] = get_Abs_op(count);
#endif
/* Manually feed the aforementioned count = 1 (bad case)*/
- ins[1] = new_Const(get_mode_one(mode));
+ ins[1] = new_r_Const(irg, get_mode_one(mode));
count_phi = new_r_Phi(duff_block, 2, ins, mode);
- unroll_c = new_Const(new_tarval_from_long((long)unroll_nr, mode));
+ unroll_c = new_r_Const(irg, new_tarval_from_long((long)unroll_nr, mode));
/* count % unroll_nr */
duff_mod = new_r_Mod(duff_block,
- new_NoMem(),
+ new_r_NoMem(irg),
count_phi,
unroll_c,
mode,
op_pin_state_pinned);
- proj = new_Proj(duff_mod, mode_X, pn_Mod_res);
- cond = new_Cond(proj);
+ proj = new_r_Proj(duff_mod, mode_X, pn_Mod_res);
+ cond = new_r_Cond(duff_block, proj);
loop_info.duff_cond = cond;
}
/* no exception, clear the m fields as it might be checked later again */
if (m->projs[pn_Load_X_except]) {
- exchange(m->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(ptr);
+ exchange(m->projs[pn_Load_X_except], new_r_Bad(irg));
m->projs[pn_Load_X_except] = NULL;
m->flags &= ~FLAG_EXCEPTION;
env.changed = 1;
}
proj = op->projs[pn_Load_X_except];
if (proj != NULL) {
- exchange(proj, new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange(proj, new_r_Bad(irg));
}
proj = op->projs[pn_Load_X_regular];
if (proj != NULL) {
}
proj = op->projs[pn_Store_X_except];
if (proj != NULL) {
- exchange(proj, new_Bad());
+ ir_graph *irg = get_irn_irg(store);
+ exchange(proj, new_r_Bad(irg));
}
proj = op->projs[pn_Store_X_regular];
if (proj != NULL) {
tarval *tv_l, *tv_r, *tv, *tv_init, *tv_incr, *tv_end;
tarval_int_overflow_mode_t ovmode;
scc *pscc;
+ ir_graph *irg;
if (! is_counter_iv(iv, env)) {
DB((dbg, LEVEL_4, " not counter IV"));
DB((dbg, LEVEL_4, " = OVERFLOW"));
return NULL;
}
- return new_Const(tv);
+ irg = get_irn_irg(iv);
+ return new_r_Const(irg, tv);
}
return do_apply(e->code, NULL, rc, e->rc, get_irn_mode(e->dst));
} /* applyOneEdge */
/* We could remove the Call depending on this Sel. */
new_node = node;
} else {
- ir_node *rem_block = get_cur_block();
- set_cur_block(get_nodes_block(node));
- new_node = copy_const_value(get_irn_dbg_info(node), get_atomic_ent_value(ent));
- set_cur_block(rem_block);
+ new_node = copy_const_value(get_irn_dbg_info(node), get_atomic_ent_value(ent), get_nodes_block(node));
DBG_OPT_POLY(node, new_node);
}
return new_node;
if (dyn_tp != firm_unknown_type) {
ir_entity *called_ent;
- ir_node *rem_block;
/* We know which method will be called, no dispatch necessary. */
called_ent = resolve_ent_polymorphy(dyn_tp, ent);
- rem_block = get_cur_block();
- set_cur_block(get_nodes_block(node));
- new_node = copy_const_value(get_irn_dbg_info(node), get_atomic_ent_value(called_ent));
- set_cur_block(rem_block);
+ new_node = copy_const_value(get_irn_dbg_info(node), get_atomic_ent_value(called_ent), get_nodes_block(node));
DBG_OPT_POLY(node, new_node);
return new_node;
*/
static int reassoc_Shl(ir_node **node)
{
- ir_node *n = *node;
- ir_node *c = get_Shl_right(n);
- ir_node *x, *blk, *irn;
- ir_mode *mode;
- tarval *tv;
+ ir_node *n = *node;
+ ir_node *c = get_Shl_right(n);
+ ir_node *x, *blk, *irn;
+ ir_graph *irg;
+ ir_mode *mode;
+ tarval *tv;
if (! is_Const(c))
return 0;
return 0;
blk = get_nodes_block(n);
- c = new_Const(tv);
+ irg = get_irn_irg(blk);
+ c = new_r_Const(irg, tv);
irn = new_rd_Mul(get_irn_dbg_info(n), blk, x, c, mode);
if (irn != n) {
*/
static void topologic_walker(ir_node *node, void *ctx)
{
- env_t *env = ctx;
- ir_node *adr, *block, *mem, *val;
- ir_mode *mode;
- unsigned vnum;
+ env_t *env = ctx;
+ /* graph of the walked node; used for the Bad nodes below */
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *adr, *block, *mem, *val;
+ ir_mode *mode;
+ unsigned vnum;
if (is_Load(node)) {
/* a load, check if we can resolve it */
Handle this here. */
mode = get_Load_mode(node);
if (mode != get_irn_mode(val))
+ /* NOTE(review): "block" is assigned in code elided from this hunk;
+ * the Conv/Jmp constructors below rely on it -- verify */
- val = new_d_Conv(get_irn_dbg_info(node), val, mode);
+ val = new_rd_Conv(get_irn_dbg_info(node), block, val, mode);
mem = get_Load_mem(node);
turn_into_tuple(node, pn_Load_max);
set_Tuple_pred(node, pn_Load_M, mem);
set_Tuple_pred(node, pn_Load_res, val);
- set_Tuple_pred(node, pn_Load_X_regular, new_Jmp());
- set_Tuple_pred(node, pn_Load_X_except, new_Bad());
+ set_Tuple_pred(node, pn_Load_X_regular, new_r_Jmp(block));
+ set_Tuple_pred(node, pn_Load_X_except, new_r_Bad(irg));
} else if (is_Store(node)) {
DB((dbg, SET_LEVEL_3, " checking %+F for replacement ", node));
/* Beware: A Store can contain a hidden conversion in Firm. */
val = get_Store_value(node);
if (get_irn_mode(val) != env->modes[vnum])
- val = new_d_Conv(get_irn_dbg_info(node), val, env->modes[vnum]);
+ val = new_rd_Conv(get_irn_dbg_info(node), block, val, env->modes[vnum]);
set_value(vnum, val);
mem = get_Store_mem(node);
turn_into_tuple(node, pn_Store_max);
set_Tuple_pred(node, pn_Store_M, mem);
- set_Tuple_pred(node, pn_Store_X_regular, new_Jmp());
- set_Tuple_pred(node, pn_Store_X_except, new_Bad());
+ set_Tuple_pred(node, pn_Store_X_regular, new_r_Jmp(block));
+ set_Tuple_pred(node, pn_Store_X_except, new_r_Bad(irg));
}
}
modes[i] = mode;
if (env->variants[i] == TR_ADD) {
+ /* NOTE(review): "irg" and "block" must be in scope in the surrounding
+ * (elided) code of this hunk -- verify against the full function */
- set_value(i, new_Const(get_mode_null(mode)));
+ set_value(i, new_r_Const(irg, get_mode_null(mode)));
} else if (env->variants[i] == TR_MUL) {
- set_value(i, new_Const(get_mode_one(mode)));
+ set_value(i, new_r_Const(irg, get_mode_one(mode)));
}
}
mature_immBlock(start_block);
/* create a new jump, free of CSE */
set_optimize(0);
- jmp = new_Jmp();
+ jmp = new_r_Jmp(block);
set_optimize(rem);
for (i = 0; i < env->n_ress; ++i) {
}
}
/* create a new tuple for the return values */
- tuple = new_Tuple(env->n_ress, in);
+ tuple = new_r_Tuple(block, env->n_ress, in);
turn_into_tuple(call, pn_Call_max);
set_Tuple_pred(call, pn_Call_M, mem);
end_block = get_irg_end_block(irg);
for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
ir_node *ret = get_Block_cfgpred(end_block, i);
+ ir_node *block;
/* search all Returns of a block */
if (! is_Return(ret))
continue;
- set_cur_block(get_nodes_block(ret));
+ /* keep the current block set as well: get_value() below presumably
+ * still depends on it -- verify before dropping set_cur_block() */
+ block = get_nodes_block(ret);
+ set_cur_block(block);
for (j = 0; j < env->n_ress; ++j) {
ir_node *pred = get_Return_res(ret, j);
ir_node *n;
case TR_ADD:
n = get_value(j, modes[j]);
- n = new_Add(n, pred, modes[j]);
+ n = new_r_Add(block, n, pred, modes[j]);
set_Return_res(ret, j, n);
break;
case TR_MUL:
n = get_value(j, modes[j]);
- n = new_Mul(n, pred, modes[j]);
+ n = new_r_Mul(block, n, pred, modes[j]);
set_Return_res(ret, j, n);
break;
{
ir_type *fromtype = get_irn_typeinfo_type(pred);
ir_node *new_cast = pred;
+ ir_node *block;
int ref_depth = 0;
if (totype == fromtype) return pred; /* Case for optimization! */
return pred;
}
- set_cur_block(get_nodes_block(pred));
+ /* remember pred's block; the Casts below are built into it explicitly,
+ * so mutating the global current block is no longer necessary */
+ block = get_nodes_block(pred);
if (is_SubClass_of(totype, fromtype)) {
/* downcast */
assert(new_type);
fromtype = new_type;
new_type = pointerize_type(new_type, ref_depth);
- new_cast = new_Cast(pred, new_type);
+ new_cast = new_r_Cast(block, pred, new_type);
pred = new_cast;
n_casts_normalized ++;
set_irn_typeinfo_type(new_cast, new_type); /* keep type information up to date. */
assert(new_type);
fromtype = new_type;
new_type = pointerize_type(new_type, ref_depth);
- new_cast = new_Cast(pred, new_type);
+ new_cast = new_r_Cast(block, pred, new_type);
pred = new_cast;
n_casts_normalized ++;
set_irn_typeinfo_type(new_cast, new_type); /* keep type information up to date. */
*/
static int remove_Cmp_Null_cast(ir_node *cmp)
{
+ ir_graph *irg;
ir_node *cast, *null, *new_null;
int cast_pos, null_pos;
ir_type *fromtype;
return 0;
/* Transform Cmp */
+ /* the replacement Const is created in the Cmp's own graph */
+ irg = get_irn_irg(cmp);
set_irn_n(cmp, cast_pos, get_Cast_op(cast));
fromtype = get_irn_typeinfo_type(get_Cast_op(cast));
- new_null = new_Const_type(get_Const_tarval(null), fromtype);
+ new_null = new_r_Const_type(irg, get_Const_tarval(null), fromtype);
set_irn_typeinfo_type(new_null, fromtype);
set_irn_n(cmp, null_pos, new_null);
++n_casts_removed;
void set_array_entity_values(ir_entity *ent, tarval **values, int num_vals)
{
int i;
- ir_graph *rem = current_ir_graph;
- ir_type *arrtp = get_entity_type(ent);
- ir_node *val;
- ir_type *elttp = get_array_element_type(arrtp);
+ ir_type *arrtp = get_entity_type(ent);
+ ir_node *val;
+ ir_type *elttp = get_array_element_type(arrtp);
+ /* constants live in the const-code graph; passing it explicitly
+ * replaces the current_ir_graph save/restore */
+ ir_graph *irg = get_const_code_irg();
assert(is_Array_type(arrtp));
assert(get_array_n_dimensions(arrtp) == 1);
/* One bound is sufficient, the number of constant fields makes the
size. */
assert(get_array_lower_bound (arrtp, 0) || get_array_upper_bound (arrtp, 0));
- current_ir_graph = get_const_code_irg();
for (i = 0; i < num_vals; i++) {
- val = new_Const_type(values[i], elttp);
+ val = new_r_Const_type(irg, values[i], elttp);
add_compound_ent_value(ent, val, get_array_element_entity(arrtp));
set_compound_graph_path_array_index(get_compound_ent_value_path(ent, i), 0, i);
}
- current_ir_graph = rem;
}
unsigned get_compound_ent_value_offset_bytes(const ir_entity *ent, int pos)
dbg_info *db)
{
ir_entity *res;
- ir_graph *rem;
assert(!id_contains_char(name, ' ') && "entity name should not contain spaces");
res->repr_class = NULL;
if (is_Method_type(type)) {
+ ir_graph *irg = get_const_code_irg();
symconst_symbol sym;
ir_mode *mode = is_Method_type(type) ? mode_P_code : mode_P_data;
sym.entity_p = res;
- rem = current_ir_graph;
- current_ir_graph = get_const_code_irg();
- set_atomic_ent_value(res, new_SymConst(mode, sym, symconst_addr_ent));
- current_ir_graph = rem;
+ /* method addresses are constant code: build the SymConst directly
+ * in the const-code graph instead of swapping current_ir_graph */
+ set_atomic_ent_value(res, new_r_SymConst(irg, mode, sym, symconst_addr_ent));
res->linkage = IR_LINKAGE_CONSTANT;
res->attr.mtd_attr.irg_add_properties = mtp_property_inherited;
res->attr.mtd_attr.vtable_number = IR_VTABLE_NUM_NOT_SET;
/*
* Copies a firm subgraph that complies to the restrictions for
- * constant expressions to current_block in current_ir_graph.
+ * constant expressions to block.
+ *
+ * All new nodes are placed into "block"; block-less nodes (Const,
+ * SymConst, Unknown) use its graph.
*/
-ir_node *copy_const_value(dbg_info *dbg, ir_node *n)
+/* NOTE(review): the public prototype names this parameter "to_block";
+ * consider using the same name in the definition for consistency */
+ir_node *copy_const_value(dbg_info *dbg, ir_node *n, ir_node *block)
{
+ ir_graph *irg = get_irn_irg(block);
ir_node *nn;
ir_mode *m;
m = get_irn_mode(n);
switch (get_irn_opcode(n)) {
case iro_Const:
- nn = new_d_Const_type(dbg, get_Const_tarval(n), get_Const_type(n));
+ nn = new_rd_Const_type(dbg, irg, get_Const_tarval(n), get_Const_type(n));
break;
case iro_SymConst:
- nn = new_d_SymConst_type(dbg, get_irn_mode(n), get_SymConst_symbol(n), get_SymConst_kind(n),
+ nn = new_rd_SymConst_type(dbg, irg, get_irn_mode(n), get_SymConst_symbol(n), get_SymConst_kind(n),
get_SymConst_value_type(n));
break;
case iro_Add:
- nn = new_d_Add(dbg, copy_const_value(dbg, get_Add_left(n)),
- copy_const_value(dbg, get_Add_right(n)), m); break;
+ /* binary cases: recursively copy both operands into the same block */
+ nn = new_rd_Add(dbg, block,
+ copy_const_value(dbg, get_Add_left(n), block),
+ copy_const_value(dbg, get_Add_right(n), block), m);
+ break;
case iro_Sub:
- nn = new_d_Sub(dbg, copy_const_value(dbg, get_Sub_left(n)),
- copy_const_value(dbg, get_Sub_right(n)), m); break;
+ nn = new_rd_Sub(dbg, block,
+ copy_const_value(dbg, get_Sub_left(n), block),
+ copy_const_value(dbg, get_Sub_right(n), block), m);
+ break;
case iro_Mul:
- nn = new_d_Mul(dbg, copy_const_value(dbg, get_Mul_left(n)),
- copy_const_value(dbg, get_Mul_right(n)), m); break;
+ nn = new_rd_Mul(dbg, block,
+ copy_const_value(dbg, get_Mul_left(n), block),
+ copy_const_value(dbg, get_Mul_right(n), block), m);
+ break;
case iro_And:
- nn = new_d_And(dbg, copy_const_value(dbg, get_And_left(n)),
- copy_const_value(dbg, get_And_right(n)), m); break;
+ nn = new_rd_And(dbg, block,
+ copy_const_value(dbg, get_And_left(n), block),
+ copy_const_value(dbg, get_And_right(n), block), m);
+ break;
case iro_Or:
- nn = new_d_Or(dbg, copy_const_value(dbg, get_Or_left(n)),
- copy_const_value(dbg, get_Or_right(n)), m); break;
+ nn = new_rd_Or(dbg, block,
+ copy_const_value(dbg, get_Or_left(n), block),
+ copy_const_value(dbg, get_Or_right(n), block), m);
+ break;
case iro_Eor:
- nn = new_d_Eor(dbg, copy_const_value(dbg, get_Eor_left(n)),
- copy_const_value(dbg, get_Eor_right(n)), m); break;
+ nn = new_rd_Eor(dbg, block,
+ copy_const_value(dbg, get_Eor_left(n), block),
+ copy_const_value(dbg, get_Eor_right(n), block), m);
+ break;
case iro_Cast:
- nn = new_d_Cast(dbg, copy_const_value(dbg, get_Cast_op(n)), get_Cast_type(n)); break;
+ nn = new_rd_Cast(dbg, block,
+ copy_const_value(dbg, get_Cast_op(n), block),
+ get_Cast_type(n));
+ break;
case iro_Conv:
- nn = new_d_Conv(dbg, copy_const_value(dbg, get_Conv_op(n)), m); break;
+ nn = new_rd_Conv(dbg, block,
+ copy_const_value(dbg, get_Conv_op(n), block), m);
+ break;
case iro_Unknown:
- nn = new_Unknown(m); break;
+ nn = new_r_Unknown(irg, m); break;
default:
panic("opcode invalid or not implemented");
}
ir_type *res;
int i;
ir_node *unk;
- ir_graph *rem = current_ir_graph;
+ /* the Unknown bound nodes belong to the const-code graph */
+ ir_graph *irg = get_const_code_irg();
assert(!is_Method_type(element_type));
res->attr.aa.upper_bound = XMALLOCNZ(ir_node*, n_dimensions);
res->attr.aa.order = XMALLOCNZ(int, n_dimensions);
- current_ir_graph = get_const_code_irg();
- unk = new_Unknown(mode_Iu);
+ unk = new_r_Unknown(irg, mode_Iu);
for (i = 0; i < n_dimensions; i++) {
res->attr.aa.lower_bound[i] =
res->attr.aa.upper_bound[i] = unk;
res->attr.aa.order[i] = i;
}
- current_ir_graph = rem;
res->attr.aa.element_type = element_type;
res->attr.aa.element_ent
void set_array_bounds_int(ir_type *array, int dimension, int lower_bound,
int upper_bound)
{
- ir_graph *rem = current_ir_graph;
- current_ir_graph = get_const_code_irg();
+ /* array bounds are constant-code nodes: build them directly in the
+ * const-code graph instead of swapping current_ir_graph */
+ ir_graph *irg = get_const_code_irg();
set_array_bounds(array, dimension,
- new_Const_long(mode_Iu, lower_bound),
- new_Const_long(mode_Iu, upper_bound));
- current_ir_graph = rem;
+ new_r_Const_long(irg, mode_Iu, lower_bound),
+ new_r_Const_long(irg, mode_Iu, upper_bound));
}
void set_array_lower_bound(ir_type *array, int dimension, ir_node *lower_bound)
void set_array_lower_bound_int(ir_type *array, int dimension, int lower_bound)
{
- ir_graph *rem = current_ir_graph;
- current_ir_graph = get_const_code_irg();
+ ir_graph *irg = get_const_code_irg();
set_array_lower_bound(array, dimension,
- new_Const_long(mode_Iu, lower_bound));
- current_ir_graph = rem;
+ new_r_Const_long(irg, mode_Iu, lower_bound));
}
void set_array_upper_bound(ir_type *array, int dimension, ir_node *upper_bound)
void set_array_upper_bound_int(ir_type *array, int dimension, int upper_bound)
{
- ir_graph *rem = current_ir_graph;
- current_ir_graph = get_const_code_irg();
+ ir_graph *irg = get_const_code_irg();
set_array_upper_bound(array, dimension,
- new_Const_long(mode_Iu, upper_bound));
- current_ir_graph = rem;
+ new_r_Const_long(irg, mode_Iu, upper_bound));
}
int has_array_lower_bound(const ir_type *array, int dimension)