/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
res->attr.divmod.exc.pin_state = state; \
res->attr.divmod.res_mode = mode; \
+ res->attr.divmod.no_remainder = 0; \
res = optimize_node(res); \
IRN_VRFY_IRG(res, irg); \
return res; \
res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
- res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
+ res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
for (i = arity - 1; i >= 0; --i)
if (get_irn_op(in[i]) == op_Unknown) {
/* Memory Phis in endless loops must be kept alive.
As we can't distinguish these easily we keep all of them alive. */
- if ((res->op == op_Phi) && (mode == mode_M))
+ if (is_Phi(res) && mode == mode_M)
add_End_keepalive(get_irg_end(irg), res);
return res;
} /* new_bd_Phi */
NEW_BD_BINOP(Carry)
NEW_BD_BINOP(Borrow)
+/** Creates a remainderless Div node. */
+static ir_node *new_bd_DivRL(dbg_info *db, ir_node *block,
+ ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
+{
+ ir_node *in[3];
+ ir_node *res;
+ ir_graph *irg = current_ir_graph;
+ in[0] = memop;
+ in[1] = op1;
+ in[2] = op2;
+ res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
+ res->attr.divmod.exc.pin_state = state;
+ res->attr.divmod.res_mode = mode;
+ res->attr.divmod.no_remainder = 1;
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
+ return res;
+}
+
static ir_node *
new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2) {
ir_node *in[2];
static ir_node *
new_bd_Alloc(dbg_info *db, ir_node *block, ir_node *store,
- ir_node *size, ir_type *alloc_type, where_alloc where) {
+ ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
ir_node *in[2];
ir_node *res;
ir_graph *irg = current_ir_graph;
static ir_node *
new_bd_Free(dbg_info *db, ir_node *block, ir_node *store,
- ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
+ ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
ir_node *in[3];
ir_node *res;
ir_graph *irg = current_ir_graph;
} /* new_bd_Sel */
static ir_node *
-new_bd_SymConst_type(dbg_info *db, ir_node *block, symconst_symbol value,
- symconst_kind symkind, ir_type *tp) {
- ir_node *res;
- ir_mode *mode;
+new_bd_SymConst_type(dbg_info *db, ir_node *block, ir_mode *mode,
+ symconst_symbol value,symconst_kind symkind, ir_type *tp) {
ir_graph *irg = current_ir_graph;
+ ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
- if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
- mode = mode_P_data; /* FIXME: can be mode_P_code */
- else
- mode = mode_Iu;
-
- res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
-
- res->attr.symc.num = symkind;
- res->attr.symc.sym = value;
- res->attr.symc.tp = tp;
+ res->attr.symc.kind = symkind;
+ res->attr.symc.sym = value;
+ res->attr.symc.tp = tp;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
+ memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
NEW_RD_BINOP(Carry)
NEW_RD_BINOP(Borrow)
+/* Creates an rd constructor for a DivRL node. */
+ir_node *new_rd_DivRL(dbg_info *db, ir_graph *irg, ir_node *block,
+ ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state)
+{
+ ir_node *res;
+ ir_graph *rem = current_ir_graph;
+ current_ir_graph = irg;
+ res = new_bd_DivRL(db, block, memop, op1, op2, mode, state);
+ current_ir_graph = rem;
+ return res;
+}
+
ir_node *
new_rd_Cmp(dbg_info *db, ir_graph *irg, ir_node *block,
ir_node *op1, ir_node *op2) {
ir_node *
new_rd_Alloc(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *size, ir_type *alloc_type, where_alloc where) {
+ ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
ir_node *res;
ir_graph *rem = current_ir_graph;
ir_node *
new_rd_Free(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
+ ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
ir_node *res;
ir_graph *rem = current_ir_graph;
} /* new_rd_Sel */
ir_node *
-new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
- symconst_kind symkind, ir_type *tp) {
+new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
+ symconst_symbol value, symconst_kind symkind, ir_type *tp) {
ir_node *res;
ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
- res = new_bd_SymConst_type(db, block, value, symkind, tp);
+ res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
current_ir_graph = rem;
return res;
} /* new_rd_SymConst_type */
ir_node *
-new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, symconst_symbol value,
- symconst_kind symkind) {
- ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type);
- return res;
+new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_node *block, ir_mode *mode,
+ symconst_symbol value, symconst_kind symkind) {
+ return new_rd_SymConst_type(db, irg, block, mode, value, symkind, firm_unknown_type);
} /* new_rd_SymConst */
- ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
+ ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
symconst_symbol sym;
sym.entity_p = symbol;
- return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_ent, tp);
+ return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_ent, tp);
} /* new_rd_SymConst_addr_ent */
-ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_entity *symbol, ir_type *tp) {
+ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
symconst_symbol sym;
sym.entity_p = symbol;
- return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_ofs_ent, tp);
+ return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_ofs_ent, tp);
} /* new_rd_SymConst_ofs_ent */
-ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ident *symbol, ir_type *tp) {
+ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
symconst_symbol sym;
sym.ident_p = symbol;
- return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_addr_name, tp);
+ return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_addr_name, tp);
} /* new_rd_SymConst_addr_name */
-ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
+ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
symconst_symbol sym;
sym.type_p = symbol;
- return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_tag, tp);
+ return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_tag, tp);
} /* new_rd_SymConst_type_tag */
-ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
+ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
symconst_symbol sym;
sym.type_p = symbol;
- return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_size, tp);
+ return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_size, tp);
} /* new_rd_SymConst_size */
-ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_type *symbol, ir_type *tp) {
+ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
symconst_symbol sym;
sym.type_p = symbol;
- return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), sym, symconst_type_align, tp);
+ return new_rd_SymConst_type(db, irg, get_irg_start_block(irg), mode, sym, symconst_type_align, tp);
} /* new_rd_SymConst_align */
ir_node *
ir_mode *mode, tarval *con, ir_type *tp) {
return new_rd_Const_type(NULL, irg, block, mode, con, tp);
}
-ir_node *new_r_SymConst(ir_graph *irg, ir_node *block,
+ir_node *new_r_SymConst(ir_graph *irg, ir_node *block, ir_mode *mode,
symconst_symbol value, symconst_kind symkind) {
- return new_rd_SymConst(NULL, irg, block, value, symkind);
+ return new_rd_SymConst(NULL, irg, block, mode, value, symkind);
}
ir_node *new_r_simpleSel(ir_graph *irg, ir_node *block, ir_node *store,
ir_node *objptr, ir_entity *ent) {
ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
return new_rd_Div(NULL, irg, block, memop, op1, op2, mode, state);
}
+ir_node *new_r_DivRL(ir_graph *irg, ir_node *block,
+ ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
+ return new_rd_DivRL(NULL, irg, block, memop, op1, op2, mode, state);
+}
ir_node *new_r_Mod(ir_graph *irg, ir_node *block,
ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
return new_rd_Mod(NULL, irg, block, memop, op1, op2, mode, state);
return new_rd_Store(NULL, irg, block, store, adr, val);
}
ir_node *new_r_Alloc(ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *size, ir_type *alloc_type, where_alloc where) {
+ ir_node *size, ir_type *alloc_type, ir_where_alloc where) {
return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
}
ir_node *new_r_Free(ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *ptr, ir_node *size, ir_type *free_type, where_alloc where) {
+ ir_node *ptr, ir_node *size, ir_type *free_type, ir_where_alloc where) {
return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where);
}
ir_node *new_r_Sync(ir_graph *irg, ir_node *block, int arity, ir_node *in[]) {
} /* new_rd_Phi0 */
+/**
+ * Internal constructor of a Phi node by a phi_merge operation.
+ *
+ * @param irg the graph on which the Phi will be constructed
+ * @param block the block in which the Phi will be constructed
+ * @param mode the mode of the Phi node
+ * @param in the input array of the phi node
+ * @param ins number of elements in the input array
+ * @param phi0 if non-NULL: the Phi0 node in the same block that represents
+ * the value for which the new Phi is constructed
+ */
static INLINE ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
ir_node **in, int ins, ir_node *phi0) {
/* Allocate a new node on the obstack. The allocation copies the in
array. */
- res = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
- res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
+ res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
+ res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
/* This loop checks whether the Phi has more than one predecessor.
If so, it is a real Phi node and we break the loop. Else the
Phi node merges the same definition on several paths and therefore
is not needed. Don't consider Bad nodes! */
known = res;
- for (i=0; i < ins; ++i)
- {
+ for (i = ins - 1; i >= 0; --i) {
assert(in[i]);
in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
/* Optimize self referencing Phis: We can't detect them yet properly, as
they still refer to the Phi0 they will replace. So replace right now. */
- if (phi0 && in[i] == phi0) in[i] = res;
+ if (phi0 && in[i] == phi0)
+ in[i] = res;
- if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
+ if (in[i] == res || in[i] == known || is_Bad(in[i]))
+ continue;
- if (known==res)
+ if (known == res)
known = in[i];
else
break;
}
- /* i==ins: there is at most one predecessor, we don't need a phi node. */
- if (i == ins) {
+ /* i < 0: there is at most one predecessor, we don't need a phi node. */
+ if (i < 0) {
if (res != known) {
edges_node_deleted(res, current_ir_graph);
- obstack_free (current_ir_graph->obst, res);
+ obstack_free(current_ir_graph->obst, res);
if (is_Phi(known)) {
/* If pred is a phi node we want to optimize it: If loops are matured in a bad
order, an enclosing Phi know may get superfluous. */
res = optimize_in_place_2(known);
if (res != known)
exchange(known, res);
-
}
else
res = known;
res = new_Bad();
}
} else {
- res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
+ res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
IRN_VRFY_IRG(res, irg);
/* Memory Phis in endless loops must be kept alive.
As we can't distinguish these easily we keep all of them alive. */
- if ((res->op == op_Phi) && (mode == mode_M))
+ if (is_Phi(res) && mode == mode_M)
add_End_keepalive(get_irg_end(irg), res);
}
static void
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
-#if 0
- if (!frag_arr[pos]) frag_arr[pos] = val;
- if (frag_arr[current_ir_graph->n_loc - 1]) {
- ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
- assert(arr != frag_arr && "Endless recursion detected");
- set_frag_value(arr, pos, val);
- }
-#else
+#ifdef DEBUG_libfirm
int i;
- for (i = 0; i < 1000; ++i) {
- if (!frag_arr[pos]) {
+ for (i = 1024; i >= 0; --i)
+#else
+ for (;;)
+#endif
+ {
+ if (frag_arr[pos] == NULL)
frag_arr[pos] = val;
- }
- if (frag_arr[current_ir_graph->n_loc - 1]) {
+ if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
+ assert(arr != frag_arr && "Endless recursion detected");
frag_arr = arr;
- }
- else
+ } else
return;
}
- assert(0 && "potential endless recursion");
-#endif
+ assert(!"potential endless recursion in set_frag_value");
} /* set_frag_value */
static ir_node *
ir_node *res;
ir_node **frag_arr;
- assert(is_fragile_op(cfOp) && (get_irn_op(cfOp) != op_Bad));
+ assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
frag_arr = get_frag_arr(cfOp);
res = frag_arr[pos];
- if (!res) {
- if (block->attr.block.graph_arr[pos]) {
- /* There was a set_value() after the cfOp and no get_value before that
+ if (res == NULL) {
+ if (block->attr.block.graph_arr[pos] != NULL) {
+ /* There was a set_value() after the cfOp and no get_value() before that
set_value(). We must build a Phi node now. */
if (block->attr.block.is_matured) {
int ins = get_irn_arity(block);
res = phi_merge(block, pos, mode, nin, ins);
} else {
res = new_rd_Phi0(current_ir_graph, block, mode);
- res->attr.phi0.pos = pos;
- res->link = block->link;
- block->link = res;
+ res->attr.phi.u.pos = pos;
+ res->attr.phi.next = block->attr.block.phis;
+ block->attr.block.phis = res;
}
- assert(res);
- /* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
- but this should be better: (remove comment if this works) */
+ assert(res != NULL);
/* It's a Phi, we can write this into all graph_arrs with NULL */
set_frag_value(block->attr.block.graph_arr, pos, res);
} else {
#endif /* PRECISE_EXC_CONTEXT */
/**
- * check whether a control flow cf_pred is a exception flow.
+ * Check whether a control flownode cf_pred represents an exception flow.
*
* @param cf_pred the control flow node
* @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
*/
static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
- /* all projections from a raise are exceptional control flow */
- if (is_Raise(prev_cf_op))
- return 1;
+ /*
+ * Note: all projections from a raise are "exceptional control flow", so we handle it
+ * like a normal Jmp, because there is no "regular" one.
+ * That's why Raise is no "fragile_op"!
+ */
if (is_fragile_op(prev_cf_op)) {
if (is_Proj(cf_pred)) {
if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
in graph_arr to break recursions.
Else we may not set graph_arr as there a later value is remembered. */
phi0 = NULL;
- if (!block->attr.block.graph_arr[pos]) {
- if (block == get_irg_start_block(current_ir_graph)) {
+ if (block->attr.block.graph_arr[pos] == NULL) {
+ ir_graph *irg = current_ir_graph;
+
+ if (block == get_irg_start_block(irg)) {
/* Collapsing to Bad tarvals is no good idea.
So we call a user-supplied routine here that deals with this case as
appropriate for the given language. Sorrily the only help we can give
However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
before recurring.
*/
- if (default_initialize_local_variable) {
+ if (default_initialize_local_variable != NULL) {
ir_node *rem = get_cur_block();
set_cur_block(block);
- block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1);
+ block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
set_cur_block(rem);
}
else
- block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
+ block->attr.block.graph_arr[pos] = new_Unknown(mode);
/* We don't need to care about exception ops in the start block.
There are none by definition. */
return block->attr.block.graph_arr[pos];
} else {
- phi0 = new_rd_Phi0(current_ir_graph, block, mode);
+ phi0 = new_rd_Phi0(irg, block, mode);
block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
if (get_opt_precise_exc_context()) {
/* We want to pass the Phi0 node to the constructor: this finds additional
optimization possibilities.
The Phi0 node either is allocated in this function, or it comes from
- a former call to get_r_value_internal. In this case we may not yet
- exchange phi0, as this is done in mature_immBlock. */
- if (!phi0) {
+ a former call to get_r_value_internal(). In this case we may not yet
+ exchange phi0, as this is done in mature_immBlock(). */
+ if (phi0 == NULL) {
phi0_all = block->attr.block.graph_arr[pos];
- if (!((get_irn_op(phi0_all) == op_Phi) &&
+ if (!(is_Phi(phi0_all) &&
(get_irn_arity(phi0_all) == 0) &&
(get_nodes_block(phi0_all) == block)))
phi0_all = NULL;
/* In case we allocated a Phi0 node at the beginning of this procedure,
we need to exchange this Phi0 with the real Phi. */
- if (phi0) {
+ if (phi0 != NULL) {
exchange(phi0, res);
block->attr.block.graph_arr[pos] = res;
/* Don't set_frag_value as it does not overwrite. Doesn't matter, is
1. The block is not mature and we visit it the first time. We can not
create a proper Phi node, therefore a Phi0, i.e., a Phi without
- predecessors is returned. This node is added to the linked list (field
- "link") of the containing block to be completed when this block is
+ predecessors is returned. This node is added to the linked list (block
+ attribute "phis") of the containing block to be completed when this block is
matured. (Completion will add a new Phi and turn the Phi0 into an Id
node.)
res = block->attr.block.graph_arr[pos];
/* case 2 -- If the value is actually computed, return it. */
- if (res) { return res; };
+ if (res != NULL)
+ return res;
if (block->attr.block.is_matured) { /* case 3 */
/* The Phi has the same amount of ins as the corresponding block. */
int ins = get_irn_arity(block);
ir_node **nin;
- NEW_ARR_A (ir_node *, nin, ins);
+ NEW_ARR_A(ir_node *, nin, ins);
/* Phi merge collects the predecessors and then creates a node. */
- res = phi_merge (block, pos, mode, nin, ins);
+ res = phi_merge(block, pos, mode, nin, ins);
} else { /* case 1 */
/* The block is not mature, we don't know how many in's are needed. A Phi
Phi is computed, pos is used to update the array with the local
values. */
res = new_rd_Phi0(current_ir_graph, block, mode);
- res->attr.phi0.pos = pos;
- res->link = block->link;
- block->link = res;
+ res->attr.phi.u.pos = pos;
+ res->attr.phi.next = block->attr.block.phis;
+ block->attr.block.phis = res;
}
- /* If we get here, the frontend missed a use-before-definition error */
- if (!res) {
- /* Error Message */
- printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
- assert(mode->code >= irm_F && mode->code <= irm_P);
- res = new_rd_Const(NULL, current_ir_graph, block, mode,
- get_mode_null(mode));
- }
+ assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
/* The local valid value is available now. */
block->attr.block.graph_arr[pos] = res;
assert(is_Block(block));
if (!get_Block_matured(block)) {
- ins = ARR_LEN(block->in)-1;
+ ir_graph *irg = current_ir_graph;
+
+ ins = ARR_LEN(block->in) - 1;
/* Fix block parameters */
- block->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, ins);
+ block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
/* An array for building the Phi nodes. */
NEW_ARR_A(ir_node *, nin, ins);
/* Traverse a chain of Phi nodes attached to this block and mature
these, too. **/
- for (n = block->link; n; n = next) {
- inc_irg_visited(current_ir_graph);
- next = n->link;
- exchange(n, phi_merge(block, n->attr.phi0.pos, n->mode, nin, ins));
+ for (n = block->attr.block.phis; n; n = next) {
+ inc_irg_visited(irg);
+ next = n->attr.phi.next;
+ exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
}
block->attr.block.is_matured = 1;
- /* Now, as the block is a finished firm node, we can optimize it.
+ /* Now, as the block is a finished Firm node, we can optimize it.
Since other nodes have been allocated since the block was created
we can not free the node on the obstack. Therefore we have to call
- optimize_in_place.
+ optimize_in_place().
Unfortunately the optimization does not change a lot, as all allocated
nodes refer to the unoptimized node.
- We can call _2, as global cse has no effect on blocks. */
+ We can call optimize_in_place_2(), as global cse has no effect on blocks. */
block = optimize_in_place_2(block);
- IRN_VRFY_IRG(block, current_ir_graph);
+ IRN_VRFY_IRG(block, irg);
}
} /* mature_immBlock */
NEW_D_BINOP(Mulh)
/**
- * Allocate the frag array.
+ * Allocate a frag array for a node if the current graph state is phase_building.
+ *
+ * @param irn the node for which the frag array should be allocated
+ * @param op the opcode of the (original) node; if it does not match the opcode of irn,
+ * nothing is done
+ * @param frag_store the address of the frag store in irn attributes, if this
+ * address contains a value != NULL, does nothing
*/
-static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
+void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
if (get_opt_precise_exc_context()) {
if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op) && /* Could be optimized away. */
+ (get_irn_op(irn) == op) && /* Could be optimized away. */
!*frag_store) /* Could be a cse where the arr is already set. */ {
- *frag_store = new_frag_arr(res);
+ *frag_store = new_frag_arr(irn);
}
}
-} /* allocate_frag_arr */
+} /* firm_alloc_frag_arr */
ir_node *
new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
ir_node *res;
res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
#endif
return res;
ir_node *res;
res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
#endif
return res;
ir_node *res;
res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
#endif
return res;
-}
+} /* new_d_Div */
+
+ir_node *
+new_d_DivRL(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
+ ir_node *res;
+ res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
+#if PRECISE_EXC_CONTEXT
+ firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
+#endif
+
+ return res;
+} /* new_d_DivRL */
ir_node *
new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
ir_node *res;
res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
#endif
return res;
res = new_bd_Call(db, current_ir_graph->current_block,
store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
#endif
return res;
res = new_bd_Load(db, current_ir_graph->current_block,
store, addr, mode);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
#endif
return res;
res = new_bd_Store(db, current_ir_graph->current_block,
store, addr, val);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
#endif
return res;
ir_node *
new_d_Alloc(dbg_info *db, ir_node *store, ir_node *size, ir_type *alloc_type,
- where_alloc where) {
+ ir_where_alloc where) {
ir_node *res;
res = new_bd_Alloc(db, current_ir_graph->current_block,
store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
#endif
return res;
ir_node *
new_d_Free(dbg_info *db, ir_node *store, ir_node *ptr,
- ir_node *size, ir_type *free_type, where_alloc where) {
+ ir_node *size, ir_type *free_type, ir_where_alloc where) {
return new_bd_Free(db, current_ir_graph->current_block,
store, ptr, size, free_type, where);
}
} /* new_d_Sel */
ir_node *
-new_d_SymConst_type(dbg_info *db, symconst_symbol value, symconst_kind kind, ir_type *tp) {
- return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
+new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
+ return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
value, kind, tp);
} /* new_d_SymConst_type */
ir_node *
-new_d_SymConst(dbg_info *db, symconst_symbol value, symconst_kind kind) {
- return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph),
+new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
+ return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
value, kind, firm_unknown_type);
} /* new_d_SymConst */
res = new_bd_CopyB(db, current_ir_graph->current_block,
store, dst, src, data_type);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
+ firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
#endif
return res;
} /* new_d_CopyB */
res = new_bd_Bound(db, current_ir_graph->current_block,
store, idx, lower, upper);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
+ firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
#endif
return res;
} /* new_d_Bound */
assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
- assert(jmp != NULL);
+ assert(is_ir_node(jmp));
ARR_APP1(ir_node *, block->in, jmp);
/* Call the hook */
ir_graph *irg = current_ir_graph;
assert(get_irg_phase_state(irg) == phase_building);
assert(pos+1 < irg->n_loc);
+ assert(is_ir_node(value));
irg->current_block->attr.block.graph_arr[pos + 1] = value;
} /* set_value */
/* call once for each run of the library */
void
-init_cons(uninitialized_local_variable_func_t *func) {
+firm_init_cons(uninitialized_local_variable_func_t *func) {
default_initialize_local_variable = func;
-} /* init_cons */
+} /* firm_init_cons */
void
irp_finalize_cons(void) {
return new_d_Const_type(NULL, get_type_mode(tp), con, tp);
}
-ir_node *new_SymConst_type(symconst_symbol value, symconst_kind kind, ir_type *type) {
- return new_d_SymConst_type(NULL, value, kind, type);
+ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
+ return new_d_SymConst_type(NULL, mode, value, kind, type);
}
-ir_node *new_SymConst(symconst_symbol value, symconst_kind kind) {
- return new_d_SymConst(NULL, value, kind);
+ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
+ return new_d_SymConst(NULL, mode, value, kind);
}
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
return new_d_simpleSel(NULL, store, objptr, ent);
ir_node *new_Div(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
return new_d_Div(NULL, memop, op1, op2, mode, state);
}
+ir_node *new_DivRL(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
+ return new_d_DivRL(NULL, memop, op1, op2, mode, state);
+}
ir_node *new_Mod(ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
return new_d_Mod(NULL, memop, op1, op2, mode, state);
}
return new_d_Store(NULL, store, addr, val);
}
ir_node *new_Alloc(ir_node *store, ir_node *size, ir_type *alloc_type,
- where_alloc where) {
+ ir_where_alloc where) {
return new_d_Alloc(NULL, store, size, alloc_type, where);
}
ir_node *new_Free(ir_node *store, ir_node *ptr, ir_node *size,
- ir_type *free_type, where_alloc where) {
+ ir_type *free_type, ir_where_alloc where) {
return new_d_Free(NULL, store, ptr, size, free_type, where);
}
ir_node *new_Sync(int arity, ir_node *in[]) {