# include "irouts.h"
# include "irloop.h"
# include "irbackedge_t.h"
+# include "irflag_t.h"
/* Defined in iropt.c */
pset *new_identities (void);
void del_identities (pset *value_table);
void add_identities (pset *value_table, ir_node *node);
-/********************************************************************/
+/*------------------------------------------------------------------*/
/* apply optimizations of iropt to all nodes. */
-/********************************************************************/
+/*------------------------------------------------------------------*/
static void init_link (ir_node *n, void *env) {
set_irn_link(n, NULL);
current_ir_graph = rem;
}
-/********************************************************************/
+/*------------------------------------------------------------------*/
/* Routines for dead node elimination / copying garbage collection */
/* of the obstack. */
-/********************************************************************/
+/*------------------------------------------------------------------*/
-/* Remeber the new node in the old node by using a field all nodes have. */
+/**
+ * Remember the new node in the old node by using a field all nodes have.
+ */
static INLINE void
set_new_node (ir_node *old, ir_node *new)
{
old->link = new;
}
-/* Get this new node, before the old node is forgotton.*/
+/**
+ * Get this new node, before the old node is forgotten.
+ */
static INLINE ir_node *
get_new_node (ir_node * n)
{
return n->link;
}
-/* We use the block_visited flag to mark that we have computed the
- number of useful predecessors for this block.
- Further we encode the new arity in this flag in the old blocks.
- Remembering the arity is useful, as it saves a lot of pointer
- accesses. This function is called for all Phi and Block nodes
- in a Block. */
+/**
+ * We use the block_visited flag to mark that we have computed the
+ * number of useful predecessors for this block.
+ * Further we encode the new arity in this flag in the old blocks.
+ * Remembering the arity is useful, as it saves a lot of pointer
+ * accesses. This function is called for all Phi and Block nodes
+ * in a Block.
+ */
static INLINE int
compute_new_arity(ir_node *b) {
int i, res, irn_arity;
}
}
+/* TODO: add an ir_op operation */
static INLINE void new_backedge_info(ir_node *n) {
switch(get_irn_opcode(n)) {
case iro_Block:
}
}
-/* Copies the node to the new obstack. The Ins of the new node point to
- the predecessors on the old obstack. For block/phi nodes not all
- predecessors might be copied. n->link points to the new node.
- For Phi and Block nodes the function allocates in-arrays with an arity
- only for useful predecessors. The arity is determined by counting
- the non-bad predecessors of the block. */
+/**
+ * Copies the node to the new obstack. The Ins of the new node point to
+ * the predecessors on the old obstack. For block/phi nodes not all
+ * predecessors might be copied. n->link points to the new node.
+ * For Phi and Block nodes the function allocates in-arrays with an arity
+ * only for useful predecessors. The arity is determined by counting
+ * the non-bad predecessors of the block.
+ */
static void
copy_node (ir_node *n, void *env) {
ir_node *nn, *block;
}
-/* Copies new predecessors of old node to new node remembered in link.
- Spare the Bad predecessors of Phi and Block nodes. */
+/**
+ * Copies new predecessors of old node to new node remembered in link.
+ * Spare the Bad predecessors of Phi and Block nodes.
+ */
static void
copy_preds (ir_node *n, void *env) {
ir_node *nn, *block;
add_identities (current_ir_graph->value_table, nn);
}
-/* Copies the graph recursively, compacts the keepalive of the end node. */
+/**
+ * Copies the graph recursively, compacts the keepalive of the end node.
+ */
static void
copy_graph (void) {
ir_node *oe, *ne; /* old end, new end */
/* copy_preds for the end node ... */
set_nodes_Block(ne, get_new_node(get_nodes_Block(oe)));
- /** ... and now the keep alives. **/
+ /*- ... and now the keep alives. -*/
/* First pick the not marked block nodes and walk them. We must pick these
first as else we will oversee blocks reachable from Phis. */
irn_arity = get_irn_arity(oe);
}
}
-/* Copies the graph reachable from current_ir_graph->end to the obstack
- in current_ir_graph and fixes the environment.
- Then fixes the fields in current_ir_graph containing nodes of the
- graph. */
+/**
+ * Copies the graph reachable from current_ir_graph->end to the obstack
+ * in current_ir_graph and fixes the environment.
+ * Then fixes the fields in current_ir_graph containing nodes of the
+ * graph.
+ */
static void
copy_graph_env (void) {
ir_node *old_end;
*/
}
-/* Copies all reachable nodes to a new obstack. Removes bad inputs
- from block nodes and the corresponding inputs from Phi nodes.
- Merges single exit blocks with single entry blocks and removes
- 1-input Phis.
- Adds all new nodes to a new hash table for cse. Does not
- perform cse, so the hash table might contain common subexpressions. */
+/**
+ * Copies all reachable nodes to a new obstack. Removes bad inputs
+ * from block nodes and the corresponding inputs from Phi nodes.
+ * Merges single exit blocks with single entry blocks and removes
+ * 1-input Phis.
+ * Adds all new nodes to a new hash table for cse. Does not
+ * perform cse, so the hash table might contain common subexpressions.
+ */
/* Amroq call this emigrate() */
void
dead_node_elimination(ir_graph *irg) {
free_outs(current_ir_graph);
/* @@@ so far we loose loops when copying */
- set_irg_loop(current_ir_graph, NULL);
+ free_loop_information(current_ir_graph);
- if (get_optimize() && get_opt_dead_node_elimination()) {
+ if (get_opt_optimize() && get_opt_dead_node_elimination()) {
/* A quiet place, where the old obstack can rest in peace,
until it will be cremated. */
current_ir_graph = rem;
}
-/* Relink bad predeseccors of a block and store the old in array to the
- link field. This function is called by relink_bad_predecessors().
- The array of link field starts with the block operand at position 0.
- If block has bad predecessors, create a new in array without bad preds.
- Otherwise let in array untouched. */
+/**
+ * Relink bad predecessors of a block and store the old in array to the
+ * link field. This function is called by relink_bad_predecessors().
+ * The array of link field starts with the block operand at position 0.
+ * If block has bad predecessors, create a new in array without bad preds.
+ * Otherwise let in array untouched.
+ */
static void relink_bad_block_predecessors(ir_node *n, void *env) {
ir_node **new_in, *irn;
int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
} /* Block is not relinked */
}
-/* Relinks Bad predecesors from Bocks and Phis called by walker
- remove_bad_predecesors(). If n is a Block, call
- relink_bad_block_redecessors(). If n is a Phinode, call also the relinking
- function of Phi's Block. If this block has bad predecessors, relink preds
- of the Phinode. */
+/**
+ * Relinks Bad predecessors from Blocks and Phis called by walker
+ * remove_bad_predecessors(). If n is a Block, call
+ * relink_bad_block_predecessors(). If n is a Phi node, call also the relinking
+ * function of Phi's Block. If this block has bad predecessors, relink preds
+ * of the Phinode.
+ */
static void relink_bad_predecessors(ir_node *n, void *env) {
ir_node *block, **old_in;
int i, old_irn_arity, new_irn_arity;
} /* n is a Phi node */
}
-/* Removes Bad Bad predecesors from Blocks and the corresponding
- inputs to Phi nodes as in dead_node_elimination but without
- copying the graph.
- On walking up set the link field to NULL, on walking down call
- relink_bad_predecessors() (This function stores the old in array
- to the link field and sets a new in array if arity of predecessors
- changes) */
+/**
+ * Removes Bad predecessors from Blocks and the corresponding
+ * inputs to Phi nodes as in dead_node_elimination but without
+ * copying the graph.
+ * On walking up set the link field to NULL, on walking down call
+ * relink_bad_predecessors() (This function stores the old in array
+ * to the link field and sets a new in array if arity of predecessors
+ * changes).
+ */
void remove_bad_predecessors(ir_graph *irg) {
irg_walk_graph(irg, init_link, relink_bad_predecessors, NULL);
}
-/**********************************************************************/
+/*--------------------------------------------------------------------*/
/* Functionality for inlining */
-/**********************************************************************/
+/*--------------------------------------------------------------------*/
-/* Copy node for inlineing. Updates attributes that change when
+/**
+ * Copy node for inlining. Updates attributes that change when
 * inlining but not for dead node elimination.
*
* Copies the node by calling copy_node and then updates the entity if
* it's a local one. env must be a pointer of the frame type of the
* inlined procedure. The new entities must be in the link field of
- * the entities. */
+ * the entities.
+ */
static INLINE void
copy_node_inline (ir_node *n, void *env) {
ir_node *new;
ir_node **cf_pred;
ir_node *ret, *phi;
int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
- int exc_handling; ir_node *proj;
+ int exc_handling;
type *called_frame;
- if (!get_optimize() || !get_opt_inline()) return;
+ if ( !(get_irg_inline_property(called_graph) == irg_inline_forced) && (!get_opt_optimize() || !get_opt_inline() ||
+ (get_irg_inline_property(called_graph) == irg_inline_forbidden))) return;
+
/* -- Turn off optimizations, this can cause problems when allocating new nodes. -- */
- rem_opt = get_optimize();
+ rem_opt = get_opt_optimize();
set_optimize(0);
/* Handle graph state */
exc_handling: 0 There is a handler.
1 Branches to End.
2 Exception handling not represented in Firm. -- */
- exc_handling = 2;
- for (proj = (ir_node *)get_irn_link(call); proj; proj = (ir_node *)get_irn_link(proj)) {
- assert(get_irn_op(proj) == op_Proj);
- if (get_Proj_proj(proj) == pn_Call_M_except) { exc_handling = 0; break;}
- if (get_Proj_proj(proj) == pn_Call_X_except) { exc_handling = 1; }
- }
-
{
ir_node *proj, *Mproj = NULL, *Xproj = NULL;
for (proj = (ir_node *)get_irn_link(call); proj; proj = (ir_node *)get_irn_link(proj)) {
if (get_Proj_proj(proj) == pn_Call_X_except) Xproj = proj;
if (get_Proj_proj(proj) == pn_Call_M_except) Mproj = proj;
}
- if (Mproj) { assert(Xproj); exc_handling = 0; }
- else if (Xproj) { exc_handling = 1; }
- else { exc_handling = 2; }
+ if (Mproj) { assert(Xproj); exc_handling = 0; } // Mproj
+ else if (Xproj) { exc_handling = 1; } //!Mproj && Xproj
+ else { exc_handling = 2; } //!Mproj && !Xproj
}
Second the Call branches to End, the exception is not handled. Just
add all inlined exception branches to the End node.
Third: there is no Exception edge at all. Handle as case two. */
- if (exc_handler == 0) {
+ if (exc_handling == 0) {
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
int main_end_bl_arity;
ir_node **end_preds;
- /* assert(exc_handler == 1 || no exceptions. ) */
+ /* assert(exc_handling == 1 || no exceptions. ) */
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret = get_irn_n(end_bl, i);
I didn't get a version with NEW_ARR_F to run. */
#define MAX_INLINE 1024
-/* given an Call node, returns the irg called. NULL if not
- * known. */
+/**
+ * Returns the irg called from a Call node. If the irg is not
+ * known, NULL is returned.
+ */
static ir_graph *get_call_called_irg(ir_node *call) {
ir_node *addr;
tarval *tv;
}
}
-/* Inlines all small methods at call sites where the called address comes
- from a Const node that references the entity representing the called
- method.
- The size argument is a rough measure for the code size of the method:
- Methods where the obstack containing the firm graph is smaller than
- size are inlined. */
+/**
+ * Inlines all small methods at call sites where the called address comes
+ * from a Const node that references the entity representing the called
+ * method.
+ * The size argument is a rough measure for the code size of the method:
+ * Methods where the obstack containing the firm graph is smaller than
+ * size are inlined.
+ */
void inline_small_irgs(ir_graph *irg, int size) {
int i;
ir_node *calls[MAX_INLINE];
ir_graph *rem = current_ir_graph;
- if (!(get_optimize() && get_opt_inline())) return;
+ if (!(get_opt_optimize() && get_opt_inline())) return;
current_ir_graph = irg;
/* Handle graph state */
ir_graph *callee;
tv = get_Const_tarval(get_Call_ptr(calls[i]));
callee = get_entity_irg(tarval_to_entity(tv));
- if ((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) {
+ if (((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) ||
+ (get_irg_inline_property(callee) == irg_inline_forced)) {
inline_method(calls[i], callee);
}
}
current_ir_graph = rem;
}
+/**
+ * Environment for inlining irgs.
+ */
typedef struct {
- int n_nodes; /* Nodes in graph except Id, Tuple, Proj, Start, End */
- int n_nodes_orig; /* for statistics */
- eset *call_nodes; /* All call nodes in this graph */
+ int n_nodes; /**< Nodes in graph except Id, Tuple, Proj, Start, End */
+ int n_nodes_orig; /**< for statistics */
+ eset *call_nodes; /**< All call nodes in this graph */
int n_call_nodes;
- int n_call_nodes_orig; /* for statistics */
- int n_callers; /* Number of known graphs that call this graphs. */
- int n_callers_orig; /* for statistics */
+ int n_call_nodes_orig; /**< for statistics */
+ int n_callers; /**< Number of known graphs that call this graphs. */
+ int n_callers_orig; /**< for statistics */
} inline_irg_env;
static inline_irg_env *new_inline_irg_env(void) {
}
-/* Inlines small leave methods at call sites where the called address comes
- from a Const node that references the entity representing the called
- method.
- The size argument is a rough measure for the code size of the method:
- Methods where the obstack containing the firm graph is smaller than
- size are inlined. */
+/**
+ * Inlines small leave methods at call sites where the called address comes
+ * from a Const node that references the entity representing the called
+ * method.
+ * The size argument is a rough measure for the code size of the method:
+ * Methods where the obstack containing the firm graph is smaller than
+ * size are inlined.
+ */
void inline_leave_functions(int maxsize, int leavesize, int size) {
inline_irg_env *env;
int i, n_irgs = get_irp_n_irgs();
ir_graph *rem = current_ir_graph;
int did_inline = 1;
- if (!(get_optimize() && get_opt_inline())) return;
+ if (!(get_opt_optimize() && get_opt_inline())) return;
- /* extend all irgs by a temporary datastructure for inlineing. */
+ /* extend all irgs by a temporary data structure for inlining. */
for (i = 0; i < n_irgs; ++i)
set_irg_link(get_irp_irg(i), new_inline_irg_env());
- /* Precompute information in temporary datastructure. */
+ /* Precompute information in temporary data structure. */
for (i = 0; i < n_irgs; ++i) {
current_ir_graph = get_irp_irg(i);
assert(get_irg_phase_state(current_ir_graph) != phase_building);
ir_graph *callee = get_call_called_irg(call);
if (env->n_nodes > maxsize) break;
- if (callee && is_leave(callee) && is_smaller(callee, leavesize)) {
+ if (callee &&
+ ((is_leave(callee) && is_smaller(callee, leavesize)) ||
+ (get_irg_inline_property(callee) == irg_inline_forced))) {
if (!phiproj_computed) {
phiproj_computed = 1;
collect_phiprojs(current_ir_graph);
current_ir_graph = rem;
}
-/********************************************************************/
-/* Code Placement. Pinns all floating nodes to a block where they */
-/* will be executed only if needed. */
-/********************************************************************/
+/*-----------------------------------------------------------------*/
+/* Code Placement. Pins all floating nodes to a block where they */
+/* will be executed only if needed. */
+/*-----------------------------------------------------------------*/
-static pdeq *worklist; /* worklist of ir_node*s */
-
-/* Find the earliest correct block for N. --- Place N into the
- same Block as its dominance-deepest Input. */
+/**
+ * Find the earliest correct block for N. --- Place N into the
+ * same Block as its dominance-deepest Input.
+ */
static void
-place_floats_early (ir_node *n)
+place_floats_early(ir_node *n, pdeq *worklist)
{
int i, start, irn_arity;
ir_node *dep_block;
if ((irn_not_visited(dep)) &&
(get_op_pinned(get_irn_op(dep)) == floats)) {
- place_floats_early (dep);
+ place_floats_early(dep, worklist);
}
/* Because all loops contain at least one pinned node, now all
our inputs are either pinned or place_early has already
}
}
-/* Floating nodes form subgraphs that begin at nodes as Const, Load,
- Start, Call and end at pinned nodes as Store, Call. Place_early
- places all floating nodes reachable from its argument through floating
- nodes and adds all beginnings at pinned nodes to the worklist. */
-static INLINE void place_early (void) {
+/**
+ * Floating nodes form subgraphs that begin at nodes as Const, Load,
+ * Start, Call and end at pinned nodes as Store, Call. Place_early
+ * places all floating nodes reachable from its argument through floating
+ * nodes and adds all beginnings at pinned nodes to the worklist.
+ */
+static INLINE void place_early(pdeq* worklist) {
assert(worklist);
inc_irg_visited(current_ir_graph);
/* this inits the worklist */
- place_floats_early (get_irg_end(current_ir_graph));
+ place_floats_early(get_irg_end(current_ir_graph), worklist);
/* Work the content of the worklist. */
while (!pdeq_empty (worklist)) {
ir_node *n = pdeq_getl (worklist);
- if (irn_not_visited(n)) place_floats_early (n);
+ if (irn_not_visited(n)) place_floats_early(n, worklist);
}
set_irg_outs_inconsistent(current_ir_graph);
}
-/* deepest common dominance ancestor of DCA and CONSUMER of PRODUCER */
+/** deepest common dominance ancestor of DCA and CONSUMER of PRODUCER. */
static ir_node *
consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
{
/* Compute the latest block into which we can place a node so that it is
before consumer. */
if (get_irn_op(consumer) == op_Phi) {
- /* our comsumer is a Phi-node, the effective use is in all those
+ /* our consumer is a Phi-node, the effective use is in all those
blocks through which the Phi-node reaches producer */
int i, irn_arity;
ir_node *phi_block = get_nodes_Block(consumer);
return get_loop_depth(get_irn_loop(n));
}
-/* Move n to a block with less loop depth than it's current block. The
- new block must be dominated by early. */
+/**
+ * Move n to a block with less loop depth than it's current block. The
+ * new block must be dominated by early.
+ */
static void
move_out_of_loops (ir_node *n, ir_node *early)
{
}
}
-/* Find the latest legal block for N and place N into the
- `optimal' Block between the latest and earliest legal block.
- The `optimal' block is the dominance-deepest block of those
- with the least loop-nesting-depth. This places N out of as many
- loops as possible and then makes it as controldependant as
- possible. */
+/**
+ * Find the latest legal block for N and place N into the
+ * `optimal' Block between the latest and earliest legal block.
+ * The `optimal' block is the dominance-deepest block of those
+ * with the least loop-nesting-depth. This places N out of as many
+ * loops as possible and then makes it as control dependant as
+ * possible.
+ */
static void
-place_floats_late (ir_node *n)
+place_floats_late(ir_node *n, pdeq *worklist)
{
int i;
ir_node *early;
if ((get_irn_op(n) != op_Block) &&
(!is_cfop(n)) &&
(get_irn_mode(n) != mode_X)) {
- /* Remember the early palacement of this block to move it
+ /* Remember the early placement of this block to move it
out of loop no further than the early placement. */
early = get_nodes_Block(n);
/* Assure that our users are all placed, except the Phi-nodes.
- --- Each dataflow cycle contains at least one Phi-node. We
+ --- Each data flow cycle contains at least one Phi-node. We
have to break the `user has to be placed before the
- producer' dependance cycle and the Phi-nodes are the
+ producer' dependence cycle and the Phi-nodes are the
place to do so, because we need to base our placement on the
final region of our users, which is OK with Phi-nodes, as they
are pinned, and they never have to be placed after a
for (i = 0; i < get_irn_n_outs(n); i++) {
ir_node *succ = get_irn_out(n, i);
if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
- place_floats_late (succ);
+ place_floats_late(succ, worklist);
}
/* We have to determine the final block of this node... except for
}
}
-static INLINE void place_late(void) {
+static INLINE void place_late(pdeq* worklist) {
assert(worklist);
inc_irg_visited(current_ir_graph);
/* This fills the worklist initially. */
- place_floats_late(get_irg_start_block(current_ir_graph));
+ place_floats_late(get_irg_start_block(current_ir_graph), worklist);
/* And now empty the worklist again... */
while (!pdeq_empty (worklist)) {
ir_node *n = pdeq_getl (worklist);
- if (irn_not_visited(n)) place_floats_late(n);
+ if (irn_not_visited(n)) place_floats_late(n, worklist);
}
}
void place_code(ir_graph *irg) {
+ pdeq* worklist;
ir_graph *rem = current_ir_graph;
+
current_ir_graph = irg;
- if (!(get_optimize() && get_opt_global_cse())) return;
+ if (!(get_opt_optimize() && get_opt_global_cse())) return;
/* Handle graph state */
assert(get_irg_phase_state(irg) != phase_building);
if (get_irg_dom_state(irg) != dom_consistent)
compute_doms(irg);
- construct_backedges(irg);
+ if (get_irg_loopinfo_state(irg) != loopinfo_consistent) {
+ free_loop_information(irg);
+ construct_backedges(irg);
+ }
/* Place all floating nodes as early as possible. This guarantees
a legal code placement. */
- worklist = new_pdeq ();
- place_early();
+ worklist = new_pdeq();
+ place_early(worklist);
/* place_early invalidates the outs, place_late needs them. */
compute_outs(irg);
/* Now move the nodes down in the dominator tree. This reduces the
unnecessary executions of the node. */
- place_late();
+ place_late(worklist);
set_irg_outs_inconsistent(current_ir_graph);
- del_pdeq (worklist);
+ set_irg_loopinfo_inconsistent(current_ir_graph);
+ del_pdeq(worklist);
current_ir_graph = rem;
}
/* semantics of Phi nodes. */
/********************************************************************/
-/* Removes Tuples from Block control flow predecessors.
- Optimizes blocks with equivalent_node().
- Replaces n by Bad if n is unreachable control flow. */
+/**
+ * Removes Tuples from Block control flow predecessors.
+ * Optimizes blocks with equivalent_node().
+ * Replaces n by Bad if n is unreachable control flow.
+ */
static void merge_blocks(ir_node *n, void *env) {
int i;
set_irn_link(n, NULL);
if (get_irn_op(n) == op_Block) {
/* Remove Tuples */
for (i = 0; i < get_Block_n_cfgpreds(n); i++)
- /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go throug.
+ /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
A different order of optimizations might cause problems. */
if (get_opt_normalize())
set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
- } else if (get_optimize() && (get_irn_mode(n) == mode_X)) {
+ } else if (get_opt_optimize() && (get_irn_mode(n) == mode_X)) {
/* We will soon visit a block. Optimize it before visiting! */
ir_node *b = get_nodes_Block(n);
- ir_node *new = equivalent_node(b);
- while (irn_not_visited(b) && (!is_Bad(new)) && (new != b)) {
+ ir_node *new_node = equivalent_node(b);
+ while (irn_not_visited(b) && (!is_Bad(new_node)) && (new_node != b)) {
/* We would have to run gigo if new is bad, so we
promote it directly below. */
- assert(((b == new) ||
+ assert(((b == new_node) ||
get_opt_control_flow_straightening() ||
get_opt_control_flow_weak_simplification()) &&
("strange flag setting"));
- exchange (b, new);
- b = new;
- new = equivalent_node(b);
+ exchange (b, new_node);
+ b = new_node;
+ new_node = equivalent_node(b);
}
/* GL @@@ get_opt_normalize hinzugefuegt, 5.5.2003 */
- if (is_Bad(new) && get_opt_normalize()) exchange (n, new_Bad());
+ if (is_Bad(new_node) && get_opt_normalize()) exchange(n, new_Bad());
}
}
-/* Collects all Phi nodes in link list of Block.
- Marks all blocks "block_visited" if they contain a node other
- than Jmp. */
+/**
+ * Collects all Phi nodes in link list of Block.
+ * Marks all blocks "block_visited" if they contain a node other
+ * than Jmp.
+ */
static void collect_nodes(ir_node *n, void *env) {
if (is_no_Block(n)) {
ir_node *b = get_nodes_Block(n);
}
}
-/* Returns true if pred is pred of block */
+/** Returns true if pred is predecessor of block. */
static int is_pred_of(ir_node *pred, ir_node *b) {
int i;
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
if (get_Block_block_visited(pred) + 1
< get_irg_block_visited(current_ir_graph)) {
- if (!get_optimize() || !get_opt_control_flow_strong_simplification()) {
+ if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
/* Mark block so that is will not be removed. */
set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
return 1;
}
in = (ir_node **) malloc(max_preds * sizeof(ir_node *));
-/**
+/*-
printf(" working on "); DDMN(b);
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
pred = get_nodes_Block(get_Block_cfgpred(b, i));
printf(" removing pred %i ", i); DDMN(pred);
} else { printf(" Nothing to do for "); DDMN(pred); }
}
- * end Debug output **/
+ * end Debug output -*/
- /** Fix the Phi nodes **/
+ /*- Fix the Phi nodes -*/
phi = get_irn_link(b);
while (phi) {
assert(get_irn_op(phi) == op_Phi);
phi = get_irn_link(phi);
}
-/**
- This happens only if merge between loop backedge and single loop entry. **/
+/*-
+ This happens only if merge between loop backedge and single loop entry. -*/
for (k = 0; k < get_Block_n_cfgpreds(b); k++) {
pred = get_nodes_Block(get_Block_cfgpred(b, k));
if (get_Block_block_visited(pred) +1
}
}
- /** Fix the block **/
+ /*- Fix the block -*/
n_preds = 0;
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
pred = get_nodes_Block(get_Block_cfgpred(b, i));
* Called by walker of remove_critical_cf_edges.
*
* Place an empty block to an edge between a blocks of multiple
- * predecessors and a block of multiple sucessors.
+ * predecessors and a block of multiple successors.
*
* @param n IR node
- * @param env Envirnment of walker. This field is unused and has
+ * @param env Environment of walker. This field is unused and has
* the value NULL.
*/
static void walk_critical_cf_edges(ir_node *n, void *env) {
for (i=0; i<arity; i++) {
pre = get_irn_n(n, i);
- /* Predecessor has multiple sucessors. Insert new flow edge */
+ /* Predecessor has multiple successors. Insert new flow edge */
if ((NULL != pre) &&
(op_Proj == get_irn_op(pre)) &&
op_Raise != get_irn_op(skip_Proj(pre))) {
switch_block(block);
jmp = new_Jmp();
switch_block(n);
- /* set sucessor of new block */
+ /* set successor of new block */
set_irn_n(n, i, jmp);
- } /* predecessor has multiple sucessors */
+ } /* predecessor has multiple successors */
} /* for all predecessors */
} /* n is a block */
}