X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firgopt.c;h=a68c20e30b3c01d182f6cc61592ddcd1e26e356a;hb=5f8ddee6b08c8040c0a304a347d65045c1141d52;hp=cbe8daba7270ba6f65c217d2faaf48b654753278;hpb=5244f0ab8c6bbfd06ae3b3dd84abca60fe01af0e;p=libfirm

diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c
index cbe8daba7..a68c20e30 100644
--- a/ir/ir/irgopt.c
+++ b/ir/ir/irgopt.c
@@ -3,17 +3,23 @@
 **
 ** Author: Christian Schaefer
 **
-** dead node elemination
-** walks one time through the whole graph and copies it into another graph,
-** so unreachable nodes will be lost.
+** Optimizations for a whole ir graph, i.e., a procedure.
 */

+# include
+
 # include "irgopt.h"
-# include "irnode.h"
+# include "irnode_t.h"
+# include "irgraph_t.h"
 # include "iropt.h"
 # include "irgwalk.h"
-# include "irgraph.h"
 # include "ircons.h"
+# include "misc.h"
+# include "irgmod.h"
+
+/********************************************************************/
+/* apply optimizations of iropt to all nodes.                       */
+/********************************************************************/

 void
 optimize_in_place_wrapper (ir_node *n, void *env) {
@@ -28,7 +34,6 @@ optimize_in_place_wrapper (ir_node *n, void *env) {
   }
 }

-
 void
 local_optimize_graph (ir_graph *irg) {
   ir_graph *rem = current_ir_graph;
@@ -40,312 +45,199 @@ local_optimize_graph (ir_graph *irg) {
   current_ir_graph = rem;
 }

-/* Remeber the new node in the old node,
-   by using a field that all nodes have. */
-void *
+/********************************************************************/
+/* Routines for dead node elimination / copying garbage collection */
+/* of the obstack.                                                  */
+/********************************************************************/
+
+/* Remember the new node in the old node by using a field all nodes have. */
+inline void
 set_new_node (ir_node *old, ir_node *new)
 {
-  old->in[0] = new;
-  return old;
+  old->link = new;
 }

 /* Get this new node, before the old node is forgotten. */
-ir_node *
+inline ir_node *
 get_new_node (ir_node * n)
 {
-  return n->in[0];
+  return n->link;
 }

+/* We use the block_visited flag to mark that we have computed the
+   number of useful predecessors for this block.
+   Further, we encode the new arity in this flag. Remembering the arity is
+   useful, as it saves a lot of pointer accesses. This function is called
+   for all Phi and Block nodes in a Block. */
+inline int
+compute_new_arity(ir_node *b) {
+  int i, res;
+  int irg_v, block_v;
+
+  irg_v = get_irg_block_visited(current_ir_graph);
+  block_v = get_Block_block_visited(b);
+  if (block_v >= irg_v) {
+    /* we computed the number of preds for this block and saved it in the
+       block_v flag */
+    return block_v - irg_v;
+  } else {
+    /* compute the number of good predecessors */
+    res = get_irn_arity(b);
+    for (i = 0; i < get_irn_arity(b); i++)
+      if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
+    /* save it in the flag. */
+    set_Block_block_visited(b, irg_v + res);
+    return res;
+  }
+}

-/* Create this node on a new obstack. */
-void
+/* Copies the node to the new obstack. The Ins of the new node point to
+   the predecessors on the old obstack. n->link points to the new node.
+   For Phi and Block nodes the function allocates the in arrays with an arity
+   only for useful predecessors. The arity is determined by counting
+   the non-bad predecessors of the block. */
+inline void
 copy_node (ir_node *n, void *env) {
-  int i;
-  ir_node *res, *a, *b;
-  res = (ir_node *) malloc (sizeof (ir_node));
-  a = (ir_node *) malloc (sizeof (ir_node));
-  b = (ir_node *) malloc (sizeof (ir_node));
-
-  if (is_binop(n)) {
-    a = get_binop_left(n);
-    b = get_binop_right(n);
-  } else if (is_unop(n)) {
-    a = get_unop_op(n);
+  ir_node *nn, *block;
+  int new_arity;
+
+  if (get_irn_opcode(n) == iro_Block) {
+    block = NULL;
+    new_arity = compute_new_arity(n);
+  } else {
+    block = get_nodes_Block(n);
+    if (get_irn_opcode(n) == iro_Phi) {
+      new_arity = compute_new_arity(block);
+    } else {
+      new_arity = get_irn_arity(n);
+    }
   }
+  nn = new_ir_node(current_ir_graph,
+                   block,
+                   get_irn_op(n),
+                   get_irn_mode(n),
+                   new_arity,
+                   get_irn_in(n));
+  copy_attrs(n, nn);
+  set_new_node(n, nn);
+}

-  switch (get_irn_opcode(n)) {
-  case iro_Block:
-    {
-      /*CS malloc*/
-      ir_node *in [get_Block_n_cfgpreds(n)];
-
-      for (i=0; i <(get_Block_n_cfgpreds(n)); i++) {
-        in[i] = get_Block_cfgpred (n, i);
-      }
-      res = new_r_Block (current_ir_graph, get_Block_n_cfgpreds(n), in);
-      set_new_node(n, res);
-    }
-    break;
-  case iro_Start:
-    res = new_r_Start (current_ir_graph, get_new_node(n));
-    set_new_node(n, res);
-    break;
-  case iro_End:
-    res = new_r_End (current_ir_graph, get_new_node(n));
-    set_new_node(n, res);
-    break;
-  case iro_Jmp:
-    res = new_r_Jmp (current_ir_graph, get_new_node(n));
-    set_new_node(n, res);
-    break;
-  case iro_Cond:
-    res = new_r_Cond (current_ir_graph, get_new_node(n),
-                      get_Cond_selector(n));
-    set_new_node(n, res);
-    break;
-  case iro_Return:
-    {
-      /*CS malloc*/
-      ir_node *in [get_Return_n_res(n)];
-      for (i=0; i <(get_Return_n_res(n)); i++) {
-        in[i] = get_Return_res (n, i);
+/* Copies new predecessors of old node to new node remembered in link.
+   Spares the Bad predecessors of Phi and Block nodes. */
+inline void
+copy_preds (ir_node *n, void *env) {
+  ir_node *nn, *block/*, *on*/;
+  int i, j;
+
+  nn = get_new_node(n);
+
+  if (get_irn_opcode(n) == iro_Block) {
+    /* Don't copy Bad nodes. */
+    j = 0;
+    for (i = 0; i < get_irn_arity(n); i++)
+      if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
+        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+        j++;
       }
-      res = new_r_Return (current_ir_graph, get_new_node(n),
-                          get_Return_mem (n), get_Return_n_res(n), in);
-      set_new_node(n, res);
-    }
-    break;
-  case iro_Raise:
-    res = new_r_Raise (current_ir_graph, get_new_node(n),
-                       get_Raise_mem(n), get_Raise_exoptr(n));
-    set_new_node(n, res);
-    break;
-  case iro_Const:
-    res = new_r_Const (current_ir_graph, get_new_node(n),
-                       get_irn_mode(n), get_Const_tarval(n));
-    set_new_node(n, res);
-    break;
-  case iro_SymConst:
-    {
-      type_or_id *value;
-      value = (type_or_id *) malloc (sizeof (type_or_id));
-      if ((get_SymConst_kind(n)==type_tag) || (get_SymConst_kind(n)==size))
-        {
-          value = (type_or_id *) get_SymConst_type(n);
-        }
-      else
-        {
-          if (get_SymConst_kind(n)==linkage_ptr_info)
-            {
-              value = (type_or_id *) get_SymConst_ptrinfo(n);
-            }
-        }
-      res = new_r_SymConst (current_ir_graph, get_new_node(n), value,
-                            get_SymConst_kind (n));
-      set_new_node(n, res);
-    }
-    break;
-  case iro_Sel:
-    {
-      /*CS*/
-      ir_node *in [get_Sel_n_index(n)];
-      for (i=0; i <(get_Sel_n_index(n)); i++) {
-        in[i] = get_Sel_index (n, i);
-      }
-      res = new_r_Sel (current_ir_graph, get_new_node(n),
-                       get_Sel_mem(n), get_Sel_ptr(n), get_Sel_n_index(n),
-                       in, get_Sel_entity(n));
-      set_new_node(n, res);
-    }
-    break;
-  case iro_Call:
-    {
-      /*CS*/
-      ir_node *in [get_Call_arity(n)];
-      for (i=0; i <(get_Call_arity(n)); i++) {
-        in[i] = get_Call_param (n, i);
+    /* repair the block visited flag from above misuse */
+    set_Block_block_visited(nn, 0);
+    /* Local optimization could not merge two subsequent blocks if
+       the in array contained Bads. Now it's possible. *
+    on = optimize_in_place(nn);
+    if (nn != on) exchange(nn, on);*/
+  } else if (get_irn_opcode(n) == iro_Phi) {
+    /* Don't copy node if corresponding predecessor in block is Bad.
+       The Block itself should not be Bad. */
+    block = get_nodes_Block(n);
+    set_irn_n (nn, -1, get_new_node(block));
+    j = 0;
+    for (i = 0; i < get_irn_arity(n); i++)
+      if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
+        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+        j++;
       }
-      res = new_r_Call (current_ir_graph, get_new_node(n), get_Call_mem(n),
-                        get_Call_ptr(n), get_Call_arity(n),
-                        in, get_Call_type (n));
-      set_new_node(n, res);
-    }
-    break;
-  case iro_Add:
-    res = new_r_Add (current_ir_graph, get_new_node(n),
-                     get_new_node(a), get_new_node(b), get_irn_mode(n));
-    set_new_node(n, res);
-    break;
-  case iro_Sub:
-    {
-      ir_node *temp_node;
-      temp_node = get_nodes_Block(n);
-      res = new_r_Sub (current_ir_graph, get_new_node(temp_node),
-                       get_new_node(a), get_new_node(b), get_irn_mode(n));
-      set_new_node(n, res);
-    }
-    break;
-  case iro_Minus:
-    res = new_r_Minus (current_ir_graph, get_new_node(n), get_new_node(a),
-                       get_irn_mode(n));
-    set_new_node(n, res);
-    break;
-  case iro_Mul:
-    res = new_r_Mul (current_ir_graph, get_new_node(n), get_new_node(a),
-                     get_new_node(b), get_irn_mode(n));
-    break;
-  case iro_Quot:
-    res = new_r_Quot (current_ir_graph, get_new_node(n), get_Quot_mem (n),
-                      get_new_node(a), get_new_node(b));
-    break;
-  case iro_DivMod:
-    res = new_r_DivMod (current_ir_graph, get_new_node(n), get_DivMod_mem(n),
-                        get_new_node(a), get_new_node(b));
-    break;
-  case iro_Div:
-    res = new_r_Div (current_ir_graph, get_new_node(n), get_Div_mem(n),
-                     get_new_node(a), get_new_node(b));
-    break;
-  case iro_Mod:
-    res = new_r_Mod (current_ir_graph, get_new_node(n), get_Mod_mem(n),
-                     get_new_node(a), get_new_node(b));
-    break;
-  case iro_Abs:
-    res = new_r_Abs (current_ir_graph, get_new_node(n), get_Abs_op(n),
-                     get_irn_mode(n));
-    break;
-  case iro_And:
-    res = new_r_And (current_ir_graph, get_new_node(n), get_new_node(a),
-                     get_new_node(b), get_irn_mode(n));
-    break;
-  case iro_Or:
-    res = new_r_Or (current_ir_graph, get_new_node(n), get_new_node(a),
-                    get_new_node(b), get_irn_mode(n));
-    break;
-  case iro_Eor:
-    res = new_r_Eor (current_ir_graph, get_new_node(n), get_new_node(a),
-                     get_new_node(b), get_irn_mode(n));
-    break;
-  case iro_Not:
-    res = new_r_Not (current_ir_graph, get_new_node(n), get_Not_op(n),
-                     get_irn_mode(n));
-    break;
-  case iro_Cmp:
-    res = new_r_Cmp (current_ir_graph, get_new_node(n), get_Cmp_left(n),
-                     get_Cmp_right(n));
-    break;
-  case iro_Shl:
-    res = new_r_Shl (current_ir_graph, get_new_node(n), get_Shl_left(n),
-                     get_Shl_right(n), get_irn_mode(n));
-    break;
-  case iro_Shr:
-    res = new_r_Shr (current_ir_graph, get_new_node(n), get_Shr_left(n),
-                     get_Shr_right(n), get_irn_mode(n));
-    break;
-  case iro_Shrs:
-    res = new_r_Shrs (current_ir_graph, get_new_node(n), get_Shrs_left(n),
-                      get_Shrs_right(n), get_irn_mode(n));
-    break;
-  case iro_Rot:
-    res = new_r_Rot (current_ir_graph, get_new_node(n), get_Rot_left(n),
-                     get_Rot_right(n), get_irn_mode(n));
-    break;
-  case iro_Conv:
-    res = new_r_Conv (current_ir_graph, get_new_node(n), get_Conv_op(n),
-                      get_irn_mode(n));
-    break;
-  case iro_Phi:
-    /*CS malloc*/
-    {
-      ir_node *in [get_Phi_n_preds(n)];
-      for (i=0; i <(get_Phi_n_preds(n)); i++) {
-        in[i] = get_Phi_pred (n, i);
-      }
-      res = new_r_Phi (current_ir_graph, get_new_node(n),
-                       get_Phi_n_preds(n), in, get_irn_mode(n));
-      set_new_node(n, res);
-    }
-    break;
-  case iro_Load:
-    res = new_r_Load (current_ir_graph, get_new_node(n), get_Load_mem(n),
-                      get_Load_ptr(n));
-    break;
-  case iro_Store:
-    res = new_r_Store (current_ir_graph, get_new_node(n), get_Store_mem(n),
-                       get_Store_ptr(n), get_Store_value(n));
-    break;
-  case iro_Alloc:
-    res = new_r_Alloc (current_ir_graph, get_new_node(n),
-                       get_Alloc_mem(n), get_Alloc_size(n),
-                       get_Alloc_type(n), get_Alloc_where(n));
-
-    break;
-  case iro_Free:
-    res = new_r_Free (current_ir_graph, get_new_node(n),
-                      get_Free_mem(n), get_Free_ptr(n),
-                      get_Free_size(n), get_Free_type(n));
-    break;
-  case iro_Sync:
-    /*CS malloc*/
-    {
-      ir_node *in [get_Sync_n_preds(n)];
-      for (i=0; i <(get_Sync_n_preds(n)); i++) {
-        in[i] = get_Sync_pred (n, i);
-      }
-      res = new_r_Sync (current_ir_graph, get_new_node(n),
-                        get_Sync_n_preds(n), in);
-      set_new_node(n, res);
-    }
-    break;
+    /* Compacting the Phi's ins might generate Phis with only one
+       predecessor. *
+    if (get_irn_arity(n) == 1)
+      exchange(n, get_irn_n(n, 0)); */
+  } else {
+    for (i = -1; i < get_irn_arity(n); i++)
+      set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
+  }
+}
-  case iro_Proj:
-    res = new_r_Proj (current_ir_graph, get_new_node(n),
-                      get_Proj_pred(n), get_irn_mode(n),
-                      get_Proj_proj(n));
-    break;
-  case iro_Tuple:
-    /*CS malloc*/
-    {
-      ir_node *in [get_Tuple_n_preds(n)];
-      for (i=0; i <(get_Tuple_n_preds(n)); i++) {
-        in[i] = get_Tuple_pred (n, i);
-      }
-      res = new_r_Tuple (current_ir_graph, get_new_node(n),
-                         get_Tuple_n_preds(n), in);
-      set_new_node(n, res);
-    }
-    break;
-  case iro_Id:
-    res = new_r_Id (current_ir_graph, get_new_node(n),
-                    get_Id_pred(n), get_irn_mode(n));
-    break;
-  case iro_Bad:
-    res = new_r_Bad (get_new_node(n));
-    break;
-  }
-}

+/* Copies the graph reachable from current_ir_graph->end to the obstack
+   in current_ir_graph.
+   Then fixes the fields in current_ir_graph containing nodes of the
+   graph. */
+void
+copy_graph () {
+  /* Not all nodes remembered in current_ir_graph might be reachable
+     from the end node. Ensure their link is set to NULL so that
+     we can test whether new nodes have been computed. */
+  set_irn_link(get_irg_frame  (current_ir_graph), NULL);
+  set_irn_link(get_irg_globals(current_ir_graph), NULL);
+  set_irn_link(get_irg_args   (current_ir_graph), NULL);
+
+  /* we use the block walk flag for removing Bads from the Blocks' ins. */
+  inc_irg_block_visited(current_ir_graph);
+
+  /* copy the graph */
+  irg_walk(get_irg_end(current_ir_graph), copy_node, copy_preds, NULL);
+
+  /* fix the fields in current_ir_graph */
+  set_irg_end (current_ir_graph, get_new_node(get_irg_end(current_ir_graph)));
+  set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
+  if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL)
+    irg_walk(get_irg_frame(current_ir_graph), copy_node, copy_preds, NULL);
+  if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL)
+    irg_walk(get_irg_globals(current_ir_graph), copy_node, copy_preds, NULL);
+  if (get_irn_link(get_irg_args(current_ir_graph)) == NULL)
+    irg_walk(get_irg_args(current_ir_graph), copy_node, copy_preds, NULL);
+  set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
+  set_irg_start_block(current_ir_graph,
+                      get_new_node(get_irg_start_block(current_ir_graph)));
+  set_irg_frame (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
+  set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
+  set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
+  if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
+    copy_node(get_irg_bad(current_ir_graph), NULL);
+    copy_preds(get_irg_bad(current_ir_graph), NULL);
+  }
+  set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
+}

+/* Amroq calls this emigrate() */
 void
-dead_node_elemination(ir_graph *irg) {
-  struct obstack *graveyard_obst;
-  struct obstack *rebirth_obst;
+dead_node_elimination(ir_graph *irg) {
+  ir_graph *rem;
+  struct obstack *graveyard_obst = NULL;
+  struct obstack *rebirth_obst   = NULL;
+
+  /* Remember external state of current_ir_graph. */
+  rem = current_ir_graph;
+  current_ir_graph = irg;
+
+  if (get_optimize() && get_opt_dead_node_elimination()) {

-  /* A quiet place, where the old obstack can rest in peace,
-     until it will be cremated. */
-  graveyard_obst = irg->obst;
+    /* A quiet place, where the old obstack can rest in peace,
+       until it will be cremated. */
+    graveyard_obst = irg->obst;

-  /* A new obstack, where the reachable nodes will be copied to. */
-  rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
-  current_ir_graph->obst = rebirth_obst;
-  obstack_init (current_ir_graph->obst);
+    /* A new obstack, where the reachable nodes will be copied to. */
+    rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
+    current_ir_graph->obst = rebirth_obst;
+    obstack_init (current_ir_graph->obst);

-  /* Walks the graph once, and at the recursive way do the copy thing.
-     all reachable nodes will be copied to a new obstack. */
-  irg_walk(irg->end, NULL, copy_node, NULL);
+    /* Copy the graph from the old to the new obstack */
+    copy_graph();

-  /* Free memory from old unoptimized obstack */
-  xfree (graveyard_obst);
+    /* Free memory from old unoptimized obstack */
+    obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
+    xfree (graveyard_obst);           /* ... then free it. */
+  }
+  current_ir_graph = rem;
 }
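
For orientation, the dead node elimination introduced above is a two-pass copying collector: copy_node() allocates each reachable node on the fresh obstack and parks the copy in the old node's link field, and copy_preds() then rewires the copies' in edges. Splitting the work into two walks is what makes cycles (Blocks and their Phis) safe, since by the second pass every reachable node already has a copy. The following minimal, self-contained sketch models only that link-based plumbing; it is not libfirm code, and every name in it (node, copy_pass, fix_preds_pass) is invented for illustration.

#include <stdio.h>
#include <stdlib.h>

/* A toy node: a value, predecessor edges, and a link field that
   plays the role of n->link in the patch above. */
typedef struct node {
  int value;
  int arity;
  struct node **in;
  struct node *link;   /* old node -> its copy */
} node;

/* First pass, analogous to copy_node(): allocate the copy and
   remember it in the old node's link field. */
static void copy_pass(node *n) {
  node *nn = (node *) malloc(sizeof(node));
  nn->value = n->value;
  nn->arity = n->arity;
  nn->in = (node **) malloc(sizeof(node *) * (n->arity ? n->arity : 1));
  nn->link = NULL;
  n->link = nn;
}

/* Second pass, analogous to copy_preds(): every reachable node now
   has a copy, so redirect the copy's ins to the copied predecessors. */
static void fix_preds_pass(node *n) {
  int i;
  for (i = 0; i < n->arity; i++)
    n->link->in[i] = n->in[i]->link;
}

int main(void) {
  node a = { 1, 0, NULL, NULL };
  node *b_in[1] = { &a };
  node b = { 2, 1, b_in, NULL };

  /* run both passes over all (here: both) reachable nodes */
  copy_pass(&a);
  copy_pass(&b);
  fix_preds_pass(&a);
  fix_preds_pass(&b);

  /* the copy of b points to the copy of a, not to the original */
  printf("copy of b: %d, its pred: %d\n",
         b.link->value, b.link->in[0]->value);
  return 0;
}

The other trick worth isolating is compute_new_arity(): instead of a side table, it caches the number of good predecessors inside the block_visited counter itself, encoded as irg_block_visited + result, so a value >= the graph's counter means "already computed". A sketch of just that encoding, again with invented names:

#include <stdio.h>

static unsigned long graph_visited = 100;  /* stands in for irg_block_visited */

typedef struct block {
  unsigned long visited;                   /* stands in for block_visited */
  int bad_preds, preds;
} block;

/* Like compute_new_arity(): the first call counts the good predecessors
   and encodes the result in the visited field; later calls decode it. */
static int new_arity(block *b) {
  if (b->visited >= graph_visited)
    return (int)(b->visited - graph_visited);     /* cached */
  {
    int res = b->preds - b->bad_preds;            /* "count good preds" */
    b->visited = graph_visited + (unsigned long)res;
    return res;
  }
}

int main(void) {
  block b = { 0, 1, 3 };  /* 3 predecessors, 1 of them Bad */
  printf("%d %d\n", new_arity(&b), new_arity(&b));  /* prints: 2 2 */
  return 0;
}

Both sketches leak their allocations and omit the Bad-predecessor compaction and the flag reset (set_Block_block_visited(nn, 0) in the patch); they demonstrate only the pointer and encoding scheme.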