/*
- * Project: libFIRM
- * File name: ir/ir/irgopt.c
- * Purpose: Optimizations for a whole ir graph, i.e., a procedure.
- * Author: Christian Schaefer, Goetz Lindenmaier
- * Modified by: Sebastian Felis, Michael Beck
- * Created:
- * CVS-ID: $Id$
- * Copyright: (c) 1998-2007 Universität Karlsruhe
- * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
+ * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/**
+ * @file
+ * @brief Optimizations for a whole ir graph, i.e., a procedure.
+ * @author Christian Schaefer, Goetz Lindenmaier, Sebastian Felis,
+ * Michael Beck
+ * @version $Id$
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#include "irgraph_t.h"
#include "irprog_t.h"
+#include "iroptimize.h"
#include "ircons.h"
#include "iropt_t.h"
-#include "cfopt.h"
#include "irgopt.h"
#include "irgmod.h"
#include "irgwalk.h"
*/
static void optimize_in_place_wrapper (ir_node *n, void *env) {
ir_node *optimized = optimize_in_place_2(n);
- if (optimized != n) exchange (n, optimized);
+ /* env is unused; this is a generic graph-walker callback. */
+ (void) env;
+
+ /* Only rewire the graph if local optimization actually produced a
+ different node. */
+ if (optimized != n) {
+ exchange (n, optimized);
+ }
}
/**
* Block-Walker: uses dominance depth to mark dead blocks.
*/
static void kill_dead_blocks(ir_node *block, void *env) {
+ (void) env;
+
if (get_Block_dom_depth(block) < 0) {
/*
* Note that the new dominance code correctly handles
pdeq *waitq = new_pdeq();
int state = edges_activated(irg);
ir_graph *rem = current_ir_graph;
+ ir_node *end;
+ int i;
current_ir_graph = irg;
set_using_irn_link(irg);
- /* walk over the graph */
- irg_walk_graph(irg, NULL, opt_walker, waitq);
+ /* walk over the graph, but don't touch keep-alives */
+ irg_walk(get_irg_end_block(irg), NULL, opt_walker, waitq);
+
+ end = get_irg_end(irg);
+
+ /* optimize keep-alives by removing superfluous ones */
+ for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
+ ir_node *ka = get_End_keepalive(end, i);
+
+ if (irn_visited(ka) && !is_irn_keep(ka)) {
+ /* this node can be regularly visited, no need to keep it */
+ set_End_keepalive(end, i, get_irg_bad(irg));
+ }
+ }
+ /* now walk again and visit all not yet visited nodes */
+ set_irg_visited(current_ir_graph, get_irg_visited(irg) - 1);
+ irg_walk(get_irg_end(irg), NULL, opt_walker, waitq);
/* finish the wait queue */
while (! pdeq_empty(waitq)) {
copy_preds(ir_node *n, void *env) {
ir_node *nn, *block;
int i, j, irn_arity;
+ (void) env;
nn = get_new_node(n);
static void relink_bad_block_predecessors(ir_node *n, void *env) {
ir_node **new_in, *irn;
int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
+ (void) env;
/* if link field of block is NULL, look for bad predecessors otherwise
this is already done */
static void dead_node_hook(void *context, ir_graph *irg, int start) {
survive_dce_t *sd = context;
+ (void) irg;
/* Create a new map before the dead node elimination is performed. */
if (start) {
static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
survive_dce_t *sd = context;
survive_dce_list_t *list = pmap_get(sd->places, old);
+ (void) irg;
/* If the node is to be patched back, write the new address to all registered locations. */
if (list) {
return res;
}
+/**
+ * Exception handling mode of a Call site, used by inline_method() to
+ * decide how the callee's exceptional control flow must be wired up.
+ */
+enum exc_mode {
+ exc_handler = 0, /**< There is a handler. */
+ exc_to_end = 1, /**< Branches to End. */
+ exc_no_handler = 2 /**< Exception handling not represented. */
+};
+
/* Inlines a method at the given call site. */
int inline_method(ir_node *call, ir_graph *called_graph) {
ir_node *pre_call;
ir_node **cf_pred;
ir_node *ret, *phi;
int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
- int exc_handling;
+ enum exc_mode exc_handling;
ir_type *called_frame;
irg_inline_property prop = get_irg_inline_property(called_graph);
{
ir_node *proj, *Mproj = NULL, *Xproj = NULL;
for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
- assert(is_Proj(proj));
- if (get_Proj_proj(proj) == pn_Call_X_except) Xproj = proj;
- if (get_Proj_proj(proj) == pn_Call_M_except) Mproj = proj;
+ long proj_nr = get_Proj_proj(proj);
+ if (proj_nr == pn_Call_X_except) Xproj = proj;
+ if (proj_nr == pn_Call_M_except) Mproj = proj;
}
- if (Mproj) { assert(Xproj); exc_handling = 0; } /* Mproj */
- else if (Xproj) { exc_handling = 1; } /* !Mproj && Xproj */
- else { exc_handling = 2; } /* !Mproj && !Xproj */
+ if (Mproj) { assert(Xproj); exc_handling = exc_handler; } /* Mproj */
+ else if (Xproj) { exc_handling = exc_to_end; } /* !Mproj && Xproj */
+ else { exc_handling = exc_no_handler; } /* !Mproj && !Xproj */
}
/* --
/* -- Performing dead node elimination inlines the graph -- */
/* Copies the nodes to the obstack of current_ir_graph. Updates links to new
entities. */
- /* @@@ endless loops are not copied!! -- they should be, I think... */
irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
get_irg_frame_type(called_graph));
arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
n_res = get_method_n_ress(get_Call_type(call));
- res_pred = xmalloc (n_res * sizeof(*res_pred));
- cf_pred = xmalloc (arity * sizeof(*res_pred));
+ res_pred = xmalloc(n_res * sizeof(*res_pred));
+ cf_pred = xmalloc(arity * sizeof(*res_pred));
set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
/* -- archive keepalives -- */
irn_arity = get_irn_arity(end);
- for (i = 0; i < irn_arity; i++)
- add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
+ for (i = 0; i < irn_arity; i++) {
+ ir_node *ka = get_End_keepalive(end, i);
+ if (! is_Bad(ka))
+ add_End_keepalive(get_irg_end(current_ir_graph), ka);
+ }
/* The new end node will die. We need not free as the in array is on the obstack:
copy_node() only generated 'D' arrays. */
/* -- Build a Tuple for all results of the method.
Add Phi node if there was more than one Return. -- */
- turn_into_tuple(post_call, 4);
+ turn_into_tuple(post_call, pn_Call_max);
/* First the Memory-Phi */
n_ret = 0;
for (i = 0; i < arity; i++) {
} else {
set_Tuple_pred(call, pn_Call_T_result, new_Bad());
}
+
+ /* For now, we cannot inline calls with value_base */
+ set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
+
/* Finally the exception control flow.
We have two (three) possible situations:
First if the Call branches to an exception handler: We need to add a Phi node to
Second the Call branches to End, the exception is not handled. Just
add all inlined exception branches to the End node.
Third: there is no Exception edge at all. Handle as case two. */
- if (exc_handling == 0) {
+ if (exc_handling == exc_handler) {
n_exc = 0;
for (i = 0; i < arity; i++) {
- ir_node *ret;
+ ir_node *ret, *irn;
ret = get_irn_n(end_bl, i);
- if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
+ irn = skip_Proj(ret);
+ if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
cf_pred[n_exc] = ret;
- n_exc++;
+ ++n_exc;
}
}
if (n_exc > 0) {
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
set_Tuple_pred(call, pn_Call_M_except, new_Bad());
}
+ set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
} else {
ir_node *main_end_bl;
int main_end_bl_arity;
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret = get_irn_n(end_bl, i);
+ ir_node *irn = skip_Proj(ret);
- if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
+ if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
cf_pred[n_exc] = ret;
n_exc++;
}
}
main_end_bl = get_irg_end_block(current_ir_graph);
main_end_bl_arity = get_irn_arity(main_end_bl);
- end_preds = xmalloc ((n_exc + main_end_bl_arity) * sizeof(*end_preds));
+ end_preds = xmalloc((n_exc + main_end_bl_arity) * sizeof(*end_preds));
for (i = 0; i < main_end_bl_arity; ++i)
end_preds[i] = get_irn_n(main_end_bl, i);
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
+ set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
+ set_Tuple_pred(call, pn_Call_X_except, new_Bad());
+ set_Tuple_pred(call, pn_Call_M_except, new_Bad());
free(end_preds);
}
free(res_pred);
}
}
+/**
+ * Compute the deepest common ancestor in the dominator tree of the
+ * blocks of all consumers of @p node; the node's final placement has
+ * to dominate this DCA block.
+ *
+ * @param node  the node whose consumers are examined (via out edges)
+ * @param dca   the deepest common ancestor found so far, may be NULL
+ * @return the updated deepest common ancestor block (or NULL if no
+ *         real consumer was found)
+ */
+static ir_node *get_deepest_common_ancestor(ir_node *node, ir_node *dca)
+{
+ int i;
+
+ for (i = get_irn_n_outs(node) - 1; i >= 0; --i) {
+ ir_node *succ = get_irn_out(node, i);
+ ir_node *succ_blk;
+
+ if (is_End(succ)) {
+ /*
+ * This consumer is the End node, a keep alive edge.
+ * This is not a real consumer, so we ignore it
+ */
+ continue;
+ }
+
+ if(is_Proj(succ)) {
+ /* A Proj only forwards a result: treat the Proj's own
+ consumers as consumers of node by recursing through it. */
+ dca = get_deepest_common_ancestor(succ, dca);
+ } else {
+ /* ignore if succ is in dead code */
+ succ_blk = get_irn_n(succ, -1);
+ if (is_Block_unreachable(succ_blk))
+ continue;
+ dca = consumer_dom_dca(dca, succ, node);
+ }
+ }
+
+ return dca;
+}
+
+/**
+ * Move all Proj consumers of @p node into @p block, recursing through
+ * mode_T Projs so nested Proj chains follow their producer's block.
+ *
+ * @param node   a node whose out edges lead only to Projs (asserted)
+ * @param block  the block the Projs are moved into
+ */
+static void set_projs_block(ir_node *node, ir_node *block)
+{
+ int i;
+
+ for (i = get_irn_n_outs(node) - 1; i >= 0; --i) {
+ ir_node *succ = get_irn_out(node, i);
+
+ assert(is_Proj(succ));
+
+ if(get_irn_mode(succ) == mode_T) {
+ /* a mode_T Proj has Projs of its own; move them as well */
+ set_projs_block(succ, block);
+ }
+ set_nodes_block(succ, block);
+ }
+}
+
/**
* Find the latest legal block for N and place N into the
* `optimal' Block between the latest and earliest legal block.
(op != op_SymConst) &&
(op != op_Proj))
{
- ir_node *dca = NULL; /* deepest common ancestor in the
- dominator tree of all nodes'
- blocks depending on us; our final
- placement has to dominate DCA. */
- for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
- ir_node *succ = get_irn_out(n, i);
- ir_node *succ_blk;
-
- if (get_irn_op(succ) == op_End) {
- /*
- * This consumer is the End node, a keep alive edge.
- * This is not a real consumer, so we ignore it
- */
- continue;
- }
-
- /* ignore if succ is in dead code */
- succ_blk = get_irn_n(succ, -1);
- if (is_Block_unreachable(succ_blk))
- continue;
- dca = consumer_dom_dca(dca, succ, n);
- }
- if (dca) {
+ /* deepest common ancestor in the dominator tree of all nodes'
+ blocks depending on us; our final placement has to dominate
+ DCA. */
+ ir_node *dca = get_deepest_common_ancestor(n, NULL);
+ if (dca != NULL) {
set_nodes_block(n, dca);
move_out_of_loops(n, early_blk);
+ if(get_irn_mode(n) == mode_T) {
+ set_projs_block(n, get_nodes_block(n));
+ }
}
}
}