2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief restarting SSA construction for values.
23 * @author Michael Beck
31 #include "irgraph_t.h"
/** Environment for the preparation walker. */
typedef struct walk_env_t {
	int n_loc; /**< Number of newly allocated locations. */
} walk_env_t;
40 * Post-walker: prepare the graph nodes for new SSA construction cycle by allocation
43 static void prepare_nodes(ir_node *irn, void *ctx) {
46 switch (get_irn_opcode(irn)) {
48 /* reset mature flag */
49 irn->attr.block.is_matured = 0;
50 irn->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
51 current_ir_graph->n_loc);
52 memset(irn->attr.block.graph_arr, 0, sizeof(ir_node *) * current_ir_graph->n_loc);
53 irn->attr.block.phis = NULL;
55 #if PRECISE_EXC_CONTEXT
56 /* note that the frag array must be cleared first, else firm_alloc_frag_arr()
57 will not allocate new memory. */
59 irn->attr.except.frag_arr = NULL;
60 firm_alloc_frag_arr(irn, op_Quot, &irn->attr.except.frag_arr);
63 irn->attr.except.frag_arr = NULL;
64 firm_alloc_frag_arr(irn, op_DivMod, &irn->attr.except.frag_arr);
67 irn->attr.except.frag_arr = NULL;
68 firm_alloc_frag_arr(irn, op_Div, &irn->attr.except.frag_arr);
71 irn->attr.except.frag_arr = NULL;
72 firm_alloc_frag_arr(irn, op_Mod, &irn->attr.except.frag_arr);
75 irn->attr.call.exc.frag_arr = NULL;
76 firm_alloc_frag_arr(irn, op_Call, &irn->attr.call.exc.frag_arr);
79 irn->attr.load.exc.frag_arr = NULL;
80 firm_alloc_frag_arr(irn, op_Load, &irn->attr.load.exc.frag_arr);
83 irn->attr.store.exc.frag_arr = NULL;
84 firm_alloc_frag_arr(irn, op_Store, &irn->attr.store.exc.frag_arr);
87 irn->attr.alloc.exc.frag_arr = NULL;
88 firm_alloc_frag_arr(irn, op_Alloc, &irn->attr.alloc.exc.frag_arr);
91 irn->attr.copyb.exc.frag_arr = NULL;
92 firm_alloc_frag_arr(irn, op_CopyB, &irn->attr.copyb.exc.frag_arr);
95 irn->attr.bound.exc.frag_arr = NULL;
96 firm_alloc_frag_arr(irn, op_Bound, &irn->attr.bound.exc.frag_arr);
105 * Restarts SSA construction on the given graph with n_loc
108 * @param irg the graph on which the SSA construction is restarted
109 * @param n_loc number of new variables
111 * After this function is complete, the graph is in phase_building
112 * again and set_value()/get_value() and mature_block() can be used
113 * to construct new values.
115 void ssa_cons_start(ir_graph *irg, int n_loc) {
118 /* for now we support only phase_high graphs */
119 assert(irg->phase_state == phase_high);
121 /* reset the phase to phase building: some optimization might depend on it */
122 set_irg_phase_state(irg, phase_building);
124 irg_set_nloc(irg, n_loc);
127 * Note: we could try to reuse existing frag arrays, but it does not
128 * seems worth to do this. First, we have to check if they really exists and
129 * then clear them. We do not expect SSA construction is used often.
131 irg_walk_graph(irg, NULL, prepare_nodes, &env);
135 * mature all immature Blocks.
137 static void finish_block(ir_node *block, void *env) {
138 if (!get_Block_matured(block))
139 mature_immBlock(block);
143 * Finalize the (restarted) SSA construction. Matures all blocks that are
144 * not matured yet and reset the graph state to phase_high.
146 void ssa_cons_finish(ir_graph *irg) {
147 irg_block_walk_graph(irg, NULL, finish_block, NULL);
148 irg_finalize_cons(irg);