 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 * @brief   Normalize returns.
 * @author  Michael Beck
 */
#include "iroptimize.h"
#include "irgraph_t.h"
/* Simple bit set over the end block's predecessor indices, stored in the
 * local 'returns' byte array. */
#define set_bit(n)  (returns[(n) >> 3] |= 1 << ((n) & 7))
#define get_bit(n)  (returns[(n) >> 3] & (1 << ((n) & 7)))

#define IMAX(a, b)  ((a) > (b) ? (a) : (b))
/**
 * Normalize the Returns of a graph by creating a new End block
 * with one Return (Phi).
 * This is the preferred input for the if-conversion.
 *
 * In pseudocode, it means:
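 *
 *   (illustrative example; the concrete condition and values are arbitrary)
 *
 *     if (a)
 *       return (x);
 *     else
 *       return (y);
 *
 *   is transformed into
 *
 *     if (a)
 *       res = x;
 *     else
 *       res = y;
 *     return (res);
 */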
void normalize_one_return(ir_graph *irg) {
	ir_node *endbl = get_irg_end_block(irg);
	int i, j, k, n, last_idx, n_rets, n_ret_vals = -1;
	unsigned char *returns;
	ir_node **in, **retvals, **endbl_in;
	/* check whether we have more than one Return */
	n = get_Block_n_cfgpreds(endbl);

	/* The end block has no predecessors: the graph contains an endless
	   loop. In that case, no Return exists. */

	/* one bit per end block predecessor */
	returns = ALLOCANZ(unsigned char, (n + 7) >> 3);
	for (n_rets = i = 0; i < n; ++i) {
		ir_node *node = get_Block_cfgpred(endbl, i);

		if (is_Return(node)) {
			n_ret_vals = get_irn_arity(node);

	/* there should be at least one Return node in Firm */

	in       = ALLOCAN(ir_node*, IMAX(n_rets, n_ret_vals));
	retvals  = ALLOCAN(ir_node*, n_rets * n_ret_vals);
	endbl_in = ALLOCAN(ir_node*, n);

	for (j = i = 0; i < n; ++i) {
		ir_node *ret = get_Block_cfgpred(endbl, i);

			ir_node *block = get_nodes_block(ret);
			/* create a new Jmp for every Return and place it into the in[] array */
			in[j] = new_r_Jmp(irg, block);

			/* save the return values, transposing them as we go */
			for (k = 0; k < n_ret_vals; ++k)
				retvals[j + k*n_rets] = get_irn_n(ret, k);
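			/* Note the transposed layout: for result index k, the values of
			 * all Returns end up adjacent at &retvals[k * n_rets], which is
			 * exactly the operand array the Phi construction below expects. */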
		endbl_in[last_idx++] = ret;

	/* ok, create a new block with all the created Jmps as predecessors */
	block = new_r_Block(irg, n_rets, in);
	/* now create the Phi nodes */
	for (j = i = 0; i < n_ret_vals; ++i, j += n_rets) {
		/* the return values are already transposed */

		/* Beware: normally the Phi constructor automatically replaces a Phi(a, ..., a)
		   by a, but NOT if a is Unknown. Here we know that this case can be
		   optimized as well. */
		first = retvals[j + 0];
		for (k = 1; k < n_rets; ++k) {
			if (retvals[j + k] != first) {

		in[i] = new_r_Phi(irg, block, n_rets, &retvals[j], get_irn_mode(retvals[j]));

	endbl_in[last_idx++] = new_r_Return(irg, block, in[0], n_ret_vals-1, &in[1]);

	set_irn_in(endbl, last_idx, endbl_in);
	/* Invalidate analysis information:
	 * a new Block was added, so dominator, outs and loop info are inconsistent;
	 * trouts and callee-state should still be valid. */
	set_irg_doms_inconsistent(irg);
	set_irg_outs_inconsistent(irg);
	set_irg_extblk_inconsistent(irg);
	set_irg_loopinfo_inconsistent(irg);
}

/**
 * Check whether a Return can be moved one block upwards.
 *
 * In a block with a Return, all live nodes must be linked
 * with the Return, otherwise they are dead (because the Return leaves
 * the graph, no more users of the other nodes can exist).
 *
 * We can move a Return if its predecessors are Phi nodes or
 * come from another block. In the latter case, it is always possible
 * to move the Return one block up, because the predecessor block must
 * dominate the Return block (SSA) and then it dominates the predecessor
 * block of the Return block as well.
 *
 * All predecessors of the Return block must be Jmps of course, or we
 * cannot move the Return up, so we add new blocks if needed.
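 *
 * Example (illustrative sketch; block and value names are arbitrary):
 *
 *     B1: Jmp      B2: Jmp               B1: Return(x)    B2: Return(y)
 *           \      /              ==>
 *       B3: r = Phi(x, y)
 *           Return(r)
 *
 * r is a Phi in the Return block B3, so the Return moved into B1/B2 simply
 * uses the corresponding Phi operand.
 */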
static int can_move_ret(ir_node *ret) {
	ir_node *retbl = get_nodes_block(ret);
	int i, n = get_irn_arity(ret);

	for (i = 0; i < n; ++i) {
		ir_node *pred = get_irn_n(ret, i);

		if (! is_Phi(pred) && retbl == get_nodes_block(pred)) {
			/* first condition failed: found a non-Phi predecessor
			 * that is in the Return block */

	/* check that all predecessors of the Return block are Jmps */
	n = get_Block_n_cfgpreds(retbl);

	for (i = 0; i < n; ++i) {
		ir_node *pred = get_Block_cfgpred(retbl, i);

		pred = skip_Tuple(pred);
		if (! is_Jmp(pred) && !is_Bad(pred)) {
			/* simply place a new block here */
			ir_graph *irg  = get_irn_irg(retbl);
			ir_node *block = new_r_Block(irg, 1, &pred);
			ir_node *jmp   = new_r_Jmp(irg, block);

			set_Block_cfgpred(retbl, i, jmp);
/**
 * Normalize the Returns of a graph by moving
 * the Returns upwards as much as possible.
 * This might be preferred for code generation.
 *
 * In pseudocode, it means:
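 *
 *   (illustrative example; the concrete condition and values are arbitrary)
 *
 *     if (a)
 *       res = x;
 *     else
 *       res = y;
 *     return (res);
 *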
 *   is transformed into
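 *
 *     if (a)
 *       return (x);
 *     else
 *       return (y);
 */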
void normalize_n_returns(ir_graph *irg) {
	int i, j, n, n_rets, n_finals, n_ret_vals;
	ir_node *list  = NULL;
	ir_node *final = NULL;
	ir_node *endbl = get_irg_end_block(irg);
	/*
	 * First, link all Returns:
	 * These must be predecessors of the end block.
	 * Place Returns that can be moved onto the work list ('list'), all
	 * others onto the final list ('final').
	 */
	n = get_Block_n_cfgpreds(endbl);
	for (n_finals = n_rets = i = 0; i < n; ++i) {
		ir_node *ret = get_Block_cfgpred(endbl, i);

		if (is_Return(ret) && can_move_ret(ret)) {
			/*
			 * Ok, all conditions met: we can move this Return, put it
			 * onto our work list.
			 */
			set_irn_link(ret, list);

			/* Put all nodes that are not changed onto the final list. */
			set_irn_link(ret, final);

	/*
	 * Now move the Returns upwards. We always move one block up (and create n
	 * new Returns), then we check whether a newly created Return can be moved
	 * even further. If yes, we simply add it to our work list, otherwise to
	 * the final list.
	 */
	end = get_irg_end(irg);
	n_ret_vals = get_irn_arity(list);
	in = ALLOCAN(ir_node*, n_ret_vals);

		ir_node *block = get_nodes_block(ret);

		list = get_irn_link(ret);

		n = get_Block_n_cfgpreds(block);
		for (i = 0; i < n; ++i) {
			ir_node *jmp = get_Block_cfgpred(block, i);
			ir_node *new_bl, *new_ret;

			new_bl = get_nodes_block(jmp);
			/* create the in array for the new Return:
			 * a Phi in the old Return block is replaced by its i-th operand,
			 * everything else is used unchanged */
			for (j = 0; j < n_ret_vals; ++j) {
				ir_node *pred = get_irn_n(ret, j);

				in[j] = (is_Phi(pred) && get_nodes_block(pred) == block) ? get_Phi_pred(pred, i) : pred;
			}

			new_ret = new_r_Return(irg, new_bl, in[0], n_ret_vals - 1, &in[1]);

			if (! is_Bad(new_ret)) {
				/*
				 * The newly created node might be Bad if we created it in a
				 * block with only Bad predecessors. In that case ignore this
				 * block.
				 *
				 * We could even kill the Jmp then ...
				 */
				if (can_move_ret(new_ret)) {
					set_irn_link(new_ret, list);

					set_irn_link(new_ret, final);
			/* remove the Jmp, we have placed a Return here */
			exchange(jmp, new_r_Bad(irg));

		/*
		 * If the memory of the old Return is a PhiM, remove it from the
		 * keep-alives, or it will keep the block alive, which would crash
		 * the dominator algorithm.
		 */
		phiM = get_Return_mem(ret);

			n = get_End_n_keepalives(end);
			for (i = 0; i < n; ++i) {
				if (get_End_keepalive(end, i) == phiM) {
					set_End_keepalive(end, i, new_r_Bad(irg));
	/*
	 * Last step: Create a new end block, with all nodes on the final list
	 * as predecessors.
	 */
	in = ALLOCAN(ir_node*, n_finals);

	for (i = 0; final; ++i, final = get_irn_link(final))
		in[i] = final;

	exchange(endbl, new_r_Block(irg, n_finals, in));

	/* the end block is not automatically skipped, so do it here */
	set_irg_end_block(irg, skip_Id(get_irg_end_block(irg)));
	/* Invalidate analysis information:
	 * Blocks became dead and Returns were removed and created, so dominator,
	 * outs and loop info are inconsistent; trouts and callee-state should
	 * still be valid.
	 */
	set_irg_doms_inconsistent(irg);
	set_irg_extblk_inconsistent(irg); /* may not be needed */
	set_irg_outs_inconsistent(irg);
	set_irg_loopinfo_inconsistent(irg);
}
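
/*
 * Usage sketch (illustrative, not part of the original code): both passes are
 * typically run once per graph, e.g. from a driver that walks all graphs of
 * the program using the usual irprog iteration helpers:
 *
 *     int i;
 *     for (i = 0; i < get_irp_n_irgs(); ++i)
 *         normalize_one_return(get_irp_irg(i));
 *
 * normalize_n_returns() is called the same way when the fully split form is
 * preferred, e.g. for code generation.
 */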