2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
47 /* when we need verifying */
49 # define IRN_VERIFY_IRG(res, irg)
51 # define IRN_VERIFY_IRG(res, irg) irn_verify_irg(res, irg)
55 * Language dependent variable initialization callback.
57 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
59 #include "gen_ir_cons.c.inl"
61 static ir_node *new_bd_Start(dbg_info *db, ir_node *block)
64 ir_graph *irg = get_irn_irg(block);
66 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
68 IRN_VERIFY_IRG(res, irg);
72 static ir_node *new_bd_End(dbg_info *db, ir_node *block)
75 ir_graph *irg = get_irn_irg(block);
77 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
79 IRN_VERIFY_IRG(res, irg);
84 * Creates a Phi node with all predecessors. Calling this constructor
85 * is only allowed if the corresponding block is mature.
87 static ir_node *new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
90 ir_graph *irg = get_irn_irg(block);
94 /* Don't assert that block matured: the use of this constructor is strongly
96 if (get_Block_matured(block))
97 assert(get_irn_arity(block) == arity);
99 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
101 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
103 for (i = arity - 1; i >= 0; --i)
104 if (is_Unknown(in[i])) {
109 if (!has_unknown) res = optimize_node(res);
110 IRN_VERIFY_IRG(res, irg);
112 /* Memory Phis in endless loops must be kept alive.
113 As we can't distinguish these easily we keep all of them alive. */
114 if (is_Phi(res) && mode == mode_M)
115 add_End_keepalive(get_irg_end(irg), res);
119 static ir_node *new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp)
122 ir_graph *irg = current_ir_graph;
124 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
125 res->attr.con.tarval = con;
126 set_Const_type(res, tp); /* Call method because of complex assertion. */
127 res = optimize_node (res);
128 assert(get_Const_type(res) == tp);
129 IRN_VERIFY_IRG(res, irg);
132 } /* new_bd_Const_type */
134 static ir_node *new_bd_Const(dbg_info *db, tarval *con)
136 ir_graph *irg = current_ir_graph;
138 return new_rd_Const_type(db, irg, con, firm_unknown_type);
141 static ir_node *new_bd_Const_long(dbg_info *db, ir_mode *mode, long value)
143 ir_graph *irg = current_ir_graph;
145 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
146 } /* new_bd_Const_long */
148 static ir_node *new_bd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
152 assert(arg->op == op_Cond);
153 arg->attr.cond.default_proj = max_proj;
154 res = new_rd_Proj(db, arg, mode_X, max_proj);
156 } /* new_bd_defaultProj */
158 static ir_node *new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store,
159 ir_node *objptr, int arity, ir_node **in,
165 ir_graph *irg = get_irn_irg(block);
166 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
168 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
171 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
174 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
176 * Sel's can select functions which should be of mode mode_P_code.
178 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
179 res->attr.sel.entity = ent;
180 res = optimize_node(res);
181 IRN_VERIFY_IRG(res, irg);
185 static ir_node *new_bd_SymConst_type(dbg_info *db, ir_node *block,
186 ir_mode *mode, symconst_symbol value,
187 symconst_kind symkind, ir_type *tp)
189 ir_graph *irg = get_irn_irg(block);
190 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
192 res->attr.symc.kind = symkind;
193 res->attr.symc.sym = value;
194 res->attr.symc.tp = tp;
196 res = optimize_node(res);
197 IRN_VERIFY_IRG(res, irg);
199 } /* new_bd_SymConst_type */
201 static ir_node *new_bd_Sync(dbg_info *db, ir_node *block)
204 ir_graph *irg = get_irn_irg(block);
206 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
207 /* no need to call optimize node here, Sync are always created with no predecessors */
208 IRN_VERIFY_IRG(res, irg);
212 static ir_node *new_bd_ASM(dbg_info *db, ir_node *block, int arity,
213 ir_node *in[], ir_asm_constraint *inputs, int n_outs,
214 ir_asm_constraint *outputs, int n_clobber,
215 ident *clobber[], ident *text)
218 ir_graph *irg = get_irn_irg(block);
220 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
221 res->attr.assem.pin_state = op_pin_state_pinned;
222 res->attr.assem.input_constraints
223 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
224 res->attr.assem.output_constraints
225 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
226 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
227 res->attr.assem.text = text;
229 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
230 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
231 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
233 res = optimize_node(res);
234 IRN_VERIFY_IRG(res, irg);
238 /* --------------------------------------------- */
239 /* private interfaces, for professional use only */
240 /* --------------------------------------------- */
242 ir_node *new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
244 ir_graph *rem = current_ir_graph;
247 current_ir_graph = irg;
248 res = new_bd_Start(db, block);
249 current_ir_graph = rem;
254 ir_node *new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
257 ir_graph *rem = current_ir_graph;
259 current_ir_graph = irg;
260 res = new_bd_End(db, block);
261 current_ir_graph = rem;
266 /* Creates a Phi node with all predecessors. Calling this constructor
267 is only allowed if the corresponding block is mature. */
268 ir_node *new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
271 ir_graph *rem = current_ir_graph;
273 current_ir_graph = get_Block_irg(block);
274 res = new_bd_Phi(db, block,arity, in, mode);
275 current_ir_graph = rem;
280 ir_node *new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp)
283 ir_graph *rem = current_ir_graph;
285 current_ir_graph = irg;
286 res = new_bd_Const_type(db, con, tp);
287 current_ir_graph = rem;
290 } /* new_rd_Const_type */
292 ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
295 //#ifdef USE_ORIGINAL
296 ir_graph *rem = current_ir_graph;
298 current_ir_graph = irg;
299 res = new_bd_Const_type(db, con, firm_unknown_type);
300 current_ir_graph = rem;
302 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
308 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value)
310 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
311 } /* new_rd_Const_long */
313 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
315 return new_bd_defaultProj(db, arg, max_proj);
316 } /* new_rd_defaultProj */
318 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
319 ir_node *objptr, ir_entity *ent)
322 ir_graph *rem = current_ir_graph;
324 current_ir_graph = get_Block_irg(block);
325 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
326 current_ir_graph = rem;
329 } /* new_rd_simpleSel */
331 ir_node *new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_mode *mode,
332 symconst_symbol value, symconst_kind symkind,
336 ir_graph *rem = current_ir_graph;
337 ir_node *block = get_irg_start_block(irg);
339 current_ir_graph = irg;
340 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
341 current_ir_graph = rem;
344 } /* new_rd_SymConst_type */
346 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
347 symconst_symbol value, symconst_kind symkind)
349 return new_rd_SymConst_type(db, irg, mode, value, symkind, firm_unknown_type);
350 } /* new_rd_SymConst */
352 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
355 sym.entity_p = symbol;
356 return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
357 } /* new_rd_SymConst_addr_ent */
359 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
362 sym.entity_p = symbol;
363 return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
364 } /* new_rd_SymConst_ofs_ent */
366 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
370 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
371 } /* new_rd_SymConst_type_tag */
373 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
377 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
378 } /* new_rd_SymConst_size */
380 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
384 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
385 } /* new_rd_SymConst_align */
387 ir_node *new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[])
390 ir_graph *rem = current_ir_graph;
393 current_ir_graph = get_Block_irg(block);
394 res = new_bd_Sync(db, block);
395 current_ir_graph = rem;
397 for (i = 0; i < arity; ++i)
398 add_Sync_pred(res, in[i]);
403 ir_node *new_rd_ASM(dbg_info *db, ir_node *block,
404 int arity, ir_node *in[], ir_asm_constraint *inputs,
405 int n_outs, ir_asm_constraint *outputs,
406 int n_clobber, ident *clobber[], ident *text)
409 ir_graph *rem = current_ir_graph;
411 current_ir_graph = get_Block_irg(block);
412 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
413 current_ir_graph = rem;
418 ir_node *new_r_Start(ir_graph *irg, ir_node *block)
420 return new_rd_Start(NULL, irg, block);
422 ir_node *new_r_End(ir_graph *irg, ir_node *block)
424 return new_rd_End(NULL, irg, block);
426 ir_node *new_r_Const(ir_graph *irg, tarval *con)
428 return new_rd_Const(NULL, irg, con);
430 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
432 return new_rd_Const_long(NULL, irg, mode, value);
434 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp)
436 return new_rd_Const_type(NULL, irg, con, tp);
438 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
439 symconst_kind symkind)
441 return new_rd_SymConst(NULL, irg, mode, value, symkind);
443 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
446 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
448 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
450 return new_rd_Phi(NULL, block, arity, in, mode);
452 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[])
454 return new_rd_Sync(NULL, block, arity, in);
456 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
458 return new_rd_defaultProj(NULL, arg, max_proj);
460 ir_node *new_r_Bad(ir_graph *irg)
462 return get_irg_bad(irg);
464 ir_node *new_r_NoMem(ir_graph *irg)
466 return get_irg_no_mem(irg);
468 ir_node *new_r_ASM(ir_node *block,
469 int arity, ir_node *in[], ir_asm_constraint *inputs,
470 int n_outs, ir_asm_constraint *outputs,
471 int n_clobber, ident *clobber[], ident *text)
473 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
476 /** ********************/
477 /** public interfaces */
478 /** construction tools */
480 ir_node *new_d_Start(dbg_info *db)
484 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
485 op_Start, mode_T, 0, NULL);
487 res = optimize_node(res);
488 IRN_VERIFY_IRG(res, current_ir_graph);
492 ir_node *new_d_End(dbg_info *db)
495 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
496 op_End, mode_X, -1, NULL);
497 res = optimize_node(res);
498 IRN_VERIFY_IRG(res, current_ir_graph);
503 /* ***********************************************************************/
504 /* Methods necessary for automatic Phi node creation */
506 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
507 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
508 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
509 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
511 Call Graph: ( A ---> B == A "calls" B)
513 get_value mature_immBlock
521 get_r_value_internal |
525 new_rd_Phi0 new_rd_Phi_in
527 * *************************************************************************** */
529 /** Creates a Phi node with 0 predecessors. */
530 static inline ir_node *new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
534 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
535 IRN_VERIFY_IRG(res, irg);
541 * Internal constructor of a Phi node by a phi_merge operation.
543 * @param irg the graph on which the Phi will be constructed
544 * @param block the block in which the Phi will be constructed
545 * @param mode the mod eof the Phi node
546 * @param in the input array of the phi node
547 * @param ins number of elements in the input array
548 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
549 * the value for which the new Phi is constructed
551 static inline ir_node *new_rd_Phi_in(ir_graph *irg, ir_node *block,
552 ir_mode *mode, ir_node **in, int ins,
556 ir_node *res, *known;
558 /* Allocate a new node on the obstack. The allocation copies the in
560 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
561 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
563 /* This loop checks whether the Phi has more than one predecessor.
564 If so, it is a real Phi node and we break the loop. Else the
565 Phi node merges the same definition on several paths and therefore
567 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
569 for (i = ins - 1; i >= 0; --i) {
572 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
574 /* Optimize self referencing Phis: We can't detect them yet properly, as
575 they still refer to the Phi0 they will replace. So replace right now. */
576 if (phi0 && in[i] == phi0)
579 if (in[i] == res || in[i] == known)
588 /* i < 0: there is at most one predecessor, we don't need a phi node. */
591 edges_node_deleted(res, current_ir_graph);
592 obstack_free(current_ir_graph->obst, res);
594 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
595 order, an enclosing Phi know may get superfluous. */
596 res = optimize_in_place_2(known);
598 exchange(known, res);
603 /* A undefined value, e.g., in unreachable code. */
607 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
608 IRN_VERIFY_IRG(res, irg);
609 /* Memory Phis in endless loops must be kept alive.
610 As we can't distinguish these easily we keep all of them alive. */
611 if (is_Phi(res) && mode == mode_M)
612 add_End_keepalive(get_irg_end(irg), res);
616 } /* new_rd_Phi_in */
618 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
620 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
623 * Construct a new frag_array for node n.
624 * Copy the content from the current graph_arr of the corresponding block:
625 * this is the current state.
626 * Set ProjM(n) as current memory state.
627 * Further the last entry in frag_arr of current block points to n. This
628 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
630 static inline ir_node **new_frag_arr(ir_node *n)
635 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
636 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
637 sizeof(ir_node *)*current_ir_graph->n_loc);
639 /* turn off optimization before allocating Proj nodes, as res isn't
641 opt = get_opt_optimize(); set_optimize(0);
642 /* Here we rely on the fact that all frag ops have Memory as first result! */
644 arr[0] = new_Proj(n, mode_M, pn_Call_M);
645 } else if (is_CopyB(n)) {
646 arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
648 assert((pn_Quot_M == pn_DivMod_M) &&
649 (pn_Quot_M == pn_Div_M) &&
650 (pn_Quot_M == pn_Mod_M) &&
651 (pn_Quot_M == pn_Load_M) &&
652 (pn_Quot_M == pn_Store_M) &&
653 (pn_Quot_M == pn_Alloc_M) &&
654 (pn_Quot_M == pn_Bound_M));
655 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
659 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
664 * Returns the frag_arr from a node.
666 static inline ir_node **get_frag_arr(ir_node *n)
668 switch (get_irn_opcode(n)) {
670 return n->attr.call.exc.frag_arr;
672 return n->attr.alloc.exc.frag_arr;
674 return n->attr.load.exc.frag_arr;
676 return n->attr.store.exc.frag_arr;
678 return n->attr.except.frag_arr;
682 static void set_frag_value(ir_node **frag_arr, int pos, ir_node *val)
687 for (i = 1024; i >= 0; --i)
692 if (frag_arr[pos] == NULL)
694 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
695 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
696 assert(arr != frag_arr && "Endless recursion detected");
701 assert(!"potential endless recursion in set_frag_value");
702 } /* set_frag_value */
704 static ir_node *get_r_frag_value_internal(ir_node *block, ir_node *cfOp,
705 int pos, ir_mode *mode)
710 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
712 frag_arr = get_frag_arr(cfOp);
715 if (block->attr.block.graph_arr[pos] != NULL) {
716 /* There was a set_value() after the cfOp and no get_value() before that
717 set_value(). We must build a Phi node now. */
718 if (block->attr.block.is_matured) {
719 int ins = get_irn_arity(block);
721 NEW_ARR_A(ir_node *, nin, ins);
722 res = phi_merge(block, pos, mode, nin, ins);
724 res = new_rd_Phi0(current_ir_graph, block, mode);
725 res->attr.phi.u.pos = pos;
726 res->attr.phi.next = block->attr.block.phis;
727 block->attr.block.phis = res;
730 /* It's a Phi, we can write this into all graph_arrs with NULL */
731 set_frag_value(block->attr.block.graph_arr, pos, res);
733 res = get_r_value_internal(block, pos, mode);
734 set_frag_value(block->attr.block.graph_arr, pos, res);
738 } /* get_r_frag_value_internal */
741 * Check whether a control flownode cf_pred represents an exception flow.
743 * @param cf_pred the control flow node
744 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
746 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op)
749 * Note: all projections from a raise are "exceptional control flow" we we handle it
750 * like a normal Jmp, because there is no "regular" one.
751 * That's why Raise is no "fragile_op"!
753 if (is_fragile_op(prev_cf_op)) {
754 if (is_Proj(cf_pred)) {
755 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
756 /* the regular control flow, NO exception */
759 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
762 /* Hmm, exception but not a Proj? */
763 panic("unexpected condition: fragile op without a proj");
766 } /* is_exception_flow */
769 * Computes the predecessors for the real phi node, and then
770 * allocates and returns this node. The routine called to allocate the
771 * node might optimize it away and return a real value.
772 * This function must be called with an in-array of proper size.
774 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
776 ir_node *prevBlock, *res, *phi0, *phi0_all;
779 /* If this block has no value at pos create a Phi0 and remember it
780 in graph_arr to break recursions.
781 Else we may not set graph_arr as there a later value is remembered. */
783 if (block->attr.block.graph_arr[pos] == NULL) {
784 ir_graph *irg = current_ir_graph;
786 if (block == get_irg_start_block(irg)) {
787 /* Collapsing to Bad tarvals is no good idea.
788 So we call a user-supplied routine here that deals with this case as
789 appropriate for the given language. Sorrily the only help we can give
790 here is the position.
792 Even if all variables are defined before use, it can happen that
793 we get to the start block, if a Cond has been replaced by a tuple
794 (bad, jmp). In this case we call the function needlessly, eventually
795 generating an non existent error.
796 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
799 if (default_initialize_local_variable != NULL) {
800 ir_node *rem = get_cur_block();
802 set_cur_block(block);
803 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
807 block->attr.block.graph_arr[pos] = new_Unknown(mode);
808 /* We don't need to care about exception ops in the start block.
809 There are none by definition. */
810 return block->attr.block.graph_arr[pos];
812 phi0 = new_rd_Phi0(irg, block, mode);
813 block->attr.block.graph_arr[pos] = phi0;
814 if (get_opt_precise_exc_context()) {
815 /* Set graph_arr for fragile ops. Also here we should break recursion.
816 We could choose a cyclic path through an cfop. But the recursion would
817 break at some point. */
818 set_frag_value(block->attr.block.graph_arr, pos, phi0);
823 /* This loop goes to all predecessor blocks of the block the Phi node
824 is in and there finds the operands of the Phi node by calling
825 get_r_value_internal. */
826 for (i = 1; i <= ins; ++i) {
827 ir_node *cf_pred = block->in[i];
828 ir_node *prevCfOp = skip_Proj(cf_pred);
830 if (is_Bad(prevCfOp)) {
831 /* In case a Cond has been optimized we would get right to the start block
832 with an invalid definition. */
833 nin[i-1] = new_Bad();
836 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
838 if (!is_Bad(prevBlock)) {
839 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
840 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
841 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
843 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
845 nin[i-1] = new_Bad();
849 /* We want to pass the Phi0 node to the constructor: this finds additional
850 optimization possibilities.
851 The Phi0 node either is allocated in this function, or it comes from
852 a former call to get_r_value_internal(). In this case we may not yet
853 exchange phi0, as this is done in mature_immBlock(). */
855 phi0_all = block->attr.block.graph_arr[pos];
856 if (! is_Phi0(phi0_all) ||
857 get_irn_arity(phi0_all) != 0 ||
858 get_nodes_block(phi0_all) != block)
864 /* After collecting all predecessors into the array nin a new Phi node
865 with these predecessors is created. This constructor contains an
866 optimization: If all predecessors of the Phi node are identical it
867 returns the only operand instead of a new Phi node. */
868 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
870 /* In case we allocated a Phi0 node at the beginning of this procedure,
871 we need to exchange this Phi0 with the real Phi. */
874 block->attr.block.graph_arr[pos] = res;
875 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
876 only an optimization. */
883 * This function returns the last definition of a value. In case
884 * this value was last defined in a previous block, Phi nodes are
885 * inserted. If the part of the firm graph containing the definition
886 * is not yet constructed, a dummy Phi node is returned.
888 * @param block the current block
889 * @param pos the value number of the value searched
890 * @param mode the mode of this value (needed for Phi construction)
892 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
895 /* There are 4 cases to treat.
897 1. The block is not mature and we visit it the first time. We can not
898 create a proper Phi node, therefore a Phi0, i.e., a Phi without
899 predecessors is returned. This node is added to the linked list (block
900 attribute "phis") of the containing block to be completed when this block is
901 matured. (Completion will add a new Phi and turn the Phi0 into an Id
904 2. The value is already known in this block, graph_arr[pos] is set and we
905 visit the block the first time. We can return the value without
906 creating any new nodes.
908 3. The block is mature and we visit it the first time. A Phi node needs
909 to be created (phi_merge). If the Phi is not needed, as all it's
910 operands are the same value reaching the block through different
911 paths, it's optimized away and the value itself is returned.
913 4. The block is mature, and we visit it the second time. Now two
914 subcases are possible:
915 * The value was computed completely the last time we were here. This
916 is the case if there is no loop. We can return the proper value.
917 * The recursion that visited this node and set the flag did not
918 return yet. We are computing a value in a loop and need to
919 break the recursion. This case only happens if we visited
920 the same block with phi_merge before, which inserted a Phi0.
921 So we return the Phi0.
924 /* case 4 -- already visited. */
925 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
926 /* As phi_merge allocates a Phi0 this value is always defined. Here
927 is the critical difference of the two algorithms. */
928 assert(block->attr.block.graph_arr[pos]);
929 return block->attr.block.graph_arr[pos];
932 /* visited the first time */
933 set_irn_visited(block, get_irg_visited(current_ir_graph));
935 /* Get the local valid value */
936 res = block->attr.block.graph_arr[pos];
938 /* case 2 -- If the value is actually computed, return it. */
942 if (block->attr.block.is_matured) { /* case 3 */
944 /* The Phi has the same amount of ins as the corresponding block. */
945 int ins = get_irn_arity(block);
947 NEW_ARR_A(ir_node *, nin, ins);
949 /* Phi merge collects the predecessors and then creates a node. */
950 res = phi_merge(block, pos, mode, nin, ins);
952 } else { /* case 1 */
953 /* The block is not mature, we don't know how many in's are needed. A Phi
954 with zero predecessors is created. Such a Phi node is called Phi0
955 node. The Phi0 is then added to the list of Phi0 nodes in this block
956 to be matured by mature_immBlock later.
957 The Phi0 has to remember the pos of it's internal value. If the real
958 Phi is computed, pos is used to update the array with the local
960 res = new_rd_Phi0(current_ir_graph, block, mode);
961 res->attr.phi.u.pos = pos;
962 res->attr.phi.next = block->attr.block.phis;
963 block->attr.block.phis = res;
966 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
968 /* The local valid value is available now. */
969 block->attr.block.graph_arr[pos] = res;
972 } /* get_r_value_internal */
974 /* ************************************************************************** */
977 * Finalize a Block node, when all control flows are known.
978 * Acceptable parameters are only Block nodes.
980 void mature_immBlock(ir_node *block)
986 assert(is_Block(block));
987 if (!get_Block_matured(block)) {
988 ir_graph *irg = current_ir_graph;
990 ins = ARR_LEN(block->in) - 1;
991 /* Fix block parameters */
992 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
994 /* An array for building the Phi nodes. */
995 NEW_ARR_A(ir_node *, nin, ins);
997 /* Traverse a chain of Phi nodes attached to this block and mature
999 for (n = block->attr.block.phis; n; n = next) {
1000 inc_irg_visited(irg);
1001 next = n->attr.phi.next;
1002 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
1005 block->attr.block.is_matured = 1;
1007 /* Now, as the block is a finished Firm node, we can optimize it.
1008 Since other nodes have been allocated since the block was created
1009 we can not free the node on the obstack. Therefore we have to call
1010 optimize_in_place().
1011 Unfortunately the optimization does not change a lot, as all allocated
1012 nodes refer to the unoptimized node.
1013 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
1014 block = optimize_in_place_2(block);
1015 IRN_VERIFY_IRG(block, irg);
1017 } /* mature_immBlock */
1019 ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
1021 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
1024 ir_node *new_d_Const(dbg_info *db, tarval *con)
1026 return new_bd_Const(db, con);
1029 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
1031 return new_bd_Const_long(db, mode, value);
1032 } /* new_d_Const_long */
1034 ir_node *new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp)
1036 return new_bd_Const_type(db, con, tp);
1037 } /* new_d_Const_type */
1040 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
1043 assert(arg->op == op_Cond);
1044 arg->attr.cond.default_proj = max_proj;
1045 res = new_d_Proj(db, arg, mode_X, max_proj);
1047 } /* new_d_defaultProj */
1050 * Allocate a frag array for a node if the current graph state is phase_building.
1052 * @param irn the node for which the frag array should be allocated
1053 * @param op the opcode of the (original) node, if does not match opcode of irn,
1055 * @param frag_store the address of the frag store in irn attributes, if this
1056 * address contains a value != NULL, does nothing
1058 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store)
1060 if (get_opt_precise_exc_context()) {
1061 if ((current_ir_graph->phase_state == phase_building) &&
1062 (get_irn_op(irn) == op) && /* Could be optimized away. */
1063 !*frag_store) /* Could be a cse where the arr is already set. */ {
1064 *frag_store = new_frag_arr(irn);
1067 } /* firm_alloc_frag_arr */
1069 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
1070 /* GL: objptr was called frame before. Frame was a bad choice for the name
1071 as the operand could as well be a pointer to a dynamic object. */
1073 return new_bd_Sel(db, current_ir_graph->current_block,
1074 store, objptr, 0, NULL, ent);
1075 } /* new_d_simpleSel */
1077 ir_node *new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp)
1079 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1081 } /* new_d_SymConst_type */
1083 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind)
1085 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1086 value, kind, firm_unknown_type);
1087 } /* new_d_SymConst */
1089 ir_node *new_d_Sync(dbg_info *db, int arity, ir_node *in[])
1091 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
1094 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
1095 int n_outs, ir_asm_constraint *outputs, int n_clobber,
1096 ident *clobber[], ident *text)
1098 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
/* ********************************************************************* */
/* Comfortable interface with automatic Phi node construction.           */
/* (Also uses the constructors of the ?? interface, except new_Block.)   */
/* ********************************************************************* */
1106 /* Block construction */
1107 /* immature Block without predecessors */
1108 ir_node *new_d_immBlock(dbg_info *db)
1112 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1113 /* creates a new dynamic in-array as length of in is -1 */
1114 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
1116 /* macroblock head */
1119 res->attr.block.is_matured = 0;
1120 res->attr.block.is_dead = 0;
1121 res->attr.block.is_mb_head = 1;
1122 res->attr.block.irg.irg = current_ir_graph;
1123 res->attr.block.backedge = NULL;
1124 res->attr.block.in_cg = NULL;
1125 res->attr.block.cg_backedge = NULL;
1126 res->attr.block.extblk = NULL;
1127 res->attr.block.region = NULL;
1128 res->attr.block.mb_depth = 0;
1129 res->attr.block.entity = NULL;
1131 set_Block_block_visited(res, 0);
1133 /* Create and initialize array for Phi-node construction. */
1134 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
1135 current_ir_graph->n_loc);
1136 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
1138 /* Immature block may not be optimized! */
1139 IRN_VERIFY_IRG(res, current_ir_graph);
1142 } /* new_d_immBlock */
1144 ir_node *new_immBlock(void)
1146 return new_d_immBlock(NULL);
1147 } /* new_immBlock */
1149 /* immature PartBlock with its predecessors */
1150 ir_node *new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp)
1152 ir_node *res = new_d_immBlock(db);
1153 ir_node *blk = get_nodes_block(pred_jmp);
1155 res->in[0] = blk->in[0];
1156 assert(res->in[0] != NULL);
1157 add_immBlock_pred(res, pred_jmp);
1159 res->attr.block.is_mb_head = 0;
1160 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
1163 } /* new_d_immPartBlock */
1165 ir_node *new_immPartBlock(ir_node *pred_jmp)
1167 return new_d_immPartBlock(NULL, pred_jmp);
1168 } /* new_immPartBlock */
1170 /* add an edge to a jmp/control flow node */
1171 void add_immBlock_pred(ir_node *block, ir_node *jmp)
1173 int n = ARR_LEN(block->in) - 1;
1175 assert(is_Block(block) && "Error: Must be a Block");
1176 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
1177 assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
1178 assert(is_ir_node(jmp));
1180 ARR_APP1(ir_node *, block->in, jmp);
1182 hook_set_irn_n(block, n, jmp, NULL);
1183 } /* add_immBlock_pred */
1185 /* changing the current block */
1186 void set_cur_block(ir_node *target)
1188 current_ir_graph->current_block = target;
1189 } /* set_cur_block */
1191 /* ************************ */
1192 /* parameter administration */
1194 /* get a value from the parameter array from the current block by its index */
1195 ir_node *get_d_value(dbg_info *db, int pos, ir_mode *mode)
1197 ir_graph *irg = current_ir_graph;
1198 assert(get_irg_phase_state(irg) == phase_building);
1199 inc_irg_visited(irg);
1204 return get_r_value_internal(irg->current_block, pos + 1, mode);
1207 /* get a value from the parameter array from the current block by its index */
1208 ir_node *get_value(int pos, ir_mode *mode)
1210 return get_d_value(NULL, pos, mode);
1214 * helper function for guess_mode: recursively look for a definition for
1215 * local variable @p pos, returns its mode if found.
1217 static ir_mode *guess_recursively(ir_node *block, int pos)
1223 if (irn_visited(block))
1225 mark_irn_visited(block);
1227 /* already have a defintion -> we can simply look at its mode */
1228 value = block->attr.block.graph_arr[pos];
1230 return get_irn_mode(value);
1232 /* now we try to guess, by looking at the predecessor blocks */
1233 n_preds = get_irn_arity(block);
1234 for (i = 0; i < n_preds; ++i) {
1235 ir_node *pred_block = get_Block_cfgpred_block(block, i);
1236 ir_mode *mode = guess_recursively(pred_block, pos);
1241 /* no way to guess */
1245 ir_mode *ir_guess_mode(int pos)
1247 ir_graph *irg = current_ir_graph;
1248 ir_node *block = irg->current_block;
1249 ir_node *value = block->attr.block.graph_arr[pos+1];
1252 /* already have a defintion -> we can simply look at its mode */
1254 return get_irn_mode(value);
1256 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1257 inc_irg_visited(current_ir_graph);
1258 mode = guess_recursively(block, pos+1);
1259 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1264 /* set a value at position pos in the parameter array from the current block */
1265 void set_value(int pos, ir_node *value)
1267 ir_graph *irg = current_ir_graph;
1268 assert(get_irg_phase_state(irg) == phase_building);
1270 assert(pos+1 < irg->n_loc);
1271 assert(is_ir_node(value));
1272 irg->current_block->attr.block.graph_arr[pos + 1] = value;
1275 /* Find the value number for a node in the current block.*/
1276 int find_value(ir_node *value)
1279 ir_node *bl = current_ir_graph->current_block;
1281 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
1282 if (bl->attr.block.graph_arr[i] == value)
1287 /* get the current store */
1288 ir_node *get_store(void)
1290 ir_graph *irg = current_ir_graph;
1292 assert(get_irg_phase_state(irg) == phase_building);
1293 /* GL: one could call get_value instead */
1294 inc_irg_visited(irg);
1295 return get_r_value_internal(irg->current_block, 0, mode_M);
1298 /* set the current store: handles automatic Sync construction for Load nodes */
1299 void set_store(ir_node *store)
1301 ir_node *load, *pload, *pred, *in[2];
1303 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1304 /* Beware: due to dead code elimination, a store might become a Bad node even in
1305 the construction phase. */
1306 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
1308 if (get_opt_auto_create_sync()) {
1309 /* handle non-volatile Load nodes by automatically creating Sync's */
1310 load = skip_Proj(store);
1311 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
1312 pred = get_Load_mem(load);
1314 if (is_Sync(pred)) {
1315 /* a Load after a Sync: move it up */
1316 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
1318 set_Load_mem(load, get_memop_mem(mem));
1319 add_Sync_pred(pred, store);
1322 pload = skip_Proj(pred);
1323 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
1324 /* a Load after a Load: create a new Sync */
1325 set_Load_mem(load, get_Load_mem(pload));
1329 store = new_Sync(2, in);
1334 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
1337 void keep_alive(ir_node *ka)
1339 add_End_keepalive(get_irg_end(current_ir_graph), ka);
1342 /* --- Useful access routines --- */
1343 /* Returns the current block of the current graph. To set the current
1344 block use set_cur_block. */
1345 ir_node *get_cur_block(void)
1347 return get_irg_current_block(current_ir_graph);
1348 } /* get_cur_block */
1350 /* Returns the frame type of the current graph */
1351 ir_type *get_cur_frame_type(void)
1353 return get_irg_frame_type(current_ir_graph);
1354 } /* get_cur_frame_type */
1357 /* ********************************************************************* */
1360 /* call once for each run of the library */
1361 void ir_set_uninitialized_local_variable_func(
1362 uninitialized_local_variable_func_t *func)
1364 default_initialize_local_variable = func;
1367 void irg_finalize_cons(ir_graph *irg)
1369 set_irg_phase_state(irg, phase_high);
1372 void irp_finalize_cons(void)
1375 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1376 irg_finalize_cons(get_irp_irg(i));
1378 irp->phase_state = phase_high;
1381 ir_node *new_Start(void)
1383 return new_d_Start(NULL);
1385 ir_node *new_End(void)
1387 return new_d_End(NULL);
1389 ir_node *new_Const(tarval *con)
1391 return new_d_Const(NULL, con);
1394 ir_node *new_Const_long(ir_mode *mode, long value)
1396 return new_d_Const_long(NULL, mode, value);
1399 ir_node *new_Const_type(tarval *con, ir_type *tp)
1401 return new_d_Const_type(NULL, con, tp);
1404 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type)
1406 return new_d_SymConst_type(NULL, mode, value, kind, type);
1408 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
1410 return new_d_SymConst(NULL, mode, value, kind);
1412 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
1414 return new_d_simpleSel(NULL, store, objptr, ent);
1416 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
1418 return new_d_Phi(NULL, arity, in, mode);
1420 ir_node *new_Sync(int arity, ir_node *in[])
1422 return new_d_Sync(NULL, arity, in);
1424 ir_node *new_defaultProj(ir_node *arg, long max_proj)
1426 return new_d_defaultProj(NULL, arg, max_proj);
1428 ir_node *new_Bad(void)
1430 return get_irg_bad(current_ir_graph);
1432 ir_node *new_NoMem(void)
1434 return get_irg_no_mem(current_ir_graph);
1436 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1437 int n_outs, ir_asm_constraint *outputs,
1438 int n_clobber, ident *clobber[], ident *text)
1440 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
1443 /* create a new anchor node */
1444 ir_node *new_Anchor(ir_graph *irg)
1446 ir_node *in[anchor_last];
1448 memset(in, 0, sizeof(in));
1449 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);
1451 /* hack to get get_irn_irg working: set block to ourself and allow
1452 * get_Block_irg for anchor */
1453 res->attr.irg.irg = irg;