/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Various irnode constructors.  Automatic construction of SSA
 *          representation.
 * @author  Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
 */
#include <assert.h>
#include <string.h>

#include "irgraph_t.h"
#include "irbackedge_t.h"
#include "iredges_t.h"
/* when we need verifying */
#ifdef NDEBUG
# define IRN_VERIFY_IRG(res, irg)
#else
# define IRN_VERIFY_IRG(res, irg)  irn_verify_irg(res, irg)
#endif /* NDEBUG */
55 * Language dependent variable initialization callback.
57 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
59 #include "gen_ir_cons.c.inl"
61 static ir_node *new_bd_Start(dbg_info *db, ir_node *block)
64 ir_graph *irg = get_irn_irg(block);
66 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
68 IRN_VERIFY_IRG(res, irg);
72 static ir_node *new_bd_End(dbg_info *db, ir_node *block)
75 ir_graph *irg = get_irn_irg(block);
77 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
79 IRN_VERIFY_IRG(res, irg);
84 * Creates a Phi node with all predecessors. Calling this constructor
85 * is only allowed if the corresponding block is mature.
87 static ir_node *new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
90 ir_graph *irg = get_irn_irg(block);
94 /* Don't assert that block matured: the use of this constructor is strongly
96 if (get_Block_matured(block))
97 assert(get_irn_arity(block) == arity);
99 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
101 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
103 for (i = arity - 1; i >= 0; --i)
104 if (is_Unknown(in[i])) {
109 if (!has_unknown) res = optimize_node(res);
110 IRN_VERIFY_IRG(res, irg);
112 /* Memory Phis in endless loops must be kept alive.
113 As we can't distinguish these easily we keep all of them alive. */
114 if (is_Phi(res) && mode == mode_M)
115 add_End_keepalive(get_irg_end(irg), res);
119 static ir_node *new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp)
122 ir_graph *irg = current_ir_graph;
124 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
125 res->attr.con.tarval = con;
126 set_Const_type(res, tp); /* Call method because of complex assertion. */
127 res = optimize_node (res);
128 assert(get_Const_type(res) == tp);
129 IRN_VERIFY_IRG(res, irg);
132 } /* new_bd_Const_type */
134 static ir_node *new_bd_Const(dbg_info *db, tarval *con)
136 ir_graph *irg = current_ir_graph;
137 return new_rd_Const_type(db, irg, con, firm_unknown_type);
140 static ir_node *new_bd_Const_long(dbg_info *db, ir_mode *mode, long value)
142 ir_graph *irg = current_ir_graph;
143 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
144 } /* new_bd_Const_long */
146 static ir_node *new_bd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
150 assert(is_Cond(arg));
151 arg->attr.cond.default_proj = max_proj;
152 res = new_rd_Proj(db, arg, mode_X, max_proj);
154 } /* new_bd_defaultProj */
156 static ir_node *new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store,
157 ir_node *objptr, int arity, ir_node **in,
163 ir_graph *irg = get_irn_irg(block);
164 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
166 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
169 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
172 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
174 * Sel's can select functions which should be of mode mode_P_code.
176 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
177 res->attr.sel.entity = ent;
178 res = optimize_node(res);
179 IRN_VERIFY_IRG(res, irg);
183 static ir_node *new_bd_SymConst_type(dbg_info *db, ir_node *block,
184 ir_mode *mode, symconst_symbol value,
185 symconst_kind symkind, ir_type *tp)
187 ir_graph *irg = get_irn_irg(block);
188 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
190 res->attr.symc.kind = symkind;
191 res->attr.symc.sym = value;
192 res->attr.symc.tp = tp;
194 res = optimize_node(res);
195 IRN_VERIFY_IRG(res, irg);
197 } /* new_bd_SymConst_type */
199 static ir_node *new_bd_Sync(dbg_info *db, ir_node *block)
202 ir_graph *irg = get_irn_irg(block);
204 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
205 /* no need to call optimize node here, Sync are always created with no predecessors */
206 IRN_VERIFY_IRG(res, irg);
210 static ir_node *new_bd_ASM(dbg_info *db, ir_node *block, int arity,
211 ir_node *in[], ir_asm_constraint *inputs, int n_outs,
212 ir_asm_constraint *outputs, int n_clobber,
213 ident *clobber[], ident *text)
216 ir_graph *irg = get_irn_irg(block);
218 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
219 res->attr.assem.pin_state = op_pin_state_pinned;
220 res->attr.assem.input_constraints
221 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
222 res->attr.assem.output_constraints
223 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
224 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
225 res->attr.assem.text = text;
227 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
228 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
229 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
231 res = optimize_node(res);
232 IRN_VERIFY_IRG(res, irg);
236 /* --------------------------------------------- */
237 /* private interfaces, for professional use only */
238 /* --------------------------------------------- */
240 ir_node *new_rd_Start(dbg_info *db, ir_node *block)
242 return new_bd_Start(db, block);
245 ir_node *new_rd_End(dbg_info *db, ir_node *block)
247 return new_bd_End(db, block);
250 /* Creates a Phi node with all predecessors. Calling this constructor
251 is only allowed if the corresponding block is mature. */
252 ir_node *new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
255 ir_graph *rem = current_ir_graph;
257 current_ir_graph = get_Block_irg(block);
258 res = new_bd_Phi(db, block,arity, in, mode);
259 current_ir_graph = rem;
264 ir_node *new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp)
267 ir_graph *rem = current_ir_graph;
269 current_ir_graph = irg;
270 res = new_bd_Const_type(db, con, tp);
271 current_ir_graph = rem;
274 } /* new_rd_Const_type */
276 ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
279 ir_graph *rem = current_ir_graph;
281 current_ir_graph = irg;
282 res = new_bd_Const_type(db, con, firm_unknown_type);
283 current_ir_graph = rem;
288 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value)
290 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
291 } /* new_rd_Const_long */
293 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
295 return new_bd_defaultProj(db, arg, max_proj);
296 } /* new_rd_defaultProj */
298 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
299 ir_node *objptr, ir_entity *ent)
302 ir_graph *rem = current_ir_graph;
304 current_ir_graph = get_Block_irg(block);
305 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
306 current_ir_graph = rem;
309 } /* new_rd_simpleSel */
311 ir_node *new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_mode *mode,
312 symconst_symbol value, symconst_kind symkind,
316 ir_graph *rem = current_ir_graph;
317 ir_node *block = get_irg_start_block(irg);
319 current_ir_graph = irg;
320 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
321 current_ir_graph = rem;
324 } /* new_rd_SymConst_type */
326 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
327 symconst_symbol value, symconst_kind symkind)
329 return new_rd_SymConst_type(db, irg, mode, value, symkind, firm_unknown_type);
330 } /* new_rd_SymConst */
332 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
335 sym.entity_p = symbol;
336 return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
337 } /* new_rd_SymConst_addr_ent */
339 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
342 sym.entity_p = symbol;
343 return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
344 } /* new_rd_SymConst_ofs_ent */
346 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
350 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
351 } /* new_rd_SymConst_type_tag */
353 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
357 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
358 } /* new_rd_SymConst_size */
360 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
364 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
365 } /* new_rd_SymConst_align */
367 ir_node *new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[])
370 ir_graph *rem = current_ir_graph;
373 current_ir_graph = get_Block_irg(block);
374 res = new_bd_Sync(db, block);
375 current_ir_graph = rem;
377 for (i = 0; i < arity; ++i)
378 add_Sync_pred(res, in[i]);
383 ir_node *new_rd_ASM(dbg_info *db, ir_node *block,
384 int arity, ir_node *in[], ir_asm_constraint *inputs,
385 int n_outs, ir_asm_constraint *outputs,
386 int n_clobber, ident *clobber[], ident *text)
389 ir_graph *rem = current_ir_graph;
391 current_ir_graph = get_Block_irg(block);
392 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
393 current_ir_graph = rem;
398 ir_node *new_r_Start(ir_node *block)
400 return new_rd_Start(NULL, block);
402 ir_node *new_r_End(ir_node *block)
404 return new_rd_End(NULL, block);
406 ir_node *new_r_Const(ir_graph *irg, tarval *con)
408 return new_rd_Const(NULL, irg, con);
410 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
412 return new_rd_Const_long(NULL, irg, mode, value);
414 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp)
416 return new_rd_Const_type(NULL, irg, con, tp);
418 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
419 symconst_kind symkind)
421 return new_rd_SymConst(NULL, irg, mode, value, symkind);
423 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
426 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
428 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
430 return new_rd_Phi(NULL, block, arity, in, mode);
432 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[])
434 return new_rd_Sync(NULL, block, arity, in);
436 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
438 return new_rd_defaultProj(NULL, arg, max_proj);
440 ir_node *new_r_Bad(ir_graph *irg)
442 return get_irg_bad(irg);
444 ir_node *new_r_NoMem(ir_graph *irg)
446 return get_irg_no_mem(irg);
448 ir_node *new_r_ASM(ir_node *block,
449 int arity, ir_node *in[], ir_asm_constraint *inputs,
450 int n_outs, ir_asm_constraint *outputs,
451 int n_clobber, ident *clobber[], ident *text)
453 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
456 /** ********************/
457 /** public interfaces */
458 /** construction tools */
460 ir_node *new_d_Start(dbg_info *db)
464 assert(get_irg_phase_state(current_ir_graph) == phase_building);
465 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
466 op_Start, mode_T, 0, NULL);
468 res = optimize_node(res);
469 IRN_VERIFY_IRG(res, current_ir_graph);
473 ir_node *new_d_End(dbg_info *db)
476 assert(get_irg_phase_state(current_ir_graph) == phase_building);
477 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
478 op_End, mode_X, -1, NULL);
479 res = optimize_node(res);
480 IRN_VERIFY_IRG(res, current_ir_graph);
/* ***********************************************************************/
/* Methods necessary for automatic Phi node creation                     */
/*
  ir_node *phi_merge            (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
  ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
  ir_node *new_rd_Phi0          (ir_graph *irg, ir_node *block, ir_mode *mode)
  ir_node *new_rd_Phi_in        (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)

  Call Graph:   ( A ---> B == A "calls" B)

  get_value and mature_immBlock both lead to phi_merge; phi_merge calls
  get_r_value_internal for every predecessor block, which recurses back
  into phi_merge for mature blocks.  The leaves of this recursion are
  allocated by new_rd_Phi0 (immature blocks) and new_rd_Phi_in (mature
  blocks).
* *************************************************************************** */
511 /** Creates a Phi node with 0 predecessors. */
512 static inline ir_node *new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
516 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
517 IRN_VERIFY_IRG(res, irg);
523 * Internal constructor of a Phi node by a phi_merge operation.
525 * @param irg the graph on which the Phi will be constructed
526 * @param block the block in which the Phi will be constructed
527 * @param mode the mod eof the Phi node
528 * @param in the input array of the phi node
529 * @param ins number of elements in the input array
530 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
531 * the value for which the new Phi is constructed
533 static inline ir_node *new_rd_Phi_in(ir_graph *irg, ir_node *block,
534 ir_mode *mode, ir_node **in, int ins,
538 ir_node *res, *known;
540 /* Allocate a new node on the obstack. The allocation copies the in
542 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
543 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
545 /* This loop checks whether the Phi has more than one predecessor.
546 If so, it is a real Phi node and we break the loop. Else the
547 Phi node merges the same definition on several paths and therefore
549 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
551 for (i = ins - 1; i >= 0; --i) {
554 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
556 /* Optimize self referencing Phis: We can't detect them yet properly, as
557 they still refer to the Phi0 they will replace. So replace right now. */
558 if (phi0 && in[i] == phi0)
561 if (in[i] == res || in[i] == known)
570 /* i < 0: there is at most one predecessor, we don't need a phi node. */
573 edges_node_deleted(res, current_ir_graph);
574 obstack_free(current_ir_graph->obst, res);
576 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
577 order, an enclosing Phi know may get superfluous. */
578 res = optimize_in_place_2(known);
580 exchange(known, res);
585 /* A undefined value, e.g., in unreachable code. */
586 res = new_r_Bad(irg);
589 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
590 IRN_VERIFY_IRG(res, irg);
591 /* Memory Phis in endless loops must be kept alive.
592 As we can't distinguish these easily we keep all of them alive. */
593 if (is_Phi(res) && mode == mode_M)
594 add_End_keepalive(get_irg_end(irg), res);
598 } /* new_rd_Phi_in */
600 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
602 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
605 * Computes the predecessors for the real phi node, and then
606 * allocates and returns this node. The routine called to allocate the
607 * node might optimize it away and return a real value.
608 * This function must be called with an in-array of proper size.
610 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
612 ir_graph *irg = current_ir_graph;
613 ir_node *prevBlock, *res, *phi0, *phi0_all;
616 /* If this block has no value at pos create a Phi0 and remember it
617 in graph_arr to break recursions.
618 Else we may not set graph_arr as there a later value is remembered. */
620 if (block->attr.block.graph_arr[pos] == NULL) {
622 if (block == get_irg_start_block(irg)) {
623 /* Collapsing to Bad tarvals is no good idea.
624 So we call a user-supplied routine here that deals with this
625 case as appropriate for the given language. Sorrily the only
626 help we can give here is the position.
628 Even if all variables are defined before use, it can happen that
629 we get to the start block, if a Cond has been replaced by a tuple
630 (bad, jmp). In this case we call the function needlessly,
631 eventually generating an non existent error.
632 However, this SHOULD NOT HAPPEN, as bad control flow nodes are
633 intercepted before recurring.
635 if (default_initialize_local_variable != NULL) {
636 ir_node *rem = get_cur_block();
638 set_cur_block(block);
639 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
642 block->attr.block.graph_arr[pos] = new_r_Unknown(irg, mode);
644 return block->attr.block.graph_arr[pos];
646 phi0 = new_rd_Phi0(irg, block, mode);
647 block->attr.block.graph_arr[pos] = phi0;
651 /* This loop goes to all predecessor blocks of the block the Phi node
652 is in and there finds the operands of the Phi node by calling
653 get_r_value_internal. */
654 for (i = 1; i <= ins; ++i) {
655 ir_node *cf_pred = block->in[i];
656 ir_node *prevCfOp = skip_Proj(cf_pred);
658 if (is_Bad(prevCfOp)) {
659 /* In case a Cond has been optimized we would get right to the start block
660 with an invalid definition. */
661 nin[i-1] = new_r_Bad(irg);
664 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
666 if (!is_Bad(prevBlock)) {
667 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
669 nin[i-1] = new_r_Bad(irg);
673 /* We want to pass the Phi0 node to the constructor: this finds additional
674 optimization possibilities.
675 The Phi0 node either is allocated in this function, or it comes from
676 a former call to get_r_value_internal(). In this case we may not yet
677 exchange phi0, as this is done in mature_immBlock(). */
679 phi0_all = block->attr.block.graph_arr[pos];
680 if (! is_Phi0(phi0_all) ||
681 get_irn_arity(phi0_all) != 0 ||
682 get_nodes_block(phi0_all) != block)
688 /* After collecting all predecessors into the array nin a new Phi node
689 with these predecessors is created. This constructor contains an
690 optimization: If all predecessors of the Phi node are identical it
691 returns the only operand instead of a new Phi node. */
692 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
694 /* In case we allocated a Phi0 node at the beginning of this procedure,
695 we need to exchange this Phi0 with the real Phi. */
698 block->attr.block.graph_arr[pos] = res;
705 * This function returns the last definition of a value. In case
706 * this value was last defined in a previous block, Phi nodes are
707 * inserted. If the part of the firm graph containing the definition
708 * is not yet constructed, a dummy Phi node is returned.
710 * @param block the current block
711 * @param pos the value number of the value searched
712 * @param mode the mode of this value (needed for Phi construction)
714 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
717 /* There are 4 cases to treat.
719 1. The block is not mature and we visit it the first time. We can not
720 create a proper Phi node, therefore a Phi0, i.e., a Phi without
721 predecessors is returned. This node is added to the linked list (block
722 attribute "phis") of the containing block to be completed when this block is
723 matured. (Completion will add a new Phi and turn the Phi0 into an Id
726 2. The value is already known in this block, graph_arr[pos] is set and we
727 visit the block the first time. We can return the value without
728 creating any new nodes.
730 3. The block is mature and we visit it the first time. A Phi node needs
731 to be created (phi_merge). If the Phi is not needed, as all it's
732 operands are the same value reaching the block through different
733 paths, it's optimized away and the value itself is returned.
735 4. The block is mature, and we visit it the second time. Now two
736 subcases are possible:
737 * The value was computed completely the last time we were here. This
738 is the case if there is no loop. We can return the proper value.
739 * The recursion that visited this node and set the flag did not
740 return yet. We are computing a value in a loop and need to
741 break the recursion. This case only happens if we visited
742 the same block with phi_merge before, which inserted a Phi0.
743 So we return the Phi0.
746 /* case 4 -- already visited. */
747 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
748 /* As phi_merge allocates a Phi0 this value is always defined. Here
749 is the critical difference of the two algorithms. */
750 assert(block->attr.block.graph_arr[pos]);
751 return block->attr.block.graph_arr[pos];
754 /* visited the first time */
755 set_irn_visited(block, get_irg_visited(current_ir_graph));
757 /* Get the local valid value */
758 res = block->attr.block.graph_arr[pos];
760 /* case 2 -- If the value is actually computed, return it. */
764 if (block->attr.block.is_matured) { /* case 3 */
766 /* The Phi has the same amount of ins as the corresponding block. */
767 int ins = get_irn_arity(block);
769 NEW_ARR_A(ir_node *, nin, ins);
771 /* Phi merge collects the predecessors and then creates a node. */
772 res = phi_merge(block, pos, mode, nin, ins);
774 } else { /* case 1 */
775 /* The block is not mature, we don't know how many in's are needed. A Phi
776 with zero predecessors is created. Such a Phi node is called Phi0
777 node. The Phi0 is then added to the list of Phi0 nodes in this block
778 to be matured by mature_immBlock later.
779 The Phi0 has to remember the pos of it's internal value. If the real
780 Phi is computed, pos is used to update the array with the local
782 res = new_rd_Phi0(current_ir_graph, block, mode);
783 res->attr.phi.u.pos = pos;
784 res->attr.phi.next = block->attr.block.phis;
785 block->attr.block.phis = res;
788 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
790 /* The local valid value is available now. */
791 block->attr.block.graph_arr[pos] = res;
794 } /* get_r_value_internal */
796 /* ************************************************************************** */
799 * Finalize a Block node, when all control flows are known.
800 * Acceptable parameters are only Block nodes.
802 void mature_immBlock(ir_node *block)
808 assert(is_Block(block));
809 if (!get_Block_matured(block)) {
810 ir_graph *irg = current_ir_graph;
812 ins = ARR_LEN(block->in) - 1;
813 /* Fix block parameters */
814 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
816 /* An array for building the Phi nodes. */
817 NEW_ARR_A(ir_node *, nin, ins);
819 /* Traverse a chain of Phi nodes attached to this block and mature
821 for (n = block->attr.block.phis; n; n = next) {
822 inc_irg_visited(irg);
823 next = n->attr.phi.next;
824 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
827 block->attr.block.is_matured = 1;
829 /* Now, as the block is a finished Firm node, we can optimize it.
830 Since other nodes have been allocated since the block was created
831 we can not free the node on the obstack. Therefore we have to call
833 Unfortunately the optimization does not change a lot, as all allocated
834 nodes refer to the unoptimized node.
835 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
836 block = optimize_in_place_2(block);
837 IRN_VERIFY_IRG(block, irg);
839 } /* mature_immBlock */
841 ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
843 assert(get_irg_phase_state(current_ir_graph) == phase_building);
844 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
847 ir_node *new_d_Const(dbg_info *db, tarval *con)
849 assert(get_irg_phase_state(current_ir_graph) == phase_building);
850 return new_bd_Const(db, con);
853 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
855 assert(get_irg_phase_state(current_ir_graph) == phase_building);
856 return new_bd_Const_long(db, mode, value);
857 } /* new_d_Const_long */
859 ir_node *new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp)
861 assert(get_irg_phase_state(current_ir_graph) == phase_building);
862 return new_bd_Const_type(db, con, tp);
863 } /* new_d_Const_type */
866 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
869 assert(is_Cond(arg));
870 assert(get_irg_phase_state(current_ir_graph) == phase_building);
871 arg->attr.cond.default_proj = max_proj;
872 res = new_d_Proj(db, arg, mode_X, max_proj);
874 } /* new_d_defaultProj */
876 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
877 /* GL: objptr was called frame before. Frame was a bad choice for the name
878 as the operand could as well be a pointer to a dynamic object. */
880 assert(get_irg_phase_state(current_ir_graph) == phase_building);
881 return new_bd_Sel(db, current_ir_graph->current_block,
882 store, objptr, 0, NULL, ent);
883 } /* new_d_simpleSel */
885 ir_node *new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp)
887 assert(get_irg_phase_state(current_ir_graph) == phase_building);
888 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
890 } /* new_d_SymConst_type */
892 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind)
894 assert(get_irg_phase_state(current_ir_graph) == phase_building);
895 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
896 value, kind, firm_unknown_type);
897 } /* new_d_SymConst */
899 ir_node *new_d_Sync(dbg_info *db, int arity, ir_node *in[])
901 assert(get_irg_phase_state(current_ir_graph) == phase_building);
902 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
905 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
906 int n_outs, ir_asm_constraint *outputs, int n_clobber,
907 ident *clobber[], ident *text)
909 assert(get_irg_phase_state(current_ir_graph) == phase_building);
910 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
913 /* ********************************************************************* */
914 /* Comfortable interface with automatic Phi node construction. */
915 /* (Uses also constructors of ?? interface, except new_Block. */
916 /* ********************************************************************* */
918 /* Block construction */
919 /* immature Block without predecessors */
920 ir_node *new_d_immBlock(dbg_info *db)
924 assert(get_irg_phase_state(current_ir_graph) == phase_building);
925 /* creates a new dynamic in-array as length of in is -1 */
926 res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
928 res->attr.block.is_matured = 0;
929 res->attr.block.is_dead = 0;
930 res->attr.block.irg.irg = current_ir_graph;
931 res->attr.block.backedge = NULL;
932 res->attr.block.in_cg = NULL;
933 res->attr.block.cg_backedge = NULL;
934 res->attr.block.extblk = NULL;
935 res->attr.block.region = NULL;
936 res->attr.block.entity = NULL;
938 set_Block_block_visited(res, 0);
940 /* Create and initialize array for Phi-node construction. */
941 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
942 current_ir_graph->n_loc);
943 memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
945 /* Immature block may not be optimized! */
946 IRN_VERIFY_IRG(res, current_ir_graph);
949 } /* new_d_immBlock */
951 ir_node *new_immBlock(void)
953 return new_d_immBlock(NULL);
956 /* add an edge to a jmp/control flow node */
957 void add_immBlock_pred(ir_node *block, ir_node *jmp)
959 int n = ARR_LEN(block->in) - 1;
961 assert(is_Block(block) && "Error: Must be a Block");
962 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
963 assert(is_ir_node(jmp));
965 ARR_APP1(ir_node *, block->in, jmp);
967 hook_set_irn_n(block, n, jmp, NULL);
968 } /* add_immBlock_pred */
970 /* changing the current block */
971 void set_cur_block(ir_node *target)
973 current_ir_graph->current_block = target;
974 } /* set_cur_block */
976 /* ************************ */
977 /* parameter administration */
979 /* get a value from the parameter array from the current block by its index */
980 ir_node *get_d_value(dbg_info *db, int pos, ir_mode *mode)
982 ir_graph *irg = current_ir_graph;
983 assert(get_irg_phase_state(irg) == phase_building);
984 inc_irg_visited(irg);
989 return get_r_value_internal(irg->current_block, pos + 1, mode);
992 /* get a value from the parameter array from the current block by its index */
993 ir_node *get_value(int pos, ir_mode *mode)
995 return get_d_value(NULL, pos, mode);
999 * helper function for guess_mode: recursively look for a definition for
1000 * local variable @p pos, returns its mode if found.
1002 static ir_mode *guess_recursively(ir_node *block, int pos)
1008 if (irn_visited(block))
1010 mark_irn_visited(block);
1012 /* already have a defintion -> we can simply look at its mode */
1013 value = block->attr.block.graph_arr[pos];
1015 return get_irn_mode(value);
1017 /* now we try to guess, by looking at the predecessor blocks */
1018 n_preds = get_irn_arity(block);
1019 for (i = 0; i < n_preds; ++i) {
1020 ir_node *pred_block = get_Block_cfgpred_block(block, i);
1021 ir_mode *mode = guess_recursively(pred_block, pos);
1026 /* no way to guess */
1030 ir_mode *ir_guess_mode(int pos)
1032 ir_graph *irg = current_ir_graph;
1033 ir_node *block = irg->current_block;
1034 ir_node *value = block->attr.block.graph_arr[pos+1];
1037 /* already have a defintion -> we can simply look at its mode */
1039 return get_irn_mode(value);
1041 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1042 inc_irg_visited(current_ir_graph);
1043 mode = guess_recursively(block, pos+1);
1044 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
1049 /* set a value at position pos in the parameter array from the current block */
1050 void set_value(int pos, ir_node *value)
1052 ir_graph *irg = current_ir_graph;
1053 assert(get_irg_phase_state(irg) == phase_building);
1055 assert(pos+1 < irg->n_loc);
1056 assert(is_ir_node(value));
1057 irg->current_block->attr.block.graph_arr[pos + 1] = value;
1060 /* Find the value number for a node in the current block.*/
1061 int find_value(ir_node *value)
1064 ir_node *bl = current_ir_graph->current_block;
1066 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
1067 if (bl->attr.block.graph_arr[i] == value)
1072 /* get the current store */
1073 ir_node *get_store(void)
1075 ir_graph *irg = current_ir_graph;
1077 assert(get_irg_phase_state(irg) == phase_building);
1078 /* GL: one could call get_value instead */
1079 inc_irg_visited(irg);
1080 return get_r_value_internal(irg->current_block, 0, mode_M);
1083 /* set the current store: handles automatic Sync construction for Load nodes */
1084 void set_store(ir_node *store)
1086 ir_node *load, *pload, *pred, *in[2];
1088 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1089 /* Beware: due to dead code elimination, a store might become a Bad node even in
1090 the construction phase. */
1091 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
1093 if (get_opt_auto_create_sync()) {
1094 /* handle non-volatile Load nodes by automatically creating Sync's */
1095 load = skip_Proj(store);
1096 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
1097 pred = get_Load_mem(load);
1099 if (is_Sync(pred)) {
1100 /* a Load after a Sync: move it up */
1101 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
1103 set_Load_mem(load, get_memop_mem(mem));
1104 add_Sync_pred(pred, store);
1107 pload = skip_Proj(pred);
1108 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
1109 /* a Load after a Load: create a new Sync */
1110 set_Load_mem(load, get_Load_mem(pload));
1114 store = new_Sync(2, in);
1119 current_ir_graph->current_block->attr.block.graph_arr[0] = store;
1122 void keep_alive(ir_node *ka)
1124 add_End_keepalive(get_irg_end(current_ir_graph), ka);
1127 /* --- Useful access routines --- */
1128 /* Returns the current block of the current graph. To set the current
1129 block use set_cur_block. */
1130 ir_node *get_cur_block(void)
1132 return get_irg_current_block(current_ir_graph);
1133 } /* get_cur_block */
1135 /* Returns the frame type of the current graph */
1136 ir_type *get_cur_frame_type(void)
1138 return get_irg_frame_type(current_ir_graph);
1139 } /* get_cur_frame_type */
1142 /* ********************************************************************* */
1145 /* call once for each run of the library */
1146 void ir_set_uninitialized_local_variable_func(
1147 uninitialized_local_variable_func_t *func)
1149 default_initialize_local_variable = func;
1152 void irg_finalize_cons(ir_graph *irg)
1154 set_irg_phase_state(irg, phase_high);
1157 void irp_finalize_cons(void)
1160 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1161 irg_finalize_cons(get_irp_irg(i));
1163 irp->phase_state = phase_high;
1166 ir_node *new_Start(void)
1168 return new_d_Start(NULL);
1170 ir_node *new_End(void)
1172 return new_d_End(NULL);
1174 ir_node *new_Const(tarval *con)
1176 return new_d_Const(NULL, con);
1179 ir_node *new_Const_long(ir_mode *mode, long value)
1181 return new_d_Const_long(NULL, mode, value);
1184 ir_node *new_Const_type(tarval *con, ir_type *tp)
1186 return new_d_Const_type(NULL, con, tp);
1189 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type)
1191 return new_d_SymConst_type(NULL, mode, value, kind, type);
1193 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
1195 return new_d_SymConst(NULL, mode, value, kind);
1197 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
1199 return new_d_simpleSel(NULL, store, objptr, ent);
1201 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
1203 return new_d_Phi(NULL, arity, in, mode);
1205 ir_node *new_Sync(int arity, ir_node *in[])
1207 return new_d_Sync(NULL, arity, in);
1209 ir_node *new_defaultProj(ir_node *arg, long max_proj)
1211 return new_d_defaultProj(NULL, arg, max_proj);
1213 ir_node *new_Bad(void)
1215 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1216 return get_irg_bad(current_ir_graph);
1218 ir_node *new_NoMem(void)
1220 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1221 return get_irg_no_mem(current_ir_graph);
1223 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1224 int n_outs, ir_asm_constraint *outputs,
1225 int n_clobber, ident *clobber[], ident *text)
1227 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
1230 /* create a new anchor node */
1231 ir_node *new_r_Anchor(ir_graph *irg)
1233 ir_node *in[anchor_last];
1235 memset(in, 0, sizeof(in));
1236 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);
1238 /* hack to get get_irn_irg working: set block to ourself and allow
1239 * get_Block_irg for anchor */
1240 res->attr.irg.irg = irg;