/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @file
 * @brief   Various irnode constructors.  Automatic construction of SSA
 *          representation.
 * @author  Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler,
 *          Michael Beck, Matthias Braun
 */
#include <assert.h>
#include <string.h>

#include "irgraph_t.h"
#include "irbackedge_t.h"
#include "iredges_t.h"

#include "gen_ir_cons.c.inl"
50 * Language dependent variable initialization callback.
52 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
54 ir_node *new_rd_Start(dbg_info *db, ir_graph *irg)
56 ir_node *block = get_irg_start_block(irg);
57 ir_node *res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
59 res = optimize_node(res);
60 irn_verify_irg(res, irg);
64 ir_node *new_rd_End(dbg_info *db, ir_graph *irg)
66 ir_node *block = get_irg_end_block(irg);
67 ir_node *res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
69 res = optimize_node(res);
70 irn_verify_irg(res, irg);
75 * Creates a Phi node with all predecessors. Calling this constructor
76 * is only allowed if the corresponding block is mature.
78 ir_node *new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in,
81 ir_graph *irg = get_irn_irg(block);
82 ir_node *res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
83 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
85 res = optimize_node(res);
86 irn_verify_irg(res, irg);
88 /* Memory Phis in endless loops must be kept alive.
89 As we can't distinguish these easily we keep all of them alive. */
90 if (is_Phi(res) && mode == mode_M)
91 add_End_keepalive(get_irg_end(irg), res);
95 ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
97 ir_node *block = get_irg_start_block(irg);
98 ir_mode *mode = get_tarval_mode(con);
99 ir_node *res = new_ir_node(db, irg, block, op_Const, mode, 0, NULL);
100 res->attr.con.tarval = con;
102 res = optimize_node (res);
103 irn_verify_irg(res, irg);
108 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
111 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
114 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
118 assert(is_Cond(arg));
119 arg->attr.cond.default_proj = max_proj;
120 res = new_rd_Proj(db, arg, mode_X, max_proj);
124 ir_node *new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[])
126 ir_graph *irg = get_irn_irg(block);
127 ir_node *res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
130 for (i = 0; i < arity; ++i)
131 add_Sync_pred(res, in[i]);
133 res = optimize_node(res);
134 irn_verify_irg(res, irg);
138 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
139 ir_asm_constraint *inputs, int n_outs,
140 ir_asm_constraint *outputs, int n_clobber,
141 ident *clobber[], ident *text)
143 ir_graph *irg = get_irn_irg(block);
144 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
146 res->attr.assem.pin_state = op_pin_state_pinned;
147 res->attr.assem.input_constraints
148 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
149 res->attr.assem.output_constraints
150 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
151 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
152 res->attr.assem.text = text;
154 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
155 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
156 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
158 res = optimize_node(res);
159 irn_verify_irg(res, irg);
163 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
164 ir_node *objptr, ir_entity *ent)
166 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
169 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
170 symconst_symbol value, symconst_kind symkind)
172 ir_node *block = get_irg_start_block(irg);
173 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
174 res->attr.symc.kind = symkind;
175 res->attr.symc.sym = value;
177 res = optimize_node(res);
178 irn_verify_irg(res, irg);
182 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
185 sym.entity_p = symbol;
186 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
189 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
192 sym.entity_p = symbol;
193 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
196 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
200 return new_rd_SymConst(db, irg, mode, sym, symconst_type_tag);
203 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
207 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
210 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
214 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
217 ir_node *new_r_Start(ir_graph *irg)
219 return new_rd_Start(NULL, irg);
221 ir_node *new_r_End(ir_graph *irg)
223 return new_rd_End(NULL, irg);
225 ir_node *new_r_Const(ir_graph *irg, tarval *con)
227 return new_rd_Const(NULL, irg, con);
229 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
231 return new_rd_Const_long(NULL, irg, mode, value);
233 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
234 symconst_kind symkind)
236 return new_rd_SymConst(NULL, irg, mode, value, symkind);
238 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
241 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
243 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
245 return new_rd_Phi(NULL, block, arity, in, mode);
247 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[])
249 return new_rd_Sync(NULL, block, arity, in);
251 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
253 return new_rd_defaultProj(NULL, arg, max_proj);
255 ir_node *new_r_Bad(ir_graph *irg)
257 return get_irg_bad(irg);
259 ir_node *new_r_NoMem(ir_graph *irg)
261 return get_irg_no_mem(irg);
263 ir_node *new_r_ASM(ir_node *block,
264 int arity, ir_node *in[], ir_asm_constraint *inputs,
265 int n_outs, ir_asm_constraint *outputs,
266 int n_clobber, ident *clobber[], ident *text)
268 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
271 /** ********************/
272 /** public interfaces */
273 /** construction tools */
275 ir_node *new_d_Start(dbg_info *db)
279 assert(get_irg_phase_state(current_ir_graph) == phase_building);
280 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
281 op_Start, mode_T, 0, NULL);
283 res = optimize_node(res);
284 irn_verify_irg(res, current_ir_graph);
288 ir_node *new_d_End(dbg_info *db)
291 assert(get_irg_phase_state(current_ir_graph) == phase_building);
292 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
293 op_End, mode_X, -1, NULL);
294 res = optimize_node(res);
295 irn_verify_irg(res, current_ir_graph);
/* ***********************************************************************
   Methods necessary for automatic Phi node creation:

   ir_node *phi_merge            (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
   ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
   ir_node *new_rd_Phi0          (ir_graph *irg, ir_node *block, ir_mode *mode)
   ir_node *new_rd_Phi_in        (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)

   Call Graph:   ( A ---> B  ==  A "calls" B)

       get_value            mature_immBlock
          |                       |
   get_r_value_internal           |
          |                       |
          +------- phi_merge -----+
          |            |
    new_rd_Phi0   new_rd_Phi_in

   * *************************************************************************** */
326 /** Creates a Phi node with 0 predecessors. */
327 static inline ir_node *new_rd_Phi0(ir_node *block, ir_mode *mode)
329 ir_graph *irg = get_irn_irg(block);
330 ir_node *res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
331 irn_verify_irg(res, irg);
336 * Internal constructor of a Phi node by a phi_merge operation.
338 * @param block the block in which the Phi will be constructed
339 * @param mode the mod eof the Phi node
340 * @param in the input array of the phi node
341 * @param n_in number of elements in the input array
342 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
343 * the value for which the new Phi is constructed
345 static ir_node *new_rd_Phi_in(ir_node *block, ir_mode *mode,
346 int n_in, ir_node **in, ir_node *phi0)
349 ir_node *res, *known;
350 ir_graph *irg = get_irn_irg(block);
352 /* Allocate a new node on the obstack. The allocation copies the in
354 res = new_ir_node(NULL, irg, block, op_Phi, mode, n_in, in);
355 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, n_in);
357 /* This loop checks whether the Phi has more than one predecessor.
358 If so, it is a real Phi node and we break the loop. Else the
359 Phi node merges the same definition on several paths and therefore
361 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
363 for (i = n_in - 1; i >= 0; --i) {
366 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
368 /* Optimize self referencing Phis: We can't detect them yet properly, as
369 they still refer to the Phi0 they will replace. So replace right now. */
370 if (phi0 && in[i] == phi0)
373 if (in[i] == res || in[i] == known)
382 /* i < 0: there is at most one predecessor, we don't need a phi node. */
385 edges_node_deleted(res, current_ir_graph);
386 obstack_free(current_ir_graph->obst, res);
388 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
389 order, an enclosing Phi know may get superfluous. */
390 res = optimize_in_place_2(known);
392 exchange(known, res);
397 /* A undefined value, e.g., in unreachable code. */
398 res = new_r_Bad(irg);
401 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
402 irn_verify_irg(res, irg);
403 /* Memory Phis in endless loops must be kept alive.
404 As we can't distinguish these easily we keep all of them alive. */
405 if (is_Phi(res) && mode == mode_M)
406 add_End_keepalive(get_irg_end(irg), res);
412 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
415 * Computes the predecessors for the real phi node, and then
416 * allocates and returns this node. The routine called to allocate the
417 * node might optimize it away and return a real value.
418 * This function must be called with an in-array of proper size.
420 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode,
421 int n_ins, ir_node **ins)
423 ir_graph *irg = get_irn_irg(block);
424 ir_node *prevBlock, *res, *phi0, *phi0_all;
427 /* If this block has no value at pos create a Phi0 and remember it
428 in graph_arr to break recursions.
429 Else we may not set graph_arr as there a later value is remembered. */
431 if (block->attr.block.graph_arr[pos] == NULL) {
433 if (block == get_irg_start_block(irg)) {
434 /* Collapsing to Bad tarvals is no good idea.
435 So we call a user-supplied routine here that deals with this
436 case as appropriate for the given language. Sorrily the only
437 help we can give here is the position.
439 Even if all variables are defined before use, it can happen that
440 we get to the start block, if a Cond has been replaced by a tuple
441 (bad, jmp). In this case we call the function needlessly,
442 eventually generating an non existent error.
443 However, this SHOULD NOT HAPPEN, as bad control flow nodes are
444 intercepted before recurring.
446 if (default_initialize_local_variable != NULL) {
447 ir_node *rem = get_r_cur_block(irg);
449 set_r_cur_block(irg, block);
450 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
451 set_r_cur_block(irg, rem);
453 block->attr.block.graph_arr[pos] = new_r_Unknown(irg, mode);
455 return block->attr.block.graph_arr[pos];
457 phi0 = new_rd_Phi0(block, mode);
458 block->attr.block.graph_arr[pos] = phi0;
462 /* This loop goes to all predecessor blocks of the block the Phi node
463 is in and there finds the operands of the Phi node by calling
464 get_r_value_internal. */
465 for (i = 1; i <= n_ins; ++i) {
466 ir_node *cf_pred = block->in[i];
467 ir_node *prevCfOp = skip_Proj(cf_pred);
469 if (is_Bad(prevCfOp)) {
470 /* In case a Cond has been optimized we would get right to the start block
471 with an invalid definition. */
472 ins[i-1] = new_r_Bad(irg);
475 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
477 if (!is_Bad(prevBlock)) {
478 ins[i-1] = get_r_value_internal(prevBlock, pos, mode);
480 ins[i-1] = new_r_Bad(irg);
484 /* We want to pass the Phi0 node to the constructor: this finds additional
485 optimization possibilities.
486 The Phi0 node either is allocated in this function, or it comes from
487 a former call to get_r_value_internal(). In this case we may not yet
488 exchange phi0, as this is done in mature_immBlock(). */
490 phi0_all = block->attr.block.graph_arr[pos];
491 if (! is_Phi0(phi0_all) ||
492 get_irn_arity(phi0_all) != 0 ||
493 get_nodes_block(phi0_all) != block)
499 /* After collecting all predecessors into the array ins a new Phi node
500 with these predecessors is created. This constructor contains an
501 optimization: If all predecessors of the Phi node are identical it
502 returns the only operand instead of a new Phi node. */
503 res = new_rd_Phi_in(block, mode, n_ins, ins, phi0_all);
505 /* In case we allocated a Phi0 node at the beginning of this procedure,
506 we need to exchange this Phi0 with the real Phi. */
509 block->attr.block.graph_arr[pos] = res;
516 * This function returns the last definition of a value. In case
517 * this value was last defined in a previous block, Phi nodes are
518 * inserted. If the part of the firm graph containing the definition
519 * is not yet constructed, a dummy Phi node is returned.
521 * @param block the current block
522 * @param pos the value number of the value searched
523 * @param mode the mode of this value (needed for Phi construction)
525 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
528 /* There are 4 cases to treat.
530 1. The block is not mature and we visit it the first time. We can not
531 create a proper Phi node, therefore a Phi0, i.e., a Phi without
532 predecessors is returned. This node is added to the linked list (block
533 attribute "phis") of the containing block to be completed when this block is
534 matured. (Completion will add a new Phi and turn the Phi0 into an Id
537 2. The value is already known in this block, graph_arr[pos] is set and we
538 visit the block the first time. We can return the value without
539 creating any new nodes.
541 3. The block is mature and we visit it the first time. A Phi node needs
542 to be created (phi_merge). If the Phi is not needed, as all it's
543 operands are the same value reaching the block through different
544 paths, it's optimized away and the value itself is returned.
546 4. The block is mature, and we visit it the second time. Now two
547 subcases are possible:
548 * The value was computed completely the last time we were here. This
549 is the case if there is no loop. We can return the proper value.
550 * The recursion that visited this node and set the flag did not
551 return yet. We are computing a value in a loop and need to
552 break the recursion. This case only happens if we visited
553 the same block with phi_merge before, which inserted a Phi0.
554 So we return the Phi0.
557 /* case 4 -- already visited. */
558 if (irn_visited(block)) {
559 /* As phi_merge allocates a Phi0 this value is always defined. Here
560 is the critical difference of the two algorithms. */
561 assert(block->attr.block.graph_arr[pos]);
562 return block->attr.block.graph_arr[pos];
565 /* visited the first time */
566 mark_irn_visited(block);
568 /* Get the local valid value */
569 res = block->attr.block.graph_arr[pos];
571 /* case 2 -- If the value is actually computed, return it. */
575 if (block->attr.block.is_matured) { /* case 3 */
577 /* The Phi has the same amount of ins as the corresponding block. */
578 int n_in = get_irn_arity(block);
580 NEW_ARR_A(ir_node *, in, n_in);
582 /* Phi merge collects the predecessors and then creates a node. */
583 res = phi_merge(block, pos, mode, n_in, in);
584 } else { /* case 1 */
585 /* The block is not mature, we don't know how many in's are needed. A Phi
586 with zero predecessors is created. Such a Phi node is called Phi0
587 node. The Phi0 is then added to the list of Phi0 nodes in this block
588 to be matured by mature_immBlock later.
589 The Phi0 has to remember the pos of it's internal value. If the real
590 Phi is computed, pos is used to update the array with the local
592 res = new_rd_Phi0(block, mode);
593 res->attr.phi.u.pos = pos;
594 res->attr.phi.next = block->attr.block.phis;
595 block->attr.block.phis = res;
598 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
600 /* The local valid value is available now. */
601 block->attr.block.graph_arr[pos] = res;
606 /* ************************************************************************** */
609 * Finalize a Block node, when all control flows are known.
610 * Acceptable parameters are only Block nodes.
612 void mature_immBlock(ir_node *block)
618 assert(is_Block(block));
619 if (!get_Block_matured(block)) {
620 ir_graph *irg = current_ir_graph;
622 ins = ARR_LEN(block->in) - 1;
623 /* Fix block parameters */
624 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
626 /* An array for building the Phi nodes. */
627 NEW_ARR_A(ir_node *, nin, ins);
629 /* Traverse a chain of Phi nodes attached to this block and mature
631 for (n = block->attr.block.phis; n; n = next) {
632 inc_irg_visited(irg);
633 next = n->attr.phi.next;
634 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, ins, nin));
637 block->attr.block.is_matured = 1;
639 /* Now, as the block is a finished Firm node, we can optimize it.
640 Since other nodes have been allocated since the block was created
641 we can not free the node on the obstack. Therefore we have to call
643 Unfortunately the optimization does not change a lot, as all allocated
644 nodes refer to the unoptimized node.
645 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
646 block = optimize_in_place_2(block);
647 irn_verify_irg(block, irg);
651 ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
653 assert(get_irg_phase_state(current_ir_graph) == phase_building);
654 return new_rd_Phi(db, current_ir_graph->current_block, arity, in, mode);
657 ir_node *new_d_Const(dbg_info *db, tarval *con)
659 assert(get_irg_phase_state(current_ir_graph) == phase_building);
660 return new_rd_Const(db, current_ir_graph, con);
663 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
665 assert(get_irg_phase_state(current_ir_graph) == phase_building);
666 return new_rd_Const_long(db, current_ir_graph, mode, value);
669 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
672 assert(is_Cond(arg));
673 assert(get_irg_phase_state(current_ir_graph) == phase_building);
674 arg->attr.cond.default_proj = max_proj;
675 res = new_d_Proj(db, arg, mode_X, max_proj);
679 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
682 assert(get_irg_phase_state(current_ir_graph) == phase_building);
683 return new_rd_Sel(db, current_ir_graph->current_block,
684 store, objptr, 0, NULL, ent);
687 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
690 assert(get_irg_phase_state(current_ir_graph) == phase_building);
691 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
694 ir_node *new_d_Sync(dbg_info *db, int arity, ir_node *in[])
696 assert(get_irg_phase_state(current_ir_graph) == phase_building);
697 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
700 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
701 ir_asm_constraint *inputs,
702 int n_outs, ir_asm_constraint *outputs, int n_clobber,
703 ident *clobber[], ident *text)
705 assert(get_irg_phase_state(current_ir_graph) == phase_building);
706 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
707 n_outs, outputs, n_clobber, clobber, text);
710 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
714 assert(get_irg_phase_state(irg) == phase_building);
715 /* creates a new dynamic in-array as length of in is -1 */
716 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
718 res->attr.block.is_matured = 0;
719 res->attr.block.is_dead = 0;
720 res->attr.block.irg.irg = irg;
721 res->attr.block.backedge = NULL;
722 res->attr.block.in_cg = NULL;
723 res->attr.block.cg_backedge = NULL;
724 res->attr.block.extblk = NULL;
725 res->attr.block.region = NULL;
726 res->attr.block.entity = NULL;
728 set_Block_block_visited(res, 0);
730 /* Create and initialize array for Phi-node construction. */
731 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
732 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
734 /* Immature block may not be optimized! */
735 irn_verify_irg(res, irg);
740 ir_node *new_r_immBlock(ir_graph *irg)
742 return new_rd_immBlock(NULL, irg);
745 ir_node *new_d_immBlock(dbg_info *dbgi)
747 return new_rd_immBlock(dbgi, current_ir_graph);
750 ir_node *new_immBlock(void)
752 return new_rd_immBlock(NULL, current_ir_graph);
755 void add_immBlock_pred(ir_node *block, ir_node *jmp)
757 int n = ARR_LEN(block->in) - 1;
759 assert(is_Block(block) && "Error: Must be a Block");
760 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
761 assert(is_ir_node(jmp));
763 ARR_APP1(ir_node *, block->in, jmp);
765 hook_set_irn_n(block, n, jmp, NULL);
768 void set_cur_block(ir_node *target)
770 current_ir_graph->current_block = target;
773 void set_r_cur_block(ir_graph *irg, ir_node *target)
775 irg->current_block = target;
778 ir_node *get_r_cur_block(ir_graph *irg)
780 return irg->current_block;
783 ir_node *get_cur_block(void)
785 return get_r_cur_block(current_ir_graph);
788 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
790 assert(get_irg_phase_state(irg) == phase_building);
791 inc_irg_visited(irg);
795 return get_r_value_internal(irg->current_block, pos + 1, mode);
798 ir_node *get_value(int pos, ir_mode *mode)
800 return get_r_value(current_ir_graph, pos, mode);
804 * helper function for guess_mode: recursively look for a definition for
805 * local variable @p pos, returns its mode if found.
807 static ir_mode *guess_recursively(ir_node *block, int pos)
813 if (irn_visited(block))
815 mark_irn_visited(block);
817 /* already have a defintion -> we can simply look at its mode */
818 value = block->attr.block.graph_arr[pos];
820 return get_irn_mode(value);
822 /* now we try to guess, by looking at the predecessor blocks */
823 n_preds = get_irn_arity(block);
824 for (i = 0; i < n_preds; ++i) {
825 ir_node *pred_block = get_Block_cfgpred_block(block, i);
826 ir_mode *mode = guess_recursively(pred_block, pos);
831 /* no way to guess */
835 ir_mode *ir_guess_mode(int pos)
837 ir_graph *irg = current_ir_graph;
838 ir_node *block = irg->current_block;
839 ir_node *value = block->attr.block.graph_arr[pos+1];
842 /* already have a defintion -> we can simply look at its mode */
844 return get_irn_mode(value);
846 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
847 inc_irg_visited(current_ir_graph);
848 mode = guess_recursively(block, pos+1);
849 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
854 void set_r_value(ir_graph *irg, int pos, ir_node *value)
856 assert(get_irg_phase_state(irg) == phase_building);
858 assert(pos+1 < irg->n_loc);
859 assert(is_ir_node(value));
860 irg->current_block->attr.block.graph_arr[pos + 1] = value;
863 void set_value(int pos, ir_node *value)
865 set_r_value(current_ir_graph, pos, value);
868 int find_value(ir_node *value)
871 ir_node *bl = current_ir_graph->current_block;
873 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
874 if (bl->attr.block.graph_arr[i] == value)
879 ir_node *get_r_store(ir_graph *irg)
881 assert(get_irg_phase_state(irg) == phase_building);
882 inc_irg_visited(irg);
883 return get_r_value_internal(irg->current_block, 0, mode_M);
886 ir_node *get_store(void)
888 return get_r_store(current_ir_graph);
891 void set_r_store(ir_graph *irg, ir_node *store)
893 ir_node *load, *pload, *pred, *in[2];
895 assert(get_irg_phase_state(irg) == phase_building);
896 /* Beware: due to dead code elimination, a store might become a Bad node even in
897 the construction phase. */
898 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
900 if (get_opt_auto_create_sync()) {
901 /* handle non-volatile Load nodes by automatically creating Sync's */
902 load = skip_Proj(store);
903 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
904 pred = get_Load_mem(load);
907 /* a Load after a Sync: move it up */
908 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
910 set_Load_mem(load, get_memop_mem(mem));
911 add_Sync_pred(pred, store);
914 pload = skip_Proj(pred);
915 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
916 /* a Load after a Load: create a new Sync */
917 set_Load_mem(load, get_Load_mem(pload));
921 store = new_r_Sync(irg->current_block, 2, in);
926 irg->current_block->attr.block.graph_arr[0] = store;
929 void set_store(ir_node *store)
931 set_r_store(current_ir_graph, store);
934 void keep_alive(ir_node *ka)
936 add_End_keepalive(get_irg_end(current_ir_graph), ka);
939 void ir_set_uninitialized_local_variable_func(
940 uninitialized_local_variable_func_t *func)
942 default_initialize_local_variable = func;
945 void irg_finalize_cons(ir_graph *irg)
947 set_irg_phase_state(irg, phase_high);
950 void irp_finalize_cons(void)
953 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
954 irg_finalize_cons(get_irp_irg(i));
956 irp->phase_state = phase_high;
959 ir_node *new_Start(void)
961 return new_d_Start(NULL);
963 ir_node *new_End(void)
965 return new_d_End(NULL);
967 ir_node *new_Const(tarval *con)
969 return new_d_Const(NULL, con);
972 ir_node *new_Const_long(ir_mode *mode, long value)
974 return new_d_Const_long(NULL, mode, value);
977 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
979 return new_d_SymConst(NULL, mode, value, kind);
981 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
983 return new_d_simpleSel(NULL, store, objptr, ent);
985 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
987 return new_d_Phi(NULL, arity, in, mode);
989 ir_node *new_Sync(int arity, ir_node *in[])
991 return new_d_Sync(NULL, arity, in);
993 ir_node *new_defaultProj(ir_node *arg, long max_proj)
995 return new_d_defaultProj(NULL, arg, max_proj);
997 ir_node *new_Bad(void)
999 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1000 return get_irg_bad(current_ir_graph);
1002 ir_node *new_NoMem(void)
1004 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1005 return get_irg_no_mem(current_ir_graph);
1007 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1008 int n_outs, ir_asm_constraint *outputs,
1009 int n_clobber, ident *clobber[], ident *text)
1011 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
1014 ir_node *new_r_Anchor(ir_graph *irg)
1016 ir_node *in[anchor_last];
1018 memset(in, 0, sizeof(in));
1019 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);
1021 /* hack to get get_irn_irg working: set block to ourself and allow
1022 * get_Block_irg for anchor */
1023 res->attr.irg.irg = irg;