2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
47 #include "gen_ir_cons.c.inl"
50 * Language dependent variable initialization callback.
52 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
55 * Creates a Phi node with all predecessors. Calling this constructor
56 * is only allowed if the corresponding block is mature.
58 ir_node *new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in,
61 ir_graph *irg = get_irn_irg(block);
62 ir_node *res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
63 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
65 res = optimize_node(res);
66 irn_verify_irg(res, irg);
68 /* Memory Phis in endless loops must be kept alive.
69 As we can't distinguish these easily we keep all of them alive. */
70 if (is_Phi(res) && mode == mode_M)
71 add_End_keepalive(get_irg_end(irg), res);
75 ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, ir_tarval *con)
77 ir_node *block = get_irg_start_block(irg);
78 ir_mode *mode = get_tarval_mode(con);
79 ir_node *res = new_ir_node(db, irg, block, op_Const, mode, 0, NULL);
80 res->attr.con.tarval = con;
82 res = optimize_node (res);
83 irn_verify_irg(res, irg);
88 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
91 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
94 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
99 arg->attr.cond.default_proj = max_proj;
100 res = new_rd_Proj(db, arg, mode_X, max_proj);
104 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
105 ir_asm_constraint *inputs, int n_outs,
106 ir_asm_constraint *outputs, int n_clobber,
107 ident *clobber[], ident *text)
109 ir_graph *irg = get_irn_irg(block);
110 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
112 res->attr.assem.pin_state = op_pin_state_pinned;
113 res->attr.assem.input_constraints
114 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
115 res->attr.assem.output_constraints
116 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
117 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
118 res->attr.assem.text = text;
120 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
121 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
122 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
124 res = optimize_node(res);
125 irn_verify_irg(res, irg);
129 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
130 ir_node *objptr, ir_entity *ent)
132 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
135 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
136 symconst_symbol value, symconst_kind symkind)
138 ir_node *block = get_irg_start_block(irg);
139 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
140 res->attr.symc.kind = symkind;
141 res->attr.symc.sym = value;
143 res = optimize_node(res);
144 irn_verify_irg(res, irg);
148 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
151 sym.entity_p = symbol;
152 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
155 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
158 sym.entity_p = symbol;
159 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
162 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
166 return new_rd_SymConst(db, irg, mode, sym, symconst_type_tag);
169 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
173 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
176 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
180 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
183 ir_node *new_r_Const(ir_graph *irg, ir_tarval *con)
185 return new_rd_Const(NULL, irg, con);
187 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
189 return new_rd_Const_long(NULL, irg, mode, value);
191 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
192 symconst_kind symkind)
194 return new_rd_SymConst(NULL, irg, mode, value, symkind);
196 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
199 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
201 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
203 return new_rd_Phi(NULL, block, arity, in, mode);
205 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
207 return new_rd_defaultProj(NULL, arg, max_proj);
209 ir_node *new_r_Bad(ir_graph *irg)
211 return get_irg_bad(irg);
213 ir_node *new_r_NoMem(ir_graph *irg)
215 return get_irg_no_mem(irg);
217 ir_node *new_r_ASM(ir_node *block,
218 int arity, ir_node *in[], ir_asm_constraint *inputs,
219 int n_outs, ir_asm_constraint *outputs,
220 int n_clobber, ident *clobber[], ident *text)
222 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
225 /* ***********************************************************************/
226 /* Methods necessary for automatic Phi node creation */
228 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
229 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
230 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
231 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
233 Call Graph: ( A ---> B == A "calls" B)
235 get_value mature_immBlock
243 get_r_value_internal |
247 new_rd_Phi0 new_rd_Phi_in
249 * *************************************************************************** */
251 /** Creates a Phi node with 0 predecessors. */
252 static inline ir_node *new_rd_Phi0(ir_node *block, ir_mode *mode)
254 ir_graph *irg = get_irn_irg(block);
255 ir_node *res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
256 irn_verify_irg(res, irg);
261 * Internal constructor of a Phi node by a phi_merge operation.
263 * @param block the block in which the Phi will be constructed
264 * @param mode the mod eof the Phi node
265 * @param in the input array of the phi node
266 * @param n_in number of elements in the input array
267 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
268 * the value for which the new Phi is constructed
270 static ir_node *new_rd_Phi_in(ir_node *block, ir_mode *mode,
271 int n_in, ir_node **in, ir_node *phi0)
274 ir_node *res, *known;
275 ir_graph *irg = get_irn_irg(block);
277 /* Allocate a new node on the obstack. The allocation copies the in
279 res = new_ir_node(NULL, irg, block, op_Phi, mode, n_in, in);
280 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, n_in);
282 /* This loop checks whether the Phi has more than one predecessor.
283 If so, it is a real Phi node and we break the loop. Else the
284 Phi node merges the same definition on several paths and therefore
286 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
288 for (i = n_in - 1; i >= 0; --i) {
291 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
293 /* Optimize self referencing Phis: We can't detect them yet properly, as
294 they still refer to the Phi0 they will replace. So replace right now. */
295 if (phi0 && in[i] == phi0)
298 if (in[i] == res || in[i] == known)
307 /* i < 0: there is at most one predecessor, we don't need a phi node. */
310 edges_node_deleted(res, current_ir_graph);
311 obstack_free(current_ir_graph->obst, res);
313 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
314 order, an enclosing Phi know may get superfluous. */
315 res = optimize_in_place_2(known);
317 exchange(known, res);
322 /* A undefined value, e.g., in unreachable code. */
323 res = new_r_Bad(irg);
326 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
327 irn_verify_irg(res, irg);
328 /* Memory Phis in endless loops must be kept alive.
329 As we can't distinguish these easily we keep all of them alive. */
330 if (is_Phi(res) && mode == mode_M)
331 add_End_keepalive(get_irg_end(irg), res);
337 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
340 * Computes the predecessors for the real phi node, and then
341 * allocates and returns this node. The routine called to allocate the
342 * node might optimize it away and return a real value.
343 * This function must be called with an in-array of proper size.
345 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode,
346 int n_ins, ir_node **ins)
348 ir_graph *irg = get_irn_irg(block);
349 ir_node *prevBlock, *res, *phi0, *phi0_all;
352 /* If this block has no value at pos create a Phi0 and remember it
353 in graph_arr to break recursions.
354 Else we may not set graph_arr as there a later value is remembered. */
356 if (block->attr.block.graph_arr[pos] == NULL) {
358 if (block == get_irg_start_block(irg)) {
359 /* Collapsing to Bad tarvals is no good idea.
360 So we call a user-supplied routine here that deals with this
361 case as appropriate for the given language. Sorrily the only
362 help we can give here is the position.
364 Even if all variables are defined before use, it can happen that
365 we get to the start block, if a Cond has been replaced by a tuple
366 (bad, jmp). In this case we call the function needlessly,
367 eventually generating an non existent error.
368 However, this SHOULD NOT HAPPEN, as bad control flow nodes are
369 intercepted before recurring.
371 if (default_initialize_local_variable != NULL) {
372 ir_node *rem = get_r_cur_block(irg);
374 set_r_cur_block(irg, block);
375 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
376 set_r_cur_block(irg, rem);
378 block->attr.block.graph_arr[pos] = new_r_Unknown(irg, mode);
380 return block->attr.block.graph_arr[pos];
382 phi0 = new_rd_Phi0(block, mode);
383 block->attr.block.graph_arr[pos] = phi0;
387 /* This loop goes to all predecessor blocks of the block the Phi node
388 is in and there finds the operands of the Phi node by calling
389 get_r_value_internal. */
390 for (i = 1; i <= n_ins; ++i) {
391 ir_node *cf_pred = block->in[i];
392 ir_node *prevCfOp = skip_Proj(cf_pred);
394 if (is_Bad(prevCfOp)) {
395 /* In case a Cond has been optimized we would get right to the start block
396 with an invalid definition. */
397 ins[i-1] = new_r_Bad(irg);
400 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
402 if (!is_Bad(prevBlock)) {
403 ins[i-1] = get_r_value_internal(prevBlock, pos, mode);
405 ins[i-1] = new_r_Bad(irg);
409 /* We want to pass the Phi0 node to the constructor: this finds additional
410 optimization possibilities.
411 The Phi0 node either is allocated in this function, or it comes from
412 a former call to get_r_value_internal(). In this case we may not yet
413 exchange phi0, as this is done in mature_immBlock(). */
415 phi0_all = block->attr.block.graph_arr[pos];
416 if (! is_Phi0(phi0_all) ||
417 get_irn_arity(phi0_all) != 0 ||
418 get_nodes_block(phi0_all) != block)
424 /* After collecting all predecessors into the array ins a new Phi node
425 with these predecessors is created. This constructor contains an
426 optimization: If all predecessors of the Phi node are identical it
427 returns the only operand instead of a new Phi node. */
428 res = new_rd_Phi_in(block, mode, n_ins, ins, phi0_all);
430 /* In case we allocated a Phi0 node at the beginning of this procedure,
431 we need to exchange this Phi0 with the real Phi. */
434 block->attr.block.graph_arr[pos] = res;
441 * This function returns the last definition of a value. In case
442 * this value was last defined in a previous block, Phi nodes are
443 * inserted. If the part of the firm graph containing the definition
444 * is not yet constructed, a dummy Phi node is returned.
446 * @param block the current block
447 * @param pos the value number of the value searched
448 * @param mode the mode of this value (needed for Phi construction)
450 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
453 /* There are 4 cases to treat.
455 1. The block is not mature and we visit it the first time. We can not
456 create a proper Phi node, therefore a Phi0, i.e., a Phi without
457 predecessors is returned. This node is added to the linked list (block
458 attribute "phis") of the containing block to be completed when this block is
459 matured. (Completion will add a new Phi and turn the Phi0 into an Id
462 2. The value is already known in this block, graph_arr[pos] is set and we
463 visit the block the first time. We can return the value without
464 creating any new nodes.
466 3. The block is mature and we visit it the first time. A Phi node needs
467 to be created (phi_merge). If the Phi is not needed, as all it's
468 operands are the same value reaching the block through different
469 paths, it's optimized away and the value itself is returned.
471 4. The block is mature, and we visit it the second time. Now two
472 subcases are possible:
473 * The value was computed completely the last time we were here. This
474 is the case if there is no loop. We can return the proper value.
475 * The recursion that visited this node and set the flag did not
476 return yet. We are computing a value in a loop and need to
477 break the recursion. This case only happens if we visited
478 the same block with phi_merge before, which inserted a Phi0.
479 So we return the Phi0.
482 /* case 4 -- already visited. */
483 if (irn_visited(block)) {
484 /* As phi_merge allocates a Phi0 this value is always defined. Here
485 is the critical difference of the two algorithms. */
486 assert(block->attr.block.graph_arr[pos]);
487 return block->attr.block.graph_arr[pos];
490 /* visited the first time */
491 mark_irn_visited(block);
493 /* Get the local valid value */
494 res = block->attr.block.graph_arr[pos];
496 /* case 2 -- If the value is actually computed, return it. */
500 if (block->attr.block.is_matured) { /* case 3 */
502 /* The Phi has the same amount of ins as the corresponding block. */
503 int n_in = get_irn_arity(block);
505 NEW_ARR_A(ir_node *, in, n_in);
507 /* Phi merge collects the predecessors and then creates a node. */
508 res = phi_merge(block, pos, mode, n_in, in);
509 } else { /* case 1 */
510 /* The block is not mature, we don't know how many in's are needed. A Phi
511 with zero predecessors is created. Such a Phi node is called Phi0
512 node. The Phi0 is then added to the list of Phi0 nodes in this block
513 to be matured by mature_immBlock later.
514 The Phi0 has to remember the pos of it's internal value. If the real
515 Phi is computed, pos is used to update the array with the local
517 res = new_rd_Phi0(block, mode);
518 res->attr.phi.u.pos = pos;
519 res->attr.phi.next = block->attr.block.phis;
520 block->attr.block.phis = res;
523 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
525 /* The local valid value is available now. */
526 block->attr.block.graph_arr[pos] = res;
531 /* ************************************************************************** */
534 * Finalize a Block node, when all control flows are known.
535 * Acceptable parameters are only Block nodes.
537 void mature_immBlock(ir_node *block)
543 assert(is_Block(block));
544 if (!get_Block_matured(block)) {
545 ir_graph *irg = current_ir_graph;
547 ins = ARR_LEN(block->in) - 1;
548 /* Fix block parameters */
549 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
551 /* An array for building the Phi nodes. */
552 NEW_ARR_A(ir_node *, nin, ins);
554 /* Traverse a chain of Phi nodes attached to this block and mature
556 for (n = block->attr.block.phis; n; n = next) {
557 inc_irg_visited(irg);
558 next = n->attr.phi.next;
559 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, ins, nin));
562 block->attr.block.is_matured = 1;
564 /* Now, as the block is a finished Firm node, we can optimize it.
565 Since other nodes have been allocated since the block was created
566 we can not free the node on the obstack. Therefore we have to call
568 Unfortunately the optimization does not change a lot, as all allocated
569 nodes refer to the unoptimized node.
570 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
571 block = optimize_in_place_2(block);
572 irn_verify_irg(block, irg);
576 ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
578 assert(get_irg_phase_state(current_ir_graph) == phase_building);
579 return new_rd_Phi(db, current_ir_graph->current_block, arity, in, mode);
582 ir_node *new_d_Const(dbg_info *db, ir_tarval *con)
584 assert(get_irg_phase_state(current_ir_graph) == phase_building);
585 return new_rd_Const(db, current_ir_graph, con);
588 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
590 assert(get_irg_phase_state(current_ir_graph) == phase_building);
591 return new_rd_Const_long(db, current_ir_graph, mode, value);
594 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
597 assert(is_Cond(arg));
598 assert(get_irg_phase_state(current_ir_graph) == phase_building);
599 arg->attr.cond.default_proj = max_proj;
600 res = new_d_Proj(db, arg, mode_X, max_proj);
604 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
607 assert(get_irg_phase_state(current_ir_graph) == phase_building);
608 return new_rd_Sel(db, current_ir_graph->current_block,
609 store, objptr, 0, NULL, ent);
612 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
615 assert(get_irg_phase_state(current_ir_graph) == phase_building);
616 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
619 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
620 ir_asm_constraint *inputs,
621 int n_outs, ir_asm_constraint *outputs, int n_clobber,
622 ident *clobber[], ident *text)
624 assert(get_irg_phase_state(current_ir_graph) == phase_building);
625 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
626 n_outs, outputs, n_clobber, clobber, text);
629 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
633 assert(get_irg_phase_state(irg) == phase_building);
634 /* creates a new dynamic in-array as length of in is -1 */
635 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
637 res->attr.block.is_matured = 0;
638 res->attr.block.is_dead = 0;
639 res->attr.block.irg.irg = irg;
640 res->attr.block.backedge = NULL;
641 res->attr.block.in_cg = NULL;
642 res->attr.block.cg_backedge = NULL;
643 res->attr.block.extblk = NULL;
644 res->attr.block.region = NULL;
645 res->attr.block.entity = NULL;
647 set_Block_block_visited(res, 0);
649 /* Create and initialize array for Phi-node construction. */
650 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
651 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
653 /* Immature block may not be optimized! */
654 irn_verify_irg(res, irg);
659 ir_node *new_r_immBlock(ir_graph *irg)
661 return new_rd_immBlock(NULL, irg);
664 ir_node *new_d_immBlock(dbg_info *dbgi)
666 return new_rd_immBlock(dbgi, current_ir_graph);
669 ir_node *new_immBlock(void)
671 return new_rd_immBlock(NULL, current_ir_graph);
674 void add_immBlock_pred(ir_node *block, ir_node *jmp)
676 int n = ARR_LEN(block->in) - 1;
678 assert(is_Block(block) && "Error: Must be a Block");
679 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
680 assert(is_ir_node(jmp));
682 ARR_APP1(ir_node *, block->in, jmp);
684 hook_set_irn_n(block, n, jmp, NULL);
687 void set_cur_block(ir_node *target)
689 current_ir_graph->current_block = target;
692 void set_r_cur_block(ir_graph *irg, ir_node *target)
694 irg->current_block = target;
697 ir_node *get_r_cur_block(ir_graph *irg)
699 return irg->current_block;
702 ir_node *get_cur_block(void)
704 return get_r_cur_block(current_ir_graph);
707 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
709 assert(get_irg_phase_state(irg) == phase_building);
710 inc_irg_visited(irg);
714 return get_r_value_internal(irg->current_block, pos + 1, mode);
717 ir_node *get_value(int pos, ir_mode *mode)
719 return get_r_value(current_ir_graph, pos, mode);
723 * helper function for guess_mode: recursively look for a definition for
724 * local variable @p pos, returns its mode if found.
726 static ir_mode *guess_recursively(ir_node *block, int pos)
732 if (irn_visited(block))
734 mark_irn_visited(block);
736 /* already have a defintion -> we can simply look at its mode */
737 value = block->attr.block.graph_arr[pos];
739 return get_irn_mode(value);
741 /* now we try to guess, by looking at the predecessor blocks */
742 n_preds = get_irn_arity(block);
743 for (i = 0; i < n_preds; ++i) {
744 ir_node *pred_block = get_Block_cfgpred_block(block, i);
745 ir_mode *mode = guess_recursively(pred_block, pos);
750 /* no way to guess */
754 ir_mode *ir_guess_mode(int pos)
756 ir_graph *irg = current_ir_graph;
757 ir_node *block = irg->current_block;
758 ir_node *value = block->attr.block.graph_arr[pos+1];
761 /* already have a defintion -> we can simply look at its mode */
763 return get_irn_mode(value);
765 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
766 inc_irg_visited(current_ir_graph);
767 mode = guess_recursively(block, pos+1);
768 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
773 void set_r_value(ir_graph *irg, int pos, ir_node *value)
775 assert(get_irg_phase_state(irg) == phase_building);
777 assert(pos+1 < irg->n_loc);
778 assert(is_ir_node(value));
779 irg->current_block->attr.block.graph_arr[pos + 1] = value;
782 void set_value(int pos, ir_node *value)
784 set_r_value(current_ir_graph, pos, value);
787 int find_value(ir_node *value)
790 ir_node *bl = current_ir_graph->current_block;
792 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
793 if (bl->attr.block.graph_arr[i] == value)
798 ir_node *get_r_store(ir_graph *irg)
800 assert(get_irg_phase_state(irg) == phase_building);
801 inc_irg_visited(irg);
802 return get_r_value_internal(irg->current_block, 0, mode_M);
805 ir_node *get_store(void)
807 return get_r_store(current_ir_graph);
810 void set_r_store(ir_graph *irg, ir_node *store)
812 ir_node *load, *pload, *pred, *in[2];
814 assert(get_irg_phase_state(irg) == phase_building);
815 /* Beware: due to dead code elimination, a store might become a Bad node even in
816 the construction phase. */
817 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
819 if (get_opt_auto_create_sync()) {
820 /* handle non-volatile Load nodes by automatically creating Sync's */
821 load = skip_Proj(store);
822 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
823 pred = get_Load_mem(load);
826 /* a Load after a Sync: move it up */
827 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
829 set_Load_mem(load, get_memop_mem(mem));
830 add_Sync_pred(pred, store);
833 pload = skip_Proj(pred);
834 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
835 /* a Load after a Load: create a new Sync */
836 set_Load_mem(load, get_Load_mem(pload));
840 store = new_r_Sync(irg->current_block, 2, in);
845 irg->current_block->attr.block.graph_arr[0] = store;
848 void set_store(ir_node *store)
850 set_r_store(current_ir_graph, store);
853 void keep_alive(ir_node *ka)
855 add_End_keepalive(get_irg_end(current_ir_graph), ka);
858 void ir_set_uninitialized_local_variable_func(
859 uninitialized_local_variable_func_t *func)
861 default_initialize_local_variable = func;
864 void irg_finalize_cons(ir_graph *irg)
866 set_irg_phase_state(irg, phase_high);
869 void irp_finalize_cons(void)
872 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
873 irg_finalize_cons(get_irp_irg(i));
875 irp->phase_state = phase_high;
878 ir_node *new_Const(ir_tarval *con)
880 return new_d_Const(NULL, con);
883 ir_node *new_Const_long(ir_mode *mode, long value)
885 return new_d_Const_long(NULL, mode, value);
888 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
890 return new_d_SymConst(NULL, mode, value, kind);
892 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
894 return new_d_simpleSel(NULL, store, objptr, ent);
896 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
898 return new_d_Phi(NULL, arity, in, mode);
900 ir_node *new_defaultProj(ir_node *arg, long max_proj)
902 return new_d_defaultProj(NULL, arg, max_proj);
904 ir_node *new_Bad(void)
906 assert(get_irg_phase_state(current_ir_graph) == phase_building);
907 return get_irg_bad(current_ir_graph);
909 ir_node *new_NoMem(void)
911 assert(get_irg_phase_state(current_ir_graph) == phase_building);
912 return get_irg_no_mem(current_ir_graph);
914 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
915 int n_outs, ir_asm_constraint *outputs,
916 int n_clobber, ident *clobber[], ident *text)
918 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
921 ir_node *new_r_Anchor(ir_graph *irg)
923 ir_node *in[anchor_last];
925 memset(in, 0, sizeof(in));
926 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);
928 /* hack to get get_irn_irg working: set block to ourself and allow
929 * get_Block_irg for anchor */
930 res->attr.irg.irg = irg;
936 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
938 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
939 res->attr.block.irg.irg = irg;
940 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
941 set_Block_matured(res, 1);
942 /* Create and initialize array for Phi-node construction. */
943 if (get_irg_phase_state(irg) == phase_building) {
944 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
945 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
947 irn_verify_irg(res, irg);