2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
47 #include "gen_ir_cons.c.inl"
50 * Language dependent variable initialization callback.
/* NULL by default. When set (via ir_set_uninitialized_local_variable_func),
 * phi_merge() calls it to produce a value for a local variable that is read
 * before any definition reaches the start block. */
52 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* Convenience constructor: converts the long value into a tarval of the
 * given mode and builds a Const node from it. */
54 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
57 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
/* Creates the default Proj (mode_X control flow) of a Cond-like node and
 * records max_proj as the Cond's default_proj attribute. */
60 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
65 arg->attr.cond.default_proj = max_proj;
66 res = new_rd_Proj(db, arg, mode_X, max_proj);
/* Constructs an ASM (inline assembler) node in the given block.
 * The constraint and clobber arrays are copied onto the graph's obstack, so
 * the caller keeps ownership of the passed-in arrays. The node is pinned and
 * has mode_T (its results are reached through Projs). */
70 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
71 ir_asm_constraint *inputs, int n_outs,
72 ir_asm_constraint *outputs, int n_clobber,
73 ident *clobber[], ident *text)
75 ir_graph *irg = get_irn_irg(block);
76 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
78 res->attr.assem.pin_state = op_pin_state_pinned;
/* Allocate obstack copies of the constraint/clobber arrays. */
79 res->attr.assem.input_constraints
80 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
81 res->attr.assem.output_constraints
82 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
83 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
84 res->attr.assem.text = text;
86 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
87 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
88 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
90 res = optimize_node(res);
91 irn_verify_irg(res, irg);
/* Sel node without array indices: selects entity ent from the compound
 * pointed to by objptr (0 index operands). */
95 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
96 ir_node *objptr, ir_entity *ent)
98 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
/* Constructs a SymConst node. SymConsts are placed in the start block of the
 * graph (they have no data dependencies and are constant). */
101 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
102 symconst_symbol value, symconst_kind symkind)
104 ir_node *block = get_irg_start_block(irg);
105 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
106 res->attr.symc.kind = symkind;
107 res->attr.symc.sym = value;
109 res = optimize_node(res);
110 irn_verify_irg(res, irg);
/* SymConst for the address of an entity. */
114 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
117 sym.entity_p = symbol;
118 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
/* SymConst for the offset of an entity within its owner. */
121 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
124 sym.entity_p = symbol;
125 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
/* SymConst for the runtime type tag of a type. */
128 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
132 return new_rd_SymConst(db, irg, mode, sym, symconst_type_tag);
/* SymConst for the size of a type. */
135 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
139 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
/* SymConst for the alignment of a type. */
142 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
146 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
/* new_r_* variants: same as the new_rd_* constructors above, but without
 * debug info (db == NULL). */
149 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
151 return new_rd_Const_long(NULL, irg, mode, value);
153 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
154 symconst_kind symkind)
156 return new_rd_SymConst(NULL, irg, mode, value, symkind);
158 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
161 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
163 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
165 return new_rd_defaultProj(NULL, arg, max_proj);
167 ir_node *new_r_ASM(ir_node *block,
168 int arity, ir_node *in[], ir_asm_constraint *inputs,
169 int n_outs, ir_asm_constraint *outputs,
170 int n_clobber, ident *clobber[], ident *text)
172 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
175 /* ***********************************************************************/
176 /* Methods necessary for automatic Phi node creation */
178 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
179 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
180 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
181 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
183 Call Graph: ( A ---> B == A "calls" B)
185 get_value mature_immBlock
193 get_r_value_internal |
197 new_rd_Phi0 new_rd_Phi_in
199 * *************************************************************************** */
201 /** Creates a Phi node with 0 predecessors. */
/* A Phi0 is a placeholder used while a block is immature; it is later
 * replaced by a real Phi (or a value) in mature_immBlock()/phi_merge(). */
202 static inline ir_node *new_rd_Phi0(ir_node *block, ir_mode *mode)
204 ir_graph *irg = get_irn_irg(block);
205 ir_node *res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
206 irn_verify_irg(res, irg);
211 * Internal constructor of a Phi node by a phi_merge operation.
213 * @param block the block in which the Phi will be constructed
214 * @param mode the mode of the Phi node
215 * @param in the input array of the phi node
216 * @param n_in number of elements in the input array
217 * @param phi0 if non-NULL: the Phi0 node in the same block that represents
218 * the value for which the new Phi is constructed
220 static ir_node *new_rd_Phi_in(ir_node *block, ir_mode *mode,
221 int n_in, ir_node **in, ir_node *phi0)
224 ir_node *res, *known;
225 ir_graph *irg = get_irn_irg(block);
227 /* Allocate a new node on the obstack. The allocation copies the in
229 res = new_ir_node(NULL, irg, block, op_Phi, mode, n_in, in);
230 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, n_in);
232 /* This loop checks whether the Phi has more than one predecessor.
233 If so, it is a real Phi node and we break the loop. Else the
234 Phi node merges the same definition on several paths and therefore
236 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
238 for (i = n_in - 1; i >= 0; --i) {
241 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
243 /* Optimize self referencing Phis: We can't detect them yet properly, as
244 they still refer to the Phi0 they will replace. So replace right now. */
245 if (phi0 && in[i] == phi0)
248 if (in[i] == res || in[i] == known)
257 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* The speculatively allocated Phi is discarded again; notify the edge
 * infrastructure before freeing it from the obstack. */
260 edges_node_deleted(res, irg);
261 obstack_free(irg->obst, res);
263 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
264 order, an enclosing Phi node may become superfluous. */
265 res = optimize_in_place_2(known);
267 exchange(known, res);
272 /* An undefined value, e.g., in unreachable code. */
273 res = new_r_Bad(irg);
276 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
277 irn_verify_irg(res, irg);
278 /* Memory Phis in endless loops must be kept alive.
279 As we can't distinguish these easily we keep all of them alive. */
280 if (is_Phi(res) && mode == mode_M)
281 add_End_keepalive(get_irg_end(irg), res);
287 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
290 * Computes the predecessors for the real phi node, and then
291 * allocates and returns this node. The routine called to allocate the
292 * node might optimize it away and return a real value.
293 * This function must be called with an in-array of proper size.
295 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode,
296 int n_ins, ir_node **ins)
298 ir_graph *irg = get_irn_irg(block);
299 ir_node *prevBlock, *res, *phi0, *phi0_all;
302 /* If this block has no value at pos create a Phi0 and remember it
303 in graph_arr to break recursions.
304 Else we may not set graph_arr as there a later value is remembered. */
306 if (block->attr.block.graph_arr[pos] == NULL) {
308 if (block == get_irg_start_block(irg)) {
309 /* Collapsing to Bad tarvals is no good idea.
310 So we call a user-supplied routine here that deals with this
311 case as appropriate for the given language. Sadly the only
312 help we can give here is the position.
314 Even if all variables are defined before use, it can happen that
315 we get to the start block, if a Cond has been replaced by a tuple
316 (bad, jmp). In this case we call the function needlessly,
317 eventually generating a non-existent error.
318 However, this SHOULD NOT HAPPEN, as bad control flow nodes are
319 intercepted before recurring.
321 if (default_initialize_local_variable != NULL) {
322 ir_node *rem = get_r_cur_block(irg);
/* Temporarily switch the current block so the callback constructs
 * its nodes in the start block; pos - 1 converts back to the
 * user-visible variable index. */
324 set_r_cur_block(irg, block);
325 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
326 set_r_cur_block(irg, rem);
328 block->attr.block.graph_arr[pos] = new_r_Unknown(irg, mode);
330 return block->attr.block.graph_arr[pos];
332 phi0 = new_rd_Phi0(block, mode);
333 block->attr.block.graph_arr[pos] = phi0;
337 /* This loop goes to all predecessor blocks of the block the Phi node
338 is in and there finds the operands of the Phi node by calling
339 get_r_value_internal. */
340 for (i = 1; i <= n_ins; ++i) {
341 ir_node *cf_pred = block->in[i];
342 ir_node *prevCfOp = skip_Proj(cf_pred);
344 if (is_Bad(prevCfOp)) {
345 /* In case a Cond has been optimized we would get right to the start block
346 with an invalid definition. */
347 ins[i-1] = new_r_Bad(irg);
350 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
352 if (!is_Bad(prevBlock)) {
353 ins[i-1] = get_r_value_internal(prevBlock, pos, mode);
355 ins[i-1] = new_r_Bad(irg);
359 /* We want to pass the Phi0 node to the constructor: this finds additional
360 optimization possibilities.
361 The Phi0 node either is allocated in this function, or it comes from
362 a former call to get_r_value_internal(). In this case we may not yet
363 exchange phi0, as this is done in mature_immBlock(). */
365 phi0_all = block->attr.block.graph_arr[pos];
366 if (! is_Phi0(phi0_all) ||
367 get_irn_arity(phi0_all) != 0 ||
368 get_nodes_block(phi0_all) != block)
374 /* After collecting all predecessors into the array ins a new Phi node
375 with these predecessors is created. This constructor contains an
376 optimization: If all predecessors of the Phi node are identical it
377 returns the only operand instead of a new Phi node. */
378 res = new_rd_Phi_in(block, mode, n_ins, ins, phi0_all);
380 /* In case we allocated a Phi0 node at the beginning of this procedure,
381 we need to exchange this Phi0 with the real Phi. */
384 block->attr.block.graph_arr[pos] = res;
391 * This function returns the last definition of a value. In case
392 * this value was last defined in a previous block, Phi nodes are
393 * inserted. If the part of the firm graph containing the definition
394 * is not yet constructed, a dummy Phi node is returned.
396 * @param block the current block
397 * @param pos the value number of the value searched
398 * @param mode the mode of this value (needed for Phi construction)
400 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
403 /* There are 4 cases to treat.
405 1. The block is not mature and we visit it the first time. We can not
406 create a proper Phi node, therefore a Phi0, i.e., a Phi without
407 predecessors is returned. This node is added to the linked list (block
408 attribute "phis") of the containing block to be completed when this block is
409 matured. (Completion will add a new Phi and turn the Phi0 into an Id
412 2. The value is already known in this block, graph_arr[pos] is set and we
413 visit the block the first time. We can return the value without
414 creating any new nodes.
416 3. The block is mature and we visit it the first time. A Phi node needs
417 to be created (phi_merge). If the Phi is not needed, as all its
418 operands are the same value reaching the block through different
419 paths, it's optimized away and the value itself is returned.
421 4. The block is mature, and we visit it the second time. Now two
422 subcases are possible:
423 * The value was computed completely the last time we were here. This
424 is the case if there is no loop. We can return the proper value.
425 * The recursion that visited this node and set the flag did not
426 return yet. We are computing a value in a loop and need to
427 break the recursion. This case only happens if we visited
428 the same block with phi_merge before, which inserted a Phi0.
429 So we return the Phi0.
432 /* case 4 -- already visited. */
433 if (irn_visited(block)) {
434 /* As phi_merge allocates a Phi0 this value is always defined. Here
435 is the critical difference of the two algorithms. */
436 assert(block->attr.block.graph_arr[pos]);
437 return block->attr.block.graph_arr[pos];
440 /* visited the first time */
441 mark_irn_visited(block);
443 /* Get the local valid value */
444 res = block->attr.block.graph_arr[pos];
446 /* case 2 -- If the value is actually computed, return it. */
450 if (block->attr.block.is_matured) { /* case 3 */
452 /* The Phi has the same amount of ins as the corresponding block. */
453 int n_in = get_irn_arity(block);
455 NEW_ARR_A(ir_node *, in, n_in);
457 /* Phi merge collects the predecessors and then creates a node. */
458 res = phi_merge(block, pos, mode, n_in, in);
459 } else { /* case 1 */
460 /* The block is not mature, we don't know how many in's are needed. A Phi
461 with zero predecessors is created. Such a Phi node is called Phi0
462 node. The Phi0 is then added to the list of Phi0 nodes in this block
463 to be matured by mature_immBlock later.
464 The Phi0 has to remember the pos of it's internal value. If the real
465 Phi is computed, pos is used to update the array with the local
467 res = new_rd_Phi0(block, mode);
468 res->attr.phi.u.pos = pos;
/* Chain the Phi0 into the block's list of pending Phis. */
469 res->attr.phi.next = block->attr.block.phis;
470 block->attr.block.phis = res;
473 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
475 /* The local valid value is available now. */
476 block->attr.block.graph_arr[pos] = res;
481 /* ************************************************************************** */
484 * Finalize a Block node, when all control flows are known.
485 * Acceptable parameters are only Block nodes.
487 void mature_immBlock(ir_node *block)
494 assert(is_Block(block));
495 if (get_Block_matured(block))
498 irg = get_irn_irg(block);
499 ins = ARR_LEN(block->in) - 1;
500 /* Fix block parameters */
501 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
503 /* An array for building the Phi nodes. */
504 NEW_ARR_A(ir_node *, nin, ins);
506 /* Traverse a chain of Phi nodes attached to this block and mature
508 for (n = block->attr.block.phis; n; n = next) {
509 inc_irg_visited(irg);
510 next = n->attr.phi.next;
511 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, ins, nin));
514 block->attr.block.is_matured = 1;
516 /* Now, as the block is a finished Firm node, we can optimize it.
517 Since other nodes have been allocated since the block was created
518 we can not free the node on the obstack. Therefore we have to call
520 Unfortunately the optimization does not change a lot, as all allocated
521 nodes refer to the unoptimized node.
522 We can call optimize_in_place_2(), as global cse has no effect on blocks.
524 block = optimize_in_place_2(block);
525 irn_verify_irg(block, irg);
/* new_d_* variants: operate on current_ir_graph / its current block and are
 * only legal during graph construction (phase_building). */
528 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
530 assert(get_irg_phase_state(current_ir_graph) == phase_building);
531 return new_rd_Const_long(db, current_ir_graph, mode, value);
534 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
537 assert(is_Cond(arg) || is_Bad(arg));
538 assert(get_irg_phase_state(current_ir_graph) == phase_building);
540 arg->attr.cond.default_proj = max_proj;
541 res = new_d_Proj(db, arg, mode_X, max_proj);
545 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
548 assert(get_irg_phase_state(current_ir_graph) == phase_building);
549 return new_rd_Sel(db, current_ir_graph->current_block,
550 store, objptr, 0, NULL, ent);
553 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
556 assert(get_irg_phase_state(current_ir_graph) == phase_building);
557 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
560 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
561 ir_asm_constraint *inputs,
562 int n_outs, ir_asm_constraint *outputs, int n_clobber,
563 ident *clobber[], ident *text)
565 assert(get_irg_phase_state(current_ir_graph) == phase_building);
566 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
567 n_outs, outputs, n_clobber, clobber, text);
/* Constructs a Conv node with the "strict" attribute set (a conversion that
 * must not be removed, e.g. to enforce floating point rounding). */
570 ir_node *new_rd_strictConv(dbg_info *dbgi, ir_node *block, ir_node * irn_op, ir_mode * mode)
573 ir_graph *irg = get_Block_irg(block);
578 res = new_ir_node(dbgi, irg, block, op_Conv, mode, 1, in);
579 res->attr.conv.strict = 1;
580 res = optimize_node(res);
581 irn_verify_irg(res, irg);
/* Wrapper without debug info. */
585 ir_node *new_r_strictConv(ir_node *block, ir_node * irn_op, ir_mode * mode)
587 return new_rd_strictConv(NULL, block, irn_op, mode);
/* Wrapper using current_ir_graph's current block (construction phase only). */
590 ir_node *new_d_strictConv(dbg_info *dbgi, ir_node * irn_op, ir_mode * mode)
593 assert(get_irg_phase_state(current_ir_graph) == phase_building);
594 res = new_rd_strictConv(dbgi, current_ir_graph->current_block, irn_op, mode);
/* Wrapper without debug info on the current block. */
598 ir_node *new_strictConv(ir_node * irn_op, ir_mode * mode)
600 return new_d_strictConv(NULL, irn_op, mode);
/* Constructs a remainderless Div node (DivRL): a Div whose no_remainder
 * attribute is set. mode_T result; operands are mem, left, right. */
603 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
606 ir_graph *irg = get_Block_irg(block);
613 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
614 res->attr.divmod.resmode = resmode;
615 res->attr.divmod.no_remainder = 1;
616 res->attr.divmod.exc.pin_state = pin_state;
617 res = optimize_node(res);
618 irn_verify_irg(res, irg);
/* Wrapper without debug info. */
622 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
624 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
/* Wrapper using current_ir_graph's current block (construction phase only). */
627 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
630 assert(get_irg_phase_state(current_ir_graph) == phase_building);
631 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
/* Wrapper without debug info on the current block. */
635 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
637 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
/* Creates an immature Block: its control-flow predecessors are added later
 * via add_immBlock_pred() and fixed by mature_immBlock(). Also allocates the
 * per-block graph_arr used for automatic SSA/Phi construction. */
640 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
644 assert(get_irg_phase_state(irg) == phase_building);
645 /* creates a new dynamic in-array as length of in is -1 */
646 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
648 res->attr.block.is_matured = 0;
649 res->attr.block.is_dead = 0;
650 res->attr.block.irg.irg = irg;
651 res->attr.block.backedge = NULL;
652 res->attr.block.in_cg = NULL;
653 res->attr.block.cg_backedge = NULL;
654 res->attr.block.extblk = NULL;
655 res->attr.block.region = NULL;
656 res->attr.block.entity = NULL;
658 set_Block_block_visited(res, 0);
660 /* Create and initialize array for Phi-node construction. */
661 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
662 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
664 /* Immature block may not be optimized! */
665 irn_verify_irg(res, irg);
/* Immature-block wrappers: without debug info and/or on current_ir_graph. */
670 ir_node *new_r_immBlock(ir_graph *irg)
672 return new_rd_immBlock(NULL, irg);
675 ir_node *new_d_immBlock(dbg_info *dbgi)
677 return new_rd_immBlock(dbgi, current_ir_graph);
680 ir_node *new_immBlock(void)
682 return new_rd_immBlock(NULL, current_ir_graph);
/* Appends the control-flow predecessor jmp to an (immature) block's in-array.
 * Only legal before the block has been matured. */
685 void add_immBlock_pred(ir_node *block, ir_node *jmp)
687 int n = ARR_LEN(block->in) - 1;
689 assert(is_Block(block) && "Error: Must be a Block");
690 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
691 assert(is_ir_node(jmp));
693 ARR_APP1(ir_node *, block->in, jmp);
695 hook_set_irn_n(block, n, jmp, NULL);
/* Accessors for the graph's "current block" used by the new_d_*/new_*
 * constructors during construction. */
698 void set_cur_block(ir_node *target)
700 current_ir_graph->current_block = target;
703 void set_r_cur_block(ir_graph *irg, ir_node *target)
705 irg->current_block = target;
708 ir_node *get_r_cur_block(ir_graph *irg)
710 return irg->current_block;
713 ir_node *get_cur_block(void)
715 return get_r_cur_block(current_ir_graph);
/* Returns the current definition of local variable pos in the current block,
 * constructing Phi nodes on demand. pos is the user-visible index; internally
 * it is shifted by 1 because slot 0 of graph_arr holds the memory state. */
718 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
720 assert(get_irg_phase_state(irg) == phase_building);
721 inc_irg_visited(irg);
725 return get_r_value_internal(irg->current_block, pos + 1, mode);
728 ir_node *get_value(int pos, ir_mode *mode)
730 return get_r_value(current_ir_graph, pos, mode);
734 * helper function for guess_mode: recursively look for a definition for
735 * local variable @p pos, returns its mode if found.
737 static ir_mode *guess_recursively(ir_node *block, int pos)
/* Visited flag breaks cycles in the CFG. */
743 if (irn_visited(block))
745 mark_irn_visited(block);
747 /* already have a definition -> we can simply look at its mode */
748 value = block->attr.block.graph_arr[pos];
750 return get_irn_mode(value);
752 /* now we try to guess, by looking at the predecessor blocks */
753 n_preds = get_irn_arity(block);
754 for (i = 0; i < n_preds; ++i) {
755 ir_node *pred_block = get_Block_cfgpred_block(block, i);
756 ir_mode *mode = guess_recursively(pred_block, pos);
761 /* no way to guess */
/* Guesses the mode of local variable pos by searching existing definitions,
 * first in the current block, then recursively through predecessor blocks.
 * Uses the IRN_VISITED resource to avoid revisiting blocks. */
765 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
767 ir_node *block = irg->current_block;
768 ir_node *value = block->attr.block.graph_arr[pos+1];
771 /* already have a definition -> we can simply look at its mode */
773 return get_irn_mode(value);
775 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
776 inc_irg_visited(irg);
777 mode = guess_recursively(block, pos+1);
778 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
/* Convenience wrapper on current_ir_graph. */
783 ir_mode *ir_guess_mode(int pos)
785 return ir_r_guess_mode(current_ir_graph, pos);
/* Remembers value as the current definition of local variable pos in the
 * current block (pos is shifted by 1: slot 0 is the memory state). */
788 void set_r_value(ir_graph *irg, int pos, ir_node *value)
790 assert(get_irg_phase_state(irg) == phase_building);
792 assert(pos+1 < irg->n_loc);
793 assert(is_ir_node(value));
794 irg->current_block->attr.block.graph_arr[pos + 1] = value;
797 void set_value(int pos, ir_node *value)
799 set_r_value(current_ir_graph, pos, value);
/* Searches the current block's local-value array for value and returns its
 * slot index (searching from the highest slot down; slot 0, the memory
 * state, is excluded). */
802 int r_find_value(ir_graph *irg, ir_node *value)
805 ir_node *bl = irg->current_block;
807 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
808 if (bl->attr.block.graph_arr[i] == value)
813 int find_value(ir_node *value)
815 return r_find_value(current_ir_graph, value);
/* Returns the current memory state (slot 0 of graph_arr, mode_M), inserting
 * Phi nodes on demand just like get_r_value(). */
818 ir_node *get_r_store(ir_graph *irg)
820 assert(get_irg_phase_state(irg) == phase_building);
821 inc_irg_visited(irg);
822 return get_r_value_internal(irg->current_block, 0, mode_M);
825 ir_node *get_store(void)
827 return get_r_store(current_ir_graph);
/* Sets the current memory state. If the auto-create-Sync option is enabled,
 * consecutive non-volatile Loads are rewired: a Load after a Sync is moved up
 * and added as another Sync predecessor; a Load after a Load creates a new
 * two-input Sync, allowing independent Loads to run in parallel. */
830 void set_r_store(ir_graph *irg, ir_node *store)
832 ir_node *load, *pload, *pred, *in[2];
834 assert(get_irg_phase_state(irg) == phase_building);
835 /* Beware: due to dead code elimination, a store might become a Bad node even in
836 the construction phase. */
837 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
839 if (get_opt_auto_create_sync()) {
840 /* handle non-volatile Load nodes by automatically creating Sync's */
841 load = skip_Proj(store);
842 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
843 pred = get_Load_mem(load);
846 /* a Load after a Sync: move it up */
847 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
849 set_Load_mem(load, get_memop_mem(mem));
850 add_Sync_pred(pred, store);
853 pload = skip_Proj(pred);
854 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
855 /* a Load after a Load: create a new Sync */
856 set_Load_mem(load, get_Load_mem(pload));
860 store = new_r_Sync(irg->current_block, 2, in);
/* Slot 0 of graph_arr holds the memory state. */
865 irg->current_block->attr.block.graph_arr[0] = store;
868 void set_store(ir_node *store)
870 set_r_store(current_ir_graph, store);
/* Adds ka to the End node's keep-alive list so it survives dead node
 * elimination even if otherwise unreachable. */
873 void r_keep_alive(ir_graph *irg, ir_node *ka)
875 add_End_keepalive(get_irg_end(irg), ka);
878 void keep_alive(ir_node *ka)
880 r_keep_alive(current_ir_graph, ka);
/* Installs the language-specific callback used for reads of uninitialized
 * local variables (see default_initialize_local_variable / phi_merge). */
883 void ir_set_uninitialized_local_variable_func(
884 uninitialized_local_variable_func_t *func)
886 default_initialize_local_variable = func;
/* Ends the construction phase of one graph: further new_d_*/new_* calls
 * asserting phase_building become illegal. */
889 void irg_finalize_cons(ir_graph *irg)
891 set_irg_phase_state(irg, phase_high);
/* Ends the construction phase of every graph in the program. */
894 void irp_finalize_cons(void)
897 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
898 irg_finalize_cons(get_irp_irg(i));
900 irp->phase_state = phase_high;
/* new_* variants: no debug info, on current_ir_graph's current block. */
903 ir_node *new_Const_long(ir_mode *mode, long value)
905 return new_d_Const_long(NULL, mode, value);
908 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
910 return new_d_SymConst(NULL, mode, value, kind);
912 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
914 return new_d_simpleSel(NULL, store, objptr, ent);
916 ir_node *new_defaultProj(ir_node *arg, long max_proj)
918 return new_d_defaultProj(NULL, arg, max_proj);
920 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
921 int n_outs, ir_asm_constraint *outputs,
922 int n_clobber, ident *clobber[], ident *text)
924 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
/* Creates the graph's Anchor node (mode_ANY) with anchor_last slots, all
 * initially NULL. */
927 ir_node *new_r_Anchor(ir_graph *irg)
929 ir_node *in[anchor_last];
931 memset(in, 0, sizeof(in));
932 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);
933 res->attr.anchor.irg.irg = irg;
935 /* hack to get get_irn_irg working: set block to ourself and allow
936 * get_Block_irg for anchor */
/* Creates a mature Block with the given predecessors WITHOUT running
 * optimize_node on it. During phase_building it also allocates the zeroed
 * graph_arr needed for Phi construction. */
942 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
944 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
945 res->attr.block.irg.irg = irg;
946 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
947 set_Block_matured(res, 1);
948 /* Create and initialize array for Phi-node construction. */
949 if (get_irg_phase_state(irg) == phase_building) {
950 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
951 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
953 irn_verify_irg(res, irg);