2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
47 #include "gen_ir_cons.c.inl"
50 * Language dependent variable initialization callback.
/* Frontend-supplied callback used by phi_merge() when an uninitialized local
 * variable is read in the start block; NULL means "create an Unknown node". */
52 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* Constructs the Start node of irg: placed in the start block, mode_T, no
 * predecessors.  Runs local optimization and verification before returning. */
54 ir_node *new_rd_Start(dbg_info *db, ir_graph *irg)
56 ir_node *block = get_irg_start_block(irg);
57 ir_node *res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
59 res = optimize_node(res);
60 irn_verify_irg(res, irg);
/* Constructs the End node of irg: placed in the end block, mode_X.  The -1
 * arity requests a dynamic in-array (keep-alives are appended later). */
64 ir_node *new_rd_End(dbg_info *db, ir_graph *irg)
66 ir_node *block = get_irg_end_block(irg);
67 ir_node *res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
69 res = optimize_node(res);
70 irn_verify_irg(res, irg);
75 * Creates a Phi node with all predecessors. Calling this constructor
76 * is only allowed if the corresponding block is mature.
78 ir_node *new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in,
81 ir_graph *irg = get_irn_irg(block);
82 ir_node *res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
/* One backedge flag per predecessor, allocated on the graph's obstack. */
83 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
85 res = optimize_node(res);
86 irn_verify_irg(res, irg);
88 /* Memory Phis in endless loops must be kept alive.
89 As we can't distinguish these easily we keep all of them alive. */
/* optimize_node() may have folded the Phi to one of its operands, hence the
 * is_Phi() re-check before registering the keep-alive edge. */
90 if (is_Phi(res) && mode == mode_M)
91 add_End_keepalive(get_irg_end(irg), res);
/* Constructs a Const node carrying tarval con with type attribute tp.
 * Constants always live in the start block; the mode is taken from the tarval. */
95 ir_node *new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con,
98 ir_node *block = get_irg_start_block(irg);
99 ir_mode *mode = get_tarval_mode(con);
100 ir_node *res = new_ir_node(db, irg, block, op_Const, mode, 0, NULL);
101 res->attr.con.tarval = con;
102 set_Const_type(res, tp); /* Call method because of complex assertion. */
103 res = optimize_node (res);
/* CSE in optimize_node() may return an existing Const; it must agree on tp. */
104 assert(get_Const_type(res) == tp);
105 irn_verify_irg(res, irg);
/* Convenience wrapper: Const with the unknown type attribute. */
110 ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
112 return new_rd_Const_type(db, irg, con, firm_unknown_type);
/* Convenience wrapper: build the tarval from a C long in the given mode. */
115 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
118 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
/* Creates the default Proj (mode_X) of a Cond node.  Note the side effect:
 * the Cond's default_proj attribute is updated to max_proj. */
121 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
125 assert(is_Cond(arg));
126 arg->attr.cond.default_proj = max_proj;
127 res = new_rd_Proj(db, arg, mode_X, max_proj);
/* Constructs a SymConst node (symbolic constant: entity address/offset, type
 * tag/size/alignment, depending on symkind).  Placed in the start block. */
131 ir_node *new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_mode *mode,
132 symconst_symbol value, symconst_kind symkind,
135 ir_node *block = get_irg_start_block(irg);
136 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
138 res->attr.symc.kind = symkind;
139 res->attr.symc.sym = value;
140 res->attr.symc.tp = tp;
142 res = optimize_node(res);
143 irn_verify_irg(res, irg);
/* Constructs a Sync node (mode_M) joining arity memory predecessors.  The node
 * is created with a dynamic in-array (-1) and the preds are appended one by
 * one; optimization runs only after all preds are in place. */
147 ir_node *new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[])
149 ir_graph *irg = get_irn_irg(block);
150 ir_node *res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
153 for (i = 0; i < arity; ++i)
154 add_Sync_pred(res, in[i]);
156 res = optimize_node(res);
157 irn_verify_irg(res, irg);
/* Constructs an ASM node (inline assembler, mode_T, pinned).  The constraint
 * and clobber arrays are copied onto the graph's obstack so the caller may
 * free or reuse its own arrays afterwards. */
161 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
162 ir_asm_constraint *inputs, int n_outs,
163 ir_asm_constraint *outputs, int n_clobber,
164 ident *clobber[], ident *text)
166 ir_graph *irg = get_irn_irg(block);
167 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
169 res->attr.assem.pin_state = op_pin_state_pinned;
170 res->attr.assem.input_constraints
171 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
172 res->attr.assem.output_constraints
173 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
174 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
175 res->attr.assem.text = text;
/* Deep-copy caller-provided constraint/clobber data into the obstack arrays. */
177 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
178 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
179 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
181 res = optimize_node(res);
182 irn_verify_irg(res, irg);
/* Sel without array indices (simple member/entity selection). */
186 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
187 ir_node *objptr, ir_entity *ent)
189 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
/* SymConst with the unknown type attribute. */
192 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
193 symconst_symbol value, symconst_kind symkind)
195 return new_rd_SymConst_type(db, irg, mode, value, symkind, firm_unknown_type);
/* The wrappers below fill a symconst_symbol union with the entity/type and
 * dispatch to new_rd_SymConst_type() with the matching symconst_kind. */
198 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
201 sym.entity_p = symbol;
202 return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
205 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
208 sym.entity_p = symbol;
209 return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
212 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
216 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
219 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
223 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
226 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
230 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
/* new_r_* interface: same constructors without debug info (db == NULL). */
233 ir_node *new_r_Start(ir_graph *irg)
235 return new_rd_Start(NULL, irg);
237 ir_node *new_r_End(ir_graph *irg)
239 return new_rd_End(NULL, irg);
241 ir_node *new_r_Const(ir_graph *irg, tarval *con)
243 return new_rd_Const(NULL, irg, con);
245 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
247 return new_rd_Const_long(NULL, irg, mode, value);
249 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp)
251 return new_rd_Const_type(NULL, irg, con, tp);
253 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
254 symconst_kind symkind)
256 return new_rd_SymConst(NULL, irg, mode, value, symkind);
258 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
261 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
263 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
265 return new_rd_Phi(NULL, block, arity, in, mode);
267 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[])
269 return new_rd_Sync(NULL, block, arity, in);
271 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
273 return new_rd_defaultProj(NULL, arg, max_proj);
/* Bad and NoMem are unique per graph: return the cached singletons. */
275 ir_node *new_r_Bad(ir_graph *irg)
277 return get_irg_bad(irg);
279 ir_node *new_r_NoMem(ir_graph *irg)
281 return get_irg_no_mem(irg);
283 ir_node *new_r_ASM(ir_node *block,
284 int arity, ir_node *in[], ir_asm_constraint *inputs,
285 int n_outs, ir_asm_constraint *outputs,
286 int n_clobber, ident *clobber[], ident *text)
288 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
291 /** ********************/
292 /** public interfaces */
293 /** construction tools */
/* new_d_* interface: like new_rd_* but implicitly uses current_ir_graph and
 * its current block; only legal while the graph is in phase_building. */
295 ir_node *new_d_Start(dbg_info *db)
299 assert(get_irg_phase_state(current_ir_graph) == phase_building);
300 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
301 op_Start, mode_T, 0, NULL);
303 res = optimize_node(res);
304 irn_verify_irg(res, current_ir_graph);
308 ir_node *new_d_End(dbg_info *db)
311 assert(get_irg_phase_state(current_ir_graph) == phase_building);
312 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
313 op_End, mode_X, -1, NULL);
314 res = optimize_node(res);
315 irn_verify_irg(res, current_ir_graph);
320 /* ***********************************************************************/
321 /* Methods necessary for automatic Phi node creation */
323 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
324 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
325 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
326 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
328 Call Graph: ( A ---> B == A "calls" B)
330 get_value mature_immBlock
338 get_r_value_internal |
342 new_rd_Phi0 new_rd_Phi_in
344 * *************************************************************************** */
346 /** Creates a Phi node with 0 predecessors. */
/* Placeholder used by the SSA construction while a block is still immature;
 * deliberately NOT passed through optimize_node() (no CSE on Phi0s) —
 * it will be exchanged for the real Phi in mature_immBlock(). */
347 static inline ir_node *new_rd_Phi0(ir_node *block, ir_mode *mode)
349 ir_graph *irg = get_irn_irg(block);
350 ir_node *res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
351 irn_verify_irg(res, irg);
356 * Internal constructor of a Phi node by a phi_merge operation.
358 * @param block the block in which the Phi will be constructed
359 * @param mode the mode of the Phi node
360 * @param in the input array of the phi node
361 * @param n_in number of elements in the input array
362 * @param phi0 if non-NULL: the Phi0 node in the same block that represents
363 * the value for which the new Phi is constructed
365 static ir_node *new_rd_Phi_in(ir_node *block, ir_mode *mode,
366 int n_in, ir_node **in, ir_node *phi0)
369 ir_node *res, *known;
370 ir_graph *irg = get_irn_irg(block);
372 /* Allocate a new node on the obstack. The allocation copies the in
374 res = new_ir_node(NULL, irg, block, op_Phi, mode, n_in, in);
375 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, n_in);
377 /* This loop checks whether the Phi has more than one predecessor.
378 If so, it is a real Phi node and we break the loop. Else the
379 Phi node merges the same definition on several paths and therefore
381 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
383 for (i = n_in - 1; i >= 0; --i) {
386 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
388 /* Optimize self referencing Phis: We can't detect them yet properly, as
389 they still refer to the Phi0 they will replace. So replace right now. */
390 if (phi0 && in[i] == phi0)
393 if (in[i] == res || in[i] == known)
402 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* The tentative Phi was never published; it is safe to free it again. */
405 edges_node_deleted(res, current_ir_graph);
406 obstack_free(current_ir_graph->obst, res);
408 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
409 order, an enclosing Phi node may become superfluous. */
410 res = optimize_in_place_2(known);
412 exchange(known, res);
417 /* An undefined value, e.g., in unreachable code. */
418 res = new_r_Bad(irg);
421 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
422 irn_verify_irg(res, irg);
423 /* Memory Phis in endless loops must be kept alive.
424 As we can't distinguish these easily we keep all of them alive. */
425 if (is_Phi(res) && mode == mode_M)
426 add_End_keepalive(get_irg_end(irg), res);
/* Forward declaration: phi_merge and get_r_value_internal recurse mutually. */
432 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
435 * Computes the predecessors for the real phi node, and then
436 * allocates and returns this node. The routine called to allocate the
437 * node might optimize it away and return a real value.
438 * This function must be called with an in-array of proper size.
440 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode,
441 int n_ins, ir_node **ins)
443 ir_graph *irg = get_irn_irg(block);
444 ir_node *prevBlock, *res, *phi0, *phi0_all;
447 /* If this block has no value at pos create a Phi0 and remember it
448 in graph_arr to break recursions.
449 Else we may not set graph_arr as there a later value is remembered. */
451 if (block->attr.block.graph_arr[pos] == NULL) {
453 if (block == get_irg_start_block(irg)) {
454 /* Collapsing to Bad tarvals is no good idea.
455 So we call a user-supplied routine here that deals with this
456 case as appropriate for the given language. Sadly the only
457 help we can give here is the position.
459 Even if all variables are defined before use, it can happen that
460 we get to the start block, if a Cond has been replaced by a tuple
461 (bad, jmp). In this case we call the function needlessly,
462 eventually generating a non-existent error.
463 However, this SHOULD NOT HAPPEN, as bad control flow nodes are
464 intercepted before recurring.
466 if (default_initialize_local_variable != NULL) {
467 ir_node *rem = get_r_cur_block(irg);
/* Run the frontend callback with the start block as current block, then
 * restore the previous current block. */
469 set_r_cur_block(irg, block);
470 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
471 set_r_cur_block(irg, rem);
473 block->attr.block.graph_arr[pos] = new_r_Unknown(irg, mode);
475 return block->attr.block.graph_arr[pos];
477 phi0 = new_rd_Phi0(block, mode);
478 block->attr.block.graph_arr[pos] = phi0;
482 /* This loop goes to all predecessor blocks of the block the Phi node
483 is in and there finds the operands of the Phi node by calling
484 get_r_value_internal. */
485 for (i = 1; i <= n_ins; ++i) {
486 ir_node *cf_pred = block->in[i];
487 ir_node *prevCfOp = skip_Proj(cf_pred);
489 if (is_Bad(prevCfOp)) {
490 /* In case a Cond has been optimized we would get right to the start block
491 with an invalid definition. */
492 ins[i-1] = new_r_Bad(irg);
495 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
497 if (!is_Bad(prevBlock)) {
498 ins[i-1] = get_r_value_internal(prevBlock, pos, mode);
500 ins[i-1] = new_r_Bad(irg);
504 /* We want to pass the Phi0 node to the constructor: this finds additional
505 optimization possibilities.
506 The Phi0 node either is allocated in this function, or it comes from
507 a former call to get_r_value_internal(). In this case we may not yet
508 exchange phi0, as this is done in mature_immBlock(). */
510 phi0_all = block->attr.block.graph_arr[pos];
511 if (! is_Phi0(phi0_all) ||
512 get_irn_arity(phi0_all) != 0 ||
513 get_nodes_block(phi0_all) != block)
519 /* After collecting all predecessors into the array ins a new Phi node
520 with these predecessors is created. This constructor contains an
521 optimization: If all predecessors of the Phi node are identical it
522 returns the only operand instead of a new Phi node. */
523 res = new_rd_Phi_in(block, mode, n_ins, ins, phi0_all);
525 /* In case we allocated a Phi0 node at the beginning of this procedure,
526 we need to exchange this Phi0 with the real Phi. */
529 block->attr.block.graph_arr[pos] = res;
536 * This function returns the last definition of a value. In case
537 * this value was last defined in a previous block, Phi nodes are
538 * inserted. If the part of the firm graph containing the definition
539 * is not yet constructed, a dummy Phi node is returned.
541 * @param block the current block
542 * @param pos the value number of the value searched
543 * @param mode the mode of this value (needed for Phi construction)
545 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
548 /* There are 4 cases to treat.
550 1. The block is not mature and we visit it the first time. We can not
551 create a proper Phi node, therefore a Phi0, i.e., a Phi without
552 predecessors is returned. This node is added to the linked list (block
553 attribute "phis") of the containing block to be completed when this block is
554 matured. (Completion will add a new Phi and turn the Phi0 into an Id
557 2. The value is already known in this block, graph_arr[pos] is set and we
558 visit the block the first time. We can return the value without
559 creating any new nodes.
561 3. The block is mature and we visit it the first time. A Phi node needs
562 to be created (phi_merge). If the Phi is not needed, as all its
563 operands are the same value reaching the block through different
564 paths, it's optimized away and the value itself is returned.
566 4. The block is mature, and we visit it the second time. Now two
567 subcases are possible:
568 * The value was computed completely the last time we were here. This
569 is the case if there is no loop. We can return the proper value.
570 * The recursion that visited this node and set the flag did not
571 return yet. We are computing a value in a loop and need to
572 break the recursion. This case only happens if we visited
573 the same block with phi_merge before, which inserted a Phi0.
574 So we return the Phi0.
577 /* case 4 -- already visited. */
578 if (irn_visited(block)) {
579 /* As phi_merge allocates a Phi0 this value is always defined. Here
580 is the critical difference of the two algorithms. */
581 assert(block->attr.block.graph_arr[pos]);
582 return block->attr.block.graph_arr[pos];
585 /* visited the first time */
586 mark_irn_visited(block);
588 /* Get the local valid value */
589 res = block->attr.block.graph_arr[pos];
591 /* case 2 -- If the value is actually computed, return it. */
595 if (block->attr.block.is_matured) { /* case 3 */
597 /* The Phi has the same amount of ins as the corresponding block. */
598 int n_in = get_irn_arity(block);
/* NEW_ARR_A allocates the scratch in-array on the stack (alloca). */
600 NEW_ARR_A(ir_node *, in, n_in);
602 /* Phi merge collects the predecessors and then creates a node. */
603 res = phi_merge(block, pos, mode, n_in, in);
604 } else { /* case 1 */
605 /* The block is not mature, we don't know how many in's are needed. A Phi
606 with zero predecessors is created. Such a Phi node is called Phi0
607 node. The Phi0 is then added to the list of Phi0 nodes in this block
608 to be matured by mature_immBlock later.
609 The Phi0 has to remember the pos of its internal value. If the real
610 Phi is computed, pos is used to update the array with the local
612 res = new_rd_Phi0(block, mode);
613 res->attr.phi.u.pos = pos;
614 res->attr.phi.next = block->attr.block.phis;
615 block->attr.block.phis = res;
618 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
620 /* The local valid value is available now. */
621 block->attr.block.graph_arr[pos] = res;
626 /* ************************************************************************** */
629 * Finalize a Block node, when all control flows are known.
630 * Acceptable parameters are only Block nodes.
632 void mature_immBlock(ir_node *block)
638 assert(is_Block(block));
/* Maturing is idempotent: a second call is a no-op. */
639 if (!get_Block_matured(block)) {
640 ir_graph *irg = current_ir_graph;
/* block->in[0] is the block's own slot; the cfg predecessors follow. */
642 ins = ARR_LEN(block->in) - 1;
643 /* Fix block parameters */
644 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
646 /* An array for building the Phi nodes. */
647 NEW_ARR_A(ir_node *, nin, ins);
649 /* Traverse a chain of Phi nodes attached to this block and mature
651 for (n = block->attr.block.phis; n; n = next) {
652 inc_irg_visited(irg);
/* Save the chain link before exchange() invalidates n. */
653 next = n->attr.phi.next;
654 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, ins, nin));
657 block->attr.block.is_matured = 1;
659 /* Now, as the block is a finished Firm node, we can optimize it.
660 Since other nodes have been allocated since the block was created
661 we can not free the node on the obstack. Therefore we have to call
663 Unfortunately the optimization does not change a lot, as all allocated
664 nodes refer to the unoptimized node.
665 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
666 block = optimize_in_place_2(block);
667 irn_verify_irg(block, irg);
/* new_d_* wrappers: forward to the new_rd_*/new_r_* constructors using
 * current_ir_graph (and its current block); all assert phase_building. */
671 ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
673 assert(get_irg_phase_state(current_ir_graph) == phase_building);
674 return new_rd_Phi(db, current_ir_graph->current_block, arity, in, mode);
677 ir_node *new_d_Const(dbg_info *db, tarval *con)
679 assert(get_irg_phase_state(current_ir_graph) == phase_building);
680 return new_rd_Const(db, current_ir_graph, con);
683 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
685 assert(get_irg_phase_state(current_ir_graph) == phase_building);
686 return new_rd_Const_long(db, current_ir_graph, mode, value);
689 ir_node *new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp)
691 assert(get_irg_phase_state(current_ir_graph) == phase_building);
692 return new_rd_Const_type(db, current_ir_graph, con, tp);
695 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
698 assert(is_Cond(arg));
699 assert(get_irg_phase_state(current_ir_graph) == phase_building);
/* Side effect as in new_rd_defaultProj: updates the Cond's default_proj. */
700 arg->attr.cond.default_proj = max_proj;
701 res = new_d_Proj(db, arg, mode_X, max_proj);
705 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
708 assert(get_irg_phase_state(current_ir_graph) == phase_building);
709 return new_rd_Sel(db, current_ir_graph->current_block,
710 store, objptr, 0, NULL, ent);
713 ir_node *new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value,
714 symconst_kind kind, ir_type *tp)
716 assert(get_irg_phase_state(current_ir_graph) == phase_building);
717 return new_rd_SymConst_type(db, current_ir_graph, mode, value, kind, tp);
720 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
723 assert(get_irg_phase_state(current_ir_graph) == phase_building);
724 return new_rd_SymConst_type(db, current_ir_graph, mode, value, kind,
728 ir_node *new_d_Sync(dbg_info *db, int arity, ir_node *in[])
730 assert(get_irg_phase_state(current_ir_graph) == phase_building);
731 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
734 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
735 ir_asm_constraint *inputs,
736 int n_outs, ir_asm_constraint *outputs, int n_clobber,
737 ident *clobber[], ident *text)
739 assert(get_irg_phase_state(current_ir_graph) == phase_building);
740 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
741 n_outs, outputs, n_clobber, clobber, text);
/* Creates an immature Block: no cfg predecessors yet (they are added via
 * add_immBlock_pred) and no optimization until mature_immBlock() is called. */
744 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
748 assert(get_irg_phase_state(irg) == phase_building);
749 /* creates a new dynamic in-array as length of in is -1 */
750 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
752 res->attr.block.is_matured = 0;
753 res->attr.block.is_dead = 0;
754 res->attr.block.irg.irg = irg;
755 res->attr.block.backedge = NULL;
756 res->attr.block.in_cg = NULL;
757 res->attr.block.cg_backedge = NULL;
758 res->attr.block.extblk = NULL;
759 res->attr.block.region = NULL;
760 res->attr.block.entity = NULL;
762 set_Block_block_visited(res, 0);
764 /* Create and initialize array for Phi-node construction. */
/* graph_arr has one slot per local value (irg->n_loc); slot 0 is memory. */
765 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
766 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
768 /* Immature block may not be optimized! */
769 irn_verify_irg(res, irg);
/* Convenience wrappers around new_rd_immBlock (no dbg info / implicit graph). */
774 ir_node *new_r_immBlock(ir_graph *irg)
776 return new_rd_immBlock(NULL, irg);
779 ir_node *new_d_immBlock(dbg_info *dbgi)
781 return new_rd_immBlock(dbgi, current_ir_graph);
784 ir_node *new_immBlock(void)
786 return new_rd_immBlock(NULL, current_ir_graph);
/* Appends control-flow predecessor jmp to an immature block's in-array.
 * Only legal before mature_immBlock() has been called on the block. */
789 void add_immBlock_pred(ir_node *block, ir_node *jmp)
791 int n = ARR_LEN(block->in) - 1;
793 assert(is_Block(block) && "Error: Must be a Block");
794 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
795 assert(is_ir_node(jmp));
797 ARR_APP1(ir_node *, block->in, jmp);
799 hook_set_irn_n(block, n, jmp, NULL);
/* Getters/setters for the block new nodes are currently placed into. */
802 void set_cur_block(ir_node *target)
804 current_ir_graph->current_block = target;
807 void set_r_cur_block(ir_graph *irg, ir_node *target)
809 irg->current_block = target;
812 ir_node *get_r_cur_block(ir_graph *irg)
814 return irg->current_block;
817 ir_node *get_cur_block(void)
819 return get_r_cur_block(current_ir_graph);
/* Returns the current SSA value of local variable pos (triggers Phi
 * construction via get_r_value_internal; slot pos+1 because 0 holds memory). */
822 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
824 assert(get_irg_phase_state(irg) == phase_building);
825 inc_irg_visited(irg);
829 return get_r_value_internal(irg->current_block, pos + 1, mode);
832 ir_node *get_value(int pos, ir_mode *mode)
834 return get_r_value(current_ir_graph, pos, mode);
838 * helper function for guess_mode: recursively look for a definition for
839 * local variable @p pos, returns its mode if found.
841 static ir_mode *guess_recursively(ir_node *block, int pos)
/* visited flag guards against cycles in the cfg. */
847 if (irn_visited(block))
849 mark_irn_visited(block);
851 /* already have a definition -> we can simply look at its mode */
852 value = block->attr.block.graph_arr[pos];
854 return get_irn_mode(value);
856 /* now we try to guess, by looking at the predecessor blocks */
857 n_preds = get_irn_arity(block);
858 for (i = 0; i < n_preds; ++i) {
859 ir_node *pred_block = get_Block_cfgpred_block(block, i);
860 ir_mode *mode = guess_recursively(pred_block, pos);
865 /* no way to guess */
/* Guesses the mode of local variable pos by searching existing definitions
 * in the current block and, recursively, its predecessors. */
869 ir_mode *ir_guess_mode(int pos)
871 ir_graph *irg = current_ir_graph;
872 ir_node *block = irg->current_block;
873 ir_node *value = block->attr.block.graph_arr[pos+1];
876 /* already have a definition -> we can simply look at its mode */
878 return get_irn_mode(value);
/* Reserve the visited flag for the recursive walk, release it afterwards. */
880 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
881 inc_irg_visited(current_ir_graph);
882 mode = guess_recursively(block, pos+1);
883 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
/* Remembers value as the current definition of local variable pos in the
 * current block (slot pos+1; slot 0 is reserved for the memory state). */
888 void set_r_value(ir_graph *irg, int pos, ir_node *value)
890 assert(get_irg_phase_state(irg) == phase_building);
892 assert(pos+1 < irg->n_loc);
893 assert(is_ir_node(value));
894 irg->current_block->attr.block.graph_arr[pos + 1] = value;
897 void set_value(int pos, ir_node *value)
899 set_r_value(current_ir_graph, pos, value);
/* Reverse lookup: returns the variable slot holding value in the current
 * block's graph_arr (searched from the back, skipping the memory slot 0). */
902 int find_value(ir_node *value)
905 ir_node *bl = current_ir_graph->current_block;
907 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
908 if (bl->attr.block.graph_arr[i] == value)
/* Returns the current memory state (variable slot 0) of the current block. */
913 ir_node *get_r_store(ir_graph *irg)
915 assert(get_irg_phase_state(irg) == phase_building);
916 inc_irg_visited(irg);
917 return get_r_value_internal(irg->current_block, 0, mode_M);
920 ir_node *get_store(void)
922 return get_r_store(current_ir_graph);
/* Sets the current memory state.  If the auto-create-Sync option is on,
 * consecutive non-volatile Loads are rewired so their memory inputs are
 * merged through Sync nodes instead of forming a serial chain. */
925 void set_r_store(ir_graph *irg, ir_node *store)
927 ir_node *load, *pload, *pred, *in[2];
929 assert(get_irg_phase_state(irg) == phase_building);
930 /* Beware: due to dead code elimination, a store might become a Bad node even in
931 the construction phase. */
932 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
934 if (get_opt_auto_create_sync()) {
935 /* handle non-volatile Load nodes by automatically creating Sync's */
936 load = skip_Proj(store);
937 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
938 pred = get_Load_mem(load);
941 /* a Load after a Sync: move it up */
942 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
944 set_Load_mem(load, get_memop_mem(mem));
945 add_Sync_pred(pred, store);
948 pload = skip_Proj(pred);
949 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
950 /* a Load after a Load: create a new Sync */
951 set_Load_mem(load, get_Load_mem(pload));
955 store = new_r_Sync(irg->current_block, 2, in);
960 irg->current_block->attr.block.graph_arr[0] = store;
/* Adds ka as keep-alive edge to the End node so it survives dead node
 * elimination even when otherwise unreachable. */
968 void keep_alive(ir_node *ka)
970 add_End_keepalive(get_irg_end(current_ir_graph), ka);
/* Installs the frontend callback used for reads of uninitialized locals
 * (see default_initialize_local_variable). */
973 void ir_set_uninitialized_local_variable_func(
974 uninitialized_local_variable_func_t *func)
976 default_initialize_local_variable = func;
/* Leaves phase_building: construction-only operations become illegal. */
979 void irg_finalize_cons(ir_graph *irg)
981 set_irg_phase_state(irg, phase_high);
984 void irp_finalize_cons(void)
987 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
988 irg_finalize_cons(get_irp_irg(i));
990 irp->phase_state = phase_high;
/* new_* interface: new_d_* without debug info, on current_ir_graph. */
993 ir_node *new_Start(void)
995 return new_d_Start(NULL);
997 ir_node *new_End(void)
999 return new_d_End(NULL);
1001 ir_node *new_Const(tarval *con)
1003 return new_d_Const(NULL, con);
1006 ir_node *new_Const_long(ir_mode *mode, long value)
1008 return new_d_Const_long(NULL, mode, value);
1011 ir_node *new_Const_type(tarval *con, ir_type *tp)
1013 return new_d_Const_type(NULL, con, tp);
1016 ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type)
1018 return new_d_SymConst_type(NULL, mode, value, kind, type);
1020 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
1022 return new_d_SymConst(NULL, mode, value, kind);
1024 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
1026 return new_d_simpleSel(NULL, store, objptr, ent);
1028 ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
1030 return new_d_Phi(NULL, arity, in, mode);
1032 ir_node *new_Sync(int arity, ir_node *in[])
1034 return new_d_Sync(NULL, arity, in);
1036 ir_node *new_defaultProj(ir_node *arg, long max_proj)
1038 return new_d_defaultProj(NULL, arg, max_proj);
/* Bad and NoMem return the graph's cached singleton nodes. */
1040 ir_node *new_Bad(void)
1042 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1043 return get_irg_bad(current_ir_graph);
1045 ir_node *new_NoMem(void)
1047 assert(get_irg_phase_state(current_ir_graph) == phase_building);
1048 return get_irg_no_mem(current_ir_graph);
1050 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
1051 int n_outs, ir_asm_constraint *outputs,
1052 int n_clobber, ident *clobber[], ident *text)
1054 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
1057 ir_node *new_r_Anchor(ir_graph *irg)
1059 ir_node *in[anchor_last];
1061 memset(in, 0, sizeof(in));
1062 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);
1064 /* hack to get get_irn_irg working: set block to ourself and allow
1065 * get_Block_irg for anchor */
1066 res->attr.irg.irg = irg;