2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
59 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
64 arg->attr.cond.default_proj = max_proj;
65 res = new_rd_Proj(db, arg, mode_X, max_proj);
69 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
70 ir_asm_constraint *inputs, size_t n_outs,
71 ir_asm_constraint *outputs, size_t n_clobber,
72 ident *clobber[], ident *text)
74 ir_graph *irg = get_irn_irg(block);
75 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
77 res->attr.assem.pin_state = op_pin_state_pinned;
78 res->attr.assem.input_constraints
79 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
80 res->attr.assem.output_constraints
81 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
82 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
83 res->attr.assem.text = text;
85 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
86 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
87 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
89 irn_verify_irg(res, irg);
90 res = optimize_node(res);
94 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
95 ir_node *objptr, ir_entity *ent)
97 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
100 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
101 symconst_symbol value, symconst_kind symkind)
103 ir_node *block = get_irg_start_block(irg);
104 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
105 res->attr.symc.kind = symkind;
106 res->attr.symc.sym = value;
108 irn_verify_irg(res, irg);
109 res = optimize_node(res);
113 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
116 sym.entity_p = symbol;
117 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
120 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
123 sym.entity_p = symbol;
124 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
127 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
131 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
134 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
138 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
141 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
143 return new_rd_Const_long(NULL, irg, mode, value);
145 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
146 symconst_kind symkind)
148 return new_rd_SymConst(NULL, irg, mode, value, symkind);
150 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
153 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
155 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
157 return new_rd_defaultProj(NULL, arg, max_proj);
159 ir_node *new_r_ASM(ir_node *block,
160 int arity, ir_node *in[], ir_asm_constraint *inputs,
161 size_t n_outs, ir_asm_constraint *outputs,
162 size_t n_clobber, ident *clobber[], ident *text)
164 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
167 /** Creates a Phi node with 0 predecessors. */
168 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
169 ir_mode *mode, int pos)
171 ir_graph *irg = get_irn_irg(block);
172 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
173 res->attr.phi.u.pos = pos;
174 irn_verify_irg(res, irg);
178 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
180 static void try_remove_unnecessary_phi(ir_node *phi)
182 ir_node *phi_value = NULL;
183 int arity = get_irn_arity(phi);
186 /* see if all inputs are either pointing to a single value or
187 * are self references */
188 for (i = 0; i < arity; ++i) {
189 ir_node *in = get_irn_n(phi, i);
194 /** found a different value from the one we already found, can't remove
196 if (phi_value != NULL)
200 if (phi_value == NULL)
203 /* if we're here then all phi inputs have been either phi_value
204 * or self-references, we can replace the phi by phi_value.
205 * We do this with an Id-node */
206 exchange(phi, phi_value);
208 /* recursively check phi_value, because it could be that we were the last
209 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
210 * the loop header which can be eliminated now */
211 if (is_Phi(phi_value)) {
212 try_remove_unnecessary_phi(phi_value);
217 * Computes the predecessors for the real phi node, and then
218 * allocates and returns this node. The routine called to allocate the
219 * node might optimize it away and return a real value.
220 * This function must be called with an in-array of proper size.
222 static ir_node *set_phi_arguments(ir_node *phi, int pos)
224 ir_node *block = get_nodes_block(phi);
225 ir_graph *irg = get_irn_irg(block);
226 int arity = get_irn_arity(block);
227 ir_node **in = ALLOCAN(ir_node*, arity);
228 ir_mode *mode = get_irn_mode(phi);
231 /* This loop goes to all predecessor blocks of the block the Phi node
232 is in and there finds the operands of the Phi node by calling
233 get_r_value_internal. */
234 for (i = 0; i < arity; ++i) {
235 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
237 if (is_Bad(cfgpred)) {
238 value = new_r_Bad(irg, mode);
240 inc_irg_visited(irg);
242 value = get_r_value_internal(cfgpred, pos, mode);
247 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
248 set_irn_in(phi, arity, in);
249 set_irn_op(phi, op_Phi);
251 irn_verify_irg(phi, irg);
253 /* Memory Phis in endless loops must be kept alive.
254 As we can't distinguish these easily we keep all of them alive. */
255 if (is_Phi(phi) && mode == mode_M)
256 add_End_keepalive(get_irg_end(irg), phi);
258 try_remove_unnecessary_phi(phi);
263 * This function returns the last definition of a value. In case
264 * this value was last defined in a previous block, Phi nodes are
265 * inserted. If the part of the firm graph containing the definition
266 * is not yet constructed, a dummy Phi node is returned.
268 * @param block the current block
269 * @param pos the value number of the value searched
270 * @param mode the mode of this value (needed for Phi construction)
272 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
274 ir_node *res = block->attr.block.graph_arr[pos];
275 ir_graph *irg = get_irn_irg(block);
279 /* We ran into a cycle. This may happen in unreachable loops. */
280 if (irn_visited_else_mark(block)) {
281 /* Since the loop is unreachable, return a Bad. */
282 return new_r_Bad(irg, mode);
285 /* in a matured block we can immediately determine the phi arguments */
286 if (get_Block_matured(block)) {
287 int arity = get_irn_arity(block);
288 /* no predecessors: use unknown value */
289 if (arity == 0 && block == get_irg_start_block(get_irn_irg(block))) {
290 if (default_initialize_local_variable != NULL) {
291 ir_node *rem = get_r_cur_block(irg);
292 set_r_cur_block(irg, block);
293 res = default_initialize_local_variable(irg, mode, pos - 1);
294 set_r_cur_block(irg, rem);
296 res = new_r_Unknown(irg, mode);
298 /* one predecessor just use its value */
299 } else if (arity == 1) {
300 ir_node *cfgpred = get_Block_cfgpred(block, 0);
301 if (is_Bad(cfgpred)) {
302 res = new_r_Bad(irg, mode);
304 ir_node *cfgpred_block = get_nodes_block(cfgpred);
305 res = get_r_value_internal(cfgpred_block, pos, mode);
307 /* multiple predecessors construct Phi */
309 res = new_rd_Phi0(NULL, block, mode, pos);
310 /* enter phi0 into our variable value table to break cycles
311 * arising from set_phi_arguments */
312 block->attr.block.graph_arr[pos] = res;
313 res = set_phi_arguments(res, pos);
316 /* in case of immature block we have to keep a Phi0 */
317 res = new_rd_Phi0(NULL, block, mode, pos);
318 /* enqueue phi so we can set arguments once the block matures */
319 res->attr.phi.next = block->attr.block.phis;
320 block->attr.block.phis = res;
322 block->attr.block.graph_arr[pos] = res;
326 /* ************************************************************************** */
329 * Finalize a Block node, when all control flows are known.
330 * Acceptable parameters are only Block nodes.
332 void mature_immBlock(ir_node *block)
339 assert(is_Block(block));
340 if (get_Block_matured(block))
343 irg = get_irn_irg(block);
344 n_preds = ARR_LEN(block->in) - 1;
345 /* Fix block parameters */
346 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
348 /* Traverse a chain of Phi nodes attached to this block and mature
350 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
352 int pos = phi->attr.phi.u.pos;
354 next = phi->attr.phi.next;
355 new_value = set_phi_arguments(phi, pos);
356 if (block->attr.block.graph_arr[pos] == phi) {
357 block->attr.block.graph_arr[pos] = new_value;
361 set_Block_matured(block, 1);
363 /* Now, as the block is a finished Firm node, we can optimize it.
364 Since other nodes have been allocated since the block was created
365 we can not free the node on the obstack. Therefore we have to call
367 Unfortunately the optimization does not change a lot, as all allocated
368 nodes refer to the unoptimized node.
369 We can call optimize_in_place_2(), as global cse has no effect on blocks.
371 irn_verify_irg(block, irg);
372 block = optimize_in_place_2(block);
375 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
377 assert(get_irg_phase_state(current_ir_graph) == phase_building);
378 return new_rd_Const_long(db, current_ir_graph, mode, value);
381 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
384 assert(is_Cond(arg) || is_Bad(arg));
385 assert(get_irg_phase_state(current_ir_graph) == phase_building);
387 arg->attr.cond.default_proj = max_proj;
388 res = new_d_Proj(db, arg, mode_X, max_proj);
392 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
395 assert(get_irg_phase_state(current_ir_graph) == phase_building);
396 return new_rd_Sel(db, current_ir_graph->current_block,
397 store, objptr, 0, NULL, ent);
400 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
403 assert(get_irg_phase_state(current_ir_graph) == phase_building);
404 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
407 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
408 ir_asm_constraint *inputs,
409 size_t n_outs, ir_asm_constraint *outputs,
410 size_t n_clobber, ident *clobber[], ident *text)
412 assert(get_irg_phase_state(current_ir_graph) == phase_building);
413 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
414 n_outs, outputs, n_clobber, clobber, text);
417 ir_node *new_rd_strictConv(dbg_info *dbgi, ir_node *block, ir_node * irn_op, ir_mode * mode)
420 ir_graph *irg = get_Block_irg(block);
425 res = new_ir_node(dbgi, irg, block, op_Conv, mode, 1, in);
426 res->attr.conv.strict = 1;
427 irn_verify_irg(res, irg);
428 res = optimize_node(res);
432 ir_node *new_r_strictConv(ir_node *block, ir_node * irn_op, ir_mode * mode)
434 return new_rd_strictConv(NULL, block, irn_op, mode);
437 ir_node *new_d_strictConv(dbg_info *dbgi, ir_node * irn_op, ir_mode * mode)
440 assert(get_irg_phase_state(current_ir_graph) == phase_building);
441 res = new_rd_strictConv(dbgi, current_ir_graph->current_block, irn_op, mode);
445 ir_node *new_strictConv(ir_node * irn_op, ir_mode * mode)
447 return new_d_strictConv(NULL, irn_op, mode);
450 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
453 ir_graph *irg = get_Block_irg(block);
460 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
461 res->attr.div.resmode = resmode;
462 res->attr.div.no_remainder = 1;
463 res->attr.div.exc.pin_state = pin_state;
464 irn_verify_irg(res, irg);
465 res = optimize_node(res);
469 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
471 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
474 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
477 assert(get_irg_phase_state(current_ir_graph) == phase_building);
478 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
482 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
484 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
487 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
491 assert(get_irg_phase_state(irg) == phase_building);
492 /* creates a new dynamic in-array as length of in is -1 */
493 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
495 set_Block_matured(res, 0);
496 res->attr.block.irg.irg = irg;
497 res->attr.block.backedge = NULL;
498 res->attr.block.in_cg = NULL;
499 res->attr.block.cg_backedge = NULL;
500 res->attr.block.extblk = NULL;
501 res->attr.block.entity = NULL;
503 set_Block_block_visited(res, 0);
505 /* Create and initialize array for Phi-node construction. */
506 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
507 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
509 /* Immature block may not be optimized! */
510 irn_verify_irg(res, irg);
515 ir_node *new_r_immBlock(ir_graph *irg)
517 return new_rd_immBlock(NULL, irg);
520 ir_node *new_d_immBlock(dbg_info *dbgi)
522 return new_rd_immBlock(dbgi, current_ir_graph);
525 ir_node *new_immBlock(void)
527 return new_rd_immBlock(NULL, current_ir_graph);
530 void add_immBlock_pred(ir_node *block, ir_node *jmp)
532 int n = ARR_LEN(block->in) - 1;
534 assert(is_Block(block) && "Error: Must be a Block");
535 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
536 assert(is_ir_node(jmp));
538 ARR_APP1(ir_node *, block->in, jmp);
540 hook_set_irn_n(block, n, jmp, NULL);
543 void set_cur_block(ir_node *target)
545 set_r_cur_block(current_ir_graph, target);
548 void set_r_cur_block(ir_graph *irg, ir_node *target)
550 assert(target == NULL || get_irn_mode(target) == mode_BB);
551 assert(target == NULL || get_irn_irg(target) == irg);
552 irg->current_block = target;
555 ir_node *get_r_cur_block(ir_graph *irg)
557 return irg->current_block;
560 ir_node *get_cur_block(void)
562 return get_r_cur_block(current_ir_graph);
565 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
567 assert(get_irg_phase_state(irg) == phase_building);
569 inc_irg_visited(irg);
571 return get_r_value_internal(irg->current_block, pos + 1, mode);
574 ir_node *get_value(int pos, ir_mode *mode)
576 return get_r_value(current_ir_graph, pos, mode);
580 * helper function for guess_mode: recursively look for a definition for
581 * local variable @p pos, returns its mode if found.
583 static ir_mode *guess_recursively(ir_node *block, int pos)
589 if (irn_visited_else_mark(block))
592 /* already have a defintion -> we can simply look at its mode */
593 value = block->attr.block.graph_arr[pos];
595 return get_irn_mode(value);
597 /* now we try to guess, by looking at the predecessor blocks */
598 n_preds = get_irn_arity(block);
599 for (i = 0; i < n_preds; ++i) {
600 ir_node *pred_block = get_Block_cfgpred_block(block, i);
601 ir_mode *mode = guess_recursively(pred_block, pos);
606 /* no way to guess */
610 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
612 ir_node *block = irg->current_block;
613 ir_node *value = block->attr.block.graph_arr[pos+1];
616 /* already have a defintion -> we can simply look at its mode */
618 return get_irn_mode(value);
620 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
621 inc_irg_visited(irg);
622 mode = guess_recursively(block, pos+1);
623 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
628 ir_mode *ir_guess_mode(int pos)
630 return ir_r_guess_mode(current_ir_graph, pos);
633 void set_r_value(ir_graph *irg, int pos, ir_node *value)
635 assert(get_irg_phase_state(irg) == phase_building);
637 assert(pos+1 < irg->n_loc);
638 assert(is_ir_node(value));
639 irg->current_block->attr.block.graph_arr[pos + 1] = value;
642 void set_value(int pos, ir_node *value)
644 set_r_value(current_ir_graph, pos, value);
647 int r_find_value(ir_graph *irg, ir_node *value)
650 ir_node *bl = irg->current_block;
652 for (i = ARR_LEN(bl->attr.block.graph_arr); i > 1;) {
653 if (bl->attr.block.graph_arr[--i] == value)
659 int find_value(ir_node *value)
661 return r_find_value(current_ir_graph, value);
664 ir_node *get_r_store(ir_graph *irg)
666 assert(get_irg_phase_state(irg) == phase_building);
667 inc_irg_visited(irg);
668 return get_r_value_internal(irg->current_block, 0, mode_M);
671 ir_node *get_store(void)
673 return get_r_store(current_ir_graph);
676 void set_r_store(ir_graph *irg, ir_node *store)
678 ir_node *load, *pload, *pred, *in[2];
680 assert(get_irg_phase_state(irg) == phase_building);
681 /* Beware: due to dead code elimination, a store might become a Bad node even in
682 the construction phase. */
683 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
685 if (get_opt_auto_create_sync()) {
686 /* handle non-volatile Load nodes by automatically creating Sync's */
687 load = skip_Proj(store);
688 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
689 pred = get_Load_mem(load);
692 /* a Load after a Sync: move it up */
693 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
695 set_Load_mem(load, get_memop_mem(mem));
696 add_Sync_pred(pred, store);
699 pload = skip_Proj(pred);
700 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
701 /* a Load after a Load: create a new Sync */
702 set_Load_mem(load, get_Load_mem(pload));
706 store = new_r_Sync(irg->current_block, 2, in);
711 irg->current_block->attr.block.graph_arr[0] = store;
714 void set_store(ir_node *store)
716 set_r_store(current_ir_graph, store);
719 void keep_alive(ir_node *ka)
721 ir_graph *irg = get_irn_irg(ka);
722 add_End_keepalive(get_irg_end(irg), ka);
725 void ir_set_uninitialized_local_variable_func(
726 uninitialized_local_variable_func_t *func)
728 default_initialize_local_variable = func;
731 void irg_finalize_cons(ir_graph *irg)
733 set_irg_phase_state(irg, phase_high);
736 void irp_finalize_cons(void)
739 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
740 irg_finalize_cons(get_irp_irg(i));
742 irp->phase_state = phase_high;
745 ir_node *new_Const_long(ir_mode *mode, long value)
747 return new_d_Const_long(NULL, mode, value);
750 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
752 return new_d_SymConst(NULL, mode, value, kind);
754 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
756 return new_d_simpleSel(NULL, store, objptr, ent);
758 ir_node *new_defaultProj(ir_node *arg, long max_proj)
760 return new_d_defaultProj(NULL, arg, max_proj);
762 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
763 size_t n_outs, ir_asm_constraint *outputs,
764 size_t n_clobber, ident *clobber[], ident *text)
766 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
769 ir_node *new_r_Anchor(ir_graph *irg)
771 ir_node *in[anchor_last+1];
774 memset(in, 0, sizeof(in));
775 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last+1, in);
776 res->attr.anchor.irg.irg = irg;
778 /* hack to get get_irn_irg working: set block to ourself and allow
779 * get_Block_irg for anchor */
782 /* we can't have NULL inputs so reference ourselfes for now */
783 for (i = 0; i <= (size_t)anchor_last; ++i) {
784 set_irn_n(res, i, res);
790 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
792 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
793 res->attr.block.irg.irg = irg;
794 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
795 set_Block_matured(res, 1);
796 /* Create and initialize array for Phi-node construction. */
797 if (get_irg_phase_state(irg) == phase_building) {
798 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
799 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
801 irn_verify_irg(res, irg);