/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Various irnode constructors.  Automatic construction of SSA
 *          representation.
 * @author  Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
 *          Michael Beck, Matthias Braun
 */
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
47 #include "gen_ir_cons.c.inl"
/**
 * Language dependent variable initialization callback.
 * Invoked (when set) to produce an initial value for a local variable
 * that is read before any definition; NULL means "use Unknown".
 */
static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
54 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
57 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
60 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
65 arg->attr.cond.default_proj = max_proj;
66 res = new_rd_Proj(db, arg, mode_X, max_proj);
70 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
71 ir_asm_constraint *inputs, int n_outs,
72 ir_asm_constraint *outputs, int n_clobber,
73 ident *clobber[], ident *text)
75 ir_graph *irg = get_irn_irg(block);
76 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
78 res->attr.assem.pin_state = op_pin_state_pinned;
79 res->attr.assem.input_constraints
80 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
81 res->attr.assem.output_constraints
82 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
83 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
84 res->attr.assem.text = text;
86 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
87 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
88 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
90 res = optimize_node(res);
91 irn_verify_irg(res, irg);
95 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
96 ir_node *objptr, ir_entity *ent)
98 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
101 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
102 symconst_symbol value, symconst_kind symkind)
104 ir_node *block = get_irg_start_block(irg);
105 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
106 res->attr.symc.kind = symkind;
107 res->attr.symc.sym = value;
109 res = optimize_node(res);
110 irn_verify_irg(res, irg);
114 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
117 sym.entity_p = symbol;
118 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
121 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
124 sym.entity_p = symbol;
125 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
128 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
132 return new_rd_SymConst(db, irg, mode, sym, symconst_type_tag);
135 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
139 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
142 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
146 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
149 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
151 return new_rd_Const_long(NULL, irg, mode, value);
153 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
154 symconst_kind symkind)
156 return new_rd_SymConst(NULL, irg, mode, value, symkind);
158 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
161 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
163 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
165 return new_rd_defaultProj(NULL, arg, max_proj);
167 ir_node *new_r_ASM(ir_node *block,
168 int arity, ir_node *in[], ir_asm_constraint *inputs,
169 int n_outs, ir_asm_constraint *outputs,
170 int n_clobber, ident *clobber[], ident *text)
172 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
175 /** Creates a Phi node with 0 predecessors. */
176 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
177 ir_mode *mode, int pos)
179 ir_graph *irg = get_irn_irg(block);
180 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
181 res->attr.phi.u.pos = pos;
182 irn_verify_irg(res, irg);
/* forward declaration -- needed because set_phi_arguments and
 * get_r_value_internal are mutually recursive */
static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
188 static void try_remove_unnecessary_phi(ir_node *phi)
190 ir_node *phi_value = NULL;
191 int arity = get_irn_arity(phi);
194 /* see if all inputs are either pointing to a single value or
195 * are self references */
196 for (i = 0; i < arity; ++i) {
197 ir_node *in = get_irn_n(phi, i);
202 /** found a different value from the one we already found, can't remove
204 if (phi_value != NULL)
208 if (phi_value == NULL)
211 /* if we're here then all phi inputs have been either phi_value
212 * or self-references, we can replace the phi by phi_value.
213 * We do this with an Id-node */
214 exchange(phi, phi_value);
216 /* recursively check phi_value, because it could be that we were the last
217 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
218 * the loop header which can be eliminated now */
219 if (is_Phi(phi_value)) {
220 try_remove_unnecessary_phi(phi_value);
225 * Computes the predecessors for the real phi node, and then
226 * allocates and returns this node. The routine called to allocate the
227 * node might optimize it away and return a real value.
228 * This function must be called with an in-array of proper size.
230 static ir_node *set_phi_arguments(ir_node *phi, int pos)
232 ir_node *block = get_nodes_block(phi);
233 ir_graph *irg = get_irn_irg(block);
234 int arity = get_irn_arity(block);
235 ir_node **in = ALLOCAN(ir_node*, arity);
236 ir_mode *mode = get_irn_mode(phi);
239 /* This loop goes to all predecessor blocks of the block the Phi node
240 is in and there finds the operands of the Phi node by calling
241 get_r_value_internal. */
242 for (i = 0; i < arity; ++i) {
243 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
245 if (is_Bad(cfgpred)) {
246 value = new_r_Bad(irg);
248 inc_irg_visited(irg);
250 value = get_r_value_internal(cfgpred, pos, mode);
255 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
256 set_irn_in(phi, arity, in);
257 set_irn_op(phi, op_Phi);
259 irn_verify_irg(phi, irg);
261 /* Memory Phis in endless loops must be kept alive.
262 As we can't distinguish these easily we keep all of them alive. */
263 if (is_Phi(phi) && mode == mode_M)
264 add_End_keepalive(get_irg_end(irg), phi);
266 try_remove_unnecessary_phi(phi);
271 * This function returns the last definition of a value. In case
272 * this value was last defined in a previous block, Phi nodes are
273 * inserted. If the part of the firm graph containing the definition
274 * is not yet constructed, a dummy Phi node is returned.
276 * @param block the current block
277 * @param pos the value number of the value searched
278 * @param mode the mode of this value (needed for Phi construction)
280 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
282 ir_node *res = block->attr.block.graph_arr[pos];
283 ir_graph *irg = get_irn_irg(block);
287 /* We ran into a cycle. This may happen in unreachable loops. */
288 if (irn_visited(block)) {
289 /* Since the loop is unreachable, return a Bad. */
290 return new_r_Bad(irg);
293 mark_irn_visited(block);
295 /* in a matured block we can immediately determine the phi arguments */
296 if (get_Block_matured(block)) {
297 int arity = get_irn_arity(block);
298 /* no predecessors: use unknown value */
299 if (arity == 0 && block == get_irg_start_block(get_irn_irg(block))) {
300 ir_graph *irg = get_irn_irg(block);
301 if (default_initialize_local_variable != NULL) {
302 ir_node *rem = get_r_cur_block(irg);
303 set_r_cur_block(irg, block);
304 res = default_initialize_local_variable(irg, mode, pos - 1);
305 set_r_cur_block(irg, rem);
307 res = new_r_Unknown(irg, mode);
309 /* one predecessor just use its value */
310 } else if (arity == 1) {
311 ir_node *cfgpred = get_Block_cfgpred_block(block, 0);
312 if (is_Bad(cfgpred)) {
315 res = get_r_value_internal(cfgpred, pos, mode);
317 /* multiple predecessors construct Phi */
319 res = new_rd_Phi0(NULL, block, mode, pos);
320 /* enter phi0 into our variable value table to break cycles
321 * arising from set_phi_arguments */
322 block->attr.block.graph_arr[pos] = res;
323 res = set_phi_arguments(res, pos);
326 /* in case of immature block we have to keep a Phi0 */
327 res = new_rd_Phi0(NULL, block, mode, pos);
328 /* enqueue phi so we can set arguments once the block matures */
329 res->attr.phi.next = block->attr.block.phis;
330 block->attr.block.phis = res;
332 block->attr.block.graph_arr[pos] = res;
336 /* ************************************************************************** */
339 * Finalize a Block node, when all control flows are known.
340 * Acceptable parameters are only Block nodes.
342 void mature_immBlock(ir_node *block)
349 assert(is_Block(block));
350 if (get_Block_matured(block))
353 irg = get_irn_irg(block);
354 n_preds = ARR_LEN(block->in) - 1;
355 /* Fix block parameters */
356 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
358 /* Traverse a chain of Phi nodes attached to this block and mature
360 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
362 int pos = phi->attr.phi.u.pos;
364 next = phi->attr.phi.next;
365 new_value = set_phi_arguments(phi, pos);
366 if (block->attr.block.graph_arr[pos] == phi) {
367 block->attr.block.graph_arr[pos] = new_value;
371 set_Block_matured(block, 1);
373 /* Now, as the block is a finished Firm node, we can optimize it.
374 Since other nodes have been allocated since the block was created
375 we can not free the node on the obstack. Therefore we have to call
377 Unfortunately the optimization does not change a lot, as all allocated
378 nodes refer to the unoptimized node.
379 We can call optimize_in_place_2(), as global cse has no effect on blocks.
381 block = optimize_in_place_2(block);
382 irn_verify_irg(block, irg);
385 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
387 assert(get_irg_phase_state(current_ir_graph) == phase_building);
388 return new_rd_Const_long(db, current_ir_graph, mode, value);
391 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
394 assert(is_Cond(arg) || is_Bad(arg));
395 assert(get_irg_phase_state(current_ir_graph) == phase_building);
397 arg->attr.cond.default_proj = max_proj;
398 res = new_d_Proj(db, arg, mode_X, max_proj);
402 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
405 assert(get_irg_phase_state(current_ir_graph) == phase_building);
406 return new_rd_Sel(db, current_ir_graph->current_block,
407 store, objptr, 0, NULL, ent);
410 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
413 assert(get_irg_phase_state(current_ir_graph) == phase_building);
414 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
417 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
418 ir_asm_constraint *inputs,
419 int n_outs, ir_asm_constraint *outputs, int n_clobber,
420 ident *clobber[], ident *text)
422 assert(get_irg_phase_state(current_ir_graph) == phase_building);
423 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
424 n_outs, outputs, n_clobber, clobber, text);
427 ir_node *new_rd_strictConv(dbg_info *dbgi, ir_node *block, ir_node * irn_op, ir_mode * mode)
430 ir_graph *irg = get_Block_irg(block);
435 res = new_ir_node(dbgi, irg, block, op_Conv, mode, 1, in);
436 res->attr.conv.strict = 1;
437 res = optimize_node(res);
438 irn_verify_irg(res, irg);
442 ir_node *new_r_strictConv(ir_node *block, ir_node * irn_op, ir_mode * mode)
444 return new_rd_strictConv(NULL, block, irn_op, mode);
447 ir_node *new_d_strictConv(dbg_info *dbgi, ir_node * irn_op, ir_mode * mode)
450 assert(get_irg_phase_state(current_ir_graph) == phase_building);
451 res = new_rd_strictConv(dbgi, current_ir_graph->current_block, irn_op, mode);
455 ir_node *new_strictConv(ir_node * irn_op, ir_mode * mode)
457 return new_d_strictConv(NULL, irn_op, mode);
460 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
463 ir_graph *irg = get_Block_irg(block);
470 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
471 res->attr.div.resmode = resmode;
472 res->attr.div.no_remainder = 1;
473 res->attr.div.exc.pin_state = pin_state;
474 res = optimize_node(res);
475 irn_verify_irg(res, irg);
479 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
481 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
484 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
487 assert(get_irg_phase_state(current_ir_graph) == phase_building);
488 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
492 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
494 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
497 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
501 assert(get_irg_phase_state(irg) == phase_building);
502 /* creates a new dynamic in-array as length of in is -1 */
503 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
505 set_Block_matured(res, 0);
506 res->attr.block.is_dead = 0;
507 res->attr.block.irg.irg = irg;
508 res->attr.block.backedge = NULL;
509 res->attr.block.in_cg = NULL;
510 res->attr.block.cg_backedge = NULL;
511 res->attr.block.extblk = NULL;
512 res->attr.block.region = NULL;
513 res->attr.block.entity = NULL;
515 set_Block_block_visited(res, 0);
517 /* Create and initialize array for Phi-node construction. */
518 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
519 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
521 /* Immature block may not be optimized! */
522 irn_verify_irg(res, irg);
527 ir_node *new_r_immBlock(ir_graph *irg)
529 return new_rd_immBlock(NULL, irg);
532 ir_node *new_d_immBlock(dbg_info *dbgi)
534 return new_rd_immBlock(dbgi, current_ir_graph);
537 ir_node *new_immBlock(void)
539 return new_rd_immBlock(NULL, current_ir_graph);
542 void add_immBlock_pred(ir_node *block, ir_node *jmp)
544 int n = ARR_LEN(block->in) - 1;
546 assert(is_Block(block) && "Error: Must be a Block");
547 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
548 assert(is_ir_node(jmp));
550 ARR_APP1(ir_node *, block->in, jmp);
552 hook_set_irn_n(block, n, jmp, NULL);
555 void set_cur_block(ir_node *target)
557 assert(target == NULL || current_ir_graph == get_irn_irg(target));
558 current_ir_graph->current_block = target;
561 void set_r_cur_block(ir_graph *irg, ir_node *target)
563 assert(target == NULL || irg == get_irn_irg(target));
564 irg->current_block = target;
567 ir_node *get_r_cur_block(ir_graph *irg)
569 return irg->current_block;
572 ir_node *get_cur_block(void)
574 return get_r_cur_block(current_ir_graph);
577 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
579 assert(get_irg_phase_state(irg) == phase_building);
581 inc_irg_visited(irg);
583 return get_r_value_internal(irg->current_block, pos + 1, mode);
586 ir_node *get_value(int pos, ir_mode *mode)
588 return get_r_value(current_ir_graph, pos, mode);
592 * helper function for guess_mode: recursively look for a definition for
593 * local variable @p pos, returns its mode if found.
595 static ir_mode *guess_recursively(ir_node *block, int pos)
601 if (irn_visited(block))
603 mark_irn_visited(block);
605 /* already have a defintion -> we can simply look at its mode */
606 value = block->attr.block.graph_arr[pos];
608 return get_irn_mode(value);
610 /* now we try to guess, by looking at the predecessor blocks */
611 n_preds = get_irn_arity(block);
612 for (i = 0; i < n_preds; ++i) {
613 ir_node *pred_block = get_Block_cfgpred_block(block, i);
614 ir_mode *mode = guess_recursively(pred_block, pos);
619 /* no way to guess */
623 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
625 ir_node *block = irg->current_block;
626 ir_node *value = block->attr.block.graph_arr[pos+1];
629 /* already have a defintion -> we can simply look at its mode */
631 return get_irn_mode(value);
633 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
634 inc_irg_visited(irg);
635 mode = guess_recursively(block, pos+1);
636 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
641 ir_mode *ir_guess_mode(int pos)
643 return ir_r_guess_mode(current_ir_graph, pos);
646 void set_r_value(ir_graph *irg, int pos, ir_node *value)
648 assert(get_irg_phase_state(irg) == phase_building);
650 assert(pos+1 < irg->n_loc);
651 assert(is_ir_node(value));
652 irg->current_block->attr.block.graph_arr[pos + 1] = value;
655 void set_value(int pos, ir_node *value)
657 set_r_value(current_ir_graph, pos, value);
660 int r_find_value(ir_graph *irg, ir_node *value)
663 ir_node *bl = irg->current_block;
665 for (i = ARR_LEN(bl->attr.block.graph_arr); i > 1;) {
666 if (bl->attr.block.graph_arr[--i] == value)
672 int find_value(ir_node *value)
674 return r_find_value(current_ir_graph, value);
677 ir_node *get_r_store(ir_graph *irg)
679 assert(get_irg_phase_state(irg) == phase_building);
680 inc_irg_visited(irg);
681 return get_r_value_internal(irg->current_block, 0, mode_M);
684 ir_node *get_store(void)
686 return get_r_store(current_ir_graph);
689 void set_r_store(ir_graph *irg, ir_node *store)
691 ir_node *load, *pload, *pred, *in[2];
693 assert(get_irg_phase_state(irg) == phase_building);
694 /* Beware: due to dead code elimination, a store might become a Bad node even in
695 the construction phase. */
696 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
698 if (get_opt_auto_create_sync()) {
699 /* handle non-volatile Load nodes by automatically creating Sync's */
700 load = skip_Proj(store);
701 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
702 pred = get_Load_mem(load);
705 /* a Load after a Sync: move it up */
706 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
708 set_Load_mem(load, get_memop_mem(mem));
709 add_Sync_pred(pred, store);
712 pload = skip_Proj(pred);
713 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
714 /* a Load after a Load: create a new Sync */
715 set_Load_mem(load, get_Load_mem(pload));
719 store = new_r_Sync(irg->current_block, 2, in);
724 irg->current_block->attr.block.graph_arr[0] = store;
727 void set_store(ir_node *store)
729 set_r_store(current_ir_graph, store);
732 void keep_alive(ir_node *ka)
734 ir_graph *irg = get_irn_irg(ka);
735 add_End_keepalive(get_irg_end(irg), ka);
738 void ir_set_uninitialized_local_variable_func(
739 uninitialized_local_variable_func_t *func)
741 default_initialize_local_variable = func;
744 void irg_finalize_cons(ir_graph *irg)
746 set_irg_phase_state(irg, phase_high);
749 void irp_finalize_cons(void)
752 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
753 irg_finalize_cons(get_irp_irg(i));
755 irp->phase_state = phase_high;
758 ir_node *new_Const_long(ir_mode *mode, long value)
760 return new_d_Const_long(NULL, mode, value);
763 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
765 return new_d_SymConst(NULL, mode, value, kind);
767 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
769 return new_d_simpleSel(NULL, store, objptr, ent);
771 ir_node *new_defaultProj(ir_node *arg, long max_proj)
773 return new_d_defaultProj(NULL, arg, max_proj);
775 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
776 int n_outs, ir_asm_constraint *outputs,
777 int n_clobber, ident *clobber[], ident *text)
779 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
782 ir_node *new_r_Anchor(ir_graph *irg)
784 ir_node *in[anchor_last];
786 memset(in, 0, sizeof(in));
787 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);
788 res->attr.anchor.irg.irg = irg;
790 /* hack to get get_irn_irg working: set block to ourself and allow
791 * get_Block_irg for anchor */
797 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
799 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
800 res->attr.block.irg.irg = irg;
801 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
802 set_Block_matured(res, 1);
803 /* Create and initialize array for Phi-node construction. */
804 if (get_irg_phase_state(irg) == phase_building) {
805 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
806 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
808 irn_verify_irg(res, irg);