/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Various irnode constructors. Automatic construction of SSA
 *          representation.
 * @author  Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
 *          Michael Beck, Matthias Braun
 */
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
47 #include "gen_ir_cons.c.inl"
/** Language dependent variable initialization callback. */
static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
54 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
57 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
60 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
65 arg->attr.cond.default_proj = max_proj;
66 res = new_rd_Proj(db, arg, mode_X, max_proj);
70 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
71 ir_asm_constraint *inputs, int n_outs,
72 ir_asm_constraint *outputs, int n_clobber,
73 ident *clobber[], ident *text)
75 ir_graph *irg = get_irn_irg(block);
76 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
78 res->attr.assem.pin_state = op_pin_state_pinned;
79 res->attr.assem.input_constraints
80 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
81 res->attr.assem.output_constraints
82 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
83 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
84 res->attr.assem.text = text;
86 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
87 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
88 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
90 irn_verify_irg(res, irg);
91 res = optimize_node(res);
95 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
96 ir_node *objptr, ir_entity *ent)
98 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
101 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
102 symconst_symbol value, symconst_kind symkind)
104 ir_node *block = get_irg_start_block(irg);
105 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
106 res->attr.symc.kind = symkind;
107 res->attr.symc.sym = value;
109 irn_verify_irg(res, irg);
110 res = optimize_node(res);
114 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
117 sym.entity_p = symbol;
118 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
121 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
124 sym.entity_p = symbol;
125 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
128 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
132 return new_rd_SymConst(db, irg, mode, sym, symconst_type_tag);
135 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
139 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
142 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
146 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
149 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
151 return new_rd_Const_long(NULL, irg, mode, value);
153 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
154 symconst_kind symkind)
156 return new_rd_SymConst(NULL, irg, mode, value, symkind);
158 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
161 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
163 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
165 return new_rd_defaultProj(NULL, arg, max_proj);
167 ir_node *new_r_ASM(ir_node *block,
168 int arity, ir_node *in[], ir_asm_constraint *inputs,
169 int n_outs, ir_asm_constraint *outputs,
170 int n_clobber, ident *clobber[], ident *text)
172 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
175 /** Creates a Phi node with 0 predecessors. */
176 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
177 ir_mode *mode, int pos)
179 ir_graph *irg = get_irn_irg(block);
180 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
181 res->attr.phi.u.pos = pos;
182 irn_verify_irg(res, irg);
186 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
188 static void try_remove_unnecessary_phi(ir_node *phi)
190 ir_node *phi_value = NULL;
191 int arity = get_irn_arity(phi);
194 /* see if all inputs are either pointing to a single value or
195 * are self references */
196 for (i = 0; i < arity; ++i) {
197 ir_node *in = get_irn_n(phi, i);
202 /** found a different value from the one we already found, can't remove
204 if (phi_value != NULL)
208 if (phi_value == NULL)
211 /* if we're here then all phi inputs have been either phi_value
212 * or self-references, we can replace the phi by phi_value.
213 * We do this with an Id-node */
214 exchange(phi, phi_value);
216 /* recursively check phi_value, because it could be that we were the last
217 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
218 * the loop header which can be eliminated now */
219 if (is_Phi(phi_value)) {
220 try_remove_unnecessary_phi(phi_value);
225 * Computes the predecessors for the real phi node, and then
226 * allocates and returns this node. The routine called to allocate the
227 * node might optimize it away and return a real value.
228 * This function must be called with an in-array of proper size.
230 static ir_node *set_phi_arguments(ir_node *phi, int pos)
232 ir_node *block = get_nodes_block(phi);
233 ir_graph *irg = get_irn_irg(block);
234 int arity = get_irn_arity(block);
235 ir_node **in = ALLOCAN(ir_node*, arity);
236 ir_mode *mode = get_irn_mode(phi);
239 /* This loop goes to all predecessor blocks of the block the Phi node
240 is in and there finds the operands of the Phi node by calling
241 get_r_value_internal. */
242 for (i = 0; i < arity; ++i) {
243 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
245 if (is_Bad(cfgpred)) {
246 value = new_r_Bad(irg, mode);
248 inc_irg_visited(irg);
250 value = get_r_value_internal(cfgpred, pos, mode);
255 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
256 set_irn_in(phi, arity, in);
257 set_irn_op(phi, op_Phi);
259 irn_verify_irg(phi, irg);
261 /* Memory Phis in endless loops must be kept alive.
262 As we can't distinguish these easily we keep all of them alive. */
263 if (is_Phi(phi) && mode == mode_M)
264 add_End_keepalive(get_irg_end(irg), phi);
266 try_remove_unnecessary_phi(phi);
271 * This function returns the last definition of a value. In case
272 * this value was last defined in a previous block, Phi nodes are
273 * inserted. If the part of the firm graph containing the definition
274 * is not yet constructed, a dummy Phi node is returned.
276 * @param block the current block
277 * @param pos the value number of the value searched
278 * @param mode the mode of this value (needed for Phi construction)
280 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
282 ir_node *res = block->attr.block.graph_arr[pos];
283 ir_graph *irg = get_irn_irg(block);
287 /* We ran into a cycle. This may happen in unreachable loops. */
288 if (irn_visited_else_mark(block)) {
289 /* Since the loop is unreachable, return a Bad. */
290 return new_r_Bad(irg, mode);
293 /* in a matured block we can immediately determine the phi arguments */
294 if (get_Block_matured(block)) {
295 int arity = get_irn_arity(block);
296 /* no predecessors: use unknown value */
297 if (arity == 0 && block == get_irg_start_block(get_irn_irg(block))) {
298 ir_graph *irg = get_irn_irg(block);
299 if (default_initialize_local_variable != NULL) {
300 ir_node *rem = get_r_cur_block(irg);
301 set_r_cur_block(irg, block);
302 res = default_initialize_local_variable(irg, mode, pos - 1);
303 set_r_cur_block(irg, rem);
305 res = new_r_Unknown(irg, mode);
307 /* one predecessor just use its value */
308 } else if (arity == 1) {
309 ir_node *cfgpred = get_Block_cfgpred(block, 0);
310 if (is_Bad(cfgpred)) {
311 res = new_r_Bad(irg, mode);
313 ir_node *cfgpred_block = get_nodes_block(cfgpred);
314 res = get_r_value_internal(cfgpred_block, pos, mode);
316 /* multiple predecessors construct Phi */
318 res = new_rd_Phi0(NULL, block, mode, pos);
319 /* enter phi0 into our variable value table to break cycles
320 * arising from set_phi_arguments */
321 block->attr.block.graph_arr[pos] = res;
322 res = set_phi_arguments(res, pos);
325 /* in case of immature block we have to keep a Phi0 */
326 res = new_rd_Phi0(NULL, block, mode, pos);
327 /* enqueue phi so we can set arguments once the block matures */
328 res->attr.phi.next = block->attr.block.phis;
329 block->attr.block.phis = res;
331 block->attr.block.graph_arr[pos] = res;
335 /* ************************************************************************** */
338 * Finalize a Block node, when all control flows are known.
339 * Acceptable parameters are only Block nodes.
341 void mature_immBlock(ir_node *block)
348 assert(is_Block(block));
349 if (get_Block_matured(block))
352 irg = get_irn_irg(block);
353 n_preds = ARR_LEN(block->in) - 1;
354 /* Fix block parameters */
355 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
357 /* Traverse a chain of Phi nodes attached to this block and mature
359 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
361 int pos = phi->attr.phi.u.pos;
363 next = phi->attr.phi.next;
364 new_value = set_phi_arguments(phi, pos);
365 if (block->attr.block.graph_arr[pos] == phi) {
366 block->attr.block.graph_arr[pos] = new_value;
370 set_Block_matured(block, 1);
372 /* Now, as the block is a finished Firm node, we can optimize it.
373 Since other nodes have been allocated since the block was created
374 we can not free the node on the obstack. Therefore we have to call
376 Unfortunately the optimization does not change a lot, as all allocated
377 nodes refer to the unoptimized node.
378 We can call optimize_in_place_2(), as global cse has no effect on blocks.
380 irn_verify_irg(block, irg);
381 block = optimize_in_place_2(block);
384 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
386 assert(get_irg_phase_state(current_ir_graph) == phase_building);
387 return new_rd_Const_long(db, current_ir_graph, mode, value);
390 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
393 assert(is_Cond(arg) || is_Bad(arg));
394 assert(get_irg_phase_state(current_ir_graph) == phase_building);
396 arg->attr.cond.default_proj = max_proj;
397 res = new_d_Proj(db, arg, mode_X, max_proj);
401 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
404 assert(get_irg_phase_state(current_ir_graph) == phase_building);
405 return new_rd_Sel(db, current_ir_graph->current_block,
406 store, objptr, 0, NULL, ent);
409 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
412 assert(get_irg_phase_state(current_ir_graph) == phase_building);
413 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
416 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
417 ir_asm_constraint *inputs,
418 int n_outs, ir_asm_constraint *outputs, int n_clobber,
419 ident *clobber[], ident *text)
421 assert(get_irg_phase_state(current_ir_graph) == phase_building);
422 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
423 n_outs, outputs, n_clobber, clobber, text);
426 ir_node *new_rd_strictConv(dbg_info *dbgi, ir_node *block, ir_node * irn_op, ir_mode * mode)
429 ir_graph *irg = get_Block_irg(block);
434 res = new_ir_node(dbgi, irg, block, op_Conv, mode, 1, in);
435 res->attr.conv.strict = 1;
436 irn_verify_irg(res, irg);
437 res = optimize_node(res);
441 ir_node *new_r_strictConv(ir_node *block, ir_node * irn_op, ir_mode * mode)
443 return new_rd_strictConv(NULL, block, irn_op, mode);
446 ir_node *new_d_strictConv(dbg_info *dbgi, ir_node * irn_op, ir_mode * mode)
449 assert(get_irg_phase_state(current_ir_graph) == phase_building);
450 res = new_rd_strictConv(dbgi, current_ir_graph->current_block, irn_op, mode);
454 ir_node *new_strictConv(ir_node * irn_op, ir_mode * mode)
456 return new_d_strictConv(NULL, irn_op, mode);
459 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
462 ir_graph *irg = get_Block_irg(block);
469 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
470 res->attr.div.resmode = resmode;
471 res->attr.div.no_remainder = 1;
472 res->attr.div.exc.pin_state = pin_state;
473 irn_verify_irg(res, irg);
474 res = optimize_node(res);
478 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
480 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
483 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
486 assert(get_irg_phase_state(current_ir_graph) == phase_building);
487 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
491 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
493 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
496 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
500 assert(get_irg_phase_state(irg) == phase_building);
501 /* creates a new dynamic in-array as length of in is -1 */
502 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
504 set_Block_matured(res, 0);
505 res->attr.block.irg.irg = irg;
506 res->attr.block.backedge = NULL;
507 res->attr.block.in_cg = NULL;
508 res->attr.block.cg_backedge = NULL;
509 res->attr.block.extblk = NULL;
510 res->attr.block.region = NULL;
511 res->attr.block.entity = NULL;
513 set_Block_block_visited(res, 0);
515 /* Create and initialize array for Phi-node construction. */
516 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
517 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
519 /* Immature block may not be optimized! */
520 irn_verify_irg(res, irg);
525 ir_node *new_r_immBlock(ir_graph *irg)
527 return new_rd_immBlock(NULL, irg);
530 ir_node *new_d_immBlock(dbg_info *dbgi)
532 return new_rd_immBlock(dbgi, current_ir_graph);
535 ir_node *new_immBlock(void)
537 return new_rd_immBlock(NULL, current_ir_graph);
540 void add_immBlock_pred(ir_node *block, ir_node *jmp)
542 int n = ARR_LEN(block->in) - 1;
544 assert(is_Block(block) && "Error: Must be a Block");
545 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
546 assert(is_ir_node(jmp));
548 ARR_APP1(ir_node *, block->in, jmp);
550 hook_set_irn_n(block, n, jmp, NULL);
553 void set_cur_block(ir_node *target)
555 set_r_cur_block(current_ir_graph, target);
558 void set_r_cur_block(ir_graph *irg, ir_node *target)
560 assert(target == NULL || get_irn_mode(target) == mode_BB);
561 assert(target == NULL || get_irn_irg(target) == irg);
562 irg->current_block = target;
565 ir_node *get_r_cur_block(ir_graph *irg)
567 return irg->current_block;
570 ir_node *get_cur_block(void)
572 return get_r_cur_block(current_ir_graph);
575 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
577 assert(get_irg_phase_state(irg) == phase_building);
579 inc_irg_visited(irg);
581 return get_r_value_internal(irg->current_block, pos + 1, mode);
584 ir_node *get_value(int pos, ir_mode *mode)
586 return get_r_value(current_ir_graph, pos, mode);
590 * helper function for guess_mode: recursively look for a definition for
591 * local variable @p pos, returns its mode if found.
593 static ir_mode *guess_recursively(ir_node *block, int pos)
599 if (irn_visited_else_mark(block))
602 /* already have a defintion -> we can simply look at its mode */
603 value = block->attr.block.graph_arr[pos];
605 return get_irn_mode(value);
607 /* now we try to guess, by looking at the predecessor blocks */
608 n_preds = get_irn_arity(block);
609 for (i = 0; i < n_preds; ++i) {
610 ir_node *pred_block = get_Block_cfgpred_block(block, i);
611 ir_mode *mode = guess_recursively(pred_block, pos);
616 /* no way to guess */
620 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
622 ir_node *block = irg->current_block;
623 ir_node *value = block->attr.block.graph_arr[pos+1];
626 /* already have a defintion -> we can simply look at its mode */
628 return get_irn_mode(value);
630 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
631 inc_irg_visited(irg);
632 mode = guess_recursively(block, pos+1);
633 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
638 ir_mode *ir_guess_mode(int pos)
640 return ir_r_guess_mode(current_ir_graph, pos);
643 void set_r_value(ir_graph *irg, int pos, ir_node *value)
645 assert(get_irg_phase_state(irg) == phase_building);
647 assert(pos+1 < irg->n_loc);
648 assert(is_ir_node(value));
649 irg->current_block->attr.block.graph_arr[pos + 1] = value;
652 void set_value(int pos, ir_node *value)
654 set_r_value(current_ir_graph, pos, value);
657 int r_find_value(ir_graph *irg, ir_node *value)
660 ir_node *bl = irg->current_block;
662 for (i = ARR_LEN(bl->attr.block.graph_arr); i > 1;) {
663 if (bl->attr.block.graph_arr[--i] == value)
669 int find_value(ir_node *value)
671 return r_find_value(current_ir_graph, value);
674 ir_node *get_r_store(ir_graph *irg)
676 assert(get_irg_phase_state(irg) == phase_building);
677 inc_irg_visited(irg);
678 return get_r_value_internal(irg->current_block, 0, mode_M);
681 ir_node *get_store(void)
683 return get_r_store(current_ir_graph);
686 void set_r_store(ir_graph *irg, ir_node *store)
688 ir_node *load, *pload, *pred, *in[2];
690 assert(get_irg_phase_state(irg) == phase_building);
691 /* Beware: due to dead code elimination, a store might become a Bad node even in
692 the construction phase. */
693 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
695 if (get_opt_auto_create_sync()) {
696 /* handle non-volatile Load nodes by automatically creating Sync's */
697 load = skip_Proj(store);
698 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
699 pred = get_Load_mem(load);
702 /* a Load after a Sync: move it up */
703 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
705 set_Load_mem(load, get_memop_mem(mem));
706 add_Sync_pred(pred, store);
709 pload = skip_Proj(pred);
710 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
711 /* a Load after a Load: create a new Sync */
712 set_Load_mem(load, get_Load_mem(pload));
716 store = new_r_Sync(irg->current_block, 2, in);
721 irg->current_block->attr.block.graph_arr[0] = store;
724 void set_store(ir_node *store)
726 set_r_store(current_ir_graph, store);
729 void keep_alive(ir_node *ka)
731 ir_graph *irg = get_irn_irg(ka);
732 add_End_keepalive(get_irg_end(irg), ka);
735 void ir_set_uninitialized_local_variable_func(
736 uninitialized_local_variable_func_t *func)
738 default_initialize_local_variable = func;
741 void irg_finalize_cons(ir_graph *irg)
743 set_irg_phase_state(irg, phase_high);
746 void irp_finalize_cons(void)
749 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
750 irg_finalize_cons(get_irp_irg(i));
752 irp->phase_state = phase_high;
755 ir_node *new_Const_long(ir_mode *mode, long value)
757 return new_d_Const_long(NULL, mode, value);
760 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
762 return new_d_SymConst(NULL, mode, value, kind);
764 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
766 return new_d_simpleSel(NULL, store, objptr, ent);
768 ir_node *new_defaultProj(ir_node *arg, long max_proj)
770 return new_d_defaultProj(NULL, arg, max_proj);
772 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
773 int n_outs, ir_asm_constraint *outputs,
774 int n_clobber, ident *clobber[], ident *text)
776 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
779 ir_node *new_r_Anchor(ir_graph *irg)
781 ir_node *in[anchor_last];
783 memset(in, 0, sizeof(in));
784 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);
785 res->attr.anchor.irg.irg = irg;
787 /* hack to get get_irn_irg working: set block to ourself and allow
788 * get_Block_irg for anchor */
794 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
796 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
797 res->attr.block.irg.irg = irg;
798 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
799 set_Block_matured(res, 1);
800 /* Create and initialize array for Phi-node construction. */
801 if (get_irg_phase_state(irg) == phase_building) {
802 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
803 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
805 irn_verify_irg(res, irg);