2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
47 #include "gen_ir_cons.c.inl"
50 * Language dependent variable initialization callback.
/* Installed by the frontend via ir_set_uninitialized_local_variable_func();
 * get_r_value_internal() calls it when an uninitialized local variable is
 * read in the start block.  NULL means an Unknown node is used instead. */
52 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
54 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
57 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
/* Create the default Proj (mode_X) of a Cond node and remember max_proj as
 * the Cond's default_proj number.  NOTE(review): the declaration of res, an
 * assertion and the final return are elided in this view. */
60 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
65 arg->attr.cond.default_proj = max_proj;
66 res = new_rd_Proj(db, arg, mode_X, max_proj);
70 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
71 ir_asm_constraint *inputs, int n_outs,
72 ir_asm_constraint *outputs, int n_clobber,
73 ident *clobber[], ident *text)
75 ir_graph *irg = get_irn_irg(block);
76 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
78 res->attr.assem.pin_state = op_pin_state_pinned;
79 res->attr.assem.input_constraints
80 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
81 res->attr.assem.output_constraints
82 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
83 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
84 res->attr.assem.text = text;
86 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
87 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
88 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
90 res = optimize_node(res);
91 irn_verify_irg(res, irg);
95 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
96 ir_node *objptr, ir_entity *ent)
98 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
101 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
102 symconst_symbol value, symconst_kind symkind)
104 ir_node *block = get_irg_start_block(irg);
105 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
106 res->attr.symc.kind = symkind;
107 res->attr.symc.sym = value;
109 res = optimize_node(res);
110 irn_verify_irg(res, irg);
114 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
117 sym.entity_p = symbol;
118 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
121 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
124 sym.entity_p = symbol;
125 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
128 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
132 return new_rd_SymConst(db, irg, mode, sym, symconst_type_tag);
135 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
139 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
142 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
146 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
149 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
151 return new_rd_Const_long(NULL, irg, mode, value);
153 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
154 symconst_kind symkind)
156 return new_rd_SymConst(NULL, irg, mode, value, symkind);
158 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
161 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
163 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
165 return new_rd_defaultProj(NULL, arg, max_proj);
167 ir_node *new_r_ASM(ir_node *block,
168 int arity, ir_node *in[], ir_asm_constraint *inputs,
169 int n_outs, ir_asm_constraint *outputs,
170 int n_clobber, ident *clobber[], ident *text)
172 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
175 /** Creates a Phi node with 0 predecessors. */
176 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
177 ir_mode *mode, int pos)
179 ir_graph *irg = get_irn_irg(block);
180 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
181 res->attr.phi.u.pos = pos;
182 irn_verify_irg(res, irg);
186 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
/* Remove a Phi whose operands are all either a single value or references
 * to the Phi itself, replacing the Phi by that value, then recurse into the
 * value in case it became unnecessary too.  NOTE(review): several lines of
 * the loop body (self-reference check, early returns, closing braces) are
 * elided in this view. */
188 static void try_remove_unnecessary_phi(ir_node *phi)
190 ir_node *phi_value = NULL;
191 int arity = get_irn_arity(phi);
194 /* see if all inputs are either pointing to a single value or
195 * are self references */
196 for (i = 0; i < arity; ++i) {
197 ir_node *in = get_irn_n(phi, i);
202 /** found a different value from the one we already found, can't remove
204 if (phi_value != NULL)
/* no non-self input at all: nothing to replace the Phi with */
208 if (phi_value == NULL)
211 /* if we're here then all phi inputs have been either phi_value
212 * or self-references, we can replace the phi by phi_value.
213 * We do this with an Id-node */
214 exchange(phi, phi_value);
216 /* recursively check phi_value, because it could be that we were the last
217 * phi-node in a loop-body. Then our argument is an unnecessary phi in
218 * the loop header which can be eliminated now */
219 if (is_Phi(phi_value)) {
220 try_remove_unnecessary_phi(phi_value);
225 * Computes the predecessors for the real phi node, and then
226 * allocates and returns this node. The routine called to allocate the
227 * node might optimize it away and return a real value.
228 * This function must be called with an in-array of proper size.
/* NOTE(review): the else-branch braces, the "in[i] = value;" store and the
 * final return are elided in this view. */
230 static ir_node *set_phi_arguments(ir_node *phi, int pos)
232 ir_node *block = get_nodes_block(phi);
233 ir_graph *irg = get_irn_irg(block);
234 int arity = get_irn_arity(block);
235 ir_node **in = ALLOCAN(ir_node*, arity);
236 ir_mode *mode = get_irn_mode(phi);
239 /* This loop goes to all predecessor blocks of the block the Phi node
240 is in and there finds the operands of the Phi node by calling
241 get_r_value_internal. */
242 for (i = 0; i < arity; ++i) {
243 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
245 if (is_Bad(cfgpred)) {
246 value = new_r_Bad(irg);
248 value = get_r_value_internal(cfgpred, pos, mode);
/* morph the placeholder Phi0 into a real Phi now that its inputs exist */
253 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
254 set_irn_in(phi, arity, in);
255 set_irn_op(phi, op_Phi);
257 irn_verify_irg(phi, irg);
259 /* Memory Phis in endless loops must be kept alive.
260 As we can't distinguish these easily we keep all of them alive. */
261 if (is_Phi(phi) && mode == mode_M)
262 add_End_keepalive(get_irg_end(irg), phi);
264 try_remove_unnecessary_phi(phi);
269 * This function returns the last definition of a value. In case
270 * this value was last defined in a previous block, Phi nodes are
271 * inserted. If the part of the firm graph containing the definition
272 * is not yet constructed, a dummy Phi node is returned.
274 * @param block the current block
275 * @param pos the value number of the value searched
276 * @param mode the mode of this value (needed for Phi construction)
/* NOTE(review): the early "return res" for an already-known definition, the
 * else-branch braces and the final return are elided in this view. */
278 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
280 ir_node *res = block->attr.block.graph_arr[pos];
284 /* in a matured block we can immediately determine the phi arguments */
285 if (block->attr.block.is_matured) {
286 int arity = get_irn_arity(block);
287 /* no predecessors: use unknown value */
288 if (arity == 0 && block == get_irg_start_block(get_irn_irg(block))) {
289 ir_graph *irg = get_irn_irg(block);
290 if (default_initialize_local_variable != NULL) {
291 ir_node *rem = get_r_cur_block(irg);
292 set_r_cur_block(irg, block);
293 res = default_initialize_local_variable(irg, mode, pos - 1);
294 set_r_cur_block(irg, rem);
296 res = new_r_Unknown(irg, mode);
298 /* one predecessor just use its value */
299 } else if (arity == 1) {
300 ir_node *cfgpred = get_Block_cfgpred_block(block, 0);
301 if (is_Bad(cfgpred)) {
304 res = get_r_value_internal(cfgpred, pos, mode);
306 /* multiple predecessors construct Phi */
308 res = new_rd_Phi0(NULL, block, mode, pos);
309 /* enter phi0 into our variable value table to break cycles
310 * arising from set_phi_arguments */
311 block->attr.block.graph_arr[pos] = res;
312 res = set_phi_arguments(res, pos);
315 /* in case of immature block we have to keep a Phi0 */
316 res = new_rd_Phi0(NULL, block, mode, pos);
317 /* enqueue phi so we can set arguments once the block matures */
318 res->attr.phi.next = block->attr.block.phis;
319 block->attr.block.phis = res;
/* remember the (possibly placeholder) definition for later lookups */
321 block->attr.block.graph_arr[pos] = res;
325 /* ************************************************************************** */
328 * Finalize a Block node, when all control flows are known.
329 * Acceptable parameters are only Block nodes.
/* NOTE(review): the local variable declarations (irg, n_preds, phi, next,
 * new_value), the early return for an already matured block and several
 * closing braces are elided in this view. */
331 void mature_immBlock(ir_node *block)
338 assert(is_Block(block));
339 if (get_Block_matured(block))
342 irg = get_irn_irg(block);
343 n_preds = ARR_LEN(block->in) - 1;
344 /* Fix block parameters */
345 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
347 /* Traverse a chain of Phi nodes attached to this block and mature
349 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
351 int pos = phi->attr.phi.u.pos;
353 next = phi->attr.phi.next;
354 new_value = set_phi_arguments(phi, pos);
/* only overwrite the table entry if the Phi0 is still the current value */
355 if (block->attr.block.graph_arr[pos] == phi) {
356 block->attr.block.graph_arr[pos] = new_value;
360 block->attr.block.is_matured = 1;
362 /* Now, as the block is a finished Firm node, we can optimize it.
363 Since other nodes have been allocated since the block was created
364 we can not free the node on the obstack. Therefore we have to call
366 Unfortunately the optimization does not change a lot, as all allocated
367 nodes refer to the unoptimized node.
368 We can call optimize_in_place_2(), as global cse has no effect on blocks.
370 block = optimize_in_place_2(block);
371 irn_verify_irg(block, irg);
374 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
376 assert(get_irg_phase_state(current_ir_graph) == phase_building);
377 return new_rd_Const_long(db, current_ir_graph, mode, value);
380 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
383 assert(is_Cond(arg) || is_Bad(arg));
384 assert(get_irg_phase_state(current_ir_graph) == phase_building);
386 arg->attr.cond.default_proj = max_proj;
387 res = new_d_Proj(db, arg, mode_X, max_proj);
391 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
394 assert(get_irg_phase_state(current_ir_graph) == phase_building);
395 return new_rd_Sel(db, current_ir_graph->current_block,
396 store, objptr, 0, NULL, ent);
399 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
402 assert(get_irg_phase_state(current_ir_graph) == phase_building);
403 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
406 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
407 ir_asm_constraint *inputs,
408 int n_outs, ir_asm_constraint *outputs, int n_clobber,
409 ident *clobber[], ident *text)
411 assert(get_irg_phase_state(current_ir_graph) == phase_building);
412 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
413 n_outs, outputs, n_clobber, clobber, text);
416 ir_node *new_rd_strictConv(dbg_info *dbgi, ir_node *block, ir_node * irn_op, ir_mode * mode)
419 ir_graph *irg = get_Block_irg(block);
424 res = new_ir_node(dbgi, irg, block, op_Conv, mode, 1, in);
425 res->attr.conv.strict = 1;
426 res = optimize_node(res);
427 irn_verify_irg(res, irg);
431 ir_node *new_r_strictConv(ir_node *block, ir_node * irn_op, ir_mode * mode)
433 return new_rd_strictConv(NULL, block, irn_op, mode);
436 ir_node *new_d_strictConv(dbg_info *dbgi, ir_node * irn_op, ir_mode * mode)
439 assert(get_irg_phase_state(current_ir_graph) == phase_building);
440 res = new_rd_strictConv(dbgi, current_ir_graph->current_block, irn_op, mode);
444 ir_node *new_strictConv(ir_node * irn_op, ir_mode * mode)
446 return new_d_strictConv(NULL, irn_op, mode);
449 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
452 ir_graph *irg = get_Block_irg(block);
459 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
460 res->attr.divmod.resmode = resmode;
461 res->attr.divmod.no_remainder = 1;
462 res->attr.divmod.exc.pin_state = pin_state;
463 res = optimize_node(res);
464 irn_verify_irg(res, irg);
468 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
470 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
473 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
476 assert(get_irg_phase_state(current_ir_graph) == phase_building);
477 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
481 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
483 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
/* Create an immature Block: a Block whose control-flow predecessors are not
 * yet known.  Predecessors are appended with add_immBlock_pred() and the
 * block is finished with mature_immBlock().  NOTE(review): the declaration
 * of res, possibly further attribute initializations (e.g. the phi list)
 * and the final return are elided in this view. */
486 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
490 assert(get_irg_phase_state(irg) == phase_building);
491 /* creates a new dynamic in-array as length of in is -1 */
492 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
494 res->attr.block.is_matured = 0;
495 res->attr.block.is_dead = 0;
496 res->attr.block.irg.irg = irg;
497 res->attr.block.backedge = NULL;
498 res->attr.block.in_cg = NULL;
499 res->attr.block.cg_backedge = NULL;
500 res->attr.block.extblk = NULL;
501 res->attr.block.region = NULL;
502 res->attr.block.entity = NULL;
504 set_Block_block_visited(res, 0);
506 /* Create and initialize array for Phi-node construction. */
507 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
508 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
510 /* Immature block may not be optimized! */
511 irn_verify_irg(res, irg);
516 ir_node *new_r_immBlock(ir_graph *irg)
518 return new_rd_immBlock(NULL, irg);
521 ir_node *new_d_immBlock(dbg_info *dbgi)
523 return new_rd_immBlock(dbgi, current_ir_graph);
526 ir_node *new_immBlock(void)
528 return new_rd_immBlock(NULL, current_ir_graph);
/* Append a control-flow predecessor to an immature block.  Only legal before
 * the block has been matured.  NOTE(review): a blank/comment line before the
 * hook call and the closing brace are elided in this view. */
531 void add_immBlock_pred(ir_node *block, ir_node *jmp)
/* block->in[0] is the block itself, so the predecessor count is len - 1 */
533 int n = ARR_LEN(block->in) - 1;
535 assert(is_Block(block) && "Error: Must be a Block");
536 assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
537 assert(is_ir_node(jmp));
539 ARR_APP1(ir_node *, block->in, jmp);
541 hook_set_irn_n(block, n, jmp, NULL);
544 void set_cur_block(ir_node *target)
546 current_ir_graph->current_block = target;
549 void set_r_cur_block(ir_graph *irg, ir_node *target)
551 irg->current_block = target;
554 ir_node *get_r_cur_block(ir_graph *irg)
556 return irg->current_block;
559 ir_node *get_cur_block(void)
561 return get_r_cur_block(current_ir_graph);
/* Read local variable @p pos in the current block, inserting Phi nodes as
 * needed.  NOTE(review): additional assertions between the two visible lines
 * are elided in this view. */
564 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
566 assert(get_irg_phase_state(irg) == phase_building);
/* slot 0 of graph_arr is reserved for the memory state, hence pos + 1 */
569 return get_r_value_internal(irg->current_block, pos + 1, mode);
572 ir_node *get_value(int pos, ir_mode *mode)
574 return get_r_value(current_ir_graph, pos, mode);
578 * helper function for guess_mode: recursively look for a definition for
579 * local variable @p pos, returns its mode if found.
/* Uses the IRN_VISITED resource to cut cycles in the CFG walk.
 * NOTE(review): local declarations, NULL checks and the return statements
 * are elided in this view. */
581 static ir_mode *guess_recursively(ir_node *block, int pos)
587 if (irn_visited(block))
589 mark_irn_visited(block);
591 /* already have a defintion -> we can simply look at its mode */
592 value = block->attr.block.graph_arr[pos];
594 return get_irn_mode(value);
596 /* now we try to guess, by looking at the predecessor blocks */
597 n_preds = get_irn_arity(block);
598 for (i = 0; i < n_preds; ++i) {
599 ir_node *pred_block = get_Block_cfgpred_block(block, i);
600 ir_mode *mode = guess_recursively(pred_block, pos);
605 /* no way to guess */
/* Guess the mode of local variable @p pos: use the current block's known
 * definition if present, otherwise search the predecessor blocks with
 * guess_recursively().  NOTE(review): the mode declaration, the NULL check
 * and the final return are elided in this view. */
609 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
611 ir_node *block = irg->current_block;
612 ir_node *value = block->attr.block.graph_arr[pos+1];
615 /* already have a defintion -> we can simply look at its mode */
617 return get_irn_mode(value);
/* reserve the visited flag for the recursive CFG walk */
619 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
620 inc_irg_visited(irg);
621 mode = guess_recursively(block, pos+1);
622 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
627 ir_mode *ir_guess_mode(int pos)
629 return ir_r_guess_mode(current_ir_graph, pos);
/* Define local variable @p pos in the current block.  NOTE(review): one
 * additional assertion between the visible ones is elided in this view. */
632 void set_r_value(ir_graph *irg, int pos, ir_node *value)
634 assert(get_irg_phase_state(irg) == phase_building);
636 assert(pos+1 < irg->n_loc);
637 assert(is_ir_node(value));
/* slot 0 of graph_arr is reserved for the memory state, hence pos + 1 */
638 irg->current_block->attr.block.graph_arr[pos + 1] = value;
641 void set_value(int pos, ir_node *value)
643 set_r_value(current_ir_graph, pos, value);
/* Search the current block's value table for @p value and report its local
 * variable number.  NOTE(review): the loop variable declaration and both
 * return statements (found / not found) are elided in this view. */
646 int r_find_value(ir_graph *irg, ir_node *value)
649 ir_node *bl = irg->current_block;
/* slot 0 is the memory state, so only slots >= 1 are real variables */
651 for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
652 if (bl->attr.block.graph_arr[i] == value)
657 int find_value(ir_node *value)
659 return r_find_value(current_ir_graph, value);
662 ir_node *get_r_store(ir_graph *irg)
664 assert(get_irg_phase_state(irg) == phase_building);
665 return get_r_value_internal(irg->current_block, 0, mode_M);
668 ir_node *get_store(void)
670 return get_r_store(current_ir_graph);
/* Set the current memory state of the graph's current block.  When the
 * auto-create-Sync option is on, consecutive non-volatile Loads are
 * parallelized by moving them above an existing Sync or by creating a new
 * two-input Sync.  NOTE(review): several branch/brace lines and the in[]
 * setup for the new Sync are elided in this view. */
673 void set_r_store(ir_graph *irg, ir_node *store)
675 ir_node *load, *pload, *pred, *in[2];
677 assert(get_irg_phase_state(irg) == phase_building);
678 /* Beware: due to dead code elimination, a store might become a Bad node even in
679 the construction phase. */
680 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
682 if (get_opt_auto_create_sync()) {
683 /* handle non-volatile Load nodes by automatically creating Sync's */
684 load = skip_Proj(store);
685 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
686 pred = get_Load_mem(load);
689 /* a Load after a Sync: move it up */
690 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
692 set_Load_mem(load, get_memop_mem(mem));
693 add_Sync_pred(pred, store);
696 pload = skip_Proj(pred);
697 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
698 /* a Load after a Load: create a new Sync */
699 set_Load_mem(load, get_Load_mem(pload));
703 store = new_r_Sync(irg->current_block, 2, in);
/* slot 0 of graph_arr holds the memory state */
708 irg->current_block->attr.block.graph_arr[0] = store;
711 void set_store(ir_node *store)
713 set_r_store(current_ir_graph, store);
716 void r_keep_alive(ir_graph *irg, ir_node *ka)
718 add_End_keepalive(get_irg_end(irg), ka);
721 void keep_alive(ir_node *ka)
723 r_keep_alive(current_ir_graph, ka);
726 void ir_set_uninitialized_local_variable_func(
727 uninitialized_local_variable_func_t *func)
729 default_initialize_local_variable = func;
732 void irg_finalize_cons(ir_graph *irg)
734 set_irg_phase_state(irg, phase_high);
737 void irp_finalize_cons(void)
740 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
741 irg_finalize_cons(get_irp_irg(i));
743 irp->phase_state = phase_high;
746 ir_node *new_Const_long(ir_mode *mode, long value)
748 return new_d_Const_long(NULL, mode, value);
751 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
753 return new_d_SymConst(NULL, mode, value, kind);
755 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
757 return new_d_simpleSel(NULL, store, objptr, ent);
759 ir_node *new_defaultProj(ir_node *arg, long max_proj)
761 return new_d_defaultProj(NULL, arg, max_proj);
763 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
764 int n_outs, ir_asm_constraint *outputs,
765 int n_clobber, ident *clobber[], ident *text)
767 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
/* Create the graph's Anchor pseudo-node (mode_ANY) with anchor_last inputs,
 * all initially NULL.  NOTE(review): the declaration of res and the lines
 * implementing the self-block hack plus the return are elided in this
 * view. */
770 ir_node *new_r_Anchor(ir_graph *irg)
772 ir_node *in[anchor_last];
774 memset(in, 0, sizeof(in));
775 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);
776 res->attr.anchor.irg.irg = irg;
778 /* hack to get get_irn_irg working: set block to ourself and allow
779 * get_Block_irg for anchor */
785 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
787 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
788 res->attr.block.irg.irg = irg;
789 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
790 set_Block_matured(res, 1);
791 /* Create and initialize array for Phi-node construction. */
792 if (get_irg_phase_state(irg) == phase_building) {
793 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
794 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
796 irn_verify_irg(res, irg);