2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
31 #include "irgraph_t.h"
41 #include "irbackedge_t.h"
43 #include "iredges_t.h"
/* Verification hook: IRN_VRFY_IRG is a no-op in release builds and calls
   irn_vrfy_irg() in checking builds.
   NOTE(review): the #ifdef/#else/#endif lines selecting between the two
   definitions are elided in this extract -- confirm against the full file. */
47 /* when we need verifying */
49 # define IRN_VRFY_IRG(res, irg)
51 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
/* Callback the frontend may install to supply an initial value for a local
   variable that is read before being written; NULL means "no callback"
   (phi_merge then falls back to new_Unknown()). */
55 * Language dependent variable initialization callback.
57 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* NEW_BD_BINOP: expands to a "bd" (block + dbg_info) constructor for a
   2-operand node: builds the node on current_ir_graph, runs optimize_node()
   and verifies it.  NOTE(review): several continuation lines of the macro
   body (result declaration, in[] array setup, return, braces) are elided
   in this extract. */
59 /* creates a bd constructor for a binop */
60 #define NEW_BD_BINOP(instr) \
62 new_bd_##instr(dbg_info *db, ir_node *block, \
63 ir_node *op1, ir_node *op2, ir_mode *mode) \
67 ir_graph *irg = current_ir_graph; \
70 res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
71 res = optimize_node(res); \
72 IRN_VRFY_IRG(res, irg); \
/* NEW_BD_UNOP: same pattern for 1-operand nodes (operand passed by
   address as a one-element "array"). */
76 /* creates a bd constructor for an unop */
77 #define NEW_BD_UNOP(instr) \
79 new_bd_##instr(dbg_info *db, ir_node *block, \
80 ir_node *op, ir_mode *mode) \
83 ir_graph *irg = current_ir_graph; \
84 res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
85 res = optimize_node(res); \
86 IRN_VRFY_IRG(res, irg); \
/* NEW_BD_DIVOP: division-like ops take (memop, op1, op2), produce a
   mode_T tuple, and carry divmod attributes: pin state, result mode and
   a no_remainder flag (initialized to 0). */
90 /* creates a bd constructor for an divop */
91 #define NEW_BD_DIVOP(instr) \
93 new_bd_##instr(dbg_info *db, ir_node *block, \
94 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
98 ir_graph *irg = current_ir_graph; \
102 res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
103 res->attr.divmod.exc.pin_state = state; \
104 res->attr.divmod.resmode = mode; \
105 res->attr.divmod.no_remainder = 0; \
106 res = optimize_node(res); \
107 IRN_VRFY_IRG(res, irg); \
/* NEW_RD_BINOP: "rd" (raw + dbg_info) constructor wrapper.  Temporarily
   switches current_ir_graph to the caller-supplied irg, delegates to the
   corresponding new_bd_* constructor, then restores the previous graph.
   NOTE(review): declaration of res, return statement and braces are elided
   in this extract. */
111 /* creates a rd constructor for a binop */
112 #define NEW_RD_BINOP(instr) \
114 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
115 ir_node *op1, ir_node *op2, ir_mode *mode) \
118 ir_graph *rem = current_ir_graph; \
119 current_ir_graph = irg; \
120 res = new_bd_##instr(db, block, op1, op2, mode); \
121 current_ir_graph = rem; \
/* NEW_RD_UNOP: same save/switch/restore pattern for unary ops. */
125 /* creates a rd constructor for an unop */
126 #define NEW_RD_UNOP(instr) \
128 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
129 ir_node *op, ir_mode *mode) \
132 ir_graph *rem = current_ir_graph; \
133 current_ir_graph = irg; \
134 res = new_bd_##instr(db, block, op, mode); \
135 current_ir_graph = rem; \
/* NEW_RD_DIVOP: same pattern for division-like ops (extra memop and
   pin-state parameters forwarded unchanged). */
139 /* creates a rd constructor for an divop */
140 #define NEW_RD_DIVOP(instr) \
142 new_rd_##instr(dbg_info *db, ir_graph *irg, ir_node *block, \
143 ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) \
146 ir_graph *rem = current_ir_graph; \
147 current_ir_graph = irg; \
148 res = new_bd_##instr(db, block, memop, op1, op2, mode, state);\
149 current_ir_graph = rem; \
/* NEW_D_BINOP / NEW_D_UNOP: "d" constructors build in the current block
   of current_ir_graph -- the stateful, frontend-facing variant. */
153 /* creates a d constructor for an binop */
154 #define NEW_D_BINOP(instr) \
156 new_d_##instr(dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) { \
157 return new_bd_##instr(db, current_ir_graph->current_block, op1, op2, mode); \
160 /* creates a d constructor for an unop */
161 #define NEW_D_UNOP(instr) \
163 new_d_##instr(dbg_info *db, ir_node *op, ir_mode *mode) { \
164 return new_bd_##instr(db, current_ir_graph->current_block, op, mode); \
167 #include "gen_ir_cons.c.inl"
/* Creates a Start node in the given block of current_ir_graph: mode_T
   (tuple of initial memory, arguments, frame pointer, ...), no inputs.
   NOTE(review): braces, the res declaration and the return are elided in
   this extract. */
169 static ir_node *new_bd_Start(dbg_info *db, ir_node *block)
172 ir_graph *irg = current_ir_graph;
174 res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
176 IRN_VRFY_IRG(res, irg);
/* Creates an End node: mode_X with dynamic arity (-1) so keep-alive edges
   can be appended later (see add_End_keepalive). */
180 static ir_node *new_bd_End(dbg_info *db, ir_node *block)
183 ir_graph *irg = current_ir_graph;
185 res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
187 IRN_VRFY_IRG(res, irg);
/* Builds a Phi with a full predecessor array.  If any input is Unknown the
   node is NOT optimized (optimize_node could fold it prematurely).  Memory
   Phis (mode_M) are conservatively kept alive via the End node because
   memory Phis in endless loops would otherwise be unreachable from End.
   NOTE(review): loop body lines setting has_unknown, declarations and the
   return are elided in this extract. */
192 * Creates a Phi node with all predecessors. Calling this constructor
193 * is only allowed if the corresponding block is mature.
195 static ir_node *new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
198 ir_graph *irg = current_ir_graph;
202 /* Don't assert that block matured: the use of this constructor is strongly
204 if (get_Block_matured(block))
205 assert(get_irn_arity(block) == arity);
207 res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
/* back-edge bookkeeping array, one flag per predecessor */
209 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
211 for (i = arity - 1; i >= 0; --i)
212 if (is_Unknown(in[i])) {
217 if (!has_unknown) res = optimize_node(res);
218 IRN_VRFY_IRG(res, irg);
220 /* Memory Phis in endless loops must be kept alive.
221 As we can't distinguish these easily we keep all of them alive. */
222 if (is_Phi(res) && mode == mode_M)
223 add_End_keepalive(get_irg_end(irg), res);
/* Creates a typed Const.  Constants always live in the start block; the
   mode is taken from the tarval.  The post-optimize assert documents that
   CSE must only merge constants of identical type. */
227 static ir_node *new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp)
230 ir_graph *irg = current_ir_graph;
232 res = new_ir_node(db, irg, get_irg_start_block(irg), op_Const, get_tarval_mode(con), 0, NULL);
233 res->attr.con.tarval = con;
234 set_Const_type(res, tp); /* Call method because of complex assertion. */
235 res = optimize_node (res);
236 assert(get_Const_type(res) == tp);
237 IRN_VRFY_IRG(res, irg);
240 } /* new_bd_Const_type */
/* Untyped Const: delegates with firm_unknown_type.
   NOTE(review): delegates to new_rd_Const_type (not new_bd_Const_type)
   although irg == current_ir_graph here -- equivalent, just indirect. */
242 static ir_node *new_bd_Const(dbg_info *db, tarval *con)
244 ir_graph *irg = current_ir_graph;
246 return new_rd_Const_type(db, irg, con, firm_unknown_type);
/* Convenience: build the tarval from a C long in the given mode. */
249 static ir_node *new_bd_Const_long(dbg_info *db, ir_mode *mode, long value)
251 ir_graph *irg = current_ir_graph;
253 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
254 } /* new_bd_Const_long */
/* Creates the default Proj of a Cond node and records max_proj as the
   Cond's default_proj attribute.  Note the side effect on arg.
   NOTE(review): res declaration, return and braces are elided here. */
256 static ir_node *new_bd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
260 assert(arg->op == op_Cond);
261 arg->attr.cond.default_proj = max_proj;
262 res = new_rd_Proj(db, arg, mode_X, max_proj);
264 } /* new_bd_defaultProj */
/* Creates a Sel node: selects entity ent from the compound pointed to by
   objptr, with `arity` index operands.  Result mode is mode_P_code for
   method entities (function selection), mode_P_data otherwise.  The real
   in-array is [store, objptr, indices...] (hence r_arity = arity + 2,
   assembled on the stack via NEW_ARR_A).
   NOTE(review): r_in/r_arity declarations, r_in[0]/r_in[1] assignments
   and the final parameter (ir_entity *ent) line are elided here. */
266 static ir_node *new_bd_Sel(dbg_info *db, ir_node *block, ir_node *store,
267 ir_node *objptr, int arity, ir_node **in,
273 ir_graph *irg = current_ir_graph;
274 ir_mode *mode = is_Method_type(get_entity_type(ent)) ? mode_P_code : mode_P_data;
276 assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
279 NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
282 memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
284 * Sel's can select functions which should be of mode mode_P_code.
286 res = new_ir_node(db, irg, block, op_Sel, mode, r_arity, r_in);
287 res->attr.sel.entity = ent;
288 res = optimize_node(res);
289 IRN_VRFY_IRG(res, irg);
/* Creates a SymConst: a constant whose value is a symbolic reference
   (entity address/offset, type tag/size/align, ...) selected by symkind,
   with an associated source type tp. */
293 static ir_node *new_bd_SymConst_type(dbg_info *db, ir_node *block,
294 ir_mode *mode, symconst_symbol value,
295 symconst_kind symkind, ir_type *tp)
297 ir_graph *irg = current_ir_graph;
298 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
300 res->attr.symc.kind = symkind;
301 res->attr.symc.sym = value;
302 res->attr.symc.tp = tp;
304 res = optimize_node(res);
305 IRN_VRFY_IRG(res, irg);
307 } /* new_bd_SymConst_type */
/* Creates an empty Sync (mode_M, dynamic arity -1); predecessors are
   appended afterwards via add_Sync_pred (see new_rd_Sync). */
309 static ir_node *new_bd_Sync(dbg_info *db, ir_node *block)
312 ir_graph *irg = current_ir_graph;
314 res = new_ir_node(db, irg, block, op_Sync, mode_M, -1, NULL);
315 /* no need to call optimize node here, Sync are always created with no predecessors */
316 IRN_VRFY_IRG(res, irg);
/* Creates an inline-assembler node: pinned, mode_T, with `arity` data
   inputs.  Input/output constraint arrays and the clobber list are copied
   onto the graph's obstack so the caller's arrays need not outlive the
   node.  NOTE(review): res declaration, return and braces are elided. */
320 static ir_node *new_bd_ASM(dbg_info *db, ir_node *block, int arity,
321 ir_node *in[], ir_asm_constraint *inputs, int n_outs,
322 ir_asm_constraint *outputs, int n_clobber,
323 ident *clobber[], ident *text)
326 ir_graph *irg = current_ir_graph;
328 res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
329 res->attr.assem.pin_state = op_pin_state_pinned;
330 res->attr.assem.input_constraints
331 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
332 res->attr.assem.output_constraints
333 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
334 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
335 res->attr.assem.text = text;
/* deep-copy caller-owned arrays into the obstack-backed attribute arrays */
337 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
338 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
339 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
341 res = optimize_node(res);
342 IRN_VRFY_IRG(res, irg);
346 /* --------------------------------------------- */
347 /* private interfaces, for professional use only */
348 /* --------------------------------------------- */
/* rd-level constructors: save current_ir_graph, switch to the target
   graph, delegate to the bd constructor, restore.  The graph is either
   passed explicitly (Start/End) or derived from the block (Phi).
   NOTE(review): res declarations, returns and braces are elided in this
   extract for all three functions below. */
350 ir_node *new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
352 ir_graph *rem = current_ir_graph;
355 current_ir_graph = irg;
356 res = new_bd_Start(db, block);
357 current_ir_graph = rem;
362 ir_node *new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
365 ir_graph *rem = current_ir_graph;
367 current_ir_graph = irg;
368 res = new_bd_End(db, block);
369 current_ir_graph = rem;
374 /* Creates a Phi node with all predecessors. Calling this constructor
375 is only allowed if the corresponding block is mature. */
376 ir_node *new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
379 ir_graph *rem = current_ir_graph;
/* graph is implied by the block here, not passed as a parameter */
381 current_ir_graph = get_Block_irg(block);
382 res = new_bd_Phi(db, block,arity, in, mode);
383 current_ir_graph = rem;
/* Typed constant in graph irg (switch-graph wrapper around
   new_bd_Const_type). */
388 ir_node *new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp)
391 ir_graph *rem = current_ir_graph;
393 current_ir_graph = irg;
394 res = new_bd_Const_type(db, con, tp);
395 current_ir_graph = rem;
398 } /* new_rd_Const_type */
/* Untyped constant.  NOTE(review): contains a disabled alternative code
   path (the USE_ORIGINAL comments) -- the active path inlines what the
   commented-out new_rd_Const_type call would do; candidate for cleanup
   once the full file is available. */
400 ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
403 //#ifdef USE_ORIGINAL
404 ir_graph *rem = current_ir_graph;
406 current_ir_graph = irg;
407 res = new_bd_Const_type(db, con, firm_unknown_type);
408 current_ir_graph = rem;
410 // res = new_rd_Const_type(db, irg, con, firm_unknown_type);
/* Constant from a C long in the given mode. */
416 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value)
418 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
419 } /* new_rd_Const_long */
/* Default Proj of a Cond; graph-independent, so just delegates. */
421 ir_node *new_rd_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
423 return new_bd_defaultProj(db, arg, max_proj);
424 } /* new_rd_defaultProj */
/* Sel with no index operands (simple member selection): arity 0, in NULL.
   Graph derived from the block. */
426 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
427 ir_node *objptr, ir_entity *ent)
430 ir_graph *rem = current_ir_graph;
432 current_ir_graph = get_Block_irg(block);
433 res = new_bd_Sel(db, block, store, objptr, 0, NULL, ent);
434 current_ir_graph = rem;
437 } /* new_rd_simpleSel */
/* SymConst always lives in the start block of irg. */
439 ir_node *new_rd_SymConst_type(dbg_info *db, ir_graph *irg, ir_mode *mode,
440 symconst_symbol value, symconst_kind symkind,
444 ir_graph *rem = current_ir_graph;
445 ir_node *block = get_irg_start_block(irg);
447 current_ir_graph = irg;
448 res = new_bd_SymConst_type(db, block, mode, value, symkind, tp);
449 current_ir_graph = rem;
452 } /* new_rd_SymConst_type */
/* Untyped SymConst: firm_unknown_type stand-in. */
454 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
455 symconst_symbol value, symconst_kind symkind)
457 return new_rd_SymConst_type(db, irg, mode, value, symkind, firm_unknown_type);
458 } /* new_rd_SymConst */
/* Convenience constructors wrapping new_rd_SymConst_type for each
   symconst_kind: entity address, entity offset, type tag, type size and
   type alignment.  Each fills the symconst_symbol union appropriately.
   NOTE(review): the `symconst_symbol sym;` declarations and (for the
   type-based variants) the `sym.type_p = symbol;` assignments are elided
   in this extract. */
460 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
463 sym.entity_p = symbol;
464 return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
465 } /* new_rd_SymConst_addr_ent */
467 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
470 sym.entity_p = symbol;
471 return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
472 } /* new_rd_SymConst_ofs_ent */
474 ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
478 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
479 } /* new_rd_SymConst_type_tag */
481 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
485 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
486 } /* new_rd_SymConst_size */
488 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
492 return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
493 } /* new_rd_SymConst_align */
/* Builds a Sync with `arity` memory predecessors: creates the empty Sync
   in the block's graph, then appends each predecessor (Sync nodes have
   dynamic arity, see new_bd_Sync). */
495 ir_node *new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[])
498 ir_graph *rem = current_ir_graph;
501 current_ir_graph = get_Block_irg(block);
502 res = new_bd_Sync(db, block);
503 current_ir_graph = rem;
505 for (i = 0; i < arity; ++i)
506 add_Sync_pred(res, in[i]);
/* ASM: graph derived from the block; all constraint/clobber arrays are
   forwarded to new_bd_ASM which copies them to the obstack. */
511 ir_node *new_rd_ASM(dbg_info *db, ir_node *block,
512 int arity, ir_node *in[], ir_asm_constraint *inputs,
513 int n_outs, ir_asm_constraint *outputs,
514 int n_clobber, ident *clobber[], ident *text)
517 ir_graph *rem = current_ir_graph;
519 current_ir_graph = get_Block_irg(block);
520 res = new_bd_ASM(db, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
521 current_ir_graph = rem;
/* "r" constructors: the public no-debug-info layer.  Each is a thin
   wrapper that forwards to the matching new_rd_* constructor with
   db == NULL.  new_r_Bad / new_r_NoMem differ: Bad and NoMem are unique
   per graph, so they return the graph's singleton instead of allocating.
   NOTE(review): opening/closing braces of these one-line wrappers are
   elided in this extract. */
526 ir_node *new_r_Start(ir_graph *irg, ir_node *block)
528 return new_rd_Start(NULL, irg, block);
530 ir_node *new_r_End(ir_graph *irg, ir_node *block)
532 return new_rd_End(NULL, irg, block);
534 ir_node *new_r_Const(ir_graph *irg, tarval *con)
536 return new_rd_Const(NULL, irg, con);
538 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
540 return new_rd_Const_long(NULL, irg, mode, value);
542 ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp)
544 return new_rd_Const_type(NULL, irg, con, tp);
546 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
547 symconst_kind symkind)
549 return new_rd_SymConst(NULL, irg, mode, value, symkind);
/* NOTE(review): delegates to new_rd_Sel (not new_rd_simpleSel) with
   arity 0 -- presumably equivalent; confirm against the full file. */
551 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
554 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
556 ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
558 return new_rd_Phi(NULL, block, arity, in, mode);
560 ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[])
562 return new_rd_Sync(NULL, block, arity, in);
564 ir_node *new_r_defaultProj(ir_node *arg, long max_proj)
566 return new_rd_defaultProj(NULL, arg, max_proj);
/* Bad and NoMem exist exactly once per graph -- return the singletons. */
568 ir_node *new_r_Bad(ir_graph *irg)
570 return get_irg_bad(irg);
572 ir_node *new_r_NoMem(ir_graph *irg)
574 return get_irg_no_mem(irg);
576 ir_node *new_r_ASM(ir_node *block,
577 int arity, ir_node *in[], ir_asm_constraint *inputs,
578 int n_outs, ir_asm_constraint *outputs,
579 int n_clobber, ident *clobber[], ident *text)
581 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
584 /** ********************/
585 /** public interfaces */
586 /** construction tools */
/* "d" constructors build in current_ir_graph->current_block -- the
   stateful interface used during frontend graph construction.  These two
   inline the new_ir_node call instead of going through the bd layer.
   NOTE(review): res declarations, returns and braces are elided here. */
588 ir_node *new_d_Start(dbg_info *db)
592 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
593 op_Start, mode_T, 0, NULL);
595 res = optimize_node(res);
596 IRN_VRFY_IRG(res, current_ir_graph);
600 ir_node *new_d_End(dbg_info *db)
603 res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
604 op_End, mode_X, -1, NULL);
605 res = optimize_node(res);
606 IRN_VRFY_IRG(res, current_ir_graph);
611 /* ***********************************************************************/
612 /* Methods necessary for automatic Phi node creation */
614 ir_node *phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
615 ir_node *get_r_value_internal (ir_node *block, int pos, ir_mode *mode);
616 ir_node *new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
617 ir_node *new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
619 Call Graph: ( A ---> B == A "calls" B)
621 get_value mature_immBlock
629 get_r_value_internal |
633 new_rd_Phi0 new_rd_Phi_in
635 * *************************************************************************** */
/* Phi0: a zero-predecessor placeholder Phi used to break recursion while
   a block is still immature; mature_immBlock/phi_merge later replace it
   with a real Phi.  Deliberately NOT passed through optimize_node. */
637 /** Creates a Phi node with 0 predecessors. */
638 static inline ir_node *new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
642 res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
643 IRN_VRFY_IRG(res, irg);
/* Core of SSA construction: allocates a candidate Phi, then scans the
   inputs to decide whether a real Phi is needed.  If at most one distinct
   real predecessor remains (after skipping Ids, self-references and the
   Phi0 being replaced), the candidate is freed again from the obstack and
   the single known value is returned instead.
   NOTE(review): several lines are elided in this extract (declaration of
   i/has-known bookkeeping, the "break" of the scan loop, the Unknown
   fallback branch around original line 711, and the return). */
649 * Internal constructor of a Phi node by a phi_merge operation.
651 * @param irg the graph on which the Phi will be constructed
652 * @param block the block in which the Phi will be constructed
653 * @param mode the mod eof the Phi node
654 * @param in the input array of the phi node
655 * @param ins number of elements in the input array
656 * @param phi0 in non-NULL: the Phi0 node in the same block that represents
657 * the value for which the new Phi is constructed
659 static inline ir_node *new_rd_Phi_in(ir_graph *irg, ir_node *block,
660 ir_mode *mode, ir_node **in, int ins,
664 ir_node *res, *known;
666 /* Allocate a new node on the obstack. The allocation copies the in
668 res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
669 res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);
671 /* This loop checks whether the Phi has more than one predecessor.
672 If so, it is a real Phi node and we break the loop. Else the
673 Phi node merges the same definition on several paths and therefore
675 Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
677 for (i = ins - 1; i >= 0; --i) {
680 in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
682 /* Optimize self referencing Phis: We can't detect them yet properly, as
683 they still refer to the Phi0 they will replace. So replace right now. */
684 if (phi0 && in[i] == phi0)
687 if (in[i] == res || in[i] == known)
696 /* i < 0: there is at most one predecessor, we don't need a phi node. */
/* undo the speculative allocation: res was the newest obstack object */
699 edges_node_deleted(res, current_ir_graph);
700 obstack_free(current_ir_graph->obst, res);
702 /* If pred is a phi node we want to optimize it: If loops are matured in a bad
703 order, an enclosing Phi know may get superfluous. */
704 res = optimize_in_place_2(known);
706 exchange(known, res);
711 /* A undefined value, e.g., in unreachable code. */
715 res = optimize_node(res); /* This is necessary to add the node to the hash table for cse. */
716 IRN_VRFY_IRG(res, irg);
717 /* Memory Phis in endless loops must be kept alive.
718 As we can't distinguish these easily we keep all of them alive. */
719 if (is_Phi(res) && mode == mode_M)
720 add_End_keepalive(get_irg_end(irg), res);
724 } /* new_rd_Phi_in */
726 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
728 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/* Precise-exception support: snapshots the current block's value array
   (graph_arr) into a fresh obstack array attached to fragile op n, and
   installs ProjM(n) as slot 0 (the memory state after n).  Optimization
   is switched off while the Proj is created so the not-yet-finished node
   is not folded.  The last slot of the block's graph_arr is made to point
   at n, chaining the fragile ops of the block together.
   NOTE(review): arr/opt declarations, the Call/CopyB/else if-chain
   structure, the set_optimize(opt) restore and the return are elided. */
731 * Construct a new frag_array for node n.
732 * Copy the content from the current graph_arr of the corresponding block:
733 * this is the current state.
734 * Set ProjM(n) as current memory state.
735 * Further the last entry in frag_arr of current block points to n. This
736 * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
738 static inline ir_node **new_frag_arr(ir_node *n)
743 arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
744 memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
745 sizeof(ir_node *)*current_ir_graph->n_loc);
747 /* turn off optimization before allocating Proj nodes, as res isn't
749 opt = get_opt_optimize(); set_optimize(0);
750 /* Here we rely on the fact that all frag ops have Memory as first result! */
752 arr[0] = new_Proj(n, mode_M, pn_Call_M);
753 } else if (is_CopyB(n)) {
754 arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
/* all remaining fragile ops share one memory proj number -- assert it */
756 assert((pn_Quot_M == pn_DivMod_M) &&
757 (pn_Quot_M == pn_Div_M) &&
758 (pn_Quot_M == pn_Mod_M) &&
759 (pn_Quot_M == pn_Load_M) &&
760 (pn_Quot_M == pn_Store_M) &&
761 (pn_Quot_M == pn_Alloc_M) &&
762 (pn_Quot_M == pn_Bound_M));
763 arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
767 current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
/* Fetches the frag_arr stored in the exception attribute of a fragile op.
   NOTE(review): the `case iro_*:` labels and default branch of this
   switch are elided in this extract. */
772 * Returns the frag_arr from a node.
774 static inline ir_node **get_frag_arr(ir_node *n)
776 switch (get_irn_opcode(n)) {
778 return n->attr.call.exc.frag_arr;
780 return n->attr.alloc.exc.frag_arr;
782 return n->attr.load.exc.frag_arr;
784 return n->attr.store.exc.frag_arr;
786 return n->attr.except.frag_arr;
/* Writes val into slot pos of frag_arr and of every chained successor
   frag_arr (via the n_loc-1 chain slot), but never overwrites an existing
   value.  The loop bound of 1024 is a recursion-depth guard: more than
   1024 chained frag arrays triggers the assert below.
   NOTE(review): body lines of the loop (the chain advance / breaks) are
   elided in this extract. */
790 static void set_frag_value(ir_node **frag_arr, int pos, ir_node *val)
795 for (i = 1024; i >= 0; --i)
800 if (frag_arr[pos] == NULL)
802 if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
803 ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
804 assert(arr != frag_arr && "Endless recursion detected");
809 assert(!"potential endless recursion in set_frag_value");
810 } /* set_frag_value */
/* Looks up the value at position pos as seen along the exceptional edge
   leaving fragile op cfOp.  If the block already holds a newer value for
   pos (a set_value happened after cfOp), a Phi must be built in `block`
   instead -- matured blocks get phi_merge, immature blocks a Phi0 chained
   into the block's phi list.  Otherwise the ordinary dominating value is
   used.  Either way the result is propagated into the frag arrays.
   NOTE(review): res/frag_arr/nin declarations, the lookup of pos in
   frag_arr and the return are elided in this extract. */
812 static ir_node *get_r_frag_value_internal(ir_node *block, ir_node *cfOp,
813 int pos, ir_mode *mode)
818 assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
820 frag_arr = get_frag_arr(cfOp);
823 if (block->attr.block.graph_arr[pos] != NULL) {
824 /* There was a set_value() after the cfOp and no get_value() before that
825 set_value(). We must build a Phi node now. */
826 if (block->attr.block.is_matured) {
827 int ins = get_irn_arity(block);
829 NEW_ARR_A(ir_node *, nin, ins);
830 res = phi_merge(block, pos, mode, nin, ins);
832 res = new_rd_Phi0(current_ir_graph, block, mode);
833 res->attr.phi.u.pos = pos;
834 res->attr.phi.next = block->attr.block.phis;
835 block->attr.block.phis = res;
838 /* It's a Phi, we can write this into all graph_arrs with NULL */
839 set_frag_value(block->attr.block.graph_arr, pos, res);
841 res = get_r_value_internal(block, pos, mode);
842 set_frag_value(block->attr.block.graph_arr, pos, res);
846 } /* get_r_frag_value_internal */
/* Returns whether the control-flow edge cf_pred leaving prev_cf_op is the
   exceptional one.  Only fragile ops can raise; their Projs are either
   pn_Generic_X_regular (normal flow) or pn_Generic_X_except (exception).
   A fragile op without a Proj successor is a broken graph -> panic.
   NOTE(review): the `return 0/1` lines and closing braces are elided in
   this extract. */
849 * Check whether a control flownode cf_pred represents an exception flow.
851 * @param cf_pred the control flow node
852 * @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
854 static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op)
857 * Note: all projections from a raise are "exceptional control flow" we we handle it
858 * like a normal Jmp, because there is no "regular" one.
859 * That's why Raise is no "fragile_op"!
861 if (is_fragile_op(prev_cf_op)) {
862 if (is_Proj(cf_pred)) {
863 if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
864 /* the regular control flow, NO exception */
867 assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
870 /* Hmm, exception but not a Proj? */
871 panic("unexpected condition: fragile op without a proj");
874 } /* is_exception_flow */
/* Collects the predecessor values for position pos into nin[] (one entry
   per control-flow predecessor of block) and builds the Phi via
   new_rd_Phi_in.  Three pieces of machinery:
   1. If block has no value at pos yet, a Phi0 is planted in graph_arr to
      break recursion -- except in the start block, where the language
      callback (or Unknown) supplies an initial value and we return early.
   2. Per predecessor: Bad edges yield Bad; exceptional edges from fragile
      ops (precise-exception mode) use the frag-array lookup; normal edges
      recurse via get_r_value_internal.
   3. phi0_all passes the still-current Phi0 to new_rd_Phi_in only if it
      really is the zero-arity Phi0 of this block (the slot may have been
      updated meanwhile); afterwards the real result replaces it and
      (where elided) the Phi0 is exchanged.
   NOTE(review): i declaration, the else-branches/braces, the phi0_all
   NULL-reset and the final exchange/return lines are elided here. */
877 * Computes the predecessors for the real phi node, and then
878 * allocates and returns this node. The routine called to allocate the
879 * node might optimize it away and return a real value.
880 * This function must be called with an in-array of proper size.
882 static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
884 ir_node *prevBlock, *res, *phi0, *phi0_all;
887 /* If this block has no value at pos create a Phi0 and remember it
888 in graph_arr to break recursions.
889 Else we may not set graph_arr as there a later value is remembered. */
891 if (block->attr.block.graph_arr[pos] == NULL) {
892 ir_graph *irg = current_ir_graph;
894 if (block == get_irg_start_block(irg)) {
895 /* Collapsing to Bad tarvals is no good idea.
896 So we call a user-supplied routine here that deals with this case as
897 appropriate for the given language. Sorrily the only help we can give
898 here is the position.
900 Even if all variables are defined before use, it can happen that
901 we get to the start block, if a Cond has been replaced by a tuple
902 (bad, jmp). In this case we call the function needlessly, eventually
903 generating an non existent error.
904 However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
907 if (default_initialize_local_variable != NULL) {
908 ir_node *rem = get_cur_block();
910 set_cur_block(block);
/* pos - 1: graph_arr slot 0 is the memory state, locals start at 1 */
911 block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
915 block->attr.block.graph_arr[pos] = new_Unknown(mode);
916 /* We don't need to care about exception ops in the start block.
917 There are none by definition. */
918 return block->attr.block.graph_arr[pos];
920 phi0 = new_rd_Phi0(irg, block, mode);
921 block->attr.block.graph_arr[pos] = phi0;
922 if (get_opt_precise_exc_context()) {
923 /* Set graph_arr for fragile ops. Also here we should break recursion.
924 We could choose a cyclic path through an cfop. But the recursion would
925 break at some point. */
926 set_frag_value(block->attr.block.graph_arr, pos, phi0);
931 /* This loop goes to all predecessor blocks of the block the Phi node
932 is in and there finds the operands of the Phi node by calling
933 get_r_value_internal. */
934 for (i = 1; i <= ins; ++i) {
935 ir_node *cf_pred = block->in[i];
936 ir_node *prevCfOp = skip_Proj(cf_pred);
938 if (is_Bad(prevCfOp)) {
939 /* In case a Cond has been optimized we would get right to the start block
940 with an invalid definition. */
941 nin[i-1] = new_Bad();
944 prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
946 if (!is_Bad(prevBlock)) {
947 if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
948 assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
949 nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
951 nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
953 nin[i-1] = new_Bad();
957 /* We want to pass the Phi0 node to the constructor: this finds additional
958 optimization possibilities.
959 The Phi0 node either is allocated in this function, or it comes from
960 a former call to get_r_value_internal(). In this case we may not yet
961 exchange phi0, as this is done in mature_immBlock(). */
963 phi0_all = block->attr.block.graph_arr[pos];
964 if (! is_Phi0(phi0_all) ||
965 get_irn_arity(phi0_all) != 0 ||
966 get_nodes_block(phi0_all) != block)
972 /* After collecting all predecessors into the array nin a new Phi node
973 with these predecessors is created. This constructor contains an
974 optimization: If all predecessors of the Phi node are identical it
975 returns the only operand instead of a new Phi node. */
976 res = new_rd_Phi_in(current_ir_graph, block, mode, nin, ins, phi0_all);
978 /* In case we allocated a Phi0 node at the beginning of this procedure,
979 we need to exchange this Phi0 with the real Phi. */
982 block->attr.block.graph_arr[pos] = res;
983 /* Don't set_frag_value as it does not overwrite. Doesn't matter, is
984 only an optimization. */
/* The value-lookup heart of SSA construction.  Uses the visited flag to
   distinguish the four cases documented inline: revisit (return cached
   Phi0/value), locally-known value, matured block (build a real Phi via
   phi_merge) and immature block (plant a Phi0 on the block's phi list to
   be completed by mature_immBlock).  The looked-up result is cached back
   into graph_arr[pos].
   NOTE(review): res/nin declarations, the "if (res) ..." early return for
   case 2 and the final return statement are elided in this extract. */
991 * This function returns the last definition of a value. In case
992 * this value was last defined in a previous block, Phi nodes are
993 * inserted. If the part of the firm graph containing the definition
994 * is not yet constructed, a dummy Phi node is returned.
996 * @param block the current block
997 * @param pos the value number of the value searched
998 * @param mode the mode of this value (needed for Phi construction)
1000 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
1003 /* There are 4 cases to treat.
1005 1. The block is not mature and we visit it the first time. We can not
1006 create a proper Phi node, therefore a Phi0, i.e., a Phi without
1007 predecessors is returned. This node is added to the linked list (block
1008 attribute "phis") of the containing block to be completed when this block is
1009 matured. (Completion will add a new Phi and turn the Phi0 into an Id
1012 2. The value is already known in this block, graph_arr[pos] is set and we
1013 visit the block the first time. We can return the value without
1014 creating any new nodes.
1016 3. The block is mature and we visit it the first time. A Phi node needs
1017 to be created (phi_merge). If the Phi is not needed, as all it's
1018 operands are the same value reaching the block through different
1019 paths, it's optimized away and the value itself is returned.
1021 4. The block is mature, and we visit it the second time. Now two
1022 subcases are possible:
1023 * The value was computed completely the last time we were here. This
1024 is the case if there is no loop. We can return the proper value.
1025 * The recursion that visited this node and set the flag did not
1026 return yet. We are computing a value in a loop and need to
1027 break the recursion. This case only happens if we visited
1028 the same block with phi_merge before, which inserted a Phi0.
1029 So we return the Phi0.
1032 /* case 4 -- already visited. */
1033 if (get_irn_visited(block) == get_irg_visited(current_ir_graph)) {
1034 /* As phi_merge allocates a Phi0 this value is always defined. Here
1035 is the critical difference of the two algorithms. */
1036 assert(block->attr.block.graph_arr[pos]);
1037 return block->attr.block.graph_arr[pos];
1040 /* visited the first time */
1041 set_irn_visited(block, get_irg_visited(current_ir_graph));
1043 /* Get the local valid value */
1044 res = block->attr.block.graph_arr[pos];
1046 /* case 2 -- If the value is actually computed, return it. */
1050 if (block->attr.block.is_matured) { /* case 3 */
1052 /* The Phi has the same amount of ins as the corresponding block. */
1053 int ins = get_irn_arity(block);
1055 NEW_ARR_A(ir_node *, nin, ins);
1057 /* Phi merge collects the predecessors and then creates a node. */
1058 res = phi_merge(block, pos, mode, nin, ins);
1060 } else { /* case 1 */
1061 /* The block is not mature, we don't know how many in's are needed. A Phi
1062 with zero predecessors is created. Such a Phi node is called Phi0
1063 node. The Phi0 is then added to the list of Phi0 nodes in this block
1064 to be matured by mature_immBlock later.
1065 The Phi0 has to remember the pos of it's internal value. If the real
1066 Phi is computed, pos is used to update the array with the local
1068 res = new_rd_Phi0(current_ir_graph, block, mode);
1069 res->attr.phi.u.pos = pos;
1070 res->attr.phi.next = block->attr.block.phis;
1071 block->attr.block.phis = res;
1074 assert(is_ir_node(res) && "phi_merge() failed to construct a definition");
1076 /* The local valid value is available now. */
1077 block->attr.block.graph_arr[pos] = res;
1080 } /* get_r_value_internal */
1082 /* ************************************************************************** */
/* Fixes the arity of an immature block (block->in is a dynamic array, its
   first slot holds the macroblock header, hence ARR_LEN - 1), allocates
   the back-edge array, and completes every pending Phi0 on the block's
   phi list by exchanging it with a real phi_merge result.  The visited
   counter is bumped per Phi so each merge gets a fresh traversal.
   Finally the block itself is optimized in place (global CSE does not
   apply to blocks, so optimize_in_place_2 is safe).
   NOTE(review): ins/nin/n/next declarations and closing braces are elided
   in this extract. */
1085 * Finalize a Block node, when all control flows are known.
1086 * Acceptable parameters are only Block nodes.
1088 void mature_immBlock(ir_node *block)
1094 assert(is_Block(block));
1095 if (!get_Block_matured(block)) {
1096 ir_graph *irg = current_ir_graph;
1098 ins = ARR_LEN(block->in) - 1;
1099 /* Fix block parameters */
1100 block->attr.block.backedge = new_backedge_arr(irg->obst, ins);
1102 /* An array for building the Phi nodes. */
1103 NEW_ARR_A(ir_node *, nin, ins);
1105 /* Traverse a chain of Phi nodes attached to this block and mature
1107 for (n = block->attr.block.phis; n; n = next) {
1108 inc_irg_visited(irg);
/* read next BEFORE exchange() invalidates n */
1109 next = n->attr.phi.next;
1110 exchange(n, phi_merge(block, n->attr.phi.u.pos, n->mode, nin, ins));
1113 block->attr.block.is_matured = 1;
1115 /* Now, as the block is a finished Firm node, we can optimize it.
1116 Since other nodes have been allocated since the block was created
1117 we can not free the node on the obstack. Therefore we have to call
1118 optimize_in_place().
1119 Unfortunately the optimization does not change a lot, as all allocated
1120 nodes refer to the unoptimized node.
1121 We can call optimize_in_place_2(), as global cse has no effect on blocks. */
1122 block = optimize_in_place_2(block);
1123 IRN_VRFY_IRG(block, irg);
1125 } /* mature_immBlock */
/* "d" convenience constructors: build in current_ir_graph's current
   block (Phi) or delegate to the bd constants layer.
   NOTE(review): braces of these one-line wrappers are elided here. */
1127 ir_node *new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
1129 return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
1132 ir_node *new_d_Const(dbg_info *db, tarval *con)
1134 return new_bd_Const(db, con);
1137 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
1139 return new_bd_Const_long(db, mode, value);
1140 } /* new_d_Const_long */
1142 ir_node *new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp)
1144 return new_bd_Const_type(db, con, tp);
1145 } /* new_d_Const_type */
/* Default Proj of a Cond in the current graph; mutates the Cond's
   default_proj attribute (same pattern as new_bd_defaultProj).
   NOTE(review): res declaration and return are elided here. */
1148 ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
1151 assert(arg->op == op_Cond);
1152 arg->attr.cond.default_proj = max_proj;
1153 res = new_d_Proj(db, arg, mode_X, max_proj);
1155 } /* new_d_defaultProj */
1158 * Allocate a frag array for a node if the current graph state is phase_building.
1160 * @param irn the node for which the frag array should be allocated
1161 * @param op the opcode of the (original) node, if does not match opcode of irn,
1163 * @param frag_store the address of the frag store in irn attributes, if this
1164 * address contains a value != NULL, does nothing
1166 void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store)
1168 if (get_opt_precise_exc_context()) {
1169 if ((current_ir_graph->phase_state == phase_building) &&
1170 (get_irn_op(irn) == op) && /* Could be optimized away. */
1171 !*frag_store) /* Could be a cse where the arr is already set. */ {
1172 *frag_store = new_frag_arr(irn);
1175 } /* firm_alloc_frag_arr */
1177 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
1178 /* GL: objptr was called frame before. Frame was a bad choice for the name
1179 as the operand could as well be a pointer to a dynamic object. */
1181 return new_bd_Sel(db, current_ir_graph->current_block,
1182 store, objptr, 0, NULL, ent);
1183 } /* new_d_simpleSel */
1185 ir_node *new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp)
1187 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1189 } /* new_d_SymConst_type */
1191 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind)
1193 return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
1194 value, kind, firm_unknown_type);
1195 } /* new_d_SymConst */
1197 ir_node *new_d_Sync(dbg_info *db, int arity, ir_node *in[])
1199 return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
1202 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[], ir_asm_constraint *inputs,
1203 int n_outs, ir_asm_constraint *outputs, int n_clobber,
1204 ident *clobber[], ident *text)
1206 return new_bd_ASM(db, current_ir_graph->current_block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
1209 /* ********************************************************************* */
1210 /* Comfortable interface with automatic Phi node construction. */
/* (Uses also constructors of the ?? interface, except new_Block.) */
1212 /* ********************************************************************* */
1214 /* Block construction */
/* immature Block without predecessors: the in-array stays open so that
   add_immBlock_pred() can append control-flow edges until mature_immBlock()
   is called. */
ir_node *new_d_immBlock(dbg_info *db)
	assert(get_irg_phase_state(current_ir_graph) == phase_building);
	/* creates a new dynamic in-array as length of in is -1 */
	res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);

	/* macroblock head */

	/* fresh block: not matured, not dead, head of its own macroblock */
	res->attr.block.is_matured = 0;
	res->attr.block.is_dead = 0;
	res->attr.block.is_mb_head = 1;
	res->attr.block.irg.irg = current_ir_graph;
	res->attr.block.backedge = NULL;
	res->attr.block.in_cg = NULL;
	res->attr.block.cg_backedge = NULL;
	res->attr.block.extblk = NULL;
	res->attr.block.region = NULL;
	res->attr.block.mb_depth = 0;
	res->attr.block.entity = NULL;

	set_Block_block_visited(res, 0);

	/* Create and initialize array for Phi-node construction.
	   One slot per local value number; all start out undefined (NULL). */
	res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
	                                      current_ir_graph->n_loc);
	memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);

	/* Immature block may not be optimized! */
	IRN_VRFY_IRG(res, current_ir_graph);
} /* new_d_immBlock */
1252 ir_node *new_immBlock(void)
1254 return new_d_immBlock(NULL);
1255 } /* new_immBlock */
1257 /* immature PartBlock with its predecessors */
1258 ir_node *new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp)
1260 ir_node *res = new_d_immBlock(db);
1261 ir_node *blk = get_nodes_block(pred_jmp);
1263 res->in[0] = blk->in[0];
1264 assert(res->in[0] != NULL);
1265 add_immBlock_pred(res, pred_jmp);
1267 res->attr.block.is_mb_head = 0;
1268 res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
1271 } /* new_d_immPartBlock */
1273 ir_node *new_immPartBlock(ir_node *pred_jmp)
1275 return new_d_immPartBlock(NULL, pred_jmp);
1276 } /* new_immPartBlock */
/* add an edge to a jmp/control flow node: appends jmp as a new control-flow
   predecessor of the (still immature, macroblock-head) block */
void add_immBlock_pred(ir_node *block, ir_node *jmp)
	/* position of the new edge; in[0] seems reserved for the macroblock
	   header, hence the -1 -- TODO confirm against full source */
	int n = ARR_LEN(block->in) - 1;

	assert(is_Block(block) && "Error: Must be a Block");
	assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
	assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
	assert(is_ir_node(jmp));

	/* grow the dynamic in-array by the new predecessor */
	ARR_APP1(ir_node *, block->in, jmp);
	/* notify observers about the new edge */
	hook_set_irn_n(block, n, jmp, NULL);
} /* add_immBlock_pred */
1293 /* changing the current block */
1294 void set_cur_block(ir_node *target)
1296 current_ir_graph->current_block = target;
1297 } /* set_cur_block */
1299 /* ************************ */
1300 /* parameter administration */
/* get a value from the parameter array from the current block by its index */
ir_node *get_d_value(dbg_info *db, int pos, ir_mode *mode)
	ir_graph *irg = current_ir_graph;
	assert(get_irg_phase_state(irg) == phase_building);
	/* get_r_value_internal() walks the block graph using the visited flag */
	inc_irg_visited(irg);
	/* pos + 1: slot 0 of the graph_arr holds the current store
	   (see get_store()/set_store()), user values start at 1 */
	return get_r_value_internal(irg->current_block, pos + 1, mode);
1315 /* get a value from the parameter array from the current block by its index */
1316 ir_node *get_value(int pos, ir_mode *mode)
1318 return get_d_value(NULL, pos, mode);
1321 /* set a value at position pos in the parameter array from the current block */
1322 void set_value(int pos, ir_node *value)
1324 ir_graph *irg = current_ir_graph;
1325 assert(get_irg_phase_state(irg) == phase_building);
1327 assert(pos+1 < irg->n_loc);
1328 assert(is_ir_node(value));
1329 irg->current_block->attr.block.graph_arr[pos + 1] = value;
/* Find the value number for a node in the current block.*/
int find_value(ir_node *value)
	ir_node *bl = current_ir_graph->current_block;

	/* Scan the block's value array backwards.  Slot 0 holds the current
	   store, so user values live at indices >= 1; a hit at index i should
	   correspond to value number i - 1, matching set_value()'s pos + 1
	   offset -- confirm against full source. */
	for (i = ARR_LEN(bl->attr.block.graph_arr) - 1; i >= 1; --i)
		if (bl->attr.block.graph_arr[i] == value)
1344 /* get the current store */
1345 ir_node *get_store(void)
1347 ir_graph *irg = current_ir_graph;
1349 assert(get_irg_phase_state(irg) == phase_building);
1350 /* GL: one could call get_value instead */
1351 inc_irg_visited(irg);
1352 return get_r_value_internal(irg->current_block, 0, mode_M);
/* set the current store: handles automatic Sync construction for Load nodes */
void set_store(ir_node *store)
	ir_node *load, *pload, *pred, *in[2];

	assert(get_irg_phase_state(current_ir_graph) == phase_building);
	/* Beware: due to dead code elimination, a store might become a Bad node even in
	   the construction phase. */
	assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");

	if (get_opt_auto_create_sync()) {
		/* handle non-volatile Load nodes by automatically creating Sync's */
		load = skip_Proj(store);
		if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
			pred = get_Load_mem(load);

			if (is_Sync(pred)) {
				/* a Load after a Sync: move it up: rewire the Load past the
				   Sync and add the Load's memory result to the Sync instead */
				ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
				set_Load_mem(load, get_memop_mem(mem));
				add_Sync_pred(pred, store);
			pload = skip_Proj(pred);
			if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
				/* a Load after a Load: create a new Sync joining both memory
				   results; the Sync becomes the current store below */
				set_Load_mem(load, get_Load_mem(pload));
				store = new_Sync(2, in);
	/* record the (possibly rewritten) store in slot 0 of the current block */
	current_ir_graph->current_block->attr.block.graph_arr[0] = store;
1394 void keep_alive(ir_node *ka)
1396 add_End_keepalive(get_irg_end(current_ir_graph), ka);
1399 /* --- Useful access routines --- */
1400 /* Returns the current block of the current graph. To set the current
1401 block use set_cur_block. */
1402 ir_node *get_cur_block(void)
1404 return get_irg_current_block(current_ir_graph);
1405 } /* get_cur_block */
1407 /* Returns the frame type of the current graph */
1408 ir_type *get_cur_frame_type(void)
1410 return get_irg_frame_type(current_ir_graph);
1411 } /* get_cur_frame_type */
1414 /* ********************************************************************* */
1417 /* call once for each run of the library */
1418 void ir_set_uninitialized_local_variable_func(
1419 uninitialized_local_variable_func_t *func)
1421 default_initialize_local_variable = func;
1424 void irg_finalize_cons(ir_graph *irg)
1426 set_irg_phase_state(irg, phase_high);
1429 void irp_finalize_cons(void)
1432 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1433 irg_finalize_cons(get_irp_irg(i));
1435 irp->phase_state = phase_high;
/* Convenience constructors without debug information: each forwards to its
   new_d_* counterpart with a NULL dbg_info (or fetches a graph singleton). */

/* Start node of the current graph. */
ir_node *new_Start(void)
	return new_d_Start(NULL);

/* End node of the current graph. */
ir_node *new_End(void)
	return new_d_End(NULL);

/* Const node from a tarval. */
ir_node *new_Const(tarval *con)
	return new_d_Const(NULL, con);

/* Const node from a long value. */
ir_node *new_Const_long(ir_mode *mode, long value)
	return new_d_Const_long(NULL, mode, value);

/* Const node with an explicit type. */
ir_node *new_Const_type(tarval *con, ir_type *tp)
	return new_d_Const_type(NULL, con, tp);

/* SymConst node with an explicit type. */
ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type)
	return new_d_SymConst_type(NULL, mode, value, kind, type);

/* SymConst node of unknown type. */
ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
	return new_d_SymConst(NULL, mode, value, kind);

/* Sel node without array indices. */
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
	return new_d_simpleSel(NULL, store, objptr, ent);

/* Phi node in the current block. */
ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
	return new_d_Phi(NULL, arity, in, mode);

/* Sync node in the current block. */
ir_node *new_Sync(int arity, ir_node *in[])
	return new_d_Sync(NULL, arity, in);

/* Default Proj of a Cond node. */
ir_node *new_defaultProj(ir_node *arg, long max_proj)
	return new_d_defaultProj(NULL, arg, max_proj);

/* The Bad node of the current graph (a graph-wide singleton). */
ir_node *new_Bad(void)
	return get_irg_bad(current_ir_graph);

/* The NoMem node of the current graph (a graph-wide singleton). */
ir_node *new_NoMem(void)
	return get_irg_no_mem(current_ir_graph);

/* ASM node in the current block. */
ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
                 int n_outs, ir_asm_constraint *outputs,
                 int n_clobber, ident *clobber[], ident *text)
	return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
1500 /* create a new anchor node */
1501 ir_node *new_Anchor(ir_graph *irg)
1503 ir_node *in[anchor_last];
1504 memset(in, 0, sizeof(in));
1505 return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);