/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Various irnode constructors.  Automatic construction of SSA
 *          representation.
 * @author  Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler,
 *          Michael Beck, Matthias Braun
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "irgraph_t.h"
#include "irbackedge_t.h"
#include "iredges_t.h"

#include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
59 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
60 ir_asm_constraint *inputs, size_t n_outs,
61 ir_asm_constraint *outputs, size_t n_clobber,
62 ident *clobber[], ident *text)
64 ir_graph *irg = get_irn_irg(block);
65 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
67 res->attr.assem.pin_state = op_pin_state_pinned;
68 res->attr.assem.input_constraints
69 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
70 res->attr.assem.output_constraints
71 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
72 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
73 res->attr.assem.text = text;
75 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
76 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
77 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
79 irn_verify_irg(res, irg);
80 res = optimize_node(res);
84 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
85 ir_node *objptr, ir_entity *ent)
87 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
90 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
91 symconst_symbol value, symconst_kind symkind)
93 ir_node *block = get_irg_start_block(irg);
94 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
95 res->attr.symc.kind = symkind;
96 res->attr.symc.sym = value;
98 irn_verify_irg(res, irg);
99 res = optimize_node(res);
103 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
106 sym.entity_p = symbol;
107 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
110 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
113 sym.entity_p = symbol;
114 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
117 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
121 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
124 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
128 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
131 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
133 return new_rd_Const_long(NULL, irg, mode, value);
135 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
136 symconst_kind symkind)
138 return new_rd_SymConst(NULL, irg, mode, value, symkind);
140 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
143 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
145 ir_node *new_r_ASM(ir_node *block,
146 int arity, ir_node *in[], ir_asm_constraint *inputs,
147 size_t n_outs, ir_asm_constraint *outputs,
148 size_t n_clobber, ident *clobber[], ident *text)
150 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
153 /** Creates a Phi node with 0 predecessors. */
154 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
155 ir_mode *mode, int pos)
157 ir_graph *irg = get_irn_irg(block);
158 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
159 res->attr.phi.u.pos = pos;
160 irn_verify_irg(res, irg);
164 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
166 static void try_remove_unnecessary_phi(ir_node *phi)
168 ir_node *phi_value = NULL;
169 int arity = get_irn_arity(phi);
172 /* see if all inputs are either pointing to a single value or
173 * are self references */
174 for (i = 0; i < arity; ++i) {
175 ir_node *in = get_irn_n(phi, i);
180 /** found a different value from the one we already found, can't remove
182 if (phi_value != NULL)
186 if (phi_value == NULL)
189 /* if we're here then all phi inputs have been either phi_value
190 * or self-references, we can replace the phi by phi_value.
191 * We do this with an Id-node */
192 exchange(phi, phi_value);
194 /* recursively check phi_value, because it could be that we were the last
195 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
196 * the loop header which can be eliminated now */
197 if (is_Phi(phi_value)) {
198 try_remove_unnecessary_phi(phi_value);
203 * Computes the predecessors for the real phi node, and then
204 * allocates and returns this node. The routine called to allocate the
205 * node might optimize it away and return a real value.
206 * This function must be called with an in-array of proper size.
208 static ir_node *set_phi_arguments(ir_node *phi, int pos)
210 ir_node *block = get_nodes_block(phi);
211 ir_graph *irg = get_irn_irg(block);
212 int arity = get_irn_arity(block);
213 ir_node **in = ALLOCAN(ir_node*, arity);
214 ir_mode *mode = get_irn_mode(phi);
217 /* This loop goes to all predecessor blocks of the block the Phi node
218 is in and there finds the operands of the Phi node by calling
219 get_r_value_internal. */
220 for (i = 0; i < arity; ++i) {
221 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
223 if (is_Bad(cfgpred)) {
224 value = new_r_Bad(irg, mode);
226 inc_irg_visited(irg);
228 value = get_r_value_internal(cfgpred, pos, mode);
233 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
234 set_irn_in(phi, arity, in);
236 irn_verify_irg(phi, irg);
238 /* Memory Phis in endless loops must be kept alive.
239 As we can't distinguish these easily we keep all of them alive. */
241 add_End_keepalive(get_irg_end(irg), phi);
243 try_remove_unnecessary_phi(phi);
248 * This function returns the last definition of a value. In case
249 * this value was last defined in a previous block, Phi nodes are
250 * inserted. If the part of the firm graph containing the definition
251 * is not yet constructed, a dummy Phi node is returned.
253 * @param block the current block
254 * @param pos the value number of the value searched
255 * @param mode the mode of this value (needed for Phi construction)
257 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
259 ir_node *res = block->attr.block.graph_arr[pos];
260 ir_graph *irg = get_irn_irg(block);
264 /* We ran into a cycle. This may happen in unreachable loops. */
265 if (irn_visited_else_mark(block)) {
266 /* Since the loop is unreachable, return a Bad. */
267 return new_r_Bad(irg, mode);
270 /* in a matured block we can immediately determine the phi arguments */
271 if (get_Block_matured(block)) {
272 int arity = get_irn_arity(block);
273 /* no predecessors: use unknown value */
275 if (block == get_irg_start_block(irg)) {
276 if (default_initialize_local_variable != NULL) {
277 ir_node *rem = get_r_cur_block(irg);
278 set_r_cur_block(irg, block);
279 res = default_initialize_local_variable(irg, mode, pos - 1);
280 set_r_cur_block(irg, rem);
282 res = new_r_Unknown(irg, mode);
285 /* unreachable block, use Bad */
286 res = new_r_Bad(irg, mode);
288 /* one predecessor just use its value */
289 } else if (arity == 1) {
290 ir_node *cfgpred = get_Block_cfgpred(block, 0);
291 if (is_Bad(cfgpred)) {
292 res = new_r_Bad(irg, mode);
294 ir_node *cfgpred_block = get_nodes_block(cfgpred);
295 res = get_r_value_internal(cfgpred_block, pos, mode);
297 /* multiple predecessors construct Phi */
299 res = new_rd_Phi0(NULL, block, mode, pos);
300 /* enter phi0 into our variable value table to break cycles
301 * arising from set_phi_arguments */
302 block->attr.block.graph_arr[pos] = res;
303 res = set_phi_arguments(res, pos);
306 /* in case of immature block we have to keep a Phi0 */
307 res = new_rd_Phi0(NULL, block, mode, pos);
308 /* enqueue phi so we can set arguments once the block matures */
309 res->attr.phi.next = block->attr.block.phis;
310 block->attr.block.phis = res;
312 block->attr.block.graph_arr[pos] = res;
316 void mature_immBlock(ir_node *block)
324 assert(is_Block(block));
325 if (get_Block_matured(block))
328 irg = get_irn_irg(block);
329 n_preds = ARR_LEN(block->in) - 1;
330 /* Fix block parameters */
331 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
333 /* Traverse a chain of Phi nodes attached to this block and mature
335 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
337 int pos = phi->attr.phi.u.pos;
339 next = phi->attr.phi.next;
340 new_value = set_phi_arguments(phi, pos);
341 if (block->attr.block.graph_arr[pos] == phi) {
342 block->attr.block.graph_arr[pos] = new_value;
346 set_Block_matured(block, 1);
348 /* create final in-array for the block */
349 if (block->attr.block.dynamic_ins) {
350 new_in = NEW_ARR_D(ir_node*, irg->obst, n_preds+1);
351 memcpy(new_in, block->in, (n_preds+1) * sizeof(new_in[0]));
352 DEL_ARR_F(block->in);
354 block->attr.block.dynamic_ins = false;
357 /* Now, as the block is a finished Firm node, we can optimize it.
358 Since other nodes have been allocated since the block was created
359 we can not free the node on the obstack. Therefore we have to call
361 Unfortunately the optimization does not change a lot, as all allocated
362 nodes refer to the unoptimized node.
363 We can call optimize_in_place_2(), as global cse has no effect on blocks.
365 irn_verify_irg(block, irg);
366 block = optimize_in_place_2(block);
369 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
371 assert(get_irg_phase_state(current_ir_graph) == phase_building);
372 return new_rd_Const_long(db, current_ir_graph, mode, value);
375 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
378 assert(get_irg_phase_state(current_ir_graph) == phase_building);
379 return new_rd_Sel(db, current_ir_graph->current_block,
380 store, objptr, 0, NULL, ent);
383 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
386 assert(get_irg_phase_state(current_ir_graph) == phase_building);
387 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
390 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
391 ir_asm_constraint *inputs,
392 size_t n_outs, ir_asm_constraint *outputs,
393 size_t n_clobber, ident *clobber[], ident *text)
395 assert(get_irg_phase_state(current_ir_graph) == phase_building);
396 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
397 n_outs, outputs, n_clobber, clobber, text);
400 ir_node *new_rd_strictConv(dbg_info *dbgi, ir_node *block, ir_node * irn_op, ir_mode * mode)
403 ir_graph *irg = get_Block_irg(block);
408 res = new_ir_node(dbgi, irg, block, op_Conv, mode, 1, in);
409 res->attr.conv.strict = 1;
410 irn_verify_irg(res, irg);
411 res = optimize_node(res);
415 ir_node *new_r_strictConv(ir_node *block, ir_node * irn_op, ir_mode * mode)
417 return new_rd_strictConv(NULL, block, irn_op, mode);
420 ir_node *new_d_strictConv(dbg_info *dbgi, ir_node * irn_op, ir_mode * mode)
423 assert(get_irg_phase_state(current_ir_graph) == phase_building);
424 res = new_rd_strictConv(dbgi, current_ir_graph->current_block, irn_op, mode);
428 ir_node *new_strictConv(ir_node * irn_op, ir_mode * mode)
430 return new_d_strictConv(NULL, irn_op, mode);
433 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
436 ir_graph *irg = get_Block_irg(block);
443 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
444 res->attr.div.resmode = resmode;
445 res->attr.div.no_remainder = 1;
446 res->attr.div.exc.pin_state = pin_state;
447 irn_verify_irg(res, irg);
448 res = optimize_node(res);
452 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
454 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
457 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
460 assert(get_irg_phase_state(current_ir_graph) == phase_building);
461 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
465 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
467 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
470 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
474 assert(get_irg_phase_state(irg) == phase_building);
475 /* creates a new dynamic in-array as length of in is -1 */
476 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
478 set_Block_matured(res, 0);
479 res->attr.block.dynamic_ins = true;
480 res->attr.block.irg.irg = irg;
481 res->attr.block.backedge = NULL;
482 res->attr.block.extblk = NULL;
483 res->attr.block.entity = NULL;
485 set_Block_block_visited(res, 0);
487 /* Create and initialize array for Phi-node construction. */
488 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
489 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
491 /* Immature block may not be optimized! */
492 irn_verify_irg(res, irg);
497 ir_node *new_r_immBlock(ir_graph *irg)
499 return new_rd_immBlock(NULL, irg);
502 ir_node *new_d_immBlock(dbg_info *dbgi)
504 return new_rd_immBlock(dbgi, current_ir_graph);
507 ir_node *new_immBlock(void)
509 return new_rd_immBlock(NULL, current_ir_graph);
512 void add_immBlock_pred(ir_node *block, ir_node *jmp)
514 int n = ARR_LEN(block->in) - 1;
516 assert(is_Block(block) && "Error: Must be a Block");
517 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
518 assert(is_ir_node(jmp));
520 ARR_APP1(ir_node *, block->in, jmp);
522 hook_set_irn_n(block, n, jmp, NULL);
525 void set_cur_block(ir_node *target)
527 set_r_cur_block(current_ir_graph, target);
530 void set_r_cur_block(ir_graph *irg, ir_node *target)
532 assert(get_irg_phase_state(irg) == phase_building);
533 assert(target == NULL || is_Block(target));
534 assert(target == NULL || get_irn_irg(target) == irg);
535 irg->current_block = target;
538 ir_node *get_r_cur_block(ir_graph *irg)
540 assert(get_irg_phase_state(irg) == phase_building);
541 return irg->current_block;
544 ir_node *get_cur_block(void)
546 return get_r_cur_block(current_ir_graph);
549 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
551 assert(get_irg_phase_state(irg) == phase_building);
553 inc_irg_visited(irg);
555 return get_r_value_internal(irg->current_block, pos + 1, mode);
558 ir_node *get_value(int pos, ir_mode *mode)
560 return get_r_value(current_ir_graph, pos, mode);
564 * helper function for guess_mode: recursively look for a definition for
565 * local variable @p pos, returns its mode if found.
567 static ir_mode *guess_recursively(ir_node *block, int pos)
573 if (irn_visited_else_mark(block))
576 /* already have a defintion -> we can simply look at its mode */
577 value = block->attr.block.graph_arr[pos];
579 return get_irn_mode(value);
581 /* now we try to guess, by looking at the predecessor blocks */
582 n_preds = get_irn_arity(block);
583 for (i = 0; i < n_preds; ++i) {
584 ir_node *pred_block = get_Block_cfgpred_block(block, i);
585 ir_mode *mode = guess_recursively(pred_block, pos);
590 /* no way to guess */
594 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
596 ir_node *block = irg->current_block;
597 ir_node *value = block->attr.block.graph_arr[pos+1];
600 /* already have a defintion -> we can simply look at its mode */
602 return get_irn_mode(value);
604 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
605 inc_irg_visited(irg);
606 mode = guess_recursively(block, pos+1);
607 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
612 ir_mode *ir_guess_mode(int pos)
614 return ir_r_guess_mode(current_ir_graph, pos);
617 void set_r_value(ir_graph *irg, int pos, ir_node *value)
619 assert(get_irg_phase_state(irg) == phase_building);
621 assert(pos+1 < irg->n_loc);
622 assert(is_ir_node(value));
623 irg->current_block->attr.block.graph_arr[pos + 1] = value;
626 void set_value(int pos, ir_node *value)
628 set_r_value(current_ir_graph, pos, value);
631 ir_node *get_r_store(ir_graph *irg)
633 assert(get_irg_phase_state(irg) == phase_building);
634 inc_irg_visited(irg);
635 return get_r_value_internal(irg->current_block, 0, mode_M);
638 ir_node *get_store(void)
640 return get_r_store(current_ir_graph);
643 void set_r_store(ir_graph *irg, ir_node *store)
645 ir_node *load, *pload, *pred, *in[2];
647 assert(get_irg_phase_state(irg) == phase_building);
648 /* Beware: due to dead code elimination, a store might become a Bad node even in
649 the construction phase. */
650 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
652 if (get_opt_auto_create_sync()) {
653 /* handle non-volatile Load nodes by automatically creating Sync's */
654 load = skip_Proj(store);
655 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
656 pred = get_Load_mem(load);
659 /* a Load after a Sync: move it up */
660 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
662 set_Load_mem(load, get_memop_mem(mem));
663 add_Sync_pred(pred, store);
666 pload = skip_Proj(pred);
667 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
668 /* a Load after a Load: create a new Sync */
669 set_Load_mem(load, get_Load_mem(pload));
673 store = new_r_Sync(irg->current_block, 2, in);
678 irg->current_block->attr.block.graph_arr[0] = store;
681 void set_store(ir_node *store)
683 set_r_store(current_ir_graph, store);
686 void keep_alive(ir_node *ka)
688 ir_graph *irg = get_irn_irg(ka);
689 add_End_keepalive(get_irg_end(irg), ka);
692 void ir_set_uninitialized_local_variable_func(
693 uninitialized_local_variable_func_t *func)
695 default_initialize_local_variable = func;
698 void irg_finalize_cons(ir_graph *irg)
700 ir_node *end_block = get_irg_end_block(irg);
701 mature_immBlock(end_block);
703 set_irg_phase_state(irg, phase_high);
706 void irp_finalize_cons(void)
709 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
710 irg_finalize_cons(get_irp_irg(i));
712 irp->phase_state = phase_high;
715 ir_node *new_Const_long(ir_mode *mode, long value)
717 return new_d_Const_long(NULL, mode, value);
720 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
722 return new_d_SymConst(NULL, mode, value, kind);
724 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
726 return new_d_simpleSel(NULL, store, objptr, ent);
728 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
729 size_t n_outs, ir_asm_constraint *outputs,
730 size_t n_clobber, ident *clobber[], ident *text)
732 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
735 ir_node *new_r_Anchor(ir_graph *irg)
737 ir_node *in[anchor_last+1];
740 memset(in, 0, sizeof(in));
741 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last+1, in);
742 res->attr.anchor.irg.irg = irg;
744 /* hack to get get_irn_irg working: set block to ourself and allow
745 * get_Block_irg for anchor */
748 /* we can't have NULL inputs so reference ourselfes for now */
749 for (i = 0; i <= (size_t)anchor_last; ++i) {
750 set_irn_n(res, i, res);
756 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
758 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
759 res->attr.block.irg.irg = irg;
760 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
761 set_Block_matured(res, 1);
762 /* Create and initialize array for Phi-node construction. */
763 if (get_irg_phase_state(irg) == phase_building) {
764 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
765 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
767 irn_verify_irg(res, irg);