2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
/* Frontend-supplied hook invoked when an uninitialized local variable is
 * read; when NULL, get_r_value_internal() falls back to an Unknown node. */
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/** Creates a Const node from a C `long`: converts the value into a tarval
 * of the given mode and delegates to new_rd_Const(). */
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
/**
 * Constructs an ASM (inline assembler) node in the given block.
 * Produces a tuple (mode_T) and is pinned, as the assembler text may have
 * side effects.  All constraint and clobber arrays are copied onto the
 * graph's obstack so the node owns its data independent of the caller's
 * buffers.
 */
59 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
60 ir_asm_constraint *inputs, size_t n_outs,
61 ir_asm_constraint *outputs, size_t n_clobber,
62 ident *clobber[], ident *text)
64 ir_graph *irg = get_irn_irg(block);
65 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
/* Pinned: the node must stay in its block. */
67 res->attr.assem.pin_state = op_pin_state_pinned;
/* Allocate node-owned copies of the constraint/clobber arrays. */
68 res->attr.assem.input_constraints
69 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity)
70 res->attr.assem.output_constraints
71 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
72 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
73 res->attr.assem.text = text;
75 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
76 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
77 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
79 irn_verify_irg(res, irg);
80 res = optimize_node(res);
/** Creates a Sel node without array indices (0/NULL index arguments):
 * selects entity @p ent from the compound pointed to by @p objptr. */
84 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
85 ir_node *objptr, ir_entity *ent)
87 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
/**
 * Constructs a SymConst node of the given kind/symbol.
 * The node is placed into the start block: symbolic constants are
 * position-independent, so one canonical location suffices.
 */
90 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
91 symconst_symbol value, symconst_kind symkind)
93 ir_node *block = get_irg_start_block(irg);
94 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
95 res->attr.symc.kind = symkind;
96 res->attr.symc.sym = value;
98 irn_verify_irg(res, irg);
99 res = optimize_node(res);
/** SymConst for the address of entity @p symbol. */
103 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
106 sym.entity_p = symbol;
107 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
/** SymConst for the offset of entity @p symbol. */
110 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
113 sym.entity_p = symbol;
114 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
/** SymConst for the size of type @p symbol. */
117 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
121 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
/** SymConst for the alignment of type @p symbol. */
124 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
128 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
/* The new_r_* constructors below are the "raw" variants: identical to
 * their new_rd_* counterparts but without debug info (NULL dbg_info). */
131 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
133 return new_rd_Const_long(NULL, irg, mode, value);
135 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
136 symconst_kind symkind)
138 return new_rd_SymConst(NULL, irg, mode, value, symkind);
140 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
143 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
145 ir_node *new_r_ASM(ir_node *block,
146 int arity, ir_node *in[], ir_asm_constraint *inputs,
147 size_t n_outs, ir_asm_constraint *outputs,
148 size_t n_clobber, ident *clobber[], ident *text)
150 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
153 /** Creates a Phi node with 0 predecessors. */
/* Placeholder used during SSA construction: @p pos records the local
 * variable slot so the real arguments can be filled in later by
 * set_phi_arguments().  Only verified, not optimized. */
154 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
155 ir_mode *mode, int pos)
157 ir_graph *irg = get_irn_irg(block);
158 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
159 res->attr.phi.u.pos = pos;
160 irn_verify_irg(res, irg);
164 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
/**
 * Removes a Phi node whose inputs all reduce to a single value (or to the
 * Phi itself) by exchanging it with that value, then recurses into the
 * replacement in case it became removable too.
 */
166 static void try_remove_unnecessary_phi(ir_node *phi)
168 ir_node *phi_value = NULL;
169 int arity = get_irn_arity(phi);
172 /* see if all inputs are either pointing to a single value or
173 * are self references */
174 for (i = 0; i < arity; ++i) {
175 ir_node *in = get_irn_n(phi, i);
180 /** found a different value from the one we already found, can't remove
182 if (phi_value != NULL)
186 if (phi_value == NULL)
189 /* if we're here then all phi inputs have been either phi_value
190 * or self-references, we can replace the phi by phi_value.
191 * We do this with an Id-node */
192 exchange(phi, phi_value);
194 /* recursively check phi_value, because it could be that we were the last
195 * phi-node in a loop-body. Then our argument is an unnecessary phi in
196 * the loop header which can be eliminated now */
197 if (is_Phi(phi_value)) {
198 try_remove_unnecessary_phi(phi_value);
203 * Computes the predecessors for the real phi node, and then
204 * allocates and returns this node. The routine called to allocate the
205 * node might optimize it away and return a real value.
206 * This function must be called with an in-array of proper size.
208 static ir_node *set_phi_arguments(ir_node *phi, int pos)
210 ir_node *block = get_nodes_block(phi);
211 ir_graph *irg = get_irn_irg(block);
212 int arity = get_irn_arity(block);
213 ir_node **in = ALLOCAN(ir_node*, arity);
214 ir_mode *mode = get_irn_mode(phi);
217 /* This loop goes to all predecessor blocks of the block the Phi node
218 is in and there finds the operands of the Phi node by calling
219 get_r_value_internal. */
220 for (i = 0; i < arity; ++i) {
221 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
/* Unreachable predecessor: the operand is Bad as well. */
223 if (is_Bad(cfgpred)) {
224 value = new_r_Bad(irg, mode);
/* Fresh visited epoch per predecessor walk: get_r_value_internal uses
 * the visited flag for cycle detection. */
226 inc_irg_visited(irg);
228 value = get_r_value_internal(cfgpred, pos, mode);
233 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
234 set_irn_in(phi, arity, in);
235 set_irn_op(phi, op_Phi);
237 irn_verify_irg(phi, irg);
239 /* Memory Phis in endless loops must be kept alive.
240 As we can't distinguish these easily we keep all of them alive. */
241 if (is_Phi(phi) && mode == mode_M)
242 add_End_keepalive(get_irg_end(irg), phi);
/* The Phi may now be redundant (all operands equal); try to fold it. */
244 try_remove_unnecessary_phi(phi);
249 * This function returns the last definition of a value. In case
250 * this value was last defined in a previous block, Phi nodes are
251 * inserted. If the part of the firm graph containing the definition
252 * is not yet constructed, a dummy Phi node is returned.
254 * @param block the current block
255 * @param pos the value number of the value searched
256 * @param mode the mode of this value (needed for Phi construction)
258 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
260 ir_node *res = block->attr.block.graph_arr[pos];
261 ir_graph *irg = get_irn_irg(block);
265 /* We ran into a cycle. This may happen in unreachable loops. */
266 if (irn_visited_else_mark(block)) {
267 /* Since the loop is unreachable, return a Bad. */
268 return new_r_Bad(irg, mode);
271 /* in a matured block we can immediately determine the phi arguments */
272 if (get_Block_matured(block)) {
273 int arity = get_irn_arity(block);
274 /* no predecessors: use unknown value */
275 if (arity == 0 && block == get_irg_start_block(get_irn_irg(block))) {
276 if (default_initialize_local_variable != NULL) {
277 ir_node *rem = get_r_cur_block(irg);
278 set_r_cur_block(irg, block);
/* pos - 1: callers use pos+1 internally, slot 0 holds the memory
 * state, so translate back to the frontend's variable number. */
279 res = default_initialize_local_variable(irg, mode, pos - 1);
280 set_r_cur_block(irg, rem);
282 res = new_r_Unknown(irg, mode);
284 /* one predecessor just use its value */
285 } else if (arity == 1) {
286 ir_node *cfgpred = get_Block_cfgpred(block, 0);
287 if (is_Bad(cfgpred)) {
288 res = new_r_Bad(irg, mode);
290 ir_node *cfgpred_block = get_nodes_block(cfgpred);
291 res = get_r_value_internal(cfgpred_block, pos, mode);
293 /* multiple predecessors construct Phi */
295 res = new_rd_Phi0(NULL, block, mode, pos);
296 /* enter phi0 into our variable value table to break cycles
297 * arising from set_phi_arguments */
298 block->attr.block.graph_arr[pos] = res;
299 res = set_phi_arguments(res, pos);
302 /* in case of immature block we have to keep a Phi0 */
303 res = new_rd_Phi0(NULL, block, mode, pos);
304 /* enqueue phi so we can set arguments once the block matures */
305 res->attr.phi.next = block->attr.block.phis;
306 block->attr.block.phis = res;
/* Cache the definition for subsequent lookups in this block. */
308 block->attr.block.graph_arr[pos] = res;
312 /* ************************************************************************** */
315 * Finalize a Block node, when all control flows are known.
316 * Acceptable parameters are only Block nodes.
318 void mature_immBlock(ir_node *block)
325 assert(is_Block(block));
/* Maturing twice would re-process the Phi list; bail out early. */
326 if (get_Block_matured(block))
329 irg = get_irn_irg(block);
/* NOTE(review): in[] appears to carry one extra leading slot, hence the
 * "- 1" to obtain the real predecessor count — confirm against ir_node
 * in-array layout. */
330 n_preds = ARR_LEN(block->in) - 1;
331 /* Fix block parameters */
332 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
334 /* Traverse a chain of Phi nodes attached to this block and mature
336 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
338 int pos = phi->attr.phi.u.pos;
340 next = phi->attr.phi.next;
341 new_value = set_phi_arguments(phi, pos);
/* Only update the value table if it still points at this Phi0;
 * set_phi_arguments may have replaced it already. */
342 if (block->attr.block.graph_arr[pos] == phi) {
343 block->attr.block.graph_arr[pos] = new_value;
347 set_Block_matured(block, 1);
349 /* Now, as the block is a finished Firm node, we can optimize it.
350 Since other nodes have been allocated since the block was created
351 we can not free the node on the obstack. Therefore we have to call
353 Unfortunately the optimization does not change a lot, as all allocated
354 nodes refer to the unoptimized node.
355 We can call optimize_in_place_2(), as global cse has no effect on blocks.
357 irn_verify_irg(block, irg);
358 block = optimize_in_place_2(block);
/* The new_d_* constructors operate on current_ir_graph and (where a block
 * is needed) its current_block; they are only valid while the graph is in
 * phase_building. */
361 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
363 assert(get_irg_phase_state(current_ir_graph) == phase_building);
364 return new_rd_Const_long(db, current_ir_graph, mode, value);
367 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
370 assert(get_irg_phase_state(current_ir_graph) == phase_building);
371 return new_rd_Sel(db, current_ir_graph->current_block,
372 store, objptr, 0, NULL, ent);
375 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
378 assert(get_irg_phase_state(current_ir_graph) == phase_building);
379 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
382 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
383 ir_asm_constraint *inputs,
384 size_t n_outs, ir_asm_constraint *outputs,
385 size_t n_clobber, ident *clobber[], ident *text)
387 assert(get_irg_phase_state(current_ir_graph) == phase_building);
388 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
389 n_outs, outputs, n_clobber, clobber, text);
/**
 * Constructs a strict Conv node: a regular Conv (op_Conv) with the
 * `strict` attribute set, which marks the conversion as one that must not
 * be removed (e.g. to enforce rounding/truncation semantics).
 */
392 ir_node *new_rd_strictConv(dbg_info *dbgi, ir_node *block, ir_node * irn_op, ir_mode * mode)
395 ir_graph *irg = get_Block_irg(block);
400 res = new_ir_node(dbgi, irg, block, op_Conv, mode, 1, in);
401 res->attr.conv.strict = 1;
402 irn_verify_irg(res, irg);
403 res = optimize_node(res);
/** Raw variant: no debug info. */
407 ir_node *new_r_strictConv(ir_node *block, ir_node * irn_op, ir_mode * mode)
409 return new_rd_strictConv(NULL, block, irn_op, mode);
/** Convenience variant: uses current_ir_graph's current block. */
412 ir_node *new_d_strictConv(dbg_info *dbgi, ir_node * irn_op, ir_mode * mode)
415 assert(get_irg_phase_state(current_ir_graph) == phase_building);
416 res = new_rd_strictConv(dbgi, current_ir_graph->current_block, irn_op, mode);
/** Convenience variant: no debug info, current block. */
420 ir_node *new_strictConv(ir_node * irn_op, ir_mode * mode)
422 return new_d_strictConv(NULL, irn_op, mode);
/**
 * Constructs a remainderless Div node (DivRL): a Div (op_Div, tuple
 * result) with attr.div.no_remainder set, carrying the result mode and
 * the requested pin state for its exception attributes.
 */
425 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
428 ir_graph *irg = get_Block_irg(block);
435 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
436 res->attr.div.resmode = resmode;
437 res->attr.div.no_remainder = 1;
438 res->attr.div.exc.pin_state = pin_state;
439 irn_verify_irg(res, irg);
440 res = optimize_node(res);
/** Raw variant: no debug info. */
444 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
446 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
/** Convenience variant: uses current_ir_graph's current block. */
449 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
452 assert(get_irg_phase_state(current_ir_graph) == phase_building);
453 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
/** Convenience variant: no debug info, current block. */
457 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
459 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
/**
 * Creates an immature Block: a Block whose predecessors are not yet known.
 * Predecessors are added later with add_immBlock_pred() and the block is
 * finalized with mature_immBlock().  Immature blocks are verified but
 * deliberately not optimized.
 */
462 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
466 assert(get_irg_phase_state(irg) == phase_building);
467 /* creates a new dynamic in-array as length of in is -1 */
468 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
470 set_Block_matured(res, 0);
471 res->attr.block.irg.irg = irg;
472 res->attr.block.backedge = NULL;
473 res->attr.block.in_cg = NULL;
474 res->attr.block.cg_backedge = NULL;
475 res->attr.block.extblk = NULL;
476 res->attr.block.entity = NULL;
478 set_Block_block_visited(res, 0);
480 /* Create and initialize array for Phi-node construction. */
481 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
482 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
484 /* Immature block may not be optimized! */
485 irn_verify_irg(res, irg);
/** Raw variant: no debug info. */
490 ir_node *new_r_immBlock(ir_graph *irg)
492 return new_rd_immBlock(NULL, irg);
/** Convenience variant: uses current_ir_graph. */
495 ir_node *new_d_immBlock(dbg_info *dbgi)
497 return new_rd_immBlock(dbgi, current_ir_graph);
/** Convenience variant: no debug info, current_ir_graph. */
500 ir_node *new_immBlock(void)
502 return new_rd_immBlock(NULL, current_ir_graph);
/**
 * Appends control-flow predecessor @p jmp to the (still immature) block
 * by growing its dynamic in-array.  Must not be called after the block
 * has been matured.
 */
505 void add_immBlock_pred(ir_node *block, ir_node *jmp)
507 int n = ARR_LEN(block->in) - 1;
509 assert(is_Block(block) && "Error: Must be a Block");
510 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
511 assert(is_ir_node(jmp));
513 ARR_APP1(ir_node *, block->in, jmp);
/* Notify edge/debug hooks about the new predecessor. */
515 hook_set_irn_n(block, n, jmp, NULL);
/** Sets the current block of current_ir_graph. */
518 void set_cur_block(ir_node *target)
520 set_r_cur_block(current_ir_graph, target);
/** Sets the current block of @p irg; NULL is allowed (no current block). */
523 void set_r_cur_block(ir_graph *irg, ir_node *target)
525 assert(target == NULL || get_irn_mode(target) == mode_BB);
526 assert(target == NULL || get_irn_irg(target) == irg);
527 irg->current_block = target;
/** Returns the current block of @p irg. */
530 ir_node *get_r_cur_block(ir_graph *irg)
532 return irg->current_block;
/** Returns the current block of current_ir_graph. */
535 ir_node *get_cur_block(void)
537 return get_r_cur_block(current_ir_graph);
/**
 * Returns the current definition of local variable @p pos in @p irg's
 * current block, constructing Phi nodes as needed.
 * Uses pos + 1 internally: slot 0 of the value table holds the memory
 * state (see get_r_store).
 */
540 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
542 assert(get_irg_phase_state(irg) == phase_building);
/* Fresh visited epoch for the cycle detection in get_r_value_internal. */
544 inc_irg_visited(irg);
546 return get_r_value_internal(irg->current_block, pos + 1, mode);
/** Convenience variant operating on current_ir_graph. */
549 ir_node *get_value(int pos, ir_mode *mode)
551 return get_r_value(current_ir_graph, pos, mode);
555 * helper function for guess_mode: recursively look for a definition for
556 * local variable @p pos, returns its mode if found.
558 static ir_mode *guess_recursively(ir_node *block, int pos)
/* Visited flag terminates the walk on control-flow cycles. */
564 if (irn_visited_else_mark(block))
567 /* already have a definition -> we can simply look at its mode */
568 value = block->attr.block.graph_arr[pos];
570 return get_irn_mode(value);
572 /* now we try to guess, by looking at the predecessor blocks */
573 n_preds = get_irn_arity(block);
574 for (i = 0; i < n_preds; ++i) {
575 ir_node *pred_block = get_Block_cfgpred_block(block, i);
576 ir_mode *mode = guess_recursively(pred_block, pos);
581 /* no way to guess */
/**
 * Guesses the mode of local variable @p pos: uses the current block's
 * definition if one exists, otherwise walks predecessor blocks
 * (guess_recursively) under the IRN_VISITED resource.
 */
585 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
587 ir_node *block = irg->current_block;
588 ir_node *value = block->attr.block.graph_arr[pos+1];
591 /* already have a definition -> we can simply look at its mode */
593 return get_irn_mode(value);
595 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
596 inc_irg_visited(irg);
597 mode = guess_recursively(block, pos+1);
598 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
/** Convenience variant operating on current_ir_graph. */
603 ir_mode *ir_guess_mode(int pos)
605 return ir_r_guess_mode(current_ir_graph, pos);
/**
 * Defines local variable @p pos in the current block.
 * Writes slot pos + 1: slot 0 is reserved for the memory state.
 */
608 void set_r_value(ir_graph *irg, int pos, ir_node *value)
610 assert(get_irg_phase_state(irg) == phase_building);
612 assert(pos+1 < irg->n_loc);
613 assert(is_ir_node(value));
614 irg->current_block->attr.block.graph_arr[pos + 1] = value;
/** Convenience variant operating on current_ir_graph. */
617 void set_value(int pos, ir_node *value)
619 set_r_value(current_ir_graph, pos, value);
/**
 * Searches the current block's value table for @p value, scanning from the
 * highest slot downward and skipping slot 0 (the memory state).
 */
622 int r_find_value(ir_graph *irg, ir_node *value)
625 ir_node *bl = irg->current_block;
627 for (i = ARR_LEN(bl->attr.block.graph_arr); i > 1;) {
628 if (bl->attr.block.graph_arr[--i] == value)
/** Convenience variant operating on current_ir_graph. */
634 int find_value(ir_node *value)
636 return r_find_value(current_ir_graph, value);
/**
 * Returns the current memory state: value slot 0 with mode_M, constructing
 * Phi nodes across blocks as needed.
 */
639 ir_node *get_r_store(ir_graph *irg)
641 assert(get_irg_phase_state(irg) == phase_building);
642 inc_irg_visited(irg);
643 return get_r_value_internal(irg->current_block, 0, mode_M);
/** Convenience variant operating on current_ir_graph. */
646 ir_node *get_store(void)
648 return get_r_store(current_ir_graph);
/**
 * Sets the current memory state (value slot 0).
 * With the auto-create-Sync option enabled, consecutive non-volatile
 * Loads are rewired so their memory inputs run in parallel through a
 * Sync node instead of being serialized.
 */
651 void set_r_store(ir_graph *irg, ir_node *store)
653 ir_node *load, *pload, *pred, *in[2];
655 assert(get_irg_phase_state(irg) == phase_building);
656 /* Beware: due to dead code elimination, a store might become a Bad node even in
657 the construction phase. */
658 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
660 if (get_opt_auto_create_sync()) {
661 /* handle non-volatile Load nodes by automatically creating Sync's */
662 load = skip_Proj(store);
663 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
664 pred = get_Load_mem(load);
667 /* a Load after a Sync: move it up */
668 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
670 set_Load_mem(load, get_memop_mem(mem));
671 add_Sync_pred(pred, store);
674 pload = skip_Proj(pred);
675 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
676 /* a Load after a Load: create a new Sync */
677 set_Load_mem(load, get_Load_mem(pload));
681 store = new_r_Sync(irg->current_block, 2, in);
686 irg->current_block->attr.block.graph_arr[0] = store;
/** Convenience variant operating on current_ir_graph. */
689 void set_store(ir_node *store)
691 set_r_store(current_ir_graph, store);
/** Adds @p ka to the End node's keep-alive list so dead-code removal
 * cannot drop it. */
694 void keep_alive(ir_node *ka)
696 ir_graph *irg = get_irn_irg(ka);
697 add_End_keepalive(get_irg_end(irg), ka);
/** Installs the frontend callback used to produce values for reads of
 * uninitialized local variables (see default_initialize_local_variable). */
700 void ir_set_uninitialized_local_variable_func(
701 uninitialized_local_variable_func_t *func)
703 default_initialize_local_variable = func;
/** Ends construction of @p irg: moves it from phase_building to phase_high. */
706 void irg_finalize_cons(ir_graph *irg)
708 set_irg_phase_state(irg, phase_high);
/** Ends construction of all graphs in the program and the program itself. */
711 void irp_finalize_cons(void)
714 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
715 irg_finalize_cons(get_irp_irg(i));
717 irp->phase_state = phase_high;
/* The plain new_* constructors delegate to the new_d_* variants with
 * NULL debug info, operating on current_ir_graph. */
720 ir_node *new_Const_long(ir_mode *mode, long value)
722 return new_d_Const_long(NULL, mode, value);
725 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
727 return new_d_SymConst(NULL, mode, value, kind);
729 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
731 return new_d_simpleSel(NULL, store, objptr, ent);
733 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
734 size_t n_outs, ir_asm_constraint *outputs,
735 size_t n_clobber, ident *clobber[], ident *text)
737 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
/**
 * Creates the Anchor node of @p irg.  All anchor inputs initially point
 * at the Anchor itself because the anchored nodes do not exist yet and
 * inputs must not be NULL.
 */
740 ir_node *new_r_Anchor(ir_graph *irg)
742 ir_node *in[anchor_last+1];
745 memset(in, 0, sizeof(in));
746 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last+1, in);
747 res->attr.anchor.irg.irg = irg;
749 /* hack to get get_irn_irg working: set block to ourself and allow
750 * get_Block_irg for anchor */
753 /* we can't have NULL inputs so reference ourselves for now */
754 for (i = 0; i <= (size_t)anchor_last; ++i) {
755 set_irn_n(res, i, res);
/**
 * Creates a Block with known predecessors that is matured immediately and
 * only verified, never passed through the node optimizer (hence "noopt").
 * The Phi-construction value table is allocated only while the graph is
 * still being built.
 */
761 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
763 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
764 res->attr.block.irg.irg = irg;
765 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
766 set_Block_matured(res, 1);
767 /* Create and initialize array for Phi-node construction. */
768 if (get_irg_phase_state(irg) == phase_building) {
769 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
770 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
772 irn_verify_irg(res, irg);