2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
59 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
60 ir_asm_constraint *inputs, size_t n_outs,
61 ir_asm_constraint *outputs, size_t n_clobber,
62 ident *clobber[], ident *text)
64 ir_graph *irg = get_irn_irg(block);
65 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
67 res->attr.assem.pin_state = op_pin_state_pinned;
68 res->attr.assem.input_constraints
69 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
70 res->attr.assem.output_constraints
71 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
72 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
73 res->attr.assem.text = text;
75 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
76 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
77 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
79 irn_verify_irg(res, irg);
80 res = optimize_node(res);
84 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
85 ir_node *objptr, ir_entity *ent)
87 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
90 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
91 symconst_symbol value, symconst_kind symkind)
93 ir_node *block = get_irg_start_block(irg);
94 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
95 res->attr.symc.kind = symkind;
96 res->attr.symc.sym = value;
98 irn_verify_irg(res, irg);
99 res = optimize_node(res);
103 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
106 sym.entity_p = symbol;
107 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
110 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
113 sym.entity_p = symbol;
114 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
117 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
121 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
124 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
128 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
131 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
133 return new_rd_Const_long(NULL, irg, mode, value);
135 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
136 symconst_kind symkind)
138 return new_rd_SymConst(NULL, irg, mode, value, symkind);
140 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
143 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
145 ir_node *new_r_ASM(ir_node *block,
146 int arity, ir_node *in[], ir_asm_constraint *inputs,
147 size_t n_outs, ir_asm_constraint *outputs,
148 size_t n_clobber, ident *clobber[], ident *text)
150 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
153 /** Creates a Phi node with 0 predecessors. */
154 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
155 ir_mode *mode, int pos)
157 ir_graph *irg = get_irn_irg(block);
158 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
159 res->attr.phi.u.pos = pos;
160 irn_verify_irg(res, irg);
164 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
166 static void try_remove_unnecessary_phi(ir_node *phi)
168 ir_node *phi_value = NULL;
169 int arity = get_irn_arity(phi);
172 /* see if all inputs are either pointing to a single value or
173 * are self references */
174 for (i = 0; i < arity; ++i) {
175 ir_node *in = get_irn_n(phi, i);
180 /** found a different value from the one we already found, can't remove
182 if (phi_value != NULL)
186 if (phi_value == NULL)
189 /* if we're here then all phi inputs have been either phi_value
190 * or self-references, we can replace the phi by phi_value.
191 * We do this with an Id-node */
192 exchange(phi, phi_value);
194 /* recursively check phi_value, because it could be that we were the last
195 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
196 * the loop header which can be eliminated now */
197 if (is_Phi(phi_value)) {
198 try_remove_unnecessary_phi(phi_value);
203 * Computes the predecessors for the real phi node, and then
204 * allocates and returns this node. The routine called to allocate the
205 * node might optimize it away and return a real value.
206 * This function must be called with an in-array of proper size.
208 static ir_node *set_phi_arguments(ir_node *phi, int pos)
210 ir_node *block = get_nodes_block(phi);
211 ir_graph *irg = get_irn_irg(block);
212 int arity = get_irn_arity(block);
213 ir_node **in = ALLOCAN(ir_node*, arity);
214 ir_mode *mode = get_irn_mode(phi);
217 /* This loop goes to all predecessor blocks of the block the Phi node
218 is in and there finds the operands of the Phi node by calling
219 get_r_value_internal. */
220 for (i = 0; i < arity; ++i) {
221 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
223 if (is_Bad(cfgpred)) {
224 value = new_r_Bad(irg, mode);
226 inc_irg_visited(irg);
228 value = get_r_value_internal(cfgpred, pos, mode);
233 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
234 set_irn_in(phi, arity, in);
235 set_irn_op(phi, op_Phi);
237 irn_verify_irg(phi, irg);
239 /* Memory Phis in endless loops must be kept alive.
240 As we can't distinguish these easily we keep all of them alive. */
241 if (is_Phi(phi) && mode == mode_M)
242 add_End_keepalive(get_irg_end(irg), phi);
244 try_remove_unnecessary_phi(phi);
249 * This function returns the last definition of a value. In case
250 * this value was last defined in a previous block, Phi nodes are
251 * inserted. If the part of the firm graph containing the definition
252 * is not yet constructed, a dummy Phi node is returned.
254 * @param block the current block
255 * @param pos the value number of the value searched
256 * @param mode the mode of this value (needed for Phi construction)
258 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
260 ir_node *res = block->attr.block.graph_arr[pos];
261 ir_graph *irg = get_irn_irg(block);
265 /* We ran into a cycle. This may happen in unreachable loops. */
266 if (irn_visited_else_mark(block)) {
267 /* Since the loop is unreachable, return a Bad. */
268 return new_r_Bad(irg, mode);
271 /* in a matured block we can immediately determine the phi arguments */
272 if (get_Block_matured(block)) {
273 int arity = get_irn_arity(block);
274 /* no predecessors: use unknown value */
275 if (arity == 0 && block == get_irg_start_block(get_irn_irg(block))) {
276 if (default_initialize_local_variable != NULL) {
277 ir_node *rem = get_r_cur_block(irg);
278 set_r_cur_block(irg, block);
279 res = default_initialize_local_variable(irg, mode, pos - 1);
280 set_r_cur_block(irg, rem);
282 res = new_r_Unknown(irg, mode);
284 /* one predecessor just use its value */
285 } else if (arity == 1) {
286 ir_node *cfgpred = get_Block_cfgpred(block, 0);
287 if (is_Bad(cfgpred)) {
288 res = new_r_Bad(irg, mode);
290 ir_node *cfgpred_block = get_nodes_block(cfgpred);
291 res = get_r_value_internal(cfgpred_block, pos, mode);
293 /* multiple predecessors construct Phi */
295 res = new_rd_Phi0(NULL, block, mode, pos);
296 /* enter phi0 into our variable value table to break cycles
297 * arising from set_phi_arguments */
298 block->attr.block.graph_arr[pos] = res;
299 res = set_phi_arguments(res, pos);
302 /* in case of immature block we have to keep a Phi0 */
303 res = new_rd_Phi0(NULL, block, mode, pos);
304 /* enqueue phi so we can set arguments once the block matures */
305 res->attr.phi.next = block->attr.block.phis;
306 block->attr.block.phis = res;
308 block->attr.block.graph_arr[pos] = res;
312 void mature_immBlock(ir_node *block)
320 assert(is_Block(block));
321 if (get_Block_matured(block))
324 irg = get_irn_irg(block);
325 n_preds = ARR_LEN(block->in) - 1;
326 /* Fix block parameters */
327 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
329 /* Traverse a chain of Phi nodes attached to this block and mature
331 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
333 int pos = phi->attr.phi.u.pos;
335 next = phi->attr.phi.next;
336 new_value = set_phi_arguments(phi, pos);
337 if (block->attr.block.graph_arr[pos] == phi) {
338 block->attr.block.graph_arr[pos] = new_value;
342 set_Block_matured(block, 1);
344 /* create final in-array for the block */
345 if (block->attr.block.dynamic_ins) {
346 new_in = NEW_ARR_D(ir_node*, irg->obst, n_preds+1);
347 memcpy(new_in, block->in, (n_preds+1) * sizeof(new_in[0]));
348 DEL_ARR_F(block->in);
350 block->attr.block.dynamic_ins = false;
353 /* Now, as the block is a finished Firm node, we can optimize it.
354 Since other nodes have been allocated since the block was created
355 we can not free the node on the obstack. Therefore we have to call
357 Unfortunately the optimization does not change a lot, as all allocated
358 nodes refer to the unoptimized node.
359 We can call optimize_in_place_2(), as global cse has no effect on blocks.
361 irn_verify_irg(block, irg);
362 block = optimize_in_place_2(block);
365 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
367 assert(get_irg_phase_state(current_ir_graph) == phase_building);
368 return new_rd_Const_long(db, current_ir_graph, mode, value);
371 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
374 assert(get_irg_phase_state(current_ir_graph) == phase_building);
375 return new_rd_Sel(db, current_ir_graph->current_block,
376 store, objptr, 0, NULL, ent);
379 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
382 assert(get_irg_phase_state(current_ir_graph) == phase_building);
383 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
386 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
387 ir_asm_constraint *inputs,
388 size_t n_outs, ir_asm_constraint *outputs,
389 size_t n_clobber, ident *clobber[], ident *text)
391 assert(get_irg_phase_state(current_ir_graph) == phase_building);
392 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
393 n_outs, outputs, n_clobber, clobber, text);
396 ir_node *new_rd_strictConv(dbg_info *dbgi, ir_node *block, ir_node * irn_op, ir_mode * mode)
399 ir_graph *irg = get_Block_irg(block);
404 res = new_ir_node(dbgi, irg, block, op_Conv, mode, 1, in);
405 res->attr.conv.strict = 1;
406 irn_verify_irg(res, irg);
407 res = optimize_node(res);
411 ir_node *new_r_strictConv(ir_node *block, ir_node * irn_op, ir_mode * mode)
413 return new_rd_strictConv(NULL, block, irn_op, mode);
416 ir_node *new_d_strictConv(dbg_info *dbgi, ir_node * irn_op, ir_mode * mode)
419 assert(get_irg_phase_state(current_ir_graph) == phase_building);
420 res = new_rd_strictConv(dbgi, current_ir_graph->current_block, irn_op, mode);
424 ir_node *new_strictConv(ir_node * irn_op, ir_mode * mode)
426 return new_d_strictConv(NULL, irn_op, mode);
429 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
432 ir_graph *irg = get_Block_irg(block);
439 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
440 res->attr.div.resmode = resmode;
441 res->attr.div.no_remainder = 1;
442 res->attr.div.exc.pin_state = pin_state;
443 irn_verify_irg(res, irg);
444 res = optimize_node(res);
448 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
450 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
453 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
456 assert(get_irg_phase_state(current_ir_graph) == phase_building);
457 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
461 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
463 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
466 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
470 assert(get_irg_phase_state(irg) == phase_building);
471 /* creates a new dynamic in-array as length of in is -1 */
472 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
474 set_Block_matured(res, 0);
475 res->attr.block.dynamic_ins = true;
476 res->attr.block.irg.irg = irg;
477 res->attr.block.backedge = NULL;
478 res->attr.block.in_cg = NULL;
479 res->attr.block.cg_backedge = NULL;
480 res->attr.block.extblk = NULL;
481 res->attr.block.entity = NULL;
483 set_Block_block_visited(res, 0);
485 /* Create and initialize array for Phi-node construction. */
486 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
487 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
489 /* Immature block may not be optimized! */
490 irn_verify_irg(res, irg);
495 ir_node *new_r_immBlock(ir_graph *irg)
497 return new_rd_immBlock(NULL, irg);
500 ir_node *new_d_immBlock(dbg_info *dbgi)
502 return new_rd_immBlock(dbgi, current_ir_graph);
505 ir_node *new_immBlock(void)
507 return new_rd_immBlock(NULL, current_ir_graph);
510 void add_immBlock_pred(ir_node *block, ir_node *jmp)
512 int n = ARR_LEN(block->in) - 1;
514 assert(is_Block(block) && "Error: Must be a Block");
515 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
516 assert(is_ir_node(jmp));
518 ARR_APP1(ir_node *, block->in, jmp);
520 hook_set_irn_n(block, n, jmp, NULL);
523 void set_cur_block(ir_node *target)
525 set_r_cur_block(current_ir_graph, target);
528 void set_r_cur_block(ir_graph *irg, ir_node *target)
530 assert(get_irg_phase_state(irg) == phase_building);
531 assert(target == NULL || get_irn_mode(target) == mode_BB);
532 assert(target == NULL || get_irn_irg(target) == irg);
533 irg->current_block = target;
536 ir_node *get_r_cur_block(ir_graph *irg)
538 assert(get_irg_phase_state(irg) == phase_building);
539 return irg->current_block;
542 ir_node *get_cur_block(void)
544 return get_r_cur_block(current_ir_graph);
547 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
549 assert(get_irg_phase_state(irg) == phase_building);
551 inc_irg_visited(irg);
553 return get_r_value_internal(irg->current_block, pos + 1, mode);
556 ir_node *get_value(int pos, ir_mode *mode)
558 return get_r_value(current_ir_graph, pos, mode);
562 * helper function for guess_mode: recursively look for a definition for
563 * local variable @p pos, returns its mode if found.
565 static ir_mode *guess_recursively(ir_node *block, int pos)
571 if (irn_visited_else_mark(block))
574 /* already have a defintion -> we can simply look at its mode */
575 value = block->attr.block.graph_arr[pos];
577 return get_irn_mode(value);
579 /* now we try to guess, by looking at the predecessor blocks */
580 n_preds = get_irn_arity(block);
581 for (i = 0; i < n_preds; ++i) {
582 ir_node *pred_block = get_Block_cfgpred_block(block, i);
583 ir_mode *mode = guess_recursively(pred_block, pos);
588 /* no way to guess */
592 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
594 ir_node *block = irg->current_block;
595 ir_node *value = block->attr.block.graph_arr[pos+1];
598 /* already have a defintion -> we can simply look at its mode */
600 return get_irn_mode(value);
602 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
603 inc_irg_visited(irg);
604 mode = guess_recursively(block, pos+1);
605 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
610 ir_mode *ir_guess_mode(int pos)
612 return ir_r_guess_mode(current_ir_graph, pos);
615 void set_r_value(ir_graph *irg, int pos, ir_node *value)
617 assert(get_irg_phase_state(irg) == phase_building);
619 assert(pos+1 < irg->n_loc);
620 assert(is_ir_node(value));
621 irg->current_block->attr.block.graph_arr[pos + 1] = value;
624 void set_value(int pos, ir_node *value)
626 set_r_value(current_ir_graph, pos, value);
629 ir_node *get_r_store(ir_graph *irg)
631 assert(get_irg_phase_state(irg) == phase_building);
632 inc_irg_visited(irg);
633 return get_r_value_internal(irg->current_block, 0, mode_M);
636 ir_node *get_store(void)
638 return get_r_store(current_ir_graph);
641 void set_r_store(ir_graph *irg, ir_node *store)
643 ir_node *load, *pload, *pred, *in[2];
645 assert(get_irg_phase_state(irg) == phase_building);
646 /* Beware: due to dead code elimination, a store might become a Bad node even in
647 the construction phase. */
648 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
650 if (get_opt_auto_create_sync()) {
651 /* handle non-volatile Load nodes by automatically creating Sync's */
652 load = skip_Proj(store);
653 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
654 pred = get_Load_mem(load);
657 /* a Load after a Sync: move it up */
658 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
660 set_Load_mem(load, get_memop_mem(mem));
661 add_Sync_pred(pred, store);
664 pload = skip_Proj(pred);
665 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
666 /* a Load after a Load: create a new Sync */
667 set_Load_mem(load, get_Load_mem(pload));
671 store = new_r_Sync(irg->current_block, 2, in);
676 irg->current_block->attr.block.graph_arr[0] = store;
679 void set_store(ir_node *store)
681 set_r_store(current_ir_graph, store);
684 void keep_alive(ir_node *ka)
686 ir_graph *irg = get_irn_irg(ka);
687 add_End_keepalive(get_irg_end(irg), ka);
690 void ir_set_uninitialized_local_variable_func(
691 uninitialized_local_variable_func_t *func)
693 default_initialize_local_variable = func;
696 void irg_finalize_cons(ir_graph *irg)
698 ir_node *end_block = get_irg_end_block(irg);
699 mature_immBlock(end_block);
701 set_irg_phase_state(irg, phase_high);
704 void irp_finalize_cons(void)
707 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
708 irg_finalize_cons(get_irp_irg(i));
710 irp->phase_state = phase_high;
713 ir_node *new_Const_long(ir_mode *mode, long value)
715 return new_d_Const_long(NULL, mode, value);
718 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
720 return new_d_SymConst(NULL, mode, value, kind);
722 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
724 return new_d_simpleSel(NULL, store, objptr, ent);
726 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
727 size_t n_outs, ir_asm_constraint *outputs,
728 size_t n_clobber, ident *clobber[], ident *text)
730 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
733 ir_node *new_r_Anchor(ir_graph *irg)
735 ir_node *in[anchor_last+1];
738 memset(in, 0, sizeof(in));
739 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last+1, in);
740 res->attr.anchor.irg.irg = irg;
742 /* hack to get get_irn_irg working: set block to ourself and allow
743 * get_Block_irg for anchor */
746 /* we can't have NULL inputs so reference ourselfes for now */
747 for (i = 0; i <= (size_t)anchor_last; ++i) {
748 set_irn_n(res, i, res);
754 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
756 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
757 res->attr.block.irg.irg = irg;
758 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
759 set_Block_matured(res, 1);
760 /* Create and initialize array for Phi-node construction. */
761 if (get_irg_phase_state(irg) == phase_building) {
762 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
763 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
765 irn_verify_irg(res, irg);