/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Various irnode constructors. Automatic construction of SSA
 *          representation.
 * @author  Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler,
 *          Michael Beck, Matthias Braun
 */
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
59 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
60 ir_asm_constraint *inputs, size_t n_outs,
61 ir_asm_constraint *outputs, size_t n_clobber,
62 ident *clobber[], ident *text)
64 ir_graph *irg = get_irn_irg(block);
65 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
67 res->attr.assem.pin_state = op_pin_state_pinned;
68 res->attr.assem.input_constraints
69 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
70 res->attr.assem.output_constraints
71 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
72 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
73 res->attr.assem.text = text;
75 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
76 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
77 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
79 irn_verify_irg(res, irg);
80 res = optimize_node(res);
84 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
85 ir_node *objptr, ir_entity *ent)
87 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
90 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
91 symconst_symbol value, symconst_kind symkind)
93 ir_node *block = get_irg_start_block(irg);
94 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
95 res->attr.symc.kind = symkind;
96 res->attr.symc.sym = value;
98 irn_verify_irg(res, irg);
99 res = optimize_node(res);
103 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
106 sym.entity_p = symbol;
107 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
110 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
113 sym.entity_p = symbol;
114 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
117 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
121 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
124 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
128 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
131 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
133 return new_rd_Const_long(NULL, irg, mode, value);
135 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
136 symconst_kind symkind)
138 return new_rd_SymConst(NULL, irg, mode, value, symkind);
140 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
143 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
145 ir_node *new_r_ASM(ir_node *block,
146 int arity, ir_node *in[], ir_asm_constraint *inputs,
147 size_t n_outs, ir_asm_constraint *outputs,
148 size_t n_clobber, ident *clobber[], ident *text)
150 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
153 /** Creates a Phi node with 0 predecessors. */
154 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
155 ir_mode *mode, int pos)
157 ir_graph *irg = get_irn_irg(block);
158 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
159 res->attr.phi.u.pos = pos;
160 irn_verify_irg(res, irg);
164 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
166 static void try_remove_unnecessary_phi(ir_node *phi)
168 ir_node *phi_value = NULL;
169 int arity = get_irn_arity(phi);
172 /* see if all inputs are either pointing to a single value or
173 * are self references */
174 for (i = 0; i < arity; ++i) {
175 ir_node *in = get_irn_n(phi, i);
180 /** found a different value from the one we already found, can't remove
182 if (phi_value != NULL)
186 if (phi_value == NULL)
189 /* if we're here then all phi inputs have been either phi_value
190 * or self-references, we can replace the phi by phi_value.
191 * We do this with an Id-node */
192 exchange(phi, phi_value);
194 /* recursively check phi_value, because it could be that we were the last
195 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
196 * the loop header which can be eliminated now */
197 if (is_Phi(phi_value)) {
198 try_remove_unnecessary_phi(phi_value);
203 * Computes the predecessors for the real phi node, and then
204 * allocates and returns this node. The routine called to allocate the
205 * node might optimize it away and return a real value.
206 * This function must be called with an in-array of proper size.
208 static ir_node *set_phi_arguments(ir_node *phi, int pos)
210 ir_node *block = get_nodes_block(phi);
211 ir_graph *irg = get_irn_irg(block);
212 int arity = get_irn_arity(block);
213 ir_node **in = ALLOCAN(ir_node*, arity);
214 ir_mode *mode = get_irn_mode(phi);
217 /* This loop goes to all predecessor blocks of the block the Phi node
218 is in and there finds the operands of the Phi node by calling
219 get_r_value_internal. */
220 for (i = 0; i < arity; ++i) {
221 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
223 if (is_Bad(cfgpred)) {
224 value = new_r_Bad(irg, mode);
226 inc_irg_visited(irg);
228 value = get_r_value_internal(cfgpred, pos, mode);
233 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
234 set_irn_in(phi, arity, in);
236 irn_verify_irg(phi, irg);
238 /* Memory Phis in endless loops must be kept alive.
239 As we can't distinguish these easily we keep all of them alive. */
241 add_End_keepalive(get_irg_end(irg), phi);
243 try_remove_unnecessary_phi(phi);
248 * This function returns the last definition of a value. In case
249 * this value was last defined in a previous block, Phi nodes are
250 * inserted. If the part of the firm graph containing the definition
251 * is not yet constructed, a dummy Phi node is returned.
253 * @param block the current block
254 * @param pos the value number of the value searched
255 * @param mode the mode of this value (needed for Phi construction)
257 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
259 ir_node *res = block->attr.block.graph_arr[pos];
260 ir_graph *irg = get_irn_irg(block);
264 /* We ran into a cycle. This may happen in unreachable loops. */
265 if (irn_visited_else_mark(block)) {
266 /* Since the loop is unreachable, return a Bad. */
267 return new_r_Bad(irg, mode);
270 /* in a matured block we can immediately determine the phi arguments */
271 if (get_Block_matured(block)) {
272 int arity = get_irn_arity(block);
273 /* no predecessors: use unknown value */
275 if (block == get_irg_start_block(irg)) {
276 if (default_initialize_local_variable != NULL) {
277 ir_node *rem = get_r_cur_block(irg);
278 set_r_cur_block(irg, block);
279 res = default_initialize_local_variable(irg, mode, pos - 1);
280 set_r_cur_block(irg, rem);
282 res = new_r_Unknown(irg, mode);
285 /* unreachable block, use Bad */
286 res = new_r_Bad(irg, mode);
288 /* one predecessor just use its value */
289 } else if (arity == 1) {
290 ir_node *cfgpred = get_Block_cfgpred(block, 0);
291 if (is_Bad(cfgpred)) {
292 res = new_r_Bad(irg, mode);
294 ir_node *cfgpred_block = get_nodes_block(cfgpred);
295 res = get_r_value_internal(cfgpred_block, pos, mode);
297 /* multiple predecessors construct Phi */
299 res = new_rd_Phi0(NULL, block, mode, pos);
300 /* enter phi0 into our variable value table to break cycles
301 * arising from set_phi_arguments */
302 block->attr.block.graph_arr[pos] = res;
303 res = set_phi_arguments(res, pos);
306 /* in case of immature block we have to keep a Phi0 */
307 res = new_rd_Phi0(NULL, block, mode, pos);
308 /* enqueue phi so we can set arguments once the block matures */
309 res->attr.phi.next = block->attr.block.phis;
310 block->attr.block.phis = res;
312 block->attr.block.graph_arr[pos] = res;
316 void mature_immBlock(ir_node *block)
324 assert(is_Block(block));
325 if (get_Block_matured(block))
328 irg = get_irn_irg(block);
329 n_preds = ARR_LEN(block->in) - 1;
330 /* Fix block parameters */
331 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
333 /* Traverse a chain of Phi nodes attached to this block and mature
335 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
337 int pos = phi->attr.phi.u.pos;
339 next = phi->attr.phi.next;
340 new_value = set_phi_arguments(phi, pos);
341 if (block->attr.block.graph_arr[pos] == phi) {
342 block->attr.block.graph_arr[pos] = new_value;
346 set_Block_matured(block, 1);
348 /* create final in-array for the block */
349 if (block->attr.block.dynamic_ins) {
350 new_in = NEW_ARR_D(ir_node*, irg->obst, n_preds+1);
351 memcpy(new_in, block->in, (n_preds+1) * sizeof(new_in[0]));
352 DEL_ARR_F(block->in);
354 block->attr.block.dynamic_ins = false;
357 /* Now, as the block is a finished Firm node, we can optimize it.
358 Since other nodes have been allocated since the block was created
359 we can not free the node on the obstack. Therefore we have to call
361 Unfortunately the optimization does not change a lot, as all allocated
362 nodes refer to the unoptimized node.
363 We can call optimize_in_place_2(), as global cse has no effect on blocks.
365 irn_verify_irg(block, irg);
366 block = optimize_in_place_2(block);
369 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
371 assert(get_irg_phase_state(current_ir_graph) == phase_building);
372 return new_rd_Const_long(db, current_ir_graph, mode, value);
375 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
378 assert(get_irg_phase_state(current_ir_graph) == phase_building);
379 return new_rd_Sel(db, current_ir_graph->current_block,
380 store, objptr, 0, NULL, ent);
383 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
386 assert(get_irg_phase_state(current_ir_graph) == phase_building);
387 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
390 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
391 ir_asm_constraint *inputs,
392 size_t n_outs, ir_asm_constraint *outputs,
393 size_t n_clobber, ident *clobber[], ident *text)
395 assert(get_irg_phase_state(current_ir_graph) == phase_building);
396 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
397 n_outs, outputs, n_clobber, clobber, text);
400 ir_node *new_rd_strictConv(dbg_info *dbgi, ir_node *block, ir_node * irn_op, ir_mode * mode)
403 ir_graph *irg = get_Block_irg(block);
408 res = new_ir_node(dbgi, irg, block, op_Conv, mode, 1, in);
409 res->attr.conv.strict = 1;
410 irn_verify_irg(res, irg);
411 res = optimize_node(res);
415 ir_node *new_r_strictConv(ir_node *block, ir_node * irn_op, ir_mode * mode)
417 return new_rd_strictConv(NULL, block, irn_op, mode);
420 ir_node *new_d_strictConv(dbg_info *dbgi, ir_node * irn_op, ir_mode * mode)
423 assert(get_irg_phase_state(current_ir_graph) == phase_building);
424 res = new_rd_strictConv(dbgi, current_ir_graph->current_block, irn_op, mode);
428 ir_node *new_strictConv(ir_node * irn_op, ir_mode * mode)
430 return new_d_strictConv(NULL, irn_op, mode);
433 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
436 ir_graph *irg = get_Block_irg(block);
443 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
444 res->attr.div.resmode = resmode;
445 res->attr.div.no_remainder = 1;
446 res->attr.div.exc.pin_state = pin_state;
447 irn_verify_irg(res, irg);
448 res = optimize_node(res);
452 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
454 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
457 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
460 assert(get_irg_phase_state(current_ir_graph) == phase_building);
461 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
465 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
467 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
470 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
474 assert(get_irg_phase_state(irg) == phase_building);
475 /* creates a new dynamic in-array as length of in is -1 */
476 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
478 set_Block_matured(res, 0);
479 res->attr.block.dynamic_ins = true;
480 res->attr.block.irg.irg = irg;
481 res->attr.block.backedge = NULL;
482 res->attr.block.entity = NULL;
484 set_Block_block_visited(res, 0);
486 /* Create and initialize array for Phi-node construction. */
487 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
488 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
490 /* Immature block may not be optimized! */
491 irn_verify_irg(res, irg);
496 ir_node *new_r_immBlock(ir_graph *irg)
498 return new_rd_immBlock(NULL, irg);
501 ir_node *new_d_immBlock(dbg_info *dbgi)
503 return new_rd_immBlock(dbgi, current_ir_graph);
506 ir_node *new_immBlock(void)
508 return new_rd_immBlock(NULL, current_ir_graph);
511 void add_immBlock_pred(ir_node *block, ir_node *jmp)
513 int n = ARR_LEN(block->in) - 1;
515 assert(is_Block(block) && "Error: Must be a Block");
516 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
517 assert(is_ir_node(jmp));
519 ARR_APP1(ir_node *, block->in, jmp);
521 hook_set_irn_n(block, n, jmp, NULL);
524 void set_cur_block(ir_node *target)
526 set_r_cur_block(current_ir_graph, target);
529 void set_r_cur_block(ir_graph *irg, ir_node *target)
531 assert(get_irg_phase_state(irg) == phase_building);
532 assert(target == NULL || is_Block(target));
533 assert(target == NULL || get_irn_irg(target) == irg);
534 irg->current_block = target;
537 ir_node *get_r_cur_block(ir_graph *irg)
539 assert(get_irg_phase_state(irg) == phase_building);
540 return irg->current_block;
543 ir_node *get_cur_block(void)
545 return get_r_cur_block(current_ir_graph);
548 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
550 assert(get_irg_phase_state(irg) == phase_building);
552 inc_irg_visited(irg);
554 return get_r_value_internal(irg->current_block, pos + 1, mode);
557 ir_node *get_value(int pos, ir_mode *mode)
559 return get_r_value(current_ir_graph, pos, mode);
563 * helper function for guess_mode: recursively look for a definition for
564 * local variable @p pos, returns its mode if found.
566 static ir_mode *guess_recursively(ir_node *block, int pos)
572 if (irn_visited_else_mark(block))
575 /* already have a defintion -> we can simply look at its mode */
576 value = block->attr.block.graph_arr[pos];
578 return get_irn_mode(value);
580 /* now we try to guess, by looking at the predecessor blocks */
581 n_preds = get_irn_arity(block);
582 for (i = 0; i < n_preds; ++i) {
583 ir_node *pred_block = get_Block_cfgpred_block(block, i);
584 ir_mode *mode = guess_recursively(pred_block, pos);
589 /* no way to guess */
593 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
595 ir_node *block = irg->current_block;
596 ir_node *value = block->attr.block.graph_arr[pos+1];
599 /* already have a defintion -> we can simply look at its mode */
601 return get_irn_mode(value);
603 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
604 inc_irg_visited(irg);
605 mode = guess_recursively(block, pos+1);
606 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
611 ir_mode *ir_guess_mode(int pos)
613 return ir_r_guess_mode(current_ir_graph, pos);
616 void set_r_value(ir_graph *irg, int pos, ir_node *value)
618 assert(get_irg_phase_state(irg) == phase_building);
620 assert(pos+1 < irg->n_loc);
621 assert(is_ir_node(value));
622 irg->current_block->attr.block.graph_arr[pos + 1] = value;
625 void set_value(int pos, ir_node *value)
627 set_r_value(current_ir_graph, pos, value);
630 ir_node *get_r_store(ir_graph *irg)
632 assert(get_irg_phase_state(irg) == phase_building);
633 inc_irg_visited(irg);
634 return get_r_value_internal(irg->current_block, 0, mode_M);
637 ir_node *get_store(void)
639 return get_r_store(current_ir_graph);
642 void set_r_store(ir_graph *irg, ir_node *store)
644 ir_node *load, *pload, *pred, *in[2];
646 assert(get_irg_phase_state(irg) == phase_building);
647 /* Beware: due to dead code elimination, a store might become a Bad node even in
648 the construction phase. */
649 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
651 if (get_opt_auto_create_sync()) {
652 /* handle non-volatile Load nodes by automatically creating Sync's */
653 load = skip_Proj(store);
654 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
655 pred = get_Load_mem(load);
658 /* a Load after a Sync: move it up */
659 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
661 set_Load_mem(load, get_memop_mem(mem));
662 add_Sync_pred(pred, store);
665 pload = skip_Proj(pred);
666 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
667 /* a Load after a Load: create a new Sync */
668 set_Load_mem(load, get_Load_mem(pload));
672 store = new_r_Sync(irg->current_block, 2, in);
677 irg->current_block->attr.block.graph_arr[0] = store;
680 void set_store(ir_node *store)
682 set_r_store(current_ir_graph, store);
685 void keep_alive(ir_node *ka)
687 ir_graph *irg = get_irn_irg(ka);
688 add_End_keepalive(get_irg_end(irg), ka);
691 void ir_set_uninitialized_local_variable_func(
692 uninitialized_local_variable_func_t *func)
694 default_initialize_local_variable = func;
697 void irg_finalize_cons(ir_graph *irg)
699 ir_node *end_block = get_irg_end_block(irg);
700 mature_immBlock(end_block);
702 set_irg_phase_state(irg, phase_high);
705 void irp_finalize_cons(void)
708 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
709 irg_finalize_cons(get_irp_irg(i));
711 irp->phase_state = phase_high;
714 ir_node *new_Const_long(ir_mode *mode, long value)
716 return new_d_Const_long(NULL, mode, value);
719 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
721 return new_d_SymConst(NULL, mode, value, kind);
723 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
725 return new_d_simpleSel(NULL, store, objptr, ent);
727 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
728 size_t n_outs, ir_asm_constraint *outputs,
729 size_t n_clobber, ident *clobber[], ident *text)
731 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
734 ir_node *new_r_Anchor(ir_graph *irg)
736 ir_node *in[anchor_last+1];
739 memset(in, 0, sizeof(in));
740 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last+1, in);
741 res->attr.anchor.irg.irg = irg;
743 /* hack to get get_irn_irg working: set block to ourself and allow
744 * get_Block_irg for anchor */
747 /* we can't have NULL inputs so reference ourselfes for now */
748 for (i = 0; i <= (size_t)anchor_last; ++i) {
749 set_irn_n(res, i, res);
755 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
757 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
758 res->attr.block.irg.irg = irg;
759 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
760 set_Block_matured(res, 1);
761 /* Create and initialize array for Phi-node construction. */
762 if (get_irg_phase_state(irg) == phase_building) {
763 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
764 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
766 irn_verify_irg(res, irg);