2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
/**
 * Language dependent variable initialization callback.
 * When non-NULL it is invoked by get_r_value_internal() for a read of a
 * local variable that has no definition yet in the start block; when NULL
 * an Unknown node is used instead.
 */
static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
/**
 * Constructs an ASM (inline assembly) node with @p arity value inputs,
 * @p n_outs outputs and @p n_clobber clobber entries. The constraint and
 * clobber arrays are copied onto the graph's obstack so the node owns its
 * own storage.
 *
 * NOTE(review): this extraction is missing a few statements here (the
 * declaration of r_in, presumably an `r_in[0] = mem;` store, and the final
 * `return res;`) — confirm against the full source; visible code is kept
 * verbatim.
 */
ir_node *new_rd_ASM(dbg_info *db, ir_node *block, ir_node *mem,
                    int arity, ir_node *in[], ir_asm_constraint *inputs,
                    size_t n_outs, ir_asm_constraint *outputs, size_t n_clobber,
                    ident *clobber[], ident *text)
	ir_graph *irg = get_irn_irg(block);

	/* one extra input slot in front of the value inputs (the memory operand) */
	int r_arity = arity+1;
	NEW_ARR_A(ir_node*, r_in, r_arity);
	/* the explicit value inputs start at index 1 */
	memcpy(&r_in[1], in, arity*sizeof(ir_node*));

	ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, r_arity, r_in);

	/* inline assembly may have arbitrary side effects: keep it in place */
	res->attr.assem.pin_state = op_pin_state_pinned;
	res->attr.assem.input_constraints
		= NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
	res->attr.assem.output_constraints
		= NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
	res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
	res->attr.assem.text = text;

	/* copy caller-supplied arrays: the node must not alias caller storage */
	memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
	memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
	memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);

	irn_verify_irg(res, irg);
	res = optimize_node(res);
91 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
92 ir_node *objptr, ir_entity *ent)
94 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
97 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
98 symconst_symbol value, symconst_kind symkind)
100 ir_node *block = get_irg_start_block(irg);
101 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
102 res->attr.symc.kind = symkind;
103 res->attr.symc.sym = value;
105 irn_verify_irg(res, irg);
106 res = optimize_node(res);
110 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
113 sym.entity_p = symbol;
114 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
117 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
120 sym.entity_p = symbol;
121 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
/** Creates a SymConst representing the size of type @p symbol.
 * NOTE(review): the declaration/initialization of `sym` (presumably
 * assigning @p symbol to its type member) is not visible in this
 * extraction; confirm against the full source. */
ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
	return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
/** Creates a SymConst representing the alignment of type @p symbol.
 * NOTE(review): the declaration/initialization of `sym` (presumably
 * assigning @p symbol to its type member) is not visible in this
 * extraction; confirm against the full source. */
ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
	return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
138 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
140 return new_rd_Const_long(NULL, irg, mode, value);
142 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
143 symconst_kind symkind)
145 return new_rd_SymConst(NULL, irg, mode, value, symkind);
147 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
150 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
152 ir_node *new_r_ASM(ir_node *block, ir_node *mem,
153 int arity, ir_node *in[], ir_asm_constraint *inputs,
154 size_t n_outs, ir_asm_constraint *outputs,
155 size_t n_clobber, ident *clobber[], ident *text)
157 return new_rd_ASM(NULL, block, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
160 /** Creates a Phi node with 0 predecessors. */
161 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
162 ir_mode *mode, int pos)
164 ir_graph *irg = get_irn_irg(block);
165 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
166 res->attr.phi.u.pos = pos;
167 irn_verify_irg(res, irg);
171 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
/**
 * A Phi whose operands are all either the Phi itself or one single other
 * value is redundant: replace it by that value and recurse into the
 * replacement, which may itself have become redundant.
 *
 * NOTE(review): several statements (the loop-variable declaration, a
 * skip/`continue` for self-references, and early `return`s) are missing
 * from this extraction; the visible code is kept verbatim.
 */
static void try_remove_unnecessary_phi(ir_node *phi)
	ir_node *phi_value = NULL;
	int arity = get_irn_arity(phi);

	/* see if all inputs are either pointing to a single value or
	 * are self references */
	for (i = 0; i < arity; ++i) {
		ir_node *in = get_irn_n(phi, i);
		/* found a value different from the one we already found:
		 * cannot remove this Phi */
		if (phi_value != NULL)
	if (phi_value == NULL)

	/* if we're here then all phi inputs have been either phi_value
	 * or self-references, we can replace the phi by phi_value.
	 * We do this with an Id-node */
	exchange(phi, phi_value);

	/* recursively check phi_value, because it could be that we were the last
	 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
	 * the loop header which can be eliminated now */
	if (is_Phi(phi_value)) {
		try_remove_unnecessary_phi(phi_value);
/**
 * Computes the predecessors for the real phi node, and then
 * allocates and returns this node. The routine called to allocate the
 * node might optimize it away and return a real value.
 * This function must be called with an in-array of proper size.
 *
 * NOTE(review): some statements (the loop-variable and `value`
 * declarations, the else-branch structure, the `in[i] = value;` store, a
 * probable mode_M guard around the keepalive, and the final return) are
 * missing from this extraction; the visible code is kept verbatim.
 */
static ir_node *set_phi_arguments(ir_node *phi, int pos)
	ir_node *block = get_nodes_block(phi);
	ir_graph *irg = get_irn_irg(block);
	int arity = get_irn_arity(block);
	ir_node **in = ALLOCAN(ir_node*, arity);
	ir_mode *mode = get_irn_mode(phi);

	/* This loop goes to all predecessor blocks of the block the Phi node
	   is in and there finds the operands of the Phi node by calling
	   get_r_value_internal. */
	for (i = 0; i < arity; ++i) {
		ir_node *cfgpred = get_Block_cfgpred_block(block, i);
		/* unreachable predecessor: the operand is Bad */
		if (is_Bad(cfgpred)) {
			value = new_r_Bad(irg, mode);
			/* fresh visited marker for the recursive value lookup */
			inc_irg_visited(irg);
			value = get_r_value_internal(cfgpred, pos, mode);

	phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
	set_irn_in(phi, arity, in);

	irn_verify_irg(phi, irg);

	/* Memory Phis in endless loops must be kept alive.
	   As we can't distinguish these easily we keep all of them alive. */
	add_End_keepalive(get_irg_end(irg), phi);

	try_remove_unnecessary_phi(phi);
/**
 * This function returns the last definition of a value. In case
 * this value was last defined in a previous block, Phi nodes are
 * inserted. If the part of the firm graph containing the definition
 * is not yet constructed, a dummy Phi node is returned.
 *
 * @param block the current block
 * @param pos   the value number of the value searched
 * @param mode  the mode of this value (needed for Phi construction)
 *
 * NOTE(review): several structural lines (a likely early return when the
 * cached value is non-NULL, `else` keywords and some braces, the final
 * return) are missing from this extraction; the visible code is kept
 * verbatim.
 */
static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
	/* cached current definition of this variable in this block */
	ir_node *res = block->attr.block.graph_arr[pos];
	ir_graph *irg = get_irn_irg(block);

	/* We ran into a cycle. This may happen in unreachable loops. */
	if (irn_visited_else_mark(block)) {
		/* Since the loop is unreachable, return a Bad. */
		return new_r_Bad(irg, mode);

	/* in a matured block we can immediately determine the phi arguments */
	if (get_Block_matured(block)) {
		int arity = get_irn_arity(block);
		/* no predecessors: use unknown value */
		if (block == get_irg_start_block(irg)) {
			/* let the frontend supply a default value if it registered a
			 * callback; otherwise fall back to Unknown */
			if (default_initialize_local_variable != NULL) {
				ir_node *rem = get_r_cur_block(irg);
				set_r_cur_block(irg, block);
				res = default_initialize_local_variable(irg, mode, pos - 1);
				set_r_cur_block(irg, rem);
				res = new_r_Unknown(irg, mode);
			/* unreachable block, use Bad */
			res = new_r_Bad(irg, mode);
		/* one predecessor just use its value */
		} else if (arity == 1) {
			ir_node *cfgpred = get_Block_cfgpred(block, 0);
			if (is_Bad(cfgpred)) {
				res = new_r_Bad(irg, mode);
				ir_node *cfgpred_block = get_nodes_block(cfgpred);
				res = get_r_value_internal(cfgpred_block, pos, mode);
		/* multiple predecessors construct Phi */
			res = new_rd_Phi0(NULL, block, mode, pos);
			/* enter phi0 into our variable value table to break cycles
			 * arising from set_phi_arguments */
			block->attr.block.graph_arr[pos] = res;
			res = set_phi_arguments(res, pos);
	/* in case of immature block we have to keep a Phi0 */
		res = new_rd_Phi0(NULL, block, mode, pos);
		/* enqueue phi so we can set arguments once the block matures */
		res->attr.phi.next = block->attr.block.phis;
		block->attr.block.phis = res;

	/* remember the definition for subsequent lookups */
	block->attr.block.graph_arr[pos] = res;
/**
 * Finalizes an immature block: fixes its in-array, computes the operands
 * of all Phi0 nodes queued on the block, and marks the block as matured.
 *
 * NOTE(review): the local declarations (irg, n_preds, phi, next, new_value,
 * new_in), the early return for already-matured blocks, the assignment of
 * the compacted in-array back to block->in, and several braces are missing
 * from this extraction; the visible code is kept verbatim.
 */
void mature_immBlock(ir_node *block)
	assert(is_Block(block));
	/* maturing twice would recompute the Phi arguments: bail out early */
	if (get_Block_matured(block))

	irg = get_irn_irg(block);
	n_preds = ARR_LEN(block->in) - 1;
	/* Fix block parameters */
	block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);

	/* Traverse the chain of Phi0 nodes attached to this block and compute
	 * their real arguments now that all predecessors are known. */
	for (phi = block->attr.block.phis; phi != NULL; phi = next) {
		int pos = phi->attr.phi.u.pos;
		next = phi->attr.phi.next;
		new_value = set_phi_arguments(phi, pos);
		/* only update the value table if the Phi0 is still the current
		 * definition of this variable */
		if (block->attr.block.graph_arr[pos] == phi) {
			block->attr.block.graph_arr[pos] = new_value;

	set_Block_matured(block, 1);

	/* create final in-array for the block */
	if (block->attr.block.dynamic_ins) {
		new_in = NEW_ARR_D(ir_node*, irg->obst, n_preds+1);
		memcpy(new_in, block->in, (n_preds+1) * sizeof(new_in[0]));
		DEL_ARR_F(block->in);
		block->attr.block.dynamic_ins = false;

	/* Now, as the block is a finished Firm node, we can optimize it.
	   Since other nodes have been allocated since the block was created
	   we can not free the node on the obstack. Therefore we have to call
	   optimize_in_place().
	   Unfortunately the optimization does not change a lot, as all allocated
	   nodes refer to the unoptimized node.
	   We can call optimize_in_place_2(), as global cse has no effect on blocks. */
	irn_verify_irg(block, irg);
	block = optimize_in_place_2(block);
376 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
378 assert(get_irg_phase_state(current_ir_graph) == phase_building);
379 return new_rd_Const_long(db, current_ir_graph, mode, value);
382 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
385 assert(get_irg_phase_state(current_ir_graph) == phase_building);
386 return new_rd_Sel(db, current_ir_graph->current_block,
387 store, objptr, 0, NULL, ent);
390 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
393 assert(get_irg_phase_state(current_ir_graph) == phase_building);
394 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
397 ir_node *new_d_ASM(dbg_info *db, ir_node *mem, int arity, ir_node *in[],
398 ir_asm_constraint *inputs,
399 size_t n_outs, ir_asm_constraint *outputs,
400 size_t n_clobber, ident *clobber[], ident *text)
402 assert(get_irg_phase_state(current_ir_graph) == phase_building);
403 return new_rd_ASM(db, current_ir_graph->current_block, mem, arity, in,
404 inputs, n_outs, outputs, n_clobber, clobber, text);
/**
 * Creates a Div node whose no_remainder flag is set (a division where the
 * remainder result is unused).
 *
 * NOTE(review): the declarations of `res` and the 3-element `in` array
 * (presumably mem, left and right operands) plus the final `return res;`
 * are missing from this extraction; the visible code is kept verbatim.
 */
ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
	ir_graph *irg = get_Block_irg(block);
	res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
	res->attr.div.resmode = resmode;
	/* mark this Div as one whose remainder is never used */
	res->attr.div.no_remainder = 1;
	res->attr.div.exc.pin_state = pin_state;
	irn_verify_irg(res, irg);
	res = optimize_node(res);
426 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
428 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
431 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
434 assert(get_irg_phase_state(current_ir_graph) == phase_building);
435 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
439 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
441 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
/**
 * Creates a new immature block: a block whose number of control-flow
 * predecessors is not yet known. Predecessors are appended with
 * add_immBlock_pred() and the block is finished with mature_immBlock().
 *
 * NOTE(review): the declaration of `res` and the final `return res;` are
 * missing from this extraction; the visible code is kept verbatim.
 */
ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
	assert(get_irg_phase_state(irg) == phase_building);
	/* creates a new dynamic in-array as length of in is -1 */
	res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);

	set_Block_matured(res, 0);
	res->attr.block.dynamic_ins = true;
	res->attr.block.irg.irg = irg;
	res->attr.block.backedge = NULL;
	res->attr.block.entity = NULL;

	set_Block_block_visited(res, 0);

	/* Create and initialize array for Phi-node construction. */
	res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
	memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);

	/* Immature block may not be optimized! */
	irn_verify_irg(res, irg);
470 ir_node *new_r_immBlock(ir_graph *irg)
472 return new_rd_immBlock(NULL, irg);
475 ir_node *new_d_immBlock(dbg_info *dbgi)
477 return new_rd_immBlock(dbgi, current_ir_graph);
480 ir_node *new_immBlock(void)
482 return new_rd_immBlock(NULL, current_ir_graph);
485 void add_immBlock_pred(ir_node *block, ir_node *jmp)
487 int n = ARR_LEN(block->in) - 1;
489 assert(is_Block(block) && "Error: Must be a Block");
490 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
491 assert(is_ir_node(jmp));
493 ARR_APP1(ir_node *, block->in, jmp);
495 hook_set_irn_n(block, n, jmp, NULL);
498 void set_cur_block(ir_node *target)
500 set_r_cur_block(current_ir_graph, target);
503 void set_r_cur_block(ir_graph *irg, ir_node *target)
505 assert(get_irg_phase_state(irg) == phase_building);
506 assert(target == NULL || is_Block(target));
507 assert(target == NULL || get_irn_irg(target) == irg);
508 irg->current_block = target;
511 ir_node *get_r_cur_block(ir_graph *irg)
513 assert(get_irg_phase_state(irg) == phase_building);
514 return irg->current_block;
517 ir_node *get_cur_block(void)
519 return get_r_cur_block(current_ir_graph);
/**
 * Returns the current definition of local variable @p pos in @p irg,
 * constructing Phi nodes on demand (see get_r_value_internal). Slot 0 of
 * the value table holds the memory state, hence the pos + 1 offset.
 *
 * NOTE(review): an additional assertion line appears to be missing from
 * this extraction; the visible code is kept verbatim.
 */
ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
	assert(get_irg_phase_state(irg) == phase_building);
	/* fresh visited marker: get_r_value_internal uses it for cycle detection */
	inc_irg_visited(irg);
	return get_r_value_internal(irg->current_block, pos + 1, mode);
531 ir_node *get_value(int pos, ir_mode *mode)
533 return get_r_value(current_ir_graph, pos, mode);
/**
 * helper function for guess_mode: recursively look for a definition for
 * local variable @p pos, returns its mode if found.
 *
 * NOTE(review): local declarations, NULL checks and the return statements
 * are missing from this extraction; the visible code is kept verbatim.
 */
static ir_mode *guess_recursively(ir_node *block, int pos)
	/* block already visited: avoid infinite recursion through loops */
	if (irn_visited_else_mark(block))

	/* already have a definition -> we can simply look at its mode */
	value = block->attr.block.graph_arr[pos];
		return get_irn_mode(value);

	/* now we try to guess, by looking at the predecessor blocks */
	n_preds = get_irn_arity(block);
	for (i = 0; i < n_preds; ++i) {
		ir_node *pred_block = get_Block_cfgpred_block(block, i);
		ir_mode *mode = guess_recursively(pred_block, pos);

	/* no way to guess */
/**
 * Tries to determine the mode of local variable @p pos: uses the mode of an
 * existing definition in the current block if there is one, otherwise
 * searches the predecessor blocks recursively (guess_recursively).
 *
 * NOTE(review): the declaration of `mode`, the NULL check on `value` and
 * the final return are missing from this extraction; the visible code is
 * kept verbatim.
 */
ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
	ir_node *block = irg->current_block;
	/* slot 0 holds the memory state, so variable pos lives at pos + 1 */
	ir_node *value = block->attr.block.graph_arr[pos+1];

	/* already have a definition -> we can simply look at its mode */
		return get_irn_mode(value);

	/* reserve the visited flags while we walk the CFG */
	ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
	inc_irg_visited(irg);
	mode = guess_recursively(block, pos+1);
	ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
585 ir_mode *ir_guess_mode(int pos)
587 return ir_r_guess_mode(current_ir_graph, pos);
/**
 * Records @p value as the current definition of local variable @p pos in
 * the current block (slot pos + 1; slot 0 holds the memory state).
 *
 * NOTE(review): one assertion line appears to be missing from this
 * extraction; the visible code is kept verbatim.
 */
void set_r_value(ir_graph *irg, int pos, ir_node *value)
	assert(get_irg_phase_state(irg) == phase_building);
	assert(pos+1 < irg->n_loc);
	assert(is_ir_node(value));
	irg->current_block->attr.block.graph_arr[pos + 1] = value;
599 void set_value(int pos, ir_node *value)
601 set_r_value(current_ir_graph, pos, value);
604 ir_node *get_r_store(ir_graph *irg)
606 assert(get_irg_phase_state(irg) == phase_building);
607 inc_irg_visited(irg);
608 return get_r_value_internal(irg->current_block, 0, mode_M);
611 ir_node *get_store(void)
613 return get_r_store(current_ir_graph);
/**
 * Sets the current memory state (value slot 0) of @p irg's current block.
 *
 * With the auto-create-sync option enabled, storing the Proj of a
 * non-volatile Load triggers Sync handling so that independent Loads are
 * not serialized on one another.
 *
 * NOTE(review): several lines are missing from this extraction (likely an
 * is_Sync() test on `pred`, the in[0]/in[1] initialization for the new
 * Sync, and multiple closing braces); the visible code is kept verbatim.
 */
void set_r_store(ir_graph *irg, ir_node *store)
	ir_node *load, *pload, *pred, *in[2];

	assert(get_irg_phase_state(irg) == phase_building);
	/* Beware: due to dead code elimination, a store might become a Bad node even in
	   the construction phase. */
	assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");

	if (get_opt_auto_create_sync()) {
		/* handle non-volatile Load nodes by automatically creating Sync's */
		load = skip_Proj(store);
		if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
			pred = get_Load_mem(load);

				/* a Load after a Sync: move it up */
				ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
				set_Load_mem(load, get_memop_mem(mem));
				add_Sync_pred(pred, store);

			pload = skip_Proj(pred);
			if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
				/* a Load after a Load: create a new Sync */
				set_Load_mem(load, get_Load_mem(pload));

				store = new_r_Sync(irg->current_block, 2, in);

	/* slot 0 of the value table holds the memory state */
	irg->current_block->attr.block.graph_arr[0] = store;
654 void set_store(ir_node *store)
656 set_r_store(current_ir_graph, store);
659 void keep_alive(ir_node *ka)
661 ir_graph *irg = get_irn_irg(ka);
662 add_End_keepalive(get_irg_end(irg), ka);
665 void ir_set_uninitialized_local_variable_func(
666 uninitialized_local_variable_func_t *func)
668 default_initialize_local_variable = func;
671 void irg_finalize_cons(ir_graph *irg)
673 ir_node *end_block = get_irg_end_block(irg);
674 mature_immBlock(end_block);
676 set_irg_phase_state(irg, phase_high);
679 void irp_finalize_cons(void)
682 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
683 irg_finalize_cons(get_irp_irg(i));
685 irp->phase_state = phase_high;
688 ir_node *new_Const_long(ir_mode *mode, long value)
690 return new_d_Const_long(NULL, mode, value);
693 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
695 return new_d_SymConst(NULL, mode, value, kind);
697 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
699 return new_d_simpleSel(NULL, store, objptr, ent);
701 ir_node *new_ASM(ir_node *mem, int arity, ir_node *in[],
702 ir_asm_constraint *inputs, size_t n_outs,
703 ir_asm_constraint *outputs, size_t n_clobber,
704 ident *clobber[], ident *text)
706 return new_d_ASM(NULL, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
/**
 * Creates the Anchor node of @p irg. All of its inputs are initially set
 * to the node itself because NULL inputs are not allowed.
 *
 * NOTE(review): the declarations of `res` and the loop variable, part of
 * the block/self-reference hack and the final return are missing from this
 * extraction; the visible code is kept verbatim.
 */
ir_node *new_r_Anchor(ir_graph *irg)
	ir_node *in[anchor_last+1];

	memset(in, 0, sizeof(in));
	res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last+1, in);
	res->attr.anchor.irg.irg = irg;

	/* hack to get get_irn_irg working: set block to ourself and allow
	 * get_Block_irg for anchor */

	/* we can't have NULL inputs so reference ourselves for now */
	for (i = 0; i <= (size_t)anchor_last; ++i) {
		set_irn_n(res, i, res);
	}
/**
 * Creates a Block node that is verified but not run through
 * optimize_node() (hence "noopt"). The block is created already matured.
 *
 * NOTE(review): this function continues past the end of the extraction
 * (its return statement and closing brace are not visible); only comments
 * were added to the visible part.
 */
ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
	ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
	res->attr.block.irg.irg = irg;
	res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
	set_Block_matured(res, 1);
	/* Create and initialize array for Phi-node construction. */
	if (get_irg_phase_state(irg) == phase_building) {
		res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
		memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
	irn_verify_irg(res, irg);