2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
/* Installed via ir_set_uninitialized_local_variable_func(); invoked by
 * get_r_value_internal() when an uninitialized local is read in the start
 * block.  NULL means "use an Unknown node" instead. */
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* Create a Const node holding the given long value converted to @p mode. */
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
/* Create an inline-assembler (ASM) node in @p block.
 * The real in-array is one longer than @p arity because the memory
 * dependency occupies slot 0 (presumably r_in[0] = mem — the assignment
 * itself is not visible in this view; confirm against the full source).
 * Constraint and clobber arrays are copied onto the graph's obstack so the
 * caller keeps ownership of its buffers. */
59 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, ir_node *mem,
60 int arity, ir_node *in[], ir_asm_constraint *inputs,
61 size_t n_outs, ir_asm_constraint *outputs, size_t n_clobber,
62 ident *clobber[], ident *text)
64 ir_graph *irg = get_irn_irg(block);
/* +1 for the memory input in slot 0 */
66 int r_arity = arity+1;
68 NEW_ARR_A(ir_node*, r_in, r_arity);
/* user-supplied data inputs follow the memory input */
70 memcpy(&r_in[1], in, arity*sizeof(ir_node*));
72 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, r_arity, r_in);
74 res->attr.assem.pin_state = op_pin_state_pinned;
/* obstack-allocated copies: node owns its constraint/clobber arrays */
75 res->attr.assem.input_constraints
76 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
77 res->attr.assem.output_constraints
78 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
79 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
80 res->attr.assem.text = text;
82 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
83 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
84 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
86 irn_verify_irg(res, irg);
/* optimize_node() may replace the freshly built node by an equivalent one */
87 res = optimize_node(res);
/* Convenience wrapper: a Sel node without array indices (0, NULL). */
91 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
92 ir_node *objptr, ir_entity *ent)
94 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
/* Create a SymConst node.  SymConsts are always placed in the start block
 * of the graph, regardless of the current block. */
97 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
98 symconst_symbol value, symconst_kind symkind)
100 ir_node *block = get_irg_start_block(irg);
101 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
102 res->attr.symc.kind = symkind;
103 res->attr.symc.sym = value;
105 irn_verify_irg(res, irg);
106 res = optimize_node(res);
/* SymConst for the address of an entity. */
110 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
113 sym.entity_p = symbol;
114 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
/* SymConst for the offset of an entity within its owner type. */
117 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
120 sym.entity_p = symbol;
121 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
/* SymConst for the size of a type. */
124 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
128 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
/* SymConst for the alignment of a type. */
131 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
135 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
/* new_rd_Const_long without debug info. */
138 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
140 return new_rd_Const_long(NULL, irg, mode, value);
/* new_rd_SymConst without debug info. */
142 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
143 symconst_kind symkind)
145 return new_rd_SymConst(NULL, irg, mode, value, symkind);
/* new_rd_simpleSel without debug info: Sel with no array indices. */
147 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
150 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
/* new_rd_ASM without debug info. */
152 ir_node *new_r_ASM(ir_node *block, ir_node *mem,
153 int arity, ir_node *in[], ir_asm_constraint *inputs,
154 size_t n_outs, ir_asm_constraint *outputs,
155 size_t n_clobber, ident *clobber[], ident *text)
157 return new_rd_ASM(NULL, block, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
160 /** Creates a Phi node with 0 predecessors. */
/* Placeholder Phi used during SSA construction; u.pos remembers which
 * local variable slot it stands for so arguments can be filled in later
 * (see set_phi_arguments / mature_immBlock). */
161 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
162 ir_mode *mode, int pos)
164 ir_graph *irg = get_irn_irg(block);
165 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
166 res->attr.phi.u.pos = pos;
167 irn_verify_irg(res, irg);
171 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
/**
 * Remove a Phi node that is unnecessary, i.e. whose inputs are all either
 * one single value or self-references.  Such a Phi is exchanged for that
 * single value.  Runs recursively, since removing one Phi can render a
 * Phi in a dominating loop header unnecessary as well.
 * (Several statements of the scan loop are elided in this view.)
 */
173 static void try_remove_unnecessary_phi(ir_node *phi)
175 ir_node *phi_value = NULL;
176 int arity = get_irn_arity(phi);
179 /* see if all inputs are either pointing to a single value or
180 * are self references */
181 for (i = 0; i < arity; ++i) {
182 ir_node *in = get_irn_n(phi, i);
187 /** found a different value from the one we already found, can't remove
189 if (phi_value != NULL)
/* no non-self input at all: keep the Phi (degenerate case) */
193 if (phi_value == NULL)
196 /* if we're here then all phi inputs have been either phi_value
197 * or self-references, we can replace the phi by phi_value.
198 * We do this with an Id-node */
199 exchange(phi, phi_value);
201 /* recursively check phi_value, because it could be that we were the last
202 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
203 * the loop header which can be eliminated now */
204 if (is_Phi(phi_value)) {
205 try_remove_unnecessary_phi(phi_value);
210 * Computes the predecessors for the real phi node, and then
211 * allocates and returns this node. The routine called to allocate the
212 * node might optimize it away and return a real value.
213 * This function must be called with an in-array of proper size.
215 static ir_node *set_phi_arguments(ir_node *phi, int pos)
217 ir_node *block = get_nodes_block(phi);
218 ir_graph *irg = get_irn_irg(block);
219 int arity = get_irn_arity(block);
220 ir_node **in = ALLOCAN(ir_node*, arity);
221 ir_mode *mode = get_irn_mode(phi);
224 /* This loop goes to all predecessor blocks of the block the Phi node
225 is in and there finds the operands of the Phi node by calling
226 get_r_value_internal. */
227 for (i = 0; i < arity; ++i) {
228 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
/* predecessor control flow is dead: the operand is Bad */
230 if (is_Bad(cfgpred)) {
231 value = new_r_Bad(irg, mode);
/* fresh visited epoch per predecessor so cycle detection in
 * get_r_value_internal starts clean */
233 inc_irg_visited(irg);
235 value = get_r_value_internal(cfgpred, pos, mode);
240 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
241 set_irn_in(phi, arity, in);
243 irn_verify_irg(phi, irg);
245 /* Memory Phis in endless loops must be kept alive.
246 As we can't distinguish these easily we keep all of them alive. */
248 add_End_keepalive(get_irg_end(irg), phi);
250 try_remove_unnecessary_phi(phi);
255 * This function returns the last definition of a value. In case
256 * this value was last defined in a previous block, Phi nodes are
257 * inserted. If the part of the firm graph containing the definition
258 * is not yet constructed, a dummy Phi node is returned.
260 * @param block the current block
261 * @param pos the value number of the value searched
262 * @param mode the mode of this value (needed for Phi construction)
264 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
/* fast path: the block already has a definition cached for this slot */
266 ir_node *res = block->attr.block.graph_arr[pos];
267 ir_graph *irg = get_irn_irg(block);
271 /* We ran into a cycle. This may happen in unreachable loops. */
272 if (irn_visited_else_mark(block)) {
273 /* Since the loop is unreachable, return a Bad. */
274 return new_r_Bad(irg, mode);
277 /* in a matured block we can immediately determine the phi arguments */
278 if (get_Block_matured(block)) {
279 int arity = get_irn_arity(block);
280 /* no predecessors: use unknown value */
/* reading an uninitialized local in the start block: delegate to the
 * frontend callback if installed, otherwise produce Unknown */
282 if (block == get_irg_start_block(irg)) {
283 if (default_initialize_local_variable != NULL) {
284 ir_node *rem = get_r_cur_block(irg);
285 set_r_cur_block(irg, block);
286 res = default_initialize_local_variable(irg, mode, pos - 1);
287 set_r_cur_block(irg, rem);
289 res = new_r_Unknown(irg, mode);
292 /* unreachable block, use Bad */
293 res = new_r_Bad(irg, mode);
295 /* one predecessor just use its value */
296 } else if (arity == 1) {
297 ir_node *cfgpred = get_Block_cfgpred(block, 0);
298 if (is_Bad(cfgpred)) {
299 res = new_r_Bad(irg, mode);
301 ir_node *cfgpred_block = get_nodes_block(cfgpred);
/* recurse into the single predecessor block */
302 res = get_r_value_internal(cfgpred_block, pos, mode);
304 /* multiple predecessors construct Phi */
306 res = new_rd_Phi0(NULL, block, mode, pos);
307 /* enter phi0 into our variable value table to break cycles
308 * arising from set_phi_arguments */
309 block->attr.block.graph_arr[pos] = res;
310 res = set_phi_arguments(res, pos);
313 /* in case of immature block we have to keep a Phi0 */
314 res = new_rd_Phi0(NULL, block, mode, pos);
315 /* enqueue phi so we can set arguments once the block matures */
316 res->attr.phi.next = block->attr.block.phis;
317 block->attr.block.phis = res;
/* cache the result so future lookups hit the fast path */
319 block->attr.block.graph_arr[pos] = res;
/* Finish construction of an immature block: once all control-flow
 * predecessors are known, fix the backedge array, resolve all queued
 * Phi0 nodes, freeze the in-array and optimize the block. */
323 void mature_immBlock(ir_node *block)
331 assert(is_Block(block));
/* maturing twice is a harmless no-op */
332 if (get_Block_matured(block))
335 irg = get_irn_irg(block);
/* in[0] is the block's own slot; real predecessors follow */
336 n_preds = ARR_LEN(block->in) - 1;
337 /* Fix block parameters */
338 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
340 /* Traverse a chain of Phi nodes attached to this block and mature
342 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
344 int pos = phi->attr.phi.u.pos;
346 next = phi->attr.phi.next;
347 new_value = set_phi_arguments(phi, pos);
/* update the value table only if nothing newer superseded this phi */
348 if (block->attr.block.graph_arr[pos] == phi) {
349 block->attr.block.graph_arr[pos] = new_value;
353 set_Block_matured(block, 1);
355 /* create final in-array for the block */
/* move the growable in-array to a fixed obstack array */
356 if (block->attr.block.dynamic_ins) {
357 new_in = NEW_ARR_D(ir_node*, irg->obst, n_preds+1);
358 memcpy(new_in, block->in, (n_preds+1) * sizeof(new_in[0]));
359 DEL_ARR_F(block->in);
361 block->attr.block.dynamic_ins = false;
364 /* Now, as the block is a finished Firm node, we can optimize it.
365 Since other nodes have been allocated since the block was created
366 we can not free the node on the obstack. Therefore we have to call
368 Unfortunately the optimization does not change a lot, as all allocated
369 nodes refer to the unoptimized node.
370 We can call optimize_in_place_2(), as global cse has no effect on blocks.
372 irn_verify_irg(block, irg);
373 block = optimize_in_place_2(block);
/* new_rd_Const_long on current_ir_graph; only valid during construction. */
376 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
378 assert(get_irg_phase_state(current_ir_graph) == phase_building);
379 return new_rd_Const_long(db, current_ir_graph, mode, value);
/* simpleSel in the current block of current_ir_graph. */
382 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
385 assert(get_irg_phase_state(current_ir_graph) == phase_building);
386 return new_rd_Sel(db, current_ir_graph->current_block,
387 store, objptr, 0, NULL, ent);
/* SymConst on current_ir_graph; only valid during construction. */
390 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
393 assert(get_irg_phase_state(current_ir_graph) == phase_building);
394 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
/* ASM node in the current block of current_ir_graph. */
397 ir_node *new_d_ASM(dbg_info *db, ir_node *mem, int arity, ir_node *in[],
398 ir_asm_constraint *inputs,
399 size_t n_outs, ir_asm_constraint *outputs,
400 size_t n_clobber, ident *clobber[], ident *text)
402 assert(get_irg_phase_state(current_ir_graph) == phase_building);
403 return new_rd_ASM(db, current_ir_graph->current_block, mem, arity, in,
404 inputs, n_outs, outputs, n_clobber, clobber, text);
/* Create a Conv node with the strict flag set (no lossy float rounding
 * may be optimized away). */
407 ir_node *new_rd_strictConv(dbg_info *dbgi, ir_node *block, ir_node * irn_op, ir_mode * mode)
410 ir_graph *irg = get_Block_irg(block);
415 res = new_ir_node(dbgi, irg, block, op_Conv, mode, 1, in);
416 res->attr.conv.strict = 1;
417 irn_verify_irg(res, irg);
418 res = optimize_node(res);
/* strictConv without debug info. */
422 ir_node *new_r_strictConv(ir_node *block, ir_node * irn_op, ir_mode * mode)
424 return new_rd_strictConv(NULL, block, irn_op, mode);
/* strictConv in the current block of current_ir_graph. */
427 ir_node *new_d_strictConv(dbg_info *dbgi, ir_node * irn_op, ir_mode * mode)
430 assert(get_irg_phase_state(current_ir_graph) == phase_building);
431 res = new_rd_strictConv(dbgi, current_ir_graph->current_block, irn_op, mode);
/* strictConv in the current block, no debug info. */
435 ir_node *new_strictConv(ir_node * irn_op, ir_mode * mode)
437 return new_d_strictConv(NULL, irn_op, mode);
/* Create a Div node with the remainder-less flag set (the operation is
 * known to be exact, i.e. left is a multiple of right). */
440 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
443 ir_graph *irg = get_Block_irg(block);
450 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
451 res->attr.div.resmode = resmode;
452 res->attr.div.no_remainder = 1;
453 res->attr.div.exc.pin_state = pin_state;
454 irn_verify_irg(res, irg);
455 res = optimize_node(res);
/* DivRL without debug info. */
459 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
461 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
/* DivRL in the current block of current_ir_graph. */
464 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
467 assert(get_irg_phase_state(current_ir_graph) == phase_building);
468 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
/* DivRL in the current block, no debug info. */
472 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
474 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
/* Create an immature block: a block whose control-flow predecessors are
 * not yet known.  Predecessors are appended via add_immBlock_pred() and
 * the block is finished with mature_immBlock(). */
477 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
481 assert(get_irg_phase_state(irg) == phase_building);
482 /* creates a new dynamic in-array as length of in is -1 */
483 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
485 set_Block_matured(res, 0);
486 res->attr.block.dynamic_ins = true;
487 res->attr.block.irg.irg = irg;
488 res->attr.block.backedge = NULL;
489 res->attr.block.entity = NULL;
491 set_Block_block_visited(res, 0);
493 /* Create and initialize array for Phi-node construction. */
/* graph_arr[pos] caches the current definition of local variable pos */
494 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
495 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
497 /* Immature block may not be optimized! */
498 irn_verify_irg(res, irg);
/* Immature block without debug info. */
503 ir_node *new_r_immBlock(ir_graph *irg)
505 return new_rd_immBlock(NULL, irg);
/* Immature block in current_ir_graph with debug info. */
508 ir_node *new_d_immBlock(dbg_info *dbgi)
510 return new_rd_immBlock(dbgi, current_ir_graph);
/* Immature block in current_ir_graph, no debug info. */
513 ir_node *new_immBlock(void)
515 return new_rd_immBlock(NULL, current_ir_graph);
/* Append a control-flow predecessor @p jmp to a still-immature block. */
518 void add_immBlock_pred(ir_node *block, ir_node *jmp)
/* in[0] is the block's own slot, so n is the index of the new pred */
520 int n = ARR_LEN(block->in) - 1;
522 assert(is_Block(block) && "Error: Must be a Block");
523 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
524 assert(is_ir_node(jmp));
526 ARR_APP1(ir_node *, block->in, jmp);
528 hook_set_irn_n(block, n, jmp, NULL);
/* Set the current block of current_ir_graph. */
531 void set_cur_block(ir_node *target)
533 set_r_cur_block(current_ir_graph, target);
/* Set the current block of @p irg; NULL is allowed (no current block). */
536 void set_r_cur_block(ir_graph *irg, ir_node *target)
538 assert(get_irg_phase_state(irg) == phase_building);
539 assert(target == NULL || is_Block(target));
540 assert(target == NULL || get_irn_irg(target) == irg);
541 irg->current_block = target;
/* Return the current block of @p irg (construction phase only). */
544 ir_node *get_r_cur_block(ir_graph *irg)
546 assert(get_irg_phase_state(irg) == phase_building);
547 return irg->current_block;
/* Current block of current_ir_graph. */
550 ir_node *get_cur_block(void)
552 return get_r_cur_block(current_ir_graph);
/* Return the current definition of local variable @p pos in the current
 * block of @p irg, creating Phi nodes if necessary.
 * pos is shifted by one: slot 0 of graph_arr holds the memory state. */
555 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
557 assert(get_irg_phase_state(irg) == phase_building);
/* fresh visited epoch for the cycle detection in get_r_value_internal */
559 inc_irg_visited(irg);
561 return get_r_value_internal(irg->current_block, pos + 1, mode);
/* get_r_value on current_ir_graph. */
564 ir_node *get_value(int pos, ir_mode *mode)
566 return get_r_value(current_ir_graph, pos, mode);
570 * helper function for guess_mode: recursively look for a definition for
571 * local variable @p pos, returns its mode if found.
573 static ir_mode *guess_recursively(ir_node *block, int pos)
/* visited marker terminates cycles in the CFG walk */
579 if (irn_visited_else_mark(block))
582 /* already have a definition -> we can simply look at its mode */
583 value = block->attr.block.graph_arr[pos];
585 return get_irn_mode(value);
587 /* now we try to guess, by looking at the predecessor blocks */
588 n_preds = get_irn_arity(block);
589 for (i = 0; i < n_preds; ++i) {
590 ir_node *pred_block = get_Block_cfgpred_block(block, i);
591 ir_mode *mode = guess_recursively(pred_block, pos);
596 /* no way to guess */
/* Guess the mode of local variable @p pos by inspecting an existing
 * definition in the current block, or recursively in predecessors. */
600 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
602 ir_node *block = irg->current_block;
/* +1: slot 0 of graph_arr is the memory state */
603 ir_node *value = block->attr.block.graph_arr[pos+1];
606 /* already have a definition -> we can simply look at its mode */
608 return get_irn_mode(value);
610 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
611 inc_irg_visited(irg);
612 mode = guess_recursively(block, pos+1);
613 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
/* ir_r_guess_mode on current_ir_graph. */
618 ir_mode *ir_guess_mode(int pos)
620 return ir_r_guess_mode(current_ir_graph, pos);
/* Record @p value as the current definition of local variable @p pos in
 * the current block (pos+1 because slot 0 holds the memory state). */
623 void set_r_value(ir_graph *irg, int pos, ir_node *value)
625 assert(get_irg_phase_state(irg) == phase_building);
627 assert(pos+1 < irg->n_loc);
628 assert(is_ir_node(value));
629 irg->current_block->attr.block.graph_arr[pos + 1] = value;
/* set_r_value on current_ir_graph. */
632 void set_value(int pos, ir_node *value)
634 set_r_value(current_ir_graph, pos, value);
/* Return the current memory state (variable slot 0, mode_M). */
637 ir_node *get_r_store(ir_graph *irg)
639 assert(get_irg_phase_state(irg) == phase_building);
640 inc_irg_visited(irg);
641 return get_r_value_internal(irg->current_block, 0, mode_M);
/* get_r_store on current_ir_graph. */
644 ir_node *get_store(void)
646 return get_r_store(current_ir_graph);
/* Set the current memory state.  With the auto-create-sync option on,
 * consecutive non-volatile Loads are combined via Sync nodes so they can
 * execute independently of each other.
 * (Some statements of the Sync-building logic are elided in this view.) */
649 void set_r_store(ir_graph *irg, ir_node *store)
651 ir_node *load, *pload, *pred, *in[2];
653 assert(get_irg_phase_state(irg) == phase_building);
654 /* Beware: due to dead code elimination, a store might become a Bad node even in
655 the construction phase. */
656 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
658 if (get_opt_auto_create_sync()) {
659 /* handle non-volatile Load nodes by automatically creating Sync's */
660 load = skip_Proj(store);
661 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
662 pred = get_Load_mem(load);
665 /* a Load after a Sync: move it up */
666 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
668 set_Load_mem(load, get_memop_mem(mem));
669 add_Sync_pred(pred, store);
/* not a Sync predecessor: check for a preceding Load instead */
672 pload = skip_Proj(pred);
673 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
674 /* a Load after a Load: create a new Sync */
675 set_Load_mem(load, get_Load_mem(pload));
679 store = new_r_Sync(irg->current_block, 2, in);
/* slot 0 of graph_arr always holds the memory state */
684 irg->current_block->attr.block.graph_arr[0] = store;
/* set_r_store on current_ir_graph. */
687 void set_store(ir_node *store)
689 set_r_store(current_ir_graph, store);
/* Protect @p ka from removal by adding it to the End node's keepalives. */
692 void keep_alive(ir_node *ka)
694 ir_graph *irg = get_irn_irg(ka);
695 add_End_keepalive(get_irg_end(irg), ka);
/* Install the frontend callback used to materialize a value for a local
 * variable that is read before it is written (see get_r_value_internal). */
698 void ir_set_uninitialized_local_variable_func(
699 uninitialized_local_variable_func_t *func)
701 default_initialize_local_variable = func;
/* Finish construction of one graph: mature the end block and leave the
 * building phase. */
704 void irg_finalize_cons(ir_graph *irg)
706 ir_node *end_block = get_irg_end_block(irg);
707 mature_immBlock(end_block);
709 set_irg_phase_state(irg, phase_high);
/* Finish construction of all graphs in the program and mark the whole
 * program representation as past the building phase. */
712 void irp_finalize_cons(void)
715 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
716 irg_finalize_cons(get_irp_irg(i));
718 irp->phase_state = phase_high;
/* Const from a long in the current graph, no debug info. */
721 ir_node *new_Const_long(ir_mode *mode, long value)
723 return new_d_Const_long(NULL, mode, value);
/* SymConst in the current graph, no debug info. */
726 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
728 return new_d_SymConst(NULL, mode, value, kind);
/* simpleSel in the current block, no debug info. */
730 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
732 return new_d_simpleSel(NULL, store, objptr, ent);
/* ASM node in the current block, no debug info. */
734 ir_node *new_ASM(ir_node *mem, int arity, ir_node *in[],
735 ir_asm_constraint *inputs, size_t n_outs,
736 ir_asm_constraint *outputs, size_t n_clobber,
737 ident *clobber[], ident *text)
739 return new_d_ASM(NULL, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
/* Create the Anchor node of a graph: a pseudo node with one input per
 * anchor slot, used as the root from which all anchored nodes are reachable. */
742 ir_node *new_r_Anchor(ir_graph *irg)
744 ir_node *in[anchor_last+1];
747 memset(in, 0, sizeof(in));
748 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last+1, in);
749 res->attr.anchor.irg.irg = irg;
751 /* hack to get get_irn_irg working: set block to ourself and allow
752 * get_Block_irg for anchor */
755 /* we can't have NULL inputs so reference ourselfes for now */
756 for (i = 0; i <= (size_t)anchor_last; ++i) {
757 set_irn_n(res, i, res);
763 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
765 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
766 res->attr.block.irg.irg = irg;
767 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
768 set_Block_matured(res, 1);
769 /* Create and initialize array for Phi-node construction. */
770 if (get_irg_phase_state(irg) == phase_building) {
771 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
772 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
774 irn_verify_irg(res, irg);