2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
/* Frontend-supplied hook invoked by get_r_value_internal() when a local
 * variable is read before it was ever written in the start block; NULL means
 * a plain Unknown node is created instead.  Installed via
 * ir_set_uninitialized_local_variable_func(). */
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* Convenience constructor: build a Const node from a host 'long' by first
 * converting the value to a tarval of the requested mode. */
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
/* Construct an ASM (inline assembler) node in the given block.
 * The node is pinned and has mode_T; input/output constraint arrays and the
 * clobber list are copied onto the graph's obstack so the caller's arrays
 * need not outlive this call.  The result is verified and then handed to
 * optimize_node(), which may return an already-existing equivalent node. */
59 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
60 ir_asm_constraint *inputs, size_t n_outs,
61 ir_asm_constraint *outputs, size_t n_clobber,
62 ident *clobber[], ident *text)
64 ir_graph *irg = get_irn_irg(block);
65 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
/* asm statements must not be moved by the optimizer */
67 res->attr.assem.pin_state = op_pin_state_pinned;
/* obstack-allocated copies of the constraint/clobber arrays */
68 res->attr.assem.input_constraints
69 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
70 res->attr.assem.output_constraints
71 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
72 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
73 res->attr.assem.text = text;
75 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
76 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
77 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
79 irn_verify_irg(res, irg);
80 res = optimize_node(res);
/* Simple Sel: select entity 'ent' from 'objptr' with no array indices
 * (0 index operands, NULL index array). */
84 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
85 ir_node *objptr, ir_entity *ent)
87 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
/* Construct a SymConst node.  SymConsts are placed in the start block of the
 * graph (they are constants and belong to no particular basic block of the
 * user's control flow).  The node is verified and passed through
 * optimize_node(), which may CSE it with an existing node. */
90 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
91 symconst_symbol value, symconst_kind symkind)
93 ir_node *block = get_irg_start_block(irg);
94 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
95 res->attr.symc.kind = symkind;
96 res->attr.symc.sym = value;
98 irn_verify_irg(res, irg);
99 res = optimize_node(res);
/* SymConst for the address of an entity. */
103 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
106 sym.entity_p = symbol;
107 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
/* SymConst for the offset of an entity within its owner type. */
110 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
113 sym.entity_p = symbol;
114 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
/* SymConst for the size of a type. */
117 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
121 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
/* SymConst for the alignment of a type. */
124 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
128 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
/* new_r_* variants: identical to the new_rd_* constructors above but without
 * debug info (NULL dbg_info is passed through). */
131 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
133 return new_rd_Const_long(NULL, irg, mode, value);
135 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
136 symconst_kind symkind)
138 return new_rd_SymConst(NULL, irg, mode, value, symkind);
140 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
143 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent)
145 ir_node *new_r_ASM(ir_node *block,
146 int arity, ir_node *in[], ir_asm_constraint *inputs,
147 size_t n_outs, ir_asm_constraint *outputs,
148 size_t n_clobber, ident *clobber[], ident *text)
150 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
153 /** Creates a Phi node with 0 predecessors. */
/* Phi0 is a placeholder used during SSA construction; 'pos' records which
 * local-variable slot the Phi stands for so its real arguments can be filled
 * in later by set_phi_arguments().  Note: the node is verified but NOT
 * optimized — it must survive until the block matures. */
154 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
155 ir_mode *mode, int pos)
157 ir_graph *irg = get_irn_irg(block);
158 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
159 res->attr.phi.u.pos = pos;
160 irn_verify_irg(res, irg);
/* forward declaration: mutually recursive with the Phi-construction helpers */
164 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
/* Remove a Phi node whose inputs all reduce to a single value (or to the Phi
 * itself).  Such Phis are redundant and are replaced by that value via
 * exchange().  Because removing one Phi can make a Phi in a loop header
 * redundant in turn, the check recurses into the replacement value. */
166 static void try_remove_unnecessary_phi(ir_node *phi)
168 ir_node *phi_value = NULL;
169 int arity = get_irn_arity(phi);
172 /* see if all inputs are either pointing to a single value or
173 * are self references */
174 for (i = 0; i < arity; ++i) {
175 ir_node *in = get_irn_n(phi, i);
180 /** found a different value from the one we already found, can't remove
182 if (phi_value != NULL)
/* no non-self input at all: nothing to replace with */
186 if (phi_value == NULL)
189 /* if we're here then all phi inputs have been either phi_value
190 * or self-references, we can replace the phi by phi_value.
191 * We do this with an Id-node */
192 exchange(phi, phi_value);
194 /* recursively check phi_value, because it could be that we were the last
195 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
196 * the loop header which can be eliminated now */
197 if (is_Phi(phi_value)) {
198 try_remove_unnecessary_phi(phi_value);
203 * Computes the predecessors for the real phi node, and then
204 * allocates and returns this node. The routine called to allocate the
205 * node might optimize it away and return a real value.
206 * This function must be called with an in-array of proper size.
/* Fills in the operands of a Phi0 placeholder once the block's predecessors
 * are known: one operand per control-flow predecessor, obtained by looking up
 * the variable's definition (get_r_value_internal) in each predecessor block.
 * Bad predecessors yield Bad operands. */
208 static ir_node *set_phi_arguments(ir_node *phi, int pos)
210 ir_node *block = get_nodes_block(phi);
211 ir_graph *irg = get_irn_irg(block);
212 int arity = get_irn_arity(block);
213 ir_node **in = ALLOCAN(ir_node*, arity);
214 ir_mode *mode = get_irn_mode(phi);
217 /* This loop goes to all predecessor blocks of the block the Phi node
218 is in and there finds the operands of the Phi node by calling
219 get_r_value_internal. */
220 for (i = 0; i < arity; ++i) {
221 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
223 if (is_Bad(cfgpred)) {
224 value = new_r_Bad(irg, mode);
/* fresh visited generation for the cycle detection in
 * get_r_value_internal */
226 inc_irg_visited(irg);
228 value = get_r_value_internal(cfgpred, pos, mode);
/* install the collected operands and a fresh backedge array */
233 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
234 set_irn_in(phi, arity, in);
236 irn_verify_irg(phi, irg);
238 /* Memory Phis in endless loops must be kept alive.
239 As we can't distinguish these easily we keep all of them alive. */
241 add_End_keepalive(get_irg_end(irg), phi);
/* the Phi may now be redundant (all operands equal) */
243 try_remove_unnecessary_phi(phi);
248 * This function returns the last definition of a value. In case
249 * this value was last defined in a previous block, Phi nodes are
250 * inserted. If the part of the firm graph containing the definition
251 * is not yet constructed, a dummy Phi node is returned.
253 * @param block the current block
254 * @param pos the value number of the value searched
255 * @param mode the mode of this value (needed for Phi construction)
257 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
/* fast path (elided above this line): if graph_arr[pos] already holds a
 * definition for this block it is returned directly */
259 ir_node *res = block->attr.block.graph_arr[pos];
260 ir_graph *irg = get_irn_irg(block);
264 /* We ran into a cycle. This may happen in unreachable loops. */
265 if (irn_visited_else_mark(block)) {
266 /* Since the loop is unreachable, return a Bad. */
267 return new_r_Bad(irg, mode);
270 /* in a matured block we can immediately determine the phi arguments */
271 if (get_Block_matured(block)) {
272 int arity = get_irn_arity(block);
273 /* no predecessors: use unknown value */
274 if (arity == 0 && block == get_irg_start_block(get_irn_irg(block))) {
275 if (default_initialize_local_variable != NULL) {
/* let the frontend create the initial value; temporarily make
 * 'block' the current block for the callback */
276 ir_node *rem = get_r_cur_block(irg);
277 set_r_cur_block(irg, block);
278 res = default_initialize_local_variable(irg, mode, pos - 1);
279 set_r_cur_block(irg, rem);
281 res = new_r_Unknown(irg, mode);
283 /* one predecessor just use its value */
284 } else if (arity == 1) {
285 ir_node *cfgpred = get_Block_cfgpred(block, 0);
286 if (is_Bad(cfgpred)) {
287 res = new_r_Bad(irg, mode);
289 ir_node *cfgpred_block = get_nodes_block(cfgpred);
290 res = get_r_value_internal(cfgpred_block, pos, mode);
292 /* multiple predecessors construct Phi */
294 res = new_rd_Phi0(NULL, block, mode, pos);
295 /* enter phi0 into our variable value table to break cycles
296 * arising from set_phi_arguments */
297 block->attr.block.graph_arr[pos] = res;
298 res = set_phi_arguments(res, pos);
301 /* in case of immature block we have to keep a Phi0 */
302 res = new_rd_Phi0(NULL, block, mode, pos);
303 /* enqueue phi so we can set arguments once the block matures */
304 res->attr.phi.next = block->attr.block.phis;
305 block->attr.block.phis = res;
/* cache the (possibly placeholder) definition for this block */
307 block->attr.block.graph_arr[pos] = res;
/* Finalize an immature block: its predecessor set becomes fixed.  All Phi0
 * placeholders queued on the block get their real arguments, the dynamic
 * in-array is frozen onto the obstack, and the block is finally optimized.
 * Calling this on an already-matured block is a no-op. */
311 void mature_immBlock(ir_node *block)
319 assert(is_Block(block));
320 if (get_Block_matured(block))
323 irg = get_irn_irg(block);
/* in[0] is the block's own slot, hence the -1 */
324 n_preds = ARR_LEN(block->in) - 1;
325 /* Fix block parameters */
326 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
328 /* Traverse a chain of Phi nodes attached to this block and mature
330 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
332 int pos = phi->attr.phi.u.pos;
334 next = phi->attr.phi.next;
335 new_value = set_phi_arguments(phi, pos);
/* set_phi_arguments may have replaced the Phi; update the variable
 * table only if it still points at the placeholder */
336 if (block->attr.block.graph_arr[pos] == phi) {
337 block->attr.block.graph_arr[pos] = new_value;
341 set_Block_matured(block, 1);
343 /* create final in-array for the block */
344 if (block->attr.block.dynamic_ins) {
345 new_in = NEW_ARR_D(ir_node*, irg->obst, n_preds+1);
346 memcpy(new_in, block->in, (n_preds+1) * sizeof(new_in[0]));
347 DEL_ARR_F(block->in);
349 block->attr.block.dynamic_ins = false;
352 /* Now, as the block is a finished Firm node, we can optimize it.
353 Since other nodes have been allocated since the block was created
354 we can not free the node on the obstack. Therefore we have to call
356 Unfortunately the optimization does not change a lot, as all allocated
357 nodes refer to the unoptimized node.
358 We can call optimize_in_place_2(), as global cse has no effect on blocks.
360 irn_verify_irg(block, irg);
361 block = optimize_in_place_2(block);
/* new_d_* variants: operate on current_ir_graph / its current block and are
 * only valid while the graph is in phase_building (asserted below). */
364 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
366 assert(get_irg_phase_state(current_ir_graph) == phase_building);
367 return new_rd_Const_long(db, current_ir_graph, mode, value);
370 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
373 assert(get_irg_phase_state(current_ir_graph) == phase_building);
374 return new_rd_Sel(db, current_ir_graph->current_block,
375 store, objptr, 0, NULL, ent);
378 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
381 assert(get_irg_phase_state(current_ir_graph) == phase_building);
382 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
385 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
386 ir_asm_constraint *inputs,
387 size_t n_outs, ir_asm_constraint *outputs,
388 size_t n_clobber, ident *clobber[], ident *text)
390 assert(get_irg_phase_state(current_ir_graph) == phase_building);
391 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
392 n_outs, outputs, n_clobber, clobber, text);
/* Construct a "strict" Conv: a Conv node with attr.conv.strict set, meaning
 * the conversion must not be optimized away (needed e.g. to force rounding
 * of floating point values).  The usual rd/r/d/plain wrapper hierarchy
 * follows. */
395 ir_node *new_rd_strictConv(dbg_info *dbgi, ir_node *block, ir_node * irn_op, ir_mode * mode)
398 ir_graph *irg = get_Block_irg(block);
403 res = new_ir_node(dbgi, irg, block, op_Conv, mode, 1, in);
404 res->attr.conv.strict = 1;
405 irn_verify_irg(res, irg);
406 res = optimize_node(res);
/* no-debug-info variant */
410 ir_node *new_r_strictConv(ir_node *block, ir_node * irn_op, ir_mode * mode)
412 return new_rd_strictConv(NULL, block, irn_op, mode);
/* current-block variant, phase_building only */
415 ir_node *new_d_strictConv(dbg_info *dbgi, ir_node * irn_op, ir_mode * mode)
418 assert(get_irg_phase_state(current_ir_graph) == phase_building);
419 res = new_rd_strictConv(dbgi, current_ir_graph->current_block, irn_op, mode);
/* no-debug-info, current-block variant */
423 ir_node *new_strictConv(ir_node * irn_op, ir_mode * mode)
425 return new_d_strictConv(NULL, irn_op, mode);
/* Construct a Div node with the no_remainder flag set ("DivRL": division
 * where the remainder is known to be zero / unused).  Takes memory, left and
 * right operands; result mode and pin state are stored in the attributes.
 * The usual rd/r/d/plain wrapper hierarchy follows. */
428 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
431 ir_graph *irg = get_Block_irg(block);
438 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
439 res->attr.div.resmode = resmode;
440 res->attr.div.no_remainder = 1;
441 res->attr.div.exc.pin_state = pin_state;
442 irn_verify_irg(res, irg);
443 res = optimize_node(res);
/* no-debug-info variant */
447 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
449 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
/* current-block variant, phase_building only */
452 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
455 assert(get_irg_phase_state(current_ir_graph) == phase_building);
456 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
/* no-debug-info, current-block variant */
460 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
462 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
/* Create an immature block: a block whose predecessors are not yet known.
 * It uses a dynamic in-array (arity -1) that grows via add_immBlock_pred()
 * and is frozen later by mature_immBlock().  Immature blocks are verified
 * but deliberately not optimized. */
465 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
469 assert(get_irg_phase_state(irg) == phase_building);
470 /* creates a new dynamic in-array as length of in is -1 */
471 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
473 set_Block_matured(res, 0);
474 res->attr.block.dynamic_ins = true;
475 res->attr.block.irg.irg = irg;
476 res->attr.block.backedge = NULL;
477 res->attr.block.extblk = NULL;
478 res->attr.block.entity = NULL;
480 set_Block_block_visited(res, 0);
482 /* Create and initialize array for Phi-node construction. */
483 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
484 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
486 /* Immature block may not be optimized! */
487 irn_verify_irg(res, irg);
/* no-debug-info variant */
492 ir_node *new_r_immBlock(ir_graph *irg)
494 return new_rd_immBlock(NULL, irg);
/* current_ir_graph variant with debug info */
497 ir_node *new_d_immBlock(dbg_info *dbgi)
499 return new_rd_immBlock(dbgi, current_ir_graph);
/* current_ir_graph variant without debug info */
502 ir_node *new_immBlock(void)
504 return new_rd_immBlock(NULL, current_ir_graph);
/* Append control-flow predecessor 'jmp' to an immature block's dynamic
 * in-array.  Only legal before the block has been matured. */
507 void add_immBlock_pred(ir_node *block, ir_node *jmp)
/* in[0] is the block's own slot, so the new edge's index is len-1 */
509 int n = ARR_LEN(block->in) - 1;
511 assert(is_Block(block) && "Error: Must be a Block");
512 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
513 assert(is_ir_node(jmp));
515 ARR_APP1(ir_node *, block->in, jmp);
/* notify debug/observer hooks about the new edge */
517 hook_set_irn_n(block, n, jmp, NULL);
/* Accessors for the graph's "current block" used by the new_d_*/new_*
 * constructors.  The *_r_* variants take an explicit graph; the plain ones
 * use current_ir_graph.  All require phase_building. */
520 void set_cur_block(ir_node *target)
522 set_r_cur_block(current_ir_graph, target);
525 void set_r_cur_block(ir_graph *irg, ir_node *target)
527 assert(get_irg_phase_state(irg) == phase_building);
/* NULL is allowed (no current block); otherwise it must be a block of irg */
528 assert(target == NULL || is_Block(target));
529 assert(target == NULL || get_irn_irg(target) == irg);
530 irg->current_block = target;
533 ir_node *get_r_cur_block(ir_graph *irg)
535 assert(get_irg_phase_state(irg) == phase_building);
536 return irg->current_block;
539 ir_node *get_cur_block(void)
541 return get_r_cur_block(current_ir_graph);
/* Look up the current value of local variable 'pos' in the current block,
 * triggering Phi construction as needed.  Slot 0 of graph_arr is reserved
 * for the memory state, hence the pos+1 shift. */
544 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
546 assert(get_irg_phase_state(irg) == phase_building);
/* fresh visited generation for cycle detection in get_r_value_internal */
548 inc_irg_visited(irg);
550 return get_r_value_internal(irg->current_block, pos + 1, mode);
/* current_ir_graph convenience wrapper */
553 ir_node *get_value(int pos, ir_mode *mode)
555 return get_r_value(current_ir_graph, pos, mode);
559 * helper function for guess_mode: recursively look for a definition for
560 * local variable @p pos, returns its mode if found.
562 static ir_mode *guess_recursively(ir_node *block, int pos)
/* visited flag guards against cycles in the CFG */
568 if (irn_visited_else_mark(block))
571 /* already have a defintion -> we can simply look at its mode */
572 value = block->attr.block.graph_arr[pos];
574 return get_irn_mode(value);
576 /* now we try to guess, by looking at the predecessor blocks */
577 n_preds = get_irn_arity(block);
578 for (i = 0; i < n_preds; ++i) {
579 ir_node *pred_block = get_Block_cfgpred_block(block, i);
580 ir_mode *mode = guess_recursively(pred_block, pos);
585 /* no way to guess */
/* Guess the mode of local variable 'pos' by searching existing definitions,
 * starting at the current block (pos+1: slot 0 is the memory state).  Uses
 * the IRN_VISITED resource for the recursive walk. */
589 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
591 ir_node *block = irg->current_block;
592 ir_node *value = block->attr.block.graph_arr[pos+1];
595 /* already have a defintion -> we can simply look at its mode */
597 return get_irn_mode(value);
599 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
600 inc_irg_visited(irg);
601 mode = guess_recursively(block, pos+1);
602 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
/* current_ir_graph convenience wrapper */
607 ir_mode *ir_guess_mode(int pos)
609 return ir_r_guess_mode(current_ir_graph, pos);
/* Record 'value' as the current definition of local variable 'pos' in the
 * current block (pos+1: slot 0 is the memory state). */
612 void set_r_value(ir_graph *irg, int pos, ir_node *value)
614 assert(get_irg_phase_state(irg) == phase_building);
616 assert(pos+1 < irg->n_loc);
617 assert(is_ir_node(value));
618 irg->current_block->attr.block.graph_arr[pos + 1] = value;
/* current_ir_graph convenience wrapper */
621 void set_value(int pos, ir_node *value)
623 set_r_value(current_ir_graph, pos, value);
/* Return the current memory state (variable slot 0) of the current block,
 * constructing Phis as needed — analogous to get_r_value(). */
626 ir_node *get_r_store(ir_graph *irg)
628 assert(get_irg_phase_state(irg) == phase_building);
629 inc_irg_visited(irg);
630 return get_r_value_internal(irg->current_block, 0, mode_M);
/* current_ir_graph convenience wrapper */
633 ir_node *get_store(void)
635 return get_r_store(current_ir_graph);
/* Set the current memory state (variable slot 0).  When the auto-create-sync
 * option is on, consecutive non-volatile Loads are rewired so they hang off
 * the same memory input, joined by a Sync node, instead of forming a serial
 * memory chain — exposing their independence to later optimizations. */
638 void set_r_store(ir_graph *irg, ir_node *store)
640 ir_node *load, *pload, *pred, *in[2];
642 assert(get_irg_phase_state(irg) == phase_building);
643 /* Beware: due to dead code elimination, a store might become a Bad node even in
644 the construction phase. */
645 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
647 if (get_opt_auto_create_sync()) {
648 /* handle non-volatile Load nodes by automatically creating Sync's */
649 load = skip_Proj(store);
650 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
651 pred = get_Load_mem(load);
654 /* a Load after a Sync: move it up */
655 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
657 set_Load_mem(load, get_memop_mem(mem));
658 add_Sync_pred(pred, store);
/* check the memory predecessor of this Load */
661 pload = skip_Proj(pred);
662 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
663 /* a Load after a Load: create a new Sync */
664 set_Load_mem(load, get_Load_mem(pload));
668 store = new_r_Sync(irg->current_block, 2, in);
/* slot 0 of graph_arr holds the memory state */
673 irg->current_block->attr.block.graph_arr[0] = store;
/* current_ir_graph convenience wrapper */
676 void set_store(ir_node *store)
678 set_r_store(current_ir_graph, store);
/* Prevent 'ka' from being removed as dead code by adding it to the End
 * node's keep-alive list. */
681 void keep_alive(ir_node *ka)
683 ir_graph *irg = get_irn_irg(ka);
684 add_End_keepalive(get_irg_end(irg), ka);
/* Install the frontend callback used for reads of uninitialized local
 * variables (see default_initialize_local_variable above). */
687 void ir_set_uninitialized_local_variable_func(
688 uninitialized_local_variable_func_t *func)
690 default_initialize_local_variable = func;
/* Finish SSA construction for one graph: mature the end block and leave
 * phase_building. */
693 void irg_finalize_cons(ir_graph *irg)
695 ir_node *end_block = get_irg_end_block(irg);
696 mature_immBlock(end_block);
698 set_irg_phase_state(irg, phase_high);
/* Finalize construction of every graph in the program and mark the whole
 * program representation as phase_high. */
701 void irp_finalize_cons(void)
704 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
705 irg_finalize_cons(get_irp_irg(i));
707 irp->phase_state = phase_high;
/* new_* variants: current_ir_graph, no debug info — thin wrappers over the
 * new_d_* constructors above. */
710 ir_node *new_Const_long(ir_mode *mode, long value)
712 return new_d_Const_long(NULL, mode, value);
715 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
717 return new_d_SymConst(NULL, mode, value, kind);
719 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
721 return new_d_simpleSel(NULL, store, objptr, ent);
723 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
724 size_t n_outs, ir_asm_constraint *outputs,
725 size_t n_clobber, ident *clobber[], ident *text)
727 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
/* Create the graph's Anchor node: a pseudo-node with one input per anchor
 * slot (anchor_last+1 in total).  Since inputs may not be NULL, every slot
 * is initially wired to the Anchor itself. */
730 ir_node *new_r_Anchor(ir_graph *irg)
732 ir_node *in[anchor_last+1];
735 memset(in, 0, sizeof(in));
736 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last+1, in);
737 res->attr.anchor.irg.irg = irg;
739 /* hack to get get_irn_irg working: set block to ourself and allow
740 * get_Block_irg for anchor */
743 /* we can't have NULL inputs so reference ourselfes for now */
744 for (i = 0; i <= (size_t)anchor_last; ++i) {
745 set_irn_n(res, i, res);
751 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
753 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
754 res->attr.block.irg.irg = irg;
755 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
756 set_Block_matured(res, 1);
757 /* Create and initialize array for Phi-node construction. */
758 if (get_irg_phase_state(irg) == phase_building) {
759 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
760 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
762 irn_verify_irg(res, irg);