2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
/* NOTE(review): hook invoked by get_r_value_internal() when an uninitialized
 * local variable is read in the start block; NULL selects new_r_Unknown().
 * Installed via ir_set_uninitialized_local_variable_func(). */
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
59 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, int arity, ir_node *in[],
60 ir_asm_constraint *inputs, size_t n_outs,
61 ir_asm_constraint *outputs, size_t n_clobber,
62 ident *clobber[], ident *text)
64 ir_graph *irg = get_irn_irg(block);
65 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, arity, in);
67 res->attr.assem.pin_state = op_pin_state_pinned;
68 res->attr.assem.input_constraints
69 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
70 res->attr.assem.output_constraints
71 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
72 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
73 res->attr.assem.text = text;
75 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
76 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
77 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
79 irn_verify_irg(res, irg);
80 res = optimize_node(res);
84 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
85 ir_node *objptr, ir_entity *ent)
87 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
90 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
91 symconst_symbol value, symconst_kind symkind)
93 ir_node *block = get_irg_start_block(irg);
94 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
95 res->attr.symc.kind = symkind;
96 res->attr.symc.sym = value;
98 irn_verify_irg(res, irg);
99 res = optimize_node(res);
103 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
106 sym.entity_p = symbol;
107 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
110 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
113 sym.entity_p = symbol;
114 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
117 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
121 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
124 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
128 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
131 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
133 return new_rd_Const_long(NULL, irg, mode, value);
135 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
136 symconst_kind symkind)
138 return new_rd_SymConst(NULL, irg, mode, value, symkind);
140 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
143 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
145 ir_node *new_r_ASM(ir_node *block,
146 int arity, ir_node *in[], ir_asm_constraint *inputs,
147 size_t n_outs, ir_asm_constraint *outputs,
148 size_t n_clobber, ident *clobber[], ident *text)
150 return new_rd_ASM(NULL, block, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
153 /** Creates a Phi node with 0 predecessors. */
154 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
155 ir_mode *mode, int pos)
157 ir_graph *irg = get_irn_irg(block);
158 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
159 res->attr.phi.u.pos = pos;
160 irn_verify_irg(res, irg);
164 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
166 static void try_remove_unnecessary_phi(ir_node *phi)
168 ir_node *phi_value = NULL;
169 int arity = get_irn_arity(phi);
172 /* see if all inputs are either pointing to a single value or
173 * are self references */
174 for (i = 0; i < arity; ++i) {
175 ir_node *in = get_irn_n(phi, i);
180 /** found a different value from the one we already found, can't remove
182 if (phi_value != NULL)
186 if (phi_value == NULL)
189 /* if we're here then all phi inputs have been either phi_value
190 * or self-references, we can replace the phi by phi_value.
191 * We do this with an Id-node */
192 exchange(phi, phi_value);
194 /* recursively check phi_value, because it could be that we were the last
195 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
196 * the loop header which can be eliminated now */
197 if (is_Phi(phi_value)) {
198 try_remove_unnecessary_phi(phi_value);
203 * Computes the predecessors for the real phi node, and then
204 * allocates and returns this node. The routine called to allocate the
205 * node might optimize it away and return a real value.
206 * This function must be called with an in-array of proper size.
208 static ir_node *set_phi_arguments(ir_node *phi, int pos)
210 ir_node *block = get_nodes_block(phi);
211 ir_graph *irg = get_irn_irg(block);
212 int arity = get_irn_arity(block);
213 ir_node **in = ALLOCAN(ir_node*, arity);
214 ir_mode *mode = get_irn_mode(phi);
217 /* This loop goes to all predecessor blocks of the block the Phi node
218 is in and there finds the operands of the Phi node by calling
219 get_r_value_internal. */
220 for (i = 0; i < arity; ++i) {
221 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
223 if (is_Bad(cfgpred)) {
224 value = new_r_Bad(irg, mode);
226 inc_irg_visited(irg);
228 value = get_r_value_internal(cfgpred, pos, mode);
233 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
234 set_irn_in(phi, arity, in);
235 set_irn_op(phi, op_Phi);
237 irn_verify_irg(phi, irg);
239 /* Memory Phis in endless loops must be kept alive.
240 As we can't distinguish these easily we keep all of them alive. */
241 if (is_Phi(phi) && mode == mode_M)
242 add_End_keepalive(get_irg_end(irg), phi);
244 try_remove_unnecessary_phi(phi);
249 * This function returns the last definition of a value. In case
250 * this value was last defined in a previous block, Phi nodes are
251 * inserted. If the part of the firm graph containing the definition
252 * is not yet constructed, a dummy Phi node is returned.
254 * @param block the current block
255 * @param pos the value number of the value searched
256 * @param mode the mode of this value (needed for Phi construction)
258 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
260 ir_node *res = block->attr.block.graph_arr[pos];
261 ir_graph *irg = get_irn_irg(block);
265 /* We ran into a cycle. This may happen in unreachable loops. */
266 if (irn_visited_else_mark(block)) {
267 /* Since the loop is unreachable, return a Bad. */
268 return new_r_Bad(irg, mode);
271 /* in a matured block we can immediately determine the phi arguments */
272 if (get_Block_matured(block)) {
273 int arity = get_irn_arity(block);
274 /* no predecessors: use unknown value */
275 if (arity == 0 && block == get_irg_start_block(get_irn_irg(block))) {
276 if (default_initialize_local_variable != NULL) {
277 ir_node *rem = get_r_cur_block(irg);
278 set_r_cur_block(irg, block);
279 res = default_initialize_local_variable(irg, mode, pos - 1);
280 set_r_cur_block(irg, rem);
282 res = new_r_Unknown(irg, mode);
284 /* one predecessor just use its value */
285 } else if (arity == 1) {
286 ir_node *cfgpred = get_Block_cfgpred(block, 0);
287 if (is_Bad(cfgpred)) {
288 res = new_r_Bad(irg, mode);
290 ir_node *cfgpred_block = get_nodes_block(cfgpred);
291 res = get_r_value_internal(cfgpred_block, pos, mode);
293 /* multiple predecessors construct Phi */
295 res = new_rd_Phi0(NULL, block, mode, pos);
296 /* enter phi0 into our variable value table to break cycles
297 * arising from set_phi_arguments */
298 block->attr.block.graph_arr[pos] = res;
299 res = set_phi_arguments(res, pos);
302 /* in case of immature block we have to keep a Phi0 */
303 res = new_rd_Phi0(NULL, block, mode, pos);
304 /* enqueue phi so we can set arguments once the block matures */
305 res->attr.phi.next = block->attr.block.phis;
306 block->attr.block.phis = res;
308 block->attr.block.graph_arr[pos] = res;
312 void mature_immBlock(ir_node *block)
320 assert(is_Block(block));
321 if (get_Block_matured(block))
324 irg = get_irn_irg(block);
325 n_preds = ARR_LEN(block->in) - 1;
326 /* Fix block parameters */
327 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
329 /* Traverse a chain of Phi nodes attached to this block and mature
331 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
333 int pos = phi->attr.phi.u.pos;
335 next = phi->attr.phi.next;
336 new_value = set_phi_arguments(phi, pos);
337 if (block->attr.block.graph_arr[pos] == phi) {
338 block->attr.block.graph_arr[pos] = new_value;
342 set_Block_matured(block, 1);
344 /* create final in-array for the block */
345 if (block->attr.block.dynamic_ins) {
346 new_in = NEW_ARR_D(ir_node*, irg->obst, n_preds+1);
347 memcpy(new_in, block->in, (n_preds+1) * sizeof(new_in[0]));
348 DEL_ARR_F(block->in);
350 block->attr.block.dynamic_ins = false;
353 /* Now, as the block is a finished Firm node, we can optimize it.
354 Since other nodes have been allocated since the block was created
355 we can not free the node on the obstack. Therefore we have to call
357 Unfortunately the optimization does not change a lot, as all allocated
358 nodes refer to the unoptimized node.
359 We can call optimize_in_place_2(), as global cse has no effect on blocks.
361 irn_verify_irg(block, irg);
362 block = optimize_in_place_2(block);
365 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
367 assert(get_irg_phase_state(current_ir_graph) == phase_building);
368 return new_rd_Const_long(db, current_ir_graph, mode, value);
371 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
374 assert(get_irg_phase_state(current_ir_graph) == phase_building);
375 return new_rd_Sel(db, current_ir_graph->current_block,
376 store, objptr, 0, NULL, ent);
379 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
382 assert(get_irg_phase_state(current_ir_graph) == phase_building);
383 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
386 ir_node *new_d_ASM(dbg_info *db, int arity, ir_node *in[],
387 ir_asm_constraint *inputs,
388 size_t n_outs, ir_asm_constraint *outputs,
389 size_t n_clobber, ident *clobber[], ident *text)
391 assert(get_irg_phase_state(current_ir_graph) == phase_building);
392 return new_rd_ASM(db, current_ir_graph->current_block, arity, in, inputs,
393 n_outs, outputs, n_clobber, clobber, text);
396 ir_node *new_rd_strictConv(dbg_info *dbgi, ir_node *block, ir_node * irn_op, ir_mode * mode)
399 ir_graph *irg = get_Block_irg(block);
404 res = new_ir_node(dbgi, irg, block, op_Conv, mode, 1, in);
405 res->attr.conv.strict = 1;
406 irn_verify_irg(res, irg);
407 res = optimize_node(res);
411 ir_node *new_r_strictConv(ir_node *block, ir_node * irn_op, ir_mode * mode)
413 return new_rd_strictConv(NULL, block, irn_op, mode);
416 ir_node *new_d_strictConv(dbg_info *dbgi, ir_node * irn_op, ir_mode * mode)
419 assert(get_irg_phase_state(current_ir_graph) == phase_building);
420 res = new_rd_strictConv(dbgi, current_ir_graph->current_block, irn_op, mode);
424 ir_node *new_strictConv(ir_node * irn_op, ir_mode * mode)
426 return new_d_strictConv(NULL, irn_op, mode);
429 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
432 ir_graph *irg = get_Block_irg(block);
439 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
440 res->attr.div.resmode = resmode;
441 res->attr.div.no_remainder = 1;
442 res->attr.div.exc.pin_state = pin_state;
443 irn_verify_irg(res, irg);
444 res = optimize_node(res);
448 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
450 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
453 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
456 assert(get_irg_phase_state(current_ir_graph) == phase_building);
457 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
461 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
463 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
466 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
470 assert(get_irg_phase_state(irg) == phase_building);
471 /* creates a new dynamic in-array as length of in is -1 */
472 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
474 set_Block_matured(res, 0);
475 res->attr.block.dynamic_ins = true;
476 res->attr.block.irg.irg = irg;
477 res->attr.block.backedge = NULL;
478 res->attr.block.extblk = NULL;
479 res->attr.block.entity = NULL;
481 set_Block_block_visited(res, 0);
483 /* Create and initialize array for Phi-node construction. */
484 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
485 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
487 /* Immature block may not be optimized! */
488 irn_verify_irg(res, irg);
493 ir_node *new_r_immBlock(ir_graph *irg)
495 return new_rd_immBlock(NULL, irg);
498 ir_node *new_d_immBlock(dbg_info *dbgi)
500 return new_rd_immBlock(dbgi, current_ir_graph);
503 ir_node *new_immBlock(void)
505 return new_rd_immBlock(NULL, current_ir_graph);
508 void add_immBlock_pred(ir_node *block, ir_node *jmp)
510 int n = ARR_LEN(block->in) - 1;
512 assert(is_Block(block) && "Error: Must be a Block");
513 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
514 assert(is_ir_node(jmp));
516 ARR_APP1(ir_node *, block->in, jmp);
518 hook_set_irn_n(block, n, jmp, NULL);
521 void set_cur_block(ir_node *target)
523 set_r_cur_block(current_ir_graph, target);
526 void set_r_cur_block(ir_graph *irg, ir_node *target)
528 assert(get_irg_phase_state(irg) == phase_building);
529 assert(target == NULL || is_Block(target));
530 assert(target == NULL || get_irn_irg(target) == irg);
531 irg->current_block = target;
534 ir_node *get_r_cur_block(ir_graph *irg)
536 assert(get_irg_phase_state(irg) == phase_building);
537 return irg->current_block;
540 ir_node *get_cur_block(void)
542 return get_r_cur_block(current_ir_graph);
545 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
547 assert(get_irg_phase_state(irg) == phase_building);
549 inc_irg_visited(irg);
551 return get_r_value_internal(irg->current_block, pos + 1, mode);
554 ir_node *get_value(int pos, ir_mode *mode)
556 return get_r_value(current_ir_graph, pos, mode);
560 * helper function for guess_mode: recursively look for a definition for
561 * local variable @p pos, returns its mode if found.
563 static ir_mode *guess_recursively(ir_node *block, int pos)
569 if (irn_visited_else_mark(block))
572 /* already have a defintion -> we can simply look at its mode */
573 value = block->attr.block.graph_arr[pos];
575 return get_irn_mode(value);
577 /* now we try to guess, by looking at the predecessor blocks */
578 n_preds = get_irn_arity(block);
579 for (i = 0; i < n_preds; ++i) {
580 ir_node *pred_block = get_Block_cfgpred_block(block, i);
581 ir_mode *mode = guess_recursively(pred_block, pos);
586 /* no way to guess */
590 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
592 ir_node *block = irg->current_block;
593 ir_node *value = block->attr.block.graph_arr[pos+1];
596 /* already have a defintion -> we can simply look at its mode */
598 return get_irn_mode(value);
600 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
601 inc_irg_visited(irg);
602 mode = guess_recursively(block, pos+1);
603 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
608 ir_mode *ir_guess_mode(int pos)
610 return ir_r_guess_mode(current_ir_graph, pos);
613 void set_r_value(ir_graph *irg, int pos, ir_node *value)
615 assert(get_irg_phase_state(irg) == phase_building);
617 assert(pos+1 < irg->n_loc);
618 assert(is_ir_node(value));
619 irg->current_block->attr.block.graph_arr[pos + 1] = value;
622 void set_value(int pos, ir_node *value)
624 set_r_value(current_ir_graph, pos, value);
627 ir_node *get_r_store(ir_graph *irg)
629 assert(get_irg_phase_state(irg) == phase_building);
630 inc_irg_visited(irg);
631 return get_r_value_internal(irg->current_block, 0, mode_M);
634 ir_node *get_store(void)
636 return get_r_store(current_ir_graph);
/* Sets the current memory state ("store", variable slot 0) of the graph
 * under construction. When the auto-create-sync option is on, consecutive
 * non-volatile Loads are decoupled by Sync nodes so they do not serialize
 * on each other. */
639 void set_r_store(ir_graph *irg, ir_node *store)
641 ir_node *load, *pload, *pred, *in[2];
643 assert(get_irg_phase_state(irg) == phase_building);
644 /* Beware: due to dead code elimination, a store might become a Bad node even in
645 the construction phase. */
646 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
648 if (get_opt_auto_create_sync()) {
649 /* handle non-volatile Load nodes by automatically creating Sync's */
650 load = skip_Proj(store);
651 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
652 pred = get_Load_mem(load);
/* NOTE(review): a branch testing the kind of 'pred' (presumably
 * is_Sync(pred)) appears to be missing from this excerpt; the next lines
 * handle the "Load after Sync" case. Confirm against upstream ircons.c. */
655 /* a Load after a Sync: move it up */
656 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
658 set_Load_mem(load, get_memop_mem(mem));
659 add_Sync_pred(pred, store);
662 pload = skip_Proj(pred);
663 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
664 /* a Load after a Load: create a new Sync */
665 set_Load_mem(load, get_Load_mem(pload));
/* NOTE(review): the assignments filling in[0]/in[1] are missing from this
 * excerpt; the Sync below joins the two memory states. */
669 store = new_r_Sync(irg->current_block, 2, in);
/* the (possibly rebuilt) memory state becomes the definition of slot 0 */
674 irg->current_block->attr.block.graph_arr[0] = store;
677 void set_store(ir_node *store)
679 set_r_store(current_ir_graph, store);
682 void keep_alive(ir_node *ka)
684 ir_graph *irg = get_irn_irg(ka);
685 add_End_keepalive(get_irg_end(irg), ka);
688 void ir_set_uninitialized_local_variable_func(
689 uninitialized_local_variable_func_t *func)
691 default_initialize_local_variable = func;
694 void irg_finalize_cons(ir_graph *irg)
696 ir_node *end_block = get_irg_end_block(irg);
697 mature_immBlock(end_block);
699 set_irg_phase_state(irg, phase_high);
702 void irp_finalize_cons(void)
705 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
706 irg_finalize_cons(get_irp_irg(i));
708 irp->phase_state = phase_high;
711 ir_node *new_Const_long(ir_mode *mode, long value)
713 return new_d_Const_long(NULL, mode, value);
716 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
718 return new_d_SymConst(NULL, mode, value, kind);
720 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
722 return new_d_simpleSel(NULL, store, objptr, ent);
724 ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
725 size_t n_outs, ir_asm_constraint *outputs,
726 size_t n_clobber, ident *clobber[], ident *text)
728 return new_d_ASM(NULL, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
/* Creates the per-graph Anchor node: a pseudo node whose inputs reference
 * distinguished nodes of the graph. It is created with all inputs pointing
 * to itself until the real anchors are set. */
731 ir_node *new_r_Anchor(ir_graph *irg)
733 ir_node *in[anchor_last+1];
736 memset(in, 0, sizeof(in));
737 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last+1, in);
738 res->attr.anchor.irg.irg = irg;
740 /* hack to get get_irn_irg working: set block to ourself and allow
741 * get_Block_irg for anchor */
/* NOTE(review): the statement implementing the hack described above is
 * missing from this excerpt — confirm against upstream ircons.c. */
744 /* we can't have NULL inputs so reference ourselfes for now */
745 for (i = 0; i <= (size_t)anchor_last; ++i) {
746 set_irn_n(res, i, res);
752 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
754 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
755 res->attr.block.irg.irg = irg;
756 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
757 set_Block_matured(res, 1);
758 /* Create and initialize array for Phi-node construction. */
759 if (get_irg_phase_state(irg) == phase_building) {
760 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
761 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
763 irn_verify_irg(res, irg);