2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
/* Frontend-installed hook invoked by get_r_value_internal() when a local
 * variable is read before any definition exists in the start block;
 * NULL means "fall back to new_r_Unknown".  Installed via
 * ir_set_uninitialized_local_variable_func(). */
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* Create a Const node from a plain 'long': wrap the value in a tarval of
 * the given mode and delegate to new_rd_Const().
 * NOTE(review): the parameter-list continuation ('long value)') and the
 * function braces appear stripped from this extract -- confirm against
 * the full source. */
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
/* Construct an ASM (inline assembly) node in 'block'.
 * The memory operand is prepended in front of the 'arity' data inputs,
 * hence r_arity = arity + 1 and the data inputs are copied starting at
 * r_in[1].  Constraint and clobber arrays are duplicated onto the
 * graph's obstack, so the caller's arrays need not outlive this call.
 * The node produces mode_T (a tuple) and is pinned.
 * NOTE(review): lines storing mem into r_in[0] and the final
 * 'return res;' appear stripped from this extract -- confirm against
 * the full source. */
59 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, ir_node *mem,
60 int arity, ir_node *in[], ir_asm_constraint *inputs,
61 size_t n_outs, ir_asm_constraint *outputs, size_t n_clobber,
62 ident *clobber[], ident *text)
64 ir_graph *irg = get_irn_irg(block);
/* one extra slot for the memory input */
66 int r_arity = arity+1;
68 NEW_ARR_A(ir_node*, r_in, r_arity);
70 memcpy(&r_in[1], in, arity*sizeof(ir_node*));
72 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, r_arity, r_in);
74 res->attr.assem.pin_state = op_pin_state_pinned;
/* allocate persistent copies of the constraint/clobber arrays */
75 res->attr.assem.input_constraints
76 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
77 res->attr.assem.output_constraints
78 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
79 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
80 res->attr.assem.text = text;
82 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
83 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
84 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
86 irn_verify_irg(res, irg);
87 res = optimize_node(res);
/* A "simple" Sel: entity selection without array indices -- delegates to
 * new_rd_Sel with an empty (0, NULL) index list. */
91 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
92 ir_node *objptr, ir_entity *ent)
94 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
/* Create a SymConst (symbolic constant) node.  SymConsts are always
 * placed in the start block of the graph.  'symkind' selects which
 * member of the symconst_symbol union 'value' is meaningful.
 * NOTE(review): the final 'return res;' appears stripped from this
 * extract -- confirm against the full source. */
97 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
98 symconst_symbol value, symconst_kind symkind)
100 ir_node *block = get_irg_start_block(irg);
101 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
102 res->attr.symc.kind = symkind;
103 res->attr.symc.sym = value;
105 irn_verify_irg(res, irg);
106 res = optimize_node(res);
/* Convenience constructors: each fills the symconst_symbol union with the
 * appropriate member and forwards to new_rd_SymConst with the matching
 * symconst_kind (entity address, entity offset, type size, type align).
 * NOTE(review): the local 'symconst_symbol sym;' declarations and the
 * '.type_p = symbol' assignments of the size/align variants appear
 * stripped from this extract -- confirm against the full source. */
110 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
113 sym.entity_p = symbol;
114 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
117 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
120 sym.entity_p = symbol;
121 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
124 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
128 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
131 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
135 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
/* new_r_* variants: same as the new_rd_* constructors above but without
 * debug info (db == NULL). */
138 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
140 return new_rd_Const_long(NULL, irg, mode, value);
142 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
143 symconst_kind symkind)
145 return new_rd_SymConst(NULL, irg, mode, value, symkind);
/* NOTE(review): the 'ir_entity *ent' parameter line of new_r_simpleSel
 * appears stripped from this extract. */
147 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
150 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
152 ir_node *new_r_ASM(ir_node *block, ir_node *mem,
153 int arity, ir_node *in[], ir_asm_constraint *inputs,
154 size_t n_outs, ir_asm_constraint *outputs,
155 size_t n_clobber, ident *clobber[], ident *text)
157 return new_rd_ASM(NULL, block, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
160 /** Creates a Phi node with 0 predecessors. */
/* Placeholder Phi used during SSA construction: its predecessors are not
 * known yet, so only the local-variable number 'pos' is remembered for
 * later resolution by set_phi_arguments()/mature_immBlock().
 * NOTE(review): the final 'return res;' appears stripped from this
 * extract. */
161 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
162 ir_mode *mode, int pos)
164 ir_graph *irg = get_irn_irg(block);
165 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
166 res->attr.phi.u.pos = pos;
167 irn_verify_irg(res, irg);
/* forward declaration: mutually recursive with set_phi_arguments() */
171 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
/* Remove a Phi whose inputs are all the same value (or self-references):
 * such a Phi is redundant and is replaced by that single value.
 * NOTE(review): several loop-body lines (self-reference check, the
 * phi_value assignment, early 'return's) appear stripped from this
 * extract -- confirm against the full source. */
173 static void try_remove_unnecessary_phi(ir_node *phi)
175 ir_node *phi_value = NULL;
176 int arity = get_irn_arity(phi);
179 /* see if all inputs are either pointing to a single value or
180 * are self references */
181 for (i = 0; i < arity; ++i) {
182 ir_node *in = get_irn_n(phi, i);
187 /** found a different value from the one we already found, can't remove
189 if (phi_value != NULL)
193 if (phi_value == NULL)
196 /* if we're here then all phi inputs have been either phi_value
197 * or self-references, we can replace the phi by phi_value.
198 * We do this with an Id-node */
199 exchange(phi, phi_value);
201 /* recursively check phi_value, because it could be that we were the last
202 * phi-node in a loop-body. Then our argument is an unnecessary phi in
203 * the loop header which can be eliminated now */
204 if (is_Phi(phi_value)) {
205 try_remove_unnecessary_phi(phi_value);
210 * Computes the predecessors for the real phi node, and then
211 * allocates and returns this node. The routine called to allocate the
212 * node might optimize it away and return a real value.
213 * This function must be called with an in-array of proper size.
/* Resolves a Phi0 placeholder: looks up the value of local variable
 * 'pos' in every control-flow predecessor block (possibly recursing via
 * get_r_value_internal) and installs those as the Phi's real inputs.
 * NOTE(review): lines declaring loop locals, storing 'value' into
 * 'in[i]', the mode_M check guarding the keepalive, and the final
 * return appear stripped from this extract. */
215 static ir_node *set_phi_arguments(ir_node *phi, int pos)
217 ir_node *block = get_nodes_block(phi);
218 ir_graph *irg = get_irn_irg(block);
219 int arity = get_irn_arity(block);
220 ir_node **in = ALLOCAN(ir_node*, arity);
221 ir_mode *mode = get_irn_mode(phi);
224 /* This loop goes to all predecessor blocks of the block the Phi node
225 is in and there finds the operands of the Phi node by calling
226 get_r_value_internal. */
227 for (i = 0; i < arity; ++i) {
228 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
230 if (is_Bad(cfgpred)) {
231 value = new_r_Bad(irg, mode);
233 value = get_r_value_internal(cfgpred, pos, mode);
/* all predecessors known now: give the Phi its real in-array */
238 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
239 set_irn_in(phi, arity, in);
241 irn_verify_irg(phi, irg);
243 /* Memory Phis in endless loops must be kept alive.
244 As we can't distinguish these easily we keep all of them alive. */
246 add_End_keepalive(get_irg_end(irg), phi);
248 try_remove_unnecessary_phi(phi);
253 * This function returns the last definition of a value. In case
254 * this value was last defined in a previous block, Phi nodes are
255 * inserted. If the part of the firm graph containing the definition
256 * is not yet constructed, a dummy Phi node is returned.
258 * @param block the current block
259 * @param pos the value number of the value searched
260 * @param mode the mode of this value (needed for Phi construction)
/* NOTE(review): the early 'if (res != NULL) return res;' fast path, the
 * arity==0 / else branch lines and the final return appear stripped
 * from this extract -- confirm against the full source. */
262 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
/* fast path: a definition for slot 'pos' is already cached in the block */
264 ir_node *res = block->attr.block.graph_arr[pos];
265 ir_graph *irg = get_irn_irg(block);
269 /* in a matured block we can immediately determine the phi arguments */
270 if (get_Block_matured(block)) {
271 int arity = get_irn_arity(block);
272 /* no predecessors: use unknown value */
274 if (block == get_irg_start_block(irg)) {
/* let the frontend create an initial value if it installed a hook */
275 if (default_initialize_local_variable != NULL) {
276 ir_node *rem = get_r_cur_block(irg);
277 set_r_cur_block(irg, block);
278 res = default_initialize_local_variable(irg, mode, pos - 1);
279 set_r_cur_block(irg, rem);
281 res = new_r_Unknown(irg, mode);
284 /* unreachable block, use Bad */
285 res = new_r_Bad(irg, mode);
287 /* one predecessor just use its value */
288 } else if (arity == 1) {
289 ir_node *cfgpred = get_Block_cfgpred(block, 0);
290 if (is_Bad(cfgpred)) {
291 res = new_r_Bad(irg, mode);
293 ir_node *cfgpred_block = get_nodes_block(cfgpred);
294 res = get_r_value_internal(cfgpred_block, pos, mode);
296 /* multiple predecessors construct Phi */
298 res = new_rd_Phi0(NULL, block, mode, pos);
299 /* enter phi0 into our variable value table to break cycles
300 * arising from set_phi_arguments */
301 block->attr.block.graph_arr[pos] = res;
302 res = set_phi_arguments(res, pos);
305 /* in case of immature block we have to keep a Phi0 */
306 res = new_rd_Phi0(NULL, block, mode, pos);
307 /* enqueue phi so we can set arguments once the block matures */
308 res->attr.phi.next = block->attr.block.phis;
309 block->attr.block.phis = res;
/* cache the result so later lookups in this block are O(1) */
311 block->attr.block.graph_arr[pos] = res;
/* Finalize an immature block: once all control-flow predecessors have
 * been added, resolve every queued Phi0 attached to the block, mark the
 * block matured, replace its dynamic in-array by a final obstack copy,
 * and optimize the block in place.
 * NOTE(review): local declarations (irg, n_preds, phi, next, new_value,
 * new_in), an early return, and the 'block->in = new_in;' line appear
 * stripped from this extract -- confirm against the full source. */
315 void mature_immBlock(ir_node *block)
323 assert(is_Block(block));
324 if (get_Block_matured(block))
327 irg = get_irn_irg(block);
/* block->in[0] is the block's own slot, hence the -1 */
328 n_preds = ARR_LEN(block->in) - 1;
329 /* Fix block parameters */
330 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
332 /* Traverse a chain of Phi nodes attached to this block and mature
334 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
336 int pos = phi->attr.phi.u.pos;
338 next = phi->attr.phi.next;
339 new_value = set_phi_arguments(phi, pos);
/* update the value cache only if it still points at this phi */
340 if (block->attr.block.graph_arr[pos] == phi) {
341 block->attr.block.graph_arr[pos] = new_value;
345 set_Block_matured(block, 1);
347 /* create final in-array for the block */
348 if (block->attr.block.dynamic_ins) {
349 new_in = NEW_ARR_D(ir_node*, irg->obst, n_preds+1);
350 memcpy(new_in, block->in, (n_preds+1) * sizeof(new_in[0]));
351 DEL_ARR_F(block->in);
353 block->attr.block.dynamic_ins = false;
356 /* Now, as the block is a finished Firm node, we can optimize it.
357 Since other nodes have been allocated since the block was created
358 we can not free the node on the obstack. Therefore we have to call
360 Unfortunately the optimization does not change a lot, as all allocated
361 nodes refer to the unoptimized node.
362 We can call optimize_in_place_2(), as global cse has no effect on blocks.
364 irn_verify_irg(block, irg);
365 optimize_in_place_2(block);
/* new_d_* variants: operate on the global current_ir_graph and its
 * current block; only valid while the graph is in the construction
 * phase (asserted). */
368 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
370 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
371 return new_rd_Const_long(db, current_ir_graph, mode, value);
/* NOTE(review): the 'ir_entity *ent' parameter line of new_d_simpleSel
 * appears stripped from this extract. */
374 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
377 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
378 return new_rd_Sel(db, current_ir_graph->current_block,
379 store, objptr, 0, NULL, ent);
382 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
385 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
386 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
389 ir_node *new_d_ASM(dbg_info *db, ir_node *mem, int arity, ir_node *in[],
390 ir_asm_constraint *inputs,
391 size_t n_outs, ir_asm_constraint *outputs,
392 size_t n_clobber, ident *clobber[], ident *text)
394 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
395 return new_rd_ASM(db, current_ir_graph->current_block, mem, arity, in,
396 inputs, n_outs, outputs, n_clobber, clobber, text);
/* Construct a remainderless Div ("DivRL"): a regular Div node with the
 * no_remainder attribute set.  Produces mode_T; the result mode and the
 * pin state are stored in the div attribute.
 * NOTE(review): the lines filling the in[] array (mem, left, right) and
 * the final 'return res;' appear stripped from this extract. */
399 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
402 ir_graph *irg = get_Block_irg(block);
409 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
410 res->attr.div.resmode = resmode;
411 res->attr.div.no_remainder = 1;
412 res->attr.div.exc.pin_state = pin_state;
413 irn_verify_irg(res, irg);
414 res = optimize_node(res);
/* DivRL convenience wrappers: _r_ drops debug info, _d_ uses the global
 * current graph/block (construction phase asserted), plain new_DivRL
 * drops both. */
418 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
420 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
423 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
426 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
427 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
431 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
433 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
/* Create an immature block: a Block whose control-flow predecessors are
 * not yet known.  It gets a dynamic in-array (arity -1), is marked not
 * matured, and receives a zeroed graph_arr for per-variable SSA value
 * tracking.  Immature blocks must NOT be optimized; only verified.
 * NOTE(review): the 'ir_node *res;' declaration and the final
 * 'return res;' appear stripped from this extract. */
436 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
440 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
441 /* creates a new dynamic in-array as length of in is -1 */
442 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
444 set_Block_matured(res, 0);
445 res->attr.block.dynamic_ins = true;
446 res->attr.block.irg.irg = irg;
447 res->attr.block.backedge = NULL;
448 res->attr.block.entity = NULL;
450 set_Block_block_visited(res, 0);
452 /* Create and initialize array for Phi-node construction. */
453 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
454 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
456 /* Immature block may not be optimized! */
457 irn_verify_irg(res, irg);
/* immBlock convenience wrappers: without debug info and/or on the global
 * current_ir_graph. */
462 ir_node *new_r_immBlock(ir_graph *irg)
464 return new_rd_immBlock(NULL, irg);
467 ir_node *new_d_immBlock(dbg_info *dbgi)
469 return new_rd_immBlock(dbgi, current_ir_graph);
472 ir_node *new_immBlock(void)
474 return new_rd_immBlock(NULL, current_ir_graph);
/* Append a control-flow predecessor 'jmp' to a not-yet-matured block.
 * The block's in-array grows dynamically (ARR_APP1); index 0 is the
 * block's own slot, so the new predecessor count is ARR_LEN(in) - 1. */
477 void add_immBlock_pred(ir_node *block, ir_node *jmp)
479 int n = ARR_LEN(block->in) - 1;
481 assert(is_Block(block) && "Error: Must be a Block");
482 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
483 assert(is_ir_node(jmp));
485 ARR_APP1(ir_node *, block->in, jmp);
487 hook_set_irn_n(block, n, jmp, NULL);
/* Accessors for the "current block" used by the new_d_*/new_* state-based
 * construction interface.  All require the graph to be in the
 * construction phase; target may be NULL (no current block). */
490 void set_cur_block(ir_node *target)
492 set_r_cur_block(current_ir_graph, target);
495 void set_r_cur_block(ir_graph *irg, ir_node *target)
497 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
498 assert(target == NULL || is_Block(target));
499 assert(target == NULL || get_irn_irg(target) == irg);
500 irg->current_block = target;
503 ir_node *get_r_cur_block(ir_graph *irg)
505 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
506 return irg->current_block;
509 ir_node *get_cur_block(void)
511 return get_r_cur_block(current_ir_graph);
/* Read local variable 'pos' in the current block.  Slot 0 of graph_arr
 * holds the memory/store state, so user variables are shifted by +1. */
514 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
516 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
519 return get_r_value_internal(irg->current_block, pos + 1, mode);
522 ir_node *get_value(int pos, ir_mode *mode)
524 return get_r_value(current_ir_graph, pos, mode);
528 * helper function for guess_mode: recursively look for a definition for
529 * local variable @p pos, returns its mode if found.
/* NOTE(review): local declarations, the NULL-check on 'value', the
 * 'if (mode != NULL) return mode;' inside the loop and the final
 * 'return NULL;' appear stripped from this extract. */
531 static ir_mode *guess_recursively(ir_node *block, int pos)
/* visited-marking terminates the walk on control-flow cycles */
537 if (irn_visited_else_mark(block))
540 /* already have a definition -> we can simply look at its mode */
541 value = block->attr.block.graph_arr[pos];
543 return get_irn_mode(value);
545 /* now we try to guess, by looking at the predecessor blocks */
546 n_preds = get_irn_arity(block);
547 for (i = 0; i < n_preds; ++i) {
548 ir_node *pred_block = get_Block_cfgpred_block(block, i);
549 ir_mode *mode = guess_recursively(pred_block, pos);
554 /* no way to guess */
/* Guess the mode of local variable 'pos': use the definition in the
 * current block if present, otherwise search predecessor blocks with a
 * visited-flag-protected walk (guess_recursively).  pos+1 skips the
 * store slot 0.
 * NOTE(review): the 'ir_mode *mode;' declaration, the NULL-check on
 * 'value' and the final 'return mode;' appear stripped from this
 * extract. */
558 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
560 ir_node *block = irg->current_block;
561 ir_node *value = block->attr.block.graph_arr[pos+1];
564 /* already have a definition -> we can simply look at its mode */
566 return get_irn_mode(value);
568 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
569 inc_irg_visited(irg);
570 mode = guess_recursively(block, pos+1);
571 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
576 ir_mode *ir_guess_mode(int pos)
578 return ir_r_guess_mode(current_ir_graph, pos);
/* Define local variable 'pos' in the current block (slot pos+1, since
 * slot 0 is reserved for the memory/store state). */
581 void set_r_value(ir_graph *irg, int pos, ir_node *value)
583 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
585 assert(pos+1 < irg->n_loc);
586 assert(is_ir_node(value));
587 irg->current_block->attr.block.graph_arr[pos + 1] = value;
590 void set_value(int pos, ir_node *value)
592 set_r_value(current_ir_graph, pos, value);
/* Read the current memory state: it lives in graph_arr slot 0 and always
 * has mode_M. */
595 ir_node *get_r_store(ir_graph *irg)
597 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
598 return get_r_value_internal(irg->current_block, 0, mode_M);
601 ir_node *get_store(void)
603 return get_r_store(current_ir_graph);
/* Set the current memory state (graph_arr slot 0).  When the
 * auto-create-sync option is enabled and the new store is (a Proj of) a
 * non-volatile Load, the memory dependencies are relaxed: either the
 * Load is hoisted above an existing Sync, or a new 2-input Sync merges
 * the two Load memory chains.
 * NOTE(review): the 'if (is_Sync(pred))' guard, the else branch, the
 * in[0]/in[1] assignments before new_r_Sync, and closing lines appear
 * stripped from this extract -- confirm against the full source. */
606 void set_r_store(ir_graph *irg, ir_node *store)
608 ir_node *load, *pload, *pred, *in[2];
610 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
611 /* Beware: due to dead code elimination, a store might become a Bad node even in
612 the construction phase. */
613 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
615 if (get_opt_auto_create_sync()) {
616 /* handle non-volatile Load nodes by automatically creating Sync's */
617 load = skip_Proj(store);
618 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
619 pred = get_Load_mem(load);
622 /* a Load after a Sync: move it up */
623 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
625 set_Load_mem(load, get_memop_mem(mem));
626 add_Sync_pred(pred, store);
629 pload = skip_Proj(pred);
630 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
631 /* a Load after a Load: create a new Sync */
632 set_Load_mem(load, get_Load_mem(pload));
636 store = new_r_Sync(irg->current_block, 2, in);
641 irg->current_block->attr.block.graph_arr[0] = store;
644 void set_store(ir_node *store)
646 set_r_store(current_ir_graph, store);
/* Keep a node alive by attaching it to the graph's End node, so dead
 * node elimination never removes it. */
649 void keep_alive(ir_node *ka)
651 ir_graph *irg = get_irn_irg(ka);
652 add_End_keepalive(get_irg_end(irg), ka);
/* Install the frontend callback used for reads of uninitialized local
 * variables (see default_initialize_local_variable above). */
655 void ir_set_uninitialized_local_variable_func(
656 uninitialized_local_variable_func_t *func)
658 default_initialize_local_variable = func;
/* Finish construction of one graph: mature its end block and clear the
 * construction constraint, after which the new_d_*/state-based API may
 * no longer be used on it. */
661 void irg_finalize_cons(ir_graph *irg)
663 ir_node *end_block = get_irg_end_block(irg);
664 mature_immBlock(end_block);
666 clear_irg_constraints(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION);
/* Finish construction of every graph in the program. */
669 void irp_finalize_cons(void)
672 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
673 irg_finalize_cons(get_irp_irg(i));
/* Plain new_* wrappers: no debug info, operate on the current graph and
 * block via the corresponding new_d_* constructor. */
677 ir_node *new_Const_long(ir_mode *mode, long value)
679 return new_d_Const_long(NULL, mode, value);
682 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
684 return new_d_SymConst(NULL, mode, value, kind);
686 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
688 return new_d_simpleSel(NULL, store, objptr, ent);
690 ir_node *new_ASM(ir_node *mem, int arity, ir_node *in[],
691 ir_asm_constraint *inputs, size_t n_outs,
692 ir_asm_constraint *outputs, size_t n_clobber,
693 ident *clobber[], ident *text)
695 return new_d_ASM(NULL, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
/* Create the graph's Anchor node: one input slot per anchor index
 * (anchor_last+1 in total).  Since new_ir_node forbids NULL inputs, the
 * node initially references itself on every input; real anchor targets
 * are set later.
 * NOTE(review): the 'ir_node *res;'/'size_t i;' declarations, the
 * set-block-to-self line mentioned by the comment, and the final
 * 'return res;' appear stripped from this extract. */
698 ir_node *new_r_Anchor(ir_graph *irg)
700 ir_node *in[anchor_last+1];
703 memset(in, 0, sizeof(in));
704 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last+1, in);
705 res->attr.anchor.irg.irg = irg;
707 /* hack to get get_irn_irg working: set block to ourself and allow
708 * get_Block_irg for anchor */
711 /* we can't have NULL inputs so reference ourselves for now */
712 for (i = 0; i <= (size_t)anchor_last; ++i) {
713 set_irn_n(res, i, res);
719 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
721 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
722 res->attr.block.irg.irg = irg;
723 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
724 set_Block_matured(res, 1);
725 /* Create and initialize array for Phi-node construction. */
726 if (irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION)) {
727 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
728 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
730 irn_verify_irg(res, irg);