2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
/* Frontend-installed callback used when an uninitialized local variable is
 * read; if NULL, get_r_value_internal() falls back to an Unknown node.
 * Set via ir_set_uninitialized_local_variable_func(). */
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
/* Builds an ASM node. The real node inputs are [mem, in[0..arity-1]];
 * constraint and clobber arrays are deep-copied onto the graph's obstack so
 * callers may pass stack-allocated arrays. */
59 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, ir_node *mem,
60 int arity, ir_node *in[], ir_asm_constraint *inputs,
61 size_t n_outs, ir_asm_constraint *outputs, size_t n_clobber,
62 ident *clobber[], ident *text)
64 ir_graph *irg = get_irn_irg(block);
/* one extra input slot in front for the memory dependency.
 * NOTE(review): the "r_in[0] = mem;" assignment is elided in this extract;
 * confirm against the full source */
66 int r_arity = arity+1;
68 NEW_ARR_A(ir_node*, r_in, r_arity);
70 memcpy(&r_in[1], in, arity*sizeof(ir_node*));
/* ASM produces a tuple (mode_T): memory plus one value per output */
72 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, r_arity, r_in);
74 struct obstack *const obst = get_irg_obstack(irg);
/* asm statements may have side effects: keep the node pinned */
75 res->attr.assem.pin_state = op_pin_state_pinned;
76 res->attr.assem.input_constraints = NEW_ARR_D(ir_asm_constraint, obst, arity);
77 res->attr.assem.output_constraints = NEW_ARR_D(ir_asm_constraint, obst, n_outs);
78 res->attr.assem.clobbers = NEW_ARR_D(ident*, obst, n_clobber);
79 res->attr.assem.text = text;
/* copy the caller's constraint/clobber data into the obstack arrays */
81 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
82 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
83 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
85 irn_verify_irg(res, irg);
86 res = optimize_node(res);
/* Sel without array indices: plain member selection of entity 'ent'
 * (forwards to new_rd_Sel with an empty index array). */
90 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
91 ir_node *objptr, ir_entity *ent)
93 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
/* Creates a SymConst node. It is always placed in the start block of the
 * graph (like other constants, it is position independent). */
96 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
97 symconst_symbol value, symconst_kind symkind)
99 ir_node *block = get_irg_start_block(irg);
100 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
/* symkind selects how 'value' is interpreted (entity address/offset,
 * type size/alignment, ...) */
101 res->attr.symc.kind = symkind;
102 res->attr.symc.sym = value;
104 irn_verify_irg(res, irg);
105 res = optimize_node(res);
/* Convenience constructors wrapping new_rd_SymConst for the common
 * symbol kinds. */

/* SymConst for the address of entity 'symbol'. */
109 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
112 sym.entity_p = symbol;
113 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);

/* SymConst for the offset of entity 'symbol' within its owner. */
116 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
119 sym.entity_p = symbol;
120 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);

/* SymConst for the size of type 'symbol'. */
123 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
127 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);

/* SymConst for the alignment of type 'symbol'. */
130 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
134 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
/* new_r_* variants: same constructors as new_rd_*, but without debug info
 * (dbg_info argument is NULL). */
137 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
139 return new_rd_Const_long(NULL, irg, mode, value);

141 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
142 symconst_kind symkind)
144 return new_rd_SymConst(NULL, irg, mode, value, symkind);

146 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
149 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);

151 ir_node *new_r_ASM(ir_node *block, ir_node *mem,
152 int arity, ir_node *in[], ir_asm_constraint *inputs,
153 size_t n_outs, ir_asm_constraint *outputs,
154 size_t n_clobber, ident *clobber[], ident *text)
156 return new_rd_ASM(NULL, block, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
159 /** Creates a Phi node with 0 predecessors. */
160 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
161 ir_mode *mode, int pos)
163 ir_graph *irg = get_irn_irg(block);
164 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
/* remember the local-variable slot so the real arguments can be filled in
 * later by set_phi_arguments() (e.g. when the block matures) */
165 res->attr.phi.u.pos = pos;
166 irn_verify_irg(res, irg);
/* forward declaration: value lookup and Phi construction are mutually
 * recursive */
170 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);

/* Removes a Phi whose operands all reduce to a single value (possibly mixed
 * with self references) by exchanging the Phi with that value. */
172 static void try_remove_unnecessary_phi(ir_node *phi)
174 ir_node *phi_value = NULL;
175 int arity = get_irn_arity(phi);
178 /* see if all inputs are either pointing to a single value or
179 * are self references */
180 for (i = 0; i < arity; ++i) {
181 ir_node *in = get_irn_n(phi, i);
/* NOTE(review): the continue-on-self-reference / continue-on-known-value
 * lines are elided in this extract; confirm against the full source */
186 /** found a different value from the one we already found, can't remove
188 if (phi_value != NULL)
/* only self references found: nothing to replace the Phi with */
192 if (phi_value == NULL)
195 /* if we're here then all phi inputs have been either phi_value
196 * or self-references, we can replace the phi by phi_value.
197 * We do this with an Id-node */
198 exchange(phi, phi_value);
200 /* recursively check phi_value, because it could be that we were the last
201 * phi-node in a loop-body. Then our argument is an unnecessary phi in
202 * the loop header which can be eliminated now */
203 if (is_Phi(phi_value)) {
204 try_remove_unnecessary_phi(phi_value);
209 * Computes the predecessors for the real phi node, and then
210 * allocates and returns this node. The routine called to allocate the
211 * node might optimize it away and return a real value.
212 * This function must be called with an in-array of proper size.
214 static ir_node *set_phi_arguments(ir_node *phi, int pos)
216 ir_node *block = get_nodes_block(phi);
217 ir_graph *irg = get_irn_irg(block);
/* a Phi has exactly one operand per control-flow predecessor of its block */
218 int arity = get_irn_arity(block);
219 ir_node **in = ALLOCAN(ir_node*, arity);
220 ir_mode *mode = get_irn_mode(phi);
223 /* This loop goes to all predecessor blocks of the block the Phi node
224 is in and there finds the operands of the Phi node by calling
225 get_r_value_internal. */
226 for (i = 0; i < arity; ++i) {
227 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
/* dead control-flow predecessor: the operand is Bad */
229 if (is_Bad(cfgpred)) {
230 value = new_r_Bad(irg, mode);
232 value = get_r_value_internal(cfgpred, pos, mode);
237 phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), arity);
238 set_irn_in(phi, arity, in);
240 irn_verify_irg(phi, irg);
/* the completed Phi may turn out to be redundant; try to fold it away */
242 try_remove_unnecessary_phi(phi);
247 * This function returns the last definition of a value. In case
248 * this value was last defined in a previous block, Phi nodes are
249 * inserted. If the part of the firm graph containing the definition
250 * is not yet constructed, a dummy Phi node is returned.
252 * @param block the current block
253 * @param pos the value number of the value searched
254 * @param mode the mode of this value (needed for Phi construction)
256 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
258 ir_node *res = block->attr.block.graph_arr[pos];
259 ir_graph *irg = get_irn_irg(block);
/* NOTE(review): the early return for a cached definition (res != NULL) is
 * elided in this extract; confirm against the full source */
263 /* in a matured block we can immediately determine the phi arguments */
264 if (get_Block_matured(block)) {
265 int arity = get_irn_arity(block);
266 /* no predecessors: use unknown value */
268 if (block == get_irg_start_block(irg)) {
/* let the frontend materialize a language-specific value for reading
 * an uninitialized variable, if such a callback is installed */
269 if (default_initialize_local_variable != NULL) {
270 ir_node *rem = get_r_cur_block(irg);
271 set_r_cur_block(irg, block);
/* pos 0 holds the memory state, hence -1 to get the variable number */
272 res = default_initialize_local_variable(irg, mode, pos - 1);
273 set_r_cur_block(irg, rem);
275 res = new_r_Unknown(irg, mode);
278 /* unreachable block, use Bad */
279 res = new_r_Bad(irg, mode);
281 /* one predecessor just use its value */
282 } else if (arity == 1) {
283 ir_node *cfgpred = get_Block_cfgpred(block, 0);
284 if (is_Bad(cfgpred)) {
285 res = new_r_Bad(irg, mode);
287 ir_node *cfgpred_block = get_nodes_block(cfgpred);
288 res = get_r_value_internal(cfgpred_block, pos, mode);
290 /* multiple predecessors construct Phi */
292 res = new_rd_Phi0(NULL, block, mode, pos);
293 /* enter phi0 into our variable value table to break cycles
294 * arising from set_phi_arguments */
295 block->attr.block.graph_arr[pos] = res;
296 res = set_phi_arguments(res, pos);
299 /* in case of immature block we have to keep a Phi0 */
300 res = new_rd_Phi0(NULL, block, mode, pos);
301 /* enqueue phi so we can set arguments once the block matures */
302 res->attr.phi.next = block->attr.block.phis;
303 block->attr.block.phis = res;
/* cache the (possibly preliminary) definition for later lookups */
305 block->attr.block.graph_arr[pos] = res;
/* Finalizes an immature block: fixes its predecessor array, completes all
 * queued Phi0 nodes, and marks the block matured. */
309 void mature_immBlock(ir_node *block)
/* maturing an already matured block is a no-op */
316 if (get_Block_matured(block))
319 irg = get_irn_irg(block);
/* the dynamic in-array has one leading slot before the real predecessors,
 * hence the -1 (see add_immBlock_pred, which uses the same layout) */
320 n_preds = ARR_LEN(block->in) - 1;
321 /* Fix block parameters */
322 block->attr.block.backedge = new_backedge_arr(get_irg_obstack(irg), n_preds);
324 /* Traverse a chain of Phi nodes attached to this block and mature
326 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
328 int pos = phi->attr.phi.u.pos;
/* read the link first: set_phi_arguments may exchange 'phi' */
330 next = phi->attr.phi.next;
331 new_value = set_phi_arguments(phi, pos);
/* only update the variable table if the slot still points at this phi */
332 if (block->attr.block.graph_arr[pos] == phi) {
333 block->attr.block.graph_arr[pos] = new_value;
337 set_Block_matured(block, 1);
339 /* create final in-array for the block */
340 if (block->attr.block.dynamic_ins) {
/* move the flexible in-array onto the obstack and free the dynamic one */
341 ir_node **const new_in = DUP_ARR_D(ir_node*, get_irg_obstack(irg), block->in);
342 DEL_ARR_F(block->in);
344 block->attr.block.dynamic_ins = false;
347 /* Now, as the block is a finished Firm node, we can optimize it.
348 Since other nodes have been allocated since the block was created
349 we can not free the node on the obstack. Therefore we have to call
351 Unfortunately the optimization does not change a lot, as all allocated
352 nodes refer to the unoptimized node.
353 We can call optimize_in_place_2(), as global cse has no effect on blocks.
355 irn_verify_irg(block, irg);
356 optimize_in_place_2(block);
/* new_d_* variants: operate on current_ir_graph / its current block.
 * Only legal while the graph is in the construction phase (asserted). */
359 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
361 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
362 return new_rd_Const_long(db, current_ir_graph, mode, value);

365 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
368 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
369 return new_rd_Sel(db, current_ir_graph->current_block,
370 store, objptr, 0, NULL, ent);

373 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
376 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
377 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);

380 ir_node *new_d_ASM(dbg_info *db, ir_node *mem, int arity, ir_node *in[],
381 ir_asm_constraint *inputs,
382 size_t n_outs, ir_asm_constraint *outputs,
383 size_t n_clobber, ident *clobber[], ident *text)
385 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
386 return new_rd_ASM(db, current_ir_graph->current_block, mem, arity, in,
387 inputs, n_outs, outputs, n_clobber, clobber, text);
/* Builds a Div node with the no_remainder attribute set (a division whose
 * remainder result is not used/produced). */
390 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
393 ir_graph *irg = get_Block_irg(block);
/* NOTE(review): the in[] = { irn_mem, irn_left, irn_right } setup lines are
 * elided in this extract; confirm against the full source */
400 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
401 res->attr.div.resmode = resmode;
/* the only difference to a plain Div constructor */
402 res->attr.div.no_remainder = 1;
/* pin state controls whether the (possibly trapping) Div may float */
403 res->attr.div.exc.pin_state = pin_state;
404 irn_verify_irg(res, irg);
405 res = optimize_node(res);
/* Convenience wrappers for new_rd_DivRL (no dbg_info / current graph). */
409 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
411 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);

414 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
417 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
418 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);

422 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
424 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
/* Creates an immature block: its predecessors are added incrementally with
 * add_immBlock_pred() and it must be finished with mature_immBlock(). */
427 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
431 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
432 /* creates a new dynamic in-array as length of in is -1 */
433 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
435 set_Block_matured(res, 0);
436 res->attr.block.dynamic_ins = true;
437 res->attr.block.irg.irg = irg;
438 res->attr.block.backedge = NULL;
439 res->attr.block.entity = NULL;
441 set_Block_block_visited(res, 0);
443 /* Create and initialize array for Phi-node construction. */
/* n_loc slots: local variables plus slot 0 for the memory state */
444 res->attr.block.graph_arr = NEW_ARR_DZ(ir_node*, get_irg_obstack(irg), irg->n_loc);
446 /* Immature block may not be optimized! */
447 irn_verify_irg(res, irg);
/* Convenience wrappers for new_rd_immBlock. */
452 ir_node *new_r_immBlock(ir_graph *irg)
454 return new_rd_immBlock(NULL, irg);

457 ir_node *new_d_immBlock(dbg_info *dbgi)
459 return new_rd_immBlock(dbgi, current_ir_graph);

462 ir_node *new_immBlock(void)
464 return new_rd_immBlock(NULL, current_ir_graph);
/* Appends control-flow predecessor 'jmp' to the (still immature) block. */
467 void add_immBlock_pred(ir_node *block, ir_node *jmp)
/* in[] has one leading slot before the predecessors, hence the -1: 'n' is
 * the index the new predecessor will get */
469 int n = ARR_LEN(block->in) - 1;
471 assert(is_Block(block) && "Error: Must be a Block");
472 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
473 assert(is_ir_node(jmp));
475 ARR_APP1(ir_node *, block->in, jmp);
/* notify observers (debuggers/statistics) about the new edge */
477 hook_set_irn_n(block, n, jmp, NULL);
/* Accessors for the "current block" used by the new_d_*/new_* constructors
 * and for local-variable values; all require the construction phase. */
480 void set_cur_block(ir_node *target)
482 set_r_cur_block(current_ir_graph, target);

485 void set_r_cur_block(ir_graph *irg, ir_node *target)
487 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
/* NULL is allowed and means "no current block" */
488 assert(target == NULL || is_Block(target));
489 assert(target == NULL || get_irn_irg(target) == irg);
490 irg->current_block = target;

493 ir_node *get_r_cur_block(ir_graph *irg)
495 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
496 return irg->current_block;

499 ir_node *get_cur_block(void)
501 return get_r_cur_block(current_ir_graph);

/* Returns the current value of local variable 'pos' (inserting Phis as
 * needed). */
504 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
506 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
/* +1: slot 0 of graph_arr holds the memory state */
509 return get_r_value_internal(irg->current_block, pos + 1, mode);

512 ir_node *get_value(int pos, ir_mode *mode)
514 return get_r_value(current_ir_graph, pos, mode);
518 * helper function for guess_mode: recursively look for a definition for
519 * local variable @p pos, returns its mode if found.
521 static ir_mode *guess_recursively(ir_node *block, int pos)
/* stop on control-flow cycles: each block is inspected at most once */
527 if (irn_visited_else_mark(block))
530 /* already have a definition -> we can simply look at its mode */
531 value = block->attr.block.graph_arr[pos];
533 return get_irn_mode(value);
535 /* now we try to guess, by looking at the predecessor blocks */
536 n_preds = get_irn_arity(block);
537 for (i = 0; i < n_preds; ++i) {
538 ir_node *pred_block = get_Block_cfgpred_block(block, i);
539 ir_mode *mode = guess_recursively(pred_block, pos);
544 /* no way to guess */
/* Guesses the mode of local variable 'pos' by looking at its current
 * definition, or failing that, at definitions in predecessor blocks. */
548 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
550 ir_node *block = irg->current_block;
/* +1: slot 0 of graph_arr holds the memory state */
551 ir_node *value = block->attr.block.graph_arr[pos+1];
554 /* already have a definition -> we can simply look at its mode */
556 return get_irn_mode(value);
/* walk backwards through the CFG, using the visited flag to avoid cycles */
558 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
559 inc_irg_visited(irg);
560 mode = guess_recursively(block, pos+1);
561 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
566 ir_mode *ir_guess_mode(int pos)
568 return ir_r_guess_mode(current_ir_graph, pos);

/* Records 'value' as the current definition of local variable 'pos'. */
571 void set_r_value(ir_graph *irg, int pos, ir_node *value)
573 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
575 assert(pos+1 < irg->n_loc);
576 assert(is_ir_node(value));
/* +1: slot 0 of graph_arr holds the memory state */
577 irg->current_block->attr.block.graph_arr[pos + 1] = value;

580 void set_value(int pos, ir_node *value)
582 set_r_value(current_ir_graph, pos, value);

/* Returns the current memory state (stored in graph_arr slot 0). */
585 ir_node *get_r_store(ir_graph *irg)
587 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
588 return get_r_value_internal(irg->current_block, 0, mode_M);

591 ir_node *get_store(void)
593 return get_r_store(current_ir_graph);
/* Sets the current memory state. With the auto-create-Sync option enabled,
 * consecutive non-volatile Loads are rearranged to hang off a common memory
 * predecessor via Sync nodes, exposing their independence. */
596 void set_r_store(ir_graph *irg, ir_node *store)
598 ir_node *load, *pload, *pred, *in[2];
600 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
601 /* Beware: due to dead code elimination, a store might become a Bad node even in
602 the construction phase. */
603 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
605 if (get_opt_auto_create_sync()) {
606 /* handle non-volatile Load nodes by automatically creating Sync's */
607 load = skip_Proj(store);
608 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
609 pred = get_Load_mem(load);
/* NOTE(review): the is_Sync(pred) test guarding this branch is elided in
 * this extract; confirm against the full source */
612 /* a Load after a Sync: move it up */
613 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
615 set_Load_mem(load, get_memop_mem(mem));
616 add_Sync_pred(pred, store);
/* otherwise look through a Proj at the memory predecessor */
619 pload = skip_Proj(pred);
620 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
621 /* a Load after a Load: create a new Sync */
622 set_Load_mem(load, get_Load_mem(pload));
/* NOTE(review): the in[0]/in[1] assignments feeding the Sync are elided in
 * this extract */
626 store = new_r_Sync(irg->current_block, 2, in);
/* slot 0 of graph_arr holds the current memory state */
631 irg->current_block->attr.block.graph_arr[0] = store;
634 void set_store(ir_node *store)
636 set_r_store(current_ir_graph, store);

/* Keeps 'ka' alive by attaching it to the End node of its graph. */
639 void keep_alive(ir_node *ka)
641 ir_graph *irg = get_irn_irg(ka);
642 add_End_keepalive(get_irg_end(irg), ka);

/* Installs the frontend callback for reads of uninitialized locals
 * (see default_initialize_local_variable). */
645 void ir_set_uninitialized_local_variable_func(
646 uninitialized_local_variable_func_t *func)
648 default_initialize_local_variable = func;

/* Ends the construction phase of one graph: matures the end block and
 * drops the construction constraint. */
651 void irg_finalize_cons(ir_graph *irg)
653 ir_node *end_block = get_irg_end_block(irg);
654 mature_immBlock(end_block);
656 clear_irg_constraints(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION);

/* Ends the construction phase of every graph in the program. */
659 void irp_finalize_cons(void)
662 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
663 irg_finalize_cons(get_irp_irg(i));

/* new_* variants: no dbg_info, current graph/block. */
667 ir_node *new_Const_long(ir_mode *mode, long value)
669 return new_d_Const_long(NULL, mode, value);

672 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
674 return new_d_SymConst(NULL, mode, value, kind);

676 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
678 return new_d_simpleSel(NULL, store, objptr, ent);

680 ir_node *new_ASM(ir_node *mem, int arity, ir_node *in[],
681 ir_asm_constraint *inputs, size_t n_outs,
682 ir_asm_constraint *outputs, size_t n_clobber,
683 ident *clobber[], ident *text)
685 return new_d_ASM(NULL, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
/* Creates the Anchor node of a graph: a pseudo node with one input slot per
 * anchor kind, initially all pointing at the Anchor itself. */
688 ir_node *new_r_Anchor(ir_graph *irg)
690 ir_node *in[anchor_last+1];
693 memset(in, 0, sizeof(in));
/* create with 0 inputs first; the real in-array is installed below */
694 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, 0, NULL);
695 res->attr.anchor.irg.irg = irg;
697 /* hack to get get_irn_irg in set_irn_in() working */
700 /* we can't have NULL inputs so reference ourselves for now */
701 for (i = 0; i <= (size_t)anchor_last; ++i) {
704 set_irn_in(res, anchor_last+1, in);
709 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
711 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
712 res->attr.block.irg.irg = irg;
713 res->attr.block.backedge = new_backedge_arr(get_irg_obstack(irg), arity);
714 set_Block_matured(res, 1);
715 /* Create and initialize array for Phi-node construction. */
716 if (irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION)) {
717 res->attr.block.graph_arr = NEW_ARR_DZ(ir_node*, get_irg_obstack(irg), irg->n_loc);
719 irn_verify_irg(res, irg);