2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
59 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, ir_node *mem,
60 int arity, ir_node *in[], ir_asm_constraint *inputs,
61 size_t n_outs, ir_asm_constraint *outputs, size_t n_clobber,
62 ident *clobber[], ident *text)
64 ir_graph *irg = get_irn_irg(block);
66 int r_arity = arity+1;
68 NEW_ARR_A(ir_node*, r_in, r_arity);
70 memcpy(&r_in[1], in, arity*sizeof(ir_node*));
72 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, r_arity, r_in);
74 struct obstack *const obst = get_irg_obstack(irg);
75 res->attr.assem.pin_state = op_pin_state_pinned;
76 res->attr.assem.input_constraints = NEW_ARR_D(ir_asm_constraint, obst, arity);
77 res->attr.assem.output_constraints = NEW_ARR_D(ir_asm_constraint, obst, n_outs);
78 res->attr.assem.clobbers = NEW_ARR_D(ident*, obst, n_clobber);
79 res->attr.assem.text = text;
81 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
82 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
83 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
85 irn_verify_irg(res, irg);
86 res = optimize_node(res);
90 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
91 ir_node *objptr, ir_entity *ent)
93 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
96 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
97 symconst_symbol value, symconst_kind symkind)
99 ir_node *block = get_irg_start_block(irg);
100 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
101 res->attr.symc.kind = symkind;
102 res->attr.symc.sym = value;
104 irn_verify_irg(res, irg);
105 res = optimize_node(res);
109 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
112 sym.entity_p = symbol;
113 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
116 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
119 sym.entity_p = symbol;
120 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
123 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
127 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
130 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
134 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
137 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
139 return new_rd_Const_long(NULL, irg, mode, value);
141 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
142 symconst_kind symkind)
144 return new_rd_SymConst(NULL, irg, mode, value, symkind);
146 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
149 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
151 ir_node *new_r_ASM(ir_node *block, ir_node *mem,
152 int arity, ir_node *in[], ir_asm_constraint *inputs,
153 size_t n_outs, ir_asm_constraint *outputs,
154 size_t n_clobber, ident *clobber[], ident *text)
156 return new_rd_ASM(NULL, block, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
159 /** Creates a Phi node with 0 predecessors. */
160 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
161 ir_mode *mode, int pos)
163 ir_graph *irg = get_irn_irg(block);
164 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
165 res->attr.phi.u.pos = pos;
166 irn_verify_irg(res, irg);
170 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
172 static void try_remove_unnecessary_phi(ir_node *phi)
174 ir_node *phi_value = NULL;
175 int arity = get_irn_arity(phi);
178 /* see if all inputs are either pointing to a single value or
179 * are self references */
180 for (i = 0; i < arity; ++i) {
181 ir_node *in = get_irn_n(phi, i);
186 /** found a different value from the one we already found, can't remove
188 if (phi_value != NULL)
192 if (phi_value == NULL)
195 /* if we're here then all phi inputs have been either phi_value
196 * or self-references, we can replace the phi by phi_value.
197 * We do this with an Id-node */
198 exchange(phi, phi_value);
200 /* recursively check phi_value, because it could be that we were the last
201 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
202 * the loop header which can be eliminated now */
203 if (is_Phi(phi_value)) {
204 try_remove_unnecessary_phi(phi_value);
209 * Computes the predecessors for the real phi node, and then
210 * allocates and returns this node. The routine called to allocate the
211 * node might optimize it away and return a real value.
212 * This function must be called with an in-array of proper size.
214 static ir_node *set_phi_arguments(ir_node *phi, int pos)
216 ir_node *block = get_nodes_block(phi);
217 ir_graph *irg = get_irn_irg(block);
218 int arity = get_irn_arity(block);
219 ir_node **in = ALLOCAN(ir_node*, arity);
220 ir_mode *mode = get_irn_mode(phi);
223 /* This loop goes to all predecessor blocks of the block the Phi node
224 is in and there finds the operands of the Phi node by calling
225 get_r_value_internal. */
226 for (i = 0; i < arity; ++i) {
227 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
229 if (is_Bad(cfgpred)) {
230 value = new_r_Bad(irg, mode);
232 value = get_r_value_internal(cfgpred, pos, mode);
237 phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), arity);
238 set_irn_in(phi, arity, in);
240 irn_verify_irg(phi, irg);
242 try_remove_unnecessary_phi(phi);
247 * This function returns the last definition of a value. In case
248 * this value was last defined in a previous block, Phi nodes are
249 * inserted. If the part of the firm graph containing the definition
250 * is not yet constructed, a dummy Phi node is returned.
252 * @param block the current block
253 * @param pos the value number of the value searched
254 * @param mode the mode of this value (needed for Phi construction)
256 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
258 ir_node *res = block->attr.block.graph_arr[pos];
259 ir_graph *irg = get_irn_irg(block);
263 /* in a matured block we can immediately determine the phi arguments */
264 if (get_Block_matured(block)) {
265 int arity = get_irn_arity(block);
266 /* no predecessors: use unknown value */
268 if (block == get_irg_start_block(irg)) {
269 if (default_initialize_local_variable != NULL) {
270 ir_node *rem = get_r_cur_block(irg);
271 set_r_cur_block(irg, block);
272 res = default_initialize_local_variable(irg, mode, pos - 1);
273 set_r_cur_block(irg, rem);
275 res = new_r_Unknown(irg, mode);
278 /* unreachable block, use Bad */
279 res = new_r_Bad(irg, mode);
281 /* one predecessor just use its value */
282 } else if (arity == 1) {
283 ir_node *cfgpred = get_Block_cfgpred(block, 0);
284 if (is_Bad(cfgpred)) {
285 res = new_r_Bad(irg, mode);
287 ir_node *cfgpred_block = get_nodes_block(cfgpred);
288 res = get_r_value_internal(cfgpred_block, pos, mode);
290 /* multiple predecessors construct Phi */
292 res = new_rd_Phi0(NULL, block, mode, pos);
293 /* enter phi0 into our variable value table to break cycles
294 * arising from set_phi_arguments */
295 block->attr.block.graph_arr[pos] = res;
296 res = set_phi_arguments(res, pos);
299 /* in case of immature block we have to keep a Phi0 */
300 res = new_rd_Phi0(NULL, block, mode, pos);
301 /* enqueue phi so we can set arguments once the block matures */
302 res->attr.phi.next = block->attr.block.phis;
303 block->attr.block.phis = res;
305 block->attr.block.graph_arr[pos] = res;
309 void mature_immBlock(ir_node *block)
316 assert(is_Block(block));
317 if (get_Block_matured(block))
320 irg = get_irn_irg(block);
321 n_preds = ARR_LEN(block->in) - 1;
322 /* Fix block parameters */
323 block->attr.block.backedge = new_backedge_arr(get_irg_obstack(irg), n_preds);
325 /* Traverse a chain of Phi nodes attached to this block and mature
327 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
329 int pos = phi->attr.phi.u.pos;
331 next = phi->attr.phi.next;
332 new_value = set_phi_arguments(phi, pos);
333 if (block->attr.block.graph_arr[pos] == phi) {
334 block->attr.block.graph_arr[pos] = new_value;
338 set_Block_matured(block, 1);
340 /* create final in-array for the block */
341 if (block->attr.block.dynamic_ins) {
342 ir_node **const new_in = DUP_ARR_D(ir_node*, get_irg_obstack(irg), block->in);
343 DEL_ARR_F(block->in);
345 block->attr.block.dynamic_ins = false;
348 /* Now, as the block is a finished Firm node, we can optimize it.
349 Since other nodes have been allocated since the block was created
350 we can not free the node on the obstack. Therefore we have to call
352 Unfortunately the optimization does not change a lot, as all allocated
353 nodes refer to the unoptimized node.
354 We can call optimize_in_place_2(), as global cse has no effect on blocks.
356 irn_verify_irg(block, irg);
357 optimize_in_place_2(block);
360 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
362 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
363 return new_rd_Const_long(db, current_ir_graph, mode, value);
366 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
369 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
370 return new_rd_Sel(db, current_ir_graph->current_block,
371 store, objptr, 0, NULL, ent);
374 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
377 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
378 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
381 ir_node *new_d_ASM(dbg_info *db, ir_node *mem, int arity, ir_node *in[],
382 ir_asm_constraint *inputs,
383 size_t n_outs, ir_asm_constraint *outputs,
384 size_t n_clobber, ident *clobber[], ident *text)
386 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
387 return new_rd_ASM(db, current_ir_graph->current_block, mem, arity, in,
388 inputs, n_outs, outputs, n_clobber, clobber, text);
391 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
394 ir_graph *irg = get_Block_irg(block);
401 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
402 res->attr.div.resmode = resmode;
403 res->attr.div.no_remainder = 1;
404 res->attr.div.exc.pin_state = pin_state;
405 irn_verify_irg(res, irg);
406 res = optimize_node(res);
410 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
412 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
415 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
418 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
419 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
423 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
425 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
428 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
432 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
433 /* creates a new dynamic in-array as length of in is -1 */
434 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
436 set_Block_matured(res, 0);
437 res->attr.block.dynamic_ins = true;
438 res->attr.block.irg.irg = irg;
439 res->attr.block.backedge = NULL;
440 res->attr.block.entity = NULL;
442 set_Block_block_visited(res, 0);
444 /* Create and initialize array for Phi-node construction. */
445 res->attr.block.graph_arr = NEW_ARR_DZ(ir_node*, get_irg_obstack(irg), irg->n_loc);
447 /* Immature block may not be optimized! */
448 irn_verify_irg(res, irg);
453 ir_node *new_r_immBlock(ir_graph *irg)
455 return new_rd_immBlock(NULL, irg);
458 ir_node *new_d_immBlock(dbg_info *dbgi)
460 return new_rd_immBlock(dbgi, current_ir_graph);
463 ir_node *new_immBlock(void)
465 return new_rd_immBlock(NULL, current_ir_graph);
468 void add_immBlock_pred(ir_node *block, ir_node *jmp)
470 int n = ARR_LEN(block->in) - 1;
472 assert(is_Block(block) && "Error: Must be a Block");
473 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
474 assert(is_ir_node(jmp));
476 ARR_APP1(ir_node *, block->in, jmp);
478 hook_set_irn_n(block, n, jmp, NULL);
481 void set_cur_block(ir_node *target)
483 set_r_cur_block(current_ir_graph, target);
486 void set_r_cur_block(ir_graph *irg, ir_node *target)
488 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
489 assert(target == NULL || is_Block(target));
490 assert(target == NULL || get_irn_irg(target) == irg);
491 irg->current_block = target;
494 ir_node *get_r_cur_block(ir_graph *irg)
496 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
497 return irg->current_block;
500 ir_node *get_cur_block(void)
502 return get_r_cur_block(current_ir_graph);
505 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
507 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
510 return get_r_value_internal(irg->current_block, pos + 1, mode);
513 ir_node *get_value(int pos, ir_mode *mode)
515 return get_r_value(current_ir_graph, pos, mode);
519 * helper function for guess_mode: recursively look for a definition for
520 * local variable @p pos, returns its mode if found.
522 static ir_mode *guess_recursively(ir_node *block, int pos)
528 if (irn_visited_else_mark(block))
531 /* already have a defintion -> we can simply look at its mode */
532 value = block->attr.block.graph_arr[pos];
534 return get_irn_mode(value);
536 /* now we try to guess, by looking at the predecessor blocks */
537 n_preds = get_irn_arity(block);
538 for (i = 0; i < n_preds; ++i) {
539 ir_node *pred_block = get_Block_cfgpred_block(block, i);
540 ir_mode *mode = guess_recursively(pred_block, pos);
545 /* no way to guess */
549 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
551 ir_node *block = irg->current_block;
552 ir_node *value = block->attr.block.graph_arr[pos+1];
555 /* already have a defintion -> we can simply look at its mode */
557 return get_irn_mode(value);
559 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
560 inc_irg_visited(irg);
561 mode = guess_recursively(block, pos+1);
562 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
567 ir_mode *ir_guess_mode(int pos)
569 return ir_r_guess_mode(current_ir_graph, pos);
572 void set_r_value(ir_graph *irg, int pos, ir_node *value)
574 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
576 assert(pos+1 < irg->n_loc);
577 assert(is_ir_node(value));
578 irg->current_block->attr.block.graph_arr[pos + 1] = value;
581 void set_value(int pos, ir_node *value)
583 set_r_value(current_ir_graph, pos, value);
586 ir_node *get_r_store(ir_graph *irg)
588 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
589 return get_r_value_internal(irg->current_block, 0, mode_M);
592 ir_node *get_store(void)
594 return get_r_store(current_ir_graph);
597 void set_r_store(ir_graph *irg, ir_node *store)
599 ir_node *load, *pload, *pred, *in[2];
601 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
602 /* Beware: due to dead code elimination, a store might become a Bad node even in
603 the construction phase. */
604 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
606 if (get_opt_auto_create_sync()) {
607 /* handle non-volatile Load nodes by automatically creating Sync's */
608 load = skip_Proj(store);
609 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
610 pred = get_Load_mem(load);
613 /* a Load after a Sync: move it up */
614 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
616 set_Load_mem(load, get_memop_mem(mem));
617 add_Sync_pred(pred, store);
620 pload = skip_Proj(pred);
621 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
622 /* a Load after a Load: create a new Sync */
623 set_Load_mem(load, get_Load_mem(pload));
627 store = new_r_Sync(irg->current_block, 2, in);
632 irg->current_block->attr.block.graph_arr[0] = store;
635 void set_store(ir_node *store)
637 set_r_store(current_ir_graph, store);
640 void keep_alive(ir_node *ka)
642 ir_graph *irg = get_irn_irg(ka);
643 add_End_keepalive(get_irg_end(irg), ka);
646 void ir_set_uninitialized_local_variable_func(
647 uninitialized_local_variable_func_t *func)
649 default_initialize_local_variable = func;
652 void irg_finalize_cons(ir_graph *irg)
654 ir_node *end_block = get_irg_end_block(irg);
655 mature_immBlock(end_block);
657 clear_irg_constraints(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION);
/** Finishes construction of all graphs in the program. */
void irp_finalize_cons(void)
{
	size_t i, n;
	for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
		irg_finalize_cons(get_irp_irg(i));
	}
}
668 ir_node *new_Const_long(ir_mode *mode, long value)
670 return new_d_Const_long(NULL, mode, value);
673 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
675 return new_d_SymConst(NULL, mode, value, kind);
677 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
679 return new_d_simpleSel(NULL, store, objptr, ent);
681 ir_node *new_ASM(ir_node *mem, int arity, ir_node *in[],
682 ir_asm_constraint *inputs, size_t n_outs,
683 ir_asm_constraint *outputs, size_t n_clobber,
684 ident *clobber[], ident *text)
686 return new_d_ASM(NULL, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
689 ir_node *new_r_Anchor(ir_graph *irg)
691 ir_node *in[anchor_last+1];
694 memset(in, 0, sizeof(in));
695 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, 0, NULL);
696 res->attr.anchor.irg.irg = irg;
698 /* hack to get get_irn_irg in set_irn_in() working */
701 /* we can't have NULL inputs so reference ourselves for now */
702 for (i = 0; i <= (size_t)anchor_last; ++i) {
705 set_irn_in(res, anchor_last+1, in);
710 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
712 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
713 res->attr.block.irg.irg = irg;
714 res->attr.block.backedge = new_backedge_arr(get_irg_obstack(irg), arity);
715 set_Block_matured(res, 1);
716 /* Create and initialize array for Phi-node construction. */
717 if (irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION)) {
718 res->attr.block.graph_arr = NEW_ARR_DZ(ir_node*, get_irg_obstack(irg), irg->n_loc);
720 irn_verify_irg(res, irg);