2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various irnode constructors. Automatic construction of SSA
24 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
25 * Michael Beck, Matthias Braun
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
/* Called from get_r_value_internal() when an uninitialized local is read in
 * the start block; NULL means "use an Unknown node" instead. Set via
 * ir_set_uninitialized_local_variable_func(). */
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
/**
 * Creates an ASM node. The real in-array is [mem, in[0..arity-1]], i.e. the
 * memory dependency is always input 0, so r_arity == arity + 1.
 * Constraint and clobber arrays are copied onto the graph's obstack so the
 * caller's arrays need not outlive this call.
 */
59 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, ir_node *mem,
60 int arity, ir_node *in[], ir_asm_constraint *inputs,
61 size_t n_outs, ir_asm_constraint *outputs, size_t n_clobber,
62 ident *clobber[], ident *text)
64 ir_graph *irg = get_irn_irg(block);
/* one extra slot for the memory input in front of the data inputs */
66 int r_arity = arity+1;
68 NEW_ARR_A(ir_node*, r_in, r_arity);
/* data inputs start at index 1; slot 0 holds mem (assignment not visible
 * in this view — TODO confirm against full source) */
70 memcpy(&r_in[1], in, arity*sizeof(ir_node*));
/* ASM produces a tuple (memory + one Proj per output) -> mode_T */
72 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, r_arity, r_in);
74 struct obstack *const obst = get_irg_obstack(irg);
75 res->attr.assem.pin_state = op_pin_state_pinned;
/* deep-copy constraint/clobber arrays so they live as long as the graph */
76 res->attr.assem.input_constraints = NEW_ARR_D(ir_asm_constraint, obst, arity);
77 res->attr.assem.output_constraints = NEW_ARR_D(ir_asm_constraint, obst, n_outs);
78 res->attr.assem.clobbers = NEW_ARR_D(ident*, obst, n_clobber);
79 res->attr.assem.text = text;
81 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
82 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
83 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
/* verify before local optimization, as the other constructors do */
85 irn_verify_irg(res, irg);
86 res = optimize_node(res);
90 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
91 ir_node *objptr, ir_entity *ent)
93 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
/**
 * Creates a SymConst node carrying a symbolic value (entity address/offset,
 * type size/alignment, ...). SymConsts are always placed in the start block.
 */
96 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
97 symconst_symbol value, symconst_kind symkind)
/* symbolic constants do not depend on control flow -> start block */
99 ir_node *block = get_irg_start_block(irg);
100 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
101 res->attr.symc.kind = symkind;
102 res->attr.symc.sym = value;
104 irn_verify_irg(res, irg);
/* optimize_node may CSE with an existing identical SymConst */
105 res = optimize_node(res);
109 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
112 sym.entity_p = symbol;
113 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
116 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
119 sym.entity_p = symbol;
120 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
123 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
127 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
130 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
134 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
137 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
139 return new_rd_Const_long(NULL, irg, mode, value);
141 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
142 symconst_kind symkind)
144 return new_rd_SymConst(NULL, irg, mode, value, symkind);
146 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
149 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
151 ir_node *new_r_ASM(ir_node *block, ir_node *mem,
152 int arity, ir_node *in[], ir_asm_constraint *inputs,
153 size_t n_outs, ir_asm_constraint *outputs,
154 size_t n_clobber, ident *clobber[], ident *text)
156 return new_rd_ASM(NULL, block, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
159 /** Creates a Phi node with 0 predecessors. */
160 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
161 ir_mode *mode, int pos)
163 ir_graph *irg = get_irn_irg(block);
164 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
165 res->attr.phi.u.pos = pos;
166 irn_verify_irg(res, irg);
/* forward declaration: mutually recursive with the Phi-construction helpers */
170 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
/**
 * Removes a Phi node whose inputs all reference a single value (or the Phi
 * itself). Such Phis are redundant and can be replaced by that value.
 */
172 static void try_remove_unnecessary_phi(ir_node *phi)
174 ir_node *phi_value = NULL;
175 int arity = get_irn_arity(phi);
178 /* see if all inputs are either pointing to a single value or
179 * are self references */
180 for (i = 0; i < arity; ++i) {
181 ir_node *in = get_irn_n(phi, i);
186 /** found a different value from the one we already found, can't remove
188 if (phi_value != NULL)
/* no non-self input at all: keep the Phi (nothing to replace it with) */
192 if (phi_value == NULL)
195 /* if we're here then all phi inputs have been either phi_value
196 * or self-references, we can replace the phi by phi_value.
197 * We do this with an Id-node */
198 exchange(phi, phi_value);
200 /* recursively check phi_value, because it could be that we were the last
201 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
202 * the loop header which can be eliminated now */
203 if (is_Phi(phi_value)) {
204 try_remove_unnecessary_phi(phi_value);
209 * Computes the predecessors for the real phi node, and then
210 * allocates and returns this node. The routine called to allocate the
211 * node might optimize it away and return a real value.
212 * This function must be called with an in-array of proper size.
214 static ir_node *set_phi_arguments(ir_node *phi, int pos)
216 ir_node *block = get_nodes_block(phi);
217 ir_graph *irg = get_irn_irg(block);
/* the Phi gets one input per control-flow predecessor of its block */
218 int arity = get_irn_arity(block);
219 ir_node **in = ALLOCAN(ir_node*, arity);
220 ir_mode *mode = get_irn_mode(phi);
223 /* This loop goes to all predecessor blocks of the block the Phi node
224 is in and there finds the operands of the Phi node by calling
225 get_r_value_internal. */
226 for (i = 0; i < arity; ++i) {
227 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
/* a Bad predecessor contributes a Bad value (unreachable path) */
229 if (is_Bad(cfgpred)) {
230 value = new_r_Bad(irg, mode);
232 value = get_r_value_internal(cfgpred, pos, mode);
/* fresh backedge flags; then install the computed inputs */
237 phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), arity);
238 set_irn_in(phi, arity, in);
240 irn_verify_irg(phi, irg);
/* the finished Phi might be redundant (all inputs equal) — clean it up */
242 try_remove_unnecessary_phi(phi);
247 * This function returns the last definition of a value. In case
248 * this value was last defined in a previous block, Phi nodes are
249 * inserted. If the part of the firm graph containing the definition
250 * is not yet constructed, a dummy Phi node is returned.
252 * @param block the current block
253 * @param pos the value number of the value searched
254 * @param mode the mode of this value (needed for Phi construction)
256 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
/* fast path: a definition for this slot is already cached in the block */
258 ir_node *res = block->attr.block.graph_arr[pos];
259 ir_graph *irg = get_irn_irg(block);
263 /* in a matured block we can immediately determine the phi arguments */
264 if (get_Block_matured(block)) {
265 int arity = get_irn_arity(block);
266 /* no predecessors: use unknown value */
268 if (block == get_irg_start_block(irg)) {
/* let the frontend decide what an uninitialized variable reads as */
269 if (default_initialize_local_variable != NULL) {
270 ir_node *rem = get_r_cur_block(irg);
271 set_r_cur_block(irg, block);
/* pos is 1-based internally (slot 0 is the memory state) -> pos - 1 */
272 res = default_initialize_local_variable(irg, mode, pos - 1);
273 set_r_cur_block(irg, rem);
275 res = new_r_Unknown(irg, mode);
278 /* unreachable block, use Bad */
279 res = new_r_Bad(irg, mode);
281 /* one predecessor just use its value */
282 } else if (arity == 1) {
283 ir_node *cfgpred = get_Block_cfgpred(block, 0);
284 if (is_Bad(cfgpred)) {
285 res = new_r_Bad(irg, mode);
287 ir_node *cfgpred_block = get_nodes_block(cfgpred);
/* recurse into the single predecessor — no Phi needed */
288 res = get_r_value_internal(cfgpred_block, pos, mode);
290 /* multiple predecessors construct Phi */
292 res = new_rd_Phi0(NULL, block, mode, pos);
293 /* enter phi0 into our variable value table to break cycles
294 * arising from set_phi_arguments */
295 block->attr.block.graph_arr[pos] = res;
296 res = set_phi_arguments(res, pos);
299 /* in case of immature block we have to keep a Phi0 */
300 res = new_rd_Phi0(NULL, block, mode, pos);
301 /* enqueue phi so we can set arguments once the block matures */
302 res->attr.phi.next = block->attr.block.phis;
303 block->attr.block.phis = res;
/* cache the result so future lookups in this block are O(1) */
305 block->attr.block.graph_arr[pos] = res;
/**
 * Finalizes an immature block: fixes its predecessor list, computes the
 * arguments of all Phi0 placeholders queued on the block, and marks the
 * block matured. Idempotent: returns early if already matured.
 */
309 void mature_immBlock(ir_node *block)
317 assert(is_Block(block));
318 if (get_Block_matured(block))
321 irg = get_irn_irg(block);
/* block->in[0] is the block itself/header slot, so preds = len - 1 */
322 n_preds = ARR_LEN(block->in) - 1;
323 /* Fix block parameters */
324 block->attr.block.backedge = new_backedge_arr(get_irg_obstack(irg), n_preds);
326 /* Traverse a chain of Phi nodes attached to this block and mature
328 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
330 int pos = phi->attr.phi.u.pos;
/* read next first: set_phi_arguments may exchange/free the phi */
332 next = phi->attr.phi.next;
333 new_value = set_phi_arguments(phi, pos);
/* update the value table only if the slot still points at this phi */
334 if (block->attr.block.graph_arr[pos] == phi) {
335 block->attr.block.graph_arr[pos] = new_value;
339 set_Block_matured(block, 1);
341 /* create final in-array for the block */
/* move the growable in-array into a fixed obstack array now that the
 * predecessor count is final */
342 if (block->attr.block.dynamic_ins) {
343 new_in = NEW_ARR_D(ir_node*, get_irg_obstack(irg), n_preds + 1);
344 memcpy(new_in, block->in, (n_preds+1) * sizeof(new_in[0]));
345 DEL_ARR_F(block->in);
347 block->attr.block.dynamic_ins = false;
350 /* Now, as the block is a finished Firm node, we can optimize it.
351 Since other nodes have been allocated since the block was created
352 we can not free the node on the obstack. Therefore we have to call
354 Unfortunately the optimization does not change a lot, as all allocated
355 nodes refer to the unoptimized node.
356 We can call optimize_in_place_2(), as global cse has no effect on blocks.
358 irn_verify_irg(block, irg);
359 optimize_in_place_2(block);
362 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
364 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
365 return new_rd_Const_long(db, current_ir_graph, mode, value);
368 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
371 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
372 return new_rd_Sel(db, current_ir_graph->current_block,
373 store, objptr, 0, NULL, ent);
376 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
379 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
380 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
383 ir_node *new_d_ASM(dbg_info *db, ir_node *mem, int arity, ir_node *in[],
384 ir_asm_constraint *inputs,
385 size_t n_outs, ir_asm_constraint *outputs,
386 size_t n_clobber, ident *clobber[], ident *text)
388 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
389 return new_rd_ASM(db, current_ir_graph->current_block, mem, arity, in,
390 inputs, n_outs, outputs, n_clobber, clobber, text);
/**
 * Creates a Div node with "no remainder" semantics (DivRL: division where the
 * remainder is known/required to be zero). Uses the ordinary op_Div with the
 * no_remainder attribute set.
 */
393 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
396 ir_graph *irg = get_Block_irg(block);
/* inputs are [mem, left, right]; result is a tuple (mode_T) of M/res/X */
403 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
404 res->attr.div.resmode = resmode;
/* marks this Div as remainder-less — distinguishes it from a plain Div */
405 res->attr.div.no_remainder = 1;
406 res->attr.div.exc.pin_state = pin_state;
407 irn_verify_irg(res, irg);
408 res = optimize_node(res);
412 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
414 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
417 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
420 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
421 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
425 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
427 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
/**
 * Creates an immature block: a block whose control-flow predecessors are not
 * yet known. Predecessors are appended with add_immBlock_pred() and the block
 * is finished with mature_immBlock(). Only valid during graph construction.
 */
430 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
434 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
435 /* creates a new dynamic in-array as length of in is -1 */
436 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
438 set_Block_matured(res, 0);
439 res->attr.block.dynamic_ins = true;
440 res->attr.block.irg.irg = irg;
441 res->attr.block.backedge = NULL;
442 res->attr.block.entity = NULL;
444 set_Block_block_visited(res, 0);
446 /* Create and initialize array for Phi-node construction. */
/* one slot per local variable (n_loc), all initially "no definition" */
447 res->attr.block.graph_arr = NEW_ARR_D(ir_node*, get_irg_obstack(irg), irg->n_loc);
448 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
450 /* Immature block may not be optimized! */
451 irn_verify_irg(res, irg);
456 ir_node *new_r_immBlock(ir_graph *irg)
458 return new_rd_immBlock(NULL, irg);
461 ir_node *new_d_immBlock(dbg_info *dbgi)
463 return new_rd_immBlock(dbgi, current_ir_graph);
466 ir_node *new_immBlock(void)
468 return new_rd_immBlock(NULL, current_ir_graph);
/**
 * Appends control-flow predecessor @p jmp to the (still immature) block.
 * Must not be called after mature_immBlock().
 */
471 void add_immBlock_pred(ir_node *block, ir_node *jmp)
/* current predecessor count; in[0] is not a predecessor slot */
473 int n = ARR_LEN(block->in) - 1;
475 assert(is_Block(block) && "Error: Must be a Block");
476 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
477 assert(is_ir_node(jmp));
/* grow the dynamic in-array by one entry */
479 ARR_APP1(ir_node *, block->in, jmp);
/* notify debugging/observer hooks about the new edge */
481 hook_set_irn_n(block, n, jmp, NULL);
484 void set_cur_block(ir_node *target)
486 set_r_cur_block(current_ir_graph, target);
489 void set_r_cur_block(ir_graph *irg, ir_node *target)
491 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
492 assert(target == NULL || is_Block(target));
493 assert(target == NULL || get_irn_irg(target) == irg);
494 irg->current_block = target;
497 ir_node *get_r_cur_block(ir_graph *irg)
499 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
500 return irg->current_block;
503 ir_node *get_cur_block(void)
505 return get_r_cur_block(current_ir_graph);
/**
 * Returns the current SSA value of local variable @p pos in the current
 * block, constructing Phi nodes on demand.
 */
508 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
510 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
/* slot 0 is reserved for the memory state, so user variables shift by 1 */
513 return get_r_value_internal(irg->current_block, pos + 1, mode);
516 ir_node *get_value(int pos, ir_mode *mode)
518 return get_r_value(current_ir_graph, pos, mode);
522 * helper function for guess_mode: recursively look for a definition for
523 * local variable @p pos, returns its mode if found.
525 static ir_mode *guess_recursively(ir_node *block, int pos)
/* visited flag guards against cycles in the CFG (loops) */
531 if (irn_visited_else_mark(block))
534 /* already have a defintion -> we can simply look at its mode */
535 value = block->attr.block.graph_arr[pos];
537 return get_irn_mode(value);
539 /* now we try to guess, by looking at the predecessor blocks */
540 n_preds = get_irn_arity(block);
541 for (i = 0; i < n_preds; ++i) {
542 ir_node *pred_block = get_Block_cfgpred_block(block, i);
/* first predecessor with a known mode wins */
543 ir_mode *mode = guess_recursively(pred_block, pos);
548 /* no way to guess */
/**
 * Guesses the mode of local variable @p pos by searching the current block
 * and, failing that, its CFG predecessors for an existing definition.
 * Returns NULL semantics per guess_recursively when nothing is found
 * (return statement not visible in this view — TODO confirm).
 */
552 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
554 ir_node *block = irg->current_block;
/* +1: slot 0 holds the memory state */
555 ir_node *value = block->attr.block.graph_arr[pos+1];
558 /* already have a defintion -> we can simply look at its mode */
560 return get_irn_mode(value);
/* walk predecessors; needs the visited resource for cycle detection */
562 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
563 inc_irg_visited(irg);
564 mode = guess_recursively(block, pos+1);
565 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
570 ir_mode *ir_guess_mode(int pos)
572 return ir_r_guess_mode(current_ir_graph, pos);
/**
 * Records @p value as the current definition of local variable @p pos in the
 * current block of @p irg.
 */
575 void set_r_value(ir_graph *irg, int pos, ir_node *value)
577 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
/* slot 0 is the memory state, so pos maps to slot pos + 1 */
579 assert(pos+1 < irg->n_loc);
580 assert(is_ir_node(value));
581 irg->current_block->attr.block.graph_arr[pos + 1] = value;
584 void set_value(int pos, ir_node *value)
586 set_r_value(current_ir_graph, pos, value);
589 ir_node *get_r_store(ir_graph *irg)
591 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
592 return get_r_value_internal(irg->current_block, 0, mode_M);
595 ir_node *get_store(void)
597 return get_r_store(current_ir_graph);
/**
 * Sets the current memory state of @p irg. With the auto-create-sync option
 * enabled, consecutive non-volatile Loads are rewired to share their memory
 * predecessor and merged via a Sync node, exposing parallelism.
 */
600 void set_r_store(ir_graph *irg, ir_node *store)
602 ir_node *load, *pload, *pred, *in[2];
604 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
605 /* Beware: due to dead code elimination, a store might become a Bad node even in
606 the construction phase. */
607 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
609 if (get_opt_auto_create_sync()) {
610 /* handle non-volatile Load nodes by automatically creating Sync's */
611 load = skip_Proj(store);
612 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
613 pred = get_Load_mem(load);
616 /* a Load after a Sync: move it up */
/* reuse the Sync: hang the Load off the Sync's first memory input
 * and add the Load's own memory result to the Sync instead */
617 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
619 set_Load_mem(load, get_memop_mem(mem));
620 add_Sync_pred(pred, store);
623 pload = skip_Proj(pred);
624 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
625 /* a Load after a Load: create a new Sync */
626 set_Load_mem(load, get_Load_mem(pload));
/* Sync over both load results becomes the new memory state */
630 store = new_r_Sync(irg->current_block, 2, in);
/* slot 0 of the value table holds the memory state */
635 irg->current_block->attr.block.graph_arr[0] = store;
638 void set_store(ir_node *store)
640 set_r_store(current_ir_graph, store);
643 void keep_alive(ir_node *ka)
645 ir_graph *irg = get_irn_irg(ka);
646 add_End_keepalive(get_irg_end(irg), ka);
649 void ir_set_uninitialized_local_variable_func(
650 uninitialized_local_variable_func_t *func)
652 default_initialize_local_variable = func;
655 void irg_finalize_cons(ir_graph *irg)
657 ir_node *end_block = get_irg_end_block(irg);
658 mature_immBlock(end_block);
660 clear_irg_constraints(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION);
663 void irp_finalize_cons(void)
666 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
667 irg_finalize_cons(get_irp_irg(i));
671 ir_node *new_Const_long(ir_mode *mode, long value)
673 return new_d_Const_long(NULL, mode, value);
676 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
678 return new_d_SymConst(NULL, mode, value, kind);
680 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
682 return new_d_simpleSel(NULL, store, objptr, ent);
684 ir_node *new_ASM(ir_node *mem, int arity, ir_node *in[],
685 ir_asm_constraint *inputs, size_t n_outs,
686 ir_asm_constraint *outputs, size_t n_clobber,
687 ident *clobber[], ident *text)
689 return new_d_ASM(NULL, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
/**
 * Creates the Anchor node of @p irg: a pseudo node whose inputs point at the
 * distinguished nodes of the graph (start/end blocks, Start, End, ...).
 */
692 ir_node *new_r_Anchor(ir_graph *irg)
694 ir_node *in[anchor_last+1];
697 memset(in, 0, sizeof(in));
/* created with 0 inputs first; real inputs are installed via set_irn_in */
698 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, 0, NULL);
699 res->attr.anchor.irg.irg = irg;
701 /* hack to get get_irn_irg in set_irn_in() working */
704 /* we can't have NULL inputs so reference ourselves for now */
705 for (i = 0; i <= (size_t)anchor_last; ++i) {
708 set_irn_in(res, anchor_last+1, in);
/**
 * Creates a mature Block node with the given predecessors WITHOUT running
 * node-local optimization on it (unlike the regular Block constructor).
 */
713 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
715 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
716 res->attr.block.irg.irg = irg;
717 res->attr.block.backedge = new_backedge_arr(get_irg_obstack(irg), arity);
718 set_Block_matured(res, 1);
719 /* Create and initialize array for Phi-node construction. */
/* only needed while the graph is still under SSA construction */
720 if (irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION)) {
721 res->attr.block.graph_arr = NEW_ARR_D(ir_node*, get_irg_obstack(irg), irg->n_loc);
722 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
724 irn_verify_irg(res, irg);