/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Various irnode constructors. Automatic construction of SSA
 *          representation.
 * @author  Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
 *          Michael Beck, Matthias Braun
 */
30 #include "irgraph_t.h"
40 #include "irbackedge_t.h"
42 #include "iredges_t.h"
46 #include "gen_ir_cons.c.inl"
49 * Language dependent variable initialization callback.
51 static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
53 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
56 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
59 ir_node *new_rd_ASM(dbg_info *db, ir_node *block, ir_node *mem,
60 int arity, ir_node *in[], ir_asm_constraint *inputs,
61 size_t n_outs, ir_asm_constraint *outputs, size_t n_clobber,
62 ident *clobber[], ident *text)
64 ir_graph *irg = get_irn_irg(block);
66 int r_arity = arity+1;
68 NEW_ARR_A(ir_node*, r_in, r_arity);
70 memcpy(&r_in[1], in, arity*sizeof(ir_node*));
72 ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, r_arity, r_in);
74 res->attr.assem.pin_state = op_pin_state_pinned;
75 res->attr.assem.input_constraints
76 = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
77 res->attr.assem.output_constraints
78 = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
79 res->attr.assem.clobbers = NEW_ARR_D(ident *, irg->obst, n_clobber);
80 res->attr.assem.text = text;
82 memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
83 memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
84 memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
86 irn_verify_irg(res, irg);
87 res = optimize_node(res);
91 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
92 ir_node *objptr, ir_entity *ent)
94 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
97 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
98 symconst_symbol value, symconst_kind symkind)
100 ir_node *block = get_irg_start_block(irg);
101 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
102 res->attr.symc.kind = symkind;
103 res->attr.symc.sym = value;
105 irn_verify_irg(res, irg);
106 res = optimize_node(res);
110 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
113 sym.entity_p = symbol;
114 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
117 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
120 sym.entity_p = symbol;
121 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
124 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
128 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
131 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
135 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
138 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
140 return new_rd_Const_long(NULL, irg, mode, value);
142 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
143 symconst_kind symkind)
145 return new_rd_SymConst(NULL, irg, mode, value, symkind);
147 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
150 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
152 ir_node *new_r_ASM(ir_node *block, ir_node *mem,
153 int arity, ir_node *in[], ir_asm_constraint *inputs,
154 size_t n_outs, ir_asm_constraint *outputs,
155 size_t n_clobber, ident *clobber[], ident *text)
157 return new_rd_ASM(NULL, block, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
160 /** Creates a Phi node with 0 predecessors. */
161 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
162 ir_mode *mode, int pos)
164 ir_graph *irg = get_irn_irg(block);
165 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
166 res->attr.phi.u.pos = pos;
167 irn_verify_irg(res, irg);
171 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
173 static void try_remove_unnecessary_phi(ir_node *phi)
175 ir_node *phi_value = NULL;
176 int arity = get_irn_arity(phi);
179 /* see if all inputs are either pointing to a single value or
180 * are self references */
181 for (i = 0; i < arity; ++i) {
182 ir_node *in = get_irn_n(phi, i);
187 /** found a different value from the one we already found, can't remove
189 if (phi_value != NULL)
193 if (phi_value == NULL)
196 /* if we're here then all phi inputs have been either phi_value
197 * or self-references, we can replace the phi by phi_value.
198 * We do this with an Id-node */
199 exchange(phi, phi_value);
201 /* recursively check phi_value, because it could be that we were the last
202 * phi-node in a loop-body. Then our arguments is an unnecessary phi in
203 * the loop header which can be eliminated now */
204 if (is_Phi(phi_value)) {
205 try_remove_unnecessary_phi(phi_value);
210 * Computes the predecessors for the real phi node, and then
211 * allocates and returns this node. The routine called to allocate the
212 * node might optimize it away and return a real value.
213 * This function must be called with an in-array of proper size.
215 static ir_node *set_phi_arguments(ir_node *phi, int pos)
217 ir_node *block = get_nodes_block(phi);
218 ir_graph *irg = get_irn_irg(block);
219 int arity = get_irn_arity(block);
220 ir_node **in = ALLOCAN(ir_node*, arity);
221 ir_mode *mode = get_irn_mode(phi);
224 /* This loop goes to all predecessor blocks of the block the Phi node
225 is in and there finds the operands of the Phi node by calling
226 get_r_value_internal. */
227 for (i = 0; i < arity; ++i) {
228 ir_node *cfgpred = get_Block_cfgpred_block(block, i);
230 if (is_Bad(cfgpred)) {
231 value = new_r_Bad(irg, mode);
233 value = get_r_value_internal(cfgpred, pos, mode);
238 phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
239 set_irn_in(phi, arity, in);
241 irn_verify_irg(phi, irg);
243 try_remove_unnecessary_phi(phi);
248 * This function returns the last definition of a value. In case
249 * this value was last defined in a previous block, Phi nodes are
250 * inserted. If the part of the firm graph containing the definition
251 * is not yet constructed, a dummy Phi node is returned.
253 * @param block the current block
254 * @param pos the value number of the value searched
255 * @param mode the mode of this value (needed for Phi construction)
257 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
259 ir_node *res = block->attr.block.graph_arr[pos];
260 ir_graph *irg = get_irn_irg(block);
264 /* in a matured block we can immediately determine the phi arguments */
265 if (get_Block_matured(block)) {
266 int arity = get_irn_arity(block);
267 /* no predecessors: use unknown value */
269 if (block == get_irg_start_block(irg)) {
270 if (default_initialize_local_variable != NULL) {
271 ir_node *rem = get_r_cur_block(irg);
272 set_r_cur_block(irg, block);
273 res = default_initialize_local_variable(irg, mode, pos - 1);
274 set_r_cur_block(irg, rem);
276 res = new_r_Unknown(irg, mode);
279 /* unreachable block, use Bad */
280 res = new_r_Bad(irg, mode);
282 /* one predecessor just use its value */
283 } else if (arity == 1) {
284 ir_node *cfgpred = get_Block_cfgpred(block, 0);
285 if (is_Bad(cfgpred)) {
286 res = new_r_Bad(irg, mode);
288 ir_node *cfgpred_block = get_nodes_block(cfgpred);
289 res = get_r_value_internal(cfgpred_block, pos, mode);
291 /* multiple predecessors construct Phi */
293 res = new_rd_Phi0(NULL, block, mode, pos);
294 /* enter phi0 into our variable value table to break cycles
295 * arising from set_phi_arguments */
296 block->attr.block.graph_arr[pos] = res;
297 res = set_phi_arguments(res, pos);
300 /* in case of immature block we have to keep a Phi0 */
301 res = new_rd_Phi0(NULL, block, mode, pos);
302 /* enqueue phi so we can set arguments once the block matures */
303 res->attr.phi.next = block->attr.block.phis;
304 block->attr.block.phis = res;
306 block->attr.block.graph_arr[pos] = res;
310 void mature_immBlock(ir_node *block)
318 assert(is_Block(block));
319 if (get_Block_matured(block))
322 irg = get_irn_irg(block);
323 n_preds = ARR_LEN(block->in) - 1;
324 /* Fix block parameters */
325 block->attr.block.backedge = new_backedge_arr(irg->obst, n_preds);
327 /* Traverse a chain of Phi nodes attached to this block and mature
329 for (phi = block->attr.block.phis; phi != NULL; phi = next) {
331 int pos = phi->attr.phi.u.pos;
333 next = phi->attr.phi.next;
334 new_value = set_phi_arguments(phi, pos);
335 if (block->attr.block.graph_arr[pos] == phi) {
336 block->attr.block.graph_arr[pos] = new_value;
340 set_Block_matured(block, 1);
342 /* create final in-array for the block */
343 if (block->attr.block.dynamic_ins) {
344 new_in = NEW_ARR_D(ir_node*, irg->obst, n_preds+1);
345 memcpy(new_in, block->in, (n_preds+1) * sizeof(new_in[0]));
346 DEL_ARR_F(block->in);
348 block->attr.block.dynamic_ins = false;
351 /* Now, as the block is a finished Firm node, we can optimize it.
352 Since other nodes have been allocated since the block was created
353 we can not free the node on the obstack. Therefore we have to call
355 Unfortunately the optimization does not change a lot, as all allocated
356 nodes refer to the unoptimized node.
357 We can call optimize_in_place_2(), as global cse has no effect on blocks.
359 irn_verify_irg(block, irg);
360 optimize_in_place_2(block);
363 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
365 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
366 return new_rd_Const_long(db, current_ir_graph, mode, value);
369 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
372 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
373 return new_rd_Sel(db, current_ir_graph->current_block,
374 store, objptr, 0, NULL, ent);
377 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
380 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
381 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
384 ir_node *new_d_ASM(dbg_info *db, ir_node *mem, int arity, ir_node *in[],
385 ir_asm_constraint *inputs,
386 size_t n_outs, ir_asm_constraint *outputs,
387 size_t n_clobber, ident *clobber[], ident *text)
389 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
390 return new_rd_ASM(db, current_ir_graph->current_block, mem, arity, in,
391 inputs, n_outs, outputs, n_clobber, clobber, text);
394 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
397 ir_graph *irg = get_Block_irg(block);
404 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
405 res->attr.div.resmode = resmode;
406 res->attr.div.no_remainder = 1;
407 res->attr.div.exc.pin_state = pin_state;
408 irn_verify_irg(res, irg);
409 res = optimize_node(res);
413 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
415 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
418 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
421 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
422 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
426 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
428 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
431 ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
435 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
436 /* creates a new dynamic in-array as length of in is -1 */
437 res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
439 set_Block_matured(res, 0);
440 res->attr.block.dynamic_ins = true;
441 res->attr.block.irg.irg = irg;
442 res->attr.block.backedge = NULL;
443 res->attr.block.entity = NULL;
445 set_Block_block_visited(res, 0);
447 /* Create and initialize array for Phi-node construction. */
448 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
449 memset(res->attr.block.graph_arr, 0, sizeof(ir_node*) * irg->n_loc);
451 /* Immature block may not be optimized! */
452 irn_verify_irg(res, irg);
457 ir_node *new_r_immBlock(ir_graph *irg)
459 return new_rd_immBlock(NULL, irg);
462 ir_node *new_d_immBlock(dbg_info *dbgi)
464 return new_rd_immBlock(dbgi, current_ir_graph);
467 ir_node *new_immBlock(void)
469 return new_rd_immBlock(NULL, current_ir_graph);
472 void add_immBlock_pred(ir_node *block, ir_node *jmp)
474 int n = ARR_LEN(block->in) - 1;
476 assert(is_Block(block) && "Error: Must be a Block");
477 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
478 assert(is_ir_node(jmp));
480 ARR_APP1(ir_node *, block->in, jmp);
482 hook_set_irn_n(block, n, jmp, NULL);
485 void set_cur_block(ir_node *target)
487 set_r_cur_block(current_ir_graph, target);
490 void set_r_cur_block(ir_graph *irg, ir_node *target)
492 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
493 assert(target == NULL || is_Block(target));
494 assert(target == NULL || get_irn_irg(target) == irg);
495 irg->current_block = target;
498 ir_node *get_r_cur_block(ir_graph *irg)
500 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
501 return irg->current_block;
504 ir_node *get_cur_block(void)
506 return get_r_cur_block(current_ir_graph);
509 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
511 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
514 return get_r_value_internal(irg->current_block, pos + 1, mode);
517 ir_node *get_value(int pos, ir_mode *mode)
519 return get_r_value(current_ir_graph, pos, mode);
523 * helper function for guess_mode: recursively look for a definition for
524 * local variable @p pos, returns its mode if found.
526 static ir_mode *guess_recursively(ir_node *block, int pos)
532 if (irn_visited_else_mark(block))
535 /* already have a defintion -> we can simply look at its mode */
536 value = block->attr.block.graph_arr[pos];
538 return get_irn_mode(value);
540 /* now we try to guess, by looking at the predecessor blocks */
541 n_preds = get_irn_arity(block);
542 for (i = 0; i < n_preds; ++i) {
543 ir_node *pred_block = get_Block_cfgpred_block(block, i);
544 ir_mode *mode = guess_recursively(pred_block, pos);
549 /* no way to guess */
553 ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
555 ir_node *block = irg->current_block;
556 ir_node *value = block->attr.block.graph_arr[pos+1];
559 /* already have a defintion -> we can simply look at its mode */
561 return get_irn_mode(value);
563 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
564 inc_irg_visited(irg);
565 mode = guess_recursively(block, pos+1);
566 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
571 ir_mode *ir_guess_mode(int pos)
573 return ir_r_guess_mode(current_ir_graph, pos);
576 void set_r_value(ir_graph *irg, int pos, ir_node *value)
578 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
580 assert(pos+1 < irg->n_loc);
581 assert(is_ir_node(value));
582 irg->current_block->attr.block.graph_arr[pos + 1] = value;
585 void set_value(int pos, ir_node *value)
587 set_r_value(current_ir_graph, pos, value);
590 ir_node *get_r_store(ir_graph *irg)
592 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
593 return get_r_value_internal(irg->current_block, 0, mode_M);
596 ir_node *get_store(void)
598 return get_r_store(current_ir_graph);
601 void set_r_store(ir_graph *irg, ir_node *store)
603 ir_node *load, *pload, *pred, *in[2];
605 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
606 /* Beware: due to dead code elimination, a store might become a Bad node even in
607 the construction phase. */
608 assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");
610 if (get_opt_auto_create_sync()) {
611 /* handle non-volatile Load nodes by automatically creating Sync's */
612 load = skip_Proj(store);
613 if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
614 pred = get_Load_mem(load);
617 /* a Load after a Sync: move it up */
618 ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
620 set_Load_mem(load, get_memop_mem(mem));
621 add_Sync_pred(pred, store);
624 pload = skip_Proj(pred);
625 if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
626 /* a Load after a Load: create a new Sync */
627 set_Load_mem(load, get_Load_mem(pload));
631 store = new_r_Sync(irg->current_block, 2, in);
636 irg->current_block->attr.block.graph_arr[0] = store;
639 void set_store(ir_node *store)
641 set_r_store(current_ir_graph, store);
644 void keep_alive(ir_node *ka)
646 ir_graph *irg = get_irn_irg(ka);
647 add_End_keepalive(get_irg_end(irg), ka);
650 void ir_set_uninitialized_local_variable_func(
651 uninitialized_local_variable_func_t *func)
653 default_initialize_local_variable = func;
656 void irg_finalize_cons(ir_graph *irg)
658 ir_node *end_block = get_irg_end_block(irg);
659 mature_immBlock(end_block);
661 clear_irg_constraints(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION);
/** Finishes construction of all graphs in the program. */
void irp_finalize_cons(void)
{
	size_t i, n;
	for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
		irg_finalize_cons(get_irp_irg(i));
	}
}
672 ir_node *new_Const_long(ir_mode *mode, long value)
674 return new_d_Const_long(NULL, mode, value);
677 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
679 return new_d_SymConst(NULL, mode, value, kind);
681 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
683 return new_d_simpleSel(NULL, store, objptr, ent);
685 ir_node *new_ASM(ir_node *mem, int arity, ir_node *in[],
686 ir_asm_constraint *inputs, size_t n_outs,
687 ir_asm_constraint *outputs, size_t n_clobber,
688 ident *clobber[], ident *text)
690 return new_d_ASM(NULL, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
693 ir_node *new_r_Anchor(ir_graph *irg)
695 ir_node *in[anchor_last+1];
698 memset(in, 0, sizeof(in));
699 res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, 0, NULL);
700 res->attr.anchor.irg.irg = irg;
702 /* hack to get get_irn_irg in set_irn_in() working */
705 /* we can't have NULL inputs so reference ourselves for now */
706 for (i = 0; i <= (size_t)anchor_last; ++i) {
709 set_irn_in(res, anchor_last+1, in);
714 ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
716 ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
717 res->attr.block.irg.irg = irg;
718 res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
719 set_Block_matured(res, 1);
720 /* Create and initialize array for Phi-node construction. */
721 if (irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION)) {
722 res->attr.block.graph_arr = NEW_ARR_D(ir_node *, irg->obst, irg->n_loc);
723 memset(res->attr.block.graph_arr, 0, irg->n_loc * sizeof(ir_node*));
725 irn_verify_irg(res, irg);