2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @brief Various irnode constructors. Automatic construction of SSA
10 * @author Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Boris Boesler
11 * Michael Beck, Matthias Braun
16 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
28 #include "iredges_t.h"
32 #include "gen_ir_cons.c.inl"
 * Language dependent variable initialization callback.
 * Invoked when a local variable is read before any definition reaches it;
 * when NULL, get_r_value_internal() falls back to an Unknown node instead.
 */
static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
39 ir_node *new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode,
42 return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
/**
 * Constructs an ASM (inline assembler) node in @p block.
 *
 * Input/output constraint arrays and the clobber list are copied onto the
 * graph's obstack; @p text is referenced, not copied.
 *
 * @param db        debug info
 * @param block     the block the node belongs to
 * @param mem       the memory dependency
 * @param arity     number of data inputs
 * @param in        the data inputs
 * @param inputs    constraints for the data inputs
 * @param n_outs    number of output constraints
 * @param outputs   the output constraints
 * @param n_clobber number of clobber entries
 * @param clobber   the clobber list
 * @param text      the assembler text
 */
ir_node *new_rd_ASM(dbg_info *db, ir_node *block, ir_node *mem,
                    int arity, ir_node *in[], ir_asm_constraint *inputs,
                    size_t n_outs, ir_asm_constraint *outputs, size_t n_clobber,
                    ident *clobber[], ident *text)
	ir_graph *irg = get_irn_irg(block);
	/* real arity: slot 0 is reserved for the memory input, the data inputs
	 * follow (NOTE(review): the r_in[0] = mem assignment sits on a line
	 * elided from this chunk — implied by the memcpy into &r_in[1]) */
	int r_arity = arity+1;
	NEW_ARR_A(ir_node*, r_in, r_arity);
	memcpy(&r_in[1], in, arity*sizeof(ir_node*));
	ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, r_arity, r_in);
	/* attribute arrays live on the graph's obstack */
	struct obstack *const obst = get_irg_obstack(irg);
	res->attr.assem.pin_state = op_pin_state_pinned;
	res->attr.assem.input_constraints = NEW_ARR_D(ir_asm_constraint, obst, arity);
	res->attr.assem.output_constraints = NEW_ARR_D(ir_asm_constraint, obst, n_outs);
	res->attr.assem.clobbers = NEW_ARR_D(ident*, obst, n_clobber);
	res->attr.assem.text = text;
	memcpy(res->attr.assem.input_constraints, inputs, sizeof(inputs[0]) * arity);
	memcpy(res->attr.assem.output_constraints, outputs, sizeof(outputs[0]) * n_outs);
	memcpy(res->attr.assem.clobbers, clobber, sizeof(clobber[0]) * n_clobber);
	irn_verify_irg(res, irg);
	res = optimize_node(res);
76 ir_node *new_rd_simpleSel(dbg_info *db, ir_node *block, ir_node *store,
77 ir_node *objptr, ir_entity *ent)
79 return new_rd_Sel(db, block, store, objptr, 0, NULL, ent);
82 ir_node *new_rd_SymConst(dbg_info *db, ir_graph *irg, ir_mode *mode,
83 symconst_symbol value, symconst_kind symkind)
85 ir_node *block = get_irg_start_block(irg);
86 ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
87 res->attr.symc.kind = symkind;
88 res->attr.symc.sym = value;
90 irn_verify_irg(res, irg);
91 res = optimize_node(res);
95 ir_node *new_rd_SymConst_addr_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
98 sym.entity_p = symbol;
99 return new_rd_SymConst(db, irg, mode, sym, symconst_addr_ent);
102 ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol)
105 sym.entity_p = symbol;
106 return new_rd_SymConst(db, irg, mode, sym, symconst_ofs_ent);
109 ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
113 return new_rd_SymConst(db, irg, mode, sym, symconst_type_size);
116 ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol)
120 return new_rd_SymConst(db, irg, mode, sym, symconst_type_align);
123 ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
125 return new_rd_Const_long(NULL, irg, mode, value);
127 ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode, symconst_symbol value,
128 symconst_kind symkind)
130 return new_rd_SymConst(NULL, irg, mode, value, symkind);
132 ir_node *new_r_simpleSel(ir_node *block, ir_node *store, ir_node *objptr,
135 return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
137 ir_node *new_r_ASM(ir_node *block, ir_node *mem,
138 int arity, ir_node *in[], ir_asm_constraint *inputs,
139 size_t n_outs, ir_asm_constraint *outputs,
140 size_t n_clobber, ident *clobber[], ident *text)
142 return new_rd_ASM(NULL, block, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
145 /** Creates a Phi node with 0 predecessors. */
146 static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
147 ir_mode *mode, int pos)
149 ir_graph *irg = get_irn_irg(block);
150 ir_node *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
151 res->attr.phi.u.pos = pos;
152 irn_verify_irg(res, irg);
156 static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
/**
 * Checks whether the operands of @p phi reduce to a single distinct value
 * (ignoring self references). If so, replaces the Phi by that value and
 * recursively re-checks the value itself.
 */
static void try_remove_unnecessary_phi(ir_node *phi)
	ir_node *phi_value = NULL;
	int arity = get_irn_arity(phi);

	/* see if all inputs are either pointing to a single value or
	 * are self references */
	for (i = 0; i < arity; ++i) {
		ir_node *in = get_irn_n(phi, i);
		/* found a different value from the one we already found, can't
		 * remove the phi */
		if (phi_value != NULL)
	/* all inputs were self references: nothing to replace the Phi with */
	if (phi_value == NULL)

	/* if we're here then all phi inputs have been either phi_value
	 * or self-references, we can replace the phi by phi_value.
	 * We do this with an Id-node */
	exchange(phi, phi_value);

	/* recursively check phi_value, because it could be that we were the last
	 * phi-node in a loop-body. Then our argument is an unnecessary phi in
	 * the loop header which can be eliminated now */
	if (is_Phi(phi_value)) {
		try_remove_unnecessary_phi(phi_value);
/**
 * Computes the predecessors for the real phi node, and then
 * allocates and returns this node. The routine called to allocate the
 * node might optimize it away and return a real value.
 * This function must be called with an in-array of proper size.
 *
 * @param phi  a Phi0 placeholder whose operands are to be filled in
 * @param pos  the local-variable number represented by the Phi
 */
static ir_node *set_phi_arguments(ir_node *phi, int pos)
	ir_node *block = get_nodes_block(phi);
	ir_graph *irg = get_irn_irg(block);
	int arity = get_irn_arity(block);
	ir_node **in = ALLOCAN(ir_node*, arity);
	ir_mode *mode = get_irn_mode(phi);

	/* This loop goes to all predecessor blocks of the block the Phi node
	   is in and there finds the operands of the Phi node by calling
	   get_r_value_internal. */
	for (i = 0; i < arity; ++i) {
		ir_node *cfgpred = get_Block_cfgpred_block(block, i);
		/* dead control flow contributes a Bad operand */
		if (is_Bad(cfgpred)) {
			value = new_r_Bad(irg, mode);
			value = get_r_value_internal(cfgpred, pos, mode);

	/* fresh backedge array: loop back-edges are not known yet */
	phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), arity);
	set_irn_in(phi, arity, in);

	irn_verify_irg(phi, irg);

	/* the newly set operands may render the Phi redundant */
	try_remove_unnecessary_phi(phi);
/**
 * This function returns the last definition of a value. In case
 * this value was last defined in a previous block, Phi nodes are
 * inserted. If the part of the firm graph containing the definition
 * is not yet constructed, a dummy Phi node is returned.
 *
 * @param block the current block
 * @param pos   the value number of the value searched
 * @param mode  the mode of this value (needed for Phi construction)
 *
 * @return the node representing the current value of local variable @p pos
 */
static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
	/* an existing definition in this block ends the search immediately */
	ir_node *res = block->attr.block.graph_arr[pos];
	ir_graph *irg = get_irn_irg(block);

	/* in a matured block we can immediately determine the phi arguments */
	if (get_Block_matured(block)) {
		int arity = get_irn_arity(block);
		/* no predecessors: use unknown value */
		if (block == get_irg_start_block(irg)) {
			if (default_initialize_local_variable != NULL) {
				/* the frontend callback constructs the initial value; run it
				 * with the start block as the current block */
				ir_node *rem = get_r_cur_block(irg);
				set_r_cur_block(irg, block);
				res = default_initialize_local_variable(irg, mode, pos - 1);
				set_r_cur_block(irg, rem);
				res = new_r_Unknown(irg, mode);
		/* unreachable block, use Bad */
			res = new_r_Bad(irg, mode);
		/* one predecessor just use its value */
		} else if (arity == 1) {
			ir_node *cfgpred = get_Block_cfgpred(block, 0);
			if (is_Bad(cfgpred)) {
				res = new_r_Bad(irg, mode);
				ir_node *cfgpred_block = get_nodes_block(cfgpred);
				res = get_r_value_internal(cfgpred_block, pos, mode);
		/* multiple predecessors construct Phi */
			res = new_rd_Phi0(NULL, block, mode, pos);
			/* enter phi0 into our variable value table to break cycles
			 * arising from set_phi_arguments */
			block->attr.block.graph_arr[pos] = res;
			res = set_phi_arguments(res, pos);
		/* in case of immature block we have to keep a Phi0 */
		res = new_rd_Phi0(NULL, block, mode, pos);
		/* enqueue phi so we can set arguments once the block matures */
		res->attr.phi.next = block->attr.block.phis;
		block->attr.block.phis = res;
	/* cache the result so later lookups find it directly */
	block->attr.block.graph_arr[pos] = res;
/**
 * Finalizes the predecessor list of an immature @p block: fixes the pending
 * Phi0 placeholders attached to the block, marks the block matured and
 * optimizes it in place.
 */
void mature_immBlock(ir_node *block)
	/* nothing to do for an already-matured block */
	if (get_Block_matured(block))

	irg = get_irn_irg(block);
	n_preds = ARR_LEN(block->in) - 1;
	/* Fix block parameters */
	block->attr.block.backedge = new_backedge_arr(get_irg_obstack(irg), n_preds);

	/* Traverse a chain of Phi nodes attached to this block and mature
	 */
	for (phi = block->attr.block.phis; phi != NULL; phi = next) {
		int pos = phi->attr.phi.u.pos;
		/* phi may get exchanged below; remember the chain link first */
		next = phi->attr.phi.next;
		new_value = set_phi_arguments(phi, pos);
		if (block->attr.block.graph_arr[pos] == phi) {
			block->attr.block.graph_arr[pos] = new_value;

	set_Block_matured(block, 1);

	/* create final in-array for the block */
	if (block->attr.block.dynamic_ins) {
		/* move the dynamic in-array from the heap onto the obstack */
		ir_node **const new_in = DUP_ARR_D(ir_node*, get_irg_obstack(irg), block->in);
		DEL_ARR_F(block->in);
		block->attr.block.dynamic_ins = false;

	/* Now, as the block is a finished Firm node, we can optimize it.
	   Since other nodes have been allocated since the block was created
	   we can not free the node on the obstack. Therefore we have to call
	   Unfortunately the optimization does not change a lot, as all allocated
	   nodes refer to the unoptimized node.
	   We can call optimize_in_place_2(), as global cse has no effect on blocks.
	 */
	irn_verify_irg(block, irg);
	optimize_in_place_2(block);
345 ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
347 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
348 return new_rd_Const_long(db, current_ir_graph, mode, value);
351 ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr,
354 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
355 return new_rd_Sel(db, current_ir_graph->current_block,
356 store, objptr, 0, NULL, ent);
359 ir_node *new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value,
362 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
363 return new_rd_SymConst(db, current_ir_graph, mode, value, kind);
366 ir_node *new_d_ASM(dbg_info *db, ir_node *mem, int arity, ir_node *in[],
367 ir_asm_constraint *inputs,
368 size_t n_outs, ir_asm_constraint *outputs,
369 size_t n_clobber, ident *clobber[], ident *text)
371 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
372 return new_rd_ASM(db, current_ir_graph->current_block, mem, arity, in,
373 inputs, n_outs, outputs, n_clobber, clobber, text);
376 ir_node *new_rd_DivRL(dbg_info *dbgi, ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
379 ir_graph *irg = get_Block_irg(block);
386 res = new_ir_node(dbgi, irg, block, op_Div, mode_T, 3, in);
387 res->attr.div.resmode = resmode;
388 res->attr.div.no_remainder = 1;
389 res->attr.div.exc.pin_state = pin_state;
390 irn_verify_irg(res, irg);
391 res = optimize_node(res);
395 ir_node *new_r_DivRL(ir_node *block, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
397 return new_rd_DivRL(NULL, block, irn_mem, irn_left, irn_right, resmode, pin_state);
400 ir_node *new_d_DivRL(dbg_info *dbgi, ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
403 assert(irg_is_constrained(current_ir_graph, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
404 res = new_rd_DivRL(dbgi, current_ir_graph->current_block, irn_mem, irn_left, irn_right, resmode, pin_state);
408 ir_node *new_DivRL(ir_node * irn_mem, ir_node * irn_left, ir_node * irn_right, ir_mode* resmode, op_pin_state pin_state)
410 return new_d_DivRL(NULL, irn_mem, irn_left, irn_right, resmode, pin_state);
/**
 * Creates an immature Block node: a block whose control-flow predecessors
 * are not known yet. Predecessors are added via add_immBlock_pred() and the
 * block is finished with mature_immBlock().
 */
ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
	assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
	/* creates a new dynamic in-array as length of in is -1 */
	res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);

	set_Block_matured(res, 0);
	res->attr.block.dynamic_ins = true;
	res->attr.block.irg.irg = irg;
	res->attr.block.backedge = NULL;
	res->attr.block.entity = NULL;

	set_Block_block_visited(res, 0);

	/* Create and initialize array for Phi-node construction. */
	res->attr.block.graph_arr = NEW_ARR_DZ(ir_node*, get_irg_obstack(irg), irg->n_loc);

	/* Immature block may not be optimized! */
	irn_verify_irg(res, irg);
438 ir_node *new_r_immBlock(ir_graph *irg)
440 return new_rd_immBlock(NULL, irg);
443 ir_node *new_d_immBlock(dbg_info *dbgi)
445 return new_rd_immBlock(dbgi, current_ir_graph);
448 ir_node *new_immBlock(void)
450 return new_rd_immBlock(NULL, current_ir_graph);
453 void add_immBlock_pred(ir_node *block, ir_node *jmp)
455 int n = ARR_LEN(block->in) - 1;
457 assert(is_Block(block) && "Error: Must be a Block");
458 assert(!get_Block_matured(block) && "Error: Block already matured!\n");
459 assert(is_ir_node(jmp));
461 ARR_APP1(ir_node *, block->in, jmp);
463 hook_set_irn_n(block, n, jmp, NULL);
466 void set_cur_block(ir_node *target)
468 set_r_cur_block(current_ir_graph, target);
471 void set_r_cur_block(ir_graph *irg, ir_node *target)
473 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
474 assert(target == NULL || is_Block(target));
475 assert(target == NULL || get_irn_irg(target) == irg);
476 irg->current_block = target;
479 ir_node *get_r_cur_block(ir_graph *irg)
481 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
482 return irg->current_block;
485 ir_node *get_cur_block(void)
487 return get_r_cur_block(current_ir_graph);
490 ir_node *get_r_value(ir_graph *irg, int pos, ir_mode *mode)
492 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
495 return get_r_value_internal(irg->current_block, pos + 1, mode);
498 ir_node *get_value(int pos, ir_mode *mode)
500 return get_r_value(current_ir_graph, pos, mode);
/**
 * helper function for guess_mode: recursively look for a definition for
 * local variable @p pos, returns its mode if found.
 *
 * Uses the visited flag to terminate on control-flow cycles.
 */
static ir_mode *guess_recursively(ir_node *block, int pos)
	/* stop on cycles: each block is inspected at most once */
	if (irn_visited_else_mark(block))

	/* already have a definition -> we can simply look at its mode */
	value = block->attr.block.graph_arr[pos];
		return get_irn_mode(value);

	/* now we try to guess, by looking at the predecessor blocks */
	n_preds = get_irn_arity(block);
	for (i = 0; i < n_preds; ++i) {
		ir_node *pred_block = get_Block_cfgpred_block(block, i);
		ir_mode *mode       = guess_recursively(pred_block, pos);

	/* no way to guess */
/**
 * Tries to determine the mode of local variable @p pos in graph @p irg by
 * looking at existing definitions in the current block and, failing that,
 * recursively in the predecessor blocks.
 */
ir_mode *ir_r_guess_mode(ir_graph *irg, int pos)
	ir_node *block = irg->current_block;
	/* slot 0 holds the memory state, hence pos + 1 */
	ir_node *value = block->attr.block.graph_arr[pos+1];

	/* already have a definition -> we can simply look at its mode */
		return get_irn_mode(value);

	/* the recursion uses the visited flag, so reserve it for the search */
	ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
	inc_irg_visited(irg);
	mode = guess_recursively(block, pos+1);
	ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
552 ir_mode *ir_guess_mode(int pos)
554 return ir_r_guess_mode(current_ir_graph, pos);
557 void set_r_value(ir_graph *irg, int pos, ir_node *value)
559 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
561 assert(pos+1 < irg->n_loc);
562 assert(is_ir_node(value));
563 irg->current_block->attr.block.graph_arr[pos + 1] = value;
566 void set_value(int pos, ir_node *value)
568 set_r_value(current_ir_graph, pos, value);
571 ir_node *get_r_store(ir_graph *irg)
573 assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
574 return get_r_value_internal(irg->current_block, 0, mode_M);
577 ir_node *get_store(void)
579 return get_r_store(current_ir_graph);
/**
 * Sets the current memory state (value slot 0) of graph @p irg.
 *
 * If the auto-create-sync option is enabled, consecutive non-volatile Loads
 * are detached from the serial memory chain and merged via Sync nodes so
 * they can execute in parallel.
 */
void set_r_store(ir_graph *irg, ir_node *store)
	ir_node *load, *pload, *pred, *in[2];

	assert(irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION));
	/* Beware: due to dead code elimination, a store might become a Bad node even in
	   the construction phase. */
	assert((get_irn_mode(store) == mode_M || is_Bad(store)) && "storing non-memory node");

	if (get_opt_auto_create_sync()) {
		/* handle non-volatile Load nodes by automatically creating Sync's */
		load = skip_Proj(store);
		if (is_Load(load) && get_Load_volatility(load) == volatility_non_volatile) {
			pred = get_Load_mem(load);

			/* a Load after a Sync: move it up */
			ir_node *mem = skip_Proj(get_Sync_pred(pred, 0));
			set_Load_mem(load, get_memop_mem(mem));
			add_Sync_pred(pred, store);

			pload = skip_Proj(pred);
			if (is_Load(pload) && get_Load_volatility(pload) == volatility_non_volatile) {
				/* a Load after a Load: create a new Sync */
				set_Load_mem(load, get_Load_mem(pload));
				store = new_r_Sync(irg->current_block, 2, in);

	irg->current_block->attr.block.graph_arr[0] = store;
620 void set_store(ir_node *store)
622 set_r_store(current_ir_graph, store);
625 void keep_alive(ir_node *ka)
627 ir_graph *irg = get_irn_irg(ka);
628 add_End_keepalive(get_irg_end(irg), ka);
631 void ir_set_uninitialized_local_variable_func(
632 uninitialized_local_variable_func_t *func)
634 default_initialize_local_variable = func;
637 void irg_finalize_cons(ir_graph *irg)
639 ir_node *end_block = get_irg_end_block(irg);
640 mature_immBlock(end_block);
642 clear_irg_constraints(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION);
645 void irp_finalize_cons(void)
648 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
649 irg_finalize_cons(get_irp_irg(i));
653 ir_node *new_Const_long(ir_mode *mode, long value)
655 return new_d_Const_long(NULL, mode, value);
658 ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
660 return new_d_SymConst(NULL, mode, value, kind);
662 ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
664 return new_d_simpleSel(NULL, store, objptr, ent);
666 ir_node *new_ASM(ir_node *mem, int arity, ir_node *in[],
667 ir_asm_constraint *inputs, size_t n_outs,
668 ir_asm_constraint *outputs, size_t n_clobber,
669 ident *clobber[], ident *text)
671 return new_d_ASM(NULL, mem, arity, in, inputs, n_outs, outputs, n_clobber, clobber, text);
/**
 * Creates the Anchor node of graph @p irg: a pseudo node holding references
 * to distinguished nodes of the graph (start/end block etc.).
 */
ir_node *new_r_Anchor(ir_graph *irg)
	ir_node *in[anchor_last+1];

	memset(in, 0, sizeof(in));
	/* the node is first created without inputs ... */
	res = new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, 0, NULL);
	res->attr.anchor.irg.irg = irg;

	/* hack to get get_irn_irg in set_irn_in() working */

	/* we can't have NULL inputs so reference ourselves for now */
	for (i = 0; i <= (size_t)anchor_last; ++i) {
	/* ... and only then receives its (self-referencing) in-array */
	set_irn_in(res, anchor_last+1, in);
/**
 * Creates a (matured) Block node with @p arity predecessors @p in, without
 * running node optimization on it.
 */
ir_node *new_r_Block_noopt(ir_graph *irg, int arity, ir_node *in[])
	ir_node *res = new_ir_node(NULL, irg, NULL, op_Block, mode_BB, arity, in);
	res->attr.block.irg.irg = irg;
	res->attr.block.backedge = new_backedge_arr(get_irg_obstack(irg), arity);
	set_Block_matured(res, 1);
	/* Create and initialize array for Phi-node construction. */
	if (irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION)) {
		res->attr.block.graph_arr = NEW_ARR_DZ(ir_node*, get_irg_obstack(irg), irg->n_loc);
	irn_verify_irg(res, irg);