3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
35 /* some constants fixing the positions of nodes' predecessors in the in array */
37 #define CALL_PARAM_OFFSET 2
38 #define FUNCCALL_PARAM_OFFSET 1
39 #define SEL_INDEX_OFFSET 2
40 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
41 #define END_KEEPALIVE_OFFSET 0
/** Printable names of the pn_Cmp projection numbers, indexed by pnc value. */
static const char *pnc_name_arr [] = {
  "pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
  "pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
  "pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
  "pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 *
 * @param pnc  a pn_Cmp value; must lie inside the 16-entry name table
 * @return the (static) string representation of the pnc value
 */
const char *get_pnc_string(int pnc) {
  /* guard against out-of-range indices into the fixed 16-entry table */
  assert(0 <= pnc && pnc < (int)(sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0])));
  return pnc_name_arr[pnc];
}
58 * Calculates the negated (Complement(R)) pnc condition.
60 int get_negated_pnc(int pnc, ir_mode *mode) {
/* NOTE(review): for float modes the negation presumably toggles the
   unordered (Uo) relation as well — the rest of this body is not
   visible in this listing; confirm against the full source. */
63 /* do NOT add the Uo bit for non-floating point values */
64 if (! mode_is_float(mode))
70 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
72 get_inversed_pnc(int pnc) {
73 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
74 int lesser = pnc & pn_Cmp_Lt;
75 int greater = pnc & pn_Cmp_Gt;
77 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/* Printable names for the projections out of the Start node. */
82 const char *pns_name_arr [] = {
83 "initial_exec", "global_store",
84 "frame_base", "globals", "args"

/* Printable names for the symconst_kind values. */
87 const char *symconst_name_arr [] = {
88 "type_tag", "size", "addr_name", "addr_ent"
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;


/* register new space for every node */
unsigned register_additional_node_data(unsigned size) {
  assert(!forbid_new_data && "Too late to register additional node data");

  /* the assert above vanishes in NDEBUG builds: refuse the late
     registration explicitly instead of corrupting the node layout */
  if (forbid_new_data)
    return 0;

  /* returns the new total amount of custom data per node */
  return firm_add_node_size += size;
}
117 /* Forbid the addition of new data to an ir node. */
122 * irnode constructor.
123 * Create a new irnode in irg, with an op, mode, arity and
124 * some incoming irnodes.
125 * If arity is negative, a node with a dynamic array is created.
128 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
129 int arity, ir_node **in)
/* the custom data registered via register_additional_node_data() is laid
   out in memory *before* the ir_node itself, hence the size sum and the
   pointer offset below */
132 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
135 assert(irg && op && mode);
136 p = obstack_alloc (irg->obst, node_size);
137 memset(p, 0, node_size);
138 res = (ir_node *) (p + firm_add_node_size);
140 res->kind = k_ir_node;
/* negative arity: allocate a growable (flexible) in array; slot 0 is
   reserved for the block predecessor */
146 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
148 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
149 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
153 set_irn_dbg_info(res, db);
157 res->node_nr = get_irp_new_node_nr();
/* initialize the in-place edges bookkeeping, if enabled */
160 #if FIRM_EDGES_INPLACE
163 int is_bl = is_Block(res);
165 INIT_LIST_HEAD(&res->edge_info.outs_head);
167 INIT_LIST_HEAD(&res->attr.block.succ_head);
/* notify the edges module about every initial predecessor edge;
   Blocks start at index 1 since they have no block predecessor */
170 for (i = is_bl; i <= arity; ++i)
171 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
175 hook_new_node(irg, res);
180 /*-- getting some parameters from ir_nodes --*/

/* Checks whether the given pointer points to an ir node. */
183 (is_ir_node)(const void *thing) {
184 return _is_ir_node(thing);
188 (get_irn_intra_arity)(const ir_node *node) {
189 return _get_irn_intra_arity(node);
193 (get_irn_inter_arity)(const ir_node *node) {
194 return _get_irn_inter_arity(node);
/* function pointer dispatched between intra- and interprocedural view */
197 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
200 (get_irn_arity)(const ir_node *node) {
201 return _get_irn_arity(node);
204 /* Returns the array with ins. This array is shifted with respect to the
205 array accessed by get_irn_n: The block operand is at position 0 not -1.
206 (@@@ This should be changed.)
207 The order of the predecessors in this array is not guaranteed, except that
208 lists of operands as predecessors of Block or arguments of a Call are
211 get_irn_in (const ir_node *node) {
213 if (get_interprocedural_view()) { /* handle Filter and Block specially */
214 if (get_irn_opcode(node) == iro_Filter) {
215 assert(node->attr.filter.in_cg);
216 return node->attr.filter.in_cg;
217 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
218 return node->attr.block.in_cg;
220 /* else fall through */
226 set_irn_in (ir_node *node, int arity, ir_node **in) {
229 ir_graph *irg = current_ir_graph;
231 if (get_interprocedural_view()) { /* handle Filter and Block specially */
232 if (get_irn_opcode(node) == iro_Filter) {
233 assert(node->attr.filter.in_cg);
234 arr = &node->attr.filter.in_cg;
235 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
236 arr = &node->attr.block.in_cg;
/* notify the edges module: changed edges first, then removed ones */
244 for (i = 0; i < arity; i++) {
245 if (i < ARR_LEN(*arr)-1)
246 edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
248 edges_notify_edge(node, i, in[i], NULL, irg);
250 for(;i < ARR_LEN(*arr)-1; i++) {
251 edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
/* arity changed: reallocate the in array, keeping the block pred at [0] */
254 if (arity != ARR_LEN(*arr) - 1) {
255 ir_node * block = (*arr)[0];
256 *arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
259 fix_backedges(irg->obst, node);
261 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
265 (get_irn_intra_n)(const ir_node *node, int n) {
266 return _get_irn_intra_n (node, n);
270 (get_irn_inter_n)(const ir_node *node, int n) {
271 return _get_irn_inter_n (node, n);
/* function pointer dispatched between intra- and interprocedural view */
274 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
277 (get_irn_n)(const ir_node *node, int n) {
278 return _get_irn_n(node, n);
/* Sets the n-th predecessor; n == -1 addresses the block predecessor. */
282 set_irn_n (ir_node *node, int n, ir_node *in) {
283 assert(node && node->kind == k_ir_node);
285 assert(n < get_irn_arity(node));
286 assert(in && in->kind == k_ir_node);
288 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
289 /* Change block pred in both views! */
290 node->in[n + 1] = in;
291 assert(node->attr.filter.in_cg);
292 node->attr.filter.in_cg[n + 1] = in;
295 if (get_interprocedural_view()) { /* handle Filter and Block specially */
296 if (get_irn_opcode(node) == iro_Filter) {
297 assert(node->attr.filter.in_cg);
298 node->attr.filter.in_cg[n + 1] = in;
300 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
301 node->attr.block.in_cg[n + 1] = in;
304 /* else fall through */
308 hook_set_irn_n(node, n, in, node->in[n + 1]);
310 /* Here, we rely on src and tgt being in the current ir graph */
311 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
313 node->in[n + 1] = in;
/* Returns the mode of the node. */
317 (get_irn_mode)(const ir_node *node) {
318 return _get_irn_mode(node);
322 (set_irn_mode)(ir_node *node, ir_mode *mode)
324 _set_irn_mode(node, mode);
328 get_irn_modecode (const ir_node *node)
331 return node->mode->code;
334 /** Gets the string representation of the mode. */
336 get_irn_modename (const ir_node *node)
339 return get_mode_name(node->mode);
343 get_irn_modeident (const ir_node *node)
346 return get_mode_ident(node->mode);
350 (get_irn_op)(const ir_node *node) {
351 return _get_irn_op(node);
354 /* should be private to the library: */
356 (set_irn_op)(ir_node *node, ir_op *op) {
357 _set_irn_op(node, op);
361 (get_irn_opcode)(const ir_node *node)
363 return _get_irn_opcode(node);
/* Returns the op name; a zero-arity Phi during construction reports "Phi0". */
367 get_irn_opname (const ir_node *node)
370 if ((get_irn_op((ir_node *)node) == op_Phi) &&
371 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
372 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
373 return get_id_str(node->op->name);
377 get_irn_opident (const ir_node *node)
380 return node->op->name;
/* visited-flag helpers; all delegate to the inline implementations */
384 (get_irn_visited)(const ir_node *node)
386 return _get_irn_visited(node);
390 (set_irn_visited)(ir_node *node, unsigned long visited)
392 _set_irn_visited(node, visited);
396 (mark_irn_visited)(ir_node *node) {
397 _mark_irn_visited(node);
401 (irn_not_visited)(const ir_node *node) {
402 return _irn_not_visited(node);
406 (irn_visited)(const ir_node *node) {
407 return _irn_visited(node);
411 (set_irn_link)(ir_node *node, void *link) {
412 _set_irn_link(node, link);
416 (get_irn_link)(const ir_node *node) {
417 return _get_irn_link(node);
421 (get_irn_pinned)(const ir_node *node) {
422 return _get_irn_pinned(node);
426 (is_irn_pinned_in_irg) (const ir_node *node) {
427 return _is_irn_pinned_in_irg(node);
430 void set_irn_pinned(ir_node *node, op_pin_state state) {
431 /* due to optimization an opt may be turned into a Tuple */
432 if (get_irn_op(node) == op_Tuple)
435 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
436 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
438 node->attr.except.pin_state = state;
441 #ifdef DO_HEAPANALYSIS
442 /* Access the abstract interpretation information of a node.
443 Returns NULL if no such information is available. */
444 struct abstval *get_irn_abst_value(ir_node *n) {
447 /* Set the abstract interpretation information of a node. */
448 void set_irn_abst_value(ir_node *n, struct abstval *os) {
451 struct section *firm_get_irn_section(ir_node *n) {
454 void firm_set_irn_section(ir_node *n, struct section *s) {
458 /* Dummies needed for firmjni. */
459 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
460 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
461 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
462 void firm_set_irn_section(ir_node *n, struct section *s) {}
463 #endif /* DO_HEAPANALYSIS */
466 /* Outputs a unique number for this node */
467 long get_irn_node_nr(const ir_node *node) {
470 return node->node_nr;
/* without node numbering, fall back to the node's address as its id */
472 return (long)PTR_TO_INT(node);
/* Typed access to the per-opcode attribute union; each accessor asserts
   that the node carries the expected op before touching the union member. */
477 get_irn_const_attr (ir_node *node)
479 assert (node->op == op_Const);
480 return node->attr.con;
484 get_irn_proj_attr (ir_node *node)
486 assert (node->op == op_Proj);
487 return node->attr.proj;
491 get_irn_alloc_attr (ir_node *node)
493 assert (node->op == op_Alloc);
498 get_irn_free_attr (ir_node *node)
500 assert (node->op == op_Free);
505 get_irn_symconst_attr (ir_node *node)
507 assert (node->op == op_SymConst);
/* also normalizes the stored callee type via skip_tid() on every access */
512 get_irn_call_attr (ir_node *node)
514 assert (node->op == op_Call);
515 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
519 get_irn_sel_attr (ir_node *node)
521 assert (node->op == op_Sel);
526 get_irn_phi_attr (ir_node *node)
528 assert (node->op == op_Phi);
529 return node->attr.phi0_pos;
533 get_irn_block_attr (ir_node *node)
535 assert (node->op == op_Block);
536 return node->attr.block;
540 get_irn_load_attr (ir_node *node)
542 assert (node->op == op_Load);
543 return node->attr.load;
547 get_irn_store_attr (ir_node *node)
549 assert (node->op == op_Store);
550 return node->attr.store;
554 get_irn_except_attr (ir_node *node)
556 assert (node->op == op_Div || node->op == op_Quot ||
557 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
558 return node->attr.except;
562 get_irn_generic_attr (ir_node *node) {
566 /** manipulate fields of individual nodes **/
568 /* this works for all except Block */
570 get_nodes_block (const ir_node *node) {
571 assert (!(node->op == op_Block));
572 assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
/* position -1 of the in array holds the block predecessor */
573 return get_irn_n(node, -1);
577 set_nodes_block (ir_node *node, ir_node *block) {
578 assert (!(node->op == op_Block));
579 set_irn_n(node, -1, block);
582 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
583 * from Start. If so returns frame type, else Null. */
584 ir_type *is_frame_pointer(ir_node *n) {
585 if ((get_irn_op(n) == op_Proj) &&
586 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
587 ir_node *start = get_Proj_pred(n);
588 if (get_irn_op(start) == op_Start) {
589 return get_irg_frame_type(get_irn_irg(start));
595 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
596 * from Start. If so returns global type, else Null. */
597 ir_type *is_globals_pointer(ir_node *n) {
598 if ((get_irn_op(n) == op_Proj) &&
599 (get_Proj_proj(n) == pn_Start_P_globals)) {
600 ir_node *start = get_Proj_pred(n);
601 if (get_irn_op(start) == op_Start) {
602 return get_glob_type();
608 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
609 * from Start. If so returns 1, else 0. */
610 int is_value_arg_pointer(ir_node *n) {
611 if ((get_irn_op(n) == op_Proj) &&
612 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
613 (get_irn_op(get_Proj_pred(n)) == op_Start))
618 /* Returns an array with the predecessors of the Block. Depending on
619 the implementation of the graph data structure this can be a copy of
620 the internal representation of predecessors as well as the internal
621 array itself. Therefore writing to this array might obstruct the ir. */
623 get_Block_cfgpred_arr (ir_node *node)
625 assert ((node->op == op_Block));
626 return (ir_node **)&(get_irn_in(node)[1]);
630 (get_Block_n_cfgpreds)(ir_node *node) {
631 return _get_Block_n_cfgpreds(node);
635 (get_Block_cfgpred)(ir_node *node, int pos) {
636 return _get_Block_cfgpred(node, pos);
640 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
641 assert (node->op == op_Block);
642 set_irn_n(node, pos, pred);
646 (get_Block_cfgpred_block)(ir_node *node, int pos) {
647 return _get_Block_cfgpred_block(node, pos);
/* matured == all predecessors of the block are known */
651 get_Block_matured (ir_node *node) {
652 assert (node->op == op_Block);
653 return (int)node->attr.block.matured;
657 set_Block_matured (ir_node *node, int matured) {
658 assert (node->op == op_Block);
659 node->attr.block.matured = matured;
663 (get_Block_block_visited)(ir_node *node) {
664 return _get_Block_block_visited(node);
668 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
669 _set_Block_block_visited(node, visit);
672 /* For this current_ir_graph must be set. */
674 (mark_Block_block_visited)(ir_node *node) {
675 _mark_Block_block_visited(node);
679 (Block_not_block_visited)(ir_node *node) {
680 return _Block_not_block_visited(node);
684 get_Block_graph_arr (ir_node *node, int pos) {
685 assert (node->op == op_Block);
686 return node->attr.block.graph_arr[pos+1];
690 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
691 assert (node->op == op_Block);
692 node->attr.block.graph_arr[pos+1] = value;
/* Sets the interprocedural (call-graph) predecessors of a Block;
   slot 0 of in_cg is reserved, the real preds start at index 1. */
695 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
696 assert(node->op == op_Block);
697 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
698 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
699 node->attr.block.in_cg[0] = NULL;
700 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
702 /* Fix backedge array. fix_backedges() operates depending on
703 interprocedural_view. */
704 int ipv = get_interprocedural_view();
705 set_interprocedural_view(1);
706 fix_backedges(current_ir_graph->obst, node);
707 set_interprocedural_view(ipv);
710 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
713 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
714 assert(node->op == op_Block &&
715 node->attr.block.in_cg &&
716 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
717 node->attr.block.in_cg[pos + 1] = pred;
720 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
721 assert(node->op == op_Block);
722 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
725 int get_Block_cg_n_cfgpreds(ir_node * node) {
726 assert(node->op == op_Block);
727 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
730 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
731 assert(node->op == op_Block && node->attr.block.in_cg);
732 return node->attr.block.in_cg[pos + 1];
735 void remove_Block_cg_cfgpred_arr(ir_node * node) {
736 assert(node->op == op_Block);
737 node->attr.block.in_cg = NULL;
740 ir_node *(set_Block_dead)(ir_node *block) {
741 return _set_Block_dead(block);
744 int (is_Block_dead)(const ir_node *block) {
745 return _is_Block_dead(block);
/* Returns the extended basic block a block belongs to, or NULL. */
748 ir_extblk *get_Block_extbb(const ir_node *block) {
750 assert(is_Block(block));
751 res = block->attr.block.extblk;
752 assert(res == NULL || is_ir_extbb(res));
756 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
757 assert(is_Block(block));
758 assert(extblk == NULL || is_ir_extbb(extblk));
759 block->attr.block.extblk = extblk;
/* End node keepalive edges start at END_KEEPALIVE_OFFSET in the in array */
763 get_End_n_keepalives(ir_node *end) {
764 assert (end->op == op_End);
765 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
769 get_End_keepalive(ir_node *end, int pos) {
770 assert (end->op == op_End);
771 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
775 add_End_keepalive (ir_node *end, ir_node *ka) {
776 assert (end->op == op_End);
777 ARR_APP1 (ir_node *, end->in, ka);
781 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
782 assert (end->op == op_End);
783 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
787 free_End (ir_node *end) {
788 assert (end->op == op_End);
790 DEL_ARR_F(end->in); /* GL @@@ does not work ! */
791 end->in = NULL; /* @@@ make sure we get an error if we use the
792 in array afterwards ... */
795 /* Return the target address of an IJmp */
796 ir_node *get_IJmp_target(ir_node *ijmp) {
797 assert(ijmp->op == op_IJmp);
/* the jump target is the single (index 0) operand of the IJmp */
798 return get_irn_n(ijmp, 0);
801 /** Sets the target address of an IJmp */
802 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
803 assert(ijmp->op == op_IJmp);
804 set_irn_n(ijmp, 0, tgt);
808 > Implementing the case construct (which is where the constant Proj node is
809 > important) involves far more than simply determining the constant values.
810 > We could argue that this is more properly a function of the translator from
811 > Firm to the target machine. That could be done if there was some way of
812 > projecting "default" out of the Cond node.
813 I know it's complicated.
814 Basically there are two problems:
815 - determining the gaps between the projs
816 - determining the biggest case constant to know the proj number for
818 I see several solutions:
819 1. Introduce a ProjDefault node. Solves both problems.
820 This means to extend all optimizations executed during construction.
821 2. Give the Cond node for switch two flavors:
822 a) there are no gaps in the projs (existing flavor)
823 b) gaps may exist, default proj is still the Proj with the largest
824 projection number. This covers also the gaps.
825 3. Fix the semantic of the Cond to that of 2b)
827 Solution 2 seems to be the best:
828 Computing the gaps in the Firm representation is not too hard, i.e.,
829 libFIRM can implement a routine that transforms between the two
830 flavours. This is also possible for 1) but 2) does not require to
831 change any existing optimization.
832 Further it should be far simpler to determine the biggest constant than
834 I don't want to choose 3) as 2a) seems to have advantages for
835 dataflow analysis and 3) does not allow to convert the representation to
/* Cond: the selector (condition value) is the single operand at index 0. */
839 get_Cond_selector (ir_node *node) {
840 assert (node->op == op_Cond);
841 return get_irn_n(node, 0);
845 set_Cond_selector (ir_node *node, ir_node *selector) {
846 assert (node->op == op_Cond);
847 set_irn_n(node, 0, selector);
851 get_Cond_kind (ir_node *node) {
852 assert (node->op == op_Cond);
853 return node->attr.c.kind;
857 set_Cond_kind (ir_node *node, cond_kind kind) {
858 assert (node->op == op_Cond);
859 node->attr.c.kind = kind;
863 get_Cond_defaultProj (ir_node *node) {
864 assert (node->op == op_Cond);
865 return node->attr.c.default_proj;
/* Return: operand 0 is memory, the results follow at RETURN_RESULT_OFFSET. */
869 get_Return_mem (ir_node *node) {
870 assert (node->op == op_Return);
871 return get_irn_n(node, 0);
875 set_Return_mem (ir_node *node, ir_node *mem) {
876 assert (node->op == op_Return);
877 set_irn_n(node, 0, mem);
881 get_Return_n_ress (ir_node *node) {
882 assert (node->op == op_Return);
883 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
887 get_Return_res_arr (ir_node *node)
889 assert ((node->op == op_Return));
890 if (get_Return_n_ress(node) > 0)
891 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
898 set_Return_n_res (ir_node *node, int results) {
899 assert (node->op == op_Return);
904 get_Return_res (ir_node *node, int pos) {
905 assert (node->op == op_Return);
906 assert (get_Return_n_ress(node) > pos);
907 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
911 set_Return_res (ir_node *node, int pos, ir_node *res){
912 assert (node->op == op_Return);
913 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
916 tarval *(get_Const_tarval)(ir_node *node) {
917 return _get_Const_tarval(node);
921 set_Const_tarval (ir_node *node, tarval *con) {
922 assert (node->op == op_Const);
923 node->attr.con.tv = con;
926 cnst_classify_t (classify_Const)(ir_node *node)
928 return _classify_Const(node);
932 /* The source language type. Must be an atomic type. Mode of type must
933 be mode of node. For tarvals from entities type must be pointer to
936 get_Const_type (ir_node *node) {
937 assert (node->op == op_Const);
938 return node->attr.con.tp;
942 set_Const_type (ir_node *node, ir_type *tp) {
943 assert (node->op == op_Const);
/* firm_unknown_type is exempt from the atomic-type/mode checks */
944 if (tp != firm_unknown_type) {
945 assert (is_atomic_type(tp));
946 assert (get_type_mode(tp) == get_irn_mode(node));
948 node->attr.con.tp = tp;
/* SymConst: the meaning of attr.i.sym depends on the symconst_kind;
   each accessor asserts the kind it is valid for. */
953 get_SymConst_kind (const ir_node *node) {
954 assert (node->op == op_SymConst);
955 return node->attr.i.num;
959 set_SymConst_kind (ir_node *node, symconst_kind num) {
960 assert (node->op == op_SymConst);
961 node->attr.i.num = num;
965 get_SymConst_type (ir_node *node) {
966 assert ( (node->op == op_SymConst)
967 && ( get_SymConst_kind(node) == symconst_type_tag
968 || get_SymConst_kind(node) == symconst_size));
/* normalizes the stored type via skip_tid() on every access */
969 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
973 set_SymConst_type (ir_node *node, ir_type *tp) {
974 assert ( (node->op == op_SymConst)
975 && ( get_SymConst_kind(node) == symconst_type_tag
976 || get_SymConst_kind(node) == symconst_size));
977 node->attr.i.sym.type_p = tp;
981 get_SymConst_name (ir_node *node) {
982 assert ( (node->op == op_SymConst)
983 && (get_SymConst_kind(node) == symconst_addr_name));
984 return node->attr.i.sym.ident_p;
988 set_SymConst_name (ir_node *node, ident *name) {
989 assert ( (node->op == op_SymConst)
990 && (get_SymConst_kind(node) == symconst_addr_name));
991 node->attr.i.sym.ident_p = name;
995 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
996 entity *get_SymConst_entity (ir_node *node) {
997 assert ( (node->op == op_SymConst)
998 && (get_SymConst_kind (node) == symconst_addr_ent));
999 return node->attr.i.sym.entity_p;
1002 void set_SymConst_entity (ir_node *node, entity *ent) {
1003 assert ( (node->op == op_SymConst)
1004 && (get_SymConst_kind(node) == symconst_addr_ent));
1005 node->attr.i.sym.entity_p = ent;
/* raw (kind-independent) access to the whole symbol union */
1008 union symconst_symbol
1009 get_SymConst_symbol (ir_node *node) {
1010 assert (node->op == op_SymConst);
1011 return node->attr.i.sym;
1015 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1016 assert (node->op == op_SymConst);
1018 node->attr.i.sym = sym;
1022 get_SymConst_value_type (ir_node *node) {
1023 assert (node->op == op_SymConst);
1024 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1025 return node->attr.i.tp;
1029 set_SymConst_value_type (ir_node *node, ir_type *tp) {
1030 assert (node->op == op_SymConst);
1031 node->attr.i.tp = tp;
/* Sel: operand 0 is memory, operand 1 the compound pointer,
   index operands follow at SEL_INDEX_OFFSET. */
1035 get_Sel_mem (ir_node *node) {
1036 assert (node->op == op_Sel);
1037 return get_irn_n(node, 0);
1041 set_Sel_mem (ir_node *node, ir_node *mem) {
1042 assert (node->op == op_Sel);
1043 set_irn_n(node, 0, mem);
1047 get_Sel_ptr (ir_node *node) {
1048 assert (node->op == op_Sel);
1049 return get_irn_n(node, 1);
1053 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1054 assert (node->op == op_Sel);
1055 set_irn_n(node, 1, ptr);
1059 get_Sel_n_indexs (ir_node *node) {
1060 assert (node->op == op_Sel);
1061 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1065 get_Sel_index_arr (ir_node *node)
1067 assert ((node->op == op_Sel));
1068 if (get_Sel_n_indexs(node) > 0)
1069 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1075 get_Sel_index (ir_node *node, int pos) {
1076 assert (node->op == op_Sel);
1077 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1081 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1082 assert (node->op == op_Sel);
1083 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1087 get_Sel_entity (ir_node *node) {
1088 assert (node->op == op_Sel);
1089 return node->attr.s.ent;
1093 set_Sel_entity (ir_node *node, entity *ent) {
1094 assert (node->op == op_Sel);
1095 node->attr.s.ent = ent;
1099 /* For unary and binary arithmetic operations the access to the
1100 operands can be factored out. Left is the first, right the
1101 second arithmetic value as listed in tech report 0999-33.
1102 unops are: Minus, Abs, Not, Conv, Cast
1103 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1104 Shr, Shrs, Rotate, Cmp */
/* Call: operand 0 is memory, operand 1 the callee pointer,
   parameters follow at CALL_PARAM_OFFSET. */
1108 get_Call_mem (ir_node *node) {
1109 assert (node->op == op_Call);
1110 return get_irn_n(node, 0);
1114 set_Call_mem (ir_node *node, ir_node *mem) {
1115 assert (node->op == op_Call);
1116 set_irn_n(node, 0, mem);
1120 get_Call_ptr (ir_node *node) {
1121 assert (node->op == op_Call);
1122 return get_irn_n(node, 1);
1126 set_Call_ptr (ir_node *node, ir_node *ptr) {
1127 assert (node->op == op_Call);
1128 set_irn_n(node, 1, ptr);
1132 get_Call_param_arr (ir_node *node) {
1133 assert (node->op == op_Call);
1134 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1138 get_Call_n_params (ir_node *node) {
1139 assert (node->op == op_Call);
1140 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1144 get_Call_arity (ir_node *node) {
1145 assert (node->op == op_Call);
1146 return get_Call_n_params(node);
1150 set_Call_arity (ir_node *node, ir_node *arity) {
1151 assert (node->op == op_Call);
1156 get_Call_param (ir_node *node, int pos) {
1157 assert (node->op == op_Call);
1158 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1162 set_Call_param (ir_node *node, int pos, ir_node *param) {
1163 assert (node->op == op_Call);
1164 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
/* normalizes the stored callee type via skip_tid() on every access */
1168 get_Call_type (ir_node *node) {
1169 assert (node->op == op_Call);
1170 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1174 set_Call_type (ir_node *node, ir_type *tp) {
1175 assert (node->op == op_Call);
1176 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1177 node->attr.call.cld_tp = tp;
/* callee info: valid only after a callee analysis has run on the irg */
1180 int Call_has_callees(ir_node *node) {
1181 assert(node && node->op == op_Call);
1182 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1183 (node->attr.call.callee_arr != NULL));
1186 int get_Call_n_callees(ir_node * node) {
1187 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1188 return ARR_LEN(node->attr.call.callee_arr);
1191 entity * get_Call_callee(ir_node * node, int pos) {
1192 assert(pos >= 0 && pos < get_Call_n_callees(node));
1193 return node->attr.call.callee_arr[pos];
1196 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1197 assert(node->op == op_Call);
1198 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1199 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1201 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1204 void remove_Call_callee_arr(ir_node * node) {
1205 assert(node->op == op_Call);
1206 node->attr.call.callee_arr = NULL;
/* CallBegin: interprocedural-view counterpart of a Call */
1209 ir_node * get_CallBegin_ptr (ir_node *node) {
1210 assert(node->op == op_CallBegin);
1211 return get_irn_n(node, 0);
1213 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1214 assert(node->op == op_CallBegin);
1215 set_irn_n(node, 0, ptr);
1217 ir_node * get_CallBegin_call (ir_node *node) {
1218 assert(node->op == op_CallBegin);
1219 return node->attr.callbegin.call;
1221 void set_CallBegin_call (ir_node *node, ir_node *call) {
1222 assert(node->op == op_CallBegin);
1223 node->attr.callbegin.call = call;
/* Macro-generated accessors: for each binary op OP this expands to
   get/set_OP_left and get/set_OP_right, for each unary op to get/set_OP_op.
   op->op_index is the position of the first data operand. */
1228 ir_node * get_##OP##_left(ir_node *node) { \
1229 assert(node->op == op_##OP); \
1230 return get_irn_n(node, node->op->op_index); \
1232 void set_##OP##_left(ir_node *node, ir_node *left) { \
1233 assert(node->op == op_##OP); \
1234 set_irn_n(node, node->op->op_index, left); \
1236 ir_node *get_##OP##_right(ir_node *node) { \
1237 assert(node->op == op_##OP); \
1238 return get_irn_n(node, node->op->op_index + 1); \
1240 void set_##OP##_right(ir_node *node, ir_node *right) { \
1241 assert(node->op == op_##OP); \
1242 set_irn_n(node, node->op->op_index + 1, right); \
1246 ir_node *get_##OP##_op(ir_node *node) { \
1247 assert(node->op == op_##OP); \
1248 return get_irn_n(node, node->op->op_index); \
1250 void set_##OP##_op (ir_node *node, ir_node *op) { \
1251 assert(node->op == op_##OP); \
1252 set_irn_n(node, node->op->op_index, op); \
1262 get_Quot_mem (ir_node *node) {
1263 assert (node->op == op_Quot);
1264 return get_irn_n(node, 0);
1268 set_Quot_mem (ir_node *node, ir_node *mem) {
1269 assert (node->op == op_Quot);
1270 set_irn_n(node, 0, mem);
1276 get_DivMod_mem (ir_node *node) {
1277 assert (node->op == op_DivMod);
1278 return get_irn_n(node, 0);
1282 set_DivMod_mem (ir_node *node, ir_node *mem) {
1283 assert (node->op == op_DivMod);
1284 set_irn_n(node, 0, mem);
1290 get_Div_mem (ir_node *node) {
1291 assert (node->op == op_Div);
1292 return get_irn_n(node, 0);
1296 set_Div_mem (ir_node *node, ir_node *mem) {
1297 assert (node->op == op_Div);
1298 set_irn_n(node, 0, mem);
1304 get_Mod_mem (ir_node *node) {
1305 assert (node->op == op_Mod);
1306 return get_irn_n(node, 0);
1310 set_Mod_mem (ir_node *node, ir_node *mem) {
1311 assert (node->op == op_Mod);
1312 set_irn_n(node, 0, mem);
1329 get_Cast_type (ir_node *node) {
1330 assert (node->op == op_Cast);
1331 return node->attr.cast.totype;
1335 set_Cast_type (ir_node *node, ir_type *to_tp) {
1336 assert (node->op == op_Cast);
1337 node->attr.cast.totype = to_tp;
1341 /* Checks for upcast.
1343 * Returns true if the Cast node casts a class type to a super type.
1345 int is_Cast_upcast(ir_node *node) {
1346 ir_type *totype = get_Cast_type(node);
1347 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1348 ir_graph *myirg = get_irn_irg(node);
/* requires a consistent typeinfo analysis on the graph */
1350 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
/* strip matching levels of pointer indirection before the class check */
1353 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1354 totype = get_pointer_points_to_type(totype);
1355 fromtype = get_pointer_points_to_type(fromtype);
1360 if (!is_Class_type(totype)) return 0;
1361 return is_SubClass_of(fromtype, totype);
1364 /* Checks for downcast.
1366 * Returns true if the Cast node casts a class type to a sub type.
1368 int is_Cast_downcast(ir_node *node) {
1369 ir_type *totype = get_Cast_type(node);
1370 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1372 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1375 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1376 totype = get_pointer_points_to_type(totype);
1377 fromtype = get_pointer_points_to_type(fromtype);
1382 if (!is_Class_type(totype)) return 0;
1383 return is_SubClass_of(totype, fromtype);
1387 (is_unop)(const ir_node *node) {
1388 return _is_unop(node);
/* generic operand access by op arity; asserts when the op is not unary */
1392 get_unop_op (ir_node *node) {
1393 if (node->op->opar == oparity_unary)
1394 return get_irn_n(node, node->op->op_index);
1396 assert(node->op->opar == oparity_unary);
1401 set_unop_op (ir_node *node, ir_node *op) {
1402 if (node->op->opar == oparity_unary)
1403 set_irn_n(node, node->op->op_index, op);
1405 assert(node->op->opar == oparity_unary);
1409 (is_binop)(const ir_node *node) {
1410 return _is_binop(node);
1414 get_binop_left (ir_node *node) {
1415 if (node->op->opar == oparity_binary)
1416 return get_irn_n(node, node->op->op_index);
1418 assert(node->op->opar == oparity_binary);
1423 set_binop_left (ir_node *node, ir_node *left) {
1424 if (node->op->opar == oparity_binary)
1425 set_irn_n(node, node->op->op_index, left);
1427 assert (node->op->opar == oparity_binary);
1431 get_binop_right (ir_node *node) {
1432 if (node->op->opar == oparity_binary)
1433 return get_irn_n(node, node->op->op_index + 1);
1435 assert(node->op->opar == oparity_binary);
1440 set_binop_right (ir_node *node, ir_node *right) {
1441 if (node->op->opar == oparity_binary)
1442 set_irn_n(node, node->op->op_index + 1, right);
1444 assert (node->op->opar == oparity_binary);
/* A Phi0 (zero-arity Phi during construction) is not considered a Phi here. */
1447 int is_Phi (const ir_node *n) {
1453 if (op == op_Filter) return get_interprocedural_view();
1456 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1457 (get_irn_arity(n) > 0));
1462 int is_Phi0 (const ir_node *n) {
1465 return ((get_irn_op(n) == op_Phi) &&
1466 (get_irn_arity(n) == 0) &&
1467 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1471 get_Phi_preds_arr (ir_node *node) {
1472 assert (node->op == op_Phi);
1473 return (ir_node **)&(get_irn_in(node)[1]);
1477 get_Phi_n_preds (ir_node *node) {
1478 assert (is_Phi(node) || is_Phi0(node));
1479 return (get_irn_arity(node));
1483 void set_Phi_n_preds (ir_node *node, int n_preds) {
1484 assert (node->op == op_Phi);
1489 get_Phi_pred (ir_node *node, int pos) {
1490 assert (is_Phi(node) || is_Phi0(node));
1491 return get_irn_n(node, pos);
1495 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1496 assert (is_Phi(node) || is_Phi0(node));
1497 set_irn_n(node, pos, pred);
/* memops (Load/Store) share the layout: operand 0 memory, operand 1 pointer */
1501 int is_memop(ir_node *node) {
1502 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1505 ir_node *get_memop_mem (ir_node *node) {
1506 assert(is_memop(node));
1507 return get_irn_n(node, 0);
1510 void set_memop_mem (ir_node *node, ir_node *mem) {
1511 assert(is_memop(node));
1512 set_irn_n(node, 0, mem);
1515 ir_node *get_memop_ptr (ir_node *node) {
1516 assert(is_memop(node));
1517 return get_irn_n(node, 1);
1520 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1521 assert(is_memop(node));
1522 set_irn_n(node, 1, ptr);
1526 get_Load_mem (ir_node *node) {
1527 assert (node->op == op_Load);
1528 return get_irn_n(node, 0);
1532 set_Load_mem (ir_node *node, ir_node *mem) {
1533 assert (node->op == op_Load);
1534 set_irn_n(node, 0, mem);
1538 get_Load_ptr (ir_node *node) {
1539 assert (node->op == op_Load);
1540 return get_irn_n(node, 1);
1544 set_Load_ptr (ir_node *node, ir_node *ptr) {
1545 assert (node->op == op_Load);
1546 set_irn_n(node, 1, ptr);
1550 get_Load_mode (ir_node *node) {
1551 assert (node->op == op_Load);
1552 return node->attr.load.load_mode;
1556 set_Load_mode (ir_node *node, ir_mode *mode) {
1557 assert (node->op == op_Load);
1558 node->attr.load.load_mode = mode;
1562 get_Load_volatility (ir_node *node) {
1563 assert (node->op == op_Load);
1564 return node->attr.load.volatility;
1568 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1569 assert (node->op == op_Load);
1570 node->attr.load.volatility = volatility;
1575 get_Store_mem (ir_node *node) {
1576 assert (node->op == op_Store);
1577 return get_irn_n(node, 0);
1581 set_Store_mem (ir_node *node, ir_node *mem) {
1582 assert (node->op == op_Store);
1583 set_irn_n(node, 0, mem);
1587 get_Store_ptr (ir_node *node) {
1588 assert (node->op == op_Store);
1589 return get_irn_n(node, 1);
1593 set_Store_ptr (ir_node *node, ir_node *ptr) {
1594 assert (node->op == op_Store);
1595 set_irn_n(node, 1, ptr);
1599 get_Store_value (ir_node *node) {
1600 assert (node->op == op_Store);
1601 return get_irn_n(node, 2);
1605 set_Store_value (ir_node *node, ir_node *value) {
1606 assert (node->op == op_Store);
1607 set_irn_n(node, 2, value);
1611 get_Store_volatility (ir_node *node) {
1612 assert (node->op == op_Store);
1613 return node->attr.store.volatility;
1617 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1618 assert (node->op == op_Store);
1619 node->attr.store.volatility = volatility;
1624 get_Alloc_mem (ir_node *node) {
1625 assert (node->op == op_Alloc);
1626 return get_irn_n(node, 0);
1630 set_Alloc_mem (ir_node *node, ir_node *mem) {
1631 assert (node->op == op_Alloc);
1632 set_irn_n(node, 0, mem);
1636 get_Alloc_size (ir_node *node) {
1637 assert (node->op == op_Alloc);
1638 return get_irn_n(node, 1);
1642 set_Alloc_size (ir_node *node, ir_node *size) {
1643 assert (node->op == op_Alloc);
1644 set_irn_n(node, 1, size);
1648 get_Alloc_type (ir_node *node) {
1649 assert (node->op == op_Alloc);
1650 return node->attr.a.type = skip_tid(node->attr.a.type);
1654 set_Alloc_type (ir_node *node, ir_type *tp) {
1655 assert (node->op == op_Alloc);
1656 node->attr.a.type = tp;
1660 get_Alloc_where (ir_node *node) {
1661 assert (node->op == op_Alloc);
1662 return node->attr.a.where;
1666 set_Alloc_where (ir_node *node, where_alloc where) {
1667 assert (node->op == op_Alloc);
1668 node->attr.a.where = where;
1673 get_Free_mem (ir_node *node) {
1674 assert (node->op == op_Free);
1675 return get_irn_n(node, 0);
1679 set_Free_mem (ir_node *node, ir_node *mem) {
1680 assert (node->op == op_Free);
1681 set_irn_n(node, 0, mem);
1685 get_Free_ptr (ir_node *node) {
1686 assert (node->op == op_Free);
1687 return get_irn_n(node, 1);
1691 set_Free_ptr (ir_node *node, ir_node *ptr) {
1692 assert (node->op == op_Free);
1693 set_irn_n(node, 1, ptr);
1697 get_Free_size (ir_node *node) {
1698 assert (node->op == op_Free);
1699 return get_irn_n(node, 2);
1703 set_Free_size (ir_node *node, ir_node *size) {
1704 assert (node->op == op_Free);
1705 set_irn_n(node, 2, size);
1709 get_Free_type (ir_node *node) {
1710 assert (node->op == op_Free);
1711 return node->attr.f.type = skip_tid(node->attr.f.type);
1715 set_Free_type (ir_node *node, ir_type *tp) {
1716 assert (node->op == op_Free);
1717 node->attr.f.type = tp;
1721 get_Free_where (ir_node *node) {
1722 assert (node->op == op_Free);
1723 return node->attr.f.where;
1727 set_Free_where (ir_node *node, where_alloc where) {
1728 assert (node->op == op_Free);
1729 node->attr.f.where = where;
1733 get_Sync_preds_arr (ir_node *node) {
1734 assert (node->op == op_Sync);
1735 return (ir_node **)&(get_irn_in(node)[1]);
1739 get_Sync_n_preds (ir_node *node) {
1740 assert (node->op == op_Sync);
1741 return (get_irn_arity(node));
1746 set_Sync_n_preds (ir_node *node, int n_preds) {
1747 assert (node->op == op_Sync);
1752 get_Sync_pred (ir_node *node, int pos) {
1753 assert (node->op == op_Sync);
1754 return get_irn_n(node, pos);
1758 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1759 assert (node->op == op_Sync);
1760 set_irn_n(node, pos, pred);
/* Returns the type the value produced by a Proj node carries, where that
   can be derived from the Proj's predecessor (Start/Call results via the
   method type, Load via the selected entity). */
ir_type *get_Proj_type(ir_node *n)
  ir_node *pred = get_Proj_pred(n);

  switch (get_irn_opcode(pred)) {
    /* Deal with Start / Call here: we need to know the Proj Nr. */
    assert(get_irn_mode(pred) == mode_T);
    pred_pred = get_Proj_pred(pred);
    if (get_irn_op(pred_pred) == op_Start) {
      /* Proj(Proj(Start)): a parameter -- look it up in the method type
         of the graph's entity. */
      ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
      tp = get_method_param_type(mtp, get_Proj_proj(n));
    } else if (get_irn_op(pred_pred) == op_Call) {
      /* Proj(Proj(Call)): a call result -- look it up in the call's type. */
      ir_type *mtp = get_Call_type(pred_pred);
      tp = get_method_res_type(mtp, get_Proj_proj(n));
  /* a Proj directly off Start/Call carries no single value type */
  case iro_Start: break;
  case iro_Call: break;
    /* Load: the loaded value has the type of the entity selected by the address. */
    ir_node *a = get_Load_ptr(pred);
    tp = get_entity_type(get_Sel_entity(a));
1796 get_Proj_pred (const ir_node *node) {
1797 assert (is_Proj(node));
1798 return get_irn_n(node, 0);
1802 set_Proj_pred (ir_node *node, ir_node *pred) {
1803 assert (is_Proj(node));
1804 set_irn_n(node, 0, pred);
1808 get_Proj_proj (const ir_node *node) {
1809 assert (is_Proj(node));
1810 if (get_irn_opcode(node) == iro_Proj) {
1811 return node->attr.proj;
1813 assert(get_irn_opcode(node) == iro_Filter);
1814 return node->attr.filter.proj;
1819 set_Proj_proj (ir_node *node, long proj) {
1820 assert (node->op == op_Proj);
1821 node->attr.proj = proj;
1825 get_Tuple_preds_arr (ir_node *node) {
1826 assert (node->op == op_Tuple);
1827 return (ir_node **)&(get_irn_in(node)[1]);
1831 get_Tuple_n_preds (ir_node *node) {
1832 assert (node->op == op_Tuple);
1833 return (get_irn_arity(node));
1838 set_Tuple_n_preds (ir_node *node, int n_preds) {
1839 assert (node->op == op_Tuple);
1844 get_Tuple_pred (ir_node *node, int pos) {
1845 assert (node->op == op_Tuple);
1846 return get_irn_n(node, pos);
1850 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1851 assert (node->op == op_Tuple);
1852 set_irn_n(node, pos, pred);
1856 get_Id_pred (ir_node *node) {
1857 assert (node->op == op_Id);
1858 return get_irn_n(node, 0);
1862 set_Id_pred (ir_node *node, ir_node *pred) {
1863 assert (node->op == op_Id);
1864 set_irn_n(node, 0, pred);
1867 ir_node *get_Confirm_value (ir_node *node) {
1868 assert (node->op == op_Confirm);
1869 return get_irn_n(node, 0);
1871 void set_Confirm_value (ir_node *node, ir_node *value) {
1872 assert (node->op == op_Confirm);
1873 set_irn_n(node, 0, value);
1875 ir_node *get_Confirm_bound (ir_node *node) {
1876 assert (node->op == op_Confirm);
1877 return get_irn_n(node, 1);
1879 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1880 assert (node->op == op_Confirm);
1881 set_irn_n(node, 0, bound);
1883 pn_Cmp get_Confirm_cmp (ir_node *node) {
1884 assert (node->op == op_Confirm);
1885 return node->attr.confirm_cmp;
1887 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1888 assert (node->op == op_Confirm);
1889 node->attr.confirm_cmp = cmp;
1894 get_Filter_pred (ir_node *node) {
1895 assert(node->op == op_Filter);
1899 set_Filter_pred (ir_node *node, ir_node *pred) {
1900 assert(node->op == op_Filter);
1904 get_Filter_proj(ir_node *node) {
1905 assert(node->op == op_Filter);
1906 return node->attr.filter.proj;
1909 set_Filter_proj (ir_node *node, long proj) {
1910 assert(node->op == op_Filter);
1911 node->attr.filter.proj = proj;
1914 /* Don't use get_irn_arity, get_irn_n in implementation as access
1915 shall work independent of view!!! */
void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Filter);
  /* (Re)allocate the interprocedural predecessor array when it is missing
     or its arity changed (slot 0 is reserved for the block, hence the +1). */
  if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
    node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
    /* no backedges known initially */
    memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
    /* slot 0 mirrors the block predecessor of the node */
    node->attr.filter.in_cg[0] = node->in[0];
  /* copy the cg predecessors behind the block slot */
  memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1927 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1928 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1929 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1930 node->attr.filter.in_cg[pos + 1] = pred;
1932 int get_Filter_n_cg_preds(ir_node *node) {
1933 assert(node->op == op_Filter && node->attr.filter.in_cg);
1934 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1936 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1938 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1940 arity = ARR_LEN(node->attr.filter.in_cg);
1941 assert(pos < arity - 1);
1942 return node->attr.filter.in_cg[pos + 1];
1946 ir_node *get_Mux_sel (ir_node *node) {
1947 if (node->op == op_Psi) {
1948 assert(get_irn_arity(node) == 3);
1949 return get_Psi_cond(node, 0);
1951 assert(node->op == op_Mux);
1954 void set_Mux_sel (ir_node *node, ir_node *sel) {
1955 if (node->op == op_Psi) {
1956 assert(get_irn_arity(node) == 3);
1957 set_Psi_cond(node, 0, sel);
1960 assert(node->op == op_Mux);
1965 ir_node *get_Mux_false (ir_node *node) {
1966 if (node->op == op_Psi) {
1967 assert(get_irn_arity(node) == 3);
1968 return get_Psi_default(node);
1970 assert(node->op == op_Mux);
1973 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1974 if (node->op == op_Psi) {
1975 assert(get_irn_arity(node) == 3);
1976 set_Psi_default(node, ir_false);
1979 assert(node->op == op_Mux);
1980 node->in[2] = ir_false;
1984 ir_node *get_Mux_true (ir_node *node) {
1985 if (node->op == op_Psi) {
1986 assert(get_irn_arity(node) == 3);
1987 return get_Psi_val(node, 0);
1989 assert(node->op == op_Mux);
1992 void set_Mux_true (ir_node *node, ir_node *ir_true) {
1993 if (node->op == op_Psi) {
1994 assert(get_irn_arity(node) == 3);
1995 set_Psi_val(node, 0, ir_true);
1998 assert(node->op == op_Mux);
1999 node->in[3] = ir_true;
2004 ir_node *get_Psi_cond (ir_node *node, int pos) {
2005 int num_conds = get_Psi_n_conds(node);
2006 assert(node->op == op_Psi);
2007 assert(pos < num_conds);
2008 return node->in[1 + 2 * pos];
2011 void set_Psi_cond (ir_node *node, int pos, ir_node *cond) {
2012 int num_conds = get_Psi_n_conds(node);
2013 assert(node->op == op_Psi);
2014 assert(pos < num_conds);
2015 node->in[1 + 2 * pos] = cond;
2018 ir_node *get_Psi_val (ir_node *node, int pos) {
2019 int num_vals = get_Psi_n_conds(node);
2020 assert(node->op == op_Psi);
2021 assert(pos < num_vals);
2022 return node->in[1 + 2 * pos + 1];
2025 void set_Psi_val (ir_node *node, int pos, ir_node *val) {
2026 int num_vals = get_Psi_n_conds(node);
2027 assert(node->op == op_Psi);
2028 assert(pos < num_vals);
2029 node->in[1 + 2 * pos + 1] = val;
2032 ir_node *get_Psi_default(ir_node *node) {
2033 int def_pos = get_irn_arity(node);
2034 assert(node->op == op_Psi);
2035 return node->in[def_pos];
2038 void set_Psi_default(ir_node *node, ir_node *val) {
2039 int def_pos = get_irn_arity(node);
2040 assert(node->op == op_Psi);
2041 node->in[def_pos] = node;
2044 int (get_Psi_n_conds)(ir_node *node) {
2045 return _get_Psi_n_conds(node);
2049 ir_node *get_CopyB_mem (ir_node *node) {
2050 assert (node->op == op_CopyB);
2051 return get_irn_n(node, 0);
2054 void set_CopyB_mem (ir_node *node, ir_node *mem) {
2055 assert (node->op == op_CopyB);
2056 set_irn_n(node, 0, mem);
2059 ir_node *get_CopyB_dst (ir_node *node) {
2060 assert (node->op == op_CopyB);
2061 return get_irn_n(node, 1);
2064 void set_CopyB_dst (ir_node *node, ir_node *dst) {
2065 assert (node->op == op_CopyB);
2066 set_irn_n(node, 1, dst);
2069 ir_node *get_CopyB_src (ir_node *node) {
2070 assert (node->op == op_CopyB);
2071 return get_irn_n(node, 2);
2074 void set_CopyB_src (ir_node *node, ir_node *src) {
2075 assert (node->op == op_CopyB);
2076 set_irn_n(node, 2, src);
2079 ir_type *get_CopyB_type(ir_node *node) {
2080 assert (node->op == op_CopyB);
2081 return node->attr.copyb.data_type;
2084 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2085 assert (node->op == op_CopyB && data_type);
2086 node->attr.copyb.data_type = data_type;
2091 get_InstOf_type (ir_node *node) {
2092 assert (node->op = op_InstOf);
2093 return node->attr.io.type;
2097 set_InstOf_type (ir_node *node, ir_type *type) {
2098 assert (node->op = op_InstOf);
2099 node->attr.io.type = type;
2103 get_InstOf_store (ir_node *node) {
2104 assert (node->op = op_InstOf);
2105 return get_irn_n(node, 0);
2109 set_InstOf_store (ir_node *node, ir_node *obj) {
2110 assert (node->op = op_InstOf);
2111 set_irn_n(node, 0, obj);
2115 get_InstOf_obj (ir_node *node) {
2116 assert (node->op = op_InstOf);
2117 return get_irn_n(node, 1);
2121 set_InstOf_obj (ir_node *node, ir_node *obj) {
2122 assert (node->op = op_InstOf);
2123 set_irn_n(node, 1, obj);
2126 /* Returns the memory input of a Raise operation. */
2128 get_Raise_mem (ir_node *node) {
2129 assert (node->op == op_Raise);
2130 return get_irn_n(node, 0);
2134 set_Raise_mem (ir_node *node, ir_node *mem) {
2135 assert (node->op == op_Raise);
2136 set_irn_n(node, 0, mem);
2140 get_Raise_exo_ptr (ir_node *node) {
2141 assert (node->op == op_Raise);
2142 return get_irn_n(node, 1);
2146 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
2147 assert (node->op == op_Raise);
2148 set_irn_n(node, 1, exo_ptr);
2153 /* Returns the memory input of a Bound operation. */
2154 ir_node *get_Bound_mem(ir_node *bound) {
2155 assert (bound->op == op_Bound);
2156 return get_irn_n(bound, 0);
2159 void set_Bound_mem (ir_node *bound, ir_node *mem) {
2160 assert (bound->op == op_Bound);
2161 set_irn_n(bound, 0, mem);
2164 /* Returns the index input of a Bound operation. */
2165 ir_node *get_Bound_index(ir_node *bound) {
2166 assert (bound->op == op_Bound);
2167 return get_irn_n(bound, 1);
2170 void set_Bound_index(ir_node *bound, ir_node *idx) {
2171 assert (bound->op == op_Bound);
2172 set_irn_n(bound, 1, idx);
2175 /* Returns the lower bound input of a Bound operation. */
2176 ir_node *get_Bound_lower(ir_node *bound) {
2177 assert (bound->op == op_Bound);
2178 return get_irn_n(bound, 2);
2181 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2182 assert (bound->op == op_Bound);
2183 set_irn_n(bound, 2, lower);
2186 /* Returns the upper bound input of a Bound operation. */
2187 ir_node *get_Bound_upper(ir_node *bound) {
2188 assert (bound->op == op_Bound);
2189 return get_irn_n(bound, 3);
2192 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2193 assert (bound->op == op_Bound);
2194 set_irn_n(bound, 3, upper);
2197 /* returns the graph of a node */
2199 get_irn_irg(const ir_node *node) {
2201 * Do not use get_nodes_Block() here, because this
2202 * will check the pinned state.
2203 * However even a 'wrong' block is always in the proper
2206 if (! is_Block(node))
2207 node = get_irn_n(node, -1);
2208 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2209 node = get_irn_n(node, -1);
2210 assert(get_irn_op(node) == op_Block);
2211 return node->attr.block.irg;
2215 /*----------------------------------------------------------------*/
2216 /* Auxiliary routines */
2217 /*----------------------------------------------------------------*/
2220 skip_Proj (ir_node *node) {
2221 /* don't assert node !!! */
2222 if (node && is_Proj(node)) {
2223 return get_Proj_pred(node);
/* Skips Tuples: a Proj whose (Id-skipped) predecessor is a Tuple is replaced
   by the Tuple's corresponding predecessor; nested Proj(Tuple) chains are
   resolved recursively.  Disabled when normalization is switched off. */
skip_Tuple (ir_node *node) {

  if (!get_opt_normalize()) return node;

  node = skip_Id(node);
  if (get_irn_op(node) == op_Proj) {
    pred = skip_Id(get_Proj_pred(node));
    op = get_irn_op(pred);

    /*
     * Looks strange but calls get_irn_op() only once
     * in most often cases.
     */
    if (op == op_Proj) { /* nested Tuple ? */
      /* resolve the inner Proj(Tuple) first, then retry on the result */
      pred = skip_Id(skip_Tuple(pred));
      op = get_irn_op(pred);

      if (op == op_Tuple) {
        node = get_Tuple_pred(pred, get_Proj_proj(node));
    /* direct Proj of a Tuple: replace by the selected Tuple input */
    else if (op == op_Tuple) {
      node = get_Tuple_pred(pred, get_Proj_proj(node));
2263 /* returns operand of node if node is a Cast */
2264 ir_node *skip_Cast (ir_node *node) {
2265 if (node && get_irn_op(node) == op_Cast)
2266 return get_Cast_op(node);
2270 /* returns operand of node if node is a Confirm */
2271 ir_node *skip_Confirm (ir_node *node) {
2272 if (node && get_irn_op(node) == op_Confirm)
2273 return get_Confirm_value(node);
2277 /* skip all high-level ops */
2278 ir_node *skip_HighLevel(ir_node *node) {
2279 if (node && is_op_highlevel(get_irn_op(node)))
2280 return get_irn_n(node, 0);
2285 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2286 * than any other approach, as Id chains are resolved and all point to the real node, or
2287 * all id's are self loops.
2289 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2292 skip_Id (ir_node *node) {
2293 /* don't assert node !!! */
2295 /* Don't use get_Id_pred: We get into an endless loop for
2296 self-referencing Ids. */
2297 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2298 ir_node *rem_pred = node->in[0+1];
2301 assert (get_irn_arity (node) > 0);
2303 node->in[0+1] = node;
2304 res = skip_Id(rem_pred);
2305 if (res->op == op_Id) /* self-loop */ return node;
2307 node->in[0+1] = res;
2314 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2315 * than any other approach, as Id chains are resolved and all point to the real node, or
2316 * all id's are self loops.
2318 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2319 * a little bit "hand optimized".
2321 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
/* Compacts Id chains: resolves a chain of Id nodes so that all of them point
   to the real node (or become self-loops).  Cannot be switched off via
   get_opt_normalize().  Hand-optimized: takes ~10% of a compiler run. */
skip_Id (ir_node *node) {

  /* don't assert node !!! */

  if (!node || (node->op != op_Id)) return node;

  /* Don't use get_Id_pred(): We get into an endless loop for
     self-referencing Ids. */
  pred = node->in[0+1];

  if (pred->op != op_Id) return pred;

  if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
    ir_node *rem_pred, *res;

    if (pred->op != op_Id) return pred; /* shortcut */

    assert (get_irn_arity (node) > 0);

    /* Temporarily turn this node into a self-loop so the recursion below
       terminates even on cyclic Id chains. */
    node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
    res = skip_Id(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;

    node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2356 void skip_Id_and_store(ir_node **node) {
2359 if (!n || (n->op != op_Id)) return;
2361 /* Don't use get_Id_pred(): We get into an endless loop for
2362 self-referencing Ids. */
2367 (is_Bad)(const ir_node *node) {
2368 return _is_Bad(node);
2372 (is_Const)(const ir_node *node) {
2373 return _is_Const(node);
2377 (is_no_Block)(const ir_node *node) {
2378 return _is_no_Block(node);
2382 (is_Block)(const ir_node *node) {
2383 return _is_Block(node);
2386 /* returns true if node is an Unknown node. */
2388 (is_Unknown)(const ir_node *node) {
2389 return _is_Unknown(node);
2392 /* returns true if node is a Return node. */
2394 (is_Return)(const ir_node *node) {
2395 return _is_Return(node);
2398 /* returns true if node is a Call node. */
2400 (is_Call)(const ir_node *node) {
2401 return _is_Call(node);
2404 /* returns true if node is a Sel node. */
2406 (is_Sel)(const ir_node *node) {
2407 return _is_Sel(node);
2410 /* returns true if node is a Mux node or a Psi with only one condition. */
2412 (is_Mux)(const ir_node *node) {
2413 return _is_Mux(node);
2417 is_Proj (const ir_node *node) {
2419 return node->op == op_Proj
2420 || (!get_interprocedural_view() && node->op == op_Filter);
2423 /* Returns true if the operation manipulates control flow. */
2425 is_cfop(const ir_node *node) {
2426 return is_cfopcode(get_irn_op(node));
2429 /* Returns true if the operation manipulates interprocedural control flow:
2430 CallBegin, EndReg, EndExcept */
2431 int is_ip_cfop(const ir_node *node) {
2432 return is_ip_cfopcode(get_irn_op(node));
2435 /* Returns true if the operation can change the control flow because
2438 is_fragile_op(const ir_node *node) {
2439 return is_op_fragile(get_irn_op(node));
2442 /* Returns the memory operand of fragile operations. */
/* Returns the memory operand of fragile operations (ops that may change
   control flow by raising an exception). */
ir_node *get_fragile_op_mem(ir_node *node) {
  assert(node && is_fragile_op(node));

  switch (get_irn_opcode (node)) {
    /* for the memory-consuming fragile ops, mem is predecessor 0 */
    return get_irn_n(node, 0);
    /* any other opcode is not a fragile op with a memory operand */
    assert(0 && "should not be reached");
2465 /* Returns true if the operation is a forking control flow operation. */
2466 int (is_irn_forking)(const ir_node *node) {
2467 return _is_irn_forking(node);
2470 /* Return the type associated with the value produced by n
2471 * if the node remarks this type as it is the case for
2472 * Cast, Const, SymConst and some Proj nodes. */
2473 ir_type *(get_irn_type)(ir_node *node) {
2474 return _get_irn_type(node);
2477 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2479 ir_type *(get_irn_type_attr)(ir_node *node) {
2480 return _get_irn_type_attr(node);
2483 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2484 entity *(get_irn_entity_attr)(ir_node *node) {
2485 return _get_irn_entity_attr(node);
2488 /* Returns non-zero for constant-like nodes. */
2489 int (is_irn_constlike)(const ir_node *node) {
2490 return _is_irn_constlike(node);
2494 * Returns non-zero for nodes that are allowed to have keep-alives and
2495 * are neither Block nor PhiM.
2497 int (is_irn_keep)(const ir_node *node) {
2498 return _is_irn_keep(node);
2501 /* Gets the string representation of the jump prediction .*/
2502 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
2506 case COND_JMP_PRED_NONE: return "no prediction";
2507 case COND_JMP_PRED_TRUE: return "true taken";
2508 case COND_JMP_PRED_FALSE: return "false taken";
2512 /* Returns the conditional jump prediction of a Cond node. */
2513 cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
2514 return _get_Cond_jmp_pred(cond);
2517 /* Sets a new conditional jump prediction. */
2518 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2519 _set_Cond_jmp_pred(cond, pred);
2522 /** the get_type operation must be always implemented and return a firm type */
2523 static ir_type *get_Default_type(ir_node *n) {
2524 return get_unknown_type();
2527 /* Sets the get_type operation for an ir_op_ops. */
2528 ir_op_ops *firm_set_default_get_type(opcode code, ir_op_ops *ops)
2531 case iro_Const: ops->get_type = get_Const_type; break;
2532 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2533 case iro_Cast: ops->get_type = get_Cast_type; break;
2534 case iro_Proj: ops->get_type = get_Proj_type; break;
2536 /* not allowed to be NULL */
2537 if (! ops->get_type)
2538 ops->get_type = get_Default_type;
2544 /** Return the attribute type of a SymConst node if exists */
2545 static ir_type *get_SymConst_attr_type(ir_node *self) {
2546 symconst_kind kind = get_SymConst_kind(self);
2547 if (kind == symconst_type_tag || kind == symconst_size)
2548 return get_SymConst_type(self);
2552 /** Return the attribute entity of a SymConst node if exists */
2553 static entity *get_SymConst_attr_entity(ir_node *self) {
2554 symconst_kind kind = get_SymConst_kind(self);
2555 if (kind == symconst_addr_ent)
2556 return get_SymConst_entity(self);
2560 /** the get_type_attr operation must be always implemented */
2561 static ir_type *get_Null_type(ir_node *n) {
2562 return firm_unknown_type;
2565 /* Sets the get_type operation for an ir_op_ops. */
2566 ir_op_ops *firm_set_default_get_type_attr(opcode code, ir_op_ops *ops)
2569 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2570 case iro_Call: ops->get_type_attr = get_Call_type; break;
2571 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2572 case iro_Free: ops->get_type_attr = get_Free_type; break;
2573 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2575 /* not allowed to be NULL */
2576 if (! ops->get_type_attr)
2577 ops->get_type_attr = get_Null_type;
2583 /** the get_entity_attr operation must be always implemented */
2584 static entity *get_Null_ent(ir_node *n) {
2588 /* Sets the get_type operation for an ir_op_ops. */
2589 ir_op_ops *firm_set_default_get_entity_attr(opcode code, ir_op_ops *ops)
2592 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2593 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2595 /* not allowed to be NULL */
2596 if (! ops->get_entity_attr)
2597 ops->get_entity_attr = get_Null_ent;
2603 #ifdef DEBUG_libfirm
2604 void dump_irn (ir_node *n) {
2605 int i, arity = get_irn_arity(n);
2606 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2608 ir_node *pred = get_irn_n(n, -1);
2609 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2610 get_irn_node_nr(pred), (void *)pred);
2612 printf(" preds: \n");
2613 for (i = 0; i < arity; ++i) {
2614 ir_node *pred = get_irn_n(n, i);
2615 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2616 get_irn_node_nr(pred), (void *)pred);
2620 #else /* DEBUG_libfirm */
2621 void dump_irn (ir_node *n) {}
2622 #endif /* DEBUG_libfirm */