3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
42 static const char *pnc_name_arr [] = {
43 "False", "Eq", "Lt", "Le",
44 "Gt", "Ge", "Lg", "Leg", "Uo",
45 "Ue", "Ul", "Ule", "Ug", "Uge",
50 * returns the pnc name from an pnc constant
52 const char *get_pnc_string(int pnc) {
53 return pnc_name_arr[pnc];
57 * Calculates the negated (Complement(R)) pnc condition.
59 int get_negated_pnc(int pnc, ir_mode *mode) {
62 /* do NOT add the Uo bit for non-floating point values */
63 if (! mode_is_float(mode))
69 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
71 get_inversed_pnc(int pnc) {
72 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
73 int lesser = pnc & pn_Cmp_Lt;
74 int greater = pnc & pn_Cmp_Gt;
76 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
81 const char *pns_name_arr [] = {
82 "initial_exec", "global_store",
83 "frame_base", "globals", "args"
86 const char *symconst_name_arr [] = {
87 "type_tag", "size", "addr_name", "addr_ent"
91 * Indicates, whether additional data can be registered to ir nodes.
92 * If set to 1, this is not possible anymore.
94 static int forbid_new_data = 0;
97 * The amount of additional space for custom data to be allocated upon
98 * creating a new node.
100 unsigned firm_add_node_size = 0;
103 /* register new space for every node */
104 unsigned register_additional_node_data(unsigned size) {
105 assert(!forbid_new_data && "Too late to register additional node data");
110 return firm_add_node_size += size;
116 /* Forbid the addition of new data to an ir node. */
121 * irnode constructor.
122 * Create a new irnode in irg, with an op, mode, arity and
123 * some incoming irnodes.
124 * If arity is negative, a node with a dynamic array is created.
127 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
128 int arity, ir_node **in)
131 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
134 assert(irg && op && mode);
135 p = obstack_alloc (irg->obst, node_size);
136 memset(p, 0, node_size);
137 res = (ir_node *) (p + firm_add_node_size);
139 res->kind = k_ir_node;
145 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
147 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
148 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
152 set_irn_dbg_info(res, db);
156 res->node_nr = get_irp_new_node_nr();
159 #if FIRM_EDGES_INPLACE
162 int not_a_block = is_no_Block(res);
164 INIT_LIST_HEAD(&res->edge_info.outs_head);
166 INIT_LIST_HEAD(&res->attr.block.succ_head);
169 for (i = 0, n = arity + not_a_block; i < n; ++i)
170 edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
174 hook_new_node(irg, res);
179 /*-- getting some parameters from ir_nodes --*/
182 (is_ir_node)(const void *thing) {
183 return _is_ir_node(thing);
187 (get_irn_intra_arity)(const ir_node *node) {
188 return _get_irn_intra_arity(node);
192 (get_irn_inter_arity)(const ir_node *node) {
193 return _get_irn_inter_arity(node);
196 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
199 (get_irn_arity)(const ir_node *node) {
200 return _get_irn_arity(node);
203 /* Returns the array with ins. This array is shifted with respect to the
204 array accessed by get_irn_n: The block operand is at position 0 not -1.
205 (@@@ This should be changed.)
206 The order of the predecessors in this array is not guaranteed, except that
207 lists of operands as predecessors of Block or arguments of a Call are
210 get_irn_in (const ir_node *node) {
212 if (get_interprocedural_view()) { /* handle Filter and Block specially */
213 if (get_irn_opcode(node) == iro_Filter) {
214 assert(node->attr.filter.in_cg);
215 return node->attr.filter.in_cg;
216 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
217 return node->attr.block.in_cg;
219 /* else fall through */
225 set_irn_in (ir_node *node, int arity, ir_node **in) {
228 if (get_interprocedural_view()) { /* handle Filter and Block specially */
229 if (get_irn_opcode(node) == iro_Filter) {
230 assert(node->attr.filter.in_cg);
231 arr = &node->attr.filter.in_cg;
232 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
233 arr = &node->attr.block.in_cg;
240 if (arity != ARR_LEN(*arr) - 1) {
241 ir_node * block = (*arr)[0];
242 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
245 fix_backedges(current_ir_graph->obst, node);
246 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
250 (get_irn_intra_n)(const ir_node *node, int n) {
251 return _get_irn_intra_n (node, n);
255 (get_irn_inter_n)(const ir_node *node, int n) {
256 return _get_irn_inter_n (node, n);
259 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
262 (get_irn_n)(const ir_node *node, int n) {
263 return _get_irn_n(node, n);
267 set_irn_n (ir_node *node, int n, ir_node *in) {
268 assert(node && node->kind == k_ir_node);
270 assert(n < get_irn_arity(node));
271 assert(in && in->kind == k_ir_node);
273 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
274 /* Change block pred in both views! */
275 node->in[n + 1] = in;
276 assert(node->attr.filter.in_cg);
277 node->attr.filter.in_cg[n + 1] = in;
280 if (get_interprocedural_view()) { /* handle Filter and Block specially */
281 if (get_irn_opcode(node) == iro_Filter) {
282 assert(node->attr.filter.in_cg);
283 node->attr.filter.in_cg[n + 1] = in;
285 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
286 node->attr.block.in_cg[n + 1] = in;
289 /* else fall through */
293 hook_set_irn_n(node, n, in, node->in[n + 1]);
295 /* Here, we rely on src and tgt being in the current ir graph */
296 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
298 node->in[n + 1] = in;
302 (get_irn_mode)(const ir_node *node) {
303 return _get_irn_mode(node);
307 (set_irn_mode)(ir_node *node, ir_mode *mode)
309 _set_irn_mode(node, mode);
313 get_irn_modecode (const ir_node *node)
316 return node->mode->code;
319 /** Gets the string representation of the mode .*/
321 get_irn_modename (const ir_node *node)
324 return get_mode_name(node->mode);
328 get_irn_modeident (const ir_node *node)
331 return get_mode_ident(node->mode);
335 (get_irn_op)(const ir_node *node)
337 return _get_irn_op(node);
340 /* should be private to the library: */
342 set_irn_op (ir_node *node, ir_op *op)
349 (get_irn_opcode)(const ir_node *node)
351 return _get_irn_opcode(node);
355 get_irn_opname (const ir_node *node)
358 if ((get_irn_op((ir_node *)node) == op_Phi) &&
359 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
360 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
361 return get_id_str(node->op->name);
365 get_irn_opident (const ir_node *node)
368 return node->op->name;
372 (get_irn_visited)(const ir_node *node)
374 return _get_irn_visited(node);
378 (set_irn_visited)(ir_node *node, unsigned long visited)
380 _set_irn_visited(node, visited);
384 (mark_irn_visited)(ir_node *node) {
385 _mark_irn_visited(node);
389 (irn_not_visited)(const ir_node *node) {
390 return _irn_not_visited(node);
394 (irn_visited)(const ir_node *node) {
395 return _irn_visited(node);
399 (set_irn_link)(ir_node *node, void *link) {
400 _set_irn_link(node, link);
404 (get_irn_link)(const ir_node *node) {
405 return _get_irn_link(node);
409 (get_irn_pinned)(const ir_node *node) {
410 return _get_irn_pinned(node);
414 (is_irn_pinned_in_irg) (const ir_node *node) {
415 return _is_irn_pinned_in_irg(node);
418 void set_irn_pinned(ir_node *node, op_pin_state state) {
419 /* due to optimization an opt may be turned into a Tuple */
420 if (get_irn_op(node) == op_Tuple)
423 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
424 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
426 node->attr.except.pin_state = state;
429 #ifdef DO_HEAPANALYSIS
430 /* Access the abstract interpretation information of a node.
431 Returns NULL if no such information is available. */
432 struct abstval *get_irn_abst_value(ir_node *n) {
435 /* Set the abstract interpretation information of a node. */
436 void set_irn_abst_value(ir_node *n, struct abstval *os) {
439 struct section *firm_get_irn_section(ir_node *n) {
442 void firm_set_irn_section(ir_node *n, struct section *s) {
446 /* Dummies needed for firmjni. */
447 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
448 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
449 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
450 void firm_set_irn_section(ir_node *n, struct section *s) {}
451 #endif /* DO_HEAPANALYSIS */
454 /* Outputs a unique number for this node */
456 get_irn_node_nr(const ir_node *node) {
459 return node->node_nr;
466 get_irn_const_attr (ir_node *node)
468 assert (node->op == op_Const);
469 return node->attr.con;
473 get_irn_proj_attr (ir_node *node)
475 assert (node->op == op_Proj);
476 return node->attr.proj;
480 get_irn_alloc_attr (ir_node *node)
482 assert (node->op == op_Alloc);
487 get_irn_free_attr (ir_node *node)
489 assert (node->op == op_Free);
494 get_irn_symconst_attr (ir_node *node)
496 assert (node->op == op_SymConst);
501 get_irn_call_attr (ir_node *node)
503 assert (node->op == op_Call);
504 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
508 get_irn_sel_attr (ir_node *node)
510 assert (node->op == op_Sel);
515 get_irn_phi_attr (ir_node *node)
517 assert (node->op == op_Phi);
518 return node->attr.phi0_pos;
522 get_irn_block_attr (ir_node *node)
524 assert (node->op == op_Block);
525 return node->attr.block;
529 get_irn_load_attr (ir_node *node)
531 assert (node->op == op_Load);
532 return node->attr.load;
536 get_irn_store_attr (ir_node *node)
538 assert (node->op == op_Store);
539 return node->attr.store;
543 get_irn_except_attr (ir_node *node)
545 assert (node->op == op_Div || node->op == op_Quot ||
546 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
547 return node->attr.except;
551 get_irn_generic_attr (ir_node *node) {
555 /** manipulate fields of individual nodes **/
557 /* this works for all except Block */
559 get_nodes_block (const ir_node *node) {
560 assert (!(node->op == op_Block));
561 assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
562 return get_irn_n(node, -1);
566 set_nodes_block (ir_node *node, ir_node *block) {
567 assert (!(node->op == op_Block));
568 set_irn_n(node, -1, block);
571 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
572 * from Start. If so returns frame type, else Null. */
573 type *is_frame_pointer(ir_node *n) {
574 if ((get_irn_op(n) == op_Proj) &&
575 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
576 ir_node *start = get_Proj_pred(n);
577 if (get_irn_op(start) == op_Start) {
578 return get_irg_frame_type(get_irn_irg(start));
584 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
585 * from Start. If so returns global type, else Null. */
586 type *is_globals_pointer(ir_node *n) {
587 if ((get_irn_op(n) == op_Proj) &&
588 (get_Proj_proj(n) == pn_Start_P_globals)) {
589 ir_node *start = get_Proj_pred(n);
590 if (get_irn_op(start) == op_Start) {
591 return get_glob_type();
597 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
598 * from Start. If so returns 1, else 0. */
599 int is_value_arg_pointer(ir_node *n) {
600 if ((get_irn_op(n) == op_Proj) &&
601 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
602 (get_irn_op(get_Proj_pred(n)) == op_Start))
607 /* Returns an array with the predecessors of the Block. Depending on
608 the implementation of the graph data structure this can be a copy of
609 the internal representation of predecessors as well as the internal
610 array itself. Therefore writing to this array might obstruct the ir. */
612 get_Block_cfgpred_arr (ir_node *node)
614 assert ((node->op == op_Block));
615 return (ir_node **)&(get_irn_in(node)[1]);
619 (get_Block_n_cfgpreds)(ir_node *node) {
620 return get_Block_n_cfgpreds(node);
624 (get_Block_cfgpred)(ir_node *node, int pos) {
625 return get_Block_cfgpred(node, pos);
629 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
630 assert (node->op == op_Block);
631 set_irn_n(node, pos, pred);
635 (get_Block_cfgpred_block)(ir_node *node, int pos) {
636 return _get_Block_cfgpred_block(node, pos);
640 get_Block_matured (ir_node *node) {
641 assert (node->op == op_Block);
642 return node->attr.block.matured;
646 set_Block_matured (ir_node *node, bool matured) {
647 assert (node->op == op_Block);
648 node->attr.block.matured = matured;
652 (get_Block_block_visited)(ir_node *node) {
653 return _get_Block_block_visited(node);
657 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
658 _set_Block_block_visited(node, visit);
661 /* For this current_ir_graph must be set. */
663 (mark_Block_block_visited)(ir_node *node) {
664 _mark_Block_block_visited(node);
668 (Block_not_block_visited)(ir_node *node) {
669 return _Block_not_block_visited(node);
673 get_Block_graph_arr (ir_node *node, int pos) {
674 assert (node->op == op_Block);
675 return node->attr.block.graph_arr[pos+1];
679 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
680 assert (node->op == op_Block);
681 node->attr.block.graph_arr[pos+1] = value;
684 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
685 assert(node->op == op_Block);
686 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
687 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
688 node->attr.block.in_cg[0] = NULL;
689 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
691 /* Fix backedge array. fix_backedges operates depending on
692 interprocedural_view. */
693 int ipv = get_interprocedural_view();
694 set_interprocedural_view(true);
695 fix_backedges(current_ir_graph->obst, node);
696 set_interprocedural_view(ipv);
699 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
702 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
703 assert(node->op == op_Block &&
704 node->attr.block.in_cg &&
705 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
706 node->attr.block.in_cg[pos + 1] = pred;
709 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
710 assert(node->op == op_Block);
711 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
714 int get_Block_cg_n_cfgpreds(ir_node * node) {
715 assert(node->op == op_Block);
716 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
719 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
720 assert(node->op == op_Block && node->attr.block.in_cg);
721 return node->attr.block.in_cg[pos + 1];
724 void remove_Block_cg_cfgpred_arr(ir_node * node) {
725 assert(node->op == op_Block);
726 node->attr.block.in_cg = NULL;
729 ir_node *(set_Block_dead)(ir_node *block) {
730 return _set_Block_dead(block);
733 int (is_Block_dead)(const ir_node *block) {
734 return _is_Block_dead(block);
737 ir_extblk *get_Block_extbb(const ir_node *block) {
738 assert(is_Block(block));
739 return block->attr.block.extblk;
742 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
743 assert(is_Block(block));
744 block->attr.block.extblk = extblk;
748 set_Start_irg(ir_node *node, ir_graph *irg) {
749 assert(node->op == op_Start);
750 assert(is_ir_graph(irg));
751 assert(0 && " Why set irg? -- use set_irn_irg");
755 get_End_n_keepalives(ir_node *end) {
756 assert (end->op == op_End);
757 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
761 get_End_keepalive(ir_node *end, int pos) {
762 assert (end->op == op_End);
763 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
767 add_End_keepalive (ir_node *end, ir_node *ka) {
768 assert (end->op == op_End);
769 ARR_APP1 (ir_node *, end->in, ka);
773 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
774 assert (end->op == op_End);
775 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
779 free_End (ir_node *end) {
780 assert (end->op == op_End);
782 DEL_ARR_F(end->in); /* GL @@@ tut nicht ! */
783 end->in = NULL; /* @@@ make sure we get an error if we use the
784 in array afterwards ... */
787 /* Return the target address of an IJmp */
788 ir_node *get_IJmp_target(ir_node *ijmp) {
789 assert(ijmp->op == op_IJmp);
790 return get_irn_n(ijmp, 0);
793 /** Sets the target address of an IJmp */
794 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
795 assert(ijmp->op == op_IJmp);
796 set_irn_n(ijmp, 0, tgt);
800 > Implementing the case construct (which is where the constant Proj node is
801 > important) involves far more than simply determining the constant values.
802 > We could argue that this is more properly a function of the translator from
803 > Firm to the target machine. That could be done if there was some way of
804 > projecting "default" out of the Cond node.
805 I know it's complicated.
806 Basically there are two problems:
807 - determining the gaps between the projs
808 - determining the biggest case constant to know the proj number for
810 I see several solutions:
811 1. Introduce a ProjDefault node. Solves both problems.
812 This means to extend all optimizations executed during construction.
813 2. Give the Cond node for switch two flavors:
814 a) there are no gaps in the projs (existing flavor)
815 b) gaps may exist, default proj is still the Proj with the largest
816 projection number. This covers also the gaps.
817 3. Fix the semantic of the Cond to that of 2b)
819 Solution 2 seems to be the best:
820 Computing the gaps in the Firm representation is not too hard, i.e.,
821 libFIRM can implement a routine that transforms between the two
822 flavours. This is also possible for 1) but 2) does not require to
823 change any existing optimization.
824 Further it should be far simpler to determine the biggest constant than
826 I don't want to choose 3) as 2a) seems to have advantages for
827 dataflow analysis and 3) does not allow to convert the representation to
831 get_Cond_selector (ir_node *node) {
832 assert (node->op == op_Cond);
833 return get_irn_n(node, 0);
837 set_Cond_selector (ir_node *node, ir_node *selector) {
838 assert (node->op == op_Cond);
839 set_irn_n(node, 0, selector);
843 get_Cond_kind (ir_node *node) {
844 assert (node->op == op_Cond);
845 return node->attr.c.kind;
849 set_Cond_kind (ir_node *node, cond_kind kind) {
850 assert (node->op == op_Cond);
851 node->attr.c.kind = kind;
855 get_Cond_defaultProj (ir_node *node) {
856 assert (node->op == op_Cond);
857 return node->attr.c.default_proj;
861 get_Return_mem (ir_node *node) {
862 assert (node->op == op_Return);
863 return get_irn_n(node, 0);
867 set_Return_mem (ir_node *node, ir_node *mem) {
868 assert (node->op == op_Return);
869 set_irn_n(node, 0, mem);
873 get_Return_n_ress (ir_node *node) {
874 assert (node->op == op_Return);
875 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
879 get_Return_res_arr (ir_node *node)
881 assert ((node->op == op_Return));
882 if (get_Return_n_ress(node) > 0)
883 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
890 set_Return_n_res (ir_node *node, int results) {
891 assert (node->op == op_Return);
896 get_Return_res (ir_node *node, int pos) {
897 assert (node->op == op_Return);
898 assert (get_Return_n_ress(node) > pos);
899 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
903 set_Return_res (ir_node *node, int pos, ir_node *res){
904 assert (node->op == op_Return);
905 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
909 get_Raise_mem (ir_node *node) {
910 assert (node->op == op_Raise);
911 return get_irn_n(node, 0);
915 set_Raise_mem (ir_node *node, ir_node *mem) {
916 assert (node->op == op_Raise);
917 set_irn_n(node, 0, mem);
921 get_Raise_exo_ptr (ir_node *node) {
922 assert (node->op == op_Raise);
923 return get_irn_n(node, 1);
927 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
928 assert (node->op == op_Raise);
929 set_irn_n(node, 1, exo_ptr);
932 tarval *(get_Const_tarval)(ir_node *node) {
933 return _get_Const_tarval(node);
937 set_Const_tarval (ir_node *node, tarval *con) {
938 assert (node->op == op_Const);
939 node->attr.con.tv = con;
942 cnst_classify_t (classify_Const)(ir_node *node)
944 return _classify_Const(node);
948 /* The source language type. Must be an atomic type. Mode of type must
949 be mode of node. For tarvals from entities type must be pointer to
952 get_Const_type (ir_node *node) {
953 assert (node->op == op_Const);
954 return node->attr.con.tp;
958 set_Const_type (ir_node *node, type *tp) {
959 assert (node->op == op_Const);
960 if (tp != firm_unknown_type) {
961 assert (is_atomic_type(tp));
962 assert (get_type_mode(tp) == get_irn_mode(node));
964 node->attr.con.tp = tp;
969 get_SymConst_kind (const ir_node *node) {
970 assert (node->op == op_SymConst);
971 return node->attr.i.num;
975 set_SymConst_kind (ir_node *node, symconst_kind num) {
976 assert (node->op == op_SymConst);
977 node->attr.i.num = num;
981 get_SymConst_type (ir_node *node) {
982 assert ( (node->op == op_SymConst)
983 && ( get_SymConst_kind(node) == symconst_type_tag
984 || get_SymConst_kind(node) == symconst_size));
985 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
989 set_SymConst_type (ir_node *node, type *tp) {
990 assert ( (node->op == op_SymConst)
991 && ( get_SymConst_kind(node) == symconst_type_tag
992 || get_SymConst_kind(node) == symconst_size));
993 node->attr.i.sym.type_p = tp;
997 get_SymConst_name (ir_node *node) {
998 assert ( (node->op == op_SymConst)
999 && (get_SymConst_kind(node) == symconst_addr_name));
1000 return node->attr.i.sym.ident_p;
1004 set_SymConst_name (ir_node *node, ident *name) {
1005 assert ( (node->op == op_SymConst)
1006 && (get_SymConst_kind(node) == symconst_addr_name));
1007 node->attr.i.sym.ident_p = name;
1011 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1012 entity *get_SymConst_entity (ir_node *node) {
1013 assert ( (node->op == op_SymConst)
1014 && (get_SymConst_kind (node) == symconst_addr_ent));
1015 return node->attr.i.sym.entity_p;
1018 void set_SymConst_entity (ir_node *node, entity *ent) {
1019 assert ( (node->op == op_SymConst)
1020 && (get_SymConst_kind(node) == symconst_addr_ent));
1021 node->attr.i.sym.entity_p = ent;
1024 union symconst_symbol
1025 get_SymConst_symbol (ir_node *node) {
1026 assert (node->op == op_SymConst);
1027 return node->attr.i.sym;
1031 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1032 assert (node->op == op_SymConst);
1033 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1034 node->attr.i.sym = sym;
1038 get_SymConst_value_type (ir_node *node) {
1039 assert (node->op == op_SymConst);
1040 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1041 return node->attr.i.tp;
1045 set_SymConst_value_type (ir_node *node, type *tp) {
1046 assert (node->op == op_SymConst);
1047 node->attr.i.tp = tp;
1051 get_Sel_mem (ir_node *node) {
1052 assert (node->op == op_Sel);
1053 return get_irn_n(node, 0);
1057 set_Sel_mem (ir_node *node, ir_node *mem) {
1058 assert (node->op == op_Sel);
1059 set_irn_n(node, 0, mem);
1063 get_Sel_ptr (ir_node *node) {
1064 assert (node->op == op_Sel);
1065 return get_irn_n(node, 1);
1069 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1070 assert (node->op == op_Sel);
1071 set_irn_n(node, 1, ptr);
1075 get_Sel_n_indexs (ir_node *node) {
1076 assert (node->op == op_Sel);
1077 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1081 get_Sel_index_arr (ir_node *node)
1083 assert ((node->op == op_Sel));
1084 if (get_Sel_n_indexs(node) > 0)
1085 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1091 get_Sel_index (ir_node *node, int pos) {
1092 assert (node->op == op_Sel);
1093 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1097 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1098 assert (node->op == op_Sel);
1099 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1103 get_Sel_entity (ir_node *node) {
1104 assert (node->op == op_Sel);
1105 return node->attr.s.ent;
1109 set_Sel_entity (ir_node *node, entity *ent) {
1110 assert (node->op == op_Sel);
1111 node->attr.s.ent = ent;
1115 get_InstOf_ent (ir_node *node) {
1116 assert (node->op = op_InstOf);
1117 return (node->attr.io.ent);
1121 set_InstOf_ent (ir_node *node, type *ent) {
1122 assert (node->op = op_InstOf);
1123 node->attr.io.ent = ent;
1127 get_InstOf_store (ir_node *node) {
1128 assert (node->op = op_InstOf);
1129 return (get_irn_n (node, 0));
1133 set_InstOf_store (ir_node *node, ir_node *obj) {
1134 assert (node->op = op_InstOf);
1135 set_irn_n (node, 0, obj);
1139 get_InstOf_obj (ir_node *node) {
1140 assert (node->op = op_InstOf);
1141 return (get_irn_n (node, 1));
1145 set_InstOf_obj (ir_node *node, ir_node *obj) {
1146 assert (node->op = op_InstOf);
1147 set_irn_n (node, 1, obj);
1151 /* For unary and binary arithmetic operations the access to the
1152 operands can be factored out. Left is the first, right the
1153 second arithmetic value as listed in tech report 0999-33.
1154 unops are: Minus, Abs, Not, Conv, Cast
1155 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1156 Shr, Shrs, Rotate, Cmp */
1160 get_Call_mem (ir_node *node) {
1161 assert (node->op == op_Call);
1162 return get_irn_n(node, 0);
1166 set_Call_mem (ir_node *node, ir_node *mem) {
1167 assert (node->op == op_Call);
1168 set_irn_n(node, 0, mem);
1172 get_Call_ptr (ir_node *node) {
1173 assert (node->op == op_Call);
1174 return get_irn_n(node, 1);
1178 set_Call_ptr (ir_node *node, ir_node *ptr) {
1179 assert (node->op == op_Call);
1180 set_irn_n(node, 1, ptr);
1184 get_Call_param_arr (ir_node *node) {
1185 assert (node->op == op_Call);
1186 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1190 get_Call_n_params (ir_node *node) {
1191 assert (node->op == op_Call);
1192 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1196 get_Call_arity (ir_node *node) {
1197 assert (node->op == op_Call);
1198 return get_Call_n_params(node);
1202 set_Call_arity (ir_node *node, ir_node *arity) {
1203 assert (node->op == op_Call);
1208 get_Call_param (ir_node *node, int pos) {
1209 assert (node->op == op_Call);
1210 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1214 set_Call_param (ir_node *node, int pos, ir_node *param) {
1215 assert (node->op == op_Call);
1216 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1220 get_Call_type (ir_node *node) {
1221 assert (node->op == op_Call);
1222 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1226 set_Call_type (ir_node *node, type *tp) {
1227 assert (node->op == op_Call);
1228 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1229 node->attr.call.cld_tp = tp;
1232 int Call_has_callees(ir_node *node) {
1233 assert(node && node->op == op_Call);
1234 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1235 (node->attr.call.callee_arr != NULL));
1238 int get_Call_n_callees(ir_node * node) {
1239 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1240 return ARR_LEN(node->attr.call.callee_arr);
1243 entity * get_Call_callee(ir_node * node, int pos) {
1244 assert(pos >= 0 && pos < get_Call_n_callees(node));
1245 return node->attr.call.callee_arr[pos];
1248 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1249 assert(node->op == op_Call);
1250 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1251 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1253 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1256 void remove_Call_callee_arr(ir_node * node) {
1257 assert(node->op == op_Call);
1258 node->attr.call.callee_arr = NULL;
1261 ir_node * get_CallBegin_ptr (ir_node *node) {
1262 assert(node->op == op_CallBegin);
1263 return get_irn_n(node, 0);
1265 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1266 assert(node->op == op_CallBegin);
1267 set_irn_n(node, 0, ptr);
1269 ir_node * get_CallBegin_call (ir_node *node) {
1270 assert(node->op == op_CallBegin);
1271 return node->attr.callbegin.call;
1273 void set_CallBegin_call (ir_node *node, ir_node *call) {
1274 assert(node->op == op_CallBegin);
1275 node->attr.callbegin.call = call;
1280 ir_node * get_##OP##_left(ir_node *node) { \
1281 assert(node->op == op_##OP); \
1282 return get_irn_n(node, node->op->op_index); \
1284 void set_##OP##_left(ir_node *node, ir_node *left) { \
1285 assert(node->op == op_##OP); \
1286 set_irn_n(node, node->op->op_index, left); \
1288 ir_node *get_##OP##_right(ir_node *node) { \
1289 assert(node->op == op_##OP); \
1290 return get_irn_n(node, node->op->op_index + 1); \
1292 void set_##OP##_right(ir_node *node, ir_node *right) { \
1293 assert(node->op == op_##OP); \
1294 set_irn_n(node, node->op->op_index + 1, right); \
1298 ir_node *get_##OP##_op(ir_node *node) { \
1299 assert(node->op == op_##OP); \
1300 return get_irn_n(node, node->op->op_index); \
1302 void set_##OP##_op (ir_node *node, ir_node *op) { \
1303 assert(node->op == op_##OP); \
1304 set_irn_n(node, node->op->op_index, op); \
1314 get_Quot_mem (ir_node *node) {
1315 assert (node->op == op_Quot);
1316 return get_irn_n(node, 0);
1320 set_Quot_mem (ir_node *node, ir_node *mem) {
1321 assert (node->op == op_Quot);
1322 set_irn_n(node, 0, mem);
1328 get_DivMod_mem (ir_node *node) {
1329 assert (node->op == op_DivMod);
1330 return get_irn_n(node, 0);
1334 set_DivMod_mem (ir_node *node, ir_node *mem) {
1335 assert (node->op == op_DivMod);
1336 set_irn_n(node, 0, mem);
1342 get_Div_mem (ir_node *node) {
1343 assert (node->op == op_Div);
1344 return get_irn_n(node, 0);
1348 set_Div_mem (ir_node *node, ir_node *mem) {
1349 assert (node->op == op_Div);
1350 set_irn_n(node, 0, mem);
1356 get_Mod_mem (ir_node *node) {
1357 assert (node->op == op_Mod);
1358 return get_irn_n(node, 0);
1362 set_Mod_mem (ir_node *node, ir_node *mem) {
1363 assert (node->op == op_Mod);
1364 set_irn_n(node, 0, mem);
1381 get_Cast_type (ir_node *node) {
1382 assert (node->op == op_Cast);
1383 return node->attr.cast.totype;
1387 set_Cast_type (ir_node *node, type *to_tp) {
1388 assert (node->op == op_Cast);
1389 node->attr.cast.totype = to_tp;
1393 /* Checks for upcast.
1395 * Returns true if the Cast node casts a class type to a super type.
1397 int is_Cast_upcast(ir_node *node) {
1398 type *totype = get_Cast_type(node);
1399 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1400 ir_graph *myirg = get_irn_irg(node);
1402 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1405 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1406 totype = get_pointer_points_to_type(totype);
1407 fromtype = get_pointer_points_to_type(fromtype);
1412 if (!is_Class_type(totype)) return false;
1413 return is_subclass_of(fromtype, totype);
1416 /* Checks for downcast.
1418 * Returns true if the Cast node casts a class type to a sub type.
1420 int is_Cast_downcast(ir_node *node) {
1421 type *totype = get_Cast_type(node);
1422 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1424 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1427 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1428 totype = get_pointer_points_to_type(totype);
1429 fromtype = get_pointer_points_to_type(fromtype);
1434 if (!is_Class_type(totype)) return false;
1435 return is_subclass_of(totype, fromtype);
1439 (is_unop)(const ir_node *node) {
1440 return _is_unop(node);
1444 get_unop_op (ir_node *node) {
1445 if (node->op->opar == oparity_unary)
1446 return get_irn_n(node, node->op->op_index);
1448 assert(node->op->opar == oparity_unary);
1453 set_unop_op (ir_node *node, ir_node *op) {
1454 if (node->op->opar == oparity_unary)
1455 set_irn_n(node, node->op->op_index, op);
1457 assert(node->op->opar == oparity_unary);
1461 (is_binop)(const ir_node *node) {
1462 return _is_binop(node);
1466 get_binop_left (ir_node *node) {
1467 if (node->op->opar == oparity_binary)
1468 return get_irn_n(node, node->op->op_index);
1470 assert(node->op->opar == oparity_binary);
1475 set_binop_left (ir_node *node, ir_node *left) {
1476 if (node->op->opar == oparity_binary)
1477 set_irn_n(node, node->op->op_index, left);
1479 assert (node->op->opar == oparity_binary);
1483 get_binop_right (ir_node *node) {
1484 if (node->op->opar == oparity_binary)
1485 return get_irn_n(node, node->op->op_index + 1);
1487 assert(node->op->opar == oparity_binary);
1492 set_binop_right (ir_node *node, ir_node *right) {
1493 if (node->op->opar == oparity_binary)
1494 set_irn_n(node, node->op->op_index + 1, right);
1496 assert (node->op->opar == oparity_binary);
1499 int is_Phi (const ir_node *n) {
1505 if (op == op_Filter) return get_interprocedural_view();
1508 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1509 (get_irn_arity(n) > 0));
1514 int is_Phi0 (const ir_node *n) {
1517 return ((get_irn_op(n) == op_Phi) &&
1518 (get_irn_arity(n) == 0) &&
1519 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1523 get_Phi_preds_arr (ir_node *node) {
1524 assert (node->op == op_Phi);
1525 return (ir_node **)&(get_irn_in(node)[1]);
1529 get_Phi_n_preds (ir_node *node) {
1530 assert (is_Phi(node) || is_Phi0(node));
1531 return (get_irn_arity(node));
/* NOTE(review): the body after the assert (original lines 1537ff.) is
   missing from this listing; cannot document what, if anything, this
   function does beyond checking the opcode -- verify against the full file. */
1535 void set_Phi_n_preds (ir_node *node, int n_preds) {
1536 assert (node->op == op_Phi);
1541 get_Phi_pred (ir_node *node, int pos) {
1542 assert (is_Phi(node) || is_Phi0(node));
1543 return get_irn_n(node, pos);
1547 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1548 assert (is_Phi(node) || is_Phi0(node));
1549 set_irn_n(node, pos, pred);
1553 int is_memop(ir_node *node) {
1554 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1557 ir_node *get_memop_mem (ir_node *node) {
1558 assert(is_memop(node));
1559 return get_irn_n(node, 0);
1562 void set_memop_mem (ir_node *node, ir_node *mem) {
1563 assert(is_memop(node));
1564 set_irn_n(node, 0, mem);
1567 ir_node *get_memop_ptr (ir_node *node) {
1568 assert(is_memop(node));
1569 return get_irn_n(node, 1);
1572 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1573 assert(is_memop(node));
1574 set_irn_n(node, 1, ptr);
1578 get_Load_mem (ir_node *node) {
1579 assert (node->op == op_Load);
1580 return get_irn_n(node, 0);
1584 set_Load_mem (ir_node *node, ir_node *mem) {
1585 assert (node->op == op_Load);
1586 set_irn_n(node, 0, mem);
1590 get_Load_ptr (ir_node *node) {
1591 assert (node->op == op_Load);
1592 return get_irn_n(node, 1);
1596 set_Load_ptr (ir_node *node, ir_node *ptr) {
1597 assert (node->op == op_Load);
1598 set_irn_n(node, 1, ptr);
1602 get_Load_mode (ir_node *node) {
1603 assert (node->op == op_Load);
1604 return node->attr.load.load_mode;
1608 set_Load_mode (ir_node *node, ir_mode *mode) {
1609 assert (node->op == op_Load);
1610 node->attr.load.load_mode = mode;
1614 get_Load_volatility (ir_node *node) {
1615 assert (node->op == op_Load);
1616 return node->attr.load.volatility;
1620 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1621 assert (node->op == op_Load);
1622 node->attr.load.volatility = volatility;
1627 get_Store_mem (ir_node *node) {
1628 assert (node->op == op_Store);
1629 return get_irn_n(node, 0);
1633 set_Store_mem (ir_node *node, ir_node *mem) {
1634 assert (node->op == op_Store);
1635 set_irn_n(node, 0, mem);
1639 get_Store_ptr (ir_node *node) {
1640 assert (node->op == op_Store);
1641 return get_irn_n(node, 1);
1645 set_Store_ptr (ir_node *node, ir_node *ptr) {
1646 assert (node->op == op_Store);
1647 set_irn_n(node, 1, ptr);
1651 get_Store_value (ir_node *node) {
1652 assert (node->op == op_Store);
1653 return get_irn_n(node, 2);
1657 set_Store_value (ir_node *node, ir_node *value) {
1658 assert (node->op == op_Store);
1659 set_irn_n(node, 2, value);
1663 get_Store_volatility (ir_node *node) {
1664 assert (node->op == op_Store);
1665 return node->attr.store.volatility;
1669 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1670 assert (node->op == op_Store);
1671 node->attr.store.volatility = volatility;
1676 get_Alloc_mem (ir_node *node) {
1677 assert (node->op == op_Alloc);
1678 return get_irn_n(node, 0);
1682 set_Alloc_mem (ir_node *node, ir_node *mem) {
1683 assert (node->op == op_Alloc);
1684 set_irn_n(node, 0, mem);
1688 get_Alloc_size (ir_node *node) {
1689 assert (node->op == op_Alloc);
1690 return get_irn_n(node, 1);
1694 set_Alloc_size (ir_node *node, ir_node *size) {
1695 assert (node->op == op_Alloc);
1696 set_irn_n(node, 1, size);
1700 get_Alloc_type (ir_node *node) {
1701 assert (node->op == op_Alloc);
1702 return node->attr.a.type = skip_tid(node->attr.a.type);
1706 set_Alloc_type (ir_node *node, type *tp) {
1707 assert (node->op == op_Alloc);
1708 node->attr.a.type = tp;
1712 get_Alloc_where (ir_node *node) {
1713 assert (node->op == op_Alloc);
1714 return node->attr.a.where;
1718 set_Alloc_where (ir_node *node, where_alloc where) {
1719 assert (node->op == op_Alloc);
1720 node->attr.a.where = where;
1725 get_Free_mem (ir_node *node) {
1726 assert (node->op == op_Free);
1727 return get_irn_n(node, 0);
1731 set_Free_mem (ir_node *node, ir_node *mem) {
1732 assert (node->op == op_Free);
1733 set_irn_n(node, 0, mem);
1737 get_Free_ptr (ir_node *node) {
1738 assert (node->op == op_Free);
1739 return get_irn_n(node, 1);
1743 set_Free_ptr (ir_node *node, ir_node *ptr) {
1744 assert (node->op == op_Free);
1745 set_irn_n(node, 1, ptr);
1749 get_Free_size (ir_node *node) {
1750 assert (node->op == op_Free);
1751 return get_irn_n(node, 2);
1755 set_Free_size (ir_node *node, ir_node *size) {
1756 assert (node->op == op_Free);
1757 set_irn_n(node, 2, size);
1761 get_Free_type (ir_node *node) {
1762 assert (node->op == op_Free);
1763 return node->attr.f.type = skip_tid(node->attr.f.type);
1767 set_Free_type (ir_node *node, type *tp) {
1768 assert (node->op == op_Free);
1769 node->attr.f.type = tp;
1773 get_Free_where (ir_node *node) {
1774 assert (node->op == op_Free);
1775 return node->attr.f.where;
1779 set_Free_where (ir_node *node, where_alloc where) {
1780 assert (node->op == op_Free);
1781 node->attr.f.where = where;
1785 get_Sync_preds_arr (ir_node *node) {
1786 assert (node->op == op_Sync);
1787 return (ir_node **)&(get_irn_in(node)[1]);
1791 get_Sync_n_preds (ir_node *node) {
1792 assert (node->op == op_Sync);
1793 return (get_irn_arity(node));
/* NOTE(review): body after the assert (original lines 1800ff.) is missing
   from this listing; effect beyond the opcode check cannot be documented
   here -- verify against the full file. */
1798 set_Sync_n_preds (ir_node *node, int n_preds) {
1799 assert (node->op == op_Sync);
1804 get_Sync_pred (ir_node *node, int pos) {
1805 assert (node->op == op_Sync);
1806 return get_irn_n(node, pos);
1810 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1811 assert (node->op == op_Sync);
1812 set_irn_n(node, pos, pred);
/* NOTE(review): this function is heavily truncated in the listing --
   variable declarations, several case labels, the default case and the
   final return (original lines 1816-1847, partially) are missing.
   Visible logic: derive the type a Proj produces from its predecessor. */
1815 type *get_Proj_type(ir_node *n)
1818 ir_node *pred = get_Proj_pred(n);
1820 switch (get_irn_opcode(pred)) {
1823 /* Deal with Start / Call here: we need to know the Proj Nr. */
1824 assert(get_irn_mode(pred) == mode_T);
1825 pred_pred = get_Proj_pred(pred);
/* Proj of a Proj of a Start: the type is the method's parameter type. */
1826 if (get_irn_op(pred_pred) == op_Start) {
1827 type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1828 tp = get_method_param_type(mtp, get_Proj_proj(n));
/* Proj of a Proj of a Call: the type is the callee's result type. */
1829 } else if (get_irn_op(pred_pred) == op_Call) {
1830 type *mtp = get_Call_type(pred_pred);
1831 tp = get_method_res_type(mtp, get_Proj_proj(n));
1834 case iro_Start: break;
1835 case iro_Call: break;
/* Presumably the iro_Load case: a Load of a Sel yields the entity's type
   -- the case label itself is missing from the listing, confirm. */
1837 ir_node *a = get_Load_ptr(pred);
1838 if (get_irn_op(a) == op_Sel)
1839 tp = get_entity_type(get_Sel_entity(a));
1848 get_Proj_pred (const ir_node *node) {
1849 assert (is_Proj(node));
1850 return get_irn_n(node, 0);
1854 set_Proj_pred (ir_node *node, ir_node *pred) {
1855 assert (is_Proj(node));
1856 set_irn_n(node, 0, pred);
1860 get_Proj_proj (const ir_node *node) {
1861 assert (is_Proj(node));
1862 if (get_irn_opcode(node) == iro_Proj) {
1863 return node->attr.proj;
1865 assert(get_irn_opcode(node) == iro_Filter);
1866 return node->attr.filter.proj;
1871 set_Proj_proj (ir_node *node, long proj) {
1872 assert (node->op == op_Proj);
1873 node->attr.proj = proj;
1877 get_Tuple_preds_arr (ir_node *node) {
1878 assert (node->op == op_Tuple);
1879 return (ir_node **)&(get_irn_in(node)[1]);
1883 get_Tuple_n_preds (ir_node *node) {
1884 assert (node->op == op_Tuple);
1885 return (get_irn_arity(node));
/* NOTE(review): body after the assert (original lines 1892ff.) is missing
   from this listing; effect beyond the opcode check cannot be documented
   here -- verify against the full file. */
1890 set_Tuple_n_preds (ir_node *node, int n_preds) {
1891 assert (node->op == op_Tuple);
1896 get_Tuple_pred (ir_node *node, int pos) {
1897 assert (node->op == op_Tuple);
1898 return get_irn_n(node, pos);
1902 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1903 assert (node->op == op_Tuple);
1904 set_irn_n(node, pos, pred);
1908 get_Id_pred (ir_node *node) {
1909 assert (node->op == op_Id);
1910 return get_irn_n(node, 0);
1914 set_Id_pred (ir_node *node, ir_node *pred) {
1915 assert (node->op == op_Id);
1916 set_irn_n(node, 0, pred);
1919 ir_node *get_Confirm_value (ir_node *node) {
1920 assert (node->op == op_Confirm);
1921 return get_irn_n(node, 0);
1923 void set_Confirm_value (ir_node *node, ir_node *value) {
1924 assert (node->op == op_Confirm);
1925 set_irn_n(node, 0, value);
1927 ir_node *get_Confirm_bound (ir_node *node) {
1928 assert (node->op == op_Confirm);
1929 return get_irn_n(node, 1);
1931 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1932 assert (node->op == op_Confirm);
1933 set_irn_n(node, 0, bound);
1935 pn_Cmp get_Confirm_cmp (ir_node *node) {
1936 assert (node->op == op_Confirm);
1937 return node->attr.confirm_cmp;
1939 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1940 assert (node->op == op_Confirm);
1941 node->attr.confirm_cmp = cmp;
/* NOTE(review): the return statement of the getter and the assignment of
   the setter (original lines 1948, 1953) are missing from this listing;
   the accessed in[] slot cannot be confirmed here -- verify in full file. */
1946 get_Filter_pred (ir_node *node) {
1947 assert(node->op == op_Filter);
1951 set_Filter_pred (ir_node *node, ir_node *pred) {
1952 assert(node->op == op_Filter);
1956 get_Filter_proj(ir_node *node) {
1957 assert(node->op == op_Filter);
1958 return node->attr.filter.proj;
1961 set_Filter_proj (ir_node *node, long proj) {
1962 assert(node->op == op_Filter);
1963 node->attr.filter.proj = proj;
1966 /* Don't use get_irn_arity, get_irn_n in implementation as access
1967 shall work independent of view!!! */
1968 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1969 assert(node->op == op_Filter);
1970 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1971 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1972 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1973 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1974 node->attr.filter.in_cg[0] = node->in[0];
1976 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1979 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1980 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1981 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1982 node->attr.filter.in_cg[pos + 1] = pred;
1984 int get_Filter_n_cg_preds(ir_node *node) {
1985 assert(node->op == op_Filter && node->attr.filter.in_cg);
1986 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1988 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1990 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1992 arity = ARR_LEN(node->attr.filter.in_cg);
1993 assert(pos < arity - 1);
1994 return node->attr.filter.in_cg[pos + 1];
1998 ir_node *get_Mux_sel (ir_node *node) {
1999 assert(node->op == op_Mux);
2002 void set_Mux_sel (ir_node *node, ir_node *sel) {
2003 assert(node->op == op_Mux);
2007 ir_node *get_Mux_false (ir_node *node) {
2008 assert(node->op == op_Mux);
2011 void set_Mux_false (ir_node *node, ir_node *ir_false) {
2012 assert(node->op == op_Mux);
2013 node->in[2] = ir_false;
2016 ir_node *get_Mux_true (ir_node *node) {
2017 assert(node->op == op_Mux);
2020 void set_Mux_true (ir_node *node, ir_node *ir_true) {
2021 assert(node->op == op_Mux);
2022 node->in[3] = ir_true;
2026 get_irn_irg(const ir_node *node) {
2028 * Do not use get_nodes_Block() here, because this
2029 * will check the pinned state.
2030 * However even a 'wrong' block is always in the proper
2033 if (! is_Block(node))
2034 node = get_irn_n(node, -1);
2035 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2036 node = get_irn_n(node, -1);
2037 assert(get_irn_op(node) == op_Block);
2038 return node->attr.block.irg;
2042 /*----------------------------------------------------------------*/
2043 /* Auxiliary routines */
2044 /*----------------------------------------------------------------*/
2047 skip_Proj (ir_node *node) {
2048 /* don't assert node !!! */
2049 if (node && is_Proj(node)) {
2050 return get_Proj_pred(node);
2057 skip_Tuple (ir_node *node) {
2061 if (!get_opt_normalize()) return node;
2064 node = skip_Id(node);
2065 if (get_irn_op(node) == op_Proj) {
2066 pred = skip_Id(get_Proj_pred(node));
2067 op = get_irn_op(pred);
2070 * Looks strange but calls get_irn_op() only once
2071 * in most often cases.
2073 if (op == op_Proj) { /* nested Tuple ? */
2074 pred = skip_Id(skip_Tuple(pred));
2075 op = get_irn_op(pred);
2077 if (op == op_Tuple) {
2078 node = get_Tuple_pred(pred, get_Proj_proj(node));
2082 else if (op == op_Tuple) {
2083 node = get_Tuple_pred(pred, get_Proj_proj(node));
2090 /* returns operand of node if node is a Cast */
2091 ir_node *skip_Cast (ir_node *node) {
2092 if (node && get_irn_op(node) == op_Cast)
2093 return get_Cast_op(node);
2097 /* returns operand of node if node is a Confirm */
2098 ir_node *skip_Confirm (ir_node *node) {
2099 if (node && get_irn_op(node) == op_Confirm)
2100 return get_Confirm_value(node);
2104 /* skip all high-level ops */
2105 ir_node *skip_HighLevel(ir_node *node) {
2106 if (node && is_op_highlevel(get_irn_op(node)))
2107 return get_irn_n(node, 0);
/* NOTE(review): two definitions of skip_Id appear in this listing (here and
   below); presumably they are alternatives selected by preprocessor
   conditionals whose #if/#else lines are missing -- confirm before editing.
   Also note `res` is used without a visible declaration (truncated line). */
2112 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2113 than any other approach, as Id chains are resolved and all point to the real node, or
2114 all id's are self loops. */
2116 skip_Id (ir_node *node) {
2117 /* don't assert node !!! */
2119 if (!get_opt_normalize()) return node;
2121 /* Don't use get_Id_pred: We get into an endless loop for
2122 self-referencing Ids. */
2123 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2124 ir_node *rem_pred = node->in[0+1];
2127 assert (get_irn_arity (node) > 0);
/* Temporarily make this Id self-referencing so recursion terminates. */
2129 node->in[0+1] = node;
2130 res = skip_Id(rem_pred);
2131 if (res->op == op_Id) /* self-loop */ return node;
2133 node->in[0+1] = res;
/* NOTE(review): second definition of skip_Id (see note on the first copy
   above); presumably guarded by a missing #else. `pred` is used without a
   visible declaration and `rem_pred` is declared but its assignment line is
   missing -- the listing is truncated here, confirm against the full file. */
2140 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2141 than any other approach, as Id chains are resolved and all point to the real node, or
2142 all id's are self loops. */
2144 skip_Id (ir_node *node) {
2146 /* don't assert node !!! */
2148 if (!node || (node->op != op_Id)) return node;
2150 if (!get_opt_normalize()) return node;
2152 /* Don't use get_Id_pred: We get into an endless loop for
2153 self-referencing Ids. */
2154 pred = node->in[0+1];
2156 if (pred->op != op_Id) return pred;
2158 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2159 ir_node *rem_pred, *res;
2161 if (pred->op != op_Id) return pred; /* shortcut */
2164 assert (get_irn_arity (node) > 0);
2166 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2167 res = skip_Id(rem_pred);
2168 if (res->op == op_Id) /* self-loop */ return node;
2170 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2179 (is_Bad)(const ir_node *node) {
2180 return _is_Bad(node);
2184 (is_Const)(const ir_node *node) {
2185 return _is_Const(node);
2189 (is_no_Block)(const ir_node *node) {
2190 return _is_no_Block(node);
2194 (is_Block)(const ir_node *node) {
2195 return _is_Block(node);
2198 /* returns true if node is a Unknown node. */
2200 (is_Unknown)(const ir_node *node) {
2201 return _is_Unknown(node);
2205 is_Proj (const ir_node *node) {
2207 return node->op == op_Proj
2208 || (!get_interprocedural_view() && node->op == op_Filter);
2211 /* Returns true if the operation manipulates control flow. */
2213 is_cfop(const ir_node *node) {
2214 return is_cfopcode(get_irn_op(node));
2217 /* Returns true if the operation manipulates interprocedural control flow:
2218 CallBegin, EndReg, EndExcept */
2219 int is_ip_cfop(const ir_node *node) {
2220 return is_ip_cfopcode(get_irn_op(node));
2223 /* Returns true if the operation can change the control flow because
2226 is_fragile_op(const ir_node *node) {
2227 return is_op_fragile(get_irn_op(node));
2230 /* Returns the memory operand of fragile operations. */
2231 ir_node *get_fragile_op_mem(ir_node *node) {
2232 assert(node && is_fragile_op(node));
/* NOTE(review): the case labels of this switch (original lines 2235-2242)
   are missing from the listing; presumably they enumerate the fragile
   opcodes whose memory input is pred 0 -- confirm against the full file. */
2234 switch (get_irn_opcode (node)) {
2243 return get_irn_n(node, 0);
/* Default path: a fragile opcode not handled above is a programming error. */
2248 assert(0 && "should not be reached");
2253 /* Returns true if the operation is a forking control flow operation. */
2254 int (is_irn_forking)(const ir_node *node) {
2255 return _is_irn_forking(node);
2258 type *(get_irn_type)(ir_node *node) {
2259 return _get_irn_type(node);
2262 /* Returns non-zero for constant-like nodes. */
2263 int (is_irn_constlike)(const ir_node *node) {
2264 return _is_irn_constlike(node);
2267 /* Gets the string representation of the jump prediction .*/
2268 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
/* NOTE(review): the `switch (pred)` line, default case and closing return
   (original lines 2269-2277) are missing from the listing; the string
   returned for unknown predicates cannot be documented here. */
2272 case COND_JMP_PRED_NONE: return "no prediction";
2273 case COND_JMP_PRED_TRUE: return "true taken";
2274 case COND_JMP_PRED_FALSE: return "false taken";
2278 /* Returns the conditional jump prediction of a Cond node. */
2279 cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
2280 return _get_Cond_jmp_pred(cond);
2283 /* Sets a new conditional jump prediction. */
2284 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2285 _set_Cond_jmp_pred(cond, pred);
2288 /** the get_type operation must be always implemented */
2289 static type *get_Null_type(ir_node *n) {
/* NOTE(review): the body of get_Null_type (original lines 2290-2292) is
   missing from the listing; the fallback value it returns cannot be
   documented here -- confirm against the full file. */
2293 /* set the get_type operation */
2294 ir_op *firm_set_default_get_type(ir_op *op)
/* NOTE(review): the `switch (get_op_code(op))` line and the closing
   `return op;` are missing from the listing. Visible: dispatch table wiring
   a per-opcode get_type handler, falling back to get_Null_type. */
2297 case iro_Const: op->get_type = get_Const_type; break;
2298 case iro_SymConst: op->get_type = get_SymConst_value_type; break;
2299 case iro_Cast: op->get_type = get_Cast_type; break;
2300 case iro_Proj: op->get_type = get_Proj_type; break;
2301 default: op->get_type = get_Null_type; break;
/* Debug helper: prints a node, its block and its predecessors to stdout.
   Compiled to an empty stub when DEBUG_libfirm is not defined. */
2306 #ifdef DEBUG_libfirm
2307 void dump_irn (ir_node *n) {
2308 int i, arity = get_irn_arity(n);
2309 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
/* NOTE(review): lines 2310 and 2314 are missing from the listing --
   presumably an `if (!is_Block(n)) {` guard around the block printout
   and its closing brace; confirm against the full file. */
2311 ir_node *pred = get_irn_n(n, -1);
2312 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2313 get_irn_node_nr(pred), (void *)pred);
2315 printf(" preds: \n");
2316 for (i = 0; i < arity; ++i) {
2317 ir_node *pred = get_irn_n(n, i);
2318 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2319 get_irn_node_nr(pred), (void *)pred);
2323 #else /* DEBUG_libfirm */
2324 void dump_irn (ir_node *n) {}
2325 #endif /* DEBUG_libfirm */