3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
/* Some constants fixing the positions of node predecessors. */
37 #define CALL_PARAM_OFFSET 2
38 #define FUNCCALL_PARAM_OFFSET 1
39 #define SEL_INDEX_OFFSET 2
40 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
41 #define END_KEEPALIVE_OFFSET 0
/** String names for the pn_Cmp projection numbers, indexed by pnc value. */
static const char *pnc_name_arr [] = {
  "pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
  "pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
  "pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
  "pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the name string of a pnc constant.
 *
 * @param pnc  the pnc value, must be in [0, 15]
 * @return the corresponding "pn_Cmp_*" name
 */
const char *get_pnc_string(int pnc) {
  /* guard the table lookup: an out-of-range pnc would index past the
     16-entry array, which is undefined behavior */
  assert(pnc >= 0 &&
         (size_t)pnc < sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0]));
  return pnc_name_arr[pnc];
}
58 * Calculates the negated (Complement(R)) pnc condition.
60 int get_negated_pnc(int pnc, ir_mode *mode) {
63 /* do NOT add the Uo bit for non-floating point values */
64 if (! mode_is_float(mode))
70 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
72 get_inversed_pnc(int pnc) {
73 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
74 int lesser = pnc & pn_Cmp_Lt;
75 int greater = pnc & pn_Cmp_Gt;
77 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
/** String names for the projections from the Start node. */
const char *pns_name_arr [] = {
  "initial_exec", "global_store",
  "frame_base", "globals", "args"
};
/** String names for the SymConst kinds (see symconst_kind). */
const char *symconst_name_arr [] = {
  "type_tag", "size", "addr_name", "addr_ent"
};
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;
/* register new space for every node */
unsigned register_additional_node_data(unsigned size) {
  assert(!forbid_new_data && "Too late to register additional node data");

  /* NOTE(review): the assert above disappears in NDEBUG builds; the lines
     elided from this excerpt presumably handle the forbid_new_data case at
     runtime as well — confirm against the full source. */
  /* Grow the per-node extra-data budget and return the new total, which
     callers use as their offset into the custom-data area. */
  return firm_add_node_size += size;
117 /* Forbid the addition of new data to an ir node. */
/*
 * irnode constructor.
 * Create a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
 *
 * NOTE(review): this excerpt is elided — local declarations, the arity
 * branch around the two NEW_ARR_ lines and the function's return are not
 * visible here; comments below hedge accordingly.
 */
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
             int arity, ir_node **in)
  /* total size: node header up to attr, plus op-specific attributes, plus
     the space registered via register_additional_node_data() */
  size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
  assert(irg && op && mode);
  p = obstack_alloc (irg->obst, node_size);
  /* zero the whole allocation so all attributes start out cleared */
  memset(p, 0, node_size);
  /* custom data lives in front of the node; res points past it */
  res = (ir_node *) (p + firm_add_node_size);
  res->kind = k_ir_node;
  res->node_idx = get_irg_next_node_idx(irg);
  /* dynamic in-array: flexible array with slot 0 reserved for the block
     (presumably the arity < 0 branch — confirm in full source) */
  res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
  /* fixed in-array on the graph's obstack; slot 0 is the block, the arity
     predecessors follow at indices 1..arity */
  res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
  set_irn_dbg_info(res, db);
  res->node_nr = get_irp_new_node_nr();
#if FIRM_EDGES_INPLACE
  int is_bl = is_Block(res);
  INIT_LIST_HEAD(&res->edge_info.outs_head);
  /* Blocks additionally maintain a list of control-flow successors */
  INIT_LIST_HEAD(&res->attr.block.succ_head);
  /* notify the edge module about every initial edge; index -1 is the
     block edge, so Blocks (is_bl == 1) skip it */
  for (i = is_bl; i <= arity; ++i)
    edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
  hook_new_node(irg, res);
181 /*-- getting some parameters from ir_nodes --*/
184 (is_ir_node)(const void *thing) {
185 return _is_ir_node(thing);
189 (get_irn_intra_arity)(const ir_node *node) {
190 return _get_irn_intra_arity(node);
194 (get_irn_inter_arity)(const ir_node *node) {
195 return _get_irn_inter_arity(node);
198 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
201 (get_irn_arity)(const ir_node *node) {
202 return _get_irn_arity(node);
/* Returns the array with ins. This array is shifted with respect to the
   array accessed by get_irn_n: The block operand is at position 0 not -1.
   (@@@ This should be changed.)
   The order of the predecessors in this array is not guaranteed, except that
   lists of operands as predecessors of Block or arguments of a Call are
   kept in order. */
get_irn_in (const ir_node *node) {
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      /* a Filter in the interprocedural view has its own in_cg array */
      assert(node->attr.filter.in_cg);
      return node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      /* a Block may carry interprocedural predecessors in in_cg */
      return node->attr.block.in_cg;
    /* else fall through */
set_irn_in (ir_node *node, int arity, ir_node **in) {
  /* NOTE(review): this excerpt is elided — the declarations of arr/i, the
     default branch selecting &node->in, and several closing braces are not
     visible here. */
  ir_graph *irg = current_ir_graph;
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      arr = &node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      arr = &node->attr.block.in_cg;
  /* tell the edge module about every changed predecessor; slot i of the
     user-visible ins is (*arr)[i+1] because slot 0 holds the block */
  for (i = 0; i < arity; i++) {
    if (i < ARR_LEN(*arr)-1)
      edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
      /* new slot beyond the old arity: no old target to replace */
      edges_notify_edge(node, i, in[i], NULL, irg);
  /* edges for slots that exist in the old array but not in the new one
     are removed (new target NULL) */
  for(;i < ARR_LEN(*arr)-1; i++) {
    edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
  /* reallocate the in-array if the arity changed, keeping the block in
     slot 0 */
  if (arity != ARR_LEN(*arr) - 1) {
    ir_node * block = (*arr)[0];
    *arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
  /* a changed arity invalidates the backedge bitset; rebuild it */
  fix_backedges(irg->obst, node);
  memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
266 (get_irn_intra_n)(const ir_node *node, int n) {
267 return _get_irn_intra_n (node, n);
271 (get_irn_inter_n)(const ir_node *node, int n) {
272 return _get_irn_inter_n (node, n);
275 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
278 (get_irn_n)(const ir_node *node, int n) {
279 return _get_irn_n(node, n);
set_irn_n (ir_node *node, int n, ir_node *in) {
  /* NOTE(review): excerpt is elided — early returns / closing braces of the
     branches below are not visible here; n == -1 addresses the block
     predecessor (stored at in[0]). */
  assert(node && node->kind == k_ir_node);
  assert(n < get_irn_arity(node));
  assert(in && in->kind == k_ir_node);
  if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
    /* Change block pred in both views! */
    node->in[n + 1] = in;
    assert(node->attr.filter.in_cg);
    node->attr.filter.in_cg[n + 1] = in;
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      node->attr.filter.in_cg[n + 1] = in;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      node->attr.block.in_cg[n + 1] = in;
    /* else fall through */
  /* report the change before performing it, passing the old target */
  hook_set_irn_n(node, n, in, node->in[n + 1]);
  /* Here, we rely on src and tgt being in the current ir graph */
  edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
  node->in[n + 1] = in;
318 (get_irn_mode)(const ir_node *node) {
319 return _get_irn_mode(node);
323 (set_irn_mode)(ir_node *node, ir_mode *mode)
325 _set_irn_mode(node, mode);
329 get_irn_modecode (const ir_node *node)
332 return node->mode->code;
335 /** Gets the string representation of the mode .*/
337 get_irn_modename (const ir_node *node)
340 return get_mode_name(node->mode);
344 get_irn_modeident (const ir_node *node)
347 return get_mode_ident(node->mode);
351 (get_irn_op)(const ir_node *node) {
352 return _get_irn_op(node);
355 /* should be private to the library: */
357 (set_irn_op)(ir_node *node, ir_op *op) {
358 _set_irn_op(node, op);
362 (get_irn_opcode)(const ir_node *node)
364 return _get_irn_opcode(node);
368 get_irn_opname (const ir_node *node)
371 if ((get_irn_op((ir_node *)node) == op_Phi) &&
372 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
373 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
374 return get_id_str(node->op->name);
378 get_irn_opident (const ir_node *node)
381 return node->op->name;
385 (get_irn_visited)(const ir_node *node)
387 return _get_irn_visited(node);
391 (set_irn_visited)(ir_node *node, unsigned long visited)
393 _set_irn_visited(node, visited);
397 (mark_irn_visited)(ir_node *node) {
398 _mark_irn_visited(node);
402 (irn_not_visited)(const ir_node *node) {
403 return _irn_not_visited(node);
407 (irn_visited)(const ir_node *node) {
408 return _irn_visited(node);
412 (set_irn_link)(ir_node *node, void *link) {
413 _set_irn_link(node, link);
417 (get_irn_link)(const ir_node *node) {
418 return _get_irn_link(node);
422 (get_irn_pinned)(const ir_node *node) {
423 return _get_irn_pinned(node);
427 (is_irn_pinned_in_irg) (const ir_node *node) {
428 return _is_irn_pinned_in_irg(node);
void set_irn_pinned(ir_node *node, op_pin_state state) {
  /* due to optimization an opt may be turned into a Tuple */
  if (get_irn_op(node) == op_Tuple)
    /* NOTE(review): the body of this branch (presumably an early return for
       Tuples) is elided from this excerpt — confirm in the full source. */

  assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
  assert(state == op_pin_state_pinned || state == op_pin_state_floats);

  node->attr.except.pin_state = state;
442 #ifdef DO_HEAPANALYSIS
443 /* Access the abstract interpretation information of a node.
444 Returns NULL if no such information is available. */
445 struct abstval *get_irn_abst_value(ir_node *n) {
448 /* Set the abstract interpretation information of a node. */
449 void set_irn_abst_value(ir_node *n, struct abstval *os) {
452 struct section *firm_get_irn_section(ir_node *n) {
455 void firm_set_irn_section(ir_node *n, struct section *s) {
459 /* Dummies needed for firmjni. */
460 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
461 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
462 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
463 void firm_set_irn_section(ir_node *n, struct section *s) {}
464 #endif /* DO_HEAPANALYSIS */
467 /* Outputs a unique number for this node */
468 long get_irn_node_nr(const ir_node *node) {
471 return node->node_nr;
473 return (long)PTR_TO_INT(node);
478 get_irn_const_attr (ir_node *node)
480 assert (node->op == op_Const);
481 return node->attr.con;
485 get_irn_proj_attr (ir_node *node)
487 assert (node->op == op_Proj);
488 return node->attr.proj;
492 get_irn_alloc_attr (ir_node *node)
494 assert (node->op == op_Alloc);
499 get_irn_free_attr (ir_node *node)
501 assert (node->op == op_Free);
506 get_irn_symconst_attr (ir_node *node)
508 assert (node->op == op_SymConst);
513 get_irn_call_attr (ir_node *node)
515 assert (node->op == op_Call);
516 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
520 get_irn_sel_attr (ir_node *node)
522 assert (node->op == op_Sel);
527 get_irn_phi_attr (ir_node *node)
529 assert (node->op == op_Phi);
530 return node->attr.phi0_pos;
534 get_irn_block_attr (ir_node *node)
536 assert (node->op == op_Block);
537 return node->attr.block;
541 get_irn_load_attr (ir_node *node)
543 assert (node->op == op_Load);
544 return node->attr.load;
548 get_irn_store_attr (ir_node *node)
550 assert (node->op == op_Store);
551 return node->attr.store;
555 get_irn_except_attr (ir_node *node)
557 assert (node->op == op_Div || node->op == op_Quot ||
558 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
559 return node->attr.except;
563 get_irn_generic_attr (ir_node *node) {
567 unsigned (get_irn_idx)(const ir_node *node) {
568 assert(is_ir_node(node));
569 return _get_irn_idx(node);
572 /** manipulate fields of individual nodes **/
574 /* this works for all except Block */
576 get_nodes_block (const ir_node *node) {
577 assert (!(node->op == op_Block));
578 assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
579 return get_irn_n(node, -1);
583 set_nodes_block (ir_node *node, ir_node *block) {
584 assert (!(node->op == op_Block));
585 set_irn_n(node, -1, block);
588 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
589 * from Start. If so returns frame type, else Null. */
590 ir_type *is_frame_pointer(ir_node *n) {
591 if ((get_irn_op(n) == op_Proj) &&
592 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
593 ir_node *start = get_Proj_pred(n);
594 if (get_irn_op(start) == op_Start) {
595 return get_irg_frame_type(get_irn_irg(start));
601 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
602 * from Start. If so returns global type, else Null. */
603 ir_type *is_globals_pointer(ir_node *n) {
604 if ((get_irn_op(n) == op_Proj) &&
605 (get_Proj_proj(n) == pn_Start_P_globals)) {
606 ir_node *start = get_Proj_pred(n);
607 if (get_irn_op(start) == op_Start) {
608 return get_glob_type();
614 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
615 * from Start. If so returns 1, else 0. */
616 int is_value_arg_pointer(ir_node *n) {
617 if ((get_irn_op(n) == op_Proj) &&
618 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
619 (get_irn_op(get_Proj_pred(n)) == op_Start))
624 /* Returns an array with the predecessors of the Block. Depending on
625 the implementation of the graph data structure this can be a copy of
626 the internal representation of predecessors as well as the internal
627 array itself. Therefore writing to this array might obstruct the ir. */
629 get_Block_cfgpred_arr (ir_node *node)
631 assert ((node->op == op_Block));
632 return (ir_node **)&(get_irn_in(node)[1]);
636 (get_Block_n_cfgpreds)(ir_node *node) {
637 return _get_Block_n_cfgpreds(node);
641 (get_Block_cfgpred)(ir_node *node, int pos) {
642 return _get_Block_cfgpred(node, pos);
646 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
647 assert (node->op == op_Block);
648 set_irn_n(node, pos, pred);
652 (get_Block_cfgpred_block)(ir_node *node, int pos) {
653 return _get_Block_cfgpred_block(node, pos);
657 get_Block_matured (ir_node *node) {
658 assert (node->op == op_Block);
659 return (int)node->attr.block.matured;
663 set_Block_matured (ir_node *node, int matured) {
664 assert (node->op == op_Block);
665 node->attr.block.matured = matured;
669 (get_Block_block_visited)(ir_node *node) {
670 return _get_Block_block_visited(node);
674 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
675 _set_Block_block_visited(node, visit);
678 /* For this current_ir_graph must be set. */
680 (mark_Block_block_visited)(ir_node *node) {
681 _mark_Block_block_visited(node);
685 (Block_not_block_visited)(ir_node *node) {
686 return _Block_not_block_visited(node);
690 get_Block_graph_arr (ir_node *node, int pos) {
691 assert (node->op == op_Block);
692 return node->attr.block.graph_arr[pos+1];
696 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
697 assert (node->op == op_Block);
698 node->attr.block.graph_arr[pos+1] = value;
701 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
702 assert(node->op == op_Block);
703 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
704 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
705 node->attr.block.in_cg[0] = NULL;
706 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
708 /* Fix backedge array. fix_backedges() operates depending on
709 interprocedural_view. */
710 int ipv = get_interprocedural_view();
711 set_interprocedural_view(1);
712 fix_backedges(current_ir_graph->obst, node);
713 set_interprocedural_view(ipv);
716 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
719 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
720 assert(node->op == op_Block &&
721 node->attr.block.in_cg &&
722 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
723 node->attr.block.in_cg[pos + 1] = pred;
726 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
727 assert(node->op == op_Block);
728 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
731 int get_Block_cg_n_cfgpreds(ir_node * node) {
732 assert(node->op == op_Block);
733 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
736 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
737 assert(node->op == op_Block && node->attr.block.in_cg);
738 return node->attr.block.in_cg[pos + 1];
741 void remove_Block_cg_cfgpred_arr(ir_node * node) {
742 assert(node->op == op_Block);
743 node->attr.block.in_cg = NULL;
746 ir_node *(set_Block_dead)(ir_node *block) {
747 return _set_Block_dead(block);
750 int (is_Block_dead)(const ir_node *block) {
751 return _is_Block_dead(block);
754 ir_extblk *get_Block_extbb(const ir_node *block) {
756 assert(is_Block(block));
757 res = block->attr.block.extblk;
758 assert(res == NULL || is_ir_extbb(res));
762 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
763 assert(is_Block(block));
764 assert(extblk == NULL || is_ir_extbb(extblk));
765 block->attr.block.extblk = extblk;
769 get_End_n_keepalives(ir_node *end) {
770 assert (end->op == op_End);
771 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
775 get_End_keepalive(ir_node *end, int pos) {
776 assert (end->op == op_End);
777 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
781 add_End_keepalive (ir_node *end, ir_node *ka) {
782 assert (end->op == op_End);
783 ARR_APP1 (ir_node *, end->in, ka);
787 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
788 assert (end->op == op_End);
789 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
free_End (ir_node *end) {
  assert (end->op == op_End);
  /* free the keep-alive in-array of the End node */
  DEL_ARR_F(end->in); /* GL @@@ does not work! */
  end->in = NULL; /* @@@ make sure we get an error if we use the
                     in array afterwards ... */
801 /* Return the target address of an IJmp */
802 ir_node *get_IJmp_target(ir_node *ijmp) {
803 assert(ijmp->op == op_IJmp);
804 return get_irn_n(ijmp, 0);
807 /** Sets the target address of an IJmp */
808 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
809 assert(ijmp->op == op_IJmp);
810 set_irn_n(ijmp, 0, tgt);
814 > Implementing the case construct (which is where the constant Proj node is
815 > important) involves far more than simply determining the constant values.
816 > We could argue that this is more properly a function of the translator from
817 > Firm to the target machine. That could be done if there was some way of
818 > projecting "default" out of the Cond node.
819 I know it's complicated.
Basically there are two problems:
821 - determining the gaps between the projs
822 - determining the biggest case constant to know the proj number for
824 I see several solutions:
825 1. Introduce a ProjDefault node. Solves both problems.
826 This means to extend all optimizations executed during construction.
827 2. Give the Cond node for switch two flavors:
828 a) there are no gaps in the projs (existing flavor)
829 b) gaps may exist, default proj is still the Proj with the largest
830 projection number. This covers also the gaps.
831 3. Fix the semantic of the Cond to that of 2b)
833 Solution 2 seems to be the best:
834 Computing the gaps in the Firm representation is not too hard, i.e.,
835 libFIRM can implement a routine that transforms between the two
836 flavours. This is also possible for 1) but 2) does not require to
837 change any existing optimization.
838 Further it should be far simpler to determine the biggest constant than
840 I don't want to choose 3) as 2a) seems to have advantages for
841 dataflow analysis and 3) does not allow to convert the representation to
845 get_Cond_selector (ir_node *node) {
846 assert (node->op == op_Cond);
847 return get_irn_n(node, 0);
851 set_Cond_selector (ir_node *node, ir_node *selector) {
852 assert (node->op == op_Cond);
853 set_irn_n(node, 0, selector);
857 get_Cond_kind (ir_node *node) {
858 assert (node->op == op_Cond);
859 return node->attr.c.kind;
863 set_Cond_kind (ir_node *node, cond_kind kind) {
864 assert (node->op == op_Cond);
865 node->attr.c.kind = kind;
869 get_Cond_defaultProj (ir_node *node) {
870 assert (node->op == op_Cond);
871 return node->attr.c.default_proj;
875 get_Return_mem (ir_node *node) {
876 assert (node->op == op_Return);
877 return get_irn_n(node, 0);
881 set_Return_mem (ir_node *node, ir_node *mem) {
882 assert (node->op == op_Return);
883 set_irn_n(node, 0, mem);
887 get_Return_n_ress (ir_node *node) {
888 assert (node->op == op_Return);
889 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
893 get_Return_res_arr (ir_node *node)
895 assert ((node->op == op_Return));
896 if (get_Return_n_ress(node) > 0)
897 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
904 set_Return_n_res (ir_node *node, int results) {
905 assert (node->op == op_Return);
910 get_Return_res (ir_node *node, int pos) {
911 assert (node->op == op_Return);
912 assert (get_Return_n_ress(node) > pos);
913 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
917 set_Return_res (ir_node *node, int pos, ir_node *res){
918 assert (node->op == op_Return);
919 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
922 tarval *(get_Const_tarval)(ir_node *node) {
923 return _get_Const_tarval(node);
927 set_Const_tarval (ir_node *node, tarval *con) {
928 assert (node->op == op_Const);
929 node->attr.con.tv = con;
932 cnst_classify_t (classify_Const)(ir_node *node)
934 return _classify_Const(node);
938 /* The source language type. Must be an atomic type. Mode of type must
939 be mode of node. For tarvals from entities type must be pointer to
942 get_Const_type (ir_node *node) {
943 assert (node->op == op_Const);
944 return node->attr.con.tp;
948 set_Const_type (ir_node *node, ir_type *tp) {
949 assert (node->op == op_Const);
950 if (tp != firm_unknown_type) {
951 assert (is_atomic_type(tp));
952 assert (get_type_mode(tp) == get_irn_mode(node));
954 node->attr.con.tp = tp;
959 get_SymConst_kind (const ir_node *node) {
960 assert (node->op == op_SymConst);
961 return node->attr.i.num;
965 set_SymConst_kind (ir_node *node, symconst_kind num) {
966 assert (node->op == op_SymConst);
967 node->attr.i.num = num;
971 get_SymConst_type (ir_node *node) {
972 assert ( (node->op == op_SymConst)
973 && ( get_SymConst_kind(node) == symconst_type_tag
974 || get_SymConst_kind(node) == symconst_size));
975 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
979 set_SymConst_type (ir_node *node, ir_type *tp) {
980 assert ( (node->op == op_SymConst)
981 && ( get_SymConst_kind(node) == symconst_type_tag
982 || get_SymConst_kind(node) == symconst_size));
983 node->attr.i.sym.type_p = tp;
987 get_SymConst_name (ir_node *node) {
988 assert ( (node->op == op_SymConst)
989 && (get_SymConst_kind(node) == symconst_addr_name));
990 return node->attr.i.sym.ident_p;
994 set_SymConst_name (ir_node *node, ident *name) {
995 assert ( (node->op == op_SymConst)
996 && (get_SymConst_kind(node) == symconst_addr_name));
997 node->attr.i.sym.ident_p = name;
1001 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1002 entity *get_SymConst_entity (ir_node *node) {
1003 assert ( (node->op == op_SymConst)
1004 && (get_SymConst_kind (node) == symconst_addr_ent));
1005 return node->attr.i.sym.entity_p;
1008 void set_SymConst_entity (ir_node *node, entity *ent) {
1009 assert ( (node->op == op_SymConst)
1010 && (get_SymConst_kind(node) == symconst_addr_ent));
1011 node->attr.i.sym.entity_p = ent;
1014 union symconst_symbol
1015 get_SymConst_symbol (ir_node *node) {
1016 assert (node->op == op_SymConst);
1017 return node->attr.i.sym;
1021 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1022 assert (node->op == op_SymConst);
1023 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1024 node->attr.i.sym = sym;
1028 get_SymConst_value_type (ir_node *node) {
1029 assert (node->op == op_SymConst);
1030 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1031 return node->attr.i.tp;
1035 set_SymConst_value_type (ir_node *node, ir_type *tp) {
1036 assert (node->op == op_SymConst);
1037 node->attr.i.tp = tp;
1041 get_Sel_mem (ir_node *node) {
1042 assert (node->op == op_Sel);
1043 return get_irn_n(node, 0);
1047 set_Sel_mem (ir_node *node, ir_node *mem) {
1048 assert (node->op == op_Sel);
1049 set_irn_n(node, 0, mem);
1053 get_Sel_ptr (ir_node *node) {
1054 assert (node->op == op_Sel);
1055 return get_irn_n(node, 1);
1059 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1060 assert (node->op == op_Sel);
1061 set_irn_n(node, 1, ptr);
1065 get_Sel_n_indexs (ir_node *node) {
1066 assert (node->op == op_Sel);
1067 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1071 get_Sel_index_arr (ir_node *node)
1073 assert ((node->op == op_Sel));
1074 if (get_Sel_n_indexs(node) > 0)
1075 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1081 get_Sel_index (ir_node *node, int pos) {
1082 assert (node->op == op_Sel);
1083 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1087 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1088 assert (node->op == op_Sel);
1089 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1093 get_Sel_entity (ir_node *node) {
1094 assert (node->op == op_Sel);
1095 return node->attr.s.ent;
1099 set_Sel_entity (ir_node *node, entity *ent) {
1100 assert (node->op == op_Sel);
1101 node->attr.s.ent = ent;
1105 /* For unary and binary arithmetic operations the access to the
1106 operands can be factored out. Left is the first, right the
1107 second arithmetic value as listed in tech report 0999-33.
1108 unops are: Minus, Abs, Not, Conv, Cast
1109 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1110 Shr, Shrs, Rotate, Cmp */
1114 get_Call_mem (ir_node *node) {
1115 assert (node->op == op_Call);
1116 return get_irn_n(node, 0);
1120 set_Call_mem (ir_node *node, ir_node *mem) {
1121 assert (node->op == op_Call);
1122 set_irn_n(node, 0, mem);
1126 get_Call_ptr (ir_node *node) {
1127 assert (node->op == op_Call);
1128 return get_irn_n(node, 1);
1132 set_Call_ptr (ir_node *node, ir_node *ptr) {
1133 assert (node->op == op_Call);
1134 set_irn_n(node, 1, ptr);
1138 get_Call_param_arr (ir_node *node) {
1139 assert (node->op == op_Call);
1140 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1144 get_Call_n_params (ir_node *node) {
1145 assert (node->op == op_Call);
1146 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1150 get_Call_arity (ir_node *node) {
1151 assert (node->op == op_Call);
1152 return get_Call_n_params(node);
1156 set_Call_arity (ir_node *node, ir_node *arity) {
1157 assert (node->op == op_Call);
1162 get_Call_param (ir_node *node, int pos) {
1163 assert (node->op == op_Call);
1164 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1168 set_Call_param (ir_node *node, int pos, ir_node *param) {
1169 assert (node->op == op_Call);
1170 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1174 get_Call_type (ir_node *node) {
1175 assert (node->op == op_Call);
1176 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1180 set_Call_type (ir_node *node, ir_type *tp) {
1181 assert (node->op == op_Call);
1182 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1183 node->attr.call.cld_tp = tp;
1186 int Call_has_callees(ir_node *node) {
1187 assert(node && node->op == op_Call);
1188 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1189 (node->attr.call.callee_arr != NULL));
1192 int get_Call_n_callees(ir_node * node) {
1193 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1194 return ARR_LEN(node->attr.call.callee_arr);
1197 entity * get_Call_callee(ir_node * node, int pos) {
1198 assert(pos >= 0 && pos < get_Call_n_callees(node));
1199 return node->attr.call.callee_arr[pos];
1202 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1203 assert(node->op == op_Call);
1204 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1205 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1207 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1210 void remove_Call_callee_arr(ir_node * node) {
1211 assert(node->op == op_Call);
1212 node->attr.call.callee_arr = NULL;
1215 ir_node * get_CallBegin_ptr (ir_node *node) {
1216 assert(node->op == op_CallBegin);
1217 return get_irn_n(node, 0);
1219 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1220 assert(node->op == op_CallBegin);
1221 set_irn_n(node, 0, ptr);
1223 ir_node * get_CallBegin_call (ir_node *node) {
1224 assert(node->op == op_CallBegin);
1225 return node->attr.callbegin.call;
1227 void set_CallBegin_call (ir_node *node, ir_node *call) {
1228 assert(node->op == op_CallBegin);
1229 node->attr.callbegin.call = call;
1234 ir_node * get_##OP##_left(ir_node *node) { \
1235 assert(node->op == op_##OP); \
1236 return get_irn_n(node, node->op->op_index); \
1238 void set_##OP##_left(ir_node *node, ir_node *left) { \
1239 assert(node->op == op_##OP); \
1240 set_irn_n(node, node->op->op_index, left); \
1242 ir_node *get_##OP##_right(ir_node *node) { \
1243 assert(node->op == op_##OP); \
1244 return get_irn_n(node, node->op->op_index + 1); \
1246 void set_##OP##_right(ir_node *node, ir_node *right) { \
1247 assert(node->op == op_##OP); \
1248 set_irn_n(node, node->op->op_index + 1, right); \
1252 ir_node *get_##OP##_op(ir_node *node) { \
1253 assert(node->op == op_##OP); \
1254 return get_irn_n(node, node->op->op_index); \
1256 void set_##OP##_op (ir_node *node, ir_node *op) { \
1257 assert(node->op == op_##OP); \
1258 set_irn_n(node, node->op->op_index, op); \
1268 get_Quot_mem (ir_node *node) {
1269 assert (node->op == op_Quot);
1270 return get_irn_n(node, 0);
1274 set_Quot_mem (ir_node *node, ir_node *mem) {
1275 assert (node->op == op_Quot);
1276 set_irn_n(node, 0, mem);
1282 get_DivMod_mem (ir_node *node) {
1283 assert (node->op == op_DivMod);
1284 return get_irn_n(node, 0);
1288 set_DivMod_mem (ir_node *node, ir_node *mem) {
1289 assert (node->op == op_DivMod);
1290 set_irn_n(node, 0, mem);
1296 get_Div_mem (ir_node *node) {
1297 assert (node->op == op_Div);
1298 return get_irn_n(node, 0);
1302 set_Div_mem (ir_node *node, ir_node *mem) {
1303 assert (node->op == op_Div);
1304 set_irn_n(node, 0, mem);
1310 get_Mod_mem (ir_node *node) {
1311 assert (node->op == op_Mod);
1312 return get_irn_n(node, 0);
1316 set_Mod_mem (ir_node *node, ir_node *mem) {
1317 assert (node->op == op_Mod);
1318 set_irn_n(node, 0, mem);
1335 get_Cast_type (ir_node *node) {
1336 assert (node->op == op_Cast);
1337 return node->attr.cast.totype;
1341 set_Cast_type (ir_node *node, ir_type *to_tp) {
1342 assert (node->op == op_Cast);
1343 node->attr.cast.totype = to_tp;
1347 /* Checks for upcast.
1349 * Returns true if the Cast node casts a class type to a super type.
1351 int is_Cast_upcast(ir_node *node) {
1352 ir_type *totype = get_Cast_type(node);
1353 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1354 ir_graph *myirg = get_irn_irg(node);
1356 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1359 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1360 totype = get_pointer_points_to_type(totype);
1361 fromtype = get_pointer_points_to_type(fromtype);
1366 if (!is_Class_type(totype)) return 0;
1367 return is_SubClass_of(fromtype, totype);
1370 /* Checks for downcast.
1372 * Returns true if the Cast node casts a class type to a sub type.
1374 int is_Cast_downcast(ir_node *node) {
1375 ir_type *totype = get_Cast_type(node);
1376 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1378 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1381 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1382 totype = get_pointer_points_to_type(totype);
1383 fromtype = get_pointer_points_to_type(fromtype);
1388 if (!is_Class_type(totype)) return 0;
1389 return is_SubClass_of(totype, fromtype);
1393 (is_unop)(const ir_node *node) {
1394 return _is_unop(node);
1398 get_unop_op (ir_node *node) {
1399 if (node->op->opar == oparity_unary)
1400 return get_irn_n(node, node->op->op_index);
1402 assert(node->op->opar == oparity_unary);
1407 set_unop_op (ir_node *node, ir_node *op) {
1408 if (node->op->opar == oparity_unary)
1409 set_irn_n(node, node->op->op_index, op);
1411 assert(node->op->opar == oparity_unary);
1415 (is_binop)(const ir_node *node) {
1416 return _is_binop(node);
1420 get_binop_left (ir_node *node) {
1421 if (node->op->opar == oparity_binary)
1422 return get_irn_n(node, node->op->op_index);
1424 assert(node->op->opar == oparity_binary);
1429 set_binop_left (ir_node *node, ir_node *left) {
1430 if (node->op->opar == oparity_binary)
1431 set_irn_n(node, node->op->op_index, left);
1433 assert (node->op->opar == oparity_binary);
1437 get_binop_right (ir_node *node) {
1438 if (node->op->opar == oparity_binary)
1439 return get_irn_n(node, node->op->op_index + 1);
1441 assert(node->op->opar == oparity_binary);
1446 set_binop_right (ir_node *node, ir_node *right) {
1447 if (node->op->opar == oparity_binary)
1448 set_irn_n(node, node->op->op_index + 1, right);
1450 assert (node->op->opar == oparity_binary);
1453 int is_Phi (const ir_node *n) {
1459 if (op == op_Filter) return get_interprocedural_view();
1462 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1463 (get_irn_arity(n) > 0));
1468 int is_Phi0 (const ir_node *n) {
1471 return ((get_irn_op(n) == op_Phi) &&
1472 (get_irn_arity(n) == 0) &&
1473 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1477 get_Phi_preds_arr (ir_node *node) {
1478 assert (node->op == op_Phi);
1479 return (ir_node **)&(get_irn_in(node)[1]);
1483 get_Phi_n_preds (ir_node *node) {
1484 assert (is_Phi(node) || is_Phi0(node));
1485 return (get_irn_arity(node));
1489 void set_Phi_n_preds (ir_node *node, int n_preds) {
1490 assert (node->op == op_Phi);
1495 get_Phi_pred (ir_node *node, int pos) {
1496 assert (is_Phi(node) || is_Phi0(node));
1497 return get_irn_n(node, pos);
1501 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1502 assert (is_Phi(node) || is_Phi0(node));
1503 set_irn_n(node, pos, pred);
1507 int is_memop(ir_node *node) {
1508 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1511 ir_node *get_memop_mem (ir_node *node) {
1512 assert(is_memop(node));
1513 return get_irn_n(node, 0);
1516 void set_memop_mem (ir_node *node, ir_node *mem) {
1517 assert(is_memop(node));
1518 set_irn_n(node, 0, mem);
1521 ir_node *get_memop_ptr (ir_node *node) {
1522 assert(is_memop(node));
1523 return get_irn_n(node, 1);
1526 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1527 assert(is_memop(node));
1528 set_irn_n(node, 1, ptr);
1532 get_Load_mem (ir_node *node) {
1533 assert (node->op == op_Load);
1534 return get_irn_n(node, 0);
1538 set_Load_mem (ir_node *node, ir_node *mem) {
1539 assert (node->op == op_Load);
1540 set_irn_n(node, 0, mem);
1544 get_Load_ptr (ir_node *node) {
1545 assert (node->op == op_Load);
1546 return get_irn_n(node, 1);
1550 set_Load_ptr (ir_node *node, ir_node *ptr) {
1551 assert (node->op == op_Load);
1552 set_irn_n(node, 1, ptr);
1556 get_Load_mode (ir_node *node) {
1557 assert (node->op == op_Load);
1558 return node->attr.load.load_mode;
1562 set_Load_mode (ir_node *node, ir_mode *mode) {
1563 assert (node->op == op_Load);
1564 node->attr.load.load_mode = mode;
1568 get_Load_volatility (ir_node *node) {
1569 assert (node->op == op_Load);
1570 return node->attr.load.volatility;
1574 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1575 assert (node->op == op_Load);
1576 node->attr.load.volatility = volatility;
1581 get_Store_mem (ir_node *node) {
1582 assert (node->op == op_Store);
1583 return get_irn_n(node, 0);
1587 set_Store_mem (ir_node *node, ir_node *mem) {
1588 assert (node->op == op_Store);
1589 set_irn_n(node, 0, mem);
1593 get_Store_ptr (ir_node *node) {
1594 assert (node->op == op_Store);
1595 return get_irn_n(node, 1);
1599 set_Store_ptr (ir_node *node, ir_node *ptr) {
1600 assert (node->op == op_Store);
1601 set_irn_n(node, 1, ptr);
1605 get_Store_value (ir_node *node) {
1606 assert (node->op == op_Store);
1607 return get_irn_n(node, 2);
1611 set_Store_value (ir_node *node, ir_node *value) {
1612 assert (node->op == op_Store);
1613 set_irn_n(node, 2, value);
1617 get_Store_volatility (ir_node *node) {
1618 assert (node->op == op_Store);
1619 return node->attr.store.volatility;
1623 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1624 assert (node->op == op_Store);
1625 node->attr.store.volatility = volatility;
1630 get_Alloc_mem (ir_node *node) {
1631 assert (node->op == op_Alloc);
1632 return get_irn_n(node, 0);
1636 set_Alloc_mem (ir_node *node, ir_node *mem) {
1637 assert (node->op == op_Alloc);
1638 set_irn_n(node, 0, mem);
1642 get_Alloc_size (ir_node *node) {
1643 assert (node->op == op_Alloc);
1644 return get_irn_n(node, 1);
1648 set_Alloc_size (ir_node *node, ir_node *size) {
1649 assert (node->op == op_Alloc);
1650 set_irn_n(node, 1, size);
1654 get_Alloc_type (ir_node *node) {
1655 assert (node->op == op_Alloc);
1656 return node->attr.a.type = skip_tid(node->attr.a.type);
1660 set_Alloc_type (ir_node *node, ir_type *tp) {
1661 assert (node->op == op_Alloc);
1662 node->attr.a.type = tp;
1666 get_Alloc_where (ir_node *node) {
1667 assert (node->op == op_Alloc);
1668 return node->attr.a.where;
1672 set_Alloc_where (ir_node *node, where_alloc where) {
1673 assert (node->op == op_Alloc);
1674 node->attr.a.where = where;
1679 get_Free_mem (ir_node *node) {
1680 assert (node->op == op_Free);
1681 return get_irn_n(node, 0);
1685 set_Free_mem (ir_node *node, ir_node *mem) {
1686 assert (node->op == op_Free);
1687 set_irn_n(node, 0, mem);
1691 get_Free_ptr (ir_node *node) {
1692 assert (node->op == op_Free);
1693 return get_irn_n(node, 1);
1697 set_Free_ptr (ir_node *node, ir_node *ptr) {
1698 assert (node->op == op_Free);
1699 set_irn_n(node, 1, ptr);
1703 get_Free_size (ir_node *node) {
1704 assert (node->op == op_Free);
1705 return get_irn_n(node, 2);
1709 set_Free_size (ir_node *node, ir_node *size) {
1710 assert (node->op == op_Free);
1711 set_irn_n(node, 2, size);
1715 get_Free_type (ir_node *node) {
1716 assert (node->op == op_Free);
1717 return node->attr.f.type = skip_tid(node->attr.f.type);
1721 set_Free_type (ir_node *node, ir_type *tp) {
1722 assert (node->op == op_Free);
1723 node->attr.f.type = tp;
1727 get_Free_where (ir_node *node) {
1728 assert (node->op == op_Free);
1729 return node->attr.f.where;
1733 set_Free_where (ir_node *node, where_alloc where) {
1734 assert (node->op == op_Free);
1735 node->attr.f.where = where;
1739 get_Sync_preds_arr (ir_node *node) {
1740 assert (node->op == op_Sync);
1741 return (ir_node **)&(get_irn_in(node)[1]);
1745 get_Sync_n_preds (ir_node *node) {
1746 assert (node->op == op_Sync);
1747 return (get_irn_arity(node));
1752 set_Sync_n_preds (ir_node *node, int n_preds) {
1753 assert (node->op == op_Sync);
1758 get_Sync_pred (ir_node *node, int pos) {
1759 assert (node->op == op_Sync);
1760 return get_irn_n(node, pos);
1764 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1765 assert (node->op == op_Sync);
1766 set_irn_n(node, pos, pred);
1769 ir_type *get_Proj_type(ir_node *n)
1772 ir_node *pred = get_Proj_pred(n);
1774 switch (get_irn_opcode(pred)) {
1777 /* Deal with Start / Call here: we need to know the Proj Nr. */
1778 assert(get_irn_mode(pred) == mode_T);
1779 pred_pred = get_Proj_pred(pred);
1780 if (get_irn_op(pred_pred) == op_Start) {
1781 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1782 tp = get_method_param_type(mtp, get_Proj_proj(n));
1783 } else if (get_irn_op(pred_pred) == op_Call) {
1784 ir_type *mtp = get_Call_type(pred_pred);
1785 tp = get_method_res_type(mtp, get_Proj_proj(n));
1788 case iro_Start: break;
1789 case iro_Call: break;
1791 ir_node *a = get_Load_ptr(pred);
1793 tp = get_entity_type(get_Sel_entity(a));
1802 get_Proj_pred (const ir_node *node) {
1803 assert (is_Proj(node));
1804 return get_irn_n(node, 0);
1808 set_Proj_pred (ir_node *node, ir_node *pred) {
1809 assert (is_Proj(node));
1810 set_irn_n(node, 0, pred);
1814 get_Proj_proj (const ir_node *node) {
1815 assert (is_Proj(node));
1816 if (get_irn_opcode(node) == iro_Proj) {
1817 return node->attr.proj;
1819 assert(get_irn_opcode(node) == iro_Filter);
1820 return node->attr.filter.proj;
1825 set_Proj_proj (ir_node *node, long proj) {
1826 assert (node->op == op_Proj);
1827 node->attr.proj = proj;
1831 get_Tuple_preds_arr (ir_node *node) {
1832 assert (node->op == op_Tuple);
1833 return (ir_node **)&(get_irn_in(node)[1]);
1837 get_Tuple_n_preds (ir_node *node) {
1838 assert (node->op == op_Tuple);
1839 return (get_irn_arity(node));
1844 set_Tuple_n_preds (ir_node *node, int n_preds) {
1845 assert (node->op == op_Tuple);
1850 get_Tuple_pred (ir_node *node, int pos) {
1851 assert (node->op == op_Tuple);
1852 return get_irn_n(node, pos);
1856 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1857 assert (node->op == op_Tuple);
1858 set_irn_n(node, pos, pred);
1862 get_Id_pred (ir_node *node) {
1863 assert (node->op == op_Id);
1864 return get_irn_n(node, 0);
1868 set_Id_pred (ir_node *node, ir_node *pred) {
1869 assert (node->op == op_Id);
1870 set_irn_n(node, 0, pred);
1873 ir_node *get_Confirm_value (ir_node *node) {
1874 assert (node->op == op_Confirm);
1875 return get_irn_n(node, 0);
1877 void set_Confirm_value (ir_node *node, ir_node *value) {
1878 assert (node->op == op_Confirm);
1879 set_irn_n(node, 0, value);
1881 ir_node *get_Confirm_bound (ir_node *node) {
1882 assert (node->op == op_Confirm);
1883 return get_irn_n(node, 1);
1885 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1886 assert (node->op == op_Confirm);
1887 set_irn_n(node, 0, bound);
1889 pn_Cmp get_Confirm_cmp (ir_node *node) {
1890 assert (node->op == op_Confirm);
1891 return node->attr.confirm_cmp;
1893 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1894 assert (node->op == op_Confirm);
1895 node->attr.confirm_cmp = cmp;
1900 get_Filter_pred (ir_node *node) {
1901 assert(node->op == op_Filter);
1905 set_Filter_pred (ir_node *node, ir_node *pred) {
1906 assert(node->op == op_Filter);
1910 get_Filter_proj(ir_node *node) {
1911 assert(node->op == op_Filter);
1912 return node->attr.filter.proj;
1915 set_Filter_proj (ir_node *node, long proj) {
1916 assert(node->op == op_Filter);
1917 node->attr.filter.proj = proj;
1920 /* Don't use get_irn_arity, get_irn_n in implementation as access
1921 shall work independent of view!!! */
1922 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1923 assert(node->op == op_Filter);
1924 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1925 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1926 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1927 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
1928 node->attr.filter.in_cg[0] = node->in[0];
1930 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1933 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1934 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1935 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1936 node->attr.filter.in_cg[pos + 1] = pred;
1938 int get_Filter_n_cg_preds(ir_node *node) {
1939 assert(node->op == op_Filter && node->attr.filter.in_cg);
1940 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1942 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1944 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1946 arity = ARR_LEN(node->attr.filter.in_cg);
1947 assert(pos < arity - 1);
1948 return node->attr.filter.in_cg[pos + 1];
1952 ir_node *get_Mux_sel (ir_node *node) {
1953 if (node->op == op_Psi) {
1954 assert(get_irn_arity(node) == 3);
1955 return get_Psi_cond(node, 0);
1957 assert(node->op == op_Mux);
1960 void set_Mux_sel (ir_node *node, ir_node *sel) {
1961 if (node->op == op_Psi) {
1962 assert(get_irn_arity(node) == 3);
1963 set_Psi_cond(node, 0, sel);
1966 assert(node->op == op_Mux);
1971 ir_node *get_Mux_false (ir_node *node) {
1972 if (node->op == op_Psi) {
1973 assert(get_irn_arity(node) == 3);
1974 return get_Psi_default(node);
1976 assert(node->op == op_Mux);
1979 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1980 if (node->op == op_Psi) {
1981 assert(get_irn_arity(node) == 3);
1982 set_Psi_default(node, ir_false);
1985 assert(node->op == op_Mux);
1986 node->in[2] = ir_false;
1990 ir_node *get_Mux_true (ir_node *node) {
1991 if (node->op == op_Psi) {
1992 assert(get_irn_arity(node) == 3);
1993 return get_Psi_val(node, 0);
1995 assert(node->op == op_Mux);
1998 void set_Mux_true (ir_node *node, ir_node *ir_true) {
1999 if (node->op == op_Psi) {
2000 assert(get_irn_arity(node) == 3);
2001 set_Psi_val(node, 0, ir_true);
2004 assert(node->op == op_Mux);
2005 node->in[3] = ir_true;
2010 ir_node *get_Psi_cond (ir_node *node, int pos) {
2011 int num_conds = get_Psi_n_conds(node);
2012 assert(node->op == op_Psi);
2013 assert(pos < num_conds);
2014 return node->in[1 + 2 * pos];
2017 void set_Psi_cond (ir_node *node, int pos, ir_node *cond) {
2018 int num_conds = get_Psi_n_conds(node);
2019 assert(node->op == op_Psi);
2020 assert(pos < num_conds);
2021 node->in[1 + 2 * pos] = cond;
2024 ir_node *get_Psi_val (ir_node *node, int pos) {
2025 int num_vals = get_Psi_n_conds(node);
2026 assert(node->op == op_Psi);
2027 assert(pos < num_vals);
2028 return node->in[1 + 2 * pos + 1];
2031 void set_Psi_val (ir_node *node, int pos, ir_node *val) {
2032 int num_vals = get_Psi_n_conds(node);
2033 assert(node->op == op_Psi);
2034 assert(pos < num_vals);
2035 node->in[1 + 2 * pos + 1] = val;
2038 ir_node *get_Psi_default(ir_node *node) {
2039 int def_pos = get_irn_arity(node);
2040 assert(node->op == op_Psi);
2041 return node->in[def_pos];
2044 void set_Psi_default(ir_node *node, ir_node *val) {
2045 int def_pos = get_irn_arity(node);
2046 assert(node->op == op_Psi);
2047 node->in[def_pos] = node;
2050 int (get_Psi_n_conds)(ir_node *node) {
2051 return _get_Psi_n_conds(node);
2055 ir_node *get_CopyB_mem (ir_node *node) {
2056 assert (node->op == op_CopyB);
2057 return get_irn_n(node, 0);
2060 void set_CopyB_mem (ir_node *node, ir_node *mem) {
2061 assert (node->op == op_CopyB);
2062 set_irn_n(node, 0, mem);
2065 ir_node *get_CopyB_dst (ir_node *node) {
2066 assert (node->op == op_CopyB);
2067 return get_irn_n(node, 1);
2070 void set_CopyB_dst (ir_node *node, ir_node *dst) {
2071 assert (node->op == op_CopyB);
2072 set_irn_n(node, 1, dst);
2075 ir_node *get_CopyB_src (ir_node *node) {
2076 assert (node->op == op_CopyB);
2077 return get_irn_n(node, 2);
2080 void set_CopyB_src (ir_node *node, ir_node *src) {
2081 assert (node->op == op_CopyB);
2082 set_irn_n(node, 2, src);
2085 ir_type *get_CopyB_type(ir_node *node) {
2086 assert (node->op == op_CopyB);
2087 return node->attr.copyb.data_type;
2090 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2091 assert (node->op == op_CopyB && data_type);
2092 node->attr.copyb.data_type = data_type;
2097 get_InstOf_type (ir_node *node) {
2098 assert (node->op = op_InstOf);
2099 return node->attr.io.type;
2103 set_InstOf_type (ir_node *node, ir_type *type) {
2104 assert (node->op = op_InstOf);
2105 node->attr.io.type = type;
2109 get_InstOf_store (ir_node *node) {
2110 assert (node->op = op_InstOf);
2111 return get_irn_n(node, 0);
2115 set_InstOf_store (ir_node *node, ir_node *obj) {
2116 assert (node->op = op_InstOf);
2117 set_irn_n(node, 0, obj);
2121 get_InstOf_obj (ir_node *node) {
2122 assert (node->op = op_InstOf);
2123 return get_irn_n(node, 1);
2127 set_InstOf_obj (ir_node *node, ir_node *obj) {
2128 assert (node->op = op_InstOf);
2129 set_irn_n(node, 1, obj);
2132 /* Returns the memory input of a Raise operation. */
2134 get_Raise_mem (ir_node *node) {
2135 assert (node->op == op_Raise);
2136 return get_irn_n(node, 0);
2140 set_Raise_mem (ir_node *node, ir_node *mem) {
2141 assert (node->op == op_Raise);
2142 set_irn_n(node, 0, mem);
2146 get_Raise_exo_ptr (ir_node *node) {
2147 assert (node->op == op_Raise);
2148 return get_irn_n(node, 1);
2152 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
2153 assert (node->op == op_Raise);
2154 set_irn_n(node, 1, exo_ptr);
2159 /* Returns the memory input of a Bound operation. */
2160 ir_node *get_Bound_mem(ir_node *bound) {
2161 assert (bound->op == op_Bound);
2162 return get_irn_n(bound, 0);
2165 void set_Bound_mem (ir_node *bound, ir_node *mem) {
2166 assert (bound->op == op_Bound);
2167 set_irn_n(bound, 0, mem);
2170 /* Returns the index input of a Bound operation. */
2171 ir_node *get_Bound_index(ir_node *bound) {
2172 assert (bound->op == op_Bound);
2173 return get_irn_n(bound, 1);
2176 void set_Bound_index(ir_node *bound, ir_node *idx) {
2177 assert (bound->op == op_Bound);
2178 set_irn_n(bound, 1, idx);
2181 /* Returns the lower bound input of a Bound operation. */
2182 ir_node *get_Bound_lower(ir_node *bound) {
2183 assert (bound->op == op_Bound);
2184 return get_irn_n(bound, 2);
2187 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2188 assert (bound->op == op_Bound);
2189 set_irn_n(bound, 2, lower);
2192 /* Returns the upper bound input of a Bound operation. */
2193 ir_node *get_Bound_upper(ir_node *bound) {
2194 assert (bound->op == op_Bound);
2195 return get_irn_n(bound, 3);
2198 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2199 assert (bound->op == op_Bound);
2200 set_irn_n(bound, 3, upper);
2203 /* returns the graph of a node */
2205 get_irn_irg(const ir_node *node) {
2207 * Do not use get_nodes_Block() here, because this
2208 * will check the pinned state.
2209 * However even a 'wrong' block is always in the proper
2212 if (! is_Block(node))
2213 node = get_irn_n(node, -1);
2214 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2215 node = get_irn_n(node, -1);
2216 assert(get_irn_op(node) == op_Block);
2217 return node->attr.block.irg;
2221 /*----------------------------------------------------------------*/
2222 /* Auxiliary routines */
2223 /*----------------------------------------------------------------*/
2226 skip_Proj (ir_node *node) {
2227 /* don't assert node !!! */
2228 if (node && is_Proj(node)) {
2229 return get_Proj_pred(node);
2236 skip_Tuple (ir_node *node) {
2240 if (!get_opt_normalize()) return node;
2243 node = skip_Id(node);
2244 if (get_irn_op(node) == op_Proj) {
2245 pred = skip_Id(get_Proj_pred(node));
2246 op = get_irn_op(pred);
2249 * Looks strange but calls get_irn_op() only once
2250 * in most often cases.
2252 if (op == op_Proj) { /* nested Tuple ? */
2253 pred = skip_Id(skip_Tuple(pred));
2254 op = get_irn_op(pred);
2256 if (op == op_Tuple) {
2257 node = get_Tuple_pred(pred, get_Proj_proj(node));
2261 else if (op == op_Tuple) {
2262 node = get_Tuple_pred(pred, get_Proj_proj(node));
2269 /* returns operand of node if node is a Cast */
2270 ir_node *skip_Cast (ir_node *node) {
2271 if (node && get_irn_op(node) == op_Cast)
2272 return get_Cast_op(node);
2276 /* returns operand of node if node is a Confirm */
2277 ir_node *skip_Confirm (ir_node *node) {
2278 if (node && get_irn_op(node) == op_Confirm)
2279 return get_Confirm_value(node);
2283 /* skip all high-level ops */
2284 ir_node *skip_HighLevel(ir_node *node) {
2285 if (node && is_op_highlevel(get_irn_op(node)))
2286 return get_irn_n(node, 0);
2291 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2292 * than any other approach, as Id chains are resolved and all point to the real node, or
2293 * all id's are self loops.
2295 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2298 skip_Id (ir_node *node) {
2299 /* don't assert node !!! */
2301 /* Don't use get_Id_pred: We get into an endless loop for
2302 self-referencing Ids. */
2303 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2304 ir_node *rem_pred = node->in[0+1];
2307 assert (get_irn_arity (node) > 0);
2309 node->in[0+1] = node;
2310 res = skip_Id(rem_pred);
2311 if (res->op == op_Id) /* self-loop */ return node;
2313 node->in[0+1] = res;
2320 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2321 * than any other approach, as Id chains are resolved and all point to the real node, or
2322 * all id's are self loops.
2324 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2325 * a little bit "hand optimized".
2327 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2330 skip_Id (ir_node *node) {
2332 /* don't assert node !!! */
2334 if (!node || (node->op != op_Id)) return node;
2336 /* Don't use get_Id_pred(): We get into an endless loop for
2337 self-referencing Ids. */
2338 pred = node->in[0+1];
2340 if (pred->op != op_Id) return pred;
2342 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2343 ir_node *rem_pred, *res;
2345 if (pred->op != op_Id) return pred; /* shortcut */
2348 assert (get_irn_arity (node) > 0);
2350 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2351 res = skip_Id(rem_pred);
2352 if (res->op == op_Id) /* self-loop */ return node;
2354 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2362 void skip_Id_and_store(ir_node **node) {
2365 if (!n || (n->op != op_Id)) return;
2367 /* Don't use get_Id_pred(): We get into an endless loop for
2368 self-referencing Ids. */
2373 (is_Bad)(const ir_node *node) {
2374 return _is_Bad(node);
2378 (is_Const)(const ir_node *node) {
2379 return _is_Const(node);
2383 (is_no_Block)(const ir_node *node) {
2384 return _is_no_Block(node);
2388 (is_Block)(const ir_node *node) {
2389 return _is_Block(node);
2392 /* returns true if node is an Unknown node. */
2394 (is_Unknown)(const ir_node *node) {
2395 return _is_Unknown(node);
2398 /* returns true if node is a Return node. */
2400 (is_Return)(const ir_node *node) {
2401 return _is_Return(node);
2404 /* returns true if node is a Call node. */
2406 (is_Call)(const ir_node *node) {
2407 return _is_Call(node);
2410 /* returns true if node is a Sel node. */
2412 (is_Sel)(const ir_node *node) {
2413 return _is_Sel(node);
2416 /* returns true if node is a Mux node or a Psi with only one condition. */
2418 (is_Mux)(const ir_node *node) {
2419 return _is_Mux(node);
2423 is_Proj (const ir_node *node) {
2425 return node->op == op_Proj
2426 || (!get_interprocedural_view() && node->op == op_Filter);
2429 /* Returns true if the operation manipulates control flow. */
2431 is_cfop(const ir_node *node) {
2432 return is_cfopcode(get_irn_op(node));
2435 /* Returns true if the operation manipulates interprocedural control flow:
2436 CallBegin, EndReg, EndExcept */
2437 int is_ip_cfop(const ir_node *node) {
2438 return is_ip_cfopcode(get_irn_op(node));
2441 /* Returns true if the operation can change the control flow because
2444 is_fragile_op(const ir_node *node) {
2445 return is_op_fragile(get_irn_op(node));
2448 /* Returns the memory operand of fragile operations. */
2449 ir_node *get_fragile_op_mem(ir_node *node) {
2450 assert(node && is_fragile_op(node));
2452 switch (get_irn_opcode (node)) {
2461 return get_irn_n(node, 0);
2466 assert(0 && "should not be reached");
2471 /* Returns true if the operation is a forking control flow operation. */
2472 int (is_irn_forking)(const ir_node *node) {
2473 return _is_irn_forking(node);
2476 /* Return the type associated with the value produced by n
2477 * if the node remarks this type as it is the case for
2478 * Cast, Const, SymConst and some Proj nodes. */
2479 ir_type *(get_irn_type)(ir_node *node) {
2480 return _get_irn_type(node);
2483 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2485 ir_type *(get_irn_type_attr)(ir_node *node) {
2486 return _get_irn_type_attr(node);
2489 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2490 entity *(get_irn_entity_attr)(ir_node *node) {
2491 return _get_irn_entity_attr(node);
2494 /* Returns non-zero for constant-like nodes. */
2495 int (is_irn_constlike)(const ir_node *node) {
2496 return _is_irn_constlike(node);
2500 * Returns non-zero for nodes that are allowed to have keep-alives and
2501 * are neither Block nor PhiM.
2503 int (is_irn_keep)(const ir_node *node) {
2504 return _is_irn_keep(node);
2507 /* Returns non-zero for nodes that are machine operations. */
2508 int (is_irn_machine_op)(const ir_node *node) {
2509 return _is_irn_machine_op(node);
2512 /* Returns non-zero for nodes that are machine operands. */
2513 int (is_irn_machine_operand)(const ir_node *node) {
2514 return _is_irn_machine_operand(node);
2517 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2518 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2519 return _is_irn_machine_user(node, n);
2523 /* Gets the string representation of the jump prediction .*/
2524 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
2528 case COND_JMP_PRED_NONE: return "no prediction";
2529 case COND_JMP_PRED_TRUE: return "true taken";
2530 case COND_JMP_PRED_FALSE: return "false taken";
2534 /* Returns the conditional jump prediction of a Cond node. */
2535 cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
2536 return _get_Cond_jmp_pred(cond);
2539 /* Sets a new conditional jump prediction. */
2540 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2541 _set_Cond_jmp_pred(cond, pred);
2544 /** the get_type operation must be always implemented and return a firm type */
2545 static ir_type *get_Default_type(ir_node *n) {
2546 return get_unknown_type();
2549 /* Sets the get_type operation for an ir_op_ops. */
2550 ir_op_ops *firm_set_default_get_type(opcode code, ir_op_ops *ops)
2553 case iro_Const: ops->get_type = get_Const_type; break;
2554 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2555 case iro_Cast: ops->get_type = get_Cast_type; break;
2556 case iro_Proj: ops->get_type = get_Proj_type; break;
2558 /* not allowed to be NULL */
2559 if (! ops->get_type)
2560 ops->get_type = get_Default_type;
2566 /** Return the attribute type of a SymConst node if exists */
2567 static ir_type *get_SymConst_attr_type(ir_node *self) {
2568 symconst_kind kind = get_SymConst_kind(self);
2569 if (kind == symconst_type_tag || kind == symconst_size)
2570 return get_SymConst_type(self);
2574 /** Return the attribute entity of a SymConst node if exists */
2575 static entity *get_SymConst_attr_entity(ir_node *self) {
2576 symconst_kind kind = get_SymConst_kind(self);
2577 if (kind == symconst_addr_ent)
2578 return get_SymConst_entity(self);
2582 /** the get_type_attr operation must be always implemented */
2583 static ir_type *get_Null_type(ir_node *n) {
2584 return firm_unknown_type;
2587 /* Sets the get_type operation for an ir_op_ops. */
2588 ir_op_ops *firm_set_default_get_type_attr(opcode code, ir_op_ops *ops)
2591 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2592 case iro_Call: ops->get_type_attr = get_Call_type; break;
2593 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2594 case iro_Free: ops->get_type_attr = get_Free_type; break;
2595 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2597 /* not allowed to be NULL */
2598 if (! ops->get_type_attr)
2599 ops->get_type_attr = get_Null_type;
2605 /** the get_entity_attr operation must be always implemented */
2606 static entity *get_Null_ent(ir_node *n) {
2610 /* Sets the get_type operation for an ir_op_ops. */
2611 ir_op_ops *firm_set_default_get_entity_attr(opcode code, ir_op_ops *ops)
2614 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2615 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2617 /* not allowed to be NULL */
2618 if (! ops->get_entity_attr)
2619 ops->get_entity_attr = get_Null_ent;
2625 #ifdef DEBUG_libfirm
2626 void dump_irn (ir_node *n) {
2627 int i, arity = get_irn_arity(n);
2628 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2630 ir_node *pred = get_irn_n(n, -1);
2631 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2632 get_irn_node_nr(pred), (void *)pred);
2634 printf(" preds: \n");
2635 for (i = 0; i < arity; ++i) {
2636 ir_node *pred = get_irn_n(n, i);
2637 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2638 get_irn_node_nr(pred), (void *)pred);
2642 #else /* DEBUG_libfirm */
2643 void dump_irn (ir_node *n) {}
2644 #endif /* DEBUG_libfirm */