3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
35 /* some constants fixing the positions of nodes predecessors
37 #define CALL_PARAM_OFFSET 2
38 #define FUNCCALL_PARAM_OFFSET 1
39 #define SEL_INDEX_OFFSET 2
40 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
41 #define END_KEEPALIVE_OFFSET 0
/**
 * Human-readable names of the pn_Cmp_* relation constants,
 * indexed directly by the pnc value (0 .. pn_Cmp_True).
 */
static const char *pnc_name_arr [] = {
  "pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
  "pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
  "pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
  "pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
};

/**
 * Returns the pnc name from a pnc constant.
 *
 * @param pnc  the comparison relation constant; must lie in
 *             0 .. 15 (pn_Cmp_False .. pn_Cmp_True)
 * @return the static name string; do not free
 */
const char *get_pnc_string(int pnc) {
  /* guard against out-of-range access into the name table */
  assert(pnc >= 0 &&
         (size_t)pnc < sizeof(pnc_name_arr) / sizeof(pnc_name_arr[0]));
  return pnc_name_arr[pnc];
}
/**
 * Calculates the negated (Complement(R)) pnc condition.
 *
 * NOTE(review): body is truncated in this view; the visible part only
 * shows that the Uo bit is treated specially for non-float modes.
 */
int get_negated_pnc(int pnc, ir_mode *mode) {
  /* do NOT add the Uo bit for non-floating point values */
  if (! mode_is_float(mode))
/* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
get_inversed_pnc(int pnc) {
  int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt); /* keep every relation bit except Lt/Gt */
  int lesser = pnc & pn_Cmp_Lt;
  int greater = pnc & pn_Cmp_Gt;
  /* mirror the relation: a set Lt bit becomes Gt and vice versa */
  code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
82 const char *pns_name_arr [] = {
83 "initial_exec", "global_store",
84 "frame_base", "globals", "args"
87 const char *symconst_name_arr [] = {
88 "type_tag", "size", "addr_name", "addr_ent"
/**
 * Indicates, whether additional data can be registered to ir nodes.
 * If set to 1, this is not possible anymore.
 */
static int forbid_new_data = 0;

/**
 * The amount of additional space for custom data to be allocated upon
 * creating a new node.
 */
unsigned firm_add_node_size = 0;

/* register new space for every node */
unsigned register_additional_node_data(unsigned size) {
  /* once node allocation has started the layout is frozen */
  assert(!forbid_new_data && "Too late to register additional node data");
  /* return value is the new total amount of extra bytes per node */
  return firm_add_node_size += size;
117 /* Forbid the addition of new data to an ir node. */
/**
 * irnode constructor.
 * Create a new irnode in irg, with an op, mode, arity and
 * some incoming irnodes.
 * If arity is negative, a node with a dynamic array is created.
 *
 * @param db     debug info attached to the node (may be NULL)
 * @param irg    graph the node is allocated in (on its obstack)
 * @param block  the basic block the node belongs to
 * @param op     the opcode of the new node
 * @param mode   the mode of the new node
 * @param arity  number of predecessors; negative requests a dynamic in-array
 * @param in     the arity predecessor nodes
 */
new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
             int arity, ir_node **in)
  /* node size = header up to attr + op-specific attrs + registered custom data */
  size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
  assert(irg && op && mode);
  p = obstack_alloc (irg->obst, node_size);
  memset(p, 0, node_size);
  /* the custom data lives in front of the node; res points just behind it */
  res = (ir_node *) (p + firm_add_node_size);
  res->kind = k_ir_node;
  res->node_idx = irg_register_node_idx(irg, res);
  /* in-array slot 0 is always reserved for the block predecessor */
  res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
  res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
  memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
  set_irn_dbg_info(res, db);
  res->node_nr = get_irp_new_node_nr();
  INIT_LIST_HEAD(&res->edge_info.outs_head);
  is_bl = is_Block(res);
  INIT_LIST_HEAD(&res->attr.block.succ_head);
  /* notify the edges module about all new dependencies
     (for non-blocks the loop starts at 0, covering the block edge at -1) */
  for (i = is_bl; i <= arity; ++i)
    edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
  hook_new_node(irg, res);
176 /*-- getting some parameters from ir_nodes --*/
179 (is_ir_node)(const void *thing) {
180 return _is_ir_node(thing);
184 (get_irn_intra_arity)(const ir_node *node) {
185 return _get_irn_intra_arity(node);
189 (get_irn_inter_arity)(const ir_node *node) {
190 return _get_irn_inter_arity(node);
193 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
196 (get_irn_arity)(const ir_node *node) {
197 return _get_irn_arity(node);
/* Returns the array with ins. This array is shifted with respect to the
   array accessed by get_irn_n: The block operand is at position 0 not -1.
   (@@@ This should be changed.)
   The order of the predecessors in this array is not guaranteed, except that
   lists of operands as predecessors of Block or arguments of a Call are
   kept in order (NOTE(review): trailing words of this comment are missing
   in this view). */
get_irn_in (const ir_node *node)
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      /* a Filter always carries an interprocedural in-array */
      assert(node->attr.filter.in_cg);
      return node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      /* Blocks only have one when interprocedural preds were set */
      return node->attr.block.in_cg;
  /* else fall through */
/* Replace the whole predecessor array of a node.  In the interprocedural
   view Filter and Block nodes use their separate in_cg array instead of
   the ordinary in array. */
set_irn_in (ir_node *node, int arity, ir_node **in) {
  ir_graph *irg = current_ir_graph;
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      arr = &node->attr.filter.in_cg;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      arr = &node->attr.block.in_cg;
  /* tell the edges module about every changed edge: position i keeps an
     old target while i < old arity, otherwise the edge is new */
  for (i = 0; i < arity; i++) {
    if (i < ARR_LEN(*arr)-1)
      edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
      edges_notify_edge(node, i, in[i], NULL, irg);
  /* edges beyond the new arity are deleted */
  for(;i < ARR_LEN(*arr)-1; i++) {
    edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
  if (arity != ARR_LEN(*arr) - 1) {
    /* arity changed: allocate a fresh array, slot 0 keeps the block */
    ir_node * block = (*arr)[0];
    *arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
  fix_backedges(irg->obst, node);
  memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
261 (get_irn_intra_n)(const ir_node *node, int n) {
262 return _get_irn_intra_n (node, n);
266 (get_irn_inter_n)(const ir_node *node, int n) {
267 return _get_irn_inter_n (node, n);
270 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
273 (get_irn_n)(const ir_node *node, int n) {
274 return _get_irn_n(node, n);
/* Set the n-th predecessor of a node; n == -1 addresses the block.
   Filter/Block nodes mirror the change into their interprocedural
   in_cg array where present. */
set_irn_n (ir_node *node, int n, ir_node *in) {
  assert(node && node->kind == k_ir_node);
  assert(n < get_irn_arity(node));
  assert(in && in->kind == k_ir_node);
  if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
    /* Change block pred in both views! */
    node->in[n + 1] = in;
    assert(node->attr.filter.in_cg);
    node->attr.filter.in_cg[n + 1] = in;
  if (get_interprocedural_view()) { /* handle Filter and Block specially */
    if (get_irn_opcode(node) == iro_Filter) {
      assert(node->attr.filter.in_cg);
      node->attr.filter.in_cg[n + 1] = in;
    } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
      node->attr.block.in_cg[n + 1] = in;
  /* else fall through */
  /* report old and new target of the edge before overwriting */
  hook_set_irn_n(node, n, in, node->in[n + 1]);
  /* Here, we rely on src and tgt being in the current ir graph */
  edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
  node->in[n + 1] = in;
313 (get_irn_mode)(const ir_node *node) {
314 return _get_irn_mode(node);
318 (set_irn_mode)(ir_node *node, ir_mode *mode)
320 _set_irn_mode(node, mode);
324 get_irn_modecode (const ir_node *node)
327 return node->mode->code;
330 /** Gets the string representation of the mode .*/
332 get_irn_modename (const ir_node *node)
335 return get_mode_name(node->mode);
339 get_irn_modeident (const ir_node *node)
342 return get_mode_ident(node->mode);
346 (get_irn_op)(const ir_node *node) {
347 return _get_irn_op(node);
350 /* should be private to the library: */
352 (set_irn_op)(ir_node *node, ir_op *op) {
353 _set_irn_op(node, op);
357 (get_irn_opcode)(const ir_node *node)
359 return _get_irn_opcode(node);
363 get_irn_opname (const ir_node *node)
366 if ((get_irn_op((ir_node *)node) == op_Phi) &&
367 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
368 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
369 return get_id_str(node->op->name);
373 get_irn_opident (const ir_node *node)
376 return node->op->name;
380 (get_irn_visited)(const ir_node *node)
382 return _get_irn_visited(node);
386 (set_irn_visited)(ir_node *node, unsigned long visited)
388 _set_irn_visited(node, visited);
392 (mark_irn_visited)(ir_node *node) {
393 _mark_irn_visited(node);
397 (irn_not_visited)(const ir_node *node) {
398 return _irn_not_visited(node);
402 (irn_visited)(const ir_node *node) {
403 return _irn_visited(node);
407 (set_irn_link)(ir_node *node, void *link) {
408 _set_irn_link(node, link);
412 (get_irn_link)(const ir_node *node) {
413 return _get_irn_link(node);
417 (get_irn_pinned)(const ir_node *node) {
418 return _get_irn_pinned(node);
422 (is_irn_pinned_in_irg) (const ir_node *node) {
423 return _is_irn_pinned_in_irg(node);
/* Sets the pin state of a node whose op allows a per-node pin state. */
void set_irn_pinned(ir_node *node, op_pin_state state) {
  /* due to optimization an opt may be turned into a Tuple */
  if (get_irn_op(node) == op_Tuple)
  /* only ops with at least exception pin state carry the attribute */
  assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
  assert(state == op_pin_state_pinned || state == op_pin_state_floats);
  node->attr.except.pin_state = state;
437 #ifdef DO_HEAPANALYSIS
438 /* Access the abstract interpretation information of a node.
439 Returns NULL if no such information is available. */
440 struct abstval *get_irn_abst_value(ir_node *n) {
443 /* Set the abstract interpretation information of a node. */
444 void set_irn_abst_value(ir_node *n, struct abstval *os) {
447 struct section *firm_get_irn_section(ir_node *n) {
450 void firm_set_irn_section(ir_node *n, struct section *s) {
454 /* Dummies needed for firmjni. */
455 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
456 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
457 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
458 void firm_set_irn_section(ir_node *n, struct section *s) {}
459 #endif /* DO_HEAPANALYSIS */
/* Outputs a unique number for this node */
long get_irn_node_nr(const ir_node *node) {
  /* debug builds store an explicit number per node */
  return node->node_nr;
  /* otherwise the address serves as the number
     (NOTE(review): presumably inside an #else branch dropped from this view) */
  return (long)PTR_TO_INT(node);
473 get_irn_const_attr (ir_node *node)
475 assert (node->op == op_Const);
476 return node->attr.con;
480 get_irn_proj_attr (ir_node *node)
482 assert (node->op == op_Proj);
483 return node->attr.proj;
487 get_irn_alloc_attr (ir_node *node)
489 assert (node->op == op_Alloc);
494 get_irn_free_attr (ir_node *node)
496 assert (node->op == op_Free);
501 get_irn_symconst_attr (ir_node *node)
503 assert (node->op == op_SymConst);
508 get_irn_call_attr (ir_node *node)
510 assert (node->op == op_Call);
511 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
515 get_irn_sel_attr (ir_node *node)
517 assert (node->op == op_Sel);
522 get_irn_phi_attr (ir_node *node)
524 assert (node->op == op_Phi);
525 return node->attr.phi0_pos;
529 get_irn_block_attr (ir_node *node)
531 assert (node->op == op_Block);
532 return node->attr.block;
536 get_irn_load_attr (ir_node *node)
538 assert (node->op == op_Load);
539 return node->attr.load;
543 get_irn_store_attr (ir_node *node)
545 assert (node->op == op_Store);
546 return node->attr.store;
550 get_irn_except_attr (ir_node *node)
552 assert (node->op == op_Div || node->op == op_Quot ||
553 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
554 return node->attr.except;
558 get_irn_generic_attr (ir_node *node) {
562 unsigned (get_irn_idx)(const ir_node *node) {
563 assert(is_ir_node(node));
564 return _get_irn_idx(node);
/* Returns the position of arg among the predecessors of node
   (searched back-to-front); visible part of the body only. */
int get_irn_pred_pos(ir_node *node, ir_node *arg) {
  for (i = get_irn_arity(node) - 1; i >= 0; i--) {
    if (get_irn_n(node, i) == arg)
/** manipulate fields of individual nodes **/

/* this works for all except Block */
get_nodes_block (const ir_node *node) {
  assert (!(node->op == op_Block));
  /* floating nodes may carry a stale block pointer */
  assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
  /* the block is predecessor -1 */
  return get_irn_n(node, -1);

/* Sets the block of a non-Block node. */
set_nodes_block (ir_node *node, ir_node *block) {
  assert (!(node->op == op_Block));
  set_irn_n(node, -1, block);
/* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
 * from Start. If so returns frame type, else Null. */
ir_type *is_frame_pointer(ir_node *n) {
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_frame_base)) {
    ir_node *start = get_Proj_pred(n);
    /* only a Proj directly off Start qualifies */
    if (get_irn_op(start) == op_Start) {
      return get_irg_frame_type(get_irn_irg(start));

/* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
 * from Start. If so returns global type, else Null. */
ir_type *is_globals_pointer(ir_node *n) {
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_globals)) {
    ir_node *start = get_Proj_pred(n);
    if (get_irn_op(start) == op_Start) {
      return get_glob_type();

/* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
 * from Start. If so returns 1, else 0. */
int is_value_arg_pointer(ir_node *n) {
  if ((get_irn_op(n) == op_Proj) &&
      (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
      (get_irn_op(get_Proj_pred(n)) == op_Start))
628 /* Returns an array with the predecessors of the Block. Depending on
629 the implementation of the graph data structure this can be a copy of
630 the internal representation of predecessors as well as the internal
631 array itself. Therefore writing to this array might obstruct the ir. */
633 get_Block_cfgpred_arr (ir_node *node)
635 assert ((node->op == op_Block));
636 return (ir_node **)&(get_irn_in(node)[1]);
640 (get_Block_n_cfgpreds)(ir_node *node) {
641 return _get_Block_n_cfgpreds(node);
645 (get_Block_cfgpred)(ir_node *node, int pos) {
646 return _get_Block_cfgpred(node, pos);
650 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
651 assert (node->op == op_Block);
652 set_irn_n(node, pos, pred);
656 (get_Block_cfgpred_block)(ir_node *node, int pos) {
657 return _get_Block_cfgpred_block(node, pos);
661 get_Block_matured (ir_node *node) {
662 assert (node->op == op_Block);
663 return (int)node->attr.block.matured;
667 set_Block_matured (ir_node *node, int matured) {
668 assert (node->op == op_Block);
669 node->attr.block.matured = matured;
673 (get_Block_block_visited)(ir_node *node) {
674 return _get_Block_block_visited(node);
678 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
679 _set_Block_block_visited(node, visit);
682 /* For this current_ir_graph must be set. */
684 (mark_Block_block_visited)(ir_node *node) {
685 _mark_Block_block_visited(node);
689 (Block_not_block_visited)(ir_node *node) {
690 return _Block_not_block_visited(node);
694 get_Block_graph_arr (ir_node *node, int pos) {
695 assert (node->op == op_Block);
696 return node->attr.block.graph_arr[pos+1];
700 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
701 assert (node->op == op_Block);
702 node->attr.block.graph_arr[pos+1] = value;
/* Installs the interprocedural (call-graph) predecessor array of a Block.
   Reallocates the in_cg and backedge arrays when the arity changes. */
void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Block);
  if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
    /* slot 0 mirrors the block slot of ordinary in arrays and stays NULL */
    node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.block.in_cg[0] = NULL;
    node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
    /* Fix backedge array. fix_backedges() operates depending on
       interprocedural_view. */
    int ipv = get_interprocedural_view();
    set_interprocedural_view(1);
    fix_backedges(current_ir_graph->obst, node);
    /* restore the caller's view setting */
    set_interprocedural_view(ipv);
  memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
723 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
724 assert(node->op == op_Block &&
725 node->attr.block.in_cg &&
726 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
727 node->attr.block.in_cg[pos + 1] = pred;
730 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
731 assert(node->op == op_Block);
732 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
735 int get_Block_cg_n_cfgpreds(ir_node * node) {
736 assert(node->op == op_Block);
737 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
740 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
741 assert(node->op == op_Block && node->attr.block.in_cg);
742 return node->attr.block.in_cg[pos + 1];
745 void remove_Block_cg_cfgpred_arr(ir_node * node) {
746 assert(node->op == op_Block);
747 node->attr.block.in_cg = NULL;
750 ir_node *(set_Block_dead)(ir_node *block) {
751 return _set_Block_dead(block);
754 int (is_Block_dead)(const ir_node *block) {
755 return _is_Block_dead(block);
758 ir_extblk *get_Block_extbb(const ir_node *block) {
760 assert(is_Block(block));
761 res = block->attr.block.extblk;
762 assert(res == NULL || is_ir_extbb(res));
766 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
767 assert(is_Block(block));
768 assert(extblk == NULL || is_ir_extbb(extblk));
769 block->attr.block.extblk = extblk;
773 get_End_n_keepalives(ir_node *end) {
774 assert (end->op == op_End);
775 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
779 get_End_keepalive(ir_node *end, int pos) {
780 assert (end->op == op_End);
781 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
785 add_End_keepalive (ir_node *end, ir_node *ka) {
786 assert (end->op == op_End);
787 ARR_APP1 (ir_node *, end->in, ka);
791 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
792 assert (end->op == op_End);
793 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
/* Set new keep-alives */
void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
  ir_graph *irg = get_irn_irg(end);
  /* notify that edges are deleted */
  for (i = 1 + END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in); ++i) {
    edges_notify_edge(end, i, end->in[i], NULL, irg);
  /* shrink/grow the in array to hold exactly n keep-alives (plus block slot) */
  ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
  for (i = 0; i < n; ++i) {
    end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
    /* announce each newly created keep-alive edge */
    edges_notify_edge(end, 1 + END_KEEPALIVE_OFFSET + i, NULL, end->in[1 + END_KEEPALIVE_OFFSET + i], irg);

/* Frees the End node's in array; the node itself stays on the obstack. */
free_End (ir_node *end) {
  assert (end->op == op_End);
  end->in = NULL;   /* @@@ make sure we get an error if we use the
                       in array afterwards ... */
822 /* Return the target address of an IJmp */
823 ir_node *get_IJmp_target(ir_node *ijmp) {
824 assert(ijmp->op == op_IJmp);
825 return get_irn_n(ijmp, 0);
828 /** Sets the target address of an IJmp */
829 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
830 assert(ijmp->op == op_IJmp);
831 set_irn_n(ijmp, 0, tgt);
835 > Implementing the case construct (which is where the constant Proj node is
836 > important) involves far more than simply determining the constant values.
837 > We could argue that this is more properly a function of the translator from
838 > Firm to the target machine. That could be done if there was some way of
839 > projecting "default" out of the Cond node.
840 I know it's complicated.
Basically there are two problems:
842 - determining the gaps between the projs
843 - determining the biggest case constant to know the proj number for
845 I see several solutions:
846 1. Introduce a ProjDefault node. Solves both problems.
847 This means to extend all optimizations executed during construction.
848 2. Give the Cond node for switch two flavors:
849 a) there are no gaps in the projs (existing flavor)
850 b) gaps may exist, default proj is still the Proj with the largest
851 projection number. This covers also the gaps.
852 3. Fix the semantic of the Cond to that of 2b)
854 Solution 2 seems to be the best:
855 Computing the gaps in the Firm representation is not too hard, i.e.,
856 libFIRM can implement a routine that transforms between the two
857 flavours. This is also possible for 1) but 2) does not require to
858 change any existing optimization.
859 Further it should be far simpler to determine the biggest constant than
861 I don't want to choose 3) as 2a) seems to have advantages for
862 dataflow analysis and 3) does not allow to convert the representation to
866 get_Cond_selector (ir_node *node) {
867 assert (node->op == op_Cond);
868 return get_irn_n(node, 0);
872 set_Cond_selector (ir_node *node, ir_node *selector) {
873 assert (node->op == op_Cond);
874 set_irn_n(node, 0, selector);
878 get_Cond_kind (ir_node *node) {
879 assert (node->op == op_Cond);
880 return node->attr.c.kind;
884 set_Cond_kind (ir_node *node, cond_kind kind) {
885 assert (node->op == op_Cond);
886 node->attr.c.kind = kind;
890 get_Cond_defaultProj (ir_node *node) {
891 assert (node->op == op_Cond);
892 return node->attr.c.default_proj;
896 get_Return_mem (ir_node *node) {
897 assert (node->op == op_Return);
898 return get_irn_n(node, 0);
902 set_Return_mem (ir_node *node, ir_node *mem) {
903 assert (node->op == op_Return);
904 set_irn_n(node, 0, mem);
908 get_Return_n_ress (ir_node *node) {
909 assert (node->op == op_Return);
910 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
914 get_Return_res_arr (ir_node *node)
916 assert ((node->op == op_Return));
917 if (get_Return_n_ress(node) > 0)
918 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
925 set_Return_n_res (ir_node *node, int results) {
926 assert (node->op == op_Return);
931 get_Return_res (ir_node *node, int pos) {
932 assert (node->op == op_Return);
933 assert (get_Return_n_ress(node) > pos);
934 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
938 set_Return_res (ir_node *node, int pos, ir_node *res){
939 assert (node->op == op_Return);
940 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
943 tarval *(get_Const_tarval)(ir_node *node) {
944 return _get_Const_tarval(node);
948 set_Const_tarval (ir_node *node, tarval *con) {
949 assert (node->op == op_Const);
950 node->attr.con.tv = con;
953 cnst_classify_t (classify_Const)(ir_node *node)
955 return _classify_Const(node);
959 /* The source language type. Must be an atomic type. Mode of type must
960 be mode of node. For tarvals from entities type must be pointer to
963 get_Const_type (ir_node *node) {
964 assert (node->op == op_Const);
965 return node->attr.con.tp;
969 set_Const_type (ir_node *node, ir_type *tp) {
970 assert (node->op == op_Const);
971 if (tp != firm_unknown_type) {
972 assert (is_atomic_type(tp));
973 assert (get_type_mode(tp) == get_irn_mode(node));
975 node->attr.con.tp = tp;
980 get_SymConst_kind (const ir_node *node) {
981 assert (node->op == op_SymConst);
982 return node->attr.i.num;
986 set_SymConst_kind (ir_node *node, symconst_kind num) {
987 assert (node->op == op_SymConst);
988 node->attr.i.num = num;
992 get_SymConst_type (ir_node *node) {
993 assert ( (node->op == op_SymConst)
994 && ( get_SymConst_kind(node) == symconst_type_tag
995 || get_SymConst_kind(node) == symconst_size));
996 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
1000 set_SymConst_type (ir_node *node, ir_type *tp) {
1001 assert ( (node->op == op_SymConst)
1002 && ( get_SymConst_kind(node) == symconst_type_tag
1003 || get_SymConst_kind(node) == symconst_size));
1004 node->attr.i.sym.type_p = tp;
1008 get_SymConst_name (ir_node *node) {
1009 assert ( (node->op == op_SymConst)
1010 && (get_SymConst_kind(node) == symconst_addr_name));
1011 return node->attr.i.sym.ident_p;
1015 set_SymConst_name (ir_node *node, ident *name) {
1016 assert ( (node->op == op_SymConst)
1017 && (get_SymConst_kind(node) == symconst_addr_name));
1018 node->attr.i.sym.ident_p = name;
1022 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1023 entity *get_SymConst_entity (ir_node *node) {
1024 assert ( (node->op == op_SymConst)
1025 && (get_SymConst_kind (node) == symconst_addr_ent));
1026 return node->attr.i.sym.entity_p;
1029 void set_SymConst_entity (ir_node *node, entity *ent) {
1030 assert ( (node->op == op_SymConst)
1031 && (get_SymConst_kind(node) == symconst_addr_ent));
1032 node->attr.i.sym.entity_p = ent;
1035 union symconst_symbol
1036 get_SymConst_symbol (ir_node *node) {
1037 assert (node->op == op_SymConst);
1038 return node->attr.i.sym;
1042 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1043 assert (node->op == op_SymConst);
1044 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1045 node->attr.i.sym = sym;
1049 get_SymConst_value_type (ir_node *node) {
1050 assert (node->op == op_SymConst);
1051 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1052 return node->attr.i.tp;
1056 set_SymConst_value_type (ir_node *node, ir_type *tp) {
1057 assert (node->op == op_SymConst);
1058 node->attr.i.tp = tp;
1062 get_Sel_mem (ir_node *node) {
1063 assert (node->op == op_Sel);
1064 return get_irn_n(node, 0);
1068 set_Sel_mem (ir_node *node, ir_node *mem) {
1069 assert (node->op == op_Sel);
1070 set_irn_n(node, 0, mem);
1074 get_Sel_ptr (ir_node *node) {
1075 assert (node->op == op_Sel);
1076 return get_irn_n(node, 1);
1080 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1081 assert (node->op == op_Sel);
1082 set_irn_n(node, 1, ptr);
1086 get_Sel_n_indexs (ir_node *node) {
1087 assert (node->op == op_Sel);
1088 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1092 get_Sel_index_arr (ir_node *node)
1094 assert ((node->op == op_Sel));
1095 if (get_Sel_n_indexs(node) > 0)
1096 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1102 get_Sel_index (ir_node *node, int pos) {
1103 assert (node->op == op_Sel);
1104 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1108 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1109 assert (node->op == op_Sel);
1110 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1114 get_Sel_entity (ir_node *node) {
1115 assert (node->op == op_Sel);
1116 return node->attr.s.ent;
1120 set_Sel_entity (ir_node *node, entity *ent) {
1121 assert (node->op == op_Sel);
1122 node->attr.s.ent = ent;
1126 /* For unary and binary arithmetic operations the access to the
1127 operands can be factored out. Left is the first, right the
1128 second arithmetic value as listed in tech report 0999-33.
1129 unops are: Minus, Abs, Not, Conv, Cast
1130 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1131 Shr, Shrs, Rotate, Cmp */
1135 get_Call_mem (ir_node *node) {
1136 assert (node->op == op_Call);
1137 return get_irn_n(node, 0);
1141 set_Call_mem (ir_node *node, ir_node *mem) {
1142 assert (node->op == op_Call);
1143 set_irn_n(node, 0, mem);
1147 get_Call_ptr (ir_node *node) {
1148 assert (node->op == op_Call);
1149 return get_irn_n(node, 1);
1153 set_Call_ptr (ir_node *node, ir_node *ptr) {
1154 assert (node->op == op_Call);
1155 set_irn_n(node, 1, ptr);
1159 get_Call_param_arr (ir_node *node) {
1160 assert (node->op == op_Call);
1161 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1165 get_Call_n_params (ir_node *node) {
1166 assert (node->op == op_Call);
1167 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1171 get_Call_arity (ir_node *node) {
1172 assert (node->op == op_Call);
1173 return get_Call_n_params(node);
1177 set_Call_arity (ir_node *node, ir_node *arity) {
1178 assert (node->op == op_Call);
1183 get_Call_param (ir_node *node, int pos) {
1184 assert (node->op == op_Call);
1185 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1189 set_Call_param (ir_node *node, int pos, ir_node *param) {
1190 assert (node->op == op_Call);
1191 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1195 get_Call_type (ir_node *node) {
1196 assert (node->op == op_Call);
1197 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1201 set_Call_type (ir_node *node, ir_type *tp) {
1202 assert (node->op == op_Call);
1203 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1204 node->attr.call.cld_tp = tp;
1207 int Call_has_callees(ir_node *node) {
1208 assert(node && node->op == op_Call);
1209 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1210 (node->attr.call.callee_arr != NULL));
1213 int get_Call_n_callees(ir_node * node) {
1214 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1215 return ARR_LEN(node->attr.call.callee_arr);
1218 entity * get_Call_callee(ir_node * node, int pos) {
1219 assert(pos >= 0 && pos < get_Call_n_callees(node));
1220 return node->attr.call.callee_arr[pos];
1223 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1224 assert(node->op == op_Call);
1225 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1226 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1228 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1231 void remove_Call_callee_arr(ir_node * node) {
1232 assert(node->op == op_Call);
1233 node->attr.call.callee_arr = NULL;
1236 ir_node * get_CallBegin_ptr (ir_node *node) {
1237 assert(node->op == op_CallBegin);
1238 return get_irn_n(node, 0);
1240 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1241 assert(node->op == op_CallBegin);
1242 set_irn_n(node, 0, ptr);
1244 ir_node * get_CallBegin_call (ir_node *node) {
1245 assert(node->op == op_CallBegin);
1246 return node->attr.callbegin.call;
1248 void set_CallBegin_call (ir_node *node, ir_node *call) {
1249 assert(node->op == op_CallBegin);
1250 node->attr.callbegin.call = call;
1255 ir_node * get_##OP##_left(ir_node *node) { \
1256 assert(node->op == op_##OP); \
1257 return get_irn_n(node, node->op->op_index); \
1259 void set_##OP##_left(ir_node *node, ir_node *left) { \
1260 assert(node->op == op_##OP); \
1261 set_irn_n(node, node->op->op_index, left); \
1263 ir_node *get_##OP##_right(ir_node *node) { \
1264 assert(node->op == op_##OP); \
1265 return get_irn_n(node, node->op->op_index + 1); \
1267 void set_##OP##_right(ir_node *node, ir_node *right) { \
1268 assert(node->op == op_##OP); \
1269 set_irn_n(node, node->op->op_index + 1, right); \
1273 ir_node *get_##OP##_op(ir_node *node) { \
1274 assert(node->op == op_##OP); \
1275 return get_irn_n(node, node->op->op_index); \
1277 void set_##OP##_op (ir_node *node, ir_node *op) { \
1278 assert(node->op == op_##OP); \
1279 set_irn_n(node, node->op->op_index, op); \
1289 get_Quot_mem (ir_node *node) {
1290 assert (node->op == op_Quot);
1291 return get_irn_n(node, 0);
1295 set_Quot_mem (ir_node *node, ir_node *mem) {
1296 assert (node->op == op_Quot);
1297 set_irn_n(node, 0, mem);
1303 get_DivMod_mem (ir_node *node) {
1304 assert (node->op == op_DivMod);
1305 return get_irn_n(node, 0);
1309 set_DivMod_mem (ir_node *node, ir_node *mem) {
1310 assert (node->op == op_DivMod);
1311 set_irn_n(node, 0, mem);
1317 get_Div_mem (ir_node *node) {
1318 assert (node->op == op_Div);
1319 return get_irn_n(node, 0);
1323 set_Div_mem (ir_node *node, ir_node *mem) {
1324 assert (node->op == op_Div);
1325 set_irn_n(node, 0, mem);
1331 get_Mod_mem (ir_node *node) {
1332 assert (node->op == op_Mod);
1333 return get_irn_n(node, 0);
1337 set_Mod_mem (ir_node *node, ir_node *mem) {
1338 assert (node->op == op_Mod);
1339 set_irn_n(node, 0, mem);
1356 get_Cast_type (ir_node *node) {
1357 assert (node->op == op_Cast);
1358 return node->attr.cast.totype;
1362 set_Cast_type (ir_node *node, ir_type *to_tp) {
1363 assert (node->op == op_Cast);
1364 node->attr.cast.totype = to_tp;
/* Checks for upcast.
 *
 * Returns true if the Cast node casts a class type to a super type.
 * Requires consistent typeinfo on the graph.
 */
int is_Cast_upcast(ir_node *node) {
  ir_type *totype   = get_Cast_type(node);
  ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
  ir_graph *myirg = get_irn_irg(node);
  assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
  /* strip matching levels of pointer indirection on both sides */
  while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
    totype   = get_pointer_points_to_type(totype);
    fromtype = get_pointer_points_to_type(fromtype);
  /* only class-to-class casts can be up/downcasts */
  if (!is_Class_type(totype)) return 0;
  return is_SubClass_of(fromtype, totype);

/* Checks for downcast.
 *
 * Returns true if the Cast node casts a class type to a sub type.
 * Requires consistent typeinfo on the graph.
 */
int is_Cast_downcast(ir_node *node) {
  ir_type *totype   = get_Cast_type(node);
  ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
  assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
  /* strip matching levels of pointer indirection on both sides */
  while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
    totype   = get_pointer_points_to_type(totype);
    fromtype = get_pointer_points_to_type(fromtype);
  if (!is_Class_type(totype)) return 0;
  /* subclass test is reversed relative to the upcast case */
  return is_SubClass_of(totype, fromtype);
1414 (is_unop)(const ir_node *node) {
1415 return _is_unop(node);
1419 get_unop_op (ir_node *node) {
1420 if (node->op->opar == oparity_unary)
1421 return get_irn_n(node, node->op->op_index);
1423 assert(node->op->opar == oparity_unary);
1428 set_unop_op (ir_node *node, ir_node *op) {
1429 if (node->op->opar == oparity_unary)
1430 set_irn_n(node, node->op->op_index, op);
1432 assert(node->op->opar == oparity_unary);
1436 (is_binop)(const ir_node *node) {
1437 return _is_binop(node);
1441 get_binop_left (ir_node *node) {
1442 if (node->op->opar == oparity_binary)
1443 return get_irn_n(node, node->op->op_index);
1445 assert(node->op->opar == oparity_binary);
1450 set_binop_left (ir_node *node, ir_node *left) {
1451 if (node->op->opar == oparity_binary)
1452 set_irn_n(node, node->op->op_index, left);
1454 assert (node->op->opar == oparity_binary);
1458 get_binop_right (ir_node *node) {
1459 if (node->op->opar == oparity_binary)
1460 return get_irn_n(node, node->op->op_index + 1);
1462 assert(node->op->opar == oparity_binary);
1467 set_binop_right (ir_node *node, ir_node *right) {
1468 if (node->op->opar == oparity_binary)
1469 set_irn_n(node, node->op->op_index + 1, right);
1471 assert (node->op->opar == oparity_binary);
1474 int is_Phi (const ir_node *n) {
1480 if (op == op_Filter) return get_interprocedural_view();
1483 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1484 (get_irn_arity(n) > 0));
1489 int is_Phi0 (const ir_node *n) {
1492 return ((get_irn_op(n) == op_Phi) &&
1493 (get_irn_arity(n) == 0) &&
1494 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
1498 get_Phi_preds_arr (ir_node *node) {
1499 assert (node->op == op_Phi);
1500 return (ir_node **)&(get_irn_in(node)[1]);
1504 get_Phi_n_preds (ir_node *node) {
1505 assert (is_Phi(node) || is_Phi0(node));
1506 return (get_irn_arity(node));
1510 void set_Phi_n_preds (ir_node *node, int n_preds) {
1511 assert (node->op == op_Phi);
1516 get_Phi_pred (ir_node *node, int pos) {
1517 assert (is_Phi(node) || is_Phi0(node));
1518 return get_irn_n(node, pos);
1522 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1523 assert (is_Phi(node) || is_Phi0(node));
1524 set_irn_n(node, pos, pred);
1528 int is_memop(ir_node *node) {
1529 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1532 ir_node *get_memop_mem (ir_node *node) {
1533 assert(is_memop(node));
1534 return get_irn_n(node, 0);
1537 void set_memop_mem (ir_node *node, ir_node *mem) {
1538 assert(is_memop(node));
1539 set_irn_n(node, 0, mem);
1542 ir_node *get_memop_ptr (ir_node *node) {
1543 assert(is_memop(node));
1544 return get_irn_n(node, 1);
1547 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1548 assert(is_memop(node));
1549 set_irn_n(node, 1, ptr);
1553 get_Load_mem (ir_node *node) {
1554 assert (node->op == op_Load);
1555 return get_irn_n(node, 0);
1559 set_Load_mem (ir_node *node, ir_node *mem) {
1560 assert (node->op == op_Load);
1561 set_irn_n(node, 0, mem);
1565 get_Load_ptr (ir_node *node) {
1566 assert (node->op == op_Load);
1567 return get_irn_n(node, 1);
1571 set_Load_ptr (ir_node *node, ir_node *ptr) {
1572 assert (node->op == op_Load);
1573 set_irn_n(node, 1, ptr);
1577 get_Load_mode (ir_node *node) {
1578 assert (node->op == op_Load);
1579 return node->attr.load.load_mode;
1583 set_Load_mode (ir_node *node, ir_mode *mode) {
1584 assert (node->op == op_Load);
1585 node->attr.load.load_mode = mode;
1589 get_Load_volatility (ir_node *node) {
1590 assert (node->op == op_Load);
1591 return node->attr.load.volatility;
1595 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1596 assert (node->op == op_Load);
1597 node->attr.load.volatility = volatility;
1602 get_Store_mem (ir_node *node) {
1603 assert (node->op == op_Store);
1604 return get_irn_n(node, 0);
1608 set_Store_mem (ir_node *node, ir_node *mem) {
1609 assert (node->op == op_Store);
1610 set_irn_n(node, 0, mem);
1614 get_Store_ptr (ir_node *node) {
1615 assert (node->op == op_Store);
1616 return get_irn_n(node, 1);
1620 set_Store_ptr (ir_node *node, ir_node *ptr) {
1621 assert (node->op == op_Store);
1622 set_irn_n(node, 1, ptr);
1626 get_Store_value (ir_node *node) {
1627 assert (node->op == op_Store);
1628 return get_irn_n(node, 2);
1632 set_Store_value (ir_node *node, ir_node *value) {
1633 assert (node->op == op_Store);
1634 set_irn_n(node, 2, value);
1638 get_Store_volatility (ir_node *node) {
1639 assert (node->op == op_Store);
1640 return node->attr.store.volatility;
1644 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1645 assert (node->op == op_Store);
1646 node->attr.store.volatility = volatility;
1651 get_Alloc_mem (ir_node *node) {
1652 assert (node->op == op_Alloc);
1653 return get_irn_n(node, 0);
1657 set_Alloc_mem (ir_node *node, ir_node *mem) {
1658 assert (node->op == op_Alloc);
1659 set_irn_n(node, 0, mem);
1663 get_Alloc_size (ir_node *node) {
1664 assert (node->op == op_Alloc);
1665 return get_irn_n(node, 1);
1669 set_Alloc_size (ir_node *node, ir_node *size) {
1670 assert (node->op == op_Alloc);
1671 set_irn_n(node, 1, size);
1675 get_Alloc_type (ir_node *node) {
1676 assert (node->op == op_Alloc);
1677 return node->attr.a.type = skip_tid(node->attr.a.type);
1681 set_Alloc_type (ir_node *node, ir_type *tp) {
1682 assert (node->op == op_Alloc);
1683 node->attr.a.type = tp;
1687 get_Alloc_where (ir_node *node) {
1688 assert (node->op == op_Alloc);
1689 return node->attr.a.where;
1693 set_Alloc_where (ir_node *node, where_alloc where) {
1694 assert (node->op == op_Alloc);
1695 node->attr.a.where = where;
1700 get_Free_mem (ir_node *node) {
1701 assert (node->op == op_Free);
1702 return get_irn_n(node, 0);
1706 set_Free_mem (ir_node *node, ir_node *mem) {
1707 assert (node->op == op_Free);
1708 set_irn_n(node, 0, mem);
1712 get_Free_ptr (ir_node *node) {
1713 assert (node->op == op_Free);
1714 return get_irn_n(node, 1);
1718 set_Free_ptr (ir_node *node, ir_node *ptr) {
1719 assert (node->op == op_Free);
1720 set_irn_n(node, 1, ptr);
1724 get_Free_size (ir_node *node) {
1725 assert (node->op == op_Free);
1726 return get_irn_n(node, 2);
1730 set_Free_size (ir_node *node, ir_node *size) {
1731 assert (node->op == op_Free);
1732 set_irn_n(node, 2, size);
1736 get_Free_type (ir_node *node) {
1737 assert (node->op == op_Free);
1738 return node->attr.f.type = skip_tid(node->attr.f.type);
1742 set_Free_type (ir_node *node, ir_type *tp) {
1743 assert (node->op == op_Free);
1744 node->attr.f.type = tp;
1748 get_Free_where (ir_node *node) {
1749 assert (node->op == op_Free);
1750 return node->attr.f.where;
1754 set_Free_where (ir_node *node, where_alloc where) {
1755 assert (node->op == op_Free);
1756 node->attr.f.where = where;
1760 get_Sync_preds_arr (ir_node *node) {
1761 assert (node->op == op_Sync);
1762 return (ir_node **)&(get_irn_in(node)[1]);
1766 get_Sync_n_preds (ir_node *node) {
1767 assert (node->op == op_Sync);
1768 return (get_irn_arity(node));
1773 set_Sync_n_preds (ir_node *node, int n_preds) {
1774 assert (node->op == op_Sync);
1779 get_Sync_pred (ir_node *node, int pos) {
1780 assert (node->op == op_Sync);
1781 return get_irn_n(node, pos);
1785 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1786 assert (node->op == op_Sync);
1787 set_irn_n(node, pos, pred);
/* Returns the type produced by a Proj node, when the predecessor remarks
 * one: Proj(Proj(Start)) -> parameter type, Proj(Proj(Call)) -> result
 * type, Proj(Load) -> entity type of the loaded Sel address.
 * NOTE(review): this listing is sampled; the declarations of tp/pred_pred,
 * several case labels and the final return fall on elided lines. */
1790 ir_type *get_Proj_type(ir_node *n)
1793 ir_node *pred = get_Proj_pred(n);
1795 switch (get_irn_opcode(pred)) {
1798 /* Deal with Start / Call here: we need to know the Proj Nr. */
1799 assert(get_irn_mode(pred) == mode_T);
1800 pred_pred = get_Proj_pred(pred);
1801 if (get_irn_op(pred_pred) == op_Start) {
/* parameter projection: look the type up in the method type of the graph's entity */
1802 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1803 tp = get_method_param_type(mtp, get_Proj_proj(n));
1804 } else if (get_irn_op(pred_pred) == op_Call) {
/* call-result projection: look the type up in the Call's method type */
1805 ir_type *mtp = get_Call_type(pred_pred);
1806 tp = get_method_res_type(mtp, get_Proj_proj(n));
1809 case iro_Start: break;
1810 case iro_Call: break;
1812 ir_node *a = get_Load_ptr(pred);
/* a Load from a Sel address: the loaded value has the selected entity's type */
1814 tp = get_entity_type(get_Sel_entity(a));
1823 get_Proj_pred (const ir_node *node) {
1824 assert (is_Proj(node));
1825 return get_irn_n(node, 0);
1829 set_Proj_pred (ir_node *node, ir_node *pred) {
1830 assert (is_Proj(node));
1831 set_irn_n(node, 0, pred);
1835 get_Proj_proj (const ir_node *node) {
1836 assert (is_Proj(node));
1837 if (get_irn_opcode(node) == iro_Proj) {
1838 return node->attr.proj;
1840 assert(get_irn_opcode(node) == iro_Filter);
1841 return node->attr.filter.proj;
1846 set_Proj_proj (ir_node *node, long proj) {
1847 assert (node->op == op_Proj);
1848 node->attr.proj = proj;
1852 get_Tuple_preds_arr (ir_node *node) {
1853 assert (node->op == op_Tuple);
1854 return (ir_node **)&(get_irn_in(node)[1]);
1858 get_Tuple_n_preds (ir_node *node) {
1859 assert (node->op == op_Tuple);
1860 return (get_irn_arity(node));
1865 set_Tuple_n_preds (ir_node *node, int n_preds) {
1866 assert (node->op == op_Tuple);
1871 get_Tuple_pred (ir_node *node, int pos) {
1872 assert (node->op == op_Tuple);
1873 return get_irn_n(node, pos);
1877 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1878 assert (node->op == op_Tuple);
1879 set_irn_n(node, pos, pred);
1883 get_Id_pred (ir_node *node) {
1884 assert (node->op == op_Id);
1885 return get_irn_n(node, 0);
1889 set_Id_pred (ir_node *node, ir_node *pred) {
1890 assert (node->op == op_Id);
1891 set_irn_n(node, 0, pred);
/* Confirm node accessors.  Predecessor layout, as the getters below show:
 * input 0 = the confirmed value, input 1 = the bound it is compared
 * against (the relation itself is kept in attr.confirm_cmp). */
1894 ir_node *get_Confirm_value (ir_node *node) {
1895 assert (node->op == op_Confirm);
1896 return get_irn_n(node, 0);
1898 void set_Confirm_value (ir_node *node, ir_node *value) {
1899 assert (node->op == op_Confirm);
1900 set_irn_n(node, 0, value);
1902 ir_node *get_Confirm_bound (ir_node *node) {
1903 assert (node->op == op_Confirm);
/* bound lives at predecessor position 1 */
1904 return get_irn_n(node, 1);
1906 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1907 assert (node->op == op_Confirm);
1908 set_irn_n(node, 0, bound);
1910 pn_Cmp get_Confirm_cmp (ir_node *node) {
1911 assert (node->op == op_Confirm);
1912 return node->attr.confirm_cmp;
1914 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1915 assert (node->op == op_Confirm);
1916 node->attr.confirm_cmp = cmp;
1921 get_Filter_pred (ir_node *node) {
1922 assert(node->op == op_Filter);
1926 set_Filter_pred (ir_node *node, ir_node *pred) {
1927 assert(node->op == op_Filter);
1931 get_Filter_proj(ir_node *node) {
1932 assert(node->op == op_Filter);
1933 return node->attr.filter.proj;
1936 set_Filter_proj (ir_node *node, long proj) {
1937 assert(node->op == op_Filter);
1938 node->attr.filter.proj = proj;
/* Interprocedural (call-graph view) predecessor accessors of Filter nodes.
 * They operate directly on attr.filter.in_cg so they work regardless of the
 * current view setting; in_cg follows the usual in-array convention: slot 0
 * is the block, real predecessors start at index 1. */
1941 /* Don't use get_irn_arity, get_irn_n in implementation as access
1942 shall work independent of view!!! */
1943 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1944 assert(node->op == op_Filter);
/* (re)allocate the cg arrays if none exist yet or the arity changed */
1945 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1946 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1947 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1948 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
/* slot 0 mirrors the node's block predecessor */
1949 node->attr.filter.in_cg[0] = node->in[0];
1951 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1954 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1955 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1956 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1957 node->attr.filter.in_cg[pos + 1] = pred;
1959 int get_Filter_n_cg_preds(ir_node *node) {
1960 assert(node->op == op_Filter && node->attr.filter.in_cg);
1961 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1963 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1965 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1967 arity = ARR_LEN(node->attr.filter.in_cg);
1968 assert(pos < arity - 1);
1969 return node->attr.filter.in_cg[pos + 1];
/* Mux accessors.  Each also accepts a Psi with exactly one condition
 * (arity 3) and maps it onto the Mux view: sel -> Psi cond 0,
 * true -> Psi val 0, false -> Psi default.
 * NOTE(review): the Mux setters below write node->in[2]/in[3] directly
 * instead of going through set_irn_n() like the rest of this file --
 * presumably this bypasses edge/dependency bookkeeping; confirm this is
 * intentional before relying on it. */
1973 ir_node *get_Mux_sel (ir_node *node) {
1974 if (node->op == op_Psi) {
1975 assert(get_irn_arity(node) == 3);
1976 return get_Psi_cond(node, 0);
1978 assert(node->op == op_Mux);
1981 void set_Mux_sel (ir_node *node, ir_node *sel) {
1982 if (node->op == op_Psi) {
1983 assert(get_irn_arity(node) == 3);
1984 set_Psi_cond(node, 0, sel);
1987 assert(node->op == op_Mux);
1992 ir_node *get_Mux_false (ir_node *node) {
1993 if (node->op == op_Psi) {
1994 assert(get_irn_arity(node) == 3);
1995 return get_Psi_default(node);
1997 assert(node->op == op_Mux);
2000 void set_Mux_false (ir_node *node, ir_node *ir_false) {
2001 if (node->op == op_Psi) {
2002 assert(get_irn_arity(node) == 3);
2003 set_Psi_default(node, ir_false);
2006 assert(node->op == op_Mux);
/* direct in[] write, not set_irn_n -- see NOTE above */
2007 node->in[2] = ir_false;
2011 ir_node *get_Mux_true (ir_node *node) {
2012 if (node->op == op_Psi) {
2013 assert(get_irn_arity(node) == 3);
2014 return get_Psi_val(node, 0);
2016 assert(node->op == op_Mux);
2019 void set_Mux_true (ir_node *node, ir_node *ir_true) {
2020 if (node->op == op_Psi) {
2021 assert(get_irn_arity(node) == 3);
2022 set_Psi_val(node, 0, ir_true);
2025 assert(node->op == op_Mux);
/* direct in[] write, not set_irn_n -- see NOTE above */
2026 node->in[3] = ir_true;
2031 ir_node *get_Psi_cond (ir_node *node, int pos) {
2032 int num_conds = get_Psi_n_conds(node);
2033 assert(node->op == op_Psi);
2034 assert(pos < num_conds);
2035 return get_irn_n(node, 2 * pos);
2038 void set_Psi_cond (ir_node *node, int pos, ir_node *cond) {
2039 int num_conds = get_Psi_n_conds(node);
2040 assert(node->op == op_Psi);
2041 assert(pos < num_conds);
2042 set_irn_n(node, 2 * pos, cond);
2045 ir_node *get_Psi_val (ir_node *node, int pos) {
2046 int num_vals = get_Psi_n_conds(node);
2047 assert(node->op == op_Psi);
2048 assert(pos < num_vals);
2049 return get_irn_n(node, 2 * pos + 1);
2052 void set_Psi_val (ir_node *node, int pos, ir_node *val) {
2053 int num_vals = get_Psi_n_conds(node);
2054 assert(node->op == op_Psi);
2055 assert(pos < num_vals);
2056 set_irn_n(node, 2 * pos + 1, val);
2059 ir_node *get_Psi_default(ir_node *node) {
2060 int def_pos = get_irn_arity(node) - 1;
2061 assert(node->op == op_Psi);
2062 return get_irn_n(node, def_pos);
2065 void set_Psi_default(ir_node *node, ir_node *val) {
2066 int def_pos = get_irn_arity(node);
2067 assert(node->op == op_Psi);
2068 set_irn_n(node, def_pos, val);
2071 int (get_Psi_n_conds)(ir_node *node) {
2072 return _get_Psi_n_conds(node);
2076 ir_node *get_CopyB_mem (ir_node *node) {
2077 assert (node->op == op_CopyB);
2078 return get_irn_n(node, 0);
2081 void set_CopyB_mem (ir_node *node, ir_node *mem) {
2082 assert (node->op == op_CopyB);
2083 set_irn_n(node, 0, mem);
2086 ir_node *get_CopyB_dst (ir_node *node) {
2087 assert (node->op == op_CopyB);
2088 return get_irn_n(node, 1);
2091 void set_CopyB_dst (ir_node *node, ir_node *dst) {
2092 assert (node->op == op_CopyB);
2093 set_irn_n(node, 1, dst);
2096 ir_node *get_CopyB_src (ir_node *node) {
2097 assert (node->op == op_CopyB);
2098 return get_irn_n(node, 2);
2101 void set_CopyB_src (ir_node *node, ir_node *src) {
2102 assert (node->op == op_CopyB);
2103 set_irn_n(node, 2, src);
2106 ir_type *get_CopyB_type(ir_node *node) {
2107 assert (node->op == op_CopyB);
2108 return node->attr.copyb.data_type;
2111 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2112 assert (node->op == op_CopyB && data_type);
2113 node->attr.copyb.data_type = data_type;
2118 get_InstOf_type (ir_node *node) {
2119 assert (node->op = op_InstOf);
2120 return node->attr.io.type;
2124 set_InstOf_type (ir_node *node, ir_type *type) {
2125 assert (node->op = op_InstOf);
2126 node->attr.io.type = type;
2130 get_InstOf_store (ir_node *node) {
2131 assert (node->op = op_InstOf);
2132 return get_irn_n(node, 0);
2136 set_InstOf_store (ir_node *node, ir_node *obj) {
2137 assert (node->op = op_InstOf);
2138 set_irn_n(node, 0, obj);
2142 get_InstOf_obj (ir_node *node) {
2143 assert (node->op = op_InstOf);
2144 return get_irn_n(node, 1);
2148 set_InstOf_obj (ir_node *node, ir_node *obj) {
2149 assert (node->op = op_InstOf);
2150 set_irn_n(node, 1, obj);
2153 /* Returns the memory input of a Raise operation. */
2155 get_Raise_mem (ir_node *node) {
2156 assert (node->op == op_Raise);
2157 return get_irn_n(node, 0);
2161 set_Raise_mem (ir_node *node, ir_node *mem) {
2162 assert (node->op == op_Raise);
2163 set_irn_n(node, 0, mem);
2167 get_Raise_exo_ptr (ir_node *node) {
2168 assert (node->op == op_Raise);
2169 return get_irn_n(node, 1);
2173 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
2174 assert (node->op == op_Raise);
2175 set_irn_n(node, 1, exo_ptr);
2180 /* Returns the memory input of a Bound operation. */
2181 ir_node *get_Bound_mem(ir_node *bound) {
2182 assert (bound->op == op_Bound);
2183 return get_irn_n(bound, 0);
2186 void set_Bound_mem (ir_node *bound, ir_node *mem) {
2187 assert (bound->op == op_Bound);
2188 set_irn_n(bound, 0, mem);
2191 /* Returns the index input of a Bound operation. */
2192 ir_node *get_Bound_index(ir_node *bound) {
2193 assert (bound->op == op_Bound);
2194 return get_irn_n(bound, 1);
2197 void set_Bound_index(ir_node *bound, ir_node *idx) {
2198 assert (bound->op == op_Bound);
2199 set_irn_n(bound, 1, idx);
2202 /* Returns the lower bound input of a Bound operation. */
2203 ir_node *get_Bound_lower(ir_node *bound) {
2204 assert (bound->op == op_Bound);
2205 return get_irn_n(bound, 2);
2208 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2209 assert (bound->op == op_Bound);
2210 set_irn_n(bound, 2, lower);
2213 /* Returns the upper bound input of a Bound operation. */
2214 ir_node *get_Bound_upper(ir_node *bound) {
2215 assert (bound->op == op_Bound);
2216 return get_irn_n(bound, 3);
2219 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2220 assert (bound->op == op_Bound);
2221 set_irn_n(bound, 3, upper);
/* Returns the ir_graph a node belongs to by hopping to its block
 * (pred -1) and reading the graph out of the block attribute.  A Bad
 * predecessor in the block slot (left behind by optimization) is
 * skipped with one extra hop. */
2224 /* returns the graph of a node */
2226 get_irn_irg(const ir_node *node) {
2228 * Do not use get_nodes_Block() here, because this
2229 * will check the pinned state.
2230 * However even a 'wrong' block is always in the proper
2233 if (! is_Block(node))
2234 node = get_irn_n(node, -1);
2235 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2236 node = get_irn_n(node, -1);
2237 assert(get_irn_op(node) == op_Block);
2238 return node->attr.block.irg;
2242 /*----------------------------------------------------------------*/
2243 /* Auxiliary routines */
2244 /*----------------------------------------------------------------*/
2247 skip_Proj (ir_node *node) {
2248 /* don't assert node !!! */
2249 if (node && is_Proj(node)) {
2250 return get_Proj_pred(node);
/* Resolves Proj(Tuple) combinations to the Tuple predecessor selected by
 * the Proj number, recursing through nested Tuples and skipping Id nodes
 * along the way.  Does nothing when normalization is switched off. */
2257 skip_Tuple (ir_node *node) {
2261 if (!get_opt_normalize()) return node;
2264 node = skip_Id(node);
2265 if (get_irn_op(node) == op_Proj) {
2266 pred = skip_Id(get_Proj_pred(node));
2267 op = get_irn_op(pred);
2270 * Looks strange but calls get_irn_op() only once
2271 * in most often cases.
/* nested Proj: the predecessor may itself be a Proj of a Tuple */
2273 if (op == op_Proj) { /* nested Tuple ? */
2274 pred = skip_Id(skip_Tuple(pred));
2275 op = get_irn_op(pred);
2277 if (op == op_Tuple) {
2278 node = get_Tuple_pred(pred, get_Proj_proj(node));
2282 else if (op == op_Tuple) {
/* direct Proj(Tuple): select the Tuple input named by the Proj number */
2283 node = get_Tuple_pred(pred, get_Proj_proj(node));
2290 /* returns operand of node if node is a Cast */
2291 ir_node *skip_Cast (ir_node *node) {
2292 if (node && get_irn_op(node) == op_Cast)
2293 return get_Cast_op(node);
2297 /* returns operand of node if node is a Confirm */
2298 ir_node *skip_Confirm (ir_node *node) {
2299 if (node && get_irn_op(node) == op_Confirm)
2300 return get_Confirm_value(node);
2304 /* skip all high-level ops */
2305 ir_node *skip_HighLevel(ir_node *node) {
2306 if (node && is_op_highlevel(get_irn_op(node)))
2307 return get_irn_n(node, 0);
2312 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2313 * than any other approach, as Id chains are resolved and all point to the real node, or
2314 * all id's are self loops.
2316 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2319 skip_Id (ir_node *node) {
2320 /* don't assert node !!! */
2322 /* Don't use get_Id_pred: We get into an endless loop for
2323 self-referencing Ids. */
2324 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2325 ir_node *rem_pred = node->in[0+1];
2328 assert (get_irn_arity (node) > 0);
2330 node->in[0+1] = node;
2331 res = skip_Id(rem_pred);
2332 if (res->op == op_Id) /* self-loop */ return node;
2334 node->in[0+1] = res;
/* Hand-optimized Id-chain resolution.  The in[0+1] accesses are
 * deliberate: get_Id_pred() would loop forever on self-referencing Ids.
 * The node is temporarily turned into a self-loop while the chain is
 * resolved recursively, so cycles of Ids terminate as self-loops and
 * straight chains end up pointing at the chain end.  Statement order is
 * load-bearing here -- do not reorder. */
2341 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2342 * than any other approach, as Id chains are resolved and all point to the real node, or
2343 * all id's are self loops.
2345 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2346 * a little bit "hand optimized".
2348 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2351 skip_Id (ir_node *node) {
2353 /* don't assert node !!! */
/* fast path: not an Id at all */
2355 if (!node || (node->op != op_Id)) return node;
2357 /* Don't use get_Id_pred(): We get into an endless loop for
2358 self-referencing Ids. */
2359 pred = node->in[0+1];
/* fast path: a single Id in front of a real node */
2361 if (pred->op != op_Id) return pred;
2363 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2364 ir_node *rem_pred, *res;
2366 if (pred->op != op_Id) return pred; /* shortcut */
2369 assert (get_irn_arity (node) > 0);
2371 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2372 res = skip_Id(rem_pred);
2373 if (res->op == op_Id) /* self-loop */ return node;
2375 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
2383 void skip_Id_and_store(ir_node **node) {
2386 if (!n || (n->op != op_Id)) return;
2388 /* Don't use get_Id_pred(): We get into an endless loop for
2389 self-referencing Ids. */
2394 (is_Bad)(const ir_node *node) {
2395 return _is_Bad(node);
2399 (is_Const)(const ir_node *node) {
2400 return _is_Const(node);
2404 (is_no_Block)(const ir_node *node) {
2405 return _is_no_Block(node);
2409 (is_Block)(const ir_node *node) {
2410 return _is_Block(node);
2413 /* returns true if node is an Unknown node. */
2415 (is_Unknown)(const ir_node *node) {
2416 return _is_Unknown(node);
2419 /* returns true if node is a Return node. */
2421 (is_Return)(const ir_node *node) {
2422 return _is_Return(node);
2425 /* returns true if node is a Call node. */
2427 (is_Call)(const ir_node *node) {
2428 return _is_Call(node);
2431 /* returns true if node is a Sel node. */
2433 (is_Sel)(const ir_node *node) {
2434 return _is_Sel(node);
2437 /* returns true if node is a Mux node or a Psi with only one condition. */
2439 (is_Mux)(const ir_node *node) {
2440 return _is_Mux(node);
2444 is_Proj (const ir_node *node) {
2446 return node->op == op_Proj
2447 || (!get_interprocedural_view() && node->op == op_Filter);
2450 /* Returns true if the operation manipulates control flow. */
2452 is_cfop(const ir_node *node) {
2453 return is_cfopcode(get_irn_op(node));
2456 /* Returns true if the operation manipulates interprocedural control flow:
2457 CallBegin, EndReg, EndExcept */
2458 int is_ip_cfop(const ir_node *node) {
2459 return is_ip_cfopcode(get_irn_op(node));
2462 /* Returns true if the operation can change the control flow because
2465 is_fragile_op(const ir_node *node) {
2466 return is_op_fragile(get_irn_op(node));
2469 /* Returns the memory operand of fragile operations. */
2470 ir_node *get_fragile_op_mem(ir_node *node) {
2471 assert(node && is_fragile_op(node));
2473 switch (get_irn_opcode (node)) {
2482 return get_irn_n(node, 0);
2487 assert(0 && "should not be reached");
2492 /* Returns true if the operation is a forking control flow operation. */
2493 int (is_irn_forking)(const ir_node *node) {
2494 return _is_irn_forking(node);
2497 /* Return the type associated with the value produced by n
2498 * if the node remarks this type as it is the case for
2499 * Cast, Const, SymConst and some Proj nodes. */
2500 ir_type *(get_irn_type)(ir_node *node) {
2501 return _get_irn_type(node);
2504 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2506 ir_type *(get_irn_type_attr)(ir_node *node) {
2507 return _get_irn_type_attr(node);
2510 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2511 entity *(get_irn_entity_attr)(ir_node *node) {
2512 return _get_irn_entity_attr(node);
2515 /* Returns non-zero for constant-like nodes. */
2516 int (is_irn_constlike)(const ir_node *node) {
2517 return _is_irn_constlike(node);
2521 * Returns non-zero for nodes that are allowed to have keep-alives and
2522 * are neither Block nor PhiM.
2524 int (is_irn_keep)(const ir_node *node) {
2525 return _is_irn_keep(node);
2528 /* Returns non-zero for nodes that are machine operations. */
2529 int (is_irn_machine_op)(const ir_node *node) {
2530 return _is_irn_machine_op(node);
2533 /* Returns non-zero for nodes that are machine operands. */
2534 int (is_irn_machine_operand)(const ir_node *node) {
2535 return _is_irn_machine_operand(node);
2538 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2539 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2540 return _is_irn_machine_user(node, n);
2544 /* Gets the string representation of the jump prediction .*/
2545 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
2549 case COND_JMP_PRED_NONE: return "no prediction";
2550 case COND_JMP_PRED_TRUE: return "true taken";
2551 case COND_JMP_PRED_FALSE: return "false taken";
2555 /* Returns the conditional jump prediction of a Cond node. */
2556 cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
2557 return _get_Cond_jmp_pred(cond);
2560 /* Sets a new conditional jump prediction. */
2561 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2562 _set_Cond_jmp_pred(cond, pred);
2565 /** the get_type operation must be always implemented and return a firm type */
2566 static ir_type *get_Default_type(ir_node *n) {
2567 return get_unknown_type();
2570 /* Sets the get_type operation for an ir_op_ops. */
2571 ir_op_ops *firm_set_default_get_type(opcode code, ir_op_ops *ops)
2574 case iro_Const: ops->get_type = get_Const_type; break;
2575 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2576 case iro_Cast: ops->get_type = get_Cast_type; break;
2577 case iro_Proj: ops->get_type = get_Proj_type; break;
2579 /* not allowed to be NULL */
2580 if (! ops->get_type)
2581 ops->get_type = get_Default_type;
2587 /** Return the attribute type of a SymConst node if exists */
2588 static ir_type *get_SymConst_attr_type(ir_node *self) {
2589 symconst_kind kind = get_SymConst_kind(self);
2590 if (kind == symconst_type_tag || kind == symconst_size)
2591 return get_SymConst_type(self);
2595 /** Return the attribute entity of a SymConst node if exists */
2596 static entity *get_SymConst_attr_entity(ir_node *self) {
2597 symconst_kind kind = get_SymConst_kind(self);
2598 if (kind == symconst_addr_ent)
2599 return get_SymConst_entity(self);
2603 /** the get_type_attr operation must be always implemented */
2604 static ir_type *get_Null_type(ir_node *n) {
2605 return firm_unknown_type;
2608 /* Sets the get_type operation for an ir_op_ops. */
2609 ir_op_ops *firm_set_default_get_type_attr(opcode code, ir_op_ops *ops)
2612 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2613 case iro_Call: ops->get_type_attr = get_Call_type; break;
2614 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2615 case iro_Free: ops->get_type_attr = get_Free_type; break;
2616 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2618 /* not allowed to be NULL */
2619 if (! ops->get_type_attr)
2620 ops->get_type_attr = get_Null_type;
2626 /** the get_entity_attr operation must be always implemented */
2627 static entity *get_Null_ent(ir_node *n) {
2631 /* Sets the get_type operation for an ir_op_ops. */
2632 ir_op_ops *firm_set_default_get_entity_attr(opcode code, ir_op_ops *ops)
2635 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2636 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2638 /* not allowed to be NULL */
2639 if (! ops->get_entity_attr)
2640 ops->get_entity_attr = get_Null_ent;
2646 #ifdef DEBUG_libfirm
2647 void dump_irn (ir_node *n) {
2648 int i, arity = get_irn_arity(n);
2649 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2651 ir_node *pred = get_irn_n(n, -1);
2652 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2653 get_irn_node_nr(pred), (void *)pred);
2655 printf(" preds: \n");
2656 for (i = 0; i < arity; ++i) {
2657 ir_node *pred = get_irn_n(n, i);
2658 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2659 get_irn_node_nr(pred), (void *)pred);
2663 #else /* DEBUG_libfirm */
2664 void dump_irn (ir_node *n) {}
2665 #endif /* DEBUG_libfirm */