3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
/* some constants fixing the positions of nodes predecessors. */
37 #define CALL_PARAM_OFFSET 2
38 #define FUNCCALL_PARAM_OFFSET 1
39 #define SEL_INDEX_OFFSET 2
40 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
41 #define END_KEEPALIVE_OFFSET 0
43 static const char *pnc_name_arr [] = {
44 "pn_Cmp_False", "pn_Cmp_Eq", "pn_Cmp_Lt", "pn_Cmp_Le",
45 "pn_Cmp_Gt", "pn_Cmp_Ge", "pn_Cmp_Lg", "pn_Cmp_Leg",
46 "pn_Cmp_Uo", "pn_Cmp_Ue", "pn_Cmp_Ul", "pn_Cmp_Ule",
47 "pn_Cmp_Ug", "pn_Cmp_Uge", "pn_Cmp_Ne", "pn_Cmp_True"
* Returns the pnc name from a pnc constant.
53 const char *get_pnc_string(int pnc) {
54 return pnc_name_arr[pnc];
58 * Calculates the negated (Complement(R)) pnc condition.
60 int get_negated_pnc(int pnc, ir_mode *mode) {
63 /* do NOT add the Uo bit for non-floating point values */
64 if (! mode_is_float(mode))
70 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
72 get_inversed_pnc(int pnc) {
73 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
74 int lesser = pnc & pn_Cmp_Lt;
75 int greater = pnc & pn_Cmp_Gt;
77 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
82 const char *pns_name_arr [] = {
83 "initial_exec", "global_store",
84 "frame_base", "globals", "args"
87 const char *symconst_name_arr [] = {
88 "type_tag", "size", "addr_name", "addr_ent"
92 * Indicates, whether additional data can be registered to ir nodes.
93 * If set to 1, this is not possible anymore.
95 static int forbid_new_data = 0;
98 * The amount of additional space for custom data to be allocated upon
99 * creating a new node.
101 unsigned firm_add_node_size = 0;
104 /* register new space for every node */
105 unsigned register_additional_node_data(unsigned size) {
106 assert(!forbid_new_data && "Too late to register additional node data");
111 return firm_add_node_size += size;
117 /* Forbid the addition of new data to an ir node. */
122 * irnode constructor.
123 * Create a new irnode in irg, with an op, mode, arity and
124 * some incoming irnodes.
125 * If arity is negative, a node with a dynamic array is created.
128 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
129 int arity, ir_node **in)
132 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
135 assert(irg && op && mode);
136 p = obstack_alloc (irg->obst, node_size);
137 memset(p, 0, node_size);
138 res = (ir_node *) (p + firm_add_node_size);
140 res->kind = k_ir_node;
144 res->node_idx = get_irg_next_node_idx(irg);
147 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
149 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
150 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
154 set_irn_dbg_info(res, db);
158 res->node_nr = get_irp_new_node_nr();
161 #if FIRM_EDGES_INPLACE
164 int is_bl = is_Block(res);
166 INIT_LIST_HEAD(&res->edge_info.outs_head);
168 INIT_LIST_HEAD(&res->attr.block.succ_head);
171 for (i = is_bl; i <= arity; ++i)
172 edges_notify_edge(res, i - 1, res->in[i], NULL, irg);
176 hook_new_node(irg, res);
181 /*-- getting some parameters from ir_nodes --*/
184 (is_ir_node)(const void *thing) {
185 return _is_ir_node(thing);
189 (get_irn_intra_arity)(const ir_node *node) {
190 return _get_irn_intra_arity(node);
194 (get_irn_inter_arity)(const ir_node *node) {
195 return _get_irn_inter_arity(node);
198 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
201 (get_irn_arity)(const ir_node *node) {
202 return _get_irn_arity(node);
205 /* Returns the array with ins. This array is shifted with respect to the
206 array accessed by get_irn_n: The block operand is at position 0 not -1.
207 (@@@ This should be changed.)
208 The order of the predecessors in this array is not guaranteed, except that
209 lists of operands as predecessors of Block or arguments of a Call are
212 get_irn_in (const ir_node *node) {
214 if (get_interprocedural_view()) { /* handle Filter and Block specially */
215 if (get_irn_opcode(node) == iro_Filter) {
216 assert(node->attr.filter.in_cg);
217 return node->attr.filter.in_cg;
218 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
219 return node->attr.block.in_cg;
221 /* else fall through */
227 set_irn_in (ir_node *node, int arity, ir_node **in) {
230 ir_graph *irg = current_ir_graph;
232 if (get_interprocedural_view()) { /* handle Filter and Block specially */
233 if (get_irn_opcode(node) == iro_Filter) {
234 assert(node->attr.filter.in_cg);
235 arr = &node->attr.filter.in_cg;
236 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
237 arr = &node->attr.block.in_cg;
245 for (i = 0; i < arity; i++) {
246 if (i < ARR_LEN(*arr)-1)
247 edges_notify_edge(node, i, in[i], (*arr)[i+1], irg);
249 edges_notify_edge(node, i, in[i], NULL, irg);
251 for(;i < ARR_LEN(*arr)-1; i++) {
252 edges_notify_edge(node, i, NULL, (*arr)[i+1], irg);
255 if (arity != ARR_LEN(*arr) - 1) {
256 ir_node * block = (*arr)[0];
257 *arr = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
260 fix_backedges(irg->obst, node);
262 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
266 (get_irn_intra_n)(const ir_node *node, int n) {
267 return _get_irn_intra_n (node, n);
271 (get_irn_inter_n)(const ir_node *node, int n) {
272 return _get_irn_inter_n (node, n);
275 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
278 (get_irn_n)(const ir_node *node, int n) {
279 return _get_irn_n(node, n);
283 set_irn_n (ir_node *node, int n, ir_node *in) {
284 assert(node && node->kind == k_ir_node);
286 assert(n < get_irn_arity(node));
287 assert(in && in->kind == k_ir_node);
289 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
290 /* Change block pred in both views! */
291 node->in[n + 1] = in;
292 assert(node->attr.filter.in_cg);
293 node->attr.filter.in_cg[n + 1] = in;
296 if (get_interprocedural_view()) { /* handle Filter and Block specially */
297 if (get_irn_opcode(node) == iro_Filter) {
298 assert(node->attr.filter.in_cg);
299 node->attr.filter.in_cg[n + 1] = in;
301 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
302 node->attr.block.in_cg[n + 1] = in;
305 /* else fall through */
309 hook_set_irn_n(node, n, in, node->in[n + 1]);
311 /* Here, we rely on src and tgt being in the current ir graph */
312 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
314 node->in[n + 1] = in;
318 (get_irn_mode)(const ir_node *node) {
319 return _get_irn_mode(node);
323 (set_irn_mode)(ir_node *node, ir_mode *mode)
325 _set_irn_mode(node, mode);
329 get_irn_modecode (const ir_node *node)
332 return node->mode->code;
/** Gets the string representation of the mode. */
337 get_irn_modename (const ir_node *node)
340 return get_mode_name(node->mode);
344 get_irn_modeident (const ir_node *node)
347 return get_mode_ident(node->mode);
351 (get_irn_op)(const ir_node *node) {
352 return _get_irn_op(node);
355 /* should be private to the library: */
357 (set_irn_op)(ir_node *node, ir_op *op) {
358 _set_irn_op(node, op);
362 (get_irn_opcode)(const ir_node *node)
364 return _get_irn_opcode(node);
368 get_irn_opname (const ir_node *node)
371 if ((get_irn_op((ir_node *)node) == op_Phi) &&
372 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
373 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
374 return get_id_str(node->op->name);
378 get_irn_opident (const ir_node *node)
381 return node->op->name;
385 (get_irn_visited)(const ir_node *node)
387 return _get_irn_visited(node);
391 (set_irn_visited)(ir_node *node, unsigned long visited)
393 _set_irn_visited(node, visited);
397 (mark_irn_visited)(ir_node *node) {
398 _mark_irn_visited(node);
402 (irn_not_visited)(const ir_node *node) {
403 return _irn_not_visited(node);
407 (irn_visited)(const ir_node *node) {
408 return _irn_visited(node);
412 (set_irn_link)(ir_node *node, void *link) {
413 _set_irn_link(node, link);
417 (get_irn_link)(const ir_node *node) {
418 return _get_irn_link(node);
422 (get_irn_pinned)(const ir_node *node) {
423 return _get_irn_pinned(node);
427 (is_irn_pinned_in_irg) (const ir_node *node) {
428 return _is_irn_pinned_in_irg(node);
431 void set_irn_pinned(ir_node *node, op_pin_state state) {
432 /* due to optimization an opt may be turned into a Tuple */
433 if (get_irn_op(node) == op_Tuple)
436 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
437 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
439 node->attr.except.pin_state = state;
442 #ifdef DO_HEAPANALYSIS
443 /* Access the abstract interpretation information of a node.
444 Returns NULL if no such information is available. */
445 struct abstval *get_irn_abst_value(ir_node *n) {
448 /* Set the abstract interpretation information of a node. */
449 void set_irn_abst_value(ir_node *n, struct abstval *os) {
452 struct section *firm_get_irn_section(ir_node *n) {
455 void firm_set_irn_section(ir_node *n, struct section *s) {
459 /* Dummies needed for firmjni. */
460 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
461 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
462 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
463 void firm_set_irn_section(ir_node *n, struct section *s) {}
464 #endif /* DO_HEAPANALYSIS */
467 /* Outputs a unique number for this node */
468 long get_irn_node_nr(const ir_node *node) {
471 return node->node_nr;
473 return (long)PTR_TO_INT(node);
478 get_irn_const_attr (ir_node *node)
480 assert (node->op == op_Const);
481 return node->attr.con;
485 get_irn_proj_attr (ir_node *node)
487 assert (node->op == op_Proj);
488 return node->attr.proj;
492 get_irn_alloc_attr (ir_node *node)
494 assert (node->op == op_Alloc);
499 get_irn_free_attr (ir_node *node)
501 assert (node->op == op_Free);
506 get_irn_symconst_attr (ir_node *node)
508 assert (node->op == op_SymConst);
513 get_irn_call_attr (ir_node *node)
515 assert (node->op == op_Call);
516 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
520 get_irn_sel_attr (ir_node *node)
522 assert (node->op == op_Sel);
527 get_irn_phi_attr (ir_node *node)
529 assert (node->op == op_Phi);
530 return node->attr.phi0_pos;
534 get_irn_block_attr (ir_node *node)
536 assert (node->op == op_Block);
537 return node->attr.block;
541 get_irn_load_attr (ir_node *node)
543 assert (node->op == op_Load);
544 return node->attr.load;
548 get_irn_store_attr (ir_node *node)
550 assert (node->op == op_Store);
551 return node->attr.store;
555 get_irn_except_attr (ir_node *node)
557 assert (node->op == op_Div || node->op == op_Quot ||
558 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
559 return node->attr.except;
563 get_irn_generic_attr (ir_node *node) {
567 unsigned (get_irn_idx)(const ir_node *node) {
568 assert(is_ir_node(node));
569 return _get_irn_idx(node);
572 /** manipulate fields of individual nodes **/
574 /* this works for all except Block */
576 get_nodes_block (const ir_node *node) {
577 assert (!(node->op == op_Block));
578 assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
579 return get_irn_n(node, -1);
583 set_nodes_block (ir_node *node, ir_node *block) {
584 assert (!(node->op == op_Block));
585 set_irn_n(node, -1, block);
588 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
589 * from Start. If so returns frame type, else Null. */
590 ir_type *is_frame_pointer(ir_node *n) {
591 if ((get_irn_op(n) == op_Proj) &&
592 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
593 ir_node *start = get_Proj_pred(n);
594 if (get_irn_op(start) == op_Start) {
595 return get_irg_frame_type(get_irn_irg(start));
601 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
602 * from Start. If so returns global type, else Null. */
603 ir_type *is_globals_pointer(ir_node *n) {
604 if ((get_irn_op(n) == op_Proj) &&
605 (get_Proj_proj(n) == pn_Start_P_globals)) {
606 ir_node *start = get_Proj_pred(n);
607 if (get_irn_op(start) == op_Start) {
608 return get_glob_type();
614 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
615 * from Start. If so returns 1, else 0. */
616 int is_value_arg_pointer(ir_node *n) {
617 if ((get_irn_op(n) == op_Proj) &&
618 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
619 (get_irn_op(get_Proj_pred(n)) == op_Start))
624 /* Returns an array with the predecessors of the Block. Depending on
625 the implementation of the graph data structure this can be a copy of
626 the internal representation of predecessors as well as the internal
627 array itself. Therefore writing to this array might obstruct the ir. */
629 get_Block_cfgpred_arr (ir_node *node)
631 assert ((node->op == op_Block));
632 return (ir_node **)&(get_irn_in(node)[1]);
636 (get_Block_n_cfgpreds)(ir_node *node) {
637 return _get_Block_n_cfgpreds(node);
641 (get_Block_cfgpred)(ir_node *node, int pos) {
642 return _get_Block_cfgpred(node, pos);
646 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
647 assert (node->op == op_Block);
648 set_irn_n(node, pos, pred);
652 (get_Block_cfgpred_block)(ir_node *node, int pos) {
653 return _get_Block_cfgpred_block(node, pos);
657 get_Block_matured (ir_node *node) {
658 assert (node->op == op_Block);
659 return (int)node->attr.block.matured;
663 set_Block_matured (ir_node *node, int matured) {
664 assert (node->op == op_Block);
665 node->attr.block.matured = matured;
669 (get_Block_block_visited)(ir_node *node) {
670 return _get_Block_block_visited(node);
674 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
675 _set_Block_block_visited(node, visit);
678 /* For this current_ir_graph must be set. */
680 (mark_Block_block_visited)(ir_node *node) {
681 _mark_Block_block_visited(node);
685 (Block_not_block_visited)(ir_node *node) {
686 return _Block_not_block_visited(node);
690 get_Block_graph_arr (ir_node *node, int pos) {
691 assert (node->op == op_Block);
692 return node->attr.block.graph_arr[pos+1];
696 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
697 assert (node->op == op_Block);
698 node->attr.block.graph_arr[pos+1] = value;
701 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
702 assert(node->op == op_Block);
703 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
704 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
705 node->attr.block.in_cg[0] = NULL;
706 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
708 /* Fix backedge array. fix_backedges() operates depending on
709 interprocedural_view. */
710 int ipv = get_interprocedural_view();
711 set_interprocedural_view(1);
712 fix_backedges(current_ir_graph->obst, node);
713 set_interprocedural_view(ipv);
716 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
719 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
720 assert(node->op == op_Block &&
721 node->attr.block.in_cg &&
722 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
723 node->attr.block.in_cg[pos + 1] = pred;
726 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
727 assert(node->op == op_Block);
728 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
731 int get_Block_cg_n_cfgpreds(ir_node * node) {
732 assert(node->op == op_Block);
733 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
736 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
737 assert(node->op == op_Block && node->attr.block.in_cg);
738 return node->attr.block.in_cg[pos + 1];
741 void remove_Block_cg_cfgpred_arr(ir_node * node) {
742 assert(node->op == op_Block);
743 node->attr.block.in_cg = NULL;
746 ir_node *(set_Block_dead)(ir_node *block) {
747 return _set_Block_dead(block);
750 int (is_Block_dead)(const ir_node *block) {
751 return _is_Block_dead(block);
754 ir_extblk *get_Block_extbb(const ir_node *block) {
756 assert(is_Block(block));
757 res = block->attr.block.extblk;
758 assert(res == NULL || is_ir_extbb(res));
762 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
763 assert(is_Block(block));
764 assert(extblk == NULL || is_ir_extbb(extblk));
765 block->attr.block.extblk = extblk;
769 get_End_n_keepalives(ir_node *end) {
770 assert (end->op == op_End);
771 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
775 get_End_keepalive(ir_node *end, int pos) {
776 assert (end->op == op_End);
777 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
781 add_End_keepalive (ir_node *end, ir_node *ka) {
782 assert (end->op == op_End);
783 ARR_APP1 (ir_node *, end->in, ka);
787 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
788 assert (end->op == op_End);
789 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
792 /* Set new keep-alives */
793 void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
795 ir_graph *irg = get_irn_irg(end);
797 /* notify that edges are deleted */
798 for (i = END_KEEPALIVE_OFFSET; i < ARR_LEN(end->in); ++i) {
799 edges_notify_edge(end, i, in[i], NULL, irg);
801 ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);
803 for (i = 0; i < n; ++i) {
804 end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
805 edges_notify_edge(end, 1 + END_KEEPALIVE_OFFSET + i, NULL, end->in[1 + END_KEEPALIVE_OFFSET + i], irg);
810 free_End (ir_node *end) {
811 assert (end->op == op_End);
814 end->in = NULL; /* @@@ make sure we get an error if we use the
815 in array afterwards ... */
818 /* Return the target address of an IJmp */
819 ir_node *get_IJmp_target(ir_node *ijmp) {
820 assert(ijmp->op == op_IJmp);
821 return get_irn_n(ijmp, 0);
824 /** Sets the target address of an IJmp */
825 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
826 assert(ijmp->op == op_IJmp);
827 set_irn_n(ijmp, 0, tgt);
831 > Implementing the case construct (which is where the constant Proj node is
832 > important) involves far more than simply determining the constant values.
833 > We could argue that this is more properly a function of the translator from
834 > Firm to the target machine. That could be done if there was some way of
835 > projecting "default" out of the Cond node.
836 I know it's complicated.
Basically there are two problems:
838 - determining the gaps between the projs
839 - determining the biggest case constant to know the proj number for
841 I see several solutions:
842 1. Introduce a ProjDefault node. Solves both problems.
843 This means to extend all optimizations executed during construction.
844 2. Give the Cond node for switch two flavors:
845 a) there are no gaps in the projs (existing flavor)
846 b) gaps may exist, default proj is still the Proj with the largest
847 projection number. This covers also the gaps.
848 3. Fix the semantic of the Cond to that of 2b)
850 Solution 2 seems to be the best:
851 Computing the gaps in the Firm representation is not too hard, i.e.,
852 libFIRM can implement a routine that transforms between the two
853 flavours. This is also possible for 1) but 2) does not require to
854 change any existing optimization.
855 Further it should be far simpler to determine the biggest constant than
857 I don't want to choose 3) as 2a) seems to have advantages for
858 dataflow analysis and 3) does not allow to convert the representation to
862 get_Cond_selector (ir_node *node) {
863 assert (node->op == op_Cond);
864 return get_irn_n(node, 0);
868 set_Cond_selector (ir_node *node, ir_node *selector) {
869 assert (node->op == op_Cond);
870 set_irn_n(node, 0, selector);
874 get_Cond_kind (ir_node *node) {
875 assert (node->op == op_Cond);
876 return node->attr.c.kind;
880 set_Cond_kind (ir_node *node, cond_kind kind) {
881 assert (node->op == op_Cond);
882 node->attr.c.kind = kind;
886 get_Cond_defaultProj (ir_node *node) {
887 assert (node->op == op_Cond);
888 return node->attr.c.default_proj;
892 get_Return_mem (ir_node *node) {
893 assert (node->op == op_Return);
894 return get_irn_n(node, 0);
898 set_Return_mem (ir_node *node, ir_node *mem) {
899 assert (node->op == op_Return);
900 set_irn_n(node, 0, mem);
904 get_Return_n_ress (ir_node *node) {
905 assert (node->op == op_Return);
906 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
910 get_Return_res_arr (ir_node *node)
912 assert ((node->op == op_Return));
913 if (get_Return_n_ress(node) > 0)
914 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
921 set_Return_n_res (ir_node *node, int results) {
922 assert (node->op == op_Return);
927 get_Return_res (ir_node *node, int pos) {
928 assert (node->op == op_Return);
929 assert (get_Return_n_ress(node) > pos);
930 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
934 set_Return_res (ir_node *node, int pos, ir_node *res){
935 assert (node->op == op_Return);
936 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
939 tarval *(get_Const_tarval)(ir_node *node) {
940 return _get_Const_tarval(node);
944 set_Const_tarval (ir_node *node, tarval *con) {
945 assert (node->op == op_Const);
946 node->attr.con.tv = con;
949 cnst_classify_t (classify_Const)(ir_node *node)
951 return _classify_Const(node);
955 /* The source language type. Must be an atomic type. Mode of type must
956 be mode of node. For tarvals from entities type must be pointer to
959 get_Const_type (ir_node *node) {
960 assert (node->op == op_Const);
961 return node->attr.con.tp;
965 set_Const_type (ir_node *node, ir_type *tp) {
966 assert (node->op == op_Const);
967 if (tp != firm_unknown_type) {
968 assert (is_atomic_type(tp));
969 assert (get_type_mode(tp) == get_irn_mode(node));
971 node->attr.con.tp = tp;
976 get_SymConst_kind (const ir_node *node) {
977 assert (node->op == op_SymConst);
978 return node->attr.i.num;
982 set_SymConst_kind (ir_node *node, symconst_kind num) {
983 assert (node->op == op_SymConst);
984 node->attr.i.num = num;
988 get_SymConst_type (ir_node *node) {
989 assert ( (node->op == op_SymConst)
990 && ( get_SymConst_kind(node) == symconst_type_tag
991 || get_SymConst_kind(node) == symconst_size));
992 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
996 set_SymConst_type (ir_node *node, ir_type *tp) {
997 assert ( (node->op == op_SymConst)
998 && ( get_SymConst_kind(node) == symconst_type_tag
999 || get_SymConst_kind(node) == symconst_size));
1000 node->attr.i.sym.type_p = tp;
1004 get_SymConst_name (ir_node *node) {
1005 assert ( (node->op == op_SymConst)
1006 && (get_SymConst_kind(node) == symconst_addr_name));
1007 return node->attr.i.sym.ident_p;
1011 set_SymConst_name (ir_node *node, ident *name) {
1012 assert ( (node->op == op_SymConst)
1013 && (get_SymConst_kind(node) == symconst_addr_name));
1014 node->attr.i.sym.ident_p = name;
1018 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1019 entity *get_SymConst_entity (ir_node *node) {
1020 assert ( (node->op == op_SymConst)
1021 && (get_SymConst_kind (node) == symconst_addr_ent));
1022 return node->attr.i.sym.entity_p;
1025 void set_SymConst_entity (ir_node *node, entity *ent) {
1026 assert ( (node->op == op_SymConst)
1027 && (get_SymConst_kind(node) == symconst_addr_ent));
1028 node->attr.i.sym.entity_p = ent;
1031 union symconst_symbol
1032 get_SymConst_symbol (ir_node *node) {
1033 assert (node->op == op_SymConst);
1034 return node->attr.i.sym;
1038 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1039 assert (node->op == op_SymConst);
1040 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1041 node->attr.i.sym = sym;
1045 get_SymConst_value_type (ir_node *node) {
1046 assert (node->op == op_SymConst);
1047 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1048 return node->attr.i.tp;
1052 set_SymConst_value_type (ir_node *node, ir_type *tp) {
1053 assert (node->op == op_SymConst);
1054 node->attr.i.tp = tp;
1058 get_Sel_mem (ir_node *node) {
1059 assert (node->op == op_Sel);
1060 return get_irn_n(node, 0);
1064 set_Sel_mem (ir_node *node, ir_node *mem) {
1065 assert (node->op == op_Sel);
1066 set_irn_n(node, 0, mem);
1070 get_Sel_ptr (ir_node *node) {
1071 assert (node->op == op_Sel);
1072 return get_irn_n(node, 1);
1076 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1077 assert (node->op == op_Sel);
1078 set_irn_n(node, 1, ptr);
1082 get_Sel_n_indexs (ir_node *node) {
1083 assert (node->op == op_Sel);
1084 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1088 get_Sel_index_arr (ir_node *node)
1090 assert ((node->op == op_Sel));
1091 if (get_Sel_n_indexs(node) > 0)
1092 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1098 get_Sel_index (ir_node *node, int pos) {
1099 assert (node->op == op_Sel);
1100 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1104 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1105 assert (node->op == op_Sel);
1106 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1110 get_Sel_entity (ir_node *node) {
1111 assert (node->op == op_Sel);
1112 return node->attr.s.ent;
1116 set_Sel_entity (ir_node *node, entity *ent) {
1117 assert (node->op == op_Sel);
1118 node->attr.s.ent = ent;
1122 /* For unary and binary arithmetic operations the access to the
1123 operands can be factored out. Left is the first, right the
1124 second arithmetic value as listed in tech report 0999-33.
1125 unops are: Minus, Abs, Not, Conv, Cast
1126 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1127 Shr, Shrs, Rotate, Cmp */
1131 get_Call_mem (ir_node *node) {
1132 assert (node->op == op_Call);
1133 return get_irn_n(node, 0);
1137 set_Call_mem (ir_node *node, ir_node *mem) {
1138 assert (node->op == op_Call);
1139 set_irn_n(node, 0, mem);
1143 get_Call_ptr (ir_node *node) {
1144 assert (node->op == op_Call);
1145 return get_irn_n(node, 1);
1149 set_Call_ptr (ir_node *node, ir_node *ptr) {
1150 assert (node->op == op_Call);
1151 set_irn_n(node, 1, ptr);
1155 get_Call_param_arr (ir_node *node) {
1156 assert (node->op == op_Call);
1157 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1161 get_Call_n_params (ir_node *node) {
1162 assert (node->op == op_Call);
1163 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1167 get_Call_arity (ir_node *node) {
1168 assert (node->op == op_Call);
1169 return get_Call_n_params(node);
1173 set_Call_arity (ir_node *node, ir_node *arity) {
1174 assert (node->op == op_Call);
1179 get_Call_param (ir_node *node, int pos) {
1180 assert (node->op == op_Call);
1181 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1185 set_Call_param (ir_node *node, int pos, ir_node *param) {
1186 assert (node->op == op_Call);
1187 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1191 get_Call_type (ir_node *node) {
1192 assert (node->op == op_Call);
1193 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1197 set_Call_type (ir_node *node, ir_type *tp) {
1198 assert (node->op == op_Call);
1199 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1200 node->attr.call.cld_tp = tp;
1203 int Call_has_callees(ir_node *node) {
1204 assert(node && node->op == op_Call);
1205 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1206 (node->attr.call.callee_arr != NULL));
1209 int get_Call_n_callees(ir_node * node) {
1210 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1211 return ARR_LEN(node->attr.call.callee_arr);
1214 entity * get_Call_callee(ir_node * node, int pos) {
1215 assert(pos >= 0 && pos < get_Call_n_callees(node));
1216 return node->attr.call.callee_arr[pos];
1219 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1220 assert(node->op == op_Call);
1221 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1222 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1224 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1227 void remove_Call_callee_arr(ir_node * node) {
1228 assert(node->op == op_Call);
1229 node->attr.call.callee_arr = NULL;
1232 ir_node * get_CallBegin_ptr (ir_node *node) {
1233 assert(node->op == op_CallBegin);
1234 return get_irn_n(node, 0);
1236 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1237 assert(node->op == op_CallBegin);
1238 set_irn_n(node, 0, ptr);
1240 ir_node * get_CallBegin_call (ir_node *node) {
1241 assert(node->op == op_CallBegin);
1242 return node->attr.callbegin.call;
1244 void set_CallBegin_call (ir_node *node, ir_node *call) {
1245 assert(node->op == op_CallBegin);
1246 node->attr.callbegin.call = call;
1251 ir_node * get_##OP##_left(ir_node *node) { \
1252 assert(node->op == op_##OP); \
1253 return get_irn_n(node, node->op->op_index); \
1255 void set_##OP##_left(ir_node *node, ir_node *left) { \
1256 assert(node->op == op_##OP); \
1257 set_irn_n(node, node->op->op_index, left); \
1259 ir_node *get_##OP##_right(ir_node *node) { \
1260 assert(node->op == op_##OP); \
1261 return get_irn_n(node, node->op->op_index + 1); \
1263 void set_##OP##_right(ir_node *node, ir_node *right) { \
1264 assert(node->op == op_##OP); \
1265 set_irn_n(node, node->op->op_index + 1, right); \
1269 ir_node *get_##OP##_op(ir_node *node) { \
1270 assert(node->op == op_##OP); \
1271 return get_irn_n(node, node->op->op_index); \
1273 void set_##OP##_op (ir_node *node, ir_node *op) { \
1274 assert(node->op == op_##OP); \
1275 set_irn_n(node, node->op->op_index, op); \
1285 get_Quot_mem (ir_node *node) {
1286 assert (node->op == op_Quot);
1287 return get_irn_n(node, 0);
1291 set_Quot_mem (ir_node *node, ir_node *mem) {
1292 assert (node->op == op_Quot);
1293 set_irn_n(node, 0, mem);
1299 get_DivMod_mem (ir_node *node) {
1300 assert (node->op == op_DivMod);
1301 return get_irn_n(node, 0);
1305 set_DivMod_mem (ir_node *node, ir_node *mem) {
1306 assert (node->op == op_DivMod);
1307 set_irn_n(node, 0, mem);
1313 get_Div_mem (ir_node *node) {
1314 assert (node->op == op_Div);
1315 return get_irn_n(node, 0);
1319 set_Div_mem (ir_node *node, ir_node *mem) {
1320 assert (node->op == op_Div);
1321 set_irn_n(node, 0, mem);
1327 get_Mod_mem (ir_node *node) {
1328 assert (node->op == op_Mod);
1329 return get_irn_n(node, 0);
1333 set_Mod_mem (ir_node *node, ir_node *mem) {
1334 assert (node->op == op_Mod);
1335 set_irn_n(node, 0, mem);
1352 get_Cast_type (ir_node *node) {
1353 assert (node->op == op_Cast);
1354 return node->attr.cast.totype;
1358 set_Cast_type (ir_node *node, ir_type *to_tp) {
1359 assert (node->op == op_Cast);
1360 node->attr.cast.totype = to_tp;
1364 /* Checks for upcast.
1366 * Returns true if the Cast node casts a class type to a super type.
1368 int is_Cast_upcast(ir_node *node) {
1369 ir_type *totype = get_Cast_type(node);
1370 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1371 ir_graph *myirg = get_irn_irg(node);
1373 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1376 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1377 totype = get_pointer_points_to_type(totype);
1378 fromtype = get_pointer_points_to_type(fromtype);
1383 if (!is_Class_type(totype)) return 0;
1384 return is_SubClass_of(fromtype, totype);
1387 /* Checks for downcast.
1389 * Returns true if the Cast node casts a class type to a sub type.
1391 int is_Cast_downcast(ir_node *node) {
1392 ir_type *totype = get_Cast_type(node);
1393 ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1395 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1398 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1399 totype = get_pointer_points_to_type(totype);
1400 fromtype = get_pointer_points_to_type(fromtype);
1405 if (!is_Class_type(totype)) return 0;
1406 return is_SubClass_of(totype, fromtype);
/* Generic unary/binary operand accessors.
 * node->op->op_index gives the position of the first operand; a binary
 * op's second operand sits at op_index + 1.  The if-before-assert shape
 * keeps release builds returning/ignoring gracefully while debug builds
 * still trip the assert for wrong arity. */
1410 (is_unop)(const ir_node *node) {
1411 return _is_unop(node);
1415 get_unop_op (ir_node *node) {
1416 if (node->op->opar == oparity_unary)
1417 return get_irn_n(node, node->op->op_index);
1419 assert(node->op->opar == oparity_unary);
1424 set_unop_op (ir_node *node, ir_node *op) {
1425 if (node->op->opar == oparity_unary)
1426 set_irn_n(node, node->op->op_index, op);
1428 assert(node->op->opar == oparity_unary);
1432 (is_binop)(const ir_node *node) {
1433 return _is_binop(node);
1437 get_binop_left (ir_node *node) {
1438 if (node->op->opar == oparity_binary)
1439 return get_irn_n(node, node->op->op_index);
1441 assert(node->op->opar == oparity_binary);
1446 set_binop_left (ir_node *node, ir_node *left) {
1447 if (node->op->opar == oparity_binary)
1448 set_irn_n(node, node->op->op_index, left);
1450 assert (node->op->opar == oparity_binary);
1454 get_binop_right (ir_node *node) {
1455 if (node->op->opar == oparity_binary)
1456 return get_irn_n(node, node->op->op_index + 1);
1458 assert(node->op->opar == oparity_binary);
1463 set_binop_right (ir_node *node, ir_node *right) {
1464 if (node->op->opar == oparity_binary)
1465 set_irn_n(node, node->op->op_index + 1, right);
1467 assert (node->op->opar == oparity_binary);
/* Phi predicates and accessors.
 * is_Phi also counts Filter nodes as Phis when the interprocedural view
 * is active; during graph construction a Phi with arity 0 is a Phi0
 * placeholder and is NOT a real Phi. */
1470 int is_Phi (const ir_node *n) {
1476 if (op == op_Filter) return get_interprocedural_view();
1479 return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
1480 (get_irn_arity(n) > 0));
/* Phi0: zero-arity Phi placeholder, only valid while the graph is being built. */
1485 int is_Phi0 (const ir_node *n) {
1488 return ((get_irn_op(n) == op_Phi) &&
1489 (get_irn_arity(n) == 0) &&
1490 (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Raw access to the predecessor array; in[0] is the block, so skip it. */
1494 get_Phi_preds_arr (ir_node *node) {
1495 assert (node->op == op_Phi);
1496 return (ir_node **)&(get_irn_in(node)[1]);
1500 get_Phi_n_preds (ir_node *node) {
1501 assert (is_Phi(node) || is_Phi0(node));
1502 return (get_irn_arity(node));
1506 void set_Phi_n_preds (ir_node *node, int n_preds) {
1507 assert (node->op == op_Phi);
1512 get_Phi_pred (ir_node *node, int pos) {
1513 assert (is_Phi(node) || is_Phi0(node));
1514 return get_irn_n(node, pos);
1518 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1519 assert (is_Phi(node) || is_Phi0(node));
1520 set_irn_n(node, pos, pred);
/* Generic memory-operation accessors: Load and Store share the same
 * layout for their first two inputs (0 = memory, 1 = pointer). */
1524 int is_memop(ir_node *node) {
1525 return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
1528 ir_node *get_memop_mem (ir_node *node) {
1529 assert(is_memop(node));
1530 return get_irn_n(node, 0);
1533 void set_memop_mem (ir_node *node, ir_node *mem) {
1534 assert(is_memop(node));
1535 set_irn_n(node, 0, mem);
1538 ir_node *get_memop_ptr (ir_node *node) {
1539 assert(is_memop(node));
1540 return get_irn_n(node, 1);
1543 void set_memop_ptr (ir_node *node, ir_node *ptr) {
1544 assert(is_memop(node));
1545 set_irn_n(node, 1, ptr);
/* Load accessors: input 0 = memory, input 1 = address; the loaded mode
 * and volatility are attributes (attr.load). */
1549 get_Load_mem (ir_node *node) {
1550 assert (node->op == op_Load);
1551 return get_irn_n(node, 0);
1555 set_Load_mem (ir_node *node, ir_node *mem) {
1556 assert (node->op == op_Load);
1557 set_irn_n(node, 0, mem);
1561 get_Load_ptr (ir_node *node) {
1562 assert (node->op == op_Load);
1563 return get_irn_n(node, 1);
1567 set_Load_ptr (ir_node *node, ir_node *ptr) {
1568 assert (node->op == op_Load);
1569 set_irn_n(node, 1, ptr);
1573 get_Load_mode (ir_node *node) {
1574 assert (node->op == op_Load);
1575 return node->attr.load.load_mode;
1579 set_Load_mode (ir_node *node, ir_mode *mode) {
1580 assert (node->op == op_Load);
1581 node->attr.load.load_mode = mode;
1585 get_Load_volatility (ir_node *node) {
1586 assert (node->op == op_Load);
1587 return node->attr.load.volatility;
1591 set_Load_volatility (ir_node *node, ent_volatility volatility) {
1592 assert (node->op == op_Load);
1593 node->attr.load.volatility = volatility;
/* Store accessors: input 0 = memory, input 1 = address, input 2 = value;
 * volatility is an attribute (attr.store). */
1598 get_Store_mem (ir_node *node) {
1599 assert (node->op == op_Store);
1600 return get_irn_n(node, 0);
1604 set_Store_mem (ir_node *node, ir_node *mem) {
1605 assert (node->op == op_Store);
1606 set_irn_n(node, 0, mem);
1610 get_Store_ptr (ir_node *node) {
1611 assert (node->op == op_Store);
1612 return get_irn_n(node, 1);
1616 set_Store_ptr (ir_node *node, ir_node *ptr) {
1617 assert (node->op == op_Store);
1618 set_irn_n(node, 1, ptr);
1622 get_Store_value (ir_node *node) {
1623 assert (node->op == op_Store);
1624 return get_irn_n(node, 2);
1628 set_Store_value (ir_node *node, ir_node *value) {
1629 assert (node->op == op_Store);
1630 set_irn_n(node, 2, value);
1634 get_Store_volatility (ir_node *node) {
1635 assert (node->op == op_Store);
1636 return node->attr.store.volatility;
1640 set_Store_volatility (ir_node *node, ent_volatility volatility) {
1641 assert (node->op == op_Store);
1642 node->attr.store.volatility = volatility;
/* Alloc accessors: input 0 = memory, input 1 = size; allocated type and
 * placement (stack/heap) are attributes (attr.a). */
1647 get_Alloc_mem (ir_node *node) {
1648 assert (node->op == op_Alloc);
1649 return get_irn_n(node, 0);
1653 set_Alloc_mem (ir_node *node, ir_node *mem) {
1654 assert (node->op == op_Alloc);
1655 set_irn_n(node, 0, mem);
1659 get_Alloc_size (ir_node *node) {
1660 assert (node->op == op_Alloc);
1661 return get_irn_n(node, 1);
1665 set_Alloc_size (ir_node *node, ir_node *size) {
1666 assert (node->op == op_Alloc);
1667 set_irn_n(node, 1, size);
1671 get_Alloc_type (ir_node *node) {
1672 assert (node->op == op_Alloc);
/* resolve type-id indirections and cache the result back into the attribute */
1673 return node->attr.a.type = skip_tid(node->attr.a.type);
1677 set_Alloc_type (ir_node *node, ir_type *tp) {
1678 assert (node->op == op_Alloc);
1679 node->attr.a.type = tp;
1683 get_Alloc_where (ir_node *node) {
1684 assert (node->op == op_Alloc);
1685 return node->attr.a.where;
1689 set_Alloc_where (ir_node *node, where_alloc where) {
1690 assert (node->op == op_Alloc);
1691 node->attr.a.where = where;
/* Free accessors: input 0 = memory, input 1 = pointer, input 2 = size;
 * freed type and placement are attributes (attr.f), mirroring Alloc. */
1696 get_Free_mem (ir_node *node) {
1697 assert (node->op == op_Free);
1698 return get_irn_n(node, 0);
1702 set_Free_mem (ir_node *node, ir_node *mem) {
1703 assert (node->op == op_Free);
1704 set_irn_n(node, 0, mem);
1708 get_Free_ptr (ir_node *node) {
1709 assert (node->op == op_Free);
1710 return get_irn_n(node, 1);
1714 set_Free_ptr (ir_node *node, ir_node *ptr) {
1715 assert (node->op == op_Free);
1716 set_irn_n(node, 1, ptr);
1720 get_Free_size (ir_node *node) {
1721 assert (node->op == op_Free);
1722 return get_irn_n(node, 2);
1726 set_Free_size (ir_node *node, ir_node *size) {
1727 assert (node->op == op_Free);
1728 set_irn_n(node, 2, size);
1732 get_Free_type (ir_node *node) {
1733 assert (node->op == op_Free);
/* resolve type-id indirections and cache back, same idiom as get_Alloc_type */
1734 return node->attr.f.type = skip_tid(node->attr.f.type);
1738 set_Free_type (ir_node *node, ir_type *tp) {
1739 assert (node->op == op_Free);
1740 node->attr.f.type = tp;
1744 get_Free_where (ir_node *node) {
1745 assert (node->op == op_Free);
1746 return node->attr.f.where;
1750 set_Free_where (ir_node *node, where_alloc where) {
1751 assert (node->op == op_Free);
1752 node->attr.f.where = where;
/* Sync accessors: a Sync merges an arbitrary number of memory values;
 * in[0] is the block, so the raw-array accessor skips it. */
1756 get_Sync_preds_arr (ir_node *node) {
1757 assert (node->op == op_Sync);
1758 return (ir_node **)&(get_irn_in(node)[1]);
1762 get_Sync_n_preds (ir_node *node) {
1763 assert (node->op == op_Sync);
1764 return (get_irn_arity(node));
1769 set_Sync_n_preds (ir_node *node, int n_preds) {
1770 assert (node->op == op_Sync);
1775 get_Sync_pred (ir_node *node, int pos) {
1776 assert (node->op == op_Sync);
1777 return get_irn_n(node, pos);
1781 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
1782 assert (node->op == op_Sync);
1783 set_irn_n(node, pos, pred);
/* Returns the type produced by a Proj node, derived from its predecessor:
 * - Proj-Proj-Start: the type of the corresponding method parameter,
 * - Proj-Proj-Call:  the type of the corresponding call result,
 * - Proj of a Load:  the entity type behind the Load's Sel address.
 * NOTE(review): extraction has elided some lines of this switch; verify
 * against the full file before editing. */
1786 ir_type *get_Proj_type(ir_node *n)
1789 ir_node *pred = get_Proj_pred(n);
1791 switch (get_irn_opcode(pred)) {
1794 /* Deal with Start / Call here: we need to know the Proj Nr. */
1795 assert(get_irn_mode(pred) == mode_T);
1796 pred_pred = get_Proj_pred(pred);
1797 if (get_irn_op(pred_pred) == op_Start) {
1798 ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
1799 tp = get_method_param_type(mtp, get_Proj_proj(n));
1800 } else if (get_irn_op(pred_pred) == op_Call) {
1801 ir_type *mtp = get_Call_type(pred_pred);
1802 tp = get_method_res_type(mtp, get_Proj_proj(n));
1805 case iro_Start: break;
1806 case iro_Call: break;
1808 ir_node *a = get_Load_ptr(pred);
1810 tp = get_entity_type(get_Sel_entity(a));
/* Proj accessors: input 0 is the projected node; the projection number
 * lives in an attribute.  Filter nodes act as Projs in the
 * interprocedural view, hence the opcode dispatch in get_Proj_proj. */
1819 get_Proj_pred (const ir_node *node) {
1820 assert (is_Proj(node));
1821 return get_irn_n(node, 0);
1825 set_Proj_pred (ir_node *node, ir_node *pred) {
1826 assert (is_Proj(node));
1827 set_irn_n(node, 0, pred);
1831 get_Proj_proj (const ir_node *node) {
1832 assert (is_Proj(node));
1833 if (get_irn_opcode(node) == iro_Proj) {
1834 return node->attr.proj;
1836 assert(get_irn_opcode(node) == iro_Filter);
1837 return node->attr.filter.proj;
/* the setter only supports real Proj nodes, not Filter */
1842 set_Proj_proj (ir_node *node, long proj) {
1843 assert (node->op == op_Proj);
1844 node->attr.proj = proj;
/* Tuple and Id accessors.  Tuple: variable predecessor list (in[0] is
 * the block, skipped by the raw-array accessor).  Id: single input 0. */
1848 get_Tuple_preds_arr (ir_node *node) {
1849 assert (node->op == op_Tuple);
1850 return (ir_node **)&(get_irn_in(node)[1]);
1854 get_Tuple_n_preds (ir_node *node) {
1855 assert (node->op == op_Tuple);
1856 return (get_irn_arity(node));
1861 set_Tuple_n_preds (ir_node *node, int n_preds) {
1862 assert (node->op == op_Tuple);
1867 get_Tuple_pred (ir_node *node, int pos) {
1868 assert (node->op == op_Tuple);
1869 return get_irn_n(node, pos);
1873 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
1874 assert (node->op == op_Tuple);
1875 set_irn_n(node, pos, pred);
1879 get_Id_pred (ir_node *node) {
1880 assert (node->op == op_Id);
1881 return get_irn_n(node, 0);
1885 set_Id_pred (ir_node *node, ir_node *pred) {
1886 assert (node->op == op_Id);
1887 set_irn_n(node, 0, pred);
1890 ir_node *get_Confirm_value (ir_node *node) {
1891 assert (node->op == op_Confirm);
1892 return get_irn_n(node, 0);
1894 void set_Confirm_value (ir_node *node, ir_node *value) {
1895 assert (node->op == op_Confirm);
1896 set_irn_n(node, 0, value);
1898 ir_node *get_Confirm_bound (ir_node *node) {
1899 assert (node->op == op_Confirm);
1900 return get_irn_n(node, 1);
1902 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1903 assert (node->op == op_Confirm);
1904 set_irn_n(node, 0, bound);
1906 pn_Cmp get_Confirm_cmp (ir_node *node) {
1907 assert (node->op == op_Confirm);
1908 return node->attr.confirm_cmp;
1910 void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
1911 assert (node->op == op_Confirm);
1912 node->attr.confirm_cmp = cmp;
/* Filter accessors.  A Filter behaves like a Proj in the
 * interprocedural view; its interprocedural predecessors are kept in a
 * separate attribute array in_cg (slot 0 mirrors the block, callers use
 * pos + 1). */
1917 get_Filter_pred (ir_node *node) {
1918 assert(node->op == op_Filter);
1922 set_Filter_pred (ir_node *node, ir_node *pred) {
1923 assert(node->op == op_Filter);
1927 get_Filter_proj(ir_node *node) {
1928 assert(node->op == op_Filter);
1929 return node->attr.filter.proj;
1932 set_Filter_proj (ir_node *node, long proj) {
1933 assert(node->op == op_Filter);
1934 node->attr.filter.proj = proj;
1937 /* Don't use get_irn_arity, get_irn_n in implementation as access
1938 shall work independent of view!!! */
1939 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
1940 assert(node->op == op_Filter);
/* (re)allocate in_cg/backedge on the graph obstack when the arity changes */
1941 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
1942 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
1943 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
1944 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
/* slot 0 of in_cg mirrors the block pointer from the regular in array */
1945 node->attr.filter.in_cg[0] = node->in[0];
1947 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
1950 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
1951 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1952 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
1953 node->attr.filter.in_cg[pos + 1] = pred;
1955 int get_Filter_n_cg_preds(ir_node *node) {
1956 assert(node->op == op_Filter && node->attr.filter.in_cg);
1957 return (ARR_LEN(node->attr.filter.in_cg) - 1);
1959 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
1961 assert(node->op == op_Filter && node->attr.filter.in_cg &&
1963 arity = ARR_LEN(node->attr.filter.in_cg);
1964 assert(pos < arity - 1);
1965 return node->attr.filter.in_cg[pos + 1];
/* Mux accessors.  A Psi with exactly one condition (arity 3) is treated
 * as a Mux, so each accessor first dispatches on op_Psi.
 * NOTE(review): the setters write node->in[2] / node->in[3] directly
 * (in[0] is the block, so these are predecessors 1 and 2 = false/true),
 * bypassing set_irn_n and thus any edge bookkeeping — presumably
 * intentional here; confirm before changing. */
1969 ir_node *get_Mux_sel (ir_node *node) {
1970 if (node->op == op_Psi) {
1971 assert(get_irn_arity(node) == 3);
1972 return get_Psi_cond(node, 0);
1974 assert(node->op == op_Mux);
1977 void set_Mux_sel (ir_node *node, ir_node *sel) {
1978 if (node->op == op_Psi) {
1979 assert(get_irn_arity(node) == 3);
1980 set_Psi_cond(node, 0, sel);
1983 assert(node->op == op_Mux);
1988 ir_node *get_Mux_false (ir_node *node) {
1989 if (node->op == op_Psi) {
1990 assert(get_irn_arity(node) == 3);
1991 return get_Psi_default(node);
1993 assert(node->op == op_Mux);
1996 void set_Mux_false (ir_node *node, ir_node *ir_false) {
1997 if (node->op == op_Psi) {
1998 assert(get_irn_arity(node) == 3);
1999 set_Psi_default(node, ir_false);
2002 assert(node->op == op_Mux);
2003 node->in[2] = ir_false;
2007 ir_node *get_Mux_true (ir_node *node) {
2008 if (node->op == op_Psi) {
2009 assert(get_irn_arity(node) == 3);
2010 return get_Psi_val(node, 0);
2012 assert(node->op == op_Mux);
2015 void set_Mux_true (ir_node *node, ir_node *ir_true) {
2016 if (node->op == op_Psi) {
2017 assert(get_irn_arity(node) == 3);
2018 set_Psi_val(node, 0, ir_true);
2021 assert(node->op == op_Mux);
2022 node->in[3] = ir_true;
/* Psi condition/value accessors.  A Psi interleaves its inputs as
 * (cond0, val0, cond1, val1, ..., default): condition i sits at input
 * 2*i, value i at input 2*i + 1. */
2027 ir_node *get_Psi_cond (ir_node *node, int pos) {
2028 int num_conds = get_Psi_n_conds(node);
2029 assert(node->op == op_Psi);
2030 assert(pos < num_conds);
2031 return get_irn_n(node, 2 * pos);
2034 void set_Psi_cond (ir_node *node, int pos, ir_node *cond) {
2035 int num_conds = get_Psi_n_conds(node);
2036 assert(node->op == op_Psi);
2037 assert(pos < num_conds);
2038 set_irn_n(node, 2 * pos, cond);
2041 ir_node *get_Psi_val (ir_node *node, int pos) {
2042 int num_vals = get_Psi_n_conds(node);
2043 assert(node->op == op_Psi);
2044 assert(pos < num_vals);
2045 return get_irn_n(node, 2 * pos + 1);
2048 void set_Psi_val (ir_node *node, int pos, ir_node *val) {
2049 int num_vals = get_Psi_n_conds(node);
2050 assert(node->op == op_Psi);
2051 assert(pos < num_vals);
2052 set_irn_n(node, 2 * pos + 1, val);
2055 ir_node *get_Psi_default(ir_node *node) {
2056 int def_pos = get_irn_arity(node) - 1;
2057 assert(node->op == op_Psi);
2058 return get_irn_n(node, def_pos);
2061 void set_Psi_default(ir_node *node, ir_node *val) {
2062 int def_pos = get_irn_arity(node);
2063 assert(node->op == op_Psi);
2064 set_irn_n(node, def_pos, val);
2067 int (get_Psi_n_conds)(ir_node *node) {
2068 return _get_Psi_n_conds(node);
/* CopyB (block copy) accessors: input 0 = memory, input 1 = destination,
 * input 2 = source; the copied compound type is an attribute. */
2072 ir_node *get_CopyB_mem (ir_node *node) {
2073 assert (node->op == op_CopyB);
2074 return get_irn_n(node, 0);
2077 void set_CopyB_mem (ir_node *node, ir_node *mem) {
2078 assert (node->op == op_CopyB);
2079 set_irn_n(node, 0, mem);
2082 ir_node *get_CopyB_dst (ir_node *node) {
2083 assert (node->op == op_CopyB);
2084 return get_irn_n(node, 1);
2087 void set_CopyB_dst (ir_node *node, ir_node *dst) {
2088 assert (node->op == op_CopyB);
2089 set_irn_n(node, 1, dst);
2092 ir_node *get_CopyB_src (ir_node *node) {
2093 assert (node->op == op_CopyB);
2094 return get_irn_n(node, 2);
2097 void set_CopyB_src (ir_node *node, ir_node *src) {
2098 assert (node->op == op_CopyB);
2099 set_irn_n(node, 2, src);
2102 ir_type *get_CopyB_type(ir_node *node) {
2103 assert (node->op == op_CopyB);
2104 return node->attr.copyb.data_type;
/* the data type must not be NULL, hence the extra assert condition */
2107 void set_CopyB_type(ir_node *node, ir_type *data_type) {
2108 assert (node->op == op_CopyB && data_type);
2109 node->attr.copyb.data_type = data_type;
2114 get_InstOf_type (ir_node *node) {
2115 assert (node->op = op_InstOf);
2116 return node->attr.io.type;
2120 set_InstOf_type (ir_node *node, ir_type *type) {
2121 assert (node->op = op_InstOf);
2122 node->attr.io.type = type;
2126 get_InstOf_store (ir_node *node) {
2127 assert (node->op = op_InstOf);
2128 return get_irn_n(node, 0);
2132 set_InstOf_store (ir_node *node, ir_node *obj) {
2133 assert (node->op = op_InstOf);
2134 set_irn_n(node, 0, obj);
2138 get_InstOf_obj (ir_node *node) {
2139 assert (node->op = op_InstOf);
2140 return get_irn_n(node, 1);
2144 set_InstOf_obj (ir_node *node, ir_node *obj) {
2145 assert (node->op = op_InstOf);
2146 set_irn_n(node, 1, obj);
2149 /* Returns the memory input of a Raise operation. */
2151 get_Raise_mem (ir_node *node) {
2152 assert (node->op == op_Raise);
2153 return get_irn_n(node, 0);
2157 set_Raise_mem (ir_node *node, ir_node *mem) {
2158 assert (node->op == op_Raise);
2159 set_irn_n(node, 0, mem);
/* Returns the exception-object pointer input (in 1) of a Raise operation. */
2163 get_Raise_exo_ptr (ir_node *node) {
2164 assert (node->op == op_Raise);
2165 return get_irn_n(node, 1);
2169 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
2170 assert (node->op == op_Raise);
2171 set_irn_n(node, 1, exo_ptr);
/* Bound accessors: input 0 = memory, 1 = index, 2 = lower bound,
 * 3 = upper bound (a range check on the index). */
2176 /* Returns the memory input of a Bound operation. */
2177 ir_node *get_Bound_mem(ir_node *bound) {
2178 assert (bound->op == op_Bound);
2179 return get_irn_n(bound, 0);
2182 void set_Bound_mem (ir_node *bound, ir_node *mem) {
2183 assert (bound->op == op_Bound);
2184 set_irn_n(bound, 0, mem);
2187 /* Returns the index input of a Bound operation. */
2188 ir_node *get_Bound_index(ir_node *bound) {
2189 assert (bound->op == op_Bound);
2190 return get_irn_n(bound, 1);
2193 void set_Bound_index(ir_node *bound, ir_node *idx) {
2194 assert (bound->op == op_Bound);
2195 set_irn_n(bound, 1, idx);
2198 /* Returns the lower bound input of a Bound operation. */
2199 ir_node *get_Bound_lower(ir_node *bound) {
2200 assert (bound->op == op_Bound);
2201 return get_irn_n(bound, 2);
2204 void set_Bound_lower(ir_node *bound, ir_node *lower) {
2205 assert (bound->op == op_Bound);
2206 set_irn_n(bound, 2, lower);
2209 /* Returns the upper bound input of a Bound operation. */
2210 ir_node *get_Bound_upper(ir_node *bound) {
2211 assert (bound->op == op_Bound);
2212 return get_irn_n(bound, 3);
2215 void set_Bound_upper(ir_node *bound, ir_node *upper) {
2216 assert (bound->op == op_Bound);
2217 set_irn_n(bound, 3, upper);
2220 /* returns the graph of a node */
2222 get_irn_irg(const ir_node *node) {
2224 * Do not use get_nodes_Block() here, because this
2225 * will check the pinned state.
2226 * However even a 'wrong' block is always in the proper
/* walk node -> block (input -1); a Bad predecessor may stand in for the
 * block after optimization, so hop once more in that case */
2229 if (! is_Block(node))
2230 node = get_irn_n(node, -1);
2231 if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
2232 node = get_irn_n(node, -1);
2233 assert(get_irn_op(node) == op_Block);
2234 return node->attr.block.irg;
2238 /*----------------------------------------------------------------*/
2239 /* Auxiliary routines */
2240 /*----------------------------------------------------------------*/
/* Returns the predecessor if node is a Proj, otherwise node itself. */
2243 skip_Proj (ir_node *node) {
2244 /* don't assert node !!! */
2245 if (node && is_Proj(node)) {
2246 return get_Proj_pred(node);
/* Resolves Proj-of-Tuple chains to the real predecessor; a no-op when
 * normalization is switched off. */
2253 skip_Tuple (ir_node *node) {
2257 if (!get_opt_normalize()) return node;
2260 node = skip_Id(node);
2261 if (get_irn_op(node) == op_Proj) {
2262 pred = skip_Id(get_Proj_pred(node));
2263 op = get_irn_op(pred);
2266 * Looks strange but calls get_irn_op() only once
2267 * in most often cases.
2269 if (op == op_Proj) { /* nested Tuple ? */
2270 pred = skip_Id(skip_Tuple(pred));
2271 op = get_irn_op(pred);
2273 if (op == op_Tuple) {
2274 node = get_Tuple_pred(pred, get_Proj_proj(node));
2278 else if (op == op_Tuple) {
2279 node = get_Tuple_pred(pred, get_Proj_proj(node));
2286 /* returns operand of node if node is a Cast */
2287 ir_node *skip_Cast (ir_node *node) {
2288 if (node && get_irn_op(node) == op_Cast)
2289 return get_Cast_op(node);
2293 /* returns operand of node if node is a Confirm */
2294 ir_node *skip_Confirm (ir_node *node) {
2295 if (node && get_irn_op(node) == op_Confirm)
2296 return get_Confirm_value(node);
2300 /* skip all high-level ops */
2301 ir_node *skip_HighLevel(ir_node *node) {
2302 if (node && is_op_highlevel(get_irn_op(node)))
2303 return get_irn_n(node, 0);
/* NOTE(review): two definitions of skip_Id appear below; in the full
 * file one of them is presumably disabled by the preprocessor (the
 * conditional lines were lost in extraction) — confirm before editing. */
2308 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2309 * than any other approach, as Id chains are resolved and all point to the real node, or
2310 * all id's are self loops.
2312 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2315 skip_Id (ir_node *node) {
2316 /* don't assert node !!! */
2318 /* Don't use get_Id_pred: We get into an endless loop for
2319 self-referencing Ids. */
2320 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
2321 ir_node *rem_pred = node->in[0+1];
2324 assert (get_irn_arity (node) > 0);
/* temporarily turn node into a self loop to break Id cycles during recursion */
2326 node->in[0+1] = node;
2327 res = skip_Id(rem_pred);
2328 if (res->op == op_Id) /* self-loop */ return node;
2330 node->in[0+1] = res;
2337 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2338 * than any other approach, as Id chains are resolved and all point to the real node, or
2339 * all id's are self loops.
2341 * Note: This function takes 10% of mostly ANY the compiler run, so it's
2342 * a little bit "hand optimized".
2344 * Moreover, it CANNOT be switched off using get_opt_normalize() ...
2347 skip_Id (ir_node *node) {
2349 /* don't assert node !!! */
2351 if (!node || (node->op != op_Id)) return node;
2353 /* Don't use get_Id_pred(): We get into an endless loop for
2354 self-referencing Ids. */
2355 pred = node->in[0+1];
2357 if (pred->op != op_Id) return pred;
2359 if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
2360 ir_node *rem_pred, *res;
2362 if (pred->op != op_Id) return pred; /* shortcut */
2365 assert (get_irn_arity (node) > 0);
2367 node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
2368 res = skip_Id(rem_pred);
2369 if (res->op == op_Id) /* self-loop */ return node;
2371 node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* In-place variant: replaces *node with its Id-skipped value. */
2379 void skip_Id_and_store(ir_node **node) {
2382 if (!n || (n->op != op_Id)) return;
2384 /* Don't use get_Id_pred(): We get into an endless loop for
2385 self-referencing Ids. */
/* Out-of-line versions of the node predicates; the parenthesized names
 * prevent macro expansion so these forward to the inline _is_* helpers. */
2390 (is_Bad)(const ir_node *node) {
2391 return _is_Bad(node);
2395 (is_Const)(const ir_node *node) {
2396 return _is_Const(node);
2400 (is_no_Block)(const ir_node *node) {
2401 return _is_no_Block(node);
2405 (is_Block)(const ir_node *node) {
2406 return _is_Block(node);
2409 /* returns true if node is an Unknown node. */
2411 (is_Unknown)(const ir_node *node) {
2412 return _is_Unknown(node);
2415 /* returns true if node is a Return node. */
2417 (is_Return)(const ir_node *node) {
2418 return _is_Return(node);
2421 /* returns true if node is a Call node. */
2423 (is_Call)(const ir_node *node) {
2424 return _is_Call(node);
2427 /* returns true if node is a Sel node. */
2429 (is_Sel)(const ir_node *node) {
2430 return _is_Sel(node);
2433 /* returns true if node is a Mux node or a Psi with only one condition. */
2435 (is_Mux)(const ir_node *node) {
2436 return _is_Mux(node);
/* Filter nodes count as Proj only outside the interprocedural view. */
2440 is_Proj (const ir_node *node) {
2442 return node->op == op_Proj
2443 || (!get_interprocedural_view() && node->op == op_Filter);
2446 /* Returns true if the operation manipulates control flow. */
2448 is_cfop(const ir_node *node) {
2449 return is_cfopcode(get_irn_op(node));
2452 /* Returns true if the operation manipulates interprocedural control flow:
2453 CallBegin, EndReg, EndExcept */
2454 int is_ip_cfop(const ir_node *node) {
2455 return is_ip_cfopcode(get_irn_op(node));
2458 /* Returns true if the operation can change the control flow because
2461 is_fragile_op(const ir_node *node) {
2462 return is_op_fragile(get_irn_op(node));
2465 /* Returns the memory operand of fragile operations. */
2466 ir_node *get_fragile_op_mem(ir_node *node) {
2467 assert(node && is_fragile_op(node));
2469 switch (get_irn_opcode (node)) {
/* for the fragile ops handled here the memory input is position 0 */
2478 return get_irn_n(node, 0);
2483 assert(0 && "should not be reached");
2493 /* Return the type associated with the value produced by n
2494 * if the node remarks this type as it is the case for
2495 * Cast, Const, SymConst and some Proj nodes. */
2496 ir_type *(get_irn_type)(ir_node *node) {
2497 return _get_irn_type(node);
2500 /* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
2502 ir_type *(get_irn_type_attr)(ir_node *node) {
2503 return _get_irn_type_attr(node);
2506 /* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
2507 entity *(get_irn_entity_attr)(ir_node *node) {
2508 return _get_irn_entity_attr(node);
2511 /* Returns non-zero for constant-like nodes. */
2512 int (is_irn_constlike)(const ir_node *node) {
2513 return _is_irn_constlike(node);
2517 * Returns non-zero for nodes that are allowed to have keep-alives and
2518 * are neither Block nor PhiM.
2520 int (is_irn_keep)(const ir_node *node) {
2521 return _is_irn_keep(node);
2524 /* Returns non-zero for nodes that are machine operations. */
2525 int (is_irn_machine_op)(const ir_node *node) {
2526 return _is_irn_machine_op(node);
2529 /* Returns non-zero for nodes that are machine operands. */
2530 int (is_irn_machine_operand)(const ir_node *node) {
2531 return _is_irn_machine_operand(node);
2534 /* Returns non-zero for nodes that have the n'th user machine flag set. */
2535 int (is_irn_machine_user)(const ir_node *node, unsigned n) {
2536 return _is_irn_machine_user(node, n);
2540 /* Gets the string representation of the jump prediction .*/
2541 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
2545 case COND_JMP_PRED_NONE: return "no prediction";
2546 case COND_JMP_PRED_TRUE: return "true taken";
2547 case COND_JMP_PRED_FALSE: return "false taken";
2551 /* Returns the conditional jump prediction of a Cond node. */
2552 cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
2553 return _get_Cond_jmp_pred(cond);
2556 /* Sets a new conditional jump prediction. */
2557 void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
2558 _set_Cond_jmp_pred(cond, pred);
2561 /** the get_type operation must be always implemented and return a firm type */
2562 static ir_type *get_Default_type(ir_node *n) {
2563 return get_unknown_type();
2566 /* Sets the get_type operation for an ir_op_ops. */
2567 ir_op_ops *firm_set_default_get_type(opcode code, ir_op_ops *ops)
2570 case iro_Const: ops->get_type = get_Const_type; break;
2571 case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
2572 case iro_Cast: ops->get_type = get_Cast_type; break;
2573 case iro_Proj: ops->get_type = get_Proj_type; break;
2575 /* not allowed to be NULL */
2576 if (! ops->get_type)
2577 ops->get_type = get_Default_type;
2583 /** Return the attribute type of a SymConst node if exists */
2584 static ir_type *get_SymConst_attr_type(ir_node *self) {
2585 symconst_kind kind = get_SymConst_kind(self);
/* only type_tag and size SymConsts carry a type attribute */
2586 if (kind == symconst_type_tag || kind == symconst_size)
2587 return get_SymConst_type(self);
2591 /** Return the attribute entity of a SymConst node if exists */
2592 static entity *get_SymConst_attr_entity(ir_node *self) {
2593 symconst_kind kind = get_SymConst_kind(self);
2594 if (kind == symconst_addr_ent)
2595 return get_SymConst_entity(self);
2599 /** the get_type_attr operation must be always implemented */
2600 static ir_type *get_Null_type(ir_node *n) {
2601 return firm_unknown_type;
2604 /* Sets the get_type operation for an ir_op_ops. */
2605 ir_op_ops *firm_set_default_get_type_attr(opcode code, ir_op_ops *ops)
2608 case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
2609 case iro_Call: ops->get_type_attr = get_Call_type; break;
2610 case iro_Alloc: ops->get_type_attr = get_Alloc_type; break;
2611 case iro_Free: ops->get_type_attr = get_Free_type; break;
2612 case iro_Cast: ops->get_type_attr = get_Cast_type; break;
2614 /* not allowed to be NULL */
2615 if (! ops->get_type_attr)
2616 ops->get_type_attr = get_Null_type;
2622 /** the get_entity_attr operation must be always implemented */
2623 static entity *get_Null_ent(ir_node *n) {
2627 /* Sets the get_type operation for an ir_op_ops. */
2628 ir_op_ops *firm_set_default_get_entity_attr(opcode code, ir_op_ops *ops)
2631 case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
2632 case iro_Sel: ops->get_entity_attr = get_Sel_entity; break;
2634 /* not allowed to be NULL */
2635 if (! ops->get_entity_attr)
2636 ops->get_entity_attr = get_Null_ent;
#ifdef DEBUG_libfirm
/* Debug helper: prints a node with its opcode, mode, number, address,
 * its block (input -1) and all predecessors to stdout. */
2643 void dump_irn (ir_node *n) {
2644 int i, arity = get_irn_arity(n);
2645 printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
2647 ir_node *pred = get_irn_n(n, -1);
2648 printf(" block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2649 get_irn_node_nr(pred), (void *)pred);
2651 printf(" preds: \n");
2652 for (i = 0; i < arity; ++i) {
2653 ir_node *pred = get_irn_n(n, i);
2654 printf(" %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
2655 get_irn_node_nr(pred), (void *)pred);
2659 #else /* DEBUG_libfirm */
/* release builds get an empty stub so callers need no #ifdefs */
2660 void dump_irn (ir_node *n) {}
2661 #endif /* DEBUG_libfirm */