1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
4 * Authors: Martin Trapp, Christian Schaefer, Goetz Lindenmaier
16 #include "irgraph_t.h"
18 #include "typegmod_t.h"
20 #include "irbackedge_t.h"
28 /* some constants fixing the positions of nodes predecessors
30 #define CALL_PARAM_OFFSET 2
31 #define SEL_INDEX_OFFSET 2
32 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
33 #define END_KEEPALIVE_OFFSET 0
35 /* Declarations for inlining */
36 INLINE ir_node ** get_irn_in (const ir_node *node);
37 INLINE ir_mode *get_irn_mode (const ir_node *node);
38 INLINE ir_op *get_irn_op (const ir_node *node);
39 INLINE opcode get_irn_opcode (const ir_node *node);
40 INLINE ident *get_irn_opident (const ir_node *node);
41 INLINE type *get_SymConst_type (ir_node *node);
42 INLINE ir_node *skip_nop (ir_node *node);
43 INLINE int is_Proj (const ir_node *node);
46 static const char *pnc_name_arr [] = {
47 "False", "Eq", "Lt", "Le",
48 "Gt", "Ge", "Lg", "Leg", "Uo",
49 "Ue", "Ul", "Ule", "Ug", "Uge",
53 INLINE const char *get_pnc_string(int pnc) {
54 return pnc_name_arr[pnc];
/* Returns the pnc (projection number of Cmp) whose comparison result is
   the logical negation of the given pnc.  Note that negating an ordered
   relation yields the corresponding *unordered* relation (e.g. Lt -> Uge),
   so that float compares with NaN operands are handled correctly. */
59 get_negated_pnc(int pnc) {
61 case False: return True; break;
62 case Eq: return Ne; break;
63 case Lt: return Uge; break;
64 case Le: return Ug; break;
65 case Gt: return Ule; break;
66 case Ge: return Ul; break;
67 case Lg: return Ue; break;
68 case Leg: return Uo; break;
69 case Uo: return Leg; break;
70 case Ue: return Lg; break;
71 case Ul: return Ge; break;
72 case Ule: return Gt; break;
73 case Ug: return Le; break;
74 case Uge: return Lt; break;
75 case Ne: return Eq; break;
76 case True: return False; break;
/* Unreachable for valid pnc values; dummy return silences the
   "control reaches end of non-void function" warning. */
78 return 99; /* to shut up gcc */
81 const char *pns_name_arr [] = {
82 "initial_exec", "global_store",
83 "frame_base", "globals", "args"
86 const char *symconst_name_arr [] = {
87 "type_tag", "size", "linkage_ptr_info"
95 /* irnode constructor */
96 /* create a new irnode in irg, with an op, mode, arity and */
97 /* some incoming irnodes */
98 /* this constructor is used in every specified irnode constructor */
/* Generic ir_node constructor: allocates a node of the given opcode and
   mode in irg, with `arity` data predecessors taken from `in` and `block`
   as the block predecessor (stored at in[0]).  All specific node
   constructors funnel through this function. */
100 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
101 	     int arity, ir_node **in)
/* Node size is the fixed header plus the opcode-specific attribute size,
   so different opcodes occupy different amounts of obstack memory. */
104 int node_size = offsetof (ir_node, attr) + op->attr_size;
/* Nodes live on the graph's obstack; they are never freed individually. */
106 res = (ir_node *) obstack_alloc (irg->obst, node_size);
108 res->kind = k_ir_node;
/* arity < 0 means "dynamic": start with a flexible array holding only the
   block slot, to be grown later (e.g. while a Block is immature). */
114 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
/* Fixed arity: allocate arity+1 slots (slot 0 is the block) and copy the
   data predecessors behind it. */
116 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
117 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
120 set_irn_dbg_info(res, db);
/* Unique node number for debugging output. */
124 res->node_nr = get_irp_new_node_nr();
130 /* Copies all attributes stored in the old node to the new node.
131 Assumes both have the same opcode and sufficient size. */
133 copy_attrs (ir_node *old, ir_node *new) {
134 assert (get_irn_op(old) == get_irn_op(new));
135 memcpy (&new->attr, &old->attr, get_op_attr_size(get_irn_op(old)));
138 /** getting some parameters from ir_nodes **/
141 is_ir_node (void *thing) {
143 if (get_kind(thing) == k_ir_node)
149 /* returns the number of predecessors without the block predecessor. */
/* Returns the number of predecessors *excluding* the block predecessor
   (which sits at in[0], hence the "- 1").  In the interprocedural view
   Filter and Block nodes use their in_cg arrays instead of in. */
151 get_irn_arity (const ir_node *node) {
153 if (interprocedural_view) { /* handle Filter and Block specially */
154 if (get_irn_opcode(node) == iro_Filter) {
/* A Filter in the interprocedural view must have its call-graph ins set. */
155 assert(node->attr.filter.in_cg);
156 return ARR_LEN(node->attr.filter.in_cg) - 1;
157 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
158 return ARR_LEN(node->attr.block.in_cg) - 1;
160 /* else fall through */
/* Intraprocedural case: ordinary in array, slot 0 is the block. */
162 return ARR_LEN(node->in) - 1;
165 /* Returns the array with ins. This array is shifted with respect to the
166 array accessed by get_irn_n: The block operand is at position 0 not -1.
167 (@@@ This should be changed.)
168 The order of the predecessors in this array is not guaranteed, except that
169 lists of operands as predecessors of Block or arguments of a Call are
/* Returns the raw in array of the node.  Unlike get_irn_n, this array is
   not shifted: the block predecessor is at index 0, not -1.  In the
   interprocedural view, Filter and Block nodes return their in_cg array. */
172 get_irn_in (const ir_node *node) {
174 if (interprocedural_view) { /* handle Filter and Block specially */
175 if (get_irn_opcode(node) == iro_Filter) {
176 assert(node->attr.filter.in_cg);
177 return node->attr.filter.in_cg;
178 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
179 return node->attr.block.in_cg;
181 /* else fall through */
/* Replaces the data predecessors of `node` by the `arity` nodes in `in`.
   Picks the proper array (in_cg for Filter/Block in the interprocedural
   view, the plain in array otherwise) via the `arr` pointer-to-array. */
187 set_irn_in (ir_node *node, int arity, ir_node **in) {
190 if (interprocedural_view) { /* handle Filter and Block specially */
191 if (get_irn_opcode(node) == iro_Filter) {
192 assert(node->attr.filter.in_cg);
193 arr = &node->attr.filter.in_cg;
194 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
195 arr = &node->attr.block.in_cg;
/* If the arity changes, the array must be reallocated on the obstack.
   The block predecessor at slot 0 is saved first; presumably it is
   written back into the new array — confirm against the full source. */
202 if (arity != ARR_LEN(*arr) - 1) {
203 ir_node * block = (*arr)[0];
204 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
/* Keep the backedge bitfield consistent with the new arity. */
207 fix_backedges(current_ir_graph->obst, node);
/* Copy the new data predecessors behind the block slot. */
208 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
211 /* to iterate through the predecessors without touching the array */
212 /* To iterate over the operands iterate from 0 to i < get_irn_arity(),
213 to iterate including the Block predecessor iterate from i = -1 to
215 If it is a block, the entry -1 is NULL. */
/* Returns the n-th predecessor of `node`.  n == -1 yields the block
   predecessor; data predecessors are 0 .. get_irn_arity()-1 (the in array
   is shifted by one, hence the n + 1 indexing).  The result is passed
   through skip_nop and the skipped node is written back into the array,
   i.e. this getter performs path compression over Id/nop nodes. */
217 get_irn_n (ir_node *node, int n) {
/* Debug aid: report the offending index before the assert fires. */
219 if (-1 > n || get_irn_arity(node) <= n) {
220 printf("pos: %d, arity: %d ", n, get_irn_arity(node));
223 assert(node); assert(-1 <= n && n < get_irn_arity(node));
224 if (interprocedural_view) { /* handle Filter and Block specially */
225 if (get_irn_opcode(node) == iro_Filter) {
226 assert(node->attr.filter.in_cg);
227 return (node->attr.filter.in_cg[n + 1] = skip_nop(node->attr.filter.in_cg[n + 1]));
228 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
229 return (node->attr.block.in_cg[n + 1] = skip_nop(node->attr.block.in_cg[n + 1]));
231 /* else fall through */
/* Intraprocedural case: plain in array. */
233 return (node->in[n + 1] = skip_nop(node->in[n + 1]));
/* Sets the n-th predecessor of `node` to `in`.  n == -1 addresses the
   block predecessor.  For a Filter's block predecessor both the
   intraprocedural in array and the interprocedural in_cg array are
   updated, so the two views stay consistent. */
237 set_irn_n (ir_node *node, int n, ir_node *in) {
238 assert(node && -1 <= n && n < get_irn_arity(node));
239 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
240 /* Change block pred in both views! */
241 node->in[n + 1] = in;
242 assert(node->attr.filter.in_cg);
243 node->attr.filter.in_cg[n + 1] = in;
246 if (interprocedural_view) { /* handle Filter and Block specially */
247 if (get_irn_opcode(node) == iro_Filter) {
248 assert(node->attr.filter.in_cg);
249 node->attr.filter.in_cg[n + 1] = in;
251 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
252 node->attr.block.in_cg[n + 1] = in;
255 /* else fall through */
/* Default: write into the ordinary (shifted) in array. */
257 node->in[n + 1] = in;
261 get_irn_mode (const ir_node *node)
268 set_irn_mode (ir_node *node, ir_mode *mode)
276 get_irn_modecode (const ir_node *node)
279 return node->mode->code;
284 get_irn_modeident (const ir_node *node)
287 return get_mode_ident(node->mode);
291 get_irn_op (const ir_node *node)
297 /* should be private to the library: */
299 set_irn_op (ir_node *node, ir_op *op)
306 get_irn_opcode (const ir_node *node)
309 assert (k_ir_node == get_kind(node));
311 return node->op->code;
315 get_irn_opname (const ir_node *node)
318 return id_to_str(node->op->name);
322 get_irn_opident (const ir_node *node)
325 return node->op->name;
329 get_irn_visited (const ir_node *node)
332 return node->visited;
336 set_irn_visited (ir_node *node, unsigned long visited)
339 node->visited = visited;
343 mark_irn_visited (ir_node *node) {
345 node->visited = current_ir_graph->visited;
349 irn_not_visited (const ir_node *node) {
351 return (node->visited < current_ir_graph->visited);
355 irn_visited (const ir_node *node) {
357 return (node->visited >= current_ir_graph->visited);
361 set_irn_link (ir_node *node, void *link) {
363 /* Link field is used for Phi construction and various optimizations
365 assert(get_irg_phase_state(current_ir_graph) != phase_building);
371 get_irn_link (const ir_node *node) {
376 /* Outputs a unique number for this node */
378 get_irn_node_nr(const ir_node *node) {
381 return node->node_nr;
388 get_irn_const_attr (ir_node *node)
390 assert (node->op == op_Const);
391 return node->attr.con;
395 get_irn_proj_attr (ir_node *node)
397 assert (node->op == op_Proj);
398 return node->attr.proj;
402 get_irn_alloc_attr (ir_node *node)
404 assert (node->op == op_Alloc);
409 get_irn_free_attr (ir_node *node)
411 assert (node->op == op_Free);
412 return node->attr.f = skip_tid(node->attr.f);
416 get_irn_symconst_attr (ir_node *node)
418 assert (node->op == op_SymConst);
423 get_irn_call_attr (ir_node *node)
425 assert (node->op == op_Call);
426 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
430 get_irn_sel_attr (ir_node *node)
432 assert (node->op == op_Sel);
437 get_irn_phi_attr (ir_node *node)
439 assert (node->op == op_Phi);
440 return node->attr.phi0_pos;
444 get_irn_block_attr (ir_node *node)
446 assert (node->op == op_Block);
447 return node->attr.block;
450 /** manipulate fields of individual nodes **/
452 /* this works for all except Block */
454 get_nodes_Block (ir_node *node) {
455 assert (!(node->op == op_Block));
456 return get_irn_n(node, -1);
460 set_nodes_Block (ir_node *node, ir_node *block) {
461 assert (!(node->op == op_Block));
462 set_irn_n(node, -1, block);
465 /* Returns an array with the predecessors of the Block. Depending on
466 the implementation of the graph datastructure this can be a copy of
467 the internal representation of predecessors as well as the internal
468 array itself. Therefore writing to this array might obstruct the ir. */
470 get_Block_cfgpred_arr (ir_node *node)
472 assert ((node->op == op_Block));
473 return (ir_node **)&(get_irn_in(node)[1]);
478 get_Block_n_cfgpreds (ir_node *node) {
479 assert ((node->op == op_Block));
480 return (get_irn_arity(node));
484 get_Block_cfgpred (ir_node *node, int pos) {
485 assert (node->op == op_Block);
487 if (-1 > pos || get_irn_arity(node) <= pos) {
488 dump_ir_block_graph(current_ir_graph);
489 printf("pos: %d, arity: %d ", pos, get_irn_arity(node));
492 assert(node); assert(-1 <= pos && pos < get_irn_arity(node));
493 return get_irn_n(node, pos);
497 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
498 assert (node->op == op_Block);
499 set_irn_n(node, pos, pred);
503 get_Block_matured (ir_node *node) {
504 assert (node->op == op_Block);
505 return node->attr.block.matured;
509 set_Block_matured (ir_node *node, bool matured) {
510 assert (node->op == op_Block);
511 node->attr.block.matured = matured;
514 get_Block_block_visited (ir_node *node) {
515 assert (node->op == op_Block);
516 return node->attr.block.block_visited;
520 set_Block_block_visited (ir_node *node, unsigned long visit) {
521 assert (node->op == op_Block);
522 node->attr.block.block_visited = visit;
525 /* For this current_ir_graph must be set. */
527 mark_Block_block_visited (ir_node *node) {
528 assert (node->op == op_Block);
529 node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
533 Block_not_block_visited(ir_node *node) {
534 assert (node->op == op_Block);
535 return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph));
539 get_Block_graph_arr (ir_node *node, int pos) {
540 assert (node->op == op_Block);
541 return node->attr.block.graph_arr[pos+1];
545 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
546 assert (node->op == op_Block);
547 node->attr.block.graph_arr[pos+1] = value;
550 /* handler handling for Blocks */
552 set_Block_handler (ir_node *block, ir_node *handler) {
553 assert ((block->op == op_Block));
554 assert ((handler->op == op_Block));
555 block->attr.block.handler_entry = handler;
559 get_Block_handler (ir_node *block) {
560 assert ((block->op == op_Block));
561 return (block->attr.block.handler_entry);
564 /* handler handling for Nodes */
566 set_Node_handler (ir_node *node, ir_node *handler) {
567 set_Block_handler (get_nodes_Block (node), handler);
571 get_Node_handler (ir_node *node) {
572 return (get_Block_handler (get_nodes_Block (node)));
575 /* exc_t handling for Blocks */
576 void set_Block_exc (ir_node *block, exc_t exc) {
577 assert ((block->op == op_Block));
578 block->attr.block.exc = exc;
581 exc_t get_Block_exc (ir_node *block) {
582 assert ((block->op == op_Block));
584 return (block->attr.block.exc);
587 /* exc_t handling for Nodes */
588 void set_Node_exc (ir_node *node, exc_t exc) {
589 set_Block_exc (get_nodes_Block (node), exc);
592 exc_t get_Node_exc (ir_node *node) {
593 return (get_Block_exc (get_nodes_Block (node)));
/* Installs the interprocedural (call-graph) control-flow predecessors of a
   Block.  Allocates in_cg (slot 0 reserved/NULL, analogous to the block
   slot of ordinary in arrays) and a matching cg_backedge array when the
   arity changes, then copies the `arity` predecessors from `in`. */
596 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
597 assert(node->op == op_Block);
598 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
599 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
600 node->attr.block.in_cg[0] = NULL;
601 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
603 /* Fix backedge array. fix_backedges operates depending on
604 interprocedural_view. */
/* Temporarily force the interprocedural view so fix_backedges works on
   in_cg/cg_backedge, then restore the caller's setting. */
605 bool ipv = interprocedural_view;
606 interprocedural_view = true;
607 fix_backedges(current_ir_graph->obst, node);
608 interprocedural_view = ipv;
611 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
614 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
615 assert(node->op == op_Block &&
616 node->attr.block.in_cg &&
617 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
618 node->attr.block.in_cg[pos + 1] = pred;
621 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
622 assert(node->op == op_Block);
623 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
626 int get_Block_cg_n_cfgpreds(ir_node * node) {
627 assert(node->op == op_Block);
628 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
631 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
632 assert(node->op == op_Block && node->attr.block.in_cg);
633 return node->attr.block.in_cg[pos + 1];
636 void remove_Block_cg_cfgpred_arr(ir_node * node) {
637 assert(node->op == op_Block);
638 node->attr.block.in_cg = NULL;
641 /* Start references the irg it is in. */
643 get_Start_irg(ir_node *node) {
644 assert(node->op == op_Start);
645 return node->attr.start.irg;
649 set_Start_irg(ir_node *node, ir_graph *irg) {
650 assert(node->op == op_Start);
651 assert(is_ir_graph(irg));
652 node->attr.start.irg = irg;
656 get_End_n_keepalives(ir_node *end) {
657 assert (end->op == op_End);
658 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
662 get_End_keepalive(ir_node *end, int pos) {
663 assert (end->op == op_End);
664 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
668 add_End_keepalive (ir_node *end, ir_node *ka) {
669 assert (end->op == op_End);
670 ARR_APP1 (ir_node *, end->in, ka);
674 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
675 assert (end->op == op_End);
676 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
680 free_End (ir_node *end) {
681 assert (end->op == op_End);
682 /* DEL_ARR_F(end->in); GL @@@ tut nicht ! */
683 end->in = NULL; /* @@@ make sure we get an error if we use the
684 in array afterwards ... */
687 ir_graph *get_EndReg_irg (const ir_node *end) {
688 assert (end->op == op_EndReg);
689 return end->attr.end.irg;
/* EndExcept references the ir_graph it belongs to (analogous to
   get_EndReg_irg above). */
692 ir_graph *get_EndExcept_irg (const ir_node *end) {
/* Bug fix: the assertion tested op_EndReg — a copy/paste slip from
   get_EndReg_irg.  An EndExcept node must have op_EndExcept. */
693 assert (end->op == op_EndExcept);
694 return end->attr.end.irg;
698 > Implementing the case construct (which is where the constant Proj node is
699 > important) involves far more than simply determining the constant values.
700 > We could argue that this is more properly a function of the translator from
701 > Firm to the target machine. That could be done if there was some way of
702 > projecting "default" out of the Cond node.
703 I know it's complicated.
704 Basically there are two problems:
705 - determining the gaps between the projs
706 - determining the biggest case constant to know the proj number for
708 I see several solutions:
709 1. Introduce a ProjDefault node. Solves both problems.
710 This means to extend all optimizations executed during construction.
711 2. Give the Cond node for switch two flavors:
712 a) there are no gaps in the projs (existing flavor)
713 b) gaps may exist, default proj is still the Proj with the largest
714 projection number. This covers also the gaps.
715 3. Fix the semantic of the Cond to that of 2b)
717 Solution 2 seems to be the best:
718 Computing the gaps in the Firm representation is not too hard, i.e.,
719 libfirm can implement a routine that transforms between the two
720 flavours. This is also possible for 1) but 2) does not require to
721 change any existing optimization.
722 Further it should be far simpler to determine the biggest constant than
724 I don't want to choose 3) as 2a) seems to have advantages for
725 dataflow analysis and 3) does not allow to convert the representation to
729 get_Cond_selector (ir_node *node) {
730 assert (node->op == op_Cond);
731 return get_irn_n(node, 0);
735 set_Cond_selector (ir_node *node, ir_node *selector) {
736 assert (node->op == op_Cond);
737 set_irn_n(node, 0, selector);
741 get_Cond_kind (ir_node *node) {
742 assert (node->op == op_Cond);
743 return node->attr.c.kind;
747 set_Cond_kind (ir_node *node, cond_kind kind) {
748 assert (node->op == op_Cond);
749 node->attr.c.kind = kind;
753 get_Return_mem (ir_node *node) {
754 assert (node->op == op_Return);
755 return get_irn_n(node, 0);
759 set_Return_mem (ir_node *node, ir_node *mem) {
760 assert (node->op == op_Return);
761 set_irn_n(node, 0, mem);
765 get_Return_n_ress (ir_node *node) {
766 assert (node->op == op_Return);
767 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
771 get_Return_res_arr (ir_node *node)
773 assert ((node->op == op_Return));
774 if (get_Return_n_ress(node) > 0)
775 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
782 set_Return_n_res (ir_node *node, int results) {
783 assert (node->op == op_Return);
788 get_Return_res (ir_node *node, int pos) {
789 assert (node->op == op_Return);
790 assert (get_Return_n_ress(node) > pos);
791 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
795 set_Return_res (ir_node *node, int pos, ir_node *res){
796 assert (node->op == op_Return);
797 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
801 get_Raise_mem (ir_node *node) {
802 assert (node->op == op_Raise);
803 return get_irn_n(node, 0);
807 set_Raise_mem (ir_node *node, ir_node *mem) {
808 assert (node->op == op_Raise);
809 set_irn_n(node, 0, mem);
813 get_Raise_exo_ptr (ir_node *node) {
814 assert (node->op == op_Raise);
815 return get_irn_n(node, 1);
819 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
820 assert (node->op == op_Raise);
821 set_irn_n(node, 1, exo_ptr);
824 INLINE tarval *get_Const_tarval (ir_node *node) {
825 assert (node->op == op_Const);
826 return get_irn_const_attr(node);
830 set_Const_tarval (ir_node *node, tarval *con) {
831 assert (node->op == op_Const);
832 node->attr.con = con;
836 get_SymConst_kind (const ir_node *node) {
837 assert (node->op == op_SymConst);
838 return node->attr.i.num;
842 set_SymConst_kind (ir_node *node, symconst_kind num) {
843 assert (node->op == op_SymConst);
844 node->attr.i.num = num;
848 get_SymConst_type (ir_node *node) {
849 assert ( (node->op == op_SymConst)
850 && ( get_SymConst_kind(node) == type_tag
851 || get_SymConst_kind(node) == size));
852 return node->attr.i.tori.typ = skip_tid(node->attr.i.tori.typ);
856 set_SymConst_type (ir_node *node, type *tp) {
857 assert ( (node->op == op_SymConst)
858 && ( get_SymConst_kind(node) == type_tag
859 || get_SymConst_kind(node) == size));
860 node->attr.i.tori.typ = tp;
864 get_SymConst_ptrinfo (ir_node *node) {
865 assert ( (node->op == op_SymConst)
866 && (get_SymConst_kind(node) == linkage_ptr_info));
867 return node->attr.i.tori.ptrinfo;
871 set_SymConst_ptrinfo (ir_node *node, ident *ptrinfo) {
872 assert ( (node->op == op_SymConst)
873 && (get_SymConst_kind(node) == linkage_ptr_info));
874 node->attr.i.tori.ptrinfo = ptrinfo;
878 get_SymConst_type_or_id (ir_node *node) {
879 assert (node->op == op_SymConst);
880 return &(node->attr.i.tori);
884 set_SymConst_type_or_id (ir_node *node, type_or_id_p tori) {
885 assert (node->op == op_SymConst);
886 memcpy (&(node->attr.i.tori), tori, sizeof(type_or_id));
890 get_Sel_mem (ir_node *node) {
891 assert (node->op == op_Sel);
892 return get_irn_n(node, 0);
896 set_Sel_mem (ir_node *node, ir_node *mem) {
897 assert (node->op == op_Sel);
898 set_irn_n(node, 0, mem);
902 get_Sel_ptr (ir_node *node) {
903 assert (node->op == op_Sel);
904 return get_irn_n(node, 1);
908 set_Sel_ptr (ir_node *node, ir_node *ptr) {
909 assert (node->op == op_Sel);
910 set_irn_n(node, 1, ptr);
914 get_Sel_n_indexs (ir_node *node) {
915 assert (node->op == op_Sel);
916 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
920 get_Sel_index_arr (ir_node *node)
922 assert ((node->op == op_Sel));
923 if (get_Sel_n_indexs(node) > 0)
924 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
930 get_Sel_index (ir_node *node, int pos) {
931 assert (node->op == op_Sel);
932 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
936 set_Sel_index (ir_node *node, int pos, ir_node *index) {
937 assert (node->op == op_Sel);
938 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
942 get_Sel_entity (ir_node *node) {
943 assert (node->op == op_Sel);
944 return node->attr.s.ent;
948 set_Sel_entity (ir_node *node, entity *ent) {
949 assert (node->op == op_Sel);
950 node->attr.s.ent = ent;
/* Returns the entity attribute of an InstOf node. */
954 get_InstOf_ent (ir_node *node) {
/* Bug fix: the assertion used `=` (assignment), which always succeeds and
   silently overwrites node->op.  It must be the `==` comparison. */
955   assert (node->op == op_InstOf);
956   return (node->attr.io.ent);
/* Sets the entity attribute of an InstOf node. */
960 set_InstOf_ent (ir_node *node, type *ent) {
/* Bug fix: was `node->op = op_InstOf` (assignment) — an always-true
   assert that clobbered the opcode.  Use `==`. */
961   assert (node->op == op_InstOf);
962   node->attr.io.ent = ent;
/* Returns the store (memory) predecessor of an InstOf node (input 0). */
966 get_InstOf_store (ir_node *node) {
/* Bug fix: `=` (assignment) replaced by the intended `==` comparison. */
967   assert (node->op == op_InstOf);
968   return (get_irn_n (node, 0));
/* Sets the store (memory) predecessor of an InstOf node (input 0). */
972 set_InstOf_store (ir_node *node, ir_node *obj) {
/* Bug fix: `=` (assignment) replaced by the intended `==` comparison. */
973   assert (node->op == op_InstOf);
974   set_irn_n (node, 0, obj);
/* Returns the object pointer predecessor of an InstOf node (input 1). */
978 get_InstOf_obj (ir_node *node) {
/* Bug fix: `=` (assignment) replaced by the intended `==` comparison. */
979   assert (node->op == op_InstOf);
980   return (get_irn_n (node, 1));
/* Sets the object pointer predecessor of an InstOf node (input 1). */
984 set_InstOf_obj (ir_node *node, ir_node *obj) {
/* Bug fix: `=` (assignment) replaced by the intended `==` comparison. */
985   assert (node->op == op_InstOf);
986   set_irn_n (node, 1, obj);
990 /* For unary and binary arithmetic operations the access to the
991 operands can be factored out. Left is the first, right the
992 second arithmetic value as listed in tech report 0999-33.
993 unops are: Minus, Abs, Not, Conv, Cast
994 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
995 Shr, Shrs, Rotate, Cmp */
999 get_Call_mem (ir_node *node) {
1000 assert (node->op == op_Call);
1001 return get_irn_n(node, 0);
1005 set_Call_mem (ir_node *node, ir_node *mem) {
1006 assert (node->op == op_Call);
1007 set_irn_n(node, 0, mem);
1011 get_Call_ptr (ir_node *node) {
1012 assert (node->op == op_Call);
1013 return get_irn_n(node, 1);
1017 set_Call_ptr (ir_node *node, ir_node *ptr) {
1018 assert (node->op == op_Call);
1019 set_irn_n(node, 1, ptr);
1023 get_Call_param_arr (ir_node *node) {
1024 assert (node->op == op_Call);
1025 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1029 get_Call_n_params (ir_node *node) {
1030 assert (node->op == op_Call);
1031 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1035 get_Call_arity (ir_node *node) {
1036 assert (node->op == op_Call);
1037 return get_Call_n_params(node);
1041 set_Call_arity (ir_node *node, ir_node *arity) {
1042 assert (node->op == op_Call);
1047 get_Call_param (ir_node *node, int pos) {
1048 assert (node->op == op_Call);
1049 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1053 set_Call_param (ir_node *node, int pos, ir_node *param) {
1054 assert (node->op == op_Call);
1055 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1059 get_Call_type (ir_node *node) {
1060 assert (node->op == op_Call);
1061 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1065 set_Call_type (ir_node *node, type *tp) {
1066 assert (node->op == op_Call);
1067 assert (is_method_type(tp));
1068 node->attr.call.cld_tp = tp;
1071 int get_Call_n_callees(ir_node * node) {
1072 assert(node->op == op_Call && node->attr.call.callee_arr);
1073 return ARR_LEN(node->attr.call.callee_arr);
1076 entity * get_Call_callee(ir_node * node, int pos) {
1077 assert(node->op == op_Call && node->attr.call.callee_arr);
1078 return node->attr.call.callee_arr[pos];
1081 void set_Call_callee_arr(ir_node * node, int n, entity ** arr) {
1082 assert(node->op == op_Call);
1083 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1084 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1086 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1089 void remove_Call_callee_arr(ir_node * node) {
1090 assert(node->op == op_Call);
1091 node->attr.call.callee_arr = NULL;
1094 ir_node * get_CallBegin_ptr (ir_node *node) {
1095 assert(node->op == op_CallBegin);
1096 return get_irn_n(node, 0);
1098 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1099 assert(node->op == op_CallBegin);
1100 set_irn_n(node, 0, ptr);
1102 ir_graph * get_CallBegin_irg (ir_node *node) {
1103 assert(node->op == op_CallBegin);
1104 return node->attr.callbegin.irg;
1106 ir_node * get_CallBegin_call (ir_node *node) {
1107 assert(node->op == op_CallBegin);
1108 return node->attr.callbegin.call;
1110 void set_CallBegin_call (ir_node *node, ir_node *call) {
1111 assert(node->op == op_CallBegin);
1112 node->attr.callbegin.call = call;
1116 get_Add_left (ir_node *node) {
1117 assert (node->op == op_Add);
1118 return get_irn_n(node, 0);
1122 set_Add_left (ir_node *node, ir_node *left) {
1123 assert (node->op == op_Add);
1124 set_irn_n(node, 0, left);
1128 get_Add_right (ir_node *node) {
1129 assert (node->op == op_Add);
1130 return get_irn_n(node, 1);
1134 set_Add_right (ir_node *node, ir_node *right) {
1135 assert (node->op == op_Add);
1136 set_irn_n(node, 1, right);
1140 get_Sub_left (ir_node *node) {
1141 assert (node->op == op_Sub);
1142 return get_irn_n(node, 0);
1146 set_Sub_left (ir_node *node, ir_node *left) {
1147 assert (node->op == op_Sub);
1148 set_irn_n(node, 0, left);
1152 get_Sub_right (ir_node *node) {
1153 assert (node->op == op_Sub);
1154 return get_irn_n(node, 1);
1158 set_Sub_right (ir_node *node, ir_node *right) {
1159 assert (node->op == op_Sub);
1160 set_irn_n(node, 1, right);
1165 get_Minus_op (ir_node *node) {
1166 assert (node->op == op_Minus);
1167 return get_irn_n(node, 0);
1171 set_Minus_op (ir_node *node, ir_node *op) {
1172 assert (node->op == op_Minus);
1173 set_irn_n(node, 0, op);
1178 get_Mul_left (ir_node *node) {
1179 assert (node->op == op_Mul);
1180 return get_irn_n(node, 0);
1184 set_Mul_left (ir_node *node, ir_node *left) {
1185 assert (node->op == op_Mul);
1186 set_irn_n(node, 0, left);
1190 get_Mul_right (ir_node *node) {
1191 assert (node->op == op_Mul);
1192 return get_irn_n(node, 1);
1196 set_Mul_right (ir_node *node, ir_node *right) {
1197 assert (node->op == op_Mul);
1198 set_irn_n(node, 1, right);
1202 get_Quot_left (ir_node *node) {
1203 assert (node->op == op_Quot);
1204 return get_irn_n(node, 1);
1208 set_Quot_left (ir_node *node, ir_node *left) {
1209 assert (node->op == op_Quot);
1210 set_irn_n(node, 1, left);
1214 get_Quot_right (ir_node *node) {
1215 assert (node->op == op_Quot);
1216 return get_irn_n(node, 2);
1220 set_Quot_right (ir_node *node, ir_node *right) {
1221 assert (node->op == op_Quot);
1222 set_irn_n(node, 2, right);
1226 get_Quot_mem (ir_node *node) {
1227 assert (node->op == op_Quot);
1228 return get_irn_n(node, 0);
1232 set_Quot_mem (ir_node *node, ir_node *mem) {
1233 assert (node->op == op_Quot);
1234 set_irn_n(node, 0, mem);
1238 get_DivMod_left (ir_node *node) {
1239 assert (node->op == op_DivMod);
1240 return get_irn_n(node, 1);
1244 set_DivMod_left (ir_node *node, ir_node *left) {
1245 assert (node->op == op_DivMod);
1246 set_irn_n(node, 1, left);
1250 get_DivMod_right (ir_node *node) {
1251 assert (node->op == op_DivMod);
1252 return get_irn_n(node, 2);
1256 set_DivMod_right (ir_node *node, ir_node *right) {
1257 assert (node->op == op_DivMod);
1258 set_irn_n(node, 2, right);
1262 get_DivMod_mem (ir_node *node) {
1263 assert (node->op == op_DivMod);
1264 return get_irn_n(node, 0);
1268 set_DivMod_mem (ir_node *node, ir_node *mem) {
1269 assert (node->op == op_DivMod);
1270 set_irn_n(node, 0, mem);
1274 get_Div_left (ir_node *node) {
1275 assert (node->op == op_Div);
1276 return get_irn_n(node, 1);
1280 set_Div_left (ir_node *node, ir_node *left) {
1281 assert (node->op == op_Div);
1282 set_irn_n(node, 1, left);
1286 get_Div_right (ir_node *node) {
1287 assert (node->op == op_Div);
1288 return get_irn_n(node, 2);
1292 set_Div_right (ir_node *node, ir_node *right) {
1293 assert (node->op == op_Div);
1294 set_irn_n(node, 2, right);
1298 get_Div_mem (ir_node *node) {
1299 assert (node->op == op_Div);
1300 return get_irn_n(node, 0);
1304 set_Div_mem (ir_node *node, ir_node *mem) {
1305 assert (node->op == op_Div);
1306 set_irn_n(node, 0, mem);
1310 get_Mod_left (ir_node *node) {
1311 assert (node->op == op_Mod);
1312 return get_irn_n(node, 1);
1316 set_Mod_left (ir_node *node, ir_node *left) {
1317 assert (node->op == op_Mod);
1318 set_irn_n(node, 1, left);
1322 get_Mod_right (ir_node *node) {
1323 assert (node->op == op_Mod);
1324 return get_irn_n(node, 2);
1328 set_Mod_right (ir_node *node, ir_node *right) {
1329 assert (node->op == op_Mod);
1330 set_irn_n(node, 2, right);
1334 get_Mod_mem (ir_node *node) {
1335 assert (node->op == op_Mod);
1336 return get_irn_n(node, 0);
1340 set_Mod_mem (ir_node *node, ir_node *mem) {
1341 assert (node->op == op_Mod);
1342 set_irn_n(node, 0, mem);
1346 get_Abs_op (ir_node *node) {
1347 assert (node->op == op_Abs);
1348 return get_irn_n(node, 0);
1352 set_Abs_op (ir_node *node, ir_node *op) {
1353 assert (node->op == op_Abs);
1354 set_irn_n(node, 0, op);
1358 get_And_left (ir_node *node) {
1359 assert (node->op == op_And);
1360 return get_irn_n(node, 0);
1364 set_And_left (ir_node *node, ir_node *left) {
1365 assert (node->op == op_And);
1366 set_irn_n(node, 0, left);
1370 get_And_right (ir_node *node) {
1371 assert (node->op == op_And);
1372 return get_irn_n(node, 1);
1376 set_And_right (ir_node *node, ir_node *right) {
1377 assert (node->op == op_And);
1378 set_irn_n(node, 1, right);
1382 get_Or_left (ir_node *node) {
1383 assert (node->op == op_Or);
1384 return get_irn_n(node, 0);
1388 set_Or_left (ir_node *node, ir_node *left) {
1389 assert (node->op == op_Or);
1390 set_irn_n(node, 0, left);
1394 get_Or_right (ir_node *node) {
1395 assert (node->op == op_Or);
1396 return get_irn_n(node, 1);
1400 set_Or_right (ir_node *node, ir_node *right) {
1401 assert (node->op == op_Or);
1402 set_irn_n(node, 1, right);
1406 get_Eor_left (ir_node *node) {
1407 assert (node->op == op_Eor);
1408 return get_irn_n(node, 0);
1412 set_Eor_left (ir_node *node, ir_node *left) {
1413 assert (node->op == op_Eor);
1414 set_irn_n(node, 0, left);
1418 get_Eor_right (ir_node *node) {
1419 assert (node->op == op_Eor);
1420 return get_irn_n(node, 1);
1424 set_Eor_right (ir_node *node, ir_node *right) {
1425 assert (node->op == op_Eor);
1426 set_irn_n(node, 1, right);
1431 get_Not_op (ir_node *node) {
1432 assert (node->op == op_Not);
1433 return get_irn_n(node, 0);
1437 set_Not_op (ir_node *node, ir_node *op) {
1438 assert (node->op == op_Not);
1439 set_irn_n(node, 0, op);
1444 get_Shl_left (ir_node *node) {
1445 assert (node->op == op_Shl);
1446 return get_irn_n(node, 0);
1450 set_Shl_left (ir_node *node, ir_node *left) {
1451 assert (node->op == op_Shl);
1452 set_irn_n(node, 0, left);
1456 get_Shl_right (ir_node *node) {
1457 assert (node->op == op_Shl);
1458 return get_irn_n(node, 1);
1462 set_Shl_right (ir_node *node, ir_node *right) {
1463 assert (node->op == op_Shl);
1464 set_irn_n(node, 1, right);
/* Shr (logical shift right) accessors: input 0 = value,
   input 1 = shift amount. */
1468 get_Shr_left (ir_node *node) {
1469 assert (node->op == op_Shr);
1470 return get_irn_n(node, 0);
1474 set_Shr_left (ir_node *node, ir_node *left) {
1475 assert (node->op == op_Shr);
1476 set_irn_n(node, 0, left);
1480 get_Shr_right (ir_node *node) {
1481 assert (node->op == op_Shr);
1482 return get_irn_n(node, 1);
1486 set_Shr_right (ir_node *node, ir_node *right) {
1487 assert (node->op == op_Shr);
1488 set_irn_n(node, 1, right);
/* Shrs (arithmetic/signed shift right) accessors: input 0 = value,
   input 1 = shift amount. */
1492 get_Shrs_left (ir_node *node) {
1493 assert (node->op == op_Shrs);
1494 return get_irn_n(node, 0);
1498 set_Shrs_left (ir_node *node, ir_node *left) {
1499 assert (node->op == op_Shrs);
1500 set_irn_n(node, 0, left);
1504 get_Shrs_right (ir_node *node) {
1505 assert (node->op == op_Shrs);
1506 return get_irn_n(node, 1);
1510 set_Shrs_right (ir_node *node, ir_node *right) {
1511 assert (node->op == op_Shrs);
1512 set_irn_n(node, 1, right);
/* Rot (rotate) accessors: input 0 = value, input 1 = rotate amount. */
1516 get_Rot_left (ir_node *node) {
1517 assert (node->op == op_Rot);
1518 return get_irn_n(node, 0);
1522 set_Rot_left (ir_node *node, ir_node *left) {
1523 assert (node->op == op_Rot);
1524 set_irn_n(node, 0, left);
1528 get_Rot_right (ir_node *node) {
1529 assert (node->op == op_Rot);
1530 return get_irn_n(node, 1);
1534 set_Rot_right (ir_node *node, ir_node *right) {
1535 assert (node->op == op_Rot);
1536 set_irn_n(node, 1, right);
/* Cmp (comparison) accessors: input 0 = left, input 1 = right.
   The comparison result is selected by a Proj with a pnc number
   (see pnc_name_arr / get_negated_pnc at the top of this file). */
1540 get_Cmp_left (ir_node *node) {
1541 assert (node->op == op_Cmp);
1542 return get_irn_n(node, 0);
1546 set_Cmp_left (ir_node *node, ir_node *left) {
1547 assert (node->op == op_Cmp);
1548 set_irn_n(node, 0, left);
1552 get_Cmp_right (ir_node *node) {
1553 assert (node->op == op_Cmp);
1554 return get_irn_n(node, 1);
1558 set_Cmp_right (ir_node *node, ir_node *right) {
1559 assert (node->op == op_Cmp);
1560 set_irn_n(node, 1, right);
/* Conv (mode conversion) accessors: single operand at input 0. */
1564 get_Conv_op (ir_node *node) {
1565 assert (node->op == op_Conv);
1566 return get_irn_n(node, 0);
1570 set_Conv_op (ir_node *node, ir_node *op) {
1571 assert (node->op == op_Conv);
1572 set_irn_n(node, 0, op);
/* Cast accessors: operand at input 0; the destination type is kept
   in the node attribute (attr.cast.totype), not as an input. */
1576 get_Cast_op (ir_node *node) {
1577 assert (node->op == op_Cast);
1578 return get_irn_n(node, 0);
1582 set_Cast_op (ir_node *node, ir_node *op) {
1583 assert (node->op == op_Cast);
1584 set_irn_n(node, 0, op);
1588 get_Cast_type (ir_node *node) {
1589 assert (node->op == op_Cast);
1590 return node->attr.cast.totype;
1594 set_Cast_type (ir_node *node, type *to_tp) {
1595 assert (node->op == op_Cast);
1596 node->attr.cast.totype = to_tp;
/* Returns non-zero if the node is one of the unary operations
   handled by get_unop_op/set_unop_op below (Minus, Abs, Not,
   Conv, Cast).  Keep the three lists in sync. */
1600 is_unop (ir_node *node) {
1601 return ( node->op == op_Minus ||
1602 node->op == op_Abs ||
1603 node->op == op_Not ||
1604 node->op == op_Conv ||
1605 node->op == op_Cast );
/* Generic accessor: returns the single operand of any unary node.
   Dispatches to the opcode-specific getter; the NULL default is
   unreachable after the assert (it silences compiler warnings). */
1609 get_unop_op (ir_node *node) {
1610 assert (is_unop(node));
1611 switch (get_irn_opcode (node)) {
1612 case iro_Minus: return get_Minus_op(node); break;
1613 case iro_Abs: return get_Abs_op(node); break;
1614 case iro_Not: return get_Not_op(node); break;
1615 case iro_Conv: return get_Conv_op(node); break;
1616 case iro_Cast: return get_Cast_op(node); break;
1617 default: return NULL;
/* Generic accessor: replaces the single operand of any unary node.
   Mirrors get_unop_op; non-unop nodes are rejected by the assert. */
1622 set_unop_op (ir_node *node, ir_node *op) {
1623 assert (is_unop(node));
1624 switch (get_irn_opcode (node)) {
1625 case iro_Minus: set_Minus_op(node, op); break;
1626 case iro_Abs: set_Abs_op(node, op); break;
1627 case iro_Not: set_Not_op(node, op); break;
1628 case iro_Conv: set_Conv_op(node, op); break;
1629 case iro_Cast: set_Cast_op(node, op); break;
/* Returns non-zero if the node is one of the binary operations
   handled by the generic get/set_binop_left/right accessors below.
   The same 15-opcode list is repeated verbatim in those four
   functions — keep all occurrences in sync. */
1636 is_binop (ir_node *node) {
1637 return (node->op == op_Add ||
1638 node->op == op_Sub ||
1639 node->op == op_Mul ||
1640 node->op == op_Quot ||
1641 node->op == op_DivMod ||
1642 node->op == op_Div ||
1643 node->op == op_Mod ||
1644 node->op == op_And ||
1645 node->op == op_Or ||
1646 node->op == op_Eor ||
1647 node->op == op_Shl ||
1648 node->op == op_Shr ||
1649 node->op == op_Shrs ||
1650 node->op == op_Rot ||
1651 node->op == op_Cmp );
/* Generic accessor: returns the left (first) operand of any binary
   node.  NOTE(review): the assert duplicates the opcode list from
   is_binop instead of calling it — consider `assert(is_binop(node))`
   to avoid the lists drifting apart.  The NULL default is
   unreachable after the assert. */
1655 get_binop_left (ir_node *node) {
1656 assert (node->op == op_Add ||
1657 node->op == op_Sub ||
1658 node->op == op_Mul ||
1659 node->op == op_Quot ||
1660 node->op == op_DivMod ||
1661 node->op == op_Div ||
1662 node->op == op_Mod ||
1663 node->op == op_And ||
1664 node->op == op_Or ||
1665 node->op == op_Eor ||
1666 node->op == op_Shl ||
1667 node->op == op_Shr ||
1668 node->op == op_Shrs ||
1669 node->op == op_Rot ||
1670 node->op == op_Cmp );
1672 switch (get_irn_opcode (node)) {
1673 case iro_Add : return get_Add_left(node); break;
1674 case iro_Sub : return get_Sub_left(node); break;
1675 case iro_Mul : return get_Mul_left(node); break;
1676 case iro_Quot : return get_Quot_left(node); break;
1677 case iro_DivMod: return get_DivMod_left(node); break;
1678 case iro_Div : return get_Div_left(node); break;
1679 case iro_Mod : return get_Mod_left(node); break;
1680 case iro_And : return get_And_left(node); break;
1681 case iro_Or : return get_Or_left(node); break;
1682 case iro_Eor : return get_Eor_left(node); break;
1683 case iro_Shl : return get_Shl_left(node); break;
1684 case iro_Shr : return get_Shr_left(node); break;
1685 case iro_Shrs : return get_Shrs_left(node); break;
1686 case iro_Rot : return get_Rot_left(node); break;
1687 case iro_Cmp : return get_Cmp_left(node); break;
1688 default: return NULL;
/* Generic accessor: replaces the left (first) operand of any binary
   node.  Mirrors get_binop_left; same duplicated-opcode-list caveat. */
1693 set_binop_left (ir_node *node, ir_node *left) {
1694 assert (node->op == op_Add ||
1695 node->op == op_Sub ||
1696 node->op == op_Mul ||
1697 node->op == op_Quot ||
1698 node->op == op_DivMod ||
1699 node->op == op_Div ||
1700 node->op == op_Mod ||
1701 node->op == op_And ||
1702 node->op == op_Or ||
1703 node->op == op_Eor ||
1704 node->op == op_Shl ||
1705 node->op == op_Shr ||
1706 node->op == op_Shrs ||
1707 node->op == op_Rot ||
1708 node->op == op_Cmp );
1710 switch (get_irn_opcode (node)) {
1711 case iro_Add : set_Add_left(node, left); break;
1712 case iro_Sub : set_Sub_left(node, left); break;
1713 case iro_Mul : set_Mul_left(node, left); break;
1714 case iro_Quot : set_Quot_left(node, left); break;
1715 case iro_DivMod: set_DivMod_left(node, left); break;
1716 case iro_Div : set_Div_left(node, left); break;
1717 case iro_Mod : set_Mod_left(node, left); break;
1718 case iro_And : set_And_left(node, left); break;
1719 case iro_Or : set_Or_left(node, left); break;
1720 case iro_Eor : set_Eor_left(node, left); break;
1721 case iro_Shl : set_Shl_left(node, left); break;
1722 case iro_Shr : set_Shr_left(node, left); break;
1723 case iro_Shrs : set_Shrs_left(node, left); break;
1724 case iro_Rot : set_Rot_left(node, left); break;
1725 case iro_Cmp : set_Cmp_left(node, left); break;
/* Generic accessor: returns the right (second) operand of any binary
   node.  Same structure and caveats as get_binop_left. */
1731 get_binop_right (ir_node *node) {
1732 assert (node->op == op_Add ||
1733 node->op == op_Sub ||
1734 node->op == op_Mul ||
1735 node->op == op_Quot ||
1736 node->op == op_DivMod ||
1737 node->op == op_Div ||
1738 node->op == op_Mod ||
1739 node->op == op_And ||
1740 node->op == op_Or ||
1741 node->op == op_Eor ||
1742 node->op == op_Shl ||
1743 node->op == op_Shr ||
1744 node->op == op_Shrs ||
1745 node->op == op_Rot ||
1746 node->op == op_Cmp );
1748 switch (get_irn_opcode (node)) {
1749 case iro_Add : return get_Add_right(node); break;
1750 case iro_Sub : return get_Sub_right(node); break;
1751 case iro_Mul : return get_Mul_right(node); break;
1752 case iro_Quot : return get_Quot_right(node); break;
1753 case iro_DivMod: return get_DivMod_right(node); break;
1754 case iro_Div : return get_Div_right(node); break;
1755 case iro_Mod : return get_Mod_right(node); break;
1756 case iro_And : return get_And_right(node); break;
1757 case iro_Or : return get_Or_right(node); break;
1758 case iro_Eor : return get_Eor_right(node); break;
1759 case iro_Shl : return get_Shl_right(node); break;
1760 case iro_Shr : return get_Shr_right(node); break;
1761 case iro_Shrs : return get_Shrs_right(node); break;
1762 case iro_Rot : return get_Rot_right(node); break;
1763 case iro_Cmp : return get_Cmp_right(node); break;
1764 default: return NULL;
/* Generic accessor: replaces the right (second) operand of any binary
   node.  Mirrors get_binop_right. */
1769 set_binop_right (ir_node *node, ir_node *right) {
1770 assert (node->op == op_Add ||
1771 node->op == op_Sub ||
1772 node->op == op_Mul ||
1773 node->op == op_Quot ||
1774 node->op == op_DivMod ||
1775 node->op == op_Div ||
1776 node->op == op_Mod ||
1777 node->op == op_And ||
1778 node->op == op_Or ||
1779 node->op == op_Eor ||
1780 node->op == op_Shl ||
1781 node->op == op_Shr ||
1782 node->op == op_Shrs ||
1783 node->op == op_Rot ||
1784 node->op == op_Cmp );
1786 switch (get_irn_opcode (node)) {
1787 case iro_Add : set_Add_right(node, right); break;
1788 case iro_Sub : set_Sub_right(node, right); break;
1789 case iro_Mul : set_Mul_right(node, right); break;
1790 case iro_Quot : set_Quot_right(node, right); break;
1791 case iro_DivMod: set_DivMod_right(node, right); break;
1792 case iro_Div : set_Div_right(node, right); break;
1793 case iro_Mod : set_Mod_right(node, right); break;
1794 case iro_And : set_And_right(node, right); break;
1795 case iro_Or : set_Or_right(node, right); break;
1796 case iro_Eor : set_Eor_right(node, right); break;
1797 case iro_Shl : set_Shl_right(node, right); break;
1798 case iro_Shr : set_Shr_right(node, right); break;
1799 case iro_Shrs : set_Shrs_right(node, right); break;
1800 case iro_Rot : set_Rot_right(node, right); break;
1801 case iro_Cmp : set_Cmp_right(node, right); break;
/* Phi node accessors.  get_Phi_preds_arr exposes the raw predecessor
   array: get_irn_in(node)[0] is the block reference, so [1] is the
   first data predecessor — the returned pointer aliases the node's
   internal in-array, callers must not free it. */
1808 get_Phi_preds_arr (ir_node *node) {
1809 assert (node->op == op_Phi);
1810 return (ir_node **)&(get_irn_in(node)[1]);
1814 get_Phi_n_preds (ir_node *node) {
1815 assert (node->op == op_Phi);
1816 return (get_irn_arity(node));
/* NOTE(review): the body of set_Phi_n_preds is not visible in this
   excerpt; only its assert is shown. */
1820 INLINE void set_Phi_n_preds (ir_node *node, int n_preds) {
1821 assert (node->op == op_Phi);
1826 get_Phi_pred (ir_node *node, int pos) {
1827 assert (node->op == op_Phi);
1828 return get_irn_n(node, pos);
1832 set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
1833 assert (node->op == op_Phi);
1834 set_irn_n(node, pos, pred);
/* Load node accessors: input 0 = memory state, input 1 = address. */
1838 get_Load_mem (ir_node *node) {
1839 assert (node->op == op_Load);
1840 return get_irn_n(node, 0);
1844 set_Load_mem (ir_node *node, ir_node *mem) {
1845 assert (node->op == op_Load);
1846 set_irn_n(node, 0, mem);
1850 get_Load_ptr (ir_node *node) {
1851 assert (node->op == op_Load);
1852 return get_irn_n(node, 1);
1856 set_Load_ptr (ir_node *node, ir_node *ptr) {
1857 assert (node->op == op_Load);
1858 set_irn_n(node, 1, ptr);
/* Store node accessors: input 0 = memory state, input 1 = address,
   input 2 = value to store. */
1863 get_Store_mem (ir_node *node) {
1864 assert (node->op == op_Store);
1865 return get_irn_n(node, 0);
1869 set_Store_mem (ir_node *node, ir_node *mem) {
1870 assert (node->op == op_Store);
1871 set_irn_n(node, 0, mem);
1875 get_Store_ptr (ir_node *node) {
1876 assert (node->op == op_Store);
1877 return get_irn_n(node, 1);
1881 set_Store_ptr (ir_node *node, ir_node *ptr) {
1882 assert (node->op == op_Store);
1883 set_irn_n(node, 1, ptr);
1887 get_Store_value (ir_node *node) {
1888 assert (node->op == op_Store);
1889 return get_irn_n(node, 2);
1893 set_Store_value (ir_node *node, ir_node *value) {
1894 assert (node->op == op_Store);
1895 set_irn_n(node, 2, value);
/* Alloc node accessors: input 0 = memory state, input 1 = size.
   The allocated type and the where-clause (stack/heap) live in the
   node attribute attr.a. */
1899 get_Alloc_mem (ir_node *node) {
1900 assert (node->op == op_Alloc);
1901 return get_irn_n(node, 0);
1905 set_Alloc_mem (ir_node *node, ir_node *mem) {
1906 assert (node->op == op_Alloc);
1907 set_irn_n(node, 0, mem);
1911 get_Alloc_size (ir_node *node) {
1912 assert (node->op == op_Alloc);
1913 return get_irn_n(node, 1);
1917 set_Alloc_size (ir_node *node, ir_node *size) {
1918 assert (node->op == op_Alloc);
1919 set_irn_n(node, 1, size);
1923 get_Alloc_type (ir_node *node) {
1924 assert (node->op == op_Alloc);
/* skip_tid resolves type-id indirection; the resolved type is
   written back so later reads are direct. */
1925 return node->attr.a.type = skip_tid(node->attr.a.type);
1929 set_Alloc_type (ir_node *node, type *tp) {
1930 assert (node->op == op_Alloc);
1931 node->attr.a.type = tp;
1935 get_Alloc_where (ir_node *node) {
1936 assert (node->op == op_Alloc);
1937 return node->attr.a.where;
1941 set_Alloc_where (ir_node *node, where_alloc where) {
1942 assert (node->op == op_Alloc);
1943 node->attr.a.where = where;
/* Free node accessors: input 0 = memory state, input 1 = pointer to
   free, input 2 = size.  The freed object's type is the attribute
   attr.f. */
1948 get_Free_mem (ir_node *node) {
1949 assert (node->op == op_Free);
1950 return get_irn_n(node, 0);
1954 set_Free_mem (ir_node *node, ir_node *mem) {
1955 assert (node->op == op_Free);
1956 set_irn_n(node, 0, mem);
1960 get_Free_ptr (ir_node *node) {
1961 assert (node->op == op_Free);
1962 return get_irn_n(node, 1);
1966 set_Free_ptr (ir_node *node, ir_node *ptr) {
1967 assert (node->op == op_Free);
1968 set_irn_n(node, 1, ptr);
1972 get_Free_size (ir_node *node) {
1973 assert (node->op == op_Free);
1974 return get_irn_n(node, 2);
1978 set_Free_size (ir_node *node, ir_node *size) {
1979 assert (node->op == op_Free);
1980 set_irn_n(node, 2, size);
1984 get_Free_type (ir_node *node) {
1985 assert (node->op == op_Free);
/* Same skip_tid write-back caching as get_Alloc_type. */
1986 return node->attr.f = skip_tid(node->attr.f);
/* NOTE(review): set_Free_type's assignment line is not visible in
   this excerpt; only its assert is shown. */
1990 set_Free_type (ir_node *node, type *tp) {
1991 assert (node->op == op_Free);
/* Sync node accessors (memory synchronization point with a variable
   number of memory predecessors).  Same in-array layout as Phi:
   slot 0 holds the block, data preds start at slot 1. */
1996 get_Sync_preds_arr (ir_node *node) {
1997 assert (node->op == op_Sync);
1998 return (ir_node **)&(get_irn_in(node)[1]);
2002 get_Sync_n_preds (ir_node *node) {
2003 assert (node->op == op_Sync);
2004 return (get_irn_arity(node));
/* NOTE(review): the body of set_Sync_n_preds is not visible in this
   excerpt; only its assert is shown. */
2009 set_Sync_n_preds (ir_node *node, int n_preds) {
2010 assert (node->op == op_Sync);
2015 get_Sync_pred (ir_node *node, int pos) {
2016 assert (node->op == op_Sync);
2017 return get_irn_n(node, pos);
2021 set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
2022 assert (node->op == op_Sync);
2023 set_irn_n(node, pos, pred);
/* Proj node accessors.  is_Proj() also accepts Filter nodes outside
   the interprocedural view, so get_Proj_proj dispatches on the real
   opcode to read the projection number from the right attribute. */
2027 get_Proj_pred (ir_node *node) {
2028 assert (is_Proj(node));
2029 return get_irn_n(node, 0);
2033 set_Proj_pred (ir_node *node, ir_node *pred) {
2034 assert (is_Proj(node));
2035 set_irn_n(node, 0, pred);
2039 get_Proj_proj (ir_node *node) {
2040 assert (is_Proj(node));
2041 if (get_irn_opcode(node) == iro_Proj) {
2042 return node->attr.proj;
2044 assert(get_irn_opcode(node) == iro_Filter);
2045 return node->attr.filter.proj;
/* NOTE(review): asymmetry — the getter above accepts Filter nodes,
   but this setter asserts op_Proj only.  Callers holding a Filter
   must use set_Filter_proj; confirm whether this is intentional. */
2050 set_Proj_proj (ir_node *node, long proj) {
2051 assert (node->op == op_Proj);
2052 node->attr.proj = proj;
/* Tuple node accessors.  Same in-array layout as Phi/Sync:
   slot 0 = block, data preds from slot 1 on. */
2056 get_Tuple_preds_arr (ir_node *node) {
2057 assert (node->op == op_Tuple);
2058 return (ir_node **)&(get_irn_in(node)[1]);
2062 get_Tuple_n_preds (ir_node *node) {
2063 assert (node->op == op_Tuple);
2064 return (get_irn_arity(node));
/* NOTE(review): the body of set_Tuple_n_preds is not visible in this
   excerpt; only its assert is shown. */
2069 set_Tuple_n_preds (ir_node *node, int n_preds) {
2070 assert (node->op == op_Tuple);
2075 get_Tuple_pred (ir_node *node, int pos) {
2076 assert (node->op == op_Tuple);
2077 return get_irn_n(node, pos);
2081 set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
2082 assert (node->op == op_Tuple);
2083 set_irn_n(node, pos, pred);
/* Id node accessors: an Id simply forwards its single predecessor
   (input 0).  See skip_nop below for how Id chains are collapsed. */
2087 get_Id_pred (ir_node *node) {
2088 assert (node->op == op_Id);
2089 return get_irn_n(node, 0);
2093 set_Id_pred (ir_node *node, ir_node *pred) {
2094 assert (node->op == op_Id);
2095 set_irn_n(node, 0, pred);
/* Filter node accessors (interprocedural analog of Proj).  The
   projection number lives in attr.filter.proj.
   NOTE(review): the return/assignment statements of get_Filter_pred
   and set_Filter_pred are not visible in this excerpt; only the
   asserts are shown. */
2100 get_Filter_pred (ir_node *node) {
2101 assert(node->op == op_Filter);
2105 set_Filter_pred (ir_node *node, ir_node *pred) {
2106 assert(node->op == op_Filter);
2110 get_Filter_proj(ir_node *node) {
2111 assert(node->op == op_Filter);
2112 return node->attr.filter.proj;
2115 set_Filter_proj (ir_node *node, long proj) {
2116 assert(node->op == op_Filter);
2117 node->attr.filter.proj = proj;
2120 /* Don't use get_irn_arity, get_irn_n in implementation as access
2121 shall work independent of view!!! */
/* Installs the interprocedural (call-graph) predecessor array of a
   Filter node.  in_cg mirrors the normal in-array layout: slot 0
   keeps the block reference, the arity preds go into slots 1..arity.
   The array and its backedge flags are (re)allocated on the current
   graph's obstack only when absent or when the arity changed;
   otherwise the existing storage is reused and just overwritten. */
2122 void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
2123 assert(node->op == op_Filter);
2124 if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
2125 node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
2126 node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
2127 memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
2128 node->attr.filter.in_cg[0] = node->in[0];
2130 memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Replaces a single interprocedural predecessor of a Filter node.
   pos is 0-based over the data preds; +1 skips the block in slot 0. */
2133 void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
2134 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2135 0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
2136 node->attr.filter.in_cg[pos + 1] = pred;
/* Number of interprocedural predecessors of a Filter node
   (array length minus the block slot). */
2138 int get_Filter_n_cg_preds(ir_node *node) {
2139 assert(node->op == op_Filter && node->attr.filter.in_cg);
2140 return (ARR_LEN(node->attr.filter.in_cg) - 1);
/* Returns the pos-th interprocedural predecessor of a Filter node.
   NOTE(review): the declaration of the local `arity` and part of the
   assert condition are not visible in this excerpt. */
2142 ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
2144 assert(node->op == op_Filter && node->attr.filter.in_cg &&
2146 arity = ARR_LEN(node->attr.filter.in_cg);
2147 assert(pos < arity - 1);
2148 return node->attr.filter.in_cg[pos + 1];
/* Returns the ir_graph a node belongs to.  Only CallBegin, EndReg,
   EndExcept and Start carry an irg attribute; any other opcode
   trips the assert. */
2153 get_irn_irg(ir_node *node) {
2154 if (get_irn_op(node) == op_CallBegin) {
2155 return node->attr.callbegin.irg;
2156 } else if (get_irn_op(node) == op_EndReg ||
2157 get_irn_op(node) == op_EndExcept) {
2158 return node->attr.end.irg;
2159 } else if (get_irn_op(node) == op_Start) {
2160 return node->attr.start.irg;
2162 assert(0 && "no irg attr");
2168 /******************************************************************/
2169 /* Auxiliary routines */
2170 /******************************************************************/
/* Returns the predecessor of a Proj (or Filter, via is_Proj); any
   other node — including NULL — is passed through unchanged, hence
   the deliberate absence of an assert. */
2173 skip_Proj (ir_node *node) {
2174 /* don't assert node !!! */
2175 if (node && is_Proj(node)) {
2176 return get_Proj_pred(node);
/* Resolves a Proj whose (Id-skipped) predecessor is a Tuple to the
   Tuple's corresponding input, recursing through nested
   Proj-of-Tuple chains.  A no-op when normalization optimizations
   are disabled.
   NOTE(review): the declaration of the local `pred` and the final
   fall-through return are not visible in this excerpt. */
2183 skip_Tuple (ir_node *node) {
2186 if (!get_opt_normalize()) return node;
2188 node = skip_nop(node);
2189 if (get_irn_op(node) == op_Proj) {
2190 pred = skip_nop(get_Proj_pred(node));
2191 if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
2192 pred = skip_nop(skip_Tuple(pred));
2193 if (get_irn_op(pred) == op_Tuple)
2194 return get_Tuple_pred(pred, get_Proj_proj(node));
2199 /* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
2200 than any other approach, as Id chains are resolved and all point to the real node, or
2201 all id's are self loops. */
/* Follows chains of Id nodes to the real node, with path
   compression: each visited Id is rewritten to point directly at the
   result.  A no-op when normalization is disabled. */
2203 skip_nop (ir_node *node) {
2204 /* don't assert node !!! */
2206 if (!get_opt_normalize()) return node;
2208 /* Don't use get_Id_pred: We get into an endless loop for
2209 self-referencing Ids. */
2210 if (node && (node->op == op_Id) && (node != node->in[0+1])) {
/* in[0+1]: slot 0 is the block, slot 1 the single Id operand. */
2211 ir_node *rem_pred = node->in[0+1];
2214 assert (get_irn_arity (node) > 0);
/* Temporarily make this Id a self-loop so a cyclic Id chain
   terminates the recursion instead of looping forever. */
2216 node->in[0+1] = node;
2217 res = skip_nop(rem_pred);
2218 if (res->op == op_Id) /* self-loop */ return node;
2220 node->in[0+1] = res;
/* Public alias for skip_nop: skips Id nodes. */
2228 skip_Id (ir_node *node) {
2229 return skip_nop(node);
/* Opcode predicates.  is_Bad tolerates NULL (returns false for it);
   is_no_Block and is_Block do not NULL-check their argument.
   NOTE(review): the explicit return statements of is_Bad are not
   visible in this excerpt; only the condition line is shown. */
2233 is_Bad (ir_node *node) {
2235 if ((node) && get_irn_opcode(node) == iro_Bad)
2241 is_no_Block (ir_node *node) {
2243 return (get_irn_opcode(node) != iro_Block);
2247 is_Block (ir_node *node) {
2249 return (get_irn_opcode(node) == iro_Block);
/* A node counts as a Proj if it is a real Proj, or a Filter while
   NOT in the interprocedural view (Filter then plays the Proj role). */
2253 is_Proj (const ir_node *node) {
2255 return node->op == op_Proj
2256 || (!interprocedural_view && node->op == op_Filter);
2259 /* Returns true if the operation manipulates control flow. */
2261 is_cfop(ir_node *node) {
2262 return is_cfopcode(get_irn_op(node));
2265 /* Returns true if the operation manipulates interprocedural control flow:
2266 CallBegin, EndReg, EndExcept */
2267 INLINE int is_ip_cfop(ir_node *node) {
2268 return is_ip_cfopcode(get_irn_op(node));
/* Returns the graph an interprocedural control-flow node belongs to,
   dispatching to the opcode-specific irg getter.  Any other opcode
   fails the is_ip_cfop assert; the trailing NULL return only
   silences the compiler.
   NOTE(review): the `case iro_...:` labels and `default:` line of
   this switch are not visible in this excerpt. */
2271 ir_graph *get_ip_cfop_irg(ir_node *n) {
2272 switch (get_irn_opcode(n)) {
2274 return get_EndReg_irg(n);
2276 return get_EndExcept_irg(n);
2278 return get_CallBegin_irg(n);
2280 assert(is_ip_cfop(n));
2282 return NULL; /* should never be reached */
2285 /* Returns true if the operation can change the control flow because
/* "Fragile" operations may raise an exception (division by zero,
   failing memory access, failing allocation) or are Bad/Unknown
   placeholders; these need exception control-flow edges. */
2288 is_fragile_op(ir_node *node) {
2289 return ( (get_irn_opcode(node) == iro_Call)
2290 || (get_irn_opcode(node) == iro_Quot)
2291 || (get_irn_opcode(node) == iro_DivMod)
2292 || (get_irn_opcode(node) == iro_Div)
2293 || (get_irn_opcode(node) == iro_Mod)
2294 || (get_irn_opcode(node) == iro_Load)
2295 || (get_irn_opcode(node) == iro_Store)
2296 || (get_irn_opcode(node) == iro_Alloc)
2297 || (get_irn_opcode(node) == iro_Bad)
2298 || (get_irn_opcode(node) == iro_Unknown));
2302 /* Returns the memory operand of fragile operations. */
2303 ir_node *get_fragile_op_mem(ir_node *node) {
2304 assert(node && is_fragile_op(node));
2306 switch (get_irn_opcode (node)) {
2315 return get_irn_n(node, 0);
2320 assert(0 && "not reached");