3 * File name: ir/ir/irnode.c
4 * Purpose: Representation of an intermediate operation.
5 * Author: Martin Trapp, Christian Schaefer
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1998-2003 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
23 #include "irgraph_t.h"
26 #include "irbackedge_t.h"
30 #include "iredges_t.h"
34 /* some constants fixing the positions of nodes predecessors */
36 #define CALL_PARAM_OFFSET 2
37 #define FUNCCALL_PARAM_OFFSET 1
38 #define SEL_INDEX_OFFSET 2
39 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
40 #define END_KEEPALIVE_OFFSET 0
/* Human-readable names for the pn_Cmp relation values, indexed by pnc.
   NOTE(review): the listing is elided here -- the trailing entries and the
   closing "};" of the array are not visible in this view. */
42 static const char *pnc_name_arr [] = {
43 "False", "Eq", "Lt", "Le",
44 "Gt", "Ge", "Lg", "Leg", "Uo",
45 "Ue", "Ul", "Ule", "Ug", "Uge",
50 * returns the pnc name from a pnc constant
/* NOTE(review): no bounds check -- pnc must be a valid index into
   pnc_name_arr; that is the caller's responsibility. */
52 const char *get_pnc_string(int pnc) {
53 return pnc_name_arr[pnc];
57 * Calculates the negated (Complement(R)) pnc condition.
59 int get_negated_pnc(int pnc, ir_mode *mode) {
62 /* do NOT add the Uo bit for non-floating point values */
63 if (! mode_is_float(mode))
69 /* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
/* Swaps the Lt and Gt bits of the relation and keeps every other bit
   (Eq, Uo, ...) untouched.
   Fixed: the function computed `code` but fell off the end without
   returning it (and lacked its return-type line). */
70 int
71 get_inversed_pnc(int pnc) {
72 int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
73 int lesser = pnc & pn_Cmp_Lt;
74 int greater = pnc & pn_Cmp_Gt;
76 code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0);
78 return code;
79 }
81 const char *pns_name_arr [] = {
82 "initial_exec", "global_store",
83 "frame_base", "globals", "args"
86 const char *symconst_name_arr [] = {
87 "type_tag", "size", "addr_name", "addr_ent"
91 * Indicates, whether additional data can be registered to ir nodes.
92 * If set to 1, this is not possible anymore.
94 static int forbid_new_data = 0;
97 * The amount of additional space for custom data to be allocated upon
98 * creating a new node.
100 unsigned firm_add_node_size = 0;
103 /* register new space for every node */
/* Grows the per-node custom-data reservation and returns the new total
   number of extra bytes.  Must happen before any node is created --
   enforced via the forbid_new_data flag (see below). */
104 unsigned register_additional_node_data(unsigned size) {
105 assert(!forbid_new_data && "Too late to register additional node data");
110 return firm_add_node_size += size;
116 /* Forbid the addition of new data to an ir node. */
121 * irnode constructor.
122 * Create a new irnode in irg, with an op, mode, arity and
123 * some incoming irnodes.
124 * If arity is negative, a node with a dynamic array is created.
127 new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
128 int arity, ir_node **in)
/* Node size = ir_node header up to `attr` + op-specific attribute size
   + space registered via register_additional_node_data(). */
131 size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
134 assert(irg && op && mode);
135 p = obstack_alloc (irg->obst, node_size);
136 memset(p, 0, node_size);
/* Registered custom data lives in FRONT of the ir_node proper, so the
   public node pointer is offset past it. */
137 res = (ir_node *) (p + firm_add_node_size);
139 res->kind = k_ir_node;
/* Negative arity: growable in-array; otherwise fixed array on the obstack.
   Slot 0 of res->in is reserved for the block predecessor. */
145 res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
147 res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1));
148 memcpy (&res->in[1], in, sizeof (ir_node *) * arity);
152 set_irn_dbg_info(res, db);
156 res->node_nr = get_irp_new_node_nr();
159 #if FIRM_EDGES_INPLACE
/* Initialize out-edge bookkeeping and announce every initial predecessor;
   for non-Block nodes the block counts as predecessor index -1. */
162 int not_a_block = is_no_Block(res);
164 INIT_LIST_HEAD(&res->edge_info.outs_head);
166 INIT_LIST_HEAD(&res->attr.block.succ_head);
169 for (i = 0, n = arity + not_a_block; i < n; ++i)
170 edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg);
174 hook_new_node(irg, res);
179 /*-- getting some parameters from ir_nodes --*/
182 (is_ir_node)(const void *thing) {
183 return _is_ir_node(thing);
187 (get_irn_intra_arity)(const ir_node *node) {
188 return _get_irn_intra_arity(node);
192 (get_irn_inter_arity)(const ir_node *node) {
193 return _get_irn_inter_arity(node);
196 int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
199 (get_irn_arity)(const ir_node *node) {
200 return _get_irn_arity(node);
203 /* Returns the array with ins. This array is shifted with respect to the
204 array accessed by get_irn_n: The block operand is at position 0 not -1.
205 (@@@ This should be changed.)
206 The order of the predecessors in this array is not guaranteed, except that
207 lists of operands as predecessors of Block or arguments of a Call are
210 get_irn_in (const ir_node *node) {
212 if (get_interprocedural_view()) { /* handle Filter and Block specially */
213 if (get_irn_opcode(node) == iro_Filter) {
214 assert(node->attr.filter.in_cg);
215 return node->attr.filter.in_cg;
216 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
217 return node->attr.block.in_cg;
219 /* else fall through */
225 set_irn_in (ir_node *node, int arity, ir_node **in) {
229 if (get_interprocedural_view()) { /* handle Filter and Block specially */
230 if (get_irn_opcode(node) == iro_Filter) {
231 assert(node->attr.filter.in_cg);
232 arr = &node->attr.filter.in_cg;
233 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
234 arr = &node->attr.block.in_cg;
241 if (arity != ARR_LEN(*arr) - 1) {
242 ir_node * block = (*arr)[0];
243 *arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
246 fix_backedges(current_ir_graph->obst, node);
248 for (i = 0; i < arity; i++) {
249 edges_notify_edge(node, i, in[i], (*arr)[i+1], current_ir_graph);
252 memcpy((*arr) + 1, in, sizeof(ir_node *) * arity);
256 (get_irn_intra_n)(const ir_node *node, int n) {
257 return _get_irn_intra_n (node, n);
261 (get_irn_inter_n)(const ir_node *node, int n) {
262 return _get_irn_inter_n (node, n);
265 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
268 (get_irn_n)(const ir_node *node, int n) {
269 return _get_irn_n(node, n);
/*
 * Sets the n-th predecessor of a node; n == -1 addresses the block
 * predecessor (stored at in[0]).  Keeps the interprocedural (in_cg)
 * views of Filter and Block nodes in sync and notifies the hook and
 * edge infrastructure.
 */
273 set_irn_n (ir_node *node, int n, ir_node *in) {
274 assert(node && node->kind == k_ir_node);
275 assert(-1 <= n); /* restored lower bound: -1 (block slot) is the minimum */
276 assert(n < get_irn_arity(node));
277 assert(in && in->kind == k_ir_node);
279 if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
280 /* Change block pred in both views! */
281 node->in[n + 1] = in;
282 assert(node->attr.filter.in_cg);
283 node->attr.filter.in_cg[n + 1] = in;
286 if (get_interprocedural_view()) { /* handle Filter and Block specially */
287 if (get_irn_opcode(node) == iro_Filter) {
288 assert(node->attr.filter.in_cg);
289 node->attr.filter.in_cg[n + 1] = in;
291 } else if (get_irn_opcode(node) == iro_Block && node->attr.block.in_cg) {
292 node->attr.block.in_cg[n + 1] = in;
295 /* else fall through */
299 hook_set_irn_n(node, n, in, node->in[n + 1]);
301 /* Here, we rely on src and tgt being in the current ir graph */
302 edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph);
304 node->in[n + 1] = in;
308 (get_irn_mode)(const ir_node *node) {
309 return _get_irn_mode(node);
313 (set_irn_mode)(ir_node *node, ir_mode *mode)
315 _set_irn_mode(node, mode);
319 get_irn_modecode (const ir_node *node)
322 return node->mode->code;
325 /** Gets the string representation of the mode. */
327 get_irn_modename (const ir_node *node)
330 return get_mode_name(node->mode);
334 get_irn_modeident (const ir_node *node)
337 return get_mode_ident(node->mode);
341 (get_irn_op)(const ir_node *node)
343 return _get_irn_op(node);
346 /* should be private to the library: */
348 set_irn_op (ir_node *node, ir_op *op)
355 (get_irn_opcode)(const ir_node *node)
357 return _get_irn_opcode(node);
361 get_irn_opname (const ir_node *node)
364 if ((get_irn_op((ir_node *)node) == op_Phi) &&
365 (get_irg_phase_state(get_irn_irg((ir_node *)node)) == phase_building) &&
366 (get_irn_arity((ir_node *)node) == 0)) return "Phi0";
367 return get_id_str(node->op->name);
371 get_irn_opident (const ir_node *node)
374 return node->op->name;
378 (get_irn_visited)(const ir_node *node)
380 return _get_irn_visited(node);
384 (set_irn_visited)(ir_node *node, unsigned long visited)
386 _set_irn_visited(node, visited);
390 (mark_irn_visited)(ir_node *node) {
391 _mark_irn_visited(node);
395 (irn_not_visited)(const ir_node *node) {
396 return _irn_not_visited(node);
400 (irn_visited)(const ir_node *node) {
401 return _irn_visited(node);
405 (set_irn_link)(ir_node *node, void *link) {
406 _set_irn_link(node, link);
410 (get_irn_link)(const ir_node *node) {
411 return _get_irn_link(node);
415 (get_irn_pinned)(const ir_node *node) {
416 return _get_irn_pinned(node);
420 (is_irn_pinned_in_irg) (const ir_node *node) {
421 return _is_irn_pinned_in_irg(node);
/* Sets the pin state of a node whose op can raise an exception.
   Fixed: restored the missing early `return;` after the Tuple check --
   without it the following assert became the body of the `if`, so the
   Tuple early-out did nothing and the assert was skipped for everything
   else. */
424 void set_irn_pinned(ir_node *node, op_pin_state state) {
425 /* due to optimization an op may be turned into a Tuple */
426 if (get_irn_op(node) == op_Tuple)
427 return;
429 assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
430 assert(state == op_pin_state_pinned || state == op_pin_state_floats);
432 node->attr.except.pin_state = state;
435 #ifdef DO_HEAPANALYSIS
436 /* Access the abstract interpretation information of a node.
437 Returns NULL if no such information is available. */
438 struct abstval *get_irn_abst_value(ir_node *n) {
441 /* Set the abstract interpretation information of a node. */
442 void set_irn_abst_value(ir_node *n, struct abstval *os) {
445 struct section *firm_get_irn_section(ir_node *n) {
448 void firm_set_irn_section(ir_node *n, struct section *s) {
452 /* Dummies needed for firmjni. */
453 struct abstval *get_irn_abst_value(ir_node *n) { return NULL; }
454 void set_irn_abst_value(ir_node *n, struct abstval *os) {}
455 struct section *firm_get_irn_section(ir_node *n) { return NULL; }
456 void firm_set_irn_section(ir_node *n, struct section *s) {}
457 #endif /* DO_HEAPANALYSIS */
460 /* Outputs a unique number for this node */
462 get_irn_node_nr(const ir_node *node) {
465 return node->node_nr;
472 get_irn_const_attr (ir_node *node)
474 assert (node->op == op_Const);
475 return node->attr.con;
479 get_irn_proj_attr (ir_node *node)
481 assert (node->op == op_Proj);
482 return node->attr.proj;
486 get_irn_alloc_attr (ir_node *node)
488 assert (node->op == op_Alloc);
493 get_irn_free_attr (ir_node *node)
495 assert (node->op == op_Free);
500 get_irn_symconst_attr (ir_node *node)
502 assert (node->op == op_SymConst);
507 get_irn_call_attr (ir_node *node)
509 assert (node->op == op_Call);
510 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
514 get_irn_sel_attr (ir_node *node)
516 assert (node->op == op_Sel);
521 get_irn_phi_attr (ir_node *node)
523 assert (node->op == op_Phi);
524 return node->attr.phi0_pos;
528 get_irn_block_attr (ir_node *node)
530 assert (node->op == op_Block);
531 return node->attr.block;
535 get_irn_load_attr (ir_node *node)
537 assert (node->op == op_Load);
538 return node->attr.load;
542 get_irn_store_attr (ir_node *node)
544 assert (node->op == op_Store);
545 return node->attr.store;
549 get_irn_except_attr (ir_node *node)
551 assert (node->op == op_Div || node->op == op_Quot ||
552 node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc);
553 return node->attr.except;
557 get_irn_generic_attr (ir_node *node) {
561 /** manipulate fields of individual nodes **/
563 /* this works for all except Block */
/* Returns the Block a node belongs to: the block is kept as predecessor
   slot -1.  Only meaningful while the node is pinned in its graph. */
565 get_nodes_block (const ir_node *node) {
566 assert (!(node->op == op_Block));
567 assert (is_irn_pinned_in_irg(node) && "block info may be incorrect");
568 return get_irn_n(node, -1);
/* Moves a node into another Block by rewriting predecessor slot -1. */
572 set_nodes_block (ir_node *node, ir_node *block) {
573 assert (!(node->op == op_Block));
574 set_irn_n(node, -1, block);
577 /* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
578 * from Start. If so returns frame type, else Null. */
579 type *is_frame_pointer(ir_node *n) {
580 if ((get_irn_op(n) == op_Proj) &&
581 (get_Proj_proj(n) == pn_Start_P_frame_base)) {
582 ir_node *start = get_Proj_pred(n);
583 if (get_irn_op(start) == op_Start) {
584 return get_irg_frame_type(get_irn_irg(start));
590 /* Test whether arbitrary node is globals pointer, i.e. Proj(pn_Start_P_globals)
591 * from Start. If so returns global type, else Null. */
592 type *is_globals_pointer(ir_node *n) {
593 if ((get_irn_op(n) == op_Proj) &&
594 (get_Proj_proj(n) == pn_Start_P_globals)) {
595 ir_node *start = get_Proj_pred(n);
596 if (get_irn_op(start) == op_Start) {
597 return get_glob_type();
603 /* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
604 * from Start. If so returns 1, else 0. */
605 int is_value_arg_pointer(ir_node *n) {
606 if ((get_irn_op(n) == op_Proj) &&
607 (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
608 (get_irn_op(get_Proj_pred(n)) == op_Start))
613 /* Returns an array with the predecessors of the Block. Depending on
614 the implementation of the graph data structure this can be a copy of
615 the internal representation of predecessors as well as the internal
616 array itself. Therefore writing to this array might obstruct the ir. */
618 get_Block_cfgpred_arr (ir_node *node)
620 assert ((node->op == op_Block));
621 return (ir_node **)&(get_irn_in(node)[1]);
625 (get_Block_n_cfgpreds)(ir_node *node) {
626 return get_Block_n_cfgpreds(node);
630 (get_Block_cfgpred)(ir_node *node, int pos) {
631 return get_Block_cfgpred(node, pos);
635 set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) {
636 assert (node->op == op_Block);
637 set_irn_n(node, pos, pred);
641 (get_Block_cfgpred_block)(ir_node *node, int pos) {
642 return _get_Block_cfgpred_block(node, pos);
646 get_Block_matured (ir_node *node) {
647 assert (node->op == op_Block);
648 return node->attr.block.matured;
652 set_Block_matured (ir_node *node, bool matured) {
653 assert (node->op == op_Block);
654 node->attr.block.matured = matured;
658 (get_Block_block_visited)(ir_node *node) {
659 return _get_Block_block_visited(node);
663 (set_Block_block_visited)(ir_node *node, unsigned long visit) {
664 _set_Block_block_visited(node, visit);
667 /* For this current_ir_graph must be set. */
669 (mark_Block_block_visited)(ir_node *node) {
670 _mark_Block_block_visited(node);
674 (Block_not_block_visited)(ir_node *node) {
675 return _Block_not_block_visited(node);
679 get_Block_graph_arr (ir_node *node, int pos) {
680 assert (node->op == op_Block);
681 return node->attr.block.graph_arr[pos+1];
685 set_Block_graph_arr (ir_node *node, int pos, ir_node *value) {
686 assert (node->op == op_Block);
687 node->attr.block.graph_arr[pos+1] = value;
/* Installs the interprocedural (call-graph) predecessor array of a Block.
   (Re)allocates in_cg when it is absent or its arity changed; slot 0 is
   reserved (NULL) to mirror the ordinary in-array layout. */
690 void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) {
691 assert(node->op == op_Block);
692 if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
693 node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
694 node->attr.block.in_cg[0] = NULL;
695 node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity);
697 /* Fix backedge array. fix_backedges() operates depending on
698 interprocedural_view. */
/* Temporarily force the interprocedural view so fix_backedges() works on
   in_cg, then restore the caller's setting. */
699 int ipv = get_interprocedural_view();
700 set_interprocedural_view(true);
701 fix_backedges(current_ir_graph->obst, node);
702 set_interprocedural_view(ipv);
705 memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
708 void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred) {
709 assert(node->op == op_Block &&
710 node->attr.block.in_cg &&
711 0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
712 node->attr.block.in_cg[pos + 1] = pred;
715 ir_node ** get_Block_cg_cfgpred_arr(ir_node * node) {
716 assert(node->op == op_Block);
717 return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
720 int get_Block_cg_n_cfgpreds(ir_node * node) {
721 assert(node->op == op_Block);
722 return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
725 ir_node * get_Block_cg_cfgpred(ir_node * node, int pos) {
726 assert(node->op == op_Block && node->attr.block.in_cg);
727 return node->attr.block.in_cg[pos + 1];
730 void remove_Block_cg_cfgpred_arr(ir_node * node) {
731 assert(node->op == op_Block);
732 node->attr.block.in_cg = NULL;
735 ir_node *(set_Block_dead)(ir_node *block) {
736 return _set_Block_dead(block);
739 int (is_Block_dead)(const ir_node *block) {
740 return _is_Block_dead(block);
743 ir_extblk *get_Block_extbb(const ir_node *block) {
744 assert(is_Block(block));
745 return block->attr.block.extblk;
748 void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
749 assert(is_Block(block));
750 block->attr.block.extblk = extblk;
754 set_Start_irg(ir_node *node, ir_graph *irg) {
755 assert(node->op == op_Start);
756 assert(is_ir_graph(irg));
757 assert(0 && " Why set irg? -- use set_irn_irg");
761 get_End_n_keepalives(ir_node *end) {
762 assert (end->op == op_End);
763 return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
767 get_End_keepalive(ir_node *end, int pos) {
768 assert (end->op == op_End);
769 return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
773 add_End_keepalive (ir_node *end, ir_node *ka) {
774 assert (end->op == op_End);
775 ARR_APP1 (ir_node *, end->in, ka);
779 set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
780 assert (end->op == op_End);
781 set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
/* Frees the dynamically allocated keep-alive in-array of an End node and
   poisons the pointer so later accesses fail early. */
785 free_End (ir_node *end) {
786 assert (end->op == op_End);
788 DEL_ARR_F(end->in); /* GL @@@ does not work ! */
789 end->in = NULL; /* @@@ make sure we get an error if we use the
790 in array afterwards ... */
793 /* Return the target address of an IJmp */
794 ir_node *get_IJmp_target(ir_node *ijmp) {
795 assert(ijmp->op == op_IJmp);
796 return get_irn_n(ijmp, 0);
799 /** Sets the target address of an IJmp */
800 void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
801 assert(ijmp->op == op_IJmp);
802 set_irn_n(ijmp, 0, tgt);
806 > Implementing the case construct (which is where the constant Proj node is
807 > important) involves far more than simply determining the constant values.
808 > We could argue that this is more properly a function of the translator from
809 > Firm to the target machine. That could be done if there was some way of
810 > projecting "default" out of the Cond node.
811 I know it's complicated.
812 Basically there are two problems:
813 - determining the gaps between the projs
814 - determining the biggest case constant to know the proj number for
816 I see several solutions:
817 1. Introduce a ProjDefault node. Solves both problems.
818 This means to extend all optimizations executed during construction.
819 2. Give the Cond node for switch two flavors:
820 a) there are no gaps in the projs (existing flavor)
821 b) gaps may exist, default proj is still the Proj with the largest
822 projection number. This covers also the gaps.
823 3. Fix the semantic of the Cond to that of 2b)
825 Solution 2 seems to be the best:
826 Computing the gaps in the Firm representation is not too hard, i.e.,
827 libFIRM can implement a routine that transforms between the two
828 flavours. This is also possible for 1) but 2) does not require to
829 change any existing optimization.
830 Further it should be far simpler to determine the biggest constant than
832 I don't want to choose 3) as 2a) seems to have advantages for
833 dataflow analysis and 3) does not allow to convert the representation to
837 get_Cond_selector (ir_node *node) {
838 assert (node->op == op_Cond);
839 return get_irn_n(node, 0);
843 set_Cond_selector (ir_node *node, ir_node *selector) {
844 assert (node->op == op_Cond);
845 set_irn_n(node, 0, selector);
849 get_Cond_kind (ir_node *node) {
850 assert (node->op == op_Cond);
851 return node->attr.c.kind;
855 set_Cond_kind (ir_node *node, cond_kind kind) {
856 assert (node->op == op_Cond);
857 node->attr.c.kind = kind;
861 get_Cond_defaultProj (ir_node *node) {
862 assert (node->op == op_Cond);
863 return node->attr.c.default_proj;
867 get_Return_mem (ir_node *node) {
868 assert (node->op == op_Return);
869 return get_irn_n(node, 0);
873 set_Return_mem (ir_node *node, ir_node *mem) {
874 assert (node->op == op_Return);
875 set_irn_n(node, 0, mem);
879 get_Return_n_ress (ir_node *node) {
880 assert (node->op == op_Return);
881 return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
885 get_Return_res_arr (ir_node *node)
887 assert ((node->op == op_Return));
888 if (get_Return_n_ress(node) > 0)
889 return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
896 set_Return_n_res (ir_node *node, int results) {
897 assert (node->op == op_Return);
902 get_Return_res (ir_node *node, int pos) {
903 assert (node->op == op_Return);
904 assert (get_Return_n_ress(node) > pos);
905 return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
909 set_Return_res (ir_node *node, int pos, ir_node *res){
910 assert (node->op == op_Return);
911 set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
915 get_Raise_mem (ir_node *node) {
916 assert (node->op == op_Raise);
917 return get_irn_n(node, 0);
921 set_Raise_mem (ir_node *node, ir_node *mem) {
922 assert (node->op == op_Raise);
923 set_irn_n(node, 0, mem);
927 get_Raise_exo_ptr (ir_node *node) {
928 assert (node->op == op_Raise);
929 return get_irn_n(node, 1);
933 set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) {
934 assert (node->op == op_Raise);
935 set_irn_n(node, 1, exo_ptr);
938 tarval *(get_Const_tarval)(ir_node *node) {
939 return _get_Const_tarval(node);
943 set_Const_tarval (ir_node *node, tarval *con) {
944 assert (node->op == op_Const);
945 node->attr.con.tv = con;
948 cnst_classify_t (classify_Const)(ir_node *node)
950 return _classify_Const(node);
954 /* The source language type. Must be an atomic type. Mode of type must
955 be mode of node. For tarvals from entities type must be pointer to
958 get_Const_type (ir_node *node) {
959 assert (node->op == op_Const);
960 return node->attr.con.tp;
964 set_Const_type (ir_node *node, type *tp) {
965 assert (node->op == op_Const);
966 if (tp != firm_unknown_type) {
967 assert (is_atomic_type(tp));
968 assert (get_type_mode(tp) == get_irn_mode(node));
970 node->attr.con.tp = tp;
975 get_SymConst_kind (const ir_node *node) {
976 assert (node->op == op_SymConst);
977 return node->attr.i.num;
981 set_SymConst_kind (ir_node *node, symconst_kind num) {
982 assert (node->op == op_SymConst);
983 node->attr.i.num = num;
987 get_SymConst_type (ir_node *node) {
988 assert ( (node->op == op_SymConst)
989 && ( get_SymConst_kind(node) == symconst_type_tag
990 || get_SymConst_kind(node) == symconst_size));
991 return node->attr.i.sym.type_p = skip_tid(node->attr.i.sym.type_p);
995 set_SymConst_type (ir_node *node, type *tp) {
996 assert ( (node->op == op_SymConst)
997 && ( get_SymConst_kind(node) == symconst_type_tag
998 || get_SymConst_kind(node) == symconst_size));
999 node->attr.i.sym.type_p = tp;
1003 get_SymConst_name (ir_node *node) {
1004 assert ( (node->op == op_SymConst)
1005 && (get_SymConst_kind(node) == symconst_addr_name));
1006 return node->attr.i.sym.ident_p;
1010 set_SymConst_name (ir_node *node, ident *name) {
1011 assert ( (node->op == op_SymConst)
1012 && (get_SymConst_kind(node) == symconst_addr_name));
1013 node->attr.i.sym.ident_p = name;
1017 /* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
1018 entity *get_SymConst_entity (ir_node *node) {
1019 assert ( (node->op == op_SymConst)
1020 && (get_SymConst_kind (node) == symconst_addr_ent));
1021 return node->attr.i.sym.entity_p;
1024 void set_SymConst_entity (ir_node *node, entity *ent) {
1025 assert ( (node->op == op_SymConst)
1026 && (get_SymConst_kind(node) == symconst_addr_ent));
1027 node->attr.i.sym.entity_p = ent;
1030 union symconst_symbol
1031 get_SymConst_symbol (ir_node *node) {
1032 assert (node->op == op_SymConst);
1033 return node->attr.i.sym;
1037 set_SymConst_symbol (ir_node *node, union symconst_symbol sym) {
1038 assert (node->op == op_SymConst);
1039 //memcpy (&(node->attr.i.sym), sym, sizeof(type_or_id));
1040 node->attr.i.sym = sym;
1044 get_SymConst_value_type (ir_node *node) {
1045 assert (node->op == op_SymConst);
1046 if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp);
1047 return node->attr.i.tp;
1051 set_SymConst_value_type (ir_node *node, type *tp) {
1052 assert (node->op == op_SymConst);
1053 node->attr.i.tp = tp;
1057 get_Sel_mem (ir_node *node) {
1058 assert (node->op == op_Sel);
1059 return get_irn_n(node, 0);
1063 set_Sel_mem (ir_node *node, ir_node *mem) {
1064 assert (node->op == op_Sel);
1065 set_irn_n(node, 0, mem);
1069 get_Sel_ptr (ir_node *node) {
1070 assert (node->op == op_Sel);
1071 return get_irn_n(node, 1);
1075 set_Sel_ptr (ir_node *node, ir_node *ptr) {
1076 assert (node->op == op_Sel);
1077 set_irn_n(node, 1, ptr);
1081 get_Sel_n_indexs (ir_node *node) {
1082 assert (node->op == op_Sel);
1083 return (get_irn_arity(node) - SEL_INDEX_OFFSET);
1087 get_Sel_index_arr (ir_node *node)
1089 assert ((node->op == op_Sel));
1090 if (get_Sel_n_indexs(node) > 0)
1091 return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
1097 get_Sel_index (ir_node *node, int pos) {
1098 assert (node->op == op_Sel);
1099 return get_irn_n(node, pos + SEL_INDEX_OFFSET);
1103 set_Sel_index (ir_node *node, int pos, ir_node *index) {
1104 assert (node->op == op_Sel);
1105 set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
1109 get_Sel_entity (ir_node *node) {
1110 assert (node->op == op_Sel);
1111 return node->attr.s.ent;
1115 set_Sel_entity (ir_node *node, entity *ent) {
1116 assert (node->op == op_Sel);
1117 node->attr.s.ent = ent;
/* InstOf accessors (entity/type, store and object operands).
   Fixed: every assert below used assignment (=) instead of comparison
   (==) -- the assert was therefore always true and, worse, silently
   overwrote node->op with op_InstOf in debug builds. */
1121 get_InstOf_ent (ir_node *node) {
1122 assert (node->op == op_InstOf);
1123 return (node->attr.io.ent);
1127 set_InstOf_ent (ir_node *node, type *ent) {
1128 assert (node->op == op_InstOf);
1129 node->attr.io.ent = ent;
/* memory operand is predecessor 0 */
1133 get_InstOf_store (ir_node *node) {
1134 assert (node->op == op_InstOf);
1135 return (get_irn_n (node, 0));
1139 set_InstOf_store (ir_node *node, ir_node *obj) {
1140 assert (node->op == op_InstOf);
1141 set_irn_n (node, 0, obj);
/* object operand is predecessor 1 */
1145 get_InstOf_obj (ir_node *node) {
1146 assert (node->op == op_InstOf);
1147 return (get_irn_n (node, 1));
1151 set_InstOf_obj (ir_node *node, ir_node *obj) {
1152 assert (node->op == op_InstOf);
1153 set_irn_n (node, 1, obj);
1157 /* For unary and binary arithmetic operations the access to the
1158 operands can be factored out. Left is the first, right the
1159 second arithmetic value as listed in tech report 0999-33.
1160 unops are: Minus, Abs, Not, Conv, Cast
1161 binops are: Add, Sub, Mul, Quot, DivMod, Div, Mod, And, Or, Eor, Shl,
1162 Shr, Shrs, Rotate, Cmp */
1166 get_Call_mem (ir_node *node) {
1167 assert (node->op == op_Call);
1168 return get_irn_n(node, 0);
1172 set_Call_mem (ir_node *node, ir_node *mem) {
1173 assert (node->op == op_Call);
1174 set_irn_n(node, 0, mem);
1178 get_Call_ptr (ir_node *node) {
1179 assert (node->op == op_Call);
1180 return get_irn_n(node, 1);
1184 set_Call_ptr (ir_node *node, ir_node *ptr) {
1185 assert (node->op == op_Call);
1186 set_irn_n(node, 1, ptr);
1190 get_Call_param_arr (ir_node *node) {
1191 assert (node->op == op_Call);
1192 return (ir_node **)&get_irn_in(node)[CALL_PARAM_OFFSET + 1];
1196 get_Call_n_params (ir_node *node) {
1197 assert (node->op == op_Call);
1198 return (get_irn_arity(node) - CALL_PARAM_OFFSET);
1202 get_Call_arity (ir_node *node) {
1203 assert (node->op == op_Call);
1204 return get_Call_n_params(node);
1208 set_Call_arity (ir_node *node, ir_node *arity) {
1209 assert (node->op == op_Call);
1214 get_Call_param (ir_node *node, int pos) {
1215 assert (node->op == op_Call);
1216 return get_irn_n(node, pos + CALL_PARAM_OFFSET);
1220 set_Call_param (ir_node *node, int pos, ir_node *param) {
1221 assert (node->op == op_Call);
1222 set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
1226 get_Call_type (ir_node *node) {
1227 assert (node->op == op_Call);
1228 return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
1232 set_Call_type (ir_node *node, type *tp) {
1233 assert (node->op == op_Call);
1234 assert ((get_unknown_type() == tp) || is_Method_type(tp));
1235 node->attr.call.cld_tp = tp;
1238 int Call_has_callees(ir_node *node) {
1239 assert(node && node->op == op_Call);
1240 return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
1241 (node->attr.call.callee_arr != NULL));
1244 int get_Call_n_callees(ir_node * node) {
1245 assert(node && node->op == op_Call && node->attr.call.callee_arr);
1246 return ARR_LEN(node->attr.call.callee_arr);
1249 entity * get_Call_callee(ir_node * node, int pos) {
1250 assert(pos >= 0 && pos < get_Call_n_callees(node));
1251 return node->attr.call.callee_arr[pos];
1254 void set_Call_callee_arr(ir_node * node, const int n, entity ** arr) {
1255 assert(node->op == op_Call);
1256 if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
1257 node->attr.call.callee_arr = NEW_ARR_D(entity *, current_ir_graph->obst, n);
1259 memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *));
1262 void remove_Call_callee_arr(ir_node * node) {
1263 assert(node->op == op_Call);
1264 node->attr.call.callee_arr = NULL;
1267 ir_node * get_CallBegin_ptr (ir_node *node) {
1268 assert(node->op == op_CallBegin);
1269 return get_irn_n(node, 0);
1271 void set_CallBegin_ptr (ir_node *node, ir_node *ptr) {
1272 assert(node->op == op_CallBegin);
1273 set_irn_n(node, 0, ptr);
1275 ir_node * get_CallBegin_call (ir_node *node) {
1276 assert(node->op == op_CallBegin);
1277 return node->attr.callbegin.call;
1279 void set_CallBegin_call (ir_node *node, ir_node *call) {
1280 assert(node->op == op_CallBegin);
1281 node->attr.callbegin.call = call;
1286 ir_node * get_##OP##_left(ir_node *node) { \
1287 assert(node->op == op_##OP); \
1288 return get_irn_n(node, node->op->op_index); \
1290 void set_##OP##_left(ir_node *node, ir_node *left) { \
1291 assert(node->op == op_##OP); \
1292 set_irn_n(node, node->op->op_index, left); \
1294 ir_node *get_##OP##_right(ir_node *node) { \
1295 assert(node->op == op_##OP); \
1296 return get_irn_n(node, node->op->op_index + 1); \
1298 void set_##OP##_right(ir_node *node, ir_node *right) { \
1299 assert(node->op == op_##OP); \
1300 set_irn_n(node, node->op->op_index + 1, right); \
1304 ir_node *get_##OP##_op(ir_node *node) { \
1305 assert(node->op == op_##OP); \
1306 return get_irn_n(node, node->op->op_index); \
1308 void set_##OP##_op (ir_node *node, ir_node *op) { \
1309 assert(node->op == op_##OP); \
1310 set_irn_n(node, node->op->op_index, op); \
1320 get_Quot_mem (ir_node *node) {
1321 assert (node->op == op_Quot);
1322 return get_irn_n(node, 0);
1326 set_Quot_mem (ir_node *node, ir_node *mem) {
1327 assert (node->op == op_Quot);
1328 set_irn_n(node, 0, mem);
1334 get_DivMod_mem (ir_node *node) {
1335 assert (node->op == op_DivMod);
1336 return get_irn_n(node, 0);
1340 set_DivMod_mem (ir_node *node, ir_node *mem) {
1341 assert (node->op == op_DivMod);
1342 set_irn_n(node, 0, mem);
1348 get_Div_mem (ir_node *node) {
1349 assert (node->op == op_Div);
1350 return get_irn_n(node, 0);
1354 set_Div_mem (ir_node *node, ir_node *mem) {
1355 assert (node->op == op_Div);
1356 set_irn_n(node, 0, mem);
1362 get_Mod_mem (ir_node *node) {
1363 assert (node->op == op_Mod);
1364 return get_irn_n(node, 0);
1368 set_Mod_mem (ir_node *node, ir_node *mem) {
1369 assert (node->op == op_Mod);
1370 set_irn_n(node, 0, mem);
1387 get_Cast_type (ir_node *node) {
1388 assert (node->op == op_Cast);
1389 return node->attr.cast.totype;
1393 set_Cast_type (ir_node *node, type *to_tp) {
1394 assert (node->op == op_Cast);
1395 node->attr.cast.totype = to_tp;
1399 /* Checks for upcast.
1401 * Returns true if the Cast node casts a class type to a super type.
/* Requires consistent type information on the graph; pointer levels are
   peeled off both types in lock-step before the class relation is tested. */
1403 int is_Cast_upcast(ir_node *node) {
1404 type *totype = get_Cast_type(node);
1405 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1406 ir_graph *myirg = get_irn_irg(node);
1408 assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent);
1411 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1412 totype = get_pointer_points_to_type(totype);
1413 fromtype = get_pointer_points_to_type(fromtype);
1418 if (!is_Class_type(totype)) return false;
/* upcast: the source class must be a subclass of the destination class */
1419 return is_subclass_of(fromtype, totype);
1422 /* Checks for downcast.
1424 * Returns true if the Cast node casts a class type to a sub type.
1426 int is_Cast_downcast(ir_node *node) {
1427 type *totype = get_Cast_type(node);
1428 type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
1430 assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
1433 while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
1434 totype = get_pointer_points_to_type(totype);
1435 fromtype = get_pointer_points_to_type(fromtype);
1440 if (!is_Class_type(totype)) return false;
/* downcast: argument order is mirrored w.r.t. is_Cast_upcast */
1441 return is_subclass_of(totype, fromtype);
/* Out-of-line version of the _is_unop() macro/inline. */
(is_unop)(const ir_node *node) {
  return _is_unop(node);
/* Returns the single data operand of a unary operation; op_index in
   the ir_op gives the position of that operand. */
get_unop_op (ir_node *node) {
  if (node->op->opar == oparity_unary)
    return get_irn_n(node, node->op->op_index);
  assert(node->op->opar == oparity_unary);
/* Replaces the data operand of a unary operation. */
set_unop_op (ir_node *node, ir_node *op) {
  if (node->op->opar == oparity_unary)
    set_irn_n(node, node->op->op_index, op);
  assert(node->op->opar == oparity_unary);
/* Out-of-line version of the _is_binop() macro/inline. */
(is_binop)(const ir_node *node) {
  return _is_binop(node);
/* Left operand of a binary operation sits at op_index ... */
get_binop_left (ir_node *node) {
  if (node->op->opar == oparity_binary)
    return get_irn_n(node, node->op->op_index);
  assert(node->op->opar == oparity_binary);
set_binop_left (ir_node *node, ir_node *left) {
  if (node->op->opar == oparity_binary)
    set_irn_n(node, node->op->op_index, left);
  assert (node->op->opar == oparity_binary);
/* ... and the right operand directly after it at op_index + 1. */
get_binop_right (ir_node *node) {
  if (node->op->opar == oparity_binary)
    return get_irn_n(node, node->op->op_index + 1);
  assert(node->op->opar == oparity_binary);
set_binop_right (ir_node *node, ir_node *right) {
  if (node->op->opar == oparity_binary)
    set_irn_n(node, node->op->op_index + 1, right);
  assert (node->op->opar == oparity_binary);
/* Returns true if n is a (non-degenerate) Phi node.  A Filter node
   counts as a Phi only in the interprocedural view. */
int is_Phi (const ir_node *n) {
  if (op == op_Filter) return get_interprocedural_view();
  /* during graph construction 0-ary Phis (Phi0 placeholders) exist;
     outside phase_building every Phi has predecessors */
  return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) ||
          (get_irn_arity(n) > 0));
/* Returns true for the 0-ary Phi placeholders that only exist while
   the graph is being constructed. */
int is_Phi0 (const ir_node *n) {
  return ((get_irn_op(n) == op_Phi) &&
          (get_irn_arity(n) == 0) &&
          (get_irg_phase_state(get_irn_irg(n)) == phase_building));
/* Direct access to the predecessor array (entry 0 is the block, hence
   the +1 offset). */
get_Phi_preds_arr (ir_node *node) {
  assert (node->op == op_Phi);
  return (ir_node **)&(get_irn_in(node)[1]);
/* Number of data predecessors of a Phi (or Phi0). */
get_Phi_n_preds (ir_node *node) {
  assert (is_Phi(node) || is_Phi0(node));
  return (get_irn_arity(node));
/* NOTE(review): body not visible here — presumably unimplemented in
   the full file; confirm before relying on it. */
void set_Phi_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Phi);
/* Gets/sets the data predecessor at position pos. */
get_Phi_pred (ir_node *node, int pos) {
  assert (is_Phi(node) || is_Phi0(node));
  return get_irn_n(node, pos);
set_Phi_pred (ir_node *node, int pos, ir_node *pred) {
  assert (is_Phi(node) || is_Phi0(node));
  set_irn_n(node, pos, pred);
/* A "memop" is a Load or Store: both share the layout
   predecessor 0 = memory, predecessor 1 = address. */
int is_memop(ir_node *node) {
  return ((get_irn_op(node) == op_Load) || (get_irn_op(node) == op_Store));
/* Memory input of a Load/Store. */
ir_node *get_memop_mem (ir_node *node) {
  assert(is_memop(node));
  return get_irn_n(node, 0);
void set_memop_mem (ir_node *node, ir_node *mem) {
  assert(is_memop(node));
  set_irn_n(node, 0, mem);
/* Address input of a Load/Store. */
ir_node *get_memop_ptr (ir_node *node) {
  assert(is_memop(node));
  return get_irn_n(node, 1);
void set_memop_ptr (ir_node *node, ir_node *ptr) {
  assert(is_memop(node));
  set_irn_n(node, 1, ptr);
/* Load inputs: 0 = memory, 1 = address.  Mode and volatility live in
   the node attributes. */
get_Load_mem (ir_node *node) {
  assert (node->op == op_Load);
  return get_irn_n(node, 0);
set_Load_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Load);
  set_irn_n(node, 0, mem);
get_Load_ptr (ir_node *node) {
  assert (node->op == op_Load);
  return get_irn_n(node, 1);
set_Load_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Load);
  set_irn_n(node, 1, ptr);
/* Mode of the value being loaded. */
get_Load_mode (ir_node *node) {
  assert (node->op == op_Load);
  return node->attr.load.load_mode;
set_Load_mode (ir_node *node, ir_mode *mode) {
  assert (node->op == op_Load);
  node->attr.load.load_mode = mode;
/* Volatility flag: volatile Loads must not be optimized away. */
get_Load_volatility (ir_node *node) {
  assert (node->op == op_Load);
  return node->attr.load.volatility;
set_Load_volatility (ir_node *node, ent_volatility volatility) {
  assert (node->op == op_Load);
  node->attr.load.volatility = volatility;
/* Store inputs: 0 = memory, 1 = address, 2 = value to store. */
get_Store_mem (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 0);
set_Store_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Store);
  set_irn_n(node, 0, mem);
get_Store_ptr (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 1);
set_Store_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Store);
  set_irn_n(node, 1, ptr);
get_Store_value (ir_node *node) {
  assert (node->op == op_Store);
  return get_irn_n(node, 2);
set_Store_value (ir_node *node, ir_node *value) {
  assert (node->op == op_Store);
  set_irn_n(node, 2, value);
/* Volatility flag: volatile Stores must not be optimized away. */
get_Store_volatility (ir_node *node) {
  assert (node->op == op_Store);
  return node->attr.store.volatility;
set_Store_volatility (ir_node *node, ent_volatility volatility) {
  assert (node->op == op_Store);
  node->attr.store.volatility = volatility;
/* Alloc inputs: 0 = memory, 1 = size.  The allocated type and the
   allocation place (stack/heap) are attributes. */
get_Alloc_mem (ir_node *node) {
  assert (node->op == op_Alloc);
  return get_irn_n(node, 0);
set_Alloc_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Alloc);
  set_irn_n(node, 0, mem);
get_Alloc_size (ir_node *node) {
  assert (node->op == op_Alloc);
  return get_irn_n(node, 1);
set_Alloc_size (ir_node *node, ir_node *size) {
  assert (node->op == op_Alloc);
  set_irn_n(node, 1, size);
/* Returns the allocated type; skip_tid() normalizes away type-id
   indirections and the result is cached back into the attribute. */
get_Alloc_type (ir_node *node) {
  assert (node->op == op_Alloc);
  return node->attr.a.type = skip_tid(node->attr.a.type);
set_Alloc_type (ir_node *node, type *tp) {
  assert (node->op == op_Alloc);
  node->attr.a.type = tp;
/* Where the allocation happens (where_alloc: stack or heap). */
get_Alloc_where (ir_node *node) {
  assert (node->op == op_Alloc);
  return node->attr.a.where;
set_Alloc_where (ir_node *node, where_alloc where) {
  assert (node->op == op_Alloc);
  node->attr.a.where = where;
/* Free inputs: 0 = memory, 1 = pointer, 2 = size.  Type and place
   mirror the Alloc attributes. */
get_Free_mem (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 0);
set_Free_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_Free);
  set_irn_n(node, 0, mem);
get_Free_ptr (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 1);
set_Free_ptr (ir_node *node, ir_node *ptr) {
  assert (node->op == op_Free);
  set_irn_n(node, 1, ptr);
get_Free_size (ir_node *node) {
  assert (node->op == op_Free);
  return get_irn_n(node, 2);
set_Free_size (ir_node *node, ir_node *size) {
  assert (node->op == op_Free);
  set_irn_n(node, 2, size);
/* Type of the freed object, normalized via skip_tid() like Alloc. */
get_Free_type (ir_node *node) {
  assert (node->op == op_Free);
  return node->attr.f.type = skip_tid(node->attr.f.type);
set_Free_type (ir_node *node, type *tp) {
  assert (node->op == op_Free);
  node->attr.f.type = tp;
get_Free_where (ir_node *node) {
  assert (node->op == op_Free);
  return node->attr.f.where;
set_Free_where (ir_node *node, where_alloc where) {
  assert (node->op == op_Free);
  node->attr.f.where = where;
/* Sync merges several memory values; predecessors are all memories.
   Entry 0 of the raw in-array is the block, hence the +1 offset. */
get_Sync_preds_arr (ir_node *node) {
  assert (node->op == op_Sync);
  return (ir_node **)&(get_irn_in(node)[1]);
get_Sync_n_preds (ir_node *node) {
  assert (node->op == op_Sync);
  return (get_irn_arity(node));
/* NOTE(review): body not visible here — presumably unimplemented in
   the full file; confirm before relying on it. */
set_Sync_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Sync);
/* Gets/sets the memory predecessor at position pos. */
get_Sync_pred (ir_node *node, int pos) {
  assert (node->op == op_Sync);
  return get_irn_n(node, pos);
set_Sync_pred (ir_node *node, int pos, ir_node *pred) {
  assert (node->op == op_Sync);
  set_irn_n(node, pos, pred);
/* Tries to determine the source-level type a Proj node selects.
   For Proj(Proj(Start)) it is a method parameter type, for
   Proj(Proj(Call)) a method result type, for Proj(Load) the type of
   the selected entity.  (Several case labels and the default result
   are not visible in this excerpt — confirm against the full file.) */
type *get_Proj_type(ir_node *n)
  ir_node *pred = get_Proj_pred(n);
  switch (get_irn_opcode(pred)) {
  /* Deal with Start / Call here: we need to know the Proj Nr. */
  assert(get_irn_mode(pred) == mode_T);
  pred_pred = get_Proj_pred(pred);
  if (get_irn_op(pred_pred) == op_Start) {
    /* parameter projection: look the type up in the method type of
       the graph's entity */
    type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
    tp = get_method_param_type(mtp, get_Proj_proj(n));
  } else if (get_irn_op(pred_pred) == op_Call) {
    /* result projection: look the type up in the call's method type */
    type *mtp = get_Call_type(pred_pred);
    tp = get_method_res_type(mtp, get_Proj_proj(n));
  case iro_Start: break;
  case iro_Call: break;
    /* Proj of a Load: if the address is a Sel we know the entity type */
    ir_node *a = get_Load_ptr(pred);
    if (get_irn_op(a) == op_Sel)
      tp = get_entity_type(get_Sel_entity(a));
/* The projected node is the single predecessor of a Proj. */
get_Proj_pred (const ir_node *node) {
  assert (is_Proj(node));
  return get_irn_n(node, 0);
set_Proj_pred (ir_node *node, ir_node *pred) {
  assert (is_Proj(node));
  set_irn_n(node, 0, pred);
/* Returns the projection number.  is_Proj() also accepts Filter nodes
   (interprocedural view), whose number lives in a different attribute. */
get_Proj_proj (const ir_node *node) {
  assert (is_Proj(node));
  if (get_irn_opcode(node) == iro_Proj) {
    return node->attr.proj;
  assert(get_irn_opcode(node) == iro_Filter);
  return node->attr.filter.proj;
/* Setter only accepts real Proj nodes, not Filters. */
set_Proj_proj (ir_node *node, long proj) {
  assert (node->op == op_Proj);
  node->attr.proj = proj;
/* Tuple predecessors; entry 0 of the raw in-array is the block,
   hence the +1 offset. */
get_Tuple_preds_arr (ir_node *node) {
  assert (node->op == op_Tuple);
  return (ir_node **)&(get_irn_in(node)[1]);
get_Tuple_n_preds (ir_node *node) {
  assert (node->op == op_Tuple);
  return (get_irn_arity(node));
/* NOTE(review): body not visible here — presumably unimplemented in
   the full file; confirm before relying on it. */
set_Tuple_n_preds (ir_node *node, int n_preds) {
  assert (node->op == op_Tuple);
get_Tuple_pred (ir_node *node, int pos) {
  assert (node->op == op_Tuple);
  return get_irn_n(node, pos);
set_Tuple_pred (ir_node *node, int pos, ir_node *pred) {
  assert (node->op == op_Tuple);
  set_irn_n(node, pos, pred);
/* An Id node forwards its single predecessor unchanged. */
get_Id_pred (ir_node *node) {
  assert (node->op == op_Id);
  return get_irn_n(node, 0);
set_Id_pred (ir_node *node, ir_node *pred) {
  assert (node->op == op_Id);
  set_irn_n(node, 0, pred);
/* Confirm inputs: 0 = the confirmed value, 1 = the bound it is
   compared against. */
ir_node *get_Confirm_value (ir_node *node) {
  assert (node->op == op_Confirm);
  return get_irn_n(node, 0);
void set_Confirm_value (ir_node *node, ir_node *value) {
  assert (node->op == op_Confirm);
  set_irn_n(node, 0, value);
ir_node *get_Confirm_bound (ir_node *node) {
  assert (node->op == op_Confirm);
  return get_irn_n(node, 1);
1937 void set_Confirm_bound (ir_node *node, ir_node *bound) {
1938 assert (node->op == op_Confirm);
1939 set_irn_n(node, 0, bound);
/* The relation (pn_Cmp) asserted between value and bound. */
pn_Cmp get_Confirm_cmp (ir_node *node) {
  assert (node->op == op_Confirm);
  return node->attr.confirm_cmp;
void set_Confirm_cmp (ir_node *node, pn_Cmp cmp) {
  assert (node->op == op_Confirm);
  node->attr.confirm_cmp = cmp;
/* Filter nodes act as Proj in the intraprocedural and as Phi in the
   interprocedural view; the in_cg array holds the interprocedural
   predecessors. */
get_Filter_pred (ir_node *node) {
  assert(node->op == op_Filter);
set_Filter_pred (ir_node *node, ir_node *pred) {
  assert(node->op == op_Filter);
/* Projection number of the Filter (Proj role). */
get_Filter_proj(ir_node *node) {
  assert(node->op == op_Filter);
  return node->attr.filter.proj;
set_Filter_proj (ir_node *node, long proj) {
  assert(node->op == op_Filter);
  node->attr.filter.proj = proj;
/* Don't use get_irn_arity, get_irn_n in implementation as access
   shall work independent of view!!! */
/* Installs a complete interprocedural predecessor array; reallocates
   in_cg/backedge on the graph's obstack when the arity changes. */
void set_Filter_cg_pred_arr(ir_node * node, int arity, ir_node ** in) {
  assert(node->op == op_Filter);
  if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
    node->attr.filter.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
    node->attr.filter.backedge = NEW_ARR_D (int, current_ir_graph->obst, arity);
    memset(node->attr.filter.backedge, 0, sizeof(int) * arity);
    /* slot 0 mirrors the block predecessor of the regular in-array */
    node->attr.filter.in_cg[0] = node->in[0];
  memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
/* Sets a single interprocedural predecessor (pos is 0-based, slot 0
   of in_cg is the block). */
void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
  assert(node->op == op_Filter && node->attr.filter.in_cg &&
         0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
  node->attr.filter.in_cg[pos + 1] = pred;
int get_Filter_n_cg_preds(ir_node *node) {
  assert(node->op == op_Filter && node->attr.filter.in_cg);
  return (ARR_LEN(node->attr.filter.in_cg) - 1);
ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
  assert(node->op == op_Filter && node->attr.filter.in_cg &&
  arity = ARR_LEN(node->attr.filter.in_cg);
  assert(pos < arity - 1);
  return node->attr.filter.in_cg[pos + 1];
/* Mux inputs (after the block slot): 1 = selector, 2 = false value,
   3 = true value. */
ir_node *get_Mux_sel (ir_node *node) {
  assert(node->op == op_Mux);
void set_Mux_sel (ir_node *node, ir_node *sel) {
  assert(node->op == op_Mux);
ir_node *get_Mux_false (ir_node *node) {
  assert(node->op == op_Mux);
/* NOTE(review): writes node->in[] directly instead of set_irn_n(),
   bypassing any edge bookkeeping — confirm this is intentional. */
void set_Mux_false (ir_node *node, ir_node *ir_false) {
  assert(node->op == op_Mux);
  node->in[2] = ir_false;
ir_node *get_Mux_true (ir_node *node) {
  assert(node->op == op_Mux);
/* NOTE(review): direct node->in[] write, see set_Mux_false. */
void set_Mux_true (ir_node *node, ir_node *ir_true) {
  assert(node->op == op_Mux);
  node->in[3] = ir_true;
/* CopyB (block copy) inputs: 0 = memory, 1 = destination address,
   2 = source address; the copied type is an attribute. */
ir_node *get_CopyB_mem (ir_node *node) {
  assert (node->op == op_CopyB);
  return get_irn_n(node, 0);
void set_CopyB_mem (ir_node *node, ir_node *mem) {
  assert (node->op == op_CopyB);
  set_irn_n(node, 0, mem);
ir_node *get_CopyB_dst (ir_node *node) {
  assert (node->op == op_CopyB);
  return get_irn_n(node, 1);
void set_CopyB_dst (ir_node *node, ir_node *dst) {
  assert (node->op == op_CopyB);
  set_irn_n(node, 1, dst);
ir_node *get_CopyB_src (ir_node *node) {
  assert (node->op == op_CopyB);
  return get_irn_n(node, 2);
void set_CopyB_src (ir_node *node, ir_node *src) {
  assert (node->op == op_CopyB);
  set_irn_n(node, 2, src);
/* Type describing the copied data. */
type *get_CopyB_type(ir_node *node) {
  assert (node->op == op_CopyB);
  return node->attr.copyb.data_type;
/* A NULL data_type is rejected by the assertion. */
void set_CopyB_type(ir_node *node, type *data_type) {
  assert (node->op == op_CopyB && data_type);
  node->attr.copyb.data_type = data_type;
/* Returns the ir_graph a node belongs to by walking to its block and
   reading the graph pointer stored there. */
get_irn_irg(const ir_node *node) {
  /*
   * Do not use get_nodes_Block() here, because this
   * will check the pinned state.
   * However even a 'wrong' block is always in the proper
   */
  if (! is_Block(node))
    node = get_irn_n(node, -1);
  /* sometimes bad is predecessor of nodes instead of block: in case of optimization */
  if (is_Bad(node))
    node = get_irn_n(node, -1);
  assert(get_irn_op(node) == op_Block);
  return node->attr.block.irg;
2090 /*----------------------------------------------------------------*/
2091 /* Auxiliary routines */
2092 /*----------------------------------------------------------------*/
/* Returns the operand of a Proj (or Filter in the intraproc view);
   any other node, including NULL, is returned unchanged. */
skip_Proj (ir_node *node) {
  /* don't assert node !!! */
  if (node && is_Proj(node)) {
    return get_Proj_pred(node);
/* Resolves Proj(Tuple) chains to the selected Tuple predecessor.
   Only active when optimization normalization is enabled. */
skip_Tuple (ir_node *node) {
  if (!get_opt_normalize()) return node;
  node = skip_Id(node);
  if (get_irn_op(node) == op_Proj) {
    pred = skip_Id(get_Proj_pred(node));
    op = get_irn_op(pred);
    /*
     * Looks strange but calls get_irn_op() only once
     * in most often cases.
     */
    if (op == op_Proj) { /* nested Tuple ? */
      pred = skip_Id(skip_Tuple(pred));
      op = get_irn_op(pred);
      if (op == op_Tuple) {
        node = get_Tuple_pred(pred, get_Proj_proj(node));
    else if (op == op_Tuple) {
      node = get_Tuple_pred(pred, get_Proj_proj(node));
/* returns operand of node if node is a Cast; anything else (incl. NULL)
   is returned unchanged */
ir_node *skip_Cast (ir_node *node) {
  if (node && get_irn_op(node) == op_Cast)
    return get_Cast_op(node);
/* returns operand of node if node is a Confirm; anything else is
   returned unchanged */
ir_node *skip_Confirm (ir_node *node) {
  if (node && get_irn_op(node) == op_Confirm)
    return get_Confirm_value(node);
/* skip all high-level ops: returns predecessor 0 of any node whose op
   is flagged as high-level */
ir_node *skip_HighLevel(ir_node *node) {
  if (node && is_op_highlevel(get_irn_op(node)))
    return get_irn_n(node, 0);
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops. */
/* NOTE(review): TWO definitions of skip_Id appear below; in the full
   file one of them is presumably disabled by preprocessor conditionals
   or comments lost in this excerpt — confirm which one is live. */
skip_Id (ir_node *node) {
  /* don't assert node !!! */
  if (!get_opt_normalize()) return node;
  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  if (node && (node->op == op_Id) && (node != node->in[0+1])) {
    ir_node *rem_pred = node->in[0+1];
    assert (get_irn_arity (node) > 0);
    /* temporarily make us self-referencing to break cycles during the
       recursive resolution */
    node->in[0+1] = node;
    res = skip_Id(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;
    node->in[0+1] = res;
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
   than any other approach, as Id chains are resolved and all point to the real node, or
   all id's are self loops. */
skip_Id (ir_node *node) {
  /* don't assert node !!! */
  if (!node || (node->op != op_Id)) return node;
  if (!get_opt_normalize()) return node;
  /* Don't use get_Id_pred: We get into an endless loop for
     self-referencing Ids. */
  pred = node->in[0+1];
  if (pred->op != op_Id) return pred;
  if (node != pred) { /* not a self referencing Id. Resolve Id chain. */
    ir_node *rem_pred, *res;
    if (pred->op != op_Id) return pred; /* shortcut */
    assert (get_irn_arity (node) > 0);
    node->in[0+1] = node; /* turn us into a self referencing Id: shorten Id cycles. */
    res = skip_Id(rem_pred);
    if (res->op == op_Id) /* self-loop */ return node;
    node->in[0+1] = res; /* Turn Id chain into Ids all referencing the chain end. */
/* Out-of-line versions of the corresponding inline predicates. */
(is_Bad)(const ir_node *node) {
  return _is_Bad(node);
(is_Const)(const ir_node *node) {
  return _is_Const(node);
(is_no_Block)(const ir_node *node) {
  return _is_no_Block(node);
(is_Block)(const ir_node *node) {
  return _is_Block(node);
/* returns true if node is a Unknown node. */
(is_Unknown)(const ir_node *node) {
  return _is_Unknown(node);
/* A Filter counts as a Proj only outside the interprocedural view. */
is_Proj (const ir_node *node) {
  return node->op == op_Proj
    || (!get_interprocedural_view() && node->op == op_Filter);
/* Returns true if the operation manipulates control flow. */
is_cfop(const ir_node *node) {
  return is_cfopcode(get_irn_op(node));
/* Returns true if the operation manipulates interprocedural control flow:
   CallBegin, EndReg, EndExcept */
int is_ip_cfop(const ir_node *node) {
  return is_ip_cfopcode(get_irn_op(node));
/* Returns true if the operation can change the control flow because
   it can raise an exception (op flag). */
is_fragile_op(const ir_node *node) {
  return is_op_fragile(get_irn_op(node));
/* Returns the memory operand of fragile operations.
   (The case labels of the switch are missing from this excerpt; for
   the visible branch the memory is predecessor 0.) */
ir_node *get_fragile_op_mem(ir_node *node) {
  assert(node && is_fragile_op(node));
  switch (get_irn_opcode (node)) {
    return get_irn_n(node, 0);
  /* every fragile opcode must be handled above */
  assert(0 && "should not be reached");
/* Returns true if the operation is a forking control flow operation. */
int (is_irn_forking)(const ir_node *node) {
  return _is_irn_forking(node);
/* Returns the source-level type associated with a node (op-specific). */
type *(get_irn_type)(ir_node *node) {
  return _get_irn_type(node);
/* Returns non-zero for constant-like nodes. */
int (is_irn_constlike)(const ir_node *node) {
  return _is_irn_constlike(node);
/* Gets the string representation of the jump prediction .*/
const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
  case COND_JMP_PRED_NONE: return "no prediction";
  case COND_JMP_PRED_TRUE: return "true taken";
  case COND_JMP_PRED_FALSE: return "false taken";
/* Returns the conditional jump prediction of a Cond node. */
cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) {
  return _get_Cond_jmp_pred(cond);
/* Sets a new conditional jump prediction. */
void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
  _set_Cond_jmp_pred(cond, pred);
/** the get_type operation must be always implemented: fallback that
    is installed when an opcode has no specific get_type. */
static type *get_Null_type(ir_node *n) {
/* Sets the get_type operation for an ir_op_ops. */
ir_op_ops *firm_set_default_get_type(opcode code, ir_op_ops *ops)
  /* only these opcodes carry a real type; all others fall back */
  case iro_Const: ops->get_type = get_Const_type; break;
  case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
  case iro_Cast: ops->get_type = get_Cast_type; break;
  case iro_Proj: ops->get_type = get_Proj_type; break;
  /* not allowed to be NULL */
  if (! ops->get_type)
    ops->get_type = get_Null_type;
#ifdef DEBUG_libfirm
/* Debug helper: prints a node, its block and all predecessors to
   stdout.  Compiled to a no-op stub without DEBUG_libfirm. */
void dump_irn (ir_node *n) {
  int i, arity = get_irn_arity(n);
  printf("%s%s: %ld (%p)\n", get_irn_opname(n), get_mode_name(get_irn_mode(n)), get_irn_node_nr(n), (void *)n);
  /* predecessor -1 is the block the node lives in */
  ir_node *pred = get_irn_n(n, -1);
  printf("  block: %s%s: %ld (%p)\n", get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
         get_irn_node_nr(pred), (void *)pred);
  printf("  preds: \n");
  for (i = 0; i < arity; ++i) {
    ir_node *pred = get_irn_n(n, i);
    printf("    %d: %s%s: %ld (%p)\n", i, get_irn_opname(pred), get_mode_name(get_irn_mode(pred)),
           get_irn_node_nr(pred), (void *)pred);
#else  /* DEBUG_libfirm */
void dump_irn (ir_node *n) {}
#endif /* DEBUG_libfirm */