/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief    Memory disambiguator
 * @author   Michael Beck
 */
#include <assert.h>
#include <stddef.h>

#include "irgraph_t.h"
#include "irmemory_t.h"
48 /** The debug handle. */
49 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Separate debug handle for the private-method/call-type pass below. */
50 DEBUG_ONLY(static firm_dbg_module_t *dbgcall = NULL;)
/* NOTE(review): "disambuigator" and "disamgig" below are historical typos.
 * The names are kept as-is because other code in this file references them;
 * renaming would have to touch every use at once. */
52 /** The source language specific language disambiguator function. */
53 static DISAMBIGUATOR_FUNC language_disambuigator = NULL;
55 /** The global memory disambiguator options. */
56 static unsigned global_mem_disamgig_opt = aa_opt_no_opt;
58 /* Returns a human readable name for an alias relation. */
59 const char *get_ir_alias_relation_name(ir_alias_relation rel) {
60 #define X(a) case a: return #a
65 default: assert(0); return "UNKNOWN";
70 /* Get the memory disambiguator options for a graph. */
71 unsigned get_irg_memory_disambiguator_options(ir_graph *irg) {
72 unsigned opt = irg->mem_disambig_opt;
73 if (opt & aa_opt_inherited)
74 return global_mem_disamgig_opt;
76 } /* get_irg_memory_disambiguator_options */
78 /* Set the memory disambiguator options for a graph. */
79 void set_irg_memory_disambiguator_options(ir_graph *irg, unsigned options) {
80 irg->mem_disambig_opt = options & ~aa_opt_inherited;
81 } /* set_irg_memory_disambiguator_options */
83 /* Set the global disambiguator options for all graphs not having local options. */
84 void set_irp_memory_disambiguator_options(unsigned options) {
85 global_mem_disamgig_opt = options;
86 } /* set_irp_memory_disambiguator_options */
89 * Find the base address and entity of an Sel node.
92 * @param pEnt after return points to the base entity.
94 * @return the base address.
96 static ir_node *find_base_adr(ir_node *sel, ir_entity **pEnt) {
97 ir_node *ptr = get_Sel_ptr(sel);
101 ptr = get_Sel_ptr(sel);
103 *pEnt = get_Sel_entity(sel);
105 } /* find_base_adr */
108 * Check if a given Const node is greater or equal a given size.
110 * @param cns a Const node
111 * @param size a integer size
113 * @return ir_no_alias if the Const is greater, ir_may_alias else
115 static ir_alias_relation check_const(ir_node *cns, int size) {
116 tarval *tv = get_Const_tarval(cns);
120 return tarval_is_null(tv) ? ir_may_alias : ir_no_alias;
121 tv_size = new_tarval_from_long(size, get_tarval_mode(tv));
122 return tarval_cmp(tv_size, tv) & (pn_Cmp_Eq|pn_Cmp_Lt) ? ir_no_alias : ir_may_alias;
/* NOTE(review): this listing has lines elided (declarations of m1/m2/tv_size,
 * several if-conditions, returns and closing braces are missing). The comments
 * below describe only what is visible; reconstruct against upstream libFirm
 * before compiling. */
126 * Treat idx1 and idx2 as integer indexes and check if they differ always more than size.
128 * @param idx1 a node representing the first index
129 * @param idx2 a node representing the second index
130 * @param size an integer size
132 * @return ir_sure_alias iff idx1 == idx2
133 * ir_no_alias iff they ALWAYS differ more than size
136 static ir_alias_relation different_index(ir_node *idx1, ir_node *idx2, int size) {
138 return ir_sure_alias;
139 if (is_Const(idx1) && is_Const(idx2)) {
140 /* both are const, we can compare them */
141 tarval *tv1 = get_Const_tarval(idx1);
142 tarval *tv2 = get_Const_tarval(idx2);
143 tarval *tv, *tv_size;
147 return tv1 == tv2 ? ir_sure_alias : ir_no_alias;
149 /* arg, modes may be different */
150 m1 = get_tarval_mode(tv1);
151 m2 = get_tarval_mode(tv2);
/* NOTE(review): the local below shadows the parameter "size" — here it is a
 * mode-width difference in bits, not a byte size. Confirm upstream naming. */
153 int size = get_mode_size_bits(m1) - get_mode_size_bits(m2);
156 /* m1 is a small mode, cast up */
157 m1 = mode_is_signed(m1) ? find_signed_mode(m2) : find_unsigned_mode(m2);
159 /* should NOT happen, but if it does we give up here */
162 tv1 = tarval_convert_to(tv1, m1);
163 } else if (size > 0) {
164 /* m2 is a small mode, cast up */
165 m2 = mode_is_signed(m2) ? find_signed_mode(m1) : find_unsigned_mode(m1);
167 /* should NOT happen, but if it does we give up here */
170 tv2 = tarval_convert_to(tv2, m2);
172 /* here the size should be identical, check for signed */
173 if (get_mode_sign(m1) != get_mode_sign(m2)) {
174 /* find the signed */
175 if (mode_is_signed(m2)) {
182 /* m1 is now the signed one */
183 if (tarval_cmp(tv1, get_tarval_null(m1)) & (pn_Cmp_Eq|pn_Cmp_Gt)) {
184 /* tv1 is signed, but >= 0, simply cast into unsigned */
185 tv1 = tarval_convert_to(tv1, m2);
187 tv_size = new_tarval_from_long(size, m2);
189 if (tarval_cmp(tv2, tv_size) & (pn_Cmp_Eq|pn_Cmp_Gt)) {
190 /* tv1 is negative and tv2 >= tv_size, so the difference is bigger than size */
193 /* tv_size > tv2, so we can subtract without overflow */
194 tv2 = tarval_sub(tv_size, tv2, NULL);
196 /* tv1 is < 0, so we can negate it */
197 tv1 = tarval_neg(tv1);
199 /* cast it into unsigned. for two-complement it does the right thing for MIN_INT */
200 tv1 = tarval_convert_to(tv1, m2);
202 /* now we can compare without overflow */
203 return tarval_cmp(tv1, tv2) & (pn_Cmp_Eq|pn_Cmp_Gt) ? ir_no_alias : ir_may_alias;
207 if (tarval_cmp(tv1, tv2) == pn_Cmp_Gt) {
212 /* tv1 is now the "smaller" one */
213 tv = tarval_sub(tv2, tv1, NULL);
214 tv_size = new_tarval_from_long(size, get_tarval_mode(tv));
215 return tarval_cmp(tv_size, tv) & (pn_Cmp_Eq|pn_Cmp_Lt) ? ir_no_alias : ir_may_alias;
218 /* Note: we rely here on the fact that normalization puts constants on the RIGHT side */
/* Symbolic cases below: decompose Add/Sub expressions and recurse on the
 * matching operands (x + a vs x + b  ->  compare a and b). */
220 ir_node *l1 = get_Add_left(idx1);
221 ir_node *r1 = get_Add_right(idx1);
226 return check_const(r1, size);
229 /* both are Adds, check if they are of x + a == x + b kind */
230 ir_node *l2 = get_Add_left(idx2);
231 ir_node *r2 = get_Add_right(idx2);
234 return different_index(r1, r2, size);
236 return different_index(r1, l2, size);
238 return different_index(l1, l2, size);
240 return different_index(l1, r2, size);
244 ir_node *l2 = get_Add_left(idx2);
245 ir_node *r2 = get_Add_right(idx2);
250 return check_const(r2, size);
255 ir_node *l1 = get_Sub_left(idx1);
256 ir_node *r1 = get_Sub_right(idx1);
261 return check_const(r1, size);
265 /* both are Subs, check if they are of x - a == x - b kind */
266 ir_node *l2 = get_Sub_left(idx2);
269 ir_node *r2 = get_Sub_right(idx2);
270 return different_index(r1, r2, size);
275 ir_node *l2 = get_Sub_left(idx2);
276 ir_node *r2 = get_Sub_right(idx2);
281 return check_const(r2, size);
286 } /* different_index */
/* NOTE(review): lines are elided in this listing (early guards, counters such
 * as "have_no", and the fallback return are missing). Also note the upstream
 * comment "seems to be broken" below — treat results with suspicion. */
289 * Two Sel addresses have the same base address, check if there offsets are
292 * @param adr1 The first address.
293 * @param adr2 The second address.
295 static ir_alias_relation different_sel_offsets(ir_node *sel1, ir_node *sel2) {
296 /* seems to be broken */
300 ir_entity *ent1 = get_Sel_entity(sel1);
301 ir_entity *ent2 = get_Sel_entity(sel2);
302 int i, check_arr = 0;
307 ir_type *tp1 = get_entity_type(ent1);
308 ir_type *tp2 = get_entity_type(ent2);
/* only compare layouts when both are fixed and equally sized */
312 else if (get_type_state(tp1) == layout_fixed && get_type_state(tp2) == layout_fixed &&
313 get_type_size_bits(tp1) == get_type_size_bits(tp2))
317 /* we select an entity of same size, check for indexes */
318 int n = get_Sel_n_indexs(sel1);
321 if (n > 0 && n == get_Sel_n_indexs(sel2)) {
322 /* same non-zero number of indexes, an array access, check */
323 for (i = 0; i < n; ++i) {
324 ir_node *idx1 = get_Sel_index(sel1, i);
325 ir_node *idx2 = get_Sel_index(sel2, i);
326 ir_alias_relation res = different_index(idx1, idx2, 0); /* we can safely IGNORE the size here if it's at least >0 */
/* NOTE(review): "may_alias"/"no_alias"/"sure_alias" below lack the "ir_"
 * prefix used everywhere else in this file (ir_may_alias etc.) — verify
 * against the ir_alias_relation enum before compiling. */
328 if (res == may_alias)
330 else if (res == no_alias)
333 /* if we have at least one no_alias, there is no alias relation, else we have sure */
334 return have_no > 0 ? no_alias : sure_alias;
339 } /* different_sel_offsets */
/* NOTE(review): lines are elided here (the is_Global guards before the
 * get_Global_entity calls, the ir_no_alias/ir_may_alias returns and the
 * closing braces are missing from this listing). */
342 * Determine the alias relation by checking if adr1 and adr2 are pointer
345 * @param adr1 The first address.
346 * @param adr2 The second address.
348 static ir_alias_relation different_types(ir_node *adr1, ir_node *adr2)
350 ir_entity *ent1 = NULL, *ent2 = NULL;
/* resolve both addresses to entities where possible */
353 ent1 = get_Global_entity(adr1);
354 else if (is_Sel(adr1))
355 ent1 = get_Sel_entity(adr1);
358 ent2 = get_Global_entity(adr2);
359 else if (is_Sel(adr2))
360 ent2 = get_Sel_entity(adr2);
362 if (ent1 != NULL && ent2 != NULL) {
363 ir_type *tp1 = get_entity_type(ent1);
364 ir_type *tp2 = get_entity_type(ent2);
367 /* do deref until no pointer types are found */
368 while (is_Pointer_type(tp1) && is_Pointer_type(tp2)) {
369 tp1 = get_pointer_points_to_type(tp1);
370 tp2 = get_pointer_points_to_type(tp2);
373 if (get_type_tpop(tp1) != get_type_tpop(tp2)) {
374 /* different type structure */
377 if (is_Class_type(tp1)) {
378 /* check class hierarchy */
379 if (! is_SubClass_of(tp1, tp2) &&
380 ! is_SubClass_of(tp2, tp1))
383 /* different types */
389 } /* different_types */
392 * Returns non-zero if a node is a result on a malloc-like routine.
394 * @param node the Proj node to test
396 static int is_malloc_Result(ir_node *node) {
397 node = get_Proj_pred(node);
400 node = get_Proj_pred(node);
403 node = get_Call_ptr(node);
404 if (is_Global(node)) {
405 ir_entity *ent = get_Global_entity(node);
407 if (get_entity_additional_properties(ent) & mtp_property_malloc)
412 } /* is_malloc_Result */
415 * Classify a base pointer.
417 * @param irg the graph of the pointer
418 * @param irn the node representing the base address
419 * @param ent the base entity of the base address iff any
421 ir_storage_class_class_t classify_pointer(ir_graph *irg, ir_node *irn, ir_entity *ent)
423 ir_storage_class_class_t res = ir_sc_pointer;
424 if (is_Global(irn)) {
425 ir_entity *entity = get_Global_entity(irn);
426 res = ir_sc_globalvar;
427 if (! (get_entity_usage(entity) & ir_usage_address_taken))
428 res |= ir_sc_modifier_nottaken;
429 } else if (irn == get_irg_frame(irg)) {
430 res = ir_sc_localvar;
431 if (ent != NULL && !(get_entity_usage(ent) & ir_usage_address_taken))
432 res |= ir_sc_modifier_nottaken;
433 } else if (is_arg_Proj(irn)) {
434 return ir_sc_argument;
435 } else if (irn == get_irg_tls(irg)) {
437 if (ent != NULL && !(get_entity_usage(ent) & ir_usage_address_taken))
438 res |= ir_sc_modifier_nottaken;
439 } else if (is_Proj(irn) && is_malloc_Result(irn)) {
440 return ir_sc_malloced;
447 * If adr represents a Bitfield Sel, skip it
449 static ir_node *skip_Bitfield_Sels(ir_node *adr) {
451 ir_entity *ent = get_Sel_entity(adr);
452 ir_type *bf_type = get_entity_type(ent);
454 /* is it a bitfield type? */
455 if (is_Primitive_type(bf_type) && get_primitive_base_type(bf_type) != NULL)
456 adr = get_Sel_ptr(adr);
/* NOTE(review): this listing has many lines elided (declarations of base1,
 * base2, offset1, offset2, options, mode_size; several returns, else-branches
 * and closing braces; the is_Sel guards before find_base_adr). Comments below
 * describe only what is visible. */
462 * Determine the alias relation between two addresses.
464 * @param irg the graph of both memory operations
465 * @param addr1 pointer address of the first memory operation
466 * @param mode1 the mode of the accessed data through addr1
467 * @param addr2 pointer address of the second memory operation
468 * @param mode2 the mode of the accessed data through addr2
470 * @return found memory relation
472 static ir_alias_relation _get_alias_relation(
474 ir_node *adr1, ir_mode *mode1,
475 ir_node *adr2, ir_mode *mode2)
477 ir_entity *ent1, *ent2;
483 ir_node *orig_adr1 = adr1;
484 ir_node *orig_adr2 = adr2;
486 ir_storage_class_class_t class1, class2;
487 int have_const_offsets;
489 if (! get_opt_alias_analysis())
493 return ir_sure_alias;
495 options = get_irg_memory_disambiguator_options(irg);
497 /* The Armageddon switch */
498 if (options & aa_opt_no_alias)
501 /* do the addresses have constants offsets?
502 * Note: nodes are normalized to have constants at right inputs,
503 * sub X, C is normalized to add X, -C
505 have_const_offsets = 1;
/* strip constant Add offsets from adr1, accumulating them in offset1 */
506 while (is_Add(adr1)) {
507 ir_node *add_right = get_Add_right(adr1);
508 if (is_Const(add_right) && !mode_is_reference(get_irn_mode(add_right))) {
509 tarval *tv = get_Const_tarval(add_right);
510 offset1 += get_tarval_long(tv);
511 adr1 = get_Add_left(adr1);
512 } else if (mode_is_reference(get_irn_mode(add_right))) {
514 have_const_offsets = 0;
516 adr1 = get_Add_left(adr1);
517 have_const_offsets = 0;
/* same stripping for adr2 */
520 while (is_Add(adr2)) {
521 ir_node *add_right = get_Add_right(adr2);
522 if (is_Const(add_right) && !mode_is_reference(get_irn_mode(add_right))) {
523 tarval *tv = get_Const_tarval(add_right);
524 offset2 += get_tarval_long(tv);
525 adr2 = get_Add_left(adr2);
526 } else if (mode_is_reference(get_irn_mode(add_right))) {
528 have_const_offsets = 0;
530 adr2 = get_Add_left(adr2);
531 have_const_offsets = 0;
/* mode_size = max(size(mode1), size(mode2)) in bytes */
535 mode_size = get_mode_size_bytes(mode1);
536 if (get_mode_size_bytes(mode2) > mode_size) {
537 mode_size = get_mode_size_bytes(mode2);
540 /* same base address -> compare offsets if possible.
541 * FIXME: type long is not sufficient for this task ...
543 if (adr1 == adr2 && have_const_offsets) {
544 if ((unsigned long)labs(offset2 - offset1) >= mode_size)
547 return ir_sure_alias;
551 * Bitfields can be constructed as Sels from its base address.
552 * As they have different entities, the disambiguator would find that they are
553 * alias free. While this is true for it's values, it is false for the addresses
554 * (strictly speaking, the Sel's are NOT the addresses of the bitfields).
555 * So, skip those bitfield selecting Sel's.
557 adr1 = skip_Bitfield_Sels(adr1);
558 adr2 = skip_Bitfield_Sels(adr2);
566 base1 = find_base_adr(adr1, &ent1);
569 base2 = find_base_adr(adr2, &ent2);
572 /* same base address -> compare Sel entities */
573 if (base1 == base2 && ent1 != NULL && ent2 != NULL) {
576 else if (have_const_offsets)
577 return different_sel_offsets(adr1, adr2);
580 class1 = classify_pointer(irg, base1, ent1);
581 class2 = classify_pointer(irg, base2, ent2);
583 if (class1 == ir_sc_pointer) {
584 if (class2 & ir_sc_modifier_nottaken) {
585 /* a pointer and an object whose objects was never taken */
588 } else if (class2 == ir_sc_pointer) {
589 if (class1 & ir_sc_modifier_nottaken) {
590 /* a pointer and an object whose objects was never taken */
593 } else if (class1 != class2) {
594 /* two objects from different memory spaces */
597 /* both classes are equal */
598 if (class1 == ir_sc_globalvar) {
599 ir_entity *entity1 = get_SymConst_entity(base1);
600 ir_entity *entity2 = get_SymConst_entity(base2);
601 if (entity1 != entity2)
604 /* for some reason CSE didn't happen yet for the 2 SymConsts... */
609 /* Type based alias analysis */
610 if (options & aa_opt_type_based) {
611 ir_alias_relation rel;
613 if (options & aa_opt_byte_type_may_alias) {
614 if (get_mode_size_bits(mode1) == 8 || get_mode_size_bits(mode2) == 8) {
615 /* One of the modes address a byte. Assume a ir_may_alias and leave
616 the type based check. */
617 goto leave_type_based_alias;
620 /* cheap check: If the mode sizes did not match, the types MUST be different */
621 if (get_mode_size_bits(mode1) != get_mode_size_bits(mode2))
624 /* cheap test: if only one is a reference mode, no alias */
625 if (mode_is_reference(mode1) != mode_is_reference(mode2))
628 /* cheap test: if arithmetic is different, no alias */
629 if (get_mode_arithmetic(mode1) != get_mode_arithmetic(mode2))
633 rel = different_types(orig_adr1, orig_adr2);
634 if (rel != ir_may_alias)
636 leave_type_based_alias:;
639 /* do we have a language specific memory disambiguator? */
640 if (language_disambuigator) {
641 ir_alias_relation rel = (*language_disambuigator)(irg, orig_adr1, mode1, orig_adr2, mode2);
642 if (rel != ir_may_alias)
646 /* access points-to information here */
648 } /* _get_alias_relation */
651 * Determine the alias relation between two addresses.
653 ir_alias_relation get_alias_relation(
655 ir_node *adr1, ir_mode *mode1,
656 ir_node *adr2, ir_mode *mode2)
658 ir_alias_relation rel = _get_alias_relation(irg, adr1, mode1, adr2, mode2);
659 DB((dbg, LEVEL_1, "alias(%+F, %+F) = %s\n", adr1, adr2, get_ir_alias_relation_name(rel)));
661 } /* get_alias_relation */
663 /* Set a source language specific memory disambiguator function. */
664 void set_language_memory_disambiguator(DISAMBIGUATOR_FUNC func) {
665 language_disambuigator = func;
666 } /* set_language_memory_disambiguator */
668 /** The result cache for the memory disambiguator. */
669 static set *result_cache = NULL;
671 /** An entry in the relation cache. */
672 typedef struct mem_disambig_entry {
673 ir_node *adr1; /**< The first address. */
674 ir_node *adr2; /**< The second address. */
675 ir_alias_relation result; /**< The alias relation result. */
676 } mem_disambig_entry;
/* XOR makes the hash symmetric in (adr1, adr2); get_alias_relation_ex()
 * additionally normalizes the pair order before lookup. */
678 #define HASH_ENTRY(adr1, adr2) (HASH_PTR(adr1) ^ HASH_PTR(adr2))
681 * Compare two relation cache entries.
683 static int cmp_mem_disambig_entry(const void *elt, const void *key, size_t size) {
684 const mem_disambig_entry *p1 = elt;
685 const mem_disambig_entry *p2 = key;
688 return p1->adr1 == p2->adr1 && p1->adr2 == p2->adr2;
689 } /* cmp_mem_disambig_entry */
692 * Initialize the relation cache.
694 void mem_disambig_init(void) {
695 result_cache = new_set(cmp_mem_disambig_entry, 8);
696 } /* mem_disambig_init */
/* NOTE(review): lines are elided in this listing (the irg parameter line,
 * the swap body for the opcode normalization, the key.adr1/adr2 assignments,
 * the entry != NULL guard and the final return are missing). */
699 * Determine the alias relation between two addresses.
701 ir_alias_relation get_alias_relation_ex(
703 ir_node *adr1, ir_mode *mode1,
704 ir_node *adr2, ir_mode *mode2)
706 mem_disambig_entry key, *entry;
/* NOTE(review): unconditional stderr print below looks like leftover debug
 * output — consider removing or guarding it with the dbg module. */
708 ir_fprintf(stderr, "%+F <-> %+F\n", adr1, adr2);
710 if (! get_opt_alias_analysis())
/* normalize pair order so the symmetric cache is hit for both orderings */
713 if (get_irn_opcode(adr1) > get_irn_opcode(adr2)) {
721 entry = set_find(result_cache, &key, sizeof(key), HASH_ENTRY(adr1, adr2));
723 return entry->result;
725 key.result = get_alias_relation(irg, adr1, mode1, adr2, mode2);
727 set_insert(result_cache, &key, sizeof(key), HASH_ENTRY(adr1, adr2));
729 } /* get_alias_relation_ex */
731 /* Free the relation cache. */
732 void mem_disambig_term(void) {
734 del_set(result_cache);
737 } /* mem_disambig_term */
740 * Check the mode of a Load/Store with the mode of the entity
742 * If the mode of the entity and the Load/Store mode do not match, we
743 * have the bad reinterpret case:
746 * char b = *(char *)&i;
748 * We do NOT count this as one value and return address_taken
750 * However, we support an often used case. If the mode is two-complement
751 * we allow casts between signed/unsigned.
753 * @param mode the mode of the Load/Store
754 * @param ent_mode the mode of the accessed entity
756 * @return non-zero if the Load/Store is a hidden cast, zero else
758 static int is_hidden_cast(ir_mode *mode, ir_mode *ent_mode) {
759 if (ent_mode == NULL)
762 if (ent_mode != mode) {
763 if (ent_mode == NULL ||
764 get_mode_size_bits(ent_mode) != get_mode_size_bits(mode) ||
765 get_mode_sort(ent_mode) != get_mode_sort(mode) ||
766 get_mode_arithmetic(ent_mode) != irma_twos_complement ||
767 get_mode_arithmetic(mode) != irma_twos_complement)
771 } /* is_hidden_cast */
/* NOTE(review): this listing has lines elided (the iro_Load/iro_Store/
 * iro_CopyB/iro_Sel/iro_Call/iro_Id/iro_Tuple case labels, break statements,
 * closing braces and the final "return res;" are missing). Comments below
 * describe only what is visible. */
774 * Determine the usage state of a node (or its successor Sels).
776 * @param irn the node
778 static ir_entity_usage determine_entity_usage(const ir_node *irn, ir_entity *entity) {
780 ir_mode *emode, *mode;
783 ir_entity_usage res = 0;
/* inspect every user (out edge) of irn and accumulate usage bits */
785 for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
786 ir_node *succ = get_irn_out(irn, i);
788 switch (get_irn_opcode(succ)) {
790 /* beware: irn might be a Id node here, so irn might be not
791 equal to get_Load_ptr(succ) */
792 res |= ir_usage_read;
794 /* check if this load is not a hidden conversion */
795 mode = get_Load_mode(succ);
796 emode = get_type_mode(get_entity_type(entity));
797 if (is_hidden_cast(mode, emode))
798 res |= ir_usage_reinterpret_cast;
802 /* check that the node is not the Store's value */
803 if (irn == get_Store_value(succ)) {
804 res |= ir_usage_unknown;
806 if (irn == get_Store_ptr(succ)) {
807 res |= ir_usage_write;
809 /* check if this Store is not a hidden conversion */
810 value = get_Store_value(succ);
811 mode = get_irn_mode(value);
812 emode = get_type_mode(get_entity_type(entity));
813 if (is_hidden_cast(mode, emode))
814 res |= ir_usage_reinterpret_cast;
816 assert(irn != get_Store_mem(succ));
820 /* CopyB are like Loads/Stores */
821 tp = get_entity_type(entity);
822 if (tp != get_CopyB_type(succ)) {
823 /* bad, different types, might be a hidden conversion */
824 res |= ir_usage_reinterpret_cast;
826 if (irn == get_CopyB_dst(succ)) {
827 res |= ir_usage_write;
829 assert(irn == get_CopyB_src(succ));
830 res |= ir_usage_read;
837 /* Check the successor of irn. */
838 res |= determine_entity_usage(succ, entity);
843 if (irn == get_Call_ptr(succ)) {
844 /* TODO: we could check for reinterpret casts here...
845 * But I doubt anyone is interested in that bit for
846 * function entities and I'm too lazy to write the code now.
848 res |= ir_usage_read;
850 assert(irn != get_Call_mem(succ));
851 res |= ir_usage_unknown;
855 /* skip identities */
857 res |= determine_entity_usage(succ, entity);
/* Tuple: follow only the Projs that select the input irn feeds */
863 for (input_nr = get_Tuple_n_preds(succ) - 1; input_nr >= 0;
865 ir_node *pred = get_Tuple_pred(succ, input_nr);
868 /* we found one input */
869 for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
870 ir_node *proj = get_irn_out(succ, k);
872 if (is_Proj(proj) && get_Proj_proj(proj) == input_nr) {
873 res |= determine_entity_usage(proj, entity);
883 /* another op, we don't know anything (we could do more advanced
884 * things like a dataflow analysis here) */
885 res |= ir_usage_unknown;
/* NOTE(review): lines are elided here (declarations of i, irg_frame, entity;
 * the is_Sel(succ) guard before get_Sel_entity; loop braces). */
894 * Update the usage flags of all frame entities.
896 static void analyse_irg_entity_usage(ir_graph *irg) {
897 ir_type *ft = get_irg_frame_type(irg);
901 /* set initial state to not_taken, as this is the "smallest" state */
902 for (i = get_class_n_members(ft) - 1; i >= 0; --i) {
903 ir_entity *ent = get_class_member(ft, i);
/* sticky entities may be referenced externally -> stay unknown */
904 ir_entity_usage flags =
905 get_entity_stickyness(ent) == stickyness_sticky ? ir_usage_unknown : 0;
907 set_entity_usage(ent, flags);
/* out edges are needed to walk the users of the frame pointer */
910 assure_irg_outs(irg);
912 irg_frame = get_irg_frame(irg);
914 for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) {
915 ir_node *succ = get_irn_out(irg_frame, i);
917 ir_entity_usage flags;
922 entity = get_Sel_entity(succ);
923 flags = get_entity_usage(entity);
924 flags |= determine_entity_usage(succ, entity);
925 set_entity_usage(entity, flags);
929 irg->entity_usage_state = ir_entity_usage_computed;
932 ir_entity_usage_computed_state get_irg_entity_usage_state(const ir_graph *irg) {
933 return irg->entity_usage_state;
936 void set_irg_entity_usage_state(ir_graph *irg, ir_entity_usage_computed_state state) {
937 irg->entity_usage_state = state;
940 void assure_irg_entity_usage_computed(ir_graph *irg) {
941 if (irg->entity_usage_state != ir_entity_usage_not_computed)
944 analyse_irg_entity_usage(irg);
949 * Initialize the entity_usage flag for a global type like type.
951 static void init_entity_usage(ir_type * tp) {
954 /* We have to be conservative: All external visible entities are unknown */
955 for (i = get_compound_n_members(tp) - 1; i >= 0; --i) {
956 ir_entity *ent = get_compound_member(tp, i);
957 ir_entity_usage flags = ir_usage_none;
958 ir_visibility vis = get_entity_visibility(ent);
960 if (vis == visibility_external_visible ||
961 vis == visibility_external_allocated ||
962 get_entity_stickyness(ent) == stickyness_sticky) {
963 flags |= ir_usage_unknown;
965 set_entity_usage(ent, flags);
970 * Mark all entities used in the initializer as unknown usage.
972 * @param initializer the initializer to check
974 static void check_initializer_nodes(ir_initializer_t *initializer)
979 switch (initializer->kind) {
980 case IR_INITIALIZER_CONST:
981 /* let's check if it's an address */
982 n = initializer->consti.value;
984 ir_entity *ent = get_Global_entity(n);
985 set_entity_usage(ent, ir_usage_unknown);
988 case IR_INITIALIZER_TARVAL:
989 case IR_INITIALIZER_NULL:
991 case IR_INITIALIZER_COMPOUND:
992 for (i = 0; i < initializer->compound.n_initializers; ++i) {
993 ir_initializer_t *sub_initializer
994 = initializer->compound.initializers[i];
995 check_initializer_nodes(sub_initializer);
999 panic("invalid initializer found");
1000 } /* check_initializer_nodes */
/* NOTE(review): lines are elided in this listing (declarations of n and i,
 * the "return;" after both early guards, the is_Global(n) guards before
 * get_Global_entity, the else-branch for compound entities and closing
 * braces are missing). */
1003 * Mark all entities used in the initializer for the given entity as unknown
1006 * @param ent the entity
1008 static void check_initializer(ir_entity *ent) {
1012 /* do not check uninitialized values */
1013 if (get_entity_variability(ent) == variability_uninitialized)
1016 /* Beware: Methods are always initialized with "themself". This does not
1017 count as a taken address. */
1018 if (is_Method_type(get_entity_type(ent)))
1021 if (ent->has_initializer) {
1022 check_initializer_nodes(ent->attr.initializer);
1023 } else if (is_atomic_entity(ent)) {
1024 /* let's check if it's an address */
1025 n = get_atomic_ent_value(ent);
/* NOTE(review): the inner "ent" below shadows the parameter "ent" —
 * intentional upstream, but easy to misread. */
1027 ir_entity *ent = get_Global_entity(n);
1028 set_entity_usage(ent, ir_usage_unknown);
1031 for (i = get_compound_ent_n_values(ent) - 1; i >= 0; --i) {
1032 n = get_compound_ent_value(ent, i);
1034 /* let's check if it's an address */
1036 ir_entity *ent = get_Global_entity(n);
1037 set_entity_usage(ent, ir_usage_unknown);
1041 } /* check_initializer */
1045 * Mark all entities used in initializers as unknown usage.
1047 * @param tp a compound type
1049 static void check_initializers(ir_type *tp) {
1052 for (i = get_compound_n_members(tp) - 1; i >= 0; --i) {
1053 ir_entity *ent = get_compound_member(tp, i);
1055 check_initializer(ent);
1057 } /* check_initializers */
1059 #ifdef DEBUG_libfirm
/* NOTE(review): lines are elided here (declaration of i, the printf calls
 * for the read/write flags, the trailing newline print and the closing
 * braces are missing from this listing). Debug-only helper. */
1061 * Print the entity usage flags of all entities of a given type for debugging.
1063 * @param tp a compound type
1065 static void print_entity_usage_flags(ir_type *tp) {
1067 for (i = get_compound_n_members(tp) - 1; i >= 0; --i) {
1068 ir_entity *ent = get_compound_member(tp, i);
1069 ir_entity_usage flags = get_entity_usage(ent);
1073 ir_printf("%+F:", ent);
1074 if (flags & ir_usage_address_taken)
1075 printf(" address_taken");
1076 if (flags & ir_usage_read)
1078 if (flags & ir_usage_write)
1080 if (flags & ir_usage_reinterpret_cast)
1081 printf(" reinterp_cast");
1085 #endif /* DEBUG_libfirm */
/* NOTE(review): lines are elided here (declarations of ent and tls, the
 * "else return;" fallthrough and closing braces are missing). */
1088 * Post-walker: check for global entity address
1090 static void check_global_address(ir_node *irn, void *env) {
1093 ir_entity_usage flags;
1095 if (is_Global(irn)) {
1097 ent = get_Global_entity(irn);
/* NOTE(review): "tls" presumably comes from the walker env — the walk in
 * analyse_irp_globals_entity_usage() passes get_irg_tls(irg) as env;
 * confirm the elided declaration. */
1098 } else if (is_Sel(irn) && get_Sel_ptr(irn) == tls) {
1099 /* A TLS variable. */
1100 ent = get_Sel_entity(irn);
1104 flags = get_entity_usage(ent);
1105 flags |= determine_entity_usage(irn, ent);
1106 set_entity_usage(ent, flags);
1107 } /* check_global_address */
/* NOTE(review): lines are elided here (declarations of s and i, loop
 * braces and the closing brace of the function are missing). */
1110 * Update the entity usage flags of all global entities.
1112 static void analyse_irp_globals_entity_usage(void) {
/* pass 1: conservatively reset all segment entities */
1116 for (s = IR_SEGMENT_FIRST; s < IR_SEGMENT_COUNT; ++s) {
1117 ir_type *type = get_segment_type(s);
1118 init_entity_usage(type);
/* pass 2: entities referenced from initializers become unknown */
1121 for (s = IR_SEGMENT_FIRST; s < IR_SEGMENT_COUNT; ++s) {
1122 ir_type *type = get_segment_type(s);
1123 check_initializers(type);
/* pass 3: walk every graph; env carries the TLS base for check_global_address */
1126 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1127 ir_graph *irg = get_irp_irg(i);
1129 assure_irg_outs(irg);
1130 irg_walk_graph(irg, NULL, check_global_address, get_irg_tls(irg));
1133 #ifdef DEBUG_libfirm
1134 if (firm_dbg_get_mask(dbg) & LEVEL_1) {
1136 for (s = IR_SEGMENT_FIRST; s < IR_SEGMENT_COUNT; ++s) {
1137 print_entity_usage_flags(get_segment_type(s));
1140 #endif /* DEBUG_libfirm */
1143 irp->globals_entity_usage_state = ir_entity_usage_computed;
1146 /* Returns the current address taken state of the globals. */
1147 ir_entity_usage_computed_state get_irp_globals_entity_usage_state(void) {
1148 return irp->globals_entity_usage_state;
1151 /* Sets the current address taken state of the graph. */
1152 void set_irp_globals_entity_usage_state(ir_entity_usage_computed_state state) {
1153 irp->globals_entity_usage_state = state;
1156 /* Assure that the address taken flag is computed for the globals. */
1157 void assure_irp_globals_entity_usage_computed(void) {
1158 if (irp->globals_entity_usage_state != ir_entity_usage_not_computed)
1161 analyse_irp_globals_entity_usage();
1164 void firm_init_memory_disambiguator(void) {
1165 FIRM_DBG_REGISTER(dbg, "firm.ana.irmemory");
1166 FIRM_DBG_REGISTER(dbgcall, "firm.opt.cc");
1170 /** Maps method types to cloned method types. */
/* Created in mark_private_methods() (pmap_create), filled by
 * clone_type_and_cache() and destroyed again at the end of the pass. */
1171 static pmap *mtp_map;
1174 * Clone a method type if not already cloned.
1176 * @param tp the type to clone
1178 static ir_type *clone_type_and_cache(ir_type *tp) {
1179 static ident *prefix = NULL;
1181 pmap_entry *e = pmap_find(mtp_map, tp);
1187 prefix = new_id_from_chars("C", 1);
1189 res = clone_type_method(tp, prefix);
1190 pmap_insert(mtp_map, tp, res);
1191 DB((dbgcall, LEVEL_2, "cloned type %+F into %+F\n", tp, res));
1194 } /* clone_type_and_cache */
1197 * Walker: clone all call types of Calls to methods having the
1198 * mtp_property_private property set.
1200 static void update_calls_to_private(ir_node *call, void *env) {
1202 if (is_Call(call)) {
1203 ir_node *ptr = get_Call_ptr(call);
1205 if (is_SymConst(ptr)) {
1206 ir_entity *ent = get_SymConst_entity(ptr);
1207 ir_type *ctp = get_Call_type(call);
1209 if (get_entity_additional_properties(ent) & mtp_property_private) {
1210 if ((get_method_additional_properties(ctp) & mtp_property_private) == 0) {
1211 ctp = clone_type_and_cache(ctp);
1212 set_method_additional_property(ctp, mtp_property_private);
1213 set_Call_type(call, ctp);
1214 DB((dbgcall, LEVEL_1, "changed call to private method %+F\n", ent));
1219 } /* update_calls_to_private */
1221 /* Mark all private methods, i.e. those of which all call sites are known. */
/* NOTE(review): lines are elided here (declaration of i, the if/loop closing
 * braces and the comment before the walk are missing from this listing). */
1222 void mark_private_methods(void) {
1226 assure_irp_globals_entity_usage_computed();
/* cache for cloned method types, used by clone_type_and_cache() */
1228 mtp_map = pmap_create();
1230 /* first step: change the calling conventions of the local non-escaped entities */
1231 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1232 ir_graph *irg = get_irp_irg(i);
1233 ir_entity *ent = get_irg_entity(irg);
1234 ir_entity_usage flags = get_entity_usage(ent);
1236 /* If an entity is sticky, it might be called from external
1237 places (like inline assembler), so do NOT mark it as private. */
1238 if (get_entity_visibility(ent) == visibility_local &&
1239 !(flags & ir_usage_address_taken) &&
1240 get_entity_stickyness(ent) != stickyness_sticky) {
1241 ir_type *mtp = get_entity_type(ent);
1243 set_entity_additional_property(ent, mtp_property_private);
1244 DB((dbgcall, LEVEL_1, "found private method %+F\n", ent));
1245 if ((get_method_additional_properties(mtp) & mtp_property_private) == 0) {
1246 /* need a new type */
1247 mtp = clone_type_and_cache(mtp);
1248 set_entity_type(ent, mtp);
1249 set_method_additional_property(mtp, mtp_property_private);
/* second step: rewrite all call sites to use the private call types */
1256 all_irg_walk(NULL, update_calls_to_private, NULL);
1258 pmap_destroy(mtp_map);
1259 } /* mark_private_methods */