2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Memory disambiguator
23 * @author Michael Beck
34 #include "irgraph_t.h"
36 #include "irmemory_t.h"
/** The debug handle for general alias-analysis messages. */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/** The debug handle for call/private-method messages (see mark_private_methods). */
DEBUG_ONLY(static firm_dbg_module_t *dbgcall = NULL;)

/** The source language specific language disambiguator function.
 *  NOTE(review): identifier is misspelled ("disambuigator") but used
 *  consistently in this file; renaming would touch every user. */
static DISAMBIGUATOR_FUNC language_disambuigator = NULL;

/** The global memory disambiguator options, used by graphs whose own
 *  options carry the aa_opt_inherited flag. */
static unsigned global_mem_disamgig_opt = aa_opt_no_opt;
/* Returns a human readable name for an alias relation. */
const char *get_ir_alias_relation_name(ir_alias_relation rel)
/* X() stringifies each enum value in the switch over `rel`.
 * NOTE(review): the switch body (the `case` lines using X) is lost in an
 * extraction gap between the macro and the panic below. */
#define X(a) case a: return #a
/* unreachable for valid ir_alias_relation values */
panic("UNKNOWN alias relation");
/* Get the memory disambiguator options for a graph. */
unsigned get_irg_memory_disambiguator_options(const ir_graph *irg)
unsigned opt = irg->mem_disambig_opt;
/* graphs flagged as "inherited" defer to the global (irp-wide) options */
if (opt & aa_opt_inherited)
return global_mem_disamgig_opt;
/* NOTE(review): the `return opt;` for the non-inherited case is lost in an
 * extraction gap before the closing brace. */
} /* get_irg_memory_disambiguator_options */
/* Set the memory disambiguator options for a graph. */
void set_irg_memory_disambiguator_options(ir_graph *irg, unsigned options)
/* clearing aa_opt_inherited makes the per-graph options authoritative */
irg->mem_disambig_opt = options & ~aa_opt_inherited;
} /* set_irg_memory_disambiguator_options */
/* Set the global disambiguator options for all graphs not having local options. */
void set_irp_memory_disambiguator_options(unsigned options)
global_mem_disamgig_opt = options;
} /* set_irp_memory_disambiguator_options */
/**
 * Find the base address and entity of an Sel node by walking down the
 * Sel chain until a non-Sel pointer is reached.
 *
 * @param sel   the Sel node to start from
 * @param pEnt  after return points to the base entity.
 *
 * @return the base address (the pointer feeding the innermost Sel).
 */
static ir_node *find_base_adr(const ir_node *sel, ir_entity **pEnt)
ir_node *ptr = get_Sel_ptr(sel);
while (is_Sel(ptr)) {
/* NOTE(review): an extraction gap precedes this line; as written the loop
 * re-reads the same `sel` forever — upstream advances `sel = ptr;` first.
 * Confirm against the unexcerpted file. */
ptr = get_Sel_ptr(sel);
/* the entity of the outermost remaining Sel is the base entity */
*pEnt = get_Sel_entity(sel);
} /* find_base_adr */
/**
 * Check if a given Const node is greater or equal a given size.
 *
 * @param cns   a Const node
 * @param size  an integer size
 *
 * @return ir_no_alias if the Const is greater, ir_may_alias else
 */
static ir_alias_relation check_const(const ir_node *cns, int size)
tarval *tv = get_Const_tarval(cns);
/* size == 0 case (guard lost in gap): only a non-null constant proves
 * the accesses are distinct */
return tarval_is_null(tv) ? ir_may_alias : ir_no_alias;
tv_size = new_tarval_from_long(size, get_tarval_mode(tv));
/* no alias iff size <= tv, i.e. the constant offset is at least `size` */
return tarval_cmp(tv_size, tv) & (pn_Cmp_Eq|pn_Cmp_Lt) ? ir_no_alias : ir_may_alias;
/**
 * Treat idx1 and idx2 as integer indexes and check if they differ always more than size.
 *
 * NOTE(review): this excerpt has extraction gaps (missing `if` conditions,
 * case guards, and closing braces); the visible lines are annotated as-is.
 *
 * @param idx1 a node representing the first index
 * @param idx2 a node representing the second index
 * @param size an integer size
 *
 * @return ir_sure_alias iff idx1 == idx2
 *         ir_no_alias   iff they ALWAYS differ more than size
 */
static ir_alias_relation different_index(const ir_node *idx1, const ir_node *idx2, int size)
/* identical index nodes always address the same element */
return ir_sure_alias;
if (is_Const(idx1) && is_Const(idx2)) {
/* both are const, we can compare them */
tarval *tv1 = get_Const_tarval(idx1);
tarval *tv2 = get_Const_tarval(idx2);
tarval *tv, *tv_size;
/* size == 0 path (guard lost in gap): exact equality decides */
return tv1 == tv2 ? ir_sure_alias : ir_no_alias;
/* arg, modes may be different */
m1 = get_tarval_mode(tv1);
m2 = get_tarval_mode(tv2);
/* NOTE(review): this `size` shadows the parameter — here it is the
 * bit-width difference between the two modes */
int size = get_mode_size_bits(m1) - get_mode_size_bits(m2);
/* m1 is a small mode, cast up */
m1 = mode_is_signed(m1) ? find_signed_mode(m2) : find_unsigned_mode(m2);
/* should NOT happen, but if it does we give up here */
tv1 = tarval_convert_to(tv1, m1);
} else if (size > 0) {
/* m2 is a small mode, cast up */
m2 = mode_is_signed(m2) ? find_signed_mode(m1) : find_unsigned_mode(m1);
/* should NOT happen, but if it does we give up here */
tv2 = tarval_convert_to(tv2, m2);
/* here the size should be identical, check for signed */
if (get_mode_sign(m1) != get_mode_sign(m2)) {
/* find the signed */
if (mode_is_signed(m2)) {
/* m1 is now the signed one */
if (!tarval_is_negative(tv1)) {
/* tv1 is signed, but >= 0, simply cast into unsigned */
tv1 = tarval_convert_to(tv1, m2);
tv_size = new_tarval_from_long(size, m2);
if (tarval_cmp(tv2, tv_size) & (pn_Cmp_Eq|pn_Cmp_Gt)) {
/* tv1 is negative and tv2 >= tv_size, so the difference is bigger than size */
/* tv_size > tv2, so we can subtract without overflow */
tv2 = tarval_sub(tv_size, tv2, NULL);
/* tv1 is < 0, so we can negate it */
tv1 = tarval_neg(tv1);
/* cast it into unsigned. for two-complement it does the right thing for MIN_INT */
tv1 = tarval_convert_to(tv1, m2);
/* now we can compare without overflow */
return tarval_cmp(tv1, tv2) & (pn_Cmp_Eq|pn_Cmp_Gt) ? ir_no_alias : ir_may_alias;
/* same-sign path: order the operands first */
if (tarval_cmp(tv1, tv2) == pn_Cmp_Gt) {
/* tv1 is now the "smaller" one */
tv = tarval_sub(tv2, tv1, NULL);
tv_size = new_tarval_from_long(size, get_tarval_mode(tv));
return tarval_cmp(tv_size, tv) & (pn_Cmp_Eq|pn_Cmp_Lt) ? ir_no_alias : ir_may_alias;
/* Note: we rely here on the fact that normalization puts constants on the RIGHT side */
ir_node *l1 = get_Add_left(idx1);
ir_node *r1 = get_Add_right(idx1);
/* x + c vs x: decided by the constant alone */
return check_const(r1, size);
/* both are Adds, check if they are of x + a == x + b kind */
ir_node *l2 = get_Add_left(idx2);
ir_node *r2 = get_Add_right(idx2);
/* recurse on the differing operand, one case per operand pairing
 * (the guards selecting each case are lost in extraction gaps) */
return different_index(r1, r2, size);
return different_index(r1, l2, size);
return different_index(l1, l2, size);
return different_index(l1, r2, size);
/* x vs x + c (idx2 is the Add) */
ir_node *l2 = get_Add_left(idx2);
ir_node *r2 = get_Add_right(idx2);
return check_const(r2, size);
/* Sub cases: x - c vs x */
ir_node *l1 = get_Sub_left(idx1);
ir_node *r1 = get_Sub_right(idx1);
return check_const(r1, size);
/* both are Subs, check if they are of x - a == x - b kind */
ir_node *l2 = get_Sub_left(idx2);
ir_node *r2 = get_Sub_right(idx2);
return different_index(r1, r2, size);
/* x vs x - c (idx2 is the Sub) */
ir_node *l2 = get_Sub_left(idx2);
ir_node *r2 = get_Sub_right(idx2);
return check_const(r2, size);
} /* different_index */
/**
 * Two Sel addresses have the same base address, check if their offsets are
 * provably different.
 *
 * NOTE(review): the body below uses unprefixed enum names (may_alias,
 * no_alias, sure_alias) unlike the rest of the file, and the upstream
 * comment marks it "seems to be broken" — the active code path appears to
 * be the trailing `(void) different_index;` stub. Treat as dead/disabled.
 *
 * @param sel1 The first Sel address.
 * @param sel2 The second Sel address.
 */
static ir_alias_relation different_sel_offsets(const ir_node *sel1, const ir_node *sel2)
/* seems to be broken */
ir_entity *ent1 = get_Sel_entity(sel1);
ir_entity *ent2 = get_Sel_entity(sel2);
int i, check_arr = 0;
ir_type *tp1 = get_entity_type(ent1);
ir_type *tp2 = get_entity_type(ent2);
/* only fixed-layout types of identical size can be compared by index */
else if (get_type_state(tp1) == layout_fixed && get_type_state(tp2) == layout_fixed &&
get_type_size_bits(tp1) == get_type_size_bits(tp2))
/* we select an entity of same size, check for indexes */
int n = get_Sel_n_indexs(sel1);
if (n > 0 && n == get_Sel_n_indexs(sel2)) {
/* same non-zero number of indexes, an array access, check */
for (i = 0; i < n; ++i) {
ir_node *idx1 = get_Sel_index(sel1, i);
ir_node *idx2 = get_Sel_index(sel2, i);
ir_alias_relation res = different_index(idx1, idx2, 0); /* we can safely IGNORE the size here if it's at least >0 */
if (res == may_alias)
else if (res == no_alias)
/* if we have at least one no_alias, there is no alias relation, else we have sure */
return have_no > 0 ? no_alias : sure_alias;
/* reference the helper so the disabled code does not warn as unused */
(void) different_index;
} /* different_sel_offsets */
/**
 * Determine the alias relation by checking if adr1 and adr2 are pointers
 * to structurally different types (type-based alias analysis).
 *
 * @param adr1 The first address.
 * @param adr2 The second address.
 */
static ir_alias_relation different_types(const ir_node *adr1, const ir_node *adr2)
ir_entity *ent1 = NULL, *ent2 = NULL;
/* fetch the entity behind each address, if statically known */
ent1 = get_Global_entity(adr1);
else if (is_Sel(adr1))
ent1 = get_Sel_entity(adr1);
ent2 = get_Global_entity(adr2);
else if (is_Sel(adr2))
ent2 = get_Sel_entity(adr2);
if (ent1 != NULL && ent2 != NULL) {
ir_type *tp1 = get_entity_type(ent1);
ir_type *tp2 = get_entity_type(ent2);
/* do deref until no pointer types are found */
while (is_Pointer_type(tp1) && is_Pointer_type(tp2)) {
tp1 = get_pointer_points_to_type(tp1);
tp2 = get_pointer_points_to_type(tp2);
if (get_type_tpop(tp1) != get_type_tpop(tp2)) {
/* different type structure */
if (is_Class_type(tp1)) {
/* check class hierarchy: only unrelated classes cannot alias */
if (! is_SubClass_of(tp1, tp2) &&
! is_SubClass_of(tp2, tp1))
/* different types */
} /* different_types */
/**
 * Returns non-zero if a node is a result of a malloc-like routine, i.e. a
 * Proj-Proj chain off a Call whose callee has the mtp_property_malloc flag.
 *
 * @param node the Proj node to test
 */
static int is_malloc_Result(const ir_node *node)
node = get_Proj_pred(node);
/* second Proj level: Proj(Proj(Call)) (guard lost in extraction gap) */
node = get_Proj_pred(node);
node = get_Call_ptr(node);
if (is_Global(node)) {
ir_entity *ent = get_Global_entity(node);
/* the callee is declared to behave like malloc: fresh memory */
if (get_entity_additional_properties(ent) & mtp_property_malloc)
} /* is_malloc_Result */
/**
 * Classify a base pointer into a storage class (global, local, TLS,
 * malloc'ed, absolute address, or unknown pointer), optionally OR-ing in
 * modifier bits (address never taken, function argument).
 *
 * @param irg the graph of the pointer
 * @param irn the node representing the base address
 * @param ent the base entity of the base address iff any
 */
ir_storage_class_class_t classify_pointer(const ir_graph *irg, const ir_node *irn, const ir_entity *ent)
ir_storage_class_class_t res = ir_sc_pointer;
if (is_Global(irn)) {
ir_entity *entity = get_Global_entity(irn);
res = ir_sc_globalvar;
if (! (get_entity_usage(entity) & ir_usage_address_taken))
res |= ir_sc_modifier_nottaken;
} else if (irn == get_irg_frame(irg)) {
res = ir_sc_localvar;
if (ent != NULL && !(get_entity_usage(ent) & ir_usage_address_taken))
res |= ir_sc_modifier_nottaken;
} else if (irn == get_irg_tls(irg)) {
/* thread-local storage (the ir_sc_tls assignment sits in a gap) */
if (ent != NULL && !(get_entity_usage(ent) & ir_usage_address_taken))
res |= ir_sc_modifier_nottaken;
} else if (is_Proj(irn) && is_malloc_Result(irn)) {
return ir_sc_malloced;
} else if (is_Const(irn)) {
/* a constant address, e.g. memory-mapped I/O */
return ir_sc_globaladdr;
} else if (is_arg_Proj(irn)) {
res |= ir_sc_modifier_argument;
/**
 * If adr represents a Bitfield Sel, skip it and return the underlying
 * pointer; bitfield Sels are not real addresses (see caller comment).
 */
static const ir_node *skip_Bitfield_Sels(const ir_node *adr)
ir_entity *ent = get_Sel_entity(adr);
ir_type *bf_type = get_entity_type(ent);
/* is it a bitfield type? (primitive with a base type) */
if (is_Primitive_type(bf_type) && get_primitive_base_type(bf_type) != NULL)
adr = get_Sel_ptr(adr);
/**
 * Determine the alias relation between two addresses.
 *
 * Strategy (as visible in this excerpt): strip constant Add offsets, compare
 * equal bases by offset distance, skip bitfield Sels, compare Sel chains
 * with equal bases by entity/offset, otherwise classify both base pointers
 * into storage classes and decide per class pair; finally fall back to
 * type-based analysis and the language-specific disambiguator.
 *
 * NOTE(review): several guards and returns sit in extraction gaps; the
 * annotations below describe only the visible lines.
 *
 * @param irg   the graph of both memory operations
 * @param adr1  pointer address of the first memory operation
 * @param mode1 the mode of the accessed data through adr1
 * @param adr2  pointer address of the second memory operation
 * @param mode2 the mode of the accessed data through adr2
 *
 * @return found memory relation
 */
static ir_alias_relation _get_alias_relation(
const ir_node *adr1, const ir_mode *mode1,
const ir_node *adr2, const ir_mode *mode2)
ir_entity *ent1, *ent2;
const ir_node *base1;
const ir_node *base2;
const ir_node *orig_adr1 = adr1;
const ir_node *orig_adr2 = adr2;
ir_storage_class_class_t class1, class2, mod1, mod2;
int have_const_offsets;
/* without alias analysis enabled everything may alias */
if (! get_opt_alias_analysis())
/* identical addresses trivially alias (guard in gap) */
return ir_sure_alias;
options = get_irg_memory_disambiguator_options(irg);
/* The Armageddon switch */
if (options & aa_opt_no_alias)
/* do the addresses have constants offsets?
 * Note: nodes are normalized to have constants at right inputs,
 * sub X, C is normalized to add X, -C
 */
have_const_offsets = 1;
while (is_Add(adr1)) {
ir_node *add_right = get_Add_right(adr1);
if (is_Const(add_right) && !mode_is_reference(get_irn_mode(add_right))) {
tarval *tv = get_Const_tarval(add_right);
offset1 += get_tarval_long(tv);
adr1 = get_Add_left(adr1);
} else if (mode_is_reference(get_irn_mode(add_right))) {
/* pointer-valued right operand: cannot accumulate a constant */
have_const_offsets = 0;
adr1 = get_Add_left(adr1);
have_const_offsets = 0;
/* same peeling loop for the second address */
while (is_Add(adr2)) {
ir_node *add_right = get_Add_right(adr2);
if (is_Const(add_right) && !mode_is_reference(get_irn_mode(add_right))) {
tarval *tv = get_Const_tarval(add_right);
offset2 += get_tarval_long(tv);
adr2 = get_Add_left(adr2);
} else if (mode_is_reference(get_irn_mode(add_right))) {
have_const_offsets = 0;
adr2 = get_Add_left(adr2);
have_const_offsets = 0;
/* use the larger of both access sizes for the distance test */
mode_size = get_mode_size_bytes(mode1);
if (get_mode_size_bytes(mode2) > mode_size) {
mode_size = get_mode_size_bytes(mode2);
/* same base address -> compare offsets if possible.
 * FIXME: type long is not sufficient for this task ...
 */
if (adr1 == adr2 && have_const_offsets) {
if ((unsigned long)labs(offset2 - offset1) >= mode_size)
return ir_sure_alias;
/*
 * Bitfields can be constructed as Sels from its base address.
 * As they have different entities, the disambiguator would find that they are
 * alias free. While this is true for it's values, it is false for the addresses
 * (strictly speaking, the Sel's are NOT the addresses of the bitfields).
 * So, skip those bitfield selecting Sel's.
 */
adr1 = skip_Bitfield_Sels(adr1);
adr2 = skip_Bitfield_Sels(adr2);
/* walk both Sel chains down to their bases (Sel guards in gaps) */
base1 = find_base_adr(adr1, &ent1);
base2 = find_base_adr(adr2, &ent2);
/* same base address -> compare Sel entities */
if (base1 == base2 && ent1 != NULL && ent2 != NULL) {
else if (have_const_offsets)
return different_sel_offsets(adr1, adr2);
mod1 = classify_pointer(irg, base1, ent1);
mod2 = classify_pointer(irg, base2, ent2);
class1 = GET_BASE_SC(mod1);
class2 = GET_BASE_SC(mod2);
if (class1 == ir_sc_pointer || class2 == ir_sc_pointer) {
/* swap pointer class to class1 */
if (class2 == ir_sc_pointer) {
ir_storage_class_class_t temp = mod1;
class1 = GET_BASE_SC(mod1);
class2 = GET_BASE_SC(mod2);
/* a pointer and an object whose address was never taken */
if (mod2 & ir_sc_modifier_nottaken) {
if (mod1 & ir_sc_modifier_argument) {
if ( (options & aa_opt_no_alias_args)
&& (mod2 & ir_sc_modifier_argument))
if ( (options & aa_opt_no_alias_args_global)
&& (class2 == ir_sc_globalvar
|| class2 == ir_sc_tls
|| class2 == ir_sc_globaladdr))
} else if (class1 != class2) {
/* two objects from different memory spaces */
/* both classes are equal */
if (class1 == ir_sc_globalvar) {
ir_entity *entity1 = get_SymConst_entity(base1);
ir_entity *entity2 = get_SymConst_entity(base2);
if (entity1 != entity2)
/* for some reason CSE didn't happen yet for the 2 SymConsts... */
} else if (class1 == ir_sc_globaladdr) {
tarval *tv = get_Const_tarval(base1);
offset1 += get_tarval_long(tv);
tv = get_Const_tarval(base2);
offset2 += get_tarval_long(tv);
/* absolute addresses: decide by constant distance */
if ((unsigned long)labs(offset2 - offset1) >= mode_size)
return ir_sure_alias;
/* Type based alias analysis */
if (options & aa_opt_type_based) {
ir_alias_relation rel;
if (options & aa_opt_byte_type_may_alias) {
if (get_mode_size_bits(mode1) == 8 || get_mode_size_bits(mode2) == 8) {
/* One of the modes address a byte. Assume a ir_may_alias and leave
   the type based check. */
goto leave_type_based_alias;
/* cheap check: If the mode sizes did not match, the types MUST be different */
if (get_mode_size_bits(mode1) != get_mode_size_bits(mode2))
/* cheap test: if only one is a reference mode, no alias */
if (mode_is_reference(mode1) != mode_is_reference(mode2))
/* cheap test: if arithmetic is different, no alias */
if (get_mode_arithmetic(mode1) != get_mode_arithmetic(mode2))
rel = different_types(orig_adr1, orig_adr2);
if (rel != ir_may_alias)
leave_type_based_alias:;
/* do we have a language specific memory disambiguator? */
if (language_disambuigator != NULL) {
ir_alias_relation rel = language_disambuigator(irg, orig_adr1, mode1, orig_adr2, mode2);
if (rel != ir_may_alias)
/* access points-to information here */
} /* _get_alias_relation */
/**
 * Determine the alias relation between two addresses.
 * Public wrapper around _get_alias_relation() that adds debug output.
 */
ir_alias_relation get_alias_relation(
const ir_node *adr1, const ir_mode *mode1,
const ir_node *adr2, const ir_mode *mode2)
ir_alias_relation rel = _get_alias_relation(irg, adr1, mode1, adr2, mode2);
DB((dbg, LEVEL_1, "alias(%+F, %+F) = %s\n", adr1, adr2, get_ir_alias_relation_name(rel)));
/* NOTE(review): the `return rel;` is lost in an extraction gap here. */
} /* get_alias_relation */
/* Set a source language specific memory disambiguator function. */
void set_language_memory_disambiguator(DISAMBIGUATOR_FUNC func)
language_disambuigator = func;
} /* set_language_memory_disambiguator */
/** The result cache for the memory disambiguator. */
static set *result_cache = NULL;

/** An entry in the relation cache, keyed by both addresses and modes. */
typedef struct mem_disambig_entry {
	const ir_node *adr1;      /**< The first address. */
	const ir_mode *mode1;     /**< The first address mode. */
	const ir_node *adr2;      /**< The second address. */
	const ir_mode *mode2;     /**< The second address mode. */
	ir_alias_relation result; /**< The alias relation result. */
} mem_disambig_entry;

/** Hash a pair of addresses; order-sensitive, callers normalize operand order. */
#define HASH_ENTRY(adr1, adr2) (HASH_PTR(adr1) ^ HASH_PTR(adr2))
/**
 * Compare two relation cache entries for set lookup.
 * Returns non-zero on equality (set callback convention as used here).
 */
static int cmp_mem_disambig_entry(const void *elt, const void *key, size_t size)
const mem_disambig_entry *p1 = elt;
const mem_disambig_entry *p2 = key;
/* equality requires both address pointers and both modes to match */
return p1->adr1 == p2->adr1 && p1->adr2 == p2->adr2 &&
p1->mode1 == p2->mode1 && p1->mode2 == p2->mode2;
} /* cmp_mem_disambig_entry */
/**
 * Initialize the relation cache (must be called before get_alias_relation_ex()).
 */
void mem_disambig_init(void)
result_cache = new_set(cmp_mem_disambig_entry, 8);
} /* mem_disambig_init */
/**
 * Determine the alias relation between two addresses, with memoization
 * in result_cache. Operand order is normalized by opcode so that
 * (a,b) and (b,a) hit the same cache entry.
 */
ir_alias_relation get_alias_relation_ex(
const ir_node *adr1, const ir_mode *mode1,
const ir_node *adr2, const ir_mode *mode2)
mem_disambig_entry key, *entry;
/* NOTE(review): unconditional stderr trace — looks like leftover debug
 * output; consider removing or gating behind the dbg module. */
ir_fprintf(stderr, "%+F <-> %+F\n", adr1, adr2);
if (! get_opt_alias_analysis())
/* normalize operand order for the cache */
if (get_irn_opcode(adr1) > get_irn_opcode(adr2)) {
const ir_node *t = adr1;
/* cache hit: return the memoized relation */
entry = set_find(result_cache, &key, sizeof(key), HASH_ENTRY(adr1, adr2));
return entry->result;
/* cache miss: compute and memoize */
key.result = get_alias_relation(irg, adr1, mode1, adr2, mode2);
set_insert(result_cache, &key, sizeof(key), HASH_ENTRY(adr1, adr2));
} /* get_alias_relation_ex */
/* Free the relation cache. Safe to call when the cache was never created. */
void mem_disambig_term(void)
if (result_cache != NULL) {
del_set(result_cache);
} /* mem_disambig_term */
/**
 * Check the mode of a Load/Store against the mode of the entity.
 *
 * If the mode of the entity and the Load/Store mode do not match, we
 * have the bad reinterpret case:
 *
 *    char b = *(char *)&i;
 *
 * We do NOT count this as one value and return address_taken.
 *
 * However, we support an often used case. If the mode is two-complement
 * we allow casts between signed/unsigned.
 *
 * @param mode     the mode of the Load/Store
 * @param ent_mode the mode of the accessed entity
 *
 * @return non-zero if the Load/Store is a hidden cast, zero else
 */
static int is_hidden_cast(const ir_mode *mode, const ir_mode *ent_mode)
if (ent_mode == NULL)
if (ent_mode != mode) {
/* NOTE(review): this NULL check is redundant — NULL was already
 * handled above; kept as-is from upstream. */
if (ent_mode == NULL ||
get_mode_size_bits(ent_mode) != get_mode_size_bits(mode) ||
get_mode_sort(ent_mode) != get_mode_sort(mode) ||
get_mode_arithmetic(ent_mode) != irma_twos_complement ||
get_mode_arithmetic(mode) != irma_twos_complement)
} /* is_hidden_cast */
/**
 * Determine the usage state of a node (or its successor Sels) by walking
 * all out-edges of irn and accumulating ir_entity_usage flags.
 *
 * NOTE(review): the switch's case labels (iro_Load, iro_Store, iro_CopyB,
 * iro_Sel, iro_Call, iro_Id, iro_Tuple, default, ...) are lost in
 * extraction gaps; the comments below name the apparent case each fragment
 * belongs to — confirm against the unexcerpted file.
 *
 * @param irn    the node
 * @param entity the entity whose usage is being determined
 */
static ir_entity_usage determine_entity_usage(const ir_node *irn, ir_entity *entity)
ir_mode *emode, *mode;
ir_entity_usage res = 0;
for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
ir_node *succ = get_irn_out(irn, i);
switch (get_irn_opcode(succ)) {
/* --- Load case --- */
/* beware: irn might be a Id node here, so irn might be not
   equal to get_Load_ptr(succ) */
res |= ir_usage_read;
/* check if this load is not a hidden conversion */
mode = get_Load_mode(succ);
emode = get_type_mode(get_entity_type(entity));
if (is_hidden_cast(mode, emode))
res |= ir_usage_reinterpret_cast;
/* --- Store case --- */
/* check that the node is not the Store's value */
if (irn == get_Store_value(succ)) {
res |= ir_usage_unknown;
if (irn == get_Store_ptr(succ)) {
res |= ir_usage_write;
/* check if this Store is not a hidden conversion */
value = get_Store_value(succ);
mode = get_irn_mode(value);
emode = get_type_mode(get_entity_type(entity));
if (is_hidden_cast(mode, emode))
res |= ir_usage_reinterpret_cast;
assert(irn != get_Store_mem(succ));
/* --- CopyB case --- */
/* CopyB are like Loads/Stores */
tp = get_entity_type(entity);
if (tp != get_CopyB_type(succ)) {
/* bad, different types, might be a hidden conversion */
res |= ir_usage_reinterpret_cast;
if (irn == get_CopyB_dst(succ)) {
res |= ir_usage_write;
assert(irn == get_CopyB_src(succ));
res |= ir_usage_read;
/* --- Add/Sub-like successor: follow the derived pointer --- */
/* Check the successor of irn. */
res |= determine_entity_usage(succ, entity);
/* --- Sel case: recurse with the selected entity --- */
ir_entity *entity = get_Sel_entity(succ);
/* this analysis can't handle unions correctly */
if (is_Union_type(get_entity_owner(entity))) {
res |= ir_usage_unknown;
/* Check the successor of irn. */
res |= determine_entity_usage(succ, entity);
/* --- Call case --- */
if (irn == get_Call_ptr(succ)) {
/* TODO: we could check for reinterpret casts here...
 * But I doubt anyone is interested in that bit for
 * function entities and I'm too lazy to write the code now.
 */
res |= ir_usage_read;
/* used as a Call argument: escapes our analysis */
assert(irn != get_Call_mem(succ));
res |= ir_usage_unknown;
/* --- Id case --- */
/* skip identities */
res |= determine_entity_usage(succ, entity);
/* --- Tuple case: follow the matching Proj outputs --- */
for (input_nr = get_Tuple_n_preds(succ) - 1; input_nr >= 0;
ir_node *pred = get_Tuple_pred(succ, input_nr);
/* we found one input */
for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
ir_node *proj = get_irn_out(succ, k);
if (is_Proj(proj) && get_Proj_proj(proj) == input_nr) {
res |= determine_entity_usage(proj, entity);
/* --- default case --- */
/* another op, we don't know anything (we could do more advanced
 * things like a dataflow analysis here) */
res |= ir_usage_unknown;
/**
 * Update the usage flags of all frame entities of a graph, including
 * entities of the outer frame accessed by inner (nested) functions via
 * the static link argument.
 */
static void analyse_irg_entity_usage(ir_graph *irg)
ir_type *ft = get_irg_frame_type(irg);
int i, j, k, static_link_arg;
/* set initial state to not_taken, as this is the "smallest" state */
for (i = get_class_n_members(ft) - 1; i >= 0; --i) {
ir_entity *ent = get_class_member(ft, i);
/* methods can only be analyzed globally */
if (! is_method_entity(ent)) {
ir_entity_usage flags = 0;
/* entities visible to hidden users must stay "unknown" */
if (get_entity_linkage(ent) & IR_LINKAGE_HIDDEN_USER)
flags = ir_usage_unknown;
set_entity_usage(ent, flags);
/* out-edges are required for determine_entity_usage() */
assure_irg_outs(irg);
irg_frame = get_irg_frame(irg);
/* every frame entity is reached through a Sel off the frame pointer */
for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) {
ir_node *succ = get_irn_out(irg_frame, i);
ir_entity_usage flags;
entity = get_Sel_entity(succ);
flags = get_entity_usage(entity);
flags |= determine_entity_usage(succ, entity);
set_entity_usage(entity, flags);
/* check inner functions accessing outer frame */
for (i = get_class_n_members(ft) - 1; i >= 0; --i) {
ir_entity *ent = get_class_member(ft, i);
if (! is_method_entity(ent))
inner_irg = get_entity_irg(ent);
if (inner_irg == NULL)
assure_irg_outs(inner_irg);
args = get_irg_args(inner_irg);
for (j = get_irn_n_outs(args) - 1; j >= 0; --j) {
ir_node *arg = get_irn_out(args, j);
/* the static link argument carries the outer frame pointer */
if (get_Proj_proj(arg) == static_link_arg) {
for (k = get_irn_n_outs(arg) - 1; k >= 0; --k) {
ir_node *succ = get_irn_out(arg, k);
ir_entity *entity = get_Sel_entity(succ);
if (get_entity_owner(entity) == ft) {
/* found an access to the outer frame */
ir_entity_usage flags;
flags = get_entity_usage(entity);
flags |= determine_entity_usage(succ, entity);
set_entity_usage(entity, flags);
/* mark the analysis as up to date */
irg->entity_usage_state = ir_entity_usage_computed;
/** Return the entity-usage computation state of a graph. */
ir_entity_usage_computed_state get_irg_entity_usage_state(const ir_graph *irg)
return irg->entity_usage_state;
/** Set the entity-usage computation state of a graph. */
void set_irg_entity_usage_state(ir_graph *irg, ir_entity_usage_computed_state state)
irg->entity_usage_state = state;
/** Compute the entity-usage information for a graph unless already present. */
void assure_irg_entity_usage_computed(ir_graph *irg)
if (irg->entity_usage_state != ir_entity_usage_not_computed)
analyse_irg_entity_usage(irg);
/**
 * Initialize the entity_usage flag for a global type like type.
 */
static void init_entity_usage(ir_type *tp)
/* We have to be conservative: All external visible entities are unknown */
for (i = get_compound_n_members(tp) - 1; i >= 0; --i) {
ir_entity *ent = get_compound_member(tp, i);
ir_entity_usage flags = ir_usage_none;
if (entity_is_externally_visible(ent)) {
flags |= ir_usage_unknown;
set_entity_usage(ent, flags);
/**
 * Mark all entities used in the initializer as unknown usage
 * (recursing into compound initializers).
 *
 * @param initializer the initializer to check
 */
static void check_initializer_nodes(ir_initializer_t *initializer)
switch (initializer->kind) {
case IR_INITIALIZER_CONST:
/* let's check if it's an address */
n = initializer->consti.value;
ir_entity *ent = get_Global_entity(n);
set_entity_usage(ent, ir_usage_unknown);
case IR_INITIALIZER_TARVAL:
case IR_INITIALIZER_NULL:
/* plain values reference no entities */
case IR_INITIALIZER_COMPOUND:
for (i = 0; i < initializer->compound.n_initializers; ++i) {
ir_initializer_t *sub_initializer
= initializer->compound.initializers[i];
check_initializer_nodes(sub_initializer);
panic("invalid initializer found");
} /* check_initializer_nodes */
/**
 * Mark all entities used in the initializer for the given entity as unknown
 * usage.
 *
 * @param ent the entity
 */
static void check_initializer(ir_entity *ent)
/* Beware: Methods are always initialized with "themself". This does not
 * count as a taken address.
 * TODO: this initialisation with "themself" is wrong and should be removed
 */
if (is_Method_type(get_entity_type(ent)))
if (ent->initializer != NULL) {
check_initializer_nodes(ent->initializer);
} else if (entity_has_compound_ent_values(ent)) {
/* legacy compound-value representation */
for (i = get_compound_ent_n_values(ent) - 1; i >= 0; --i) {
n = get_compound_ent_value(ent, i);
/* let's check if it's an address */
ir_entity *ent = get_Global_entity(n);
set_entity_usage(ent, ir_usage_unknown);
/**
 * Mark all entities used in initializers of a compound type as unknown usage.
 *
 * @param tp a compound type
 */
static void check_initializers(ir_type *tp)
for (i = get_compound_n_members(tp) - 1; i >= 0; --i) {
ir_entity *ent = get_compound_member(tp, i);
check_initializer(ent);
} /* check_initializers */
#ifdef DEBUG_libfirm
/**
 * Print the entity usage flags of all entities of a given type for debugging.
 *
 * @param tp a compound type
 */
static void print_entity_usage_flags(ir_type *tp)
for (i = get_compound_n_members(tp) - 1; i >= 0; --i) {
ir_entity *ent = get_compound_member(tp, i);
ir_entity_usage flags = get_entity_usage(ent);
ir_printf("%+F:", ent);
if (flags & ir_usage_address_taken)
printf(" address_taken");
if (flags & ir_usage_read)
if (flags & ir_usage_write)
if (flags & ir_usage_reinterpret_cast)
printf(" reinterp_cast");
#endif /* DEBUG_libfirm */
/**
 * Post-walker: check for global entity address; accumulates usage flags
 * for globals and TLS variables.
 *
 * @param irn the visited node
 * @param env the graph's TLS node (passed by analyse_irp_globals_entity_usage)
 */
static void check_global_address(ir_node *irn, void *env)
ir_entity_usage flags;
if (is_Global(irn)) {
ent = get_Global_entity(irn);
} else if (is_Sel(irn) && get_Sel_ptr(irn) == tls) {
/* A TLS variable. */
ent = get_Sel_entity(irn);
flags = get_entity_usage(ent);
flags |= determine_entity_usage(irn, ent);
set_entity_usage(ent, flags);
} /* check_global_address */
/**
 * Update the entity usage flags of all global entities: reset them
 * conservatively, account for initializers, then walk every graph.
 */
static void analyse_irp_globals_entity_usage(void)
for (s = IR_SEGMENT_FIRST; s <= IR_SEGMENT_LAST; ++s) {
ir_type *type = get_segment_type(s);
init_entity_usage(type);
for (s = IR_SEGMENT_FIRST; s <= IR_SEGMENT_LAST; ++s) {
ir_type *type = get_segment_type(s);
check_initializers(type);
for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
ir_graph *irg = get_irp_irg(i);
assure_irg_outs(irg);
/* env = TLS node, so check_global_address recognizes TLS Sels */
irg_walk_graph(irg, NULL, check_global_address, get_irg_tls(irg));
#ifdef DEBUG_libfirm
if (firm_dbg_get_mask(dbg) & LEVEL_1) {
for (s = IR_SEGMENT_FIRST; s <= IR_SEGMENT_LAST; ++s) {
print_entity_usage_flags(get_segment_type(s));
#endif /* DEBUG_libfirm */
/* mark the global analysis as up to date */
irp->globals_entity_usage_state = ir_entity_usage_computed;
/* Returns the current address taken state of the globals. */
ir_entity_usage_computed_state get_irp_globals_entity_usage_state(void)
return irp->globals_entity_usage_state;
/* Sets the current address taken state of the graph. */
void set_irp_globals_entity_usage_state(ir_entity_usage_computed_state state)
irp->globals_entity_usage_state = state;
/* Assure that the address taken flag is computed for the globals. */
void assure_irp_globals_entity_usage_computed(void)
if (irp->globals_entity_usage_state != ir_entity_usage_not_computed)
analyse_irp_globals_entity_usage();
/** Register the debug modules used by this file. */
void firm_init_memory_disambiguator(void)
FIRM_DBG_REGISTER(dbg, "firm.ana.irmemory");
FIRM_DBG_REGISTER(dbgcall, "firm.opt.cc");
/** Maps method types to cloned method types (valid during mark_private_methods). */
static pmap *mtp_map;

/**
 * Clone a method type if not already cloned; the clone is cached so every
 * caller of the same type gets the same cloned type.
 *
 * @param tp the type to clone
 */
static ir_type *clone_type_and_cache(ir_type *tp)
pmap_entry *e = pmap_find(mtp_map, tp);
/* cache miss: clone and remember */
res = clone_type_method(tp);
pmap_insert(mtp_map, tp, res);
} /* clone_type_and_cache */
/**
 * Walker: clone all call types of Calls to methods having the
 * mtp_property_private property set, so the Call's type carries the
 * private property too.
 */
static void update_calls_to_private(ir_node *call, void *env)
if (is_Call(call)) {
ir_node *ptr = get_Call_ptr(call);
/* only direct calls (address is a SymConst entity) can be updated */
if (is_SymConst(ptr)) {
ir_entity *ent = get_SymConst_entity(ptr);
ir_type *ctp = get_Call_type(call);
if (get_entity_additional_properties(ent) & mtp_property_private) {
if ((get_method_additional_properties(ctp) & mtp_property_private) == 0) {
/* the Call type lacks the private flag: use a cloned type */
ctp = clone_type_and_cache(ctp);
set_method_additional_property(ctp, mtp_property_private);
set_Call_type(call, ctp);
DB((dbgcall, LEVEL_1, "changed call to private method %+F using cloned type %+F\n", ent, ctp));
} /* update_calls_to_private */
/* Mark all private methods, i.e. those of which all call sites are known. */
void mark_private_methods(void)
/* usage flags must be valid to decide "address taken" below */
assure_irp_globals_entity_usage_computed();
mtp_map = pmap_create();
/* first step: change the calling conventions of the local non-escaped entities */
for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
ir_graph *irg = get_irp_irg(i);
ir_entity *ent = get_irg_entity(irg);
ir_entity_usage flags = get_entity_usage(ent);
/* private = not externally visible and address never taken */
if (!entity_is_externally_visible(ent) &&
!(flags & ir_usage_address_taken)) {
ir_type *mtp = get_entity_type(ent);
set_entity_additional_property(ent, mtp_property_private);
DB((dbgcall, LEVEL_1, "found private method %+F\n", ent));
if ((get_method_additional_properties(mtp) & mtp_property_private) == 0) {
/* need a new type */
mtp = clone_type_and_cache(mtp);
set_method_additional_property(mtp, mtp_property_private);
set_entity_type(ent, mtp);
DB((dbgcall, LEVEL_2, "changed entity type of %+F to %+F\n", ent, mtp));
/* second step: propagate the private property to all call sites */
all_irg_walk(NULL, update_calls_to_private, NULL);
pmap_destroy(mtp_map);
} /* mark_private_methods */
/* create a pass for mark_private_methods() */
ir_prog_pass_t *mark_private_methods_pass(const char *name)
return def_prog_pass(name ? name : "mark_private_methods", mark_private_methods);
} /* mark_private_methods_pass */