2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @brief Memory disambiguator
19 #include "irgraph_t.h"
21 #include "irmemory_t.h"
34 /** The debug handle. */
35 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Separate debug handle for the calling-convention / private-method pass below. */
36 DEBUG_ONLY(static firm_dbg_module_t *dbgcall = NULL;)
38 /** The source language specific language disambiguator function. */
/* NOTE(review): "disambuigator" is a typo in this internal identifier; kept as-is
 * because it is referenced by several functions in this file. */
39 static DISAMBIGUATOR_FUNC language_disambuigator = NULL;
41 /** The global memory disambiguator options. */
/* NOTE(review): "disamgig" is likewise a typo in an internal identifier. */
42 static unsigned global_mem_disamgig_opt = aa_opt_no_opt;
/* Return the identifier string for an ir_alias_relation value (stringified via
 * the X macro in a switch over all enum members); panics on an unknown value. */
44 const char *get_ir_alias_relation_name(ir_alias_relation rel)
46 #define X(a) case a: return #a
52 panic("UNKNOWN alias relation");
/* Return the disambiguator options of a graph; if the graph's options carry the
 * aa_opt_inherited bit, the program-wide global options are used instead. */
57 unsigned get_irg_memory_disambiguator_options(const ir_graph *irg)
59 unsigned opt = irg->mem_disambig_opt;
60 if (opt & aa_opt_inherited)
61 return global_mem_disamgig_opt;
/* Set per-graph options; the aa_opt_inherited bit is cleared so the per-graph
 * value takes effect instead of the global default. */
65 void set_irg_memory_disambiguator_options(ir_graph *irg, unsigned options)
67 irg->mem_disambig_opt = options & ~aa_opt_inherited;
/* Set the program-wide (fallback) disambiguator options. */
70 void set_irp_memory_disambiguator_options(unsigned options)
72 global_mem_disamgig_opt = options;
/* Strip the modifier bits (ir_sc_modifiers) from a storage class, leaving the
 * base storage class only. */
75 ir_storage_class_class_t get_base_sc(ir_storage_class_class_t x)
77 return x & ~ir_sc_modifiers;
81 * Find the base address and entity of an Sel node.
84 * @param pEnt after return points to the base entity.
86 * @return the base address.
/* Walks down a chain of Sel nodes to the outermost (base) pointer; the entity
 * of the last Sel seen is reported through pEnt. */
88 static ir_node *find_base_adr(const ir_node *sel, ir_entity **pEnt)
90 ir_node *ptr = get_Sel_ptr(sel);
94 ptr = get_Sel_ptr(sel);
96 *pEnt = get_Sel_entity(sel);
101 * Check if a given Const node is greater or equal a given size.
103 * @param cns a Const node
104 * @param size a integer size
106 * @return ir_no_alias if the Const is greater, ir_may_alias else
108 static ir_alias_relation check_const(const ir_node *cns, int size)
110 ir_tarval *tv = get_Const_tarval(cns);
/* A zero size: any non-null constant offset already rules out aliasing. */
114 return tarval_is_null(tv) ? ir_may_alias : ir_no_alias;
115 tv_size = new_tarval_from_long(size, get_tarval_mode(tv));
/* ir_no_alias when size <= tv, i.e. the constant is at least `size`. */
116 return tarval_cmp(tv_size, tv) & (ir_relation_less_equal) ? ir_no_alias : ir_may_alias;
120 * Treat idx1 and idx2 as integer indexes and check if they differ always more than size.
122 * @param idx1 a node representing the first index
123 * @param idx2 a node representing the second index
124 * @param size an integer size
126 * @return ir_sure_alias iff idx1 == idx2
127 * ir_no_alias iff they ALWAYS differ more than size
130 static ir_alias_relation different_index(const ir_node *idx1, const ir_node *idx2, int size)
133 return ir_sure_alias;
134 if (is_Const(idx1) && is_Const(idx2)) {
135 /* both are const, we can compare them */
136 ir_tarval *tv1 = get_Const_tarval(idx1);
137 ir_tarval *tv2 = get_Const_tarval(idx2);
138 ir_tarval *tv, *tv_size;
/* With size == 0 the indexes only need to be unequal, not far apart. */
142 return tv1 == tv2 ? ir_sure_alias : ir_no_alias;
144 /* arg, modes may be different */
145 m1 = get_tarval_mode(tv1);
146 m2 = get_tarval_mode(tv2);
/* NOTE(review): this inner `size` shadows the `size` parameter; here it is the
 * bit-width difference of the two modes, not the byte size being checked. */
148 int size = get_mode_size_bits(m1) - get_mode_size_bits(m2);
151 /* m1 is a small mode, cast up */
152 m1 = mode_is_signed(m1) ? find_signed_mode(m2) : find_unsigned_mode(m2);
154 /* should NOT happen, but if it does we give up here */
157 tv1 = tarval_convert_to(tv1, m1);
158 } else if (size > 0) {
159 /* m2 is a small mode, cast up */
160 m2 = mode_is_signed(m2) ? find_signed_mode(m1) : find_unsigned_mode(m1);
162 /* should NOT happen, but if it does we give up here */
165 tv2 = tarval_convert_to(tv2, m2);
167 /* here the size should be identical, check for signed */
168 if (get_mode_sign(m1) != get_mode_sign(m2)) {
169 /* find the signed */
170 if (mode_is_signed(m2)) {
177 /* m1 is now the signed one */
178 if (!tarval_is_negative(tv1)) {
179 /* tv1 is signed, but >= 0, simply cast into unsigned */
180 tv1 = tarval_convert_to(tv1, m2);
182 tv_size = new_tarval_from_long(size, m2);
184 if (tarval_cmp(tv2, tv_size) & (ir_relation_greater_equal)) {
185 /* tv1 is negative and tv2 >= tv_size, so the difference is bigger than size */
188 /* tv_size > tv2, so we can subtract without overflow */
189 tv2 = tarval_sub(tv_size, tv2, NULL);
191 /* tv1 is < 0, so we can negate it */
192 tv1 = tarval_neg(tv1);
194 /* cast it into unsigned. for two-complement it does the right thing for MIN_INT */
195 tv1 = tarval_convert_to(tv1, m2);
197 /* now we can compare without overflow */
198 return tarval_cmp(tv1, tv2) & (ir_relation_greater_equal) ? ir_no_alias : ir_may_alias;
/* Same-sign path: order the two values so the subtraction below cannot overflow. */
202 if (tarval_cmp(tv1, tv2) == ir_relation_greater) {
207 /* tv1 is now the "smaller" one */
208 tv = tarval_sub(tv2, tv1, NULL);
209 tv_size = new_tarval_from_long(size, get_tarval_mode(tv));
210 return tarval_cmp(tv_size, tv) & (ir_relation_less_equal) ? ir_no_alias : ir_may_alias;
213 /* Note: we rely here on the fact that normalization puts constants on the RIGHT side */
/* Syntactic cases: one or both indexes are Add/Sub around a common subexpression. */
215 ir_node *l1 = get_Add_left(idx1);
216 ir_node *r1 = get_Add_right(idx1);
/* idx1 == idx2 + const: the constant addend alone decides. */
221 return check_const(r1, size);
224 /* both are Adds, check if they are of x + a == x + b kind */
225 ir_node *l2 = get_Add_left(idx2);
226 ir_node *r2 = get_Add_right(idx2);
/* Recurse on the non-common operands (all four left/right pairings). */
229 return different_index(r1, r2, size);
231 return different_index(r1, l2, size);
233 return different_index(l1, l2, size);
235 return different_index(l1, r2, size);
239 ir_node *l2 = get_Add_left(idx2);
240 ir_node *r2 = get_Add_right(idx2);
245 return check_const(r2, size);
250 ir_node *l1 = get_Sub_left(idx1);
251 ir_node *r1 = get_Sub_right(idx1);
256 return check_const(r1, size);
260 /* both are Subs, check if they are of x - a == x - b kind */
261 ir_node *l2 = get_Sub_left(idx2);
264 ir_node *r2 = get_Sub_right(idx2);
265 return different_index(r1, r2, size);
270 ir_node *l2 = get_Sub_left(idx2);
271 ir_node *r2 = get_Sub_right(idx2);
276 return check_const(r2, size);
284 * Two Sel addresses have the same base address, check if their offsets are
287 * @param adr1 The first address.
288 * @param adr2 The second address.
290 static ir_alias_relation different_sel_offsets(const ir_node *sel1, const ir_node *sel2)
296 ir_entity *ent1 = get_Sel_entity(sel1);
297 ir_entity *ent2 = get_Sel_entity(sel2);
298 int i, check_arr = 0;
303 ir_type *tp1 = get_entity_type(ent1);
304 ir_type *tp2 = get_entity_type(ent2);
/* Fixed layout and equal sizes: the entities select same-sized slots, so the
 * array indexes decide the relation. */
308 else if (get_type_state(tp1) == layout_fixed && get_type_state(tp2) == layout_fixed &&
309 get_type_size_bits(tp1) == get_type_size_bits(tp2))
313 /* we select an entity of same size, check for indexes */
314 int n = get_Sel_n_indexs(sel1);
317 if (n > 0 && n == get_Sel_n_indexs(sel2)) {
318 /* same non-zero number of indexes, an array access, check */
319 for (i = 0; i < n; ++i) {
320 ir_node *idx1 = get_Sel_index(sel1, i);
321 ir_node *idx2 = get_Sel_index(sel2, i);
322 ir_alias_relation res = different_index(idx1, idx2, 0); /* we can safely IGNORE the size here if it's at least >0 */
/* NOTE(review): the unprefixed enum names below (may_alias/no_alias/sure_alias,
 * vs. the ir_-prefixed ones used elsewhere) and the `(void) different_index;`
 * silencer suggest this index-comparison path is disabled/dead code here —
 * verify against the full file (a surrounding #if may not be visible). */
324 if (res == may_alias)
326 else if (res == no_alias)
329 /* if we have at least one no_alias, there is no alias relation, else we have sure */
330 return have_no > 0 ? no_alias : sure_alias;
334 (void) different_index;
340 * Determine the alias relation by checking if adr1 and adr2 are pointer
343 * @param adr1 The first address.
344 * @param adr2 The second address.
346 static ir_alias_relation different_types(const ir_node *adr1, const ir_node *adr2)
348 ir_entity *ent1 = NULL, *ent2 = NULL;
/* Recover the accessed entity from a SymConst address or a Sel, if any. */
350 if (is_SymConst_addr_ent(adr1))
351 ent1 = get_SymConst_entity(adr1);
352 else if (is_Sel(adr1))
353 ent1 = get_Sel_entity(adr1);
355 if (is_SymConst_addr_ent(adr2))
356 ent2 = get_SymConst_entity(adr2);
357 else if (is_Sel(adr2))
358 ent2 = get_Sel_entity(adr2);
/* Only when both entities are known can types be compared. */
360 if (ent1 != NULL && ent2 != NULL) {
361 ir_type *tp1 = get_entity_type(ent1);
362 ir_type *tp2 = get_entity_type(ent2);
365 /* do deref until no pointer types are found */
366 while (is_Pointer_type(tp1) && is_Pointer_type(tp2)) {
367 tp1 = get_pointer_points_to_type(tp1);
368 tp2 = get_pointer_points_to_type(tp2);
371 if (get_type_tpop(tp1) != get_type_tpop(tp2)) {
372 /* different type structure */
375 if (is_Class_type(tp1)) {
376 /* check class hierarchy */
377 if (! is_SubClass_of(tp1, tp2) &&
378 ! is_SubClass_of(tp2, tp1))
381 /* different types */
390 * Returns non-zero if a node is a result on a malloc-like routine.
392 * @param node the Proj node to test
/* Walks Proj -> Proj -> Call and checks whether the called entity carries the
 * mtp_property_malloc property. */
394 static int is_malloc_Result(const ir_node *node)
396 node = get_Proj_pred(node);
399 node = get_Proj_pred(node);
402 node = get_Call_ptr(node);
403 if (is_SymConst_addr_ent(node)) {
404 ir_entity *ent = get_SymConst_entity(node);
406 if (get_entity_additional_properties(ent) & mtp_property_malloc)
/* Classify the storage class of a pointer node: global variable / TLS /
 * local (frame) variable / malloc result / constant address / generic pointer,
 * plus modifier bits (address never taken, function argument). */
413 ir_storage_class_class_t classify_pointer(const ir_node *irn,
414 const ir_entity *ent)
416 ir_graph *irg = get_irn_irg(irn);
417 ir_storage_class_class_t res = ir_sc_pointer;
418 if (is_SymConst_addr_ent(irn)) {
419 ir_entity *entity = get_SymConst_entity(irn);
420 ir_type *owner = get_entity_owner(entity);
421 res = owner == get_tls_type() ? ir_sc_tls : ir_sc_globalvar;
422 if (! (get_entity_usage(entity) & ir_usage_address_taken))
423 res |= ir_sc_modifier_nottaken;
424 } else if (irn == get_irg_frame(irg)) {
425 res = ir_sc_localvar;
/* ent is the frame entity accessed through the frame pointer, if known. */
426 if (ent != NULL && !(get_entity_usage(ent) & ir_usage_address_taken))
427 res |= ir_sc_modifier_nottaken;
428 } else if (is_Proj(irn) && is_malloc_Result(irn)) {
429 return ir_sc_malloced;
430 } else if (is_Const(irn)) {
431 return ir_sc_globaladdr;
432 } else if (is_arg_Proj(irn)) {
433 res |= ir_sc_modifier_argument;
440 * If adr represents a Bitfield Sel, skip it
442 static const ir_node *skip_Bitfield_Sels(const ir_node *adr)
445 ir_entity *ent = get_Sel_entity(adr);
446 ir_type *bf_type = get_entity_type(ent);
448 /* is it a bitfield type? */
/* A primitive type with a base type marks a bitfield member; step to the
 * Sel's pointer so the underlying address is compared instead. */
449 if (is_Primitive_type(bf_type) && get_primitive_base_type(bf_type) != NULL)
450 adr = get_Sel_ptr(adr);
456 * Determine the alias relation between two addresses.
458 * @param addr1 pointer address of the first memory operation
459 * @param mode1 the mode of the accessed data through addr1
460 * @param addr2 pointer address of the second memory operation
461 * @param mode2 the mode of the accessed data through addr2
463 * @return found memory relation
465 static ir_alias_relation _get_alias_relation(
466 const ir_node *adr1, const ir_mode *mode1,
467 const ir_node *adr2, const ir_mode *mode2)
469 ir_entity *ent1, *ent2;
473 const ir_node *base1;
474 const ir_node *base2;
/* Keep the unstripped addresses for the type-based and language checks below. */
475 const ir_node *orig_adr1 = adr1;
476 const ir_node *orig_adr2 = adr2;
479 ir_storage_class_class_t class1, class2, mod1, mod2;
480 int have_const_offsets;
482 if (! get_opt_alias_analysis())
/* Identical addresses trivially alias. */
486 return ir_sure_alias;
488 irg = get_irn_irg(adr1);
489 options = get_irg_memory_disambiguator_options(irg);
491 /* The Armageddon switch */
492 if (options & aa_opt_no_alias)
495 /* do the addresses have constants offsets?
496 * Note: nodes are normalized to have constants at right inputs,
497 * sub X, C is normalized to add X, -C
499 have_const_offsets = 1;
/* Strip constant Add offsets from adr1, accumulating them into offset1. */
500 while (is_Add(adr1)) {
501 ir_node *add_right = get_Add_right(adr1);
502 if (is_Const(add_right) && !mode_is_reference(get_irn_mode(add_right))) {
503 ir_tarval *tv = get_Const_tarval(add_right);
504 offset1 += get_tarval_long(tv);
505 adr1 = get_Add_left(adr1);
506 } else if (mode_is_reference(get_irn_mode(add_right))) {
508 have_const_offsets = 0;
510 adr1 = get_Add_left(adr1);
511 have_const_offsets = 0;
/* Same stripping for adr2 / offset2. */
514 while (is_Add(adr2)) {
515 ir_node *add_right = get_Add_right(adr2);
516 if (is_Const(add_right) && !mode_is_reference(get_irn_mode(add_right))) {
517 ir_tarval *tv = get_Const_tarval(add_right);
518 offset2 += get_tarval_long(tv);
519 adr2 = get_Add_left(adr2);
520 } else if (mode_is_reference(get_irn_mode(add_right))) {
522 have_const_offsets = 0;
524 adr2 = get_Add_left(adr2);
525 have_const_offsets = 0;
/* Use the larger of the two access sizes for the overlap test. */
529 mode_size = get_mode_size_bytes(mode1);
530 if (get_mode_size_bytes(mode2) > mode_size) {
531 mode_size = get_mode_size_bytes(mode2);
534 /* same base address -> compare offsets if possible.
535 * FIXME: type long is not sufficient for this task ...
537 if (adr1 == adr2 && have_const_offsets) {
538 if ((unsigned long)labs(offset2 - offset1) >= mode_size)
541 return ir_sure_alias;
545 * Bitfields can be constructed as Sels from its base address.
546 * As they have different entities, the disambiguator would find that they are
547 * alias free. While this is true for its values, it is false for the addresses
548 * (strictly speaking, the Sel's are NOT the addresses of the bitfields).
549 * So, skip those bitfield selecting Sel's.
551 adr1 = skip_Bitfield_Sels(adr1);
552 adr2 = skip_Bitfield_Sels(adr2);
560 base1 = find_base_adr(adr1, &ent1);
563 base2 = find_base_adr(adr2, &ent2);
566 /* same base address -> compare Sel entities */
567 if (base1 == base2 && ent1 != NULL && ent2 != NULL) {
570 else if (have_const_offsets)
571 return different_sel_offsets(adr1, adr2);
574 mod1 = classify_pointer(base1, ent1);
575 mod2 = classify_pointer(base2, ent2);
577 class1 = get_base_sc(mod1);
578 class2 = get_base_sc(mod2);
580 /* struct-access cannot alias with variables */
581 if (ent1 == NULL && ent2 != NULL && is_compound_type(get_entity_owner(ent2))
582 && (class1 == ir_sc_globalvar || class1 == ir_sc_localvar || class1 == ir_sc_tls || class1 == ir_sc_globaladdr)) {
/* Symmetric case: adr2 is a plain variable, adr1 a struct member access. */
585 if (ent2 == NULL && ent1 != NULL && is_compound_type(get_entity_owner(ent1))
586 && (class2 == ir_sc_globalvar || class2 == ir_sc_localvar || class2 == ir_sc_tls || class2 == ir_sc_globaladdr)) {
590 if (class1 == ir_sc_pointer || class2 == ir_sc_pointer) {
591 /* swap pointer class to class1 */
592 if (class2 == ir_sc_pointer) {
593 ir_storage_class_class_t temp = mod1;
596 class1 = get_base_sc(mod1);
597 class2 = get_base_sc(mod2);
599 /* a pointer and an object whose address was never taken */
600 if (mod2 & ir_sc_modifier_nottaken) {
603 if (mod1 & ir_sc_modifier_argument) {
604 if ( (options & aa_opt_no_alias_args)
605 && (mod2 & ir_sc_modifier_argument))
607 if ( (options & aa_opt_no_alias_args_global)
608 && (class2 == ir_sc_globalvar
609 || class2 == ir_sc_tls
610 || class2 == ir_sc_globaladdr))
613 } else if (class1 != class2) {
614 /* two objects from different memory spaces */
617 /* both classes are equal */
618 if (class1 == ir_sc_globalvar) {
619 ir_entity *entity1 = get_SymConst_entity(base1);
620 ir_entity *entity2 = get_SymConst_entity(base2);
621 if (entity1 != entity2)
624 /* for some reason CSE didn't happen yet for the 2 SymConsts... */
626 } else if (class1 == ir_sc_globaladdr) {
/* Both bases are constant addresses: fold them into the offsets and
 * redo the distance check. */
627 ir_tarval *tv = get_Const_tarval(base1);
628 offset1 += get_tarval_long(tv);
629 tv = get_Const_tarval(base2);
630 offset2 += get_tarval_long(tv);
632 if ((unsigned long)labs(offset2 - offset1) >= mode_size)
635 return ir_sure_alias;
639 /* Type based alias analysis */
640 if (options & aa_opt_type_based) {
641 ir_alias_relation rel;
643 if (options & aa_opt_byte_type_may_alias) {
644 if (get_mode_size_bits(mode1) == 8 || get_mode_size_bits(mode2) == 8) {
645 /* One of the modes address a byte. Assume a ir_may_alias and leave
646 the type based check. */
647 goto leave_type_based_alias;
650 /* cheap check: If the mode sizes did not match, the types MUST be different */
651 if (get_mode_size_bits(mode1) != get_mode_size_bits(mode2))
654 /* cheap test: if only one is a reference mode, no alias */
655 if (mode_is_reference(mode1) != mode_is_reference(mode2))
658 /* cheap test: if arithmetic is different, no alias */
659 if (get_mode_arithmetic(mode1) != get_mode_arithmetic(mode2))
663 rel = different_types(orig_adr1, orig_adr2);
664 if (rel != ir_may_alias)
666 leave_type_based_alias:;
669 /* do we have a language specific memory disambiguator? */
670 if (language_disambuigator != NULL) {
671 ir_alias_relation rel = language_disambuigator(orig_adr1, mode1, orig_adr2, mode2);
672 if (rel != ir_may_alias)
676 /* access points-to information here */
/* Public entry point: compute the alias relation and log it on the debug
 * channel; the actual work happens in _get_alias_relation(). */
680 ir_alias_relation get_alias_relation(
681 const ir_node *adr1, const ir_mode *mode1,
682 const ir_node *adr2, const ir_mode *mode2)
684 ir_alias_relation rel = _get_alias_relation(adr1, mode1, adr2, mode2);
685 DB((dbg, LEVEL_1, "alias(%+F, %+F) = %s\n", adr1, adr2, get_ir_alias_relation_name(rel)));
/* Install a frontend-supplied disambiguator, consulted as a last resort by
 * _get_alias_relation(); pass NULL to remove it. */
689 void set_language_memory_disambiguator(DISAMBIGUATOR_FUNC func)
691 language_disambuigator = func;
694 /** The result cache for the memory disambiguator. */
695 static set *result_cache = NULL;
697 /** An entry in the relation cache. */
698 typedef struct mem_disambig_entry {
699 const ir_node *adr1; /**< The first address. */
700 const ir_mode *mode1; /**< The first address mode. */
701 const ir_node *adr2; /**< The second address. */
702 const ir_mode *mode2; /**< The second address mode. */
703 ir_alias_relation result; /**< The alias relation result. */
704 } mem_disambig_entry;
/* Hash by the address pair only; modes are distinguished in the compare
 * function, so entries differing only in mode land in the same bucket. */
706 #define HASH_ENTRY(adr1, adr2) (hash_ptr(adr1) ^ hash_ptr(adr2))
709 * Compare two relation cache entries.
/* set-compatible comparator: returns non-zero on EQUALITY of the full
 * (adr1, adr2, mode1, mode2) key; the size parameter is unused. */
711 static int cmp_mem_disambig_entry(const void *elt, const void *key, size_t size)
713 const mem_disambig_entry *p1 = (const mem_disambig_entry*) elt;
714 const mem_disambig_entry *p2 = (const mem_disambig_entry*) key;
717 return p1->adr1 == p2->adr1 && p1->adr2 == p2->adr2 &&
718 p1->mode1 == p2->mode1 && p1->mode2 == p2->mode2;
/* Allocate the result cache (initial capacity 8) used by
 * get_alias_relation_ex(); paired with mem_disambig_term(). */
721 void mem_disambig_init(void)
723 result_cache = new_set(cmp_mem_disambig_entry, 8);
/* Caching wrapper around get_alias_relation(): normalizes the argument order
 * by opcode, then looks the pair up in result_cache before recomputing. */
726 ir_alias_relation get_alias_relation_ex(
727 const ir_node *adr1, const ir_mode *mode1,
728 const ir_node *adr2, const ir_mode *mode2)
730 mem_disambig_entry key, *entry;
/* NOTE(review): unconditional debug output to stderr on every query — looks
 * like leftover tracing; consider guarding with DB()/DEBUG_ONLY. */
732 ir_fprintf(stderr, "%+F <-> %+F\n", adr1, adr2);
734 if (! get_opt_alias_analysis())
/* Canonicalize the pair so (a,b) and (b,a) share one cache entry. */
737 if (get_irn_opcode(adr1) > get_irn_opcode(adr2)) {
738 const ir_node *t = adr1;
747 entry = set_find(mem_disambig_entry, result_cache, &key, sizeof(key), HASH_ENTRY(adr1, adr2));
749 return entry->result;
751 key.result = get_alias_relation(adr1, mode1, adr2, mode2);
753 (void)set_insert(mem_disambig_entry, result_cache, &key, sizeof(key), HASH_ENTRY(adr1, adr2));
/* Release the result cache if it was allocated. */
757 void mem_disambig_term(void)
759 if (result_cache != NULL) {
760 del_set(result_cache);
766 * Check the mode of a Load/Store with the mode of the entity
768 * If the mode of the entity and the Load/Store mode do not match, we
769 * have the bad reinterpret case:
772 * char b = *(char *)&i;
774 * We do NOT count this as one value and return address_taken
776 * However, we support an often used case. If the mode is two-complement
777 * we allow casts between signed/unsigned.
779 * @param mode the mode of the Load/Store
780 * @param ent_mode the mode of the accessed entity
782 * @return non-zero if the Load/Store is a hidden cast, zero else
784 static int is_hidden_cast(const ir_mode *mode, const ir_mode *ent_mode)
786 if (ent_mode == NULL)
789 if (ent_mode != mode) {
/* NOTE(review): ent_mode == NULL was already handled above — this re-check
 * is redundant (harmless, but could be dropped). */
790 if (ent_mode == NULL ||
/* Same size + both two's-complement => a tolerated signed/unsigned cast. */
791 get_mode_size_bits(ent_mode) != get_mode_size_bits(mode) ||
792 get_mode_arithmetic(ent_mode) != irma_twos_complement ||
793 get_mode_arithmetic(mode) != irma_twos_complement)
800 * Determine the usage state of a node (or its successor Sels).
802 * @param irn the node
/* Recursively accumulates ir_entity_usage flags by inspecting every out-edge
 * user of irn: Loads/Stores contribute read/write (plus reinterpret_cast when
 * the access mode mismatches the entity mode), Sels/Ids/Tuples recurse, and
 * any unhandled opcode conservatively yields ir_usage_unknown. */
804 static ir_entity_usage determine_entity_usage(const ir_node *irn, ir_entity *entity)
807 ir_mode *emode, *mode;
812 for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
813 ir_node *succ = get_irn_out(irn, i);
815 switch (get_irn_opcode(succ)) {
817 /* beware: irn might be a Id node here, so irn might be not
818 equal to get_Load_ptr(succ) */
819 res |= ir_usage_read;
821 /* check if this load is not a hidden conversion */
822 mode = get_Load_mode(succ);
823 emode = get_type_mode(get_entity_type(entity));
824 if (is_hidden_cast(mode, emode))
825 res |= ir_usage_reinterpret_cast;
829 /* check that the node is not the Store's value */
830 if (irn == get_Store_value(succ)) {
/* The address itself being stored means it escapes: unknown usage. */
831 res |= ir_usage_unknown;
833 if (irn == get_Store_ptr(succ)) {
834 res |= ir_usage_write;
836 /* check if this Store is not a hidden conversion */
837 value = get_Store_value(succ);
838 mode = get_irn_mode(value);
839 emode = get_type_mode(get_entity_type(entity));
840 if (is_hidden_cast(mode, emode))
841 res |= ir_usage_reinterpret_cast;
843 assert(irn != get_Store_mem(succ));
847 /* CopyB are like Loads/Stores */
848 tp = get_entity_type(entity);
849 if (tp != get_CopyB_type(succ)) {
850 /* bad, different types, might be a hidden conversion */
851 res |= ir_usage_reinterpret_cast;
853 if (irn == get_CopyB_dst(succ)) {
854 res |= ir_usage_write;
856 assert(irn == get_CopyB_src(succ));
857 res |= ir_usage_read;
863 /* Check the successor of irn. */
864 res |= determine_entity_usage(succ, entity);
867 ir_entity *sel_entity = get_Sel_entity(succ);
868 /* this analysis can't handle unions correctly */
869 if (is_Union_type(get_entity_owner(sel_entity))) {
870 res |= ir_usage_unknown;
873 /* Check the successor of irn. */
874 res |= determine_entity_usage(succ, sel_entity);
879 if (irn == get_Call_ptr(succ)) {
880 /* TODO: we could check for reinterpret casts here...
881 * But I doubt anyone is interested in that bit for
882 * function entities and I'm too lazy to write the code now.
884 res |= ir_usage_read;
/* Address passed as a Call argument: escapes, so unknown usage. */
886 assert(irn != get_Call_mem(succ));
887 res |= ir_usage_unknown;
891 /* skip identities */
893 res |= determine_entity_usage(succ, entity);
/* Tuple: find which input irn feeds, then follow the matching Proj(s). */
899 for (input_nr = get_Tuple_n_preds(succ) - 1; input_nr >= 0;
901 ir_node *pred = get_Tuple_pred(succ, input_nr);
904 /* we found one input */
905 for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
906 ir_node *proj = get_irn_out(succ, k);
908 if (is_Proj(proj) && get_Proj_proj(proj) == input_nr) {
909 res |= determine_entity_usage(proj, entity);
919 /* another op, we don't know anything (we could do more advanced
920 * things like a dataflow analysis here) */
921 res |= ir_usage_unknown;
926 return (ir_entity_usage) res;
930 * Update the usage flags of all frame entities.
/* Resets all non-method frame-type entities to ir_usage_none (or unknown when
 * IR_LINKAGE_HIDDEN_USER is set), then walks the users of the frame pointer to
 * accumulate usage, and finally scans inner functions for accesses to this
 * frame through their static-link argument. */
932 static void analyse_irg_entity_usage(ir_graph *irg)
934 ir_type *ft = get_irg_frame_type(irg);
937 int j, k, static_link_arg;
939 assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
941 /* set initial state to not_taken, as this is the "smallest" state */
942 for (i = 0, n = get_class_n_members(ft); i < n; ++i) {
943 ir_entity *ent = get_class_member(ft, i);
945 /* methods can only be analyzed globally */
946 if (! is_method_entity(ent)) {
947 ir_entity_usage flags = ir_usage_none;
948 if (get_entity_linkage(ent) & IR_LINKAGE_HIDDEN_USER)
949 flags = ir_usage_unknown;
950 set_entity_usage(ent, flags);
954 irg_frame = get_irg_frame(irg);
/* Accumulate usage for each entity selected from this graph's frame. */
956 for (j = get_irn_n_outs(irg_frame) - 1; j >= 0; --j) {
957 ir_node *succ = get_irn_out(irg_frame, j);
964 entity = get_Sel_entity(succ);
965 flags = get_entity_usage(entity);
966 flags |= determine_entity_usage(succ, entity);
967 set_entity_usage(entity, (ir_entity_usage) flags);
970 /* check inner functions accessing outer frame */
972 for (i = 0, n = get_class_n_members(ft); i < n; ++i) {
973 ir_entity *ent = get_class_member(ft, i);
977 if (! is_method_entity(ent))
980 inner_irg = get_entity_irg(ent);
981 if (inner_irg == NULL)
984 assure_irg_outs(inner_irg);
985 args = get_irg_args(inner_irg);
986 for (j = get_irn_n_outs(args) - 1; j >= 0; --j) {
987 ir_node *arg = get_irn_out(args, j);
/* Only the static-link argument can carry the outer frame pointer. */
989 if (get_Proj_proj(arg) == static_link_arg) {
990 for (k = get_irn_n_outs(arg) - 1; k >= 0; --k) {
991 ir_node *succ = get_irn_out(arg, k);
994 ir_entity *entity = get_Sel_entity(succ);
996 if (get_entity_owner(entity) == ft) {
997 /* found an access to the outer frame */
1000 flags = get_entity_usage(entity);
1001 flags |= determine_entity_usage(succ, entity);
1002 set_entity_usage(entity, (ir_entity_usage) flags);
1011 add_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
/* Recompute frame-entity usage only when the graph does not already carry
 * the CONSISTENT_ENTITY_USAGE property. */
1014 void assure_irg_entity_usage_computed(ir_graph *irg)
1016 if (irg_has_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE))
1019 analyse_irg_entity_usage(irg);
1024 * Initialize the entity_usage flag for a global type like type.
1026 static void init_entity_usage(ir_type *tp)
1030 /* We have to be conservative: All external visible entities are unknown */
1031 for (i = 0, n = get_compound_n_members(tp); i < n; ++i) {
1032 ir_entity *ent = get_compound_member(tp, i);
1033 unsigned flags = ir_usage_none;
1035 if (entity_is_externally_visible(ent)) {
1036 flags |= ir_usage_unknown;
1038 set_entity_usage(ent, (ir_entity_usage) flags);
1043 * Mark all entities used in the initializer as unknown usage.
1045 * @param initializer the initializer to check
/* Recurses through compound initializers; any entity whose address appears as
 * a const initializer value gets ir_usage_unknown (its address escapes). */
1047 static void check_initializer_nodes(ir_initializer_t *initializer)
1052 switch (initializer->kind) {
1053 case IR_INITIALIZER_CONST:
1054 /* let's check if it's an address */
1055 n = initializer->consti.value;
1056 if (is_SymConst_addr_ent(n)) {
1057 ir_entity *ent = get_SymConst_entity(n);
1058 set_entity_usage(ent, ir_usage_unknown);
/* Tarval and null initializers cannot reference entities. */
1061 case IR_INITIALIZER_TARVAL:
1062 case IR_INITIALIZER_NULL:
1064 case IR_INITIALIZER_COMPOUND:
1065 for (i = 0; i < initializer->compound.n_initializers; ++i) {
1066 ir_initializer_t *sub_initializer
1067 = initializer->compound.initializers[i];
1068 check_initializer_nodes(sub_initializer);
1072 panic("invalid initializer found");
1076 * Mark all entities used in the initializer for the given entity as unknown
1079 * @param ent the entity
1081 static void check_initializer(ir_entity *ent)
1083 /* Beware: Methods are always initialized with "themself". This does not
1084 * count as a taken address.
1085 * TODO: this initialisation with "themself" is wrong and should be removed
1087 if (is_Method_type(get_entity_type(ent)))
1090 if (ent->initializer != NULL) {
1091 check_initializer_nodes(ent->initializer);
1097 * Mark all entities used in initializers as unknown usage.
1099 * @param tp a compound type
/* Applies check_initializer() to every member of the compound type. */
1101 static void check_initializers(ir_type *tp)
1105 for (i = 0, n = get_compound_n_members(tp); i < n; ++i) {
1106 ir_entity *ent = get_compound_member(tp, i);
1108 check_initializer(ent);
1112 #ifdef DEBUG_libfirm
1114 * Print the entity usage flags of all entities of a given type for debugging.
1116 * @param tp a compound type
1118 static void print_entity_usage_flags(const ir_type *tp)
1121 for (i = 0, n = get_compound_n_members(tp); i < n; ++i) {
1122 ir_entity *ent = get_compound_member(tp, i);
1123 ir_entity_usage flags = get_entity_usage(ent);
1127 ir_printf("%+F:", ent);
1128 if (flags & ir_usage_address_taken)
1129 printf(" address_taken");
1130 if (flags & ir_usage_read)
1132 if (flags & ir_usage_write)
1134 if (flags & ir_usage_reinterpret_cast)
1135 printf(" reinterp_cast");
1139 #endif /* DEBUG_libfirm */
1142 * Post-walker: check for global entity address
/* For every SymConst entity address found while walking a graph, merge the
 * usage determined from that node's users into the entity's flags. */
1144 static void check_global_address(ir_node *irn, void *data)
1150 if (is_SymConst_addr_ent(irn)) {
1152 ent = get_SymConst_entity(irn);
1156 flags = get_entity_usage(ent);
1157 flags |= determine_entity_usage(irn, ent);
1158 set_entity_usage(ent, (ir_entity_usage) flags);
1162 * Update the entity usage flags of all global entities.
/* Pipeline: reset flags for every segment type, mark entities referenced from
 * initializers, then walk every graph collecting address usage; finally record
 * the computed state on the program. */
1164 static void analyse_irp_globals_entity_usage(void)
1169 for (s = IR_SEGMENT_FIRST; s <= IR_SEGMENT_LAST; ++s) {
1170 ir_type *type = get_segment_type(s);
1171 init_entity_usage(type);
1174 for (s = IR_SEGMENT_FIRST; s <= IR_SEGMENT_LAST; ++s) {
1175 ir_type *type = get_segment_type(s);
1176 check_initializers(type);
1179 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
1180 ir_graph *irg = get_irp_irg(i);
1182 assure_irg_outs(irg);
1183 irg_walk_graph(irg, NULL, check_global_address, NULL);
1186 #ifdef DEBUG_libfirm
1187 if (firm_dbg_get_mask(dbg) & LEVEL_1) {
1188 for (s = IR_SEGMENT_FIRST; s <= IR_SEGMENT_LAST; ++s) {
1189 print_entity_usage_flags(get_segment_type(s));
1192 #endif /* DEBUG_libfirm */
1195 irp->globals_entity_usage_state = ir_entity_usage_computed;
/* Query the program-wide entity-usage computation state. */
1198 ir_entity_usage_computed_state get_irp_globals_entity_usage_state(void)
1200 return irp->globals_entity_usage_state;
/* Set the program-wide entity-usage computation state (e.g. to invalidate it
 * after a transformation). */
1203 void set_irp_globals_entity_usage_state(ir_entity_usage_computed_state state)
1205 irp->globals_entity_usage_state = state;
/* Recompute global entity usage only if it has not been computed yet. */
1208 void assure_irp_globals_entity_usage_computed(void)
1210 if (irp->globals_entity_usage_state != ir_entity_usage_not_computed)
1213 analyse_irp_globals_entity_usage();
/* Register the two debug modules used by this file. */
1216 void firm_init_memory_disambiguator(void)
1218 FIRM_DBG_REGISTER(dbg, "firm.ana.irmemory");
1219 FIRM_DBG_REGISTER(dbgcall, "firm.opt.cc");
1223 /** Maps method types to cloned method types. */
/* Lives only for the duration of mark_private_methods(): created there and
 * destroyed at its end. */
1224 static pmap *mtp_map;
1227 * Clone a method type if not already cloned.
1229 * @param tp the type to clone
/* Memoized via mtp_map, so the same source type is cloned at most once. */
1231 static ir_type *clone_type_and_cache(ir_type *tp)
1233 ir_type *res = pmap_get(ir_type, mtp_map, tp);
1236 res = clone_type_method(tp);
1237 pmap_insert(mtp_map, tp, res);
1244 * Walker: clone all call types of Calls to methods having the
1245 * mtp_property_private property set.
1247 static void update_calls_to_private(ir_node *call, void *env)
1250 if (is_Call(call)) {
1251 ir_node *ptr = get_Call_ptr(call);
1253 if (is_SymConst(ptr)) {
1254 ir_entity *ent = get_SymConst_entity(ptr);
1255 ir_type *ctp = get_Call_type(call);
1257 if (get_entity_additional_properties(ent) & mtp_property_private) {
/* Only clone when the call type does not already carry the flag,
 * keeping call types shared with non-private callees intact. */
1258 if ((get_method_additional_properties(ctp) & mtp_property_private) == 0) {
1259 ctp = clone_type_and_cache(ctp);
1260 add_method_additional_properties(ctp, mtp_property_private);
1261 set_Call_type(call, ctp);
1262 DB((dbgcall, LEVEL_1, "changed call to private method %+F using cloned type %+F\n", ent, ctp));
/* Mark every graph entity whose address is never taken and that is not
 * externally visible as private (cloning its method type if needed), then
 * propagate the flag onto the call types of all calls to such methods. */
1269 void mark_private_methods(void)
1274 assure_irp_globals_entity_usage_computed();
1276 mtp_map = pmap_create();
1278 /* first step: change the calling conventions of the local non-escaped entities */
1279 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
1280 ir_graph *irg = get_irp_irg(i);
1281 ir_entity *ent = get_irg_entity(irg);
1282 ir_entity_usage flags = get_entity_usage(ent);
1284 if (!(flags & ir_usage_address_taken) && !entity_is_externally_visible(ent)) {
1285 ir_type *mtp = get_entity_type(ent);
1287 add_entity_additional_properties(ent, mtp_property_private);
1288 DB((dbgcall, LEVEL_1, "found private method %+F\n", ent));
1289 if ((get_method_additional_properties(mtp) & mtp_property_private) == 0) {
1290 /* need a new type */
1291 mtp = clone_type_and_cache(mtp);
1292 add_method_additional_properties(mtp, mtp_property_private);
1293 set_entity_type(ent, mtp);
1294 DB((dbgcall, LEVEL_2, "changed entity type of %+F to %+F\n", ent, mtp));
/* second step: fix up the call sites everywhere. */
1301 all_irg_walk(NULL, update_calls_to_private, NULL);
1303 pmap_destroy(mtp_map);
1306 ir_prog_pass_t *mark_private_methods_pass(const char *name)
1308 return def_prog_pass(name ? name : "mark_private_methods", mark_private_methods);