21 #include "pthread_impl.h"
/* Buffer holding the most recent human-readable dynamic-linker error
 * message (presumably surfaced to callers via dlerror(); the dlerror
 * definition is not visible in this excerpt). */
25 static char errbuf[128];
/* Select ELF structure types and r_info field extraction to match the
 * native word size: on 32-bit targets r_info packs the relocation type
 * in the low 8 bits and the symbol index in the upper 24; on 64-bit
 * targets the split is 32/32.
 * NOTE(review): the #else and #endif lines are elided in this excerpt. */
29 #if ULONG_MAX == 0xffffffff
30 typedef Elf32_Ehdr Ehdr;
31 typedef Elf32_Phdr Phdr;
32 typedef Elf32_Sym Sym;
33 #define R_TYPE(x) ((x)&255)
34 #define R_SYM(x) ((x)>>8)
36 typedef Elf64_Ehdr Ehdr;
37 typedef Elf64_Phdr Phdr;
38 typedef Elf64_Sym Sym;
39 #define R_TYPE(x) ((x)&0xffffffff)
40 #define R_SYM(x) ((x)>>32)
/* MAXP2(a,b): larger of two powers of two, computed branchlessly by
 * ANDing the two's-complement negations (valid only when a and b are
 * powers of 2).
 * ALIGN(x,y): round x up to the next multiple of y (y a power of 2). */
43 #define MAXP2(a,b) (-(-(a)&-(b)))
44 #define ALIGN(x,y) ((x)+(y)-1 & -(y))
58 struct dso *next, *prev;
75 struct dso **deps, *needed_by;
78 size_t tls_len, tls_size, tls_align, tls_id, tls_offset;
80 unsigned char *new_tls;
81 int new_dtv_idx, new_tls_idx;
82 struct dso *fini_next;
/* Hooks implemented elsewhere in libc: stack-protector setup, initial
 * TLS installation, and libc startup. */
94 void __init_ssp(size_t *);
95 void *__install_initial_tls(void *);
96 void __init_libc(char **, char *);
/* Global dynamic-linker state: DSO chain endpoints, the linker's own
 * dso entry, the destructor list head, library search paths, and a
 * generation counter (presumably bumped on load/unload — the update
 * sites are elided in this excerpt). */
98 static struct dso *head, *tail, *ldso, *fini_head;
99 static char *env_path, *sys_path;
100 static unsigned long long gencnt;
104 static int ldso_fail;
106 static jmp_buf *rtld_fail;
107 static pthread_rwlock_t lock;
108 static struct debug debug;
/* Accumulated static TLS layout; alignment starts at a conservative
 * 4 machine words. */
109 static size_t tls_cnt, tls_offset, tls_align = 4*sizeof(size_t);
/* Recursive so a constructor may legally re-enter dlopen (see
 * do_init_fini). */
110 static pthread_mutex_t init_fini_lock = { ._m_type = PTHREAD_MUTEX_RECURSIVE };
/* Address published for debuggers (gdb's _r_debug rendezvous). */
112 struct debug *_dl_debug_addr = &debug;
/* Decode a null-terminated ELF dynamic vector (tag/value pairs viewed
 * as size_t) into a dense array a[] indexed by tag, zeroing entries
 * first and storing only tags below cnt.
 * NOTE(review): the loop body and tag-presence bookkeeping are elided
 * in this excerpt. */
117 static void decode_vec(size_t *v, size_t *a, size_t cnt)
119 memset(a, 0, cnt*sizeof(size_t));
120 for (; v[0]; v+=2) if (v[0]<cnt) {
/* Linearly scan a dynamic tag/value vector for key; on success the
 * value is stored through r.  (The not-found check and store/return
 * lines are elided in this excerpt.) */
126 static int search_vec(size_t *v, size_t *r, size_t key)
128 for (; v[0]!=key; v+=2)
/* Classic System V ELF hash of a symbol name; the result is masked to
 * 28 bits as the gABI algorithm requires.  (The hashing loop itself is
 * elided in this excerpt.) */
134 static uint32_t sysv_hash(const char *s0)
136 const unsigned char *s = (void *)s0;
142 return h & 0xfffffff;
/* GNU-style symbol hash (DJB hash, seed 5381, h = h*33 + c).  The
 * accumulation loop and return are elided in this excerpt. */
145 static uint32_t gnu_hash(const char *s0)
147 const unsigned char *s = (void *)s0;
148 uint_fast32_t h = 5381;
/* Look up symbol s (with precomputed SysV hash h) in dso's SysV hash
 * table.  Layout per the ELF gABI: hashtab[0] = nbucket, buckets begin
 * at hashtab[2], chain entries follow the buckets.  Entries whose
 * versym value is negative (hidden versioned symbols) are skipped.
 * NOTE(review): the match return and fall-through return are elided. */
154 static Sym *sysv_lookup(const char *s, uint32_t h, struct dso *dso)
157 Sym *syms = dso->syms;
158 uint32_t *hashtab = dso->hashtab;
159 char *strings = dso->strings;
160 for (i=hashtab[2+h%hashtab[0]]; i; i=hashtab[2+hashtab[0]+i]) {
161 if ((!dso->versym || dso->versym[i] >= 0)
162 && (!strcmp(s, strings+syms[i].st_name)))
/* Look up symbol s (precomputed GNU hash h1) in dso's GNU hash table.
 * Header layout: [0]=nbuckets, [1]=symoffset, [2]=bloom word count,
 * [3]=bloom shift, then the bloom filter (size_t words, hence the
 * sizeof(size_t)/4 scaling), then the buckets, then the hash-value
 * chain.  Chains are scanned with the low bit of each stored hash
 * masked in (h1 |= 1); the low bit set marks end of chain.  Hidden
 * versioned symbols (versym < 0) are skipped.
 * NOTE(review): the bloom-filter check and several chain-walk lines
 * are elided in this excerpt. */
168 static Sym *gnu_lookup(const char *s, uint32_t h1, struct dso *dso)
170 Sym *syms = dso->syms;
171 char *strings = dso->strings;
172 uint32_t *hashtab = dso->ghashtab;
173 uint32_t nbuckets = hashtab[0];
174 uint32_t *buckets = hashtab + 4 + hashtab[2]*(sizeof(size_t)/4);
177 uint32_t i = buckets[h1 % nbuckets];
181 hashval = buckets + nbuckets + (i - hashtab[1]);
183 for (h1 |= 1; ; i++) {
185 if ((!dso->versym || dso->versym[i] >= 0)
186 && (h1 == (h2|1)) && !strcmp(s, strings + syms[i].st_name))
/* Bitmask filters for which symbol types and bindings participate in
 * dynamic symbol resolution. */
194 #define OK_TYPES (1<<STT_NOTYPE | 1<<STT_OBJECT | 1<<STT_FUNC | 1<<STT_COMMON | 1<<STT_TLS)
195 #define OK_BINDS (1<<STB_GLOBAL | 1<<STB_WEAK | 1<<STB_GNU_UNIQUE)
/* Resolve symbol s by walking the DSO chain starting at dso, using the
 * GNU hash table when a DSO has one and falling back to SysV hashing
 * otherwise (each hash computed lazily, at most once).  Skips DSOs not
 * marked global.  Records whether __stack_chk_fail is referenced so the
 * stack protector can be initialized (matched by precomputed hash plus
 * strcmp).  A strong (STB_GLOBAL) definition stops the search; a weak
 * one is kept but the search continues in case a strong one follows.
 * need_def requests a real definition (undefined entries are only
 * accepted for non-TLS symbols when !need_def).
 * NOTE(review): several lines (hash precompute, undefined/def bookkeeping,
 * final return) are elided in this excerpt. */
197 static struct symdef find_sym(struct dso *dso, const char *s, int need_def)
199 uint32_t h = 0, gh = 0;
200 struct symdef def = {0};
203 if (gh == 0x1f4039c9 && !strcmp(s, "__stack_chk_fail")) ssp_used = 1;
206 if (h == 0x595a4cc && !strcmp(s, "__stack_chk_fail")) ssp_used = 1;
208 for (; dso; dso=dso->next) {
210 if (!dso->global) continue;
212 if (!gh) gh = gnu_hash(s);
213 sym = gnu_lookup(s, gh, dso);
215 if (!h) h = sysv_hash(s);
216 sym = sysv_lookup(s, h, dso);
220 if (need_def || (sym->st_info&0xf) == STT_TLS)
223 if ((sym->st_info&0xf) != STT_TLS)
225 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
226 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
228 if (def.sym && sym->st_info>>4 == STB_WEAK) continue;
231 if (sym->st_info>>4 == STB_GLOBAL) break;
/* Process a relocation table for dso.  stride is the record width in
 * words (2 for Rel, 3 for Rela, so rel[2] is the addend only when
 * stride>2).  For each record: decode type and symbol index from
 * r_info, resolve the symbol — COPY relocations search starting past
 * the main program (head->next) so the copy source comes from a
 * library — and apply it via do_single_reloc.  A missing non-weak
 * symbol is fatal at load time (message + ldso_fail, presumably — the
 * failure bookkeeping line is elided) or longjmps out at runtime.
 * NOTE(review): multiple interior lines are elided in this excerpt. */
236 static void do_relocs(struct dso *dso, size_t *rel, size_t rel_size, size_t stride)
238 unsigned char *base = dso->base;
239 Sym *syms = dso->syms;
240 char *strings = dso->strings;
248 for (; rel_size; rel+=stride, rel_size-=stride*sizeof(size_t)) {
249 type = R_TYPE(rel[1]);
250 sym_index = R_SYM(rel[1]);
252 sym = syms + sym_index;
253 name = strings + sym->st_name;
254 ctx = IS_COPY(type) ? head->next : head;
255 def = find_sym(ctx, name, IS_PLT(type));
256 if (!def.sym && sym->st_info>>4 != STB_WEAK) {
257 snprintf(errbuf, sizeof errbuf,
258 "Error relocating %s: %s: symbol not found",
260 if (runtime) longjmp(*rtld_fail, 1);
261 dprintf(2, "%s\n", errbuf);
270 do_single_reloc(dso, base, (void *)(base + rel[0]), type,
271 stride>2 ? rel[2] : 0, sym, sym?sym->st_size:0, def,
272 def.sym?(size_t)(def.dso->base+def.sym->st_value):0);
276 /* A huge hack: to make up for the wastefulness of shared libraries
277 * needing at least a page of dirty memory even if they have no global
278 * data, we reclaim the gaps at the beginning and end of writable maps
279 * and "donate" them to the heap by setting up minimal malloc
280 * structures and then freeing them. */
/* Donate the byte range [start,end) of a writable mapping to malloc by
 * forging a free chunk: align the range, bail if what remains is too
 * small, then write chunk header/footer words with the in-use bit (|1)
 * so the subsequent free() (elided in this excerpt) coalesces it into
 * the heap.  Layout constants (6/4/2 words) mirror malloc's internal
 * chunk format — see the "huge hack" comment above. */
282 static void reclaim(unsigned char *base, size_t start, size_t end)
285 start = start + 6*sizeof(size_t)-1 & -4*sizeof(size_t);
286 end = (end & -4*sizeof(size_t)) - 2*sizeof(size_t);
287 if (start>end || end-start < 4*sizeof(size_t)) return;
288 a = (size_t *)(base + start);
289 z = (size_t *)(base + end);
291 a[-1] = z[0] = end-start + 2*sizeof(size_t) | 1;
/* For every writable (RW) PT_LOAD segment, reclaim the page-alignment
 * slack before its start and after its end, donating those gaps to the
 * heap via reclaim(). */
296 static void reclaim_gaps(unsigned char *base, Phdr *ph, size_t phent, size_t phcnt)
298 for (; phcnt--; ph=(void *)((char *)ph+phent)) {
299 if (ph->p_type!=PT_LOAD) continue;
300 if ((ph->p_flags&(PF_R|PF_W))!=(PF_R|PF_W)) continue;
301 reclaim(base, ph->p_vaddr & -PAGE_SIZE, ph->p_vaddr);
302 reclaim(base, ph->p_vaddr+ph->p_memsz,
303 ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE);
/* Map an ELF shared object (or ET_EXEC) from fd into memory and fill
 * in dso's mapping, TLS, phdr and dynamic-section fields.  Returns the
 * base of the mapping context (the map pointer, presumably — the final
 * return lines are elided), or 0 on failure.
 * Phases: (1) read/validate the ELF header and program headers, using
 * a heap buffer only if they don't fit in the stack buffer; (2) first
 * pass over phdrs collecting PT_DYNAMIC, PT_TLS, and the min/max load
 * addresses; (3) reserve the whole address range with one mmap of the
 * lowest segment; (4) second pass mapping each remaining PT_LOAD with
 * MAP_FIXED, zeroing the BSS tail and mapping anonymous pages past the
 * file data; (5) honor DT_TEXTREL by making everything writable; (6)
 * donate page gaps to malloc at startup.
 * NOTE(review): many interior lines (declarations, error labels,
 * cleanup, final return) are elided in this excerpt. */
307 static void *map_library(int fd, struct dso *dso)
309 Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
310 void *allocated_buf=0;
312 size_t addr_min=SIZE_MAX, addr_max=0, map_len;
313 size_t this_min, this_max;
318 unsigned char *map=MAP_FAILED, *base;
323 ssize_t l = read(fd, buf, sizeof buf);
326 if (l<sizeof *eh || (eh->e_type != ET_DYN && eh->e_type != ET_EXEC))
328 phsize = eh->e_phentsize * eh->e_phnum;
/* Program headers: use a malloc'd buffer if too big for buf, pread if
 * beyond what the initial read covered, else point into buf. */
329 if (phsize > sizeof buf - sizeof *eh) {
330 allocated_buf = malloc(phsize);
331 if (!allocated_buf) return 0;
332 l = pread(fd, allocated_buf, phsize, eh->e_phoff);
333 if (l < 0) goto error;
334 if (l != phsize) goto noexec;
335 ph = ph0 = allocated_buf;
336 } else if (eh->e_phoff + phsize > l) {
337 l = pread(fd, buf+1, phsize, eh->e_phoff);
338 if (l < 0) goto error;
339 if (l != phsize) goto noexec;
340 ph = ph0 = (void *)(buf + 1);
342 ph = ph0 = (void *)((char *)buf + eh->e_phoff);
/* Pass 1: find PT_DYNAMIC/PT_TLS and the load-address extent. */
344 for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
345 if (ph->p_type == PT_DYNAMIC)
347 if (ph->p_type == PT_TLS) {
348 tls_image = ph->p_vaddr;
349 dso->tls_align = ph->p_align;
350 dso->tls_len = ph->p_filesz;
351 dso->tls_size = ph->p_memsz;
353 if (ph->p_type != PT_LOAD) continue;
354 if (ph->p_vaddr < addr_min) {
355 addr_min = ph->p_vaddr;
356 off_start = ph->p_offset;
357 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
358 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
359 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
361 if (ph->p_vaddr+ph->p_memsz > addr_max) {
362 addr_max = ph->p_vaddr+ph->p_memsz;
/* A file with no dynamic section cannot be loaded here. */
365 if (!dyn) goto noexec;
366 addr_max += PAGE_SIZE-1;
367 addr_max &= -PAGE_SIZE;
368 addr_min &= -PAGE_SIZE;
369 off_start &= -PAGE_SIZE;
370 map_len = addr_max - addr_min + off_start;
371 /* The first time, we map too much, possibly even more than
372 * the length of the file. This is okay because we will not
373 * use the invalid part; we just need to reserve the right
374 * amount of virtual address space to map over later. */
375 map = mmap((void *)addr_min, map_len, prot, MAP_PRIVATE, fd, off_start);
376 if (map==MAP_FAILED) goto error;
377 /* If the loaded file is not relocatable and the requested address is
378 * not available, then the load operation must fail. */
379 if (eh->e_type != ET_DYN && addr_min && map!=(void *)addr_min) {
/* base is the load bias: vaddr + base = runtime address. */
383 base = map - addr_min;
/* Pass 2: map each PT_LOAD over the reservation. */
386 for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
387 if (ph->p_type != PT_LOAD) continue;
388 /* Check if the programs headers are in this load segment, and
389 * if so, record the address for use by dl_iterate_phdr. */
390 if (!dso->phdr && eh->e_phoff >= ph->p_offset
391 && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
392 dso->phdr = (void *)(base + ph->p_vaddr
393 + (eh->e_phoff-ph->p_offset));
394 dso->phnum = eh->e_phnum;
396 /* Reuse the existing mapping for the lowest-address LOAD */
397 if ((ph->p_vaddr & -PAGE_SIZE) == addr_min) continue;
398 this_min = ph->p_vaddr & -PAGE_SIZE;
399 this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
400 off_start = ph->p_offset & -PAGE_SIZE;
401 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
402 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
403 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
404 if (mmap(base+this_min, this_max-this_min, prot, MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
/* Zero the partial page after filesz and back memsz>filesz (BSS)
 * with anonymous pages. */
406 if (ph->p_memsz > ph->p_filesz) {
407 size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
408 size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
409 memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
410 if (pgbrk-(size_t)base < this_max && mmap((void *)pgbrk, (size_t)base+this_max-pgbrk, prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
/* DT_TEXTREL means relocations patch read-only segments: make the
 * whole mapping writable up front. */
414 for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
415 if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
416 if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC) < 0)
420 if (!runtime) reclaim_gaps(base, ph0, eh->e_phentsize, eh->e_phnum);
422 dso->map_len = map_len;
424 dso->dynv = (void *)(base+dyn);
425 if (dso->tls_size) dso->tls_image = (void *)(base+tls_image);
/* Error path: unmap anything partially mapped. */
431 if (map!=MAP_FAILED) munmap(map, map_len);
/* Try to open name relative to each component of the colon- (or
 * newline-) separated search path s.  Each candidate is formatted into
 * buf; oversized components or truncated paths are rejected.  Returns
 * an open fd on success; the not-found return and loop structure are
 * partially elided in this excerpt.  Note l-1 >= INT_MAX also catches
 * l == 0 via unsigned wraparound. */
436 static int path_open(const char *name, const char *s, char *buf, size_t buf_size)
441 s += strspn(s, ":\n");
442 l = strcspn(s, ":\n");
443 if (l-1 >= INT_MAX) return -1;
444 if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) >= buf_size)
446 if ((fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) return fd;
/* Decode p's dynamic section into convenient pointers: symbol table,
 * string table, SysV and GNU hash tables, rpath, and version symbol
 * table.  Tags below DYN_CNT come through decode_vec (with presence
 * recorded as a bit in dyn[0]); larger tags like DT_GNU_HASH and
 * DT_VERSYM are found with search_vec. */
451 static void decode_dyn(struct dso *p)
453 size_t dyn[DYN_CNT] = {0};
454 decode_vec(p->dynv, dyn, DYN_CNT);
455 p->syms = (void *)(p->base + dyn[DT_SYMTAB]);
456 p->strings = (void *)(p->base + dyn[DT_STRTAB]);
457 if (dyn[0]&(1<<DT_HASH))
458 p->hashtab = (void *)(p->base + dyn[DT_HASH]);
459 if (dyn[0]&(1<<DT_RPATH))
460 p->rpath = (void *)(p->strings + dyn[DT_RPATH]);
461 if (search_vec(p->dynv, dyn, DT_GNU_HASH))
462 p->ghashtab = (void *)(p->base + *dyn);
463 if (search_vec(p->dynv, dyn, DT_VERSYM))
464 p->versym = (void *)(p->base + *dyn);
/* Locate, map, and register the shared library `name`, requested by
 * `needed_by` (NULL for direct requests).  Returns the dso, or 0 on
 * failure.  Steps: redirect reserved libc-family names (libc, pthread,
 * rt, m, dl, util, xnet) to the already-loaded implementation; search
 * by explicit path, LD_LIBRARY_PATH, DT_RPATH of the dependents, then
 * the system path (from /etc/ld-musl-ARCH.path next to the linker, or
 * a built-in default); dedupe already-loaded libraries by shortname or
 * by dev/inode; map the file; allocate the dso record — with extra
 * per-thread space for new TLS and an extended DTV when loading at
 * runtime — and assign a TLS offset per the TLS variant (above or
 * below the thread pointer) before linking it into the chain.
 * NOTE(review): many interior lines (list linkage, error paths,
 * returns) are elided in this excerpt. */
467 static struct dso *load_library(const char *name, struct dso *needed_by)
469 char buf[2*NAME_MAX+2];
470 const char *pathname;
472 struct dso *p, temp_dso = {0};
479 /* Catch and block attempts to reload the implementation itself */
480 if (name[0]=='l' && name[1]=='i' && name[2]=='b') {
481 static const char *rp, reserved[] =
482 "c\0pthread\0rt\0m\0dl\0util\0xnet\0";
483 char *z = strchr(name, '.');
486 for (rp=reserved; *rp && strncmp(name+3, rp, l-3); rp+=strlen(rp)+1);
489 /* Track which names have been resolved
490 * and only report each one once. */
491 static unsigned reported;
492 unsigned mask = 1U<<(rp-reserved);
493 if (!(reported & mask)) {
495 dprintf(1, "\t%s => %s (%p)\n",
504 if (!strcmp(name, ldso->name)) is_self = 1;
509 tail = ldso->next ? ldso->next : ldso;
/* Explicit path: open directly.  Bare name: search. */
513 if (strchr(name, '/')) {
515 fd = open(name, O_RDONLY|O_CLOEXEC);
517 /* Search for the name to see if it's already loaded */
518 for (p=head->next; p; p=p->next) {
519 if (p->shortname && !strcmp(p->shortname, name)) {
524 if (strlen(name) > NAME_MAX) return 0;
526 if (env_path) fd = path_open(name, env_path, buf, sizeof buf);
527 for (p=needed_by; fd < 0 && p; p=p->needed_by)
529 fd = path_open(name, p->rpath, buf, sizeof buf);
/* Derive the system path file location from the linker's own path
 * prefix, e.g. /usr/lib/ld-musl-X -> /usr/etc/ld-musl-X.path. */
534 if (ldso->name[0]=='/') {
536 for (s=t=z=ldso->name; *s; s++)
537 if (*s=='/') z=t, t=s;
538 prefix_len = z-ldso->name;
539 if (prefix_len < PATH_MAX)
546 char etc_ldso_path[prefix_len + 1
547 + sizeof "/etc/ld-musl-" LDSO_ARCH ".path"];
548 snprintf(etc_ldso_path, sizeof etc_ldso_path,
549 "%.*s/etc/ld-musl-" LDSO_ARCH ".path",
550 (int)prefix_len, prefix);
551 FILE *f = fopen(etc_ldso_path, "rbe");
553 if (getdelim(&sys_path, (size_t[1]){0}, 0, f) <= 0) {
560 if (!sys_path) sys_path = "/lib:/usr/local/lib:/usr/lib";
561 fd = path_open(name, sys_path, buf, sizeof buf);
565 if (fd < 0) return 0;
566 if (fstat(fd, &st) < 0) {
/* Dedupe by device/inode in case the same file has multiple names. */
570 for (p=head->next; p; p=p->next) {
571 if (p->dev == st.st_dev && p->ino == st.st_ino) {
572 /* If this library was previously loaded with a
573 * pathname but a search found the same inode,
574 * setup its shortname so it can be found by name. */
575 if (!p->shortname && pathname != name)
576 p->shortname = strrchr(p->name, '/')+1;
582 map = noload ? 0 : map_library(fd, &temp_dso);
586 /* Allocate storage for the new DSO. When there is TLS, this
587 * storage must include a reservation for all pre-existing
588 * threads to obtain copies of both the new TLS, and an
589 * extended DTV capable of storing an additional slot for
590 * the newly-loaded DSO. */
591 alloc_size = sizeof *p + strlen(pathname) + 1;
592 if (runtime && temp_dso.tls_image) {
593 size_t per_th = temp_dso.tls_size + temp_dso.tls_align
594 + sizeof(void *) * (tls_cnt+3);
595 n_th = libc.threads_minus_1 + 1;
596 if (n_th > SSIZE_MAX / per_th) alloc_size = SIZE_MAX;
597 else alloc_size += n_th * per_th;
599 p = calloc(1, alloc_size);
601 munmap(map, temp_dso.map_len);
604 memcpy(p, &temp_dso, sizeof temp_dso);
609 p->needed_by = needed_by;
611 strcpy(p->name, pathname);
612 /* Add a shortname only if name arg was not an explicit pathname. */
613 if (pathname != name) p->shortname = strrchr(p->name, '/')+1;
615 if (runtime && !__pthread_self_init()) {
616 munmap(map, p->map_len);
/* TLS placement: the two branches below correspond to the TLS-above
 * vs TLS-below-thread-pointer ABI variants (the #ifdef lines are
 * elided in this excerpt). */
620 p->tls_id = ++tls_cnt;
621 tls_align = MAXP2(tls_align, p->tls_align);
623 p->tls_offset = tls_offset + ( (tls_align-1) &
624 -(tls_offset + (uintptr_t)p->tls_image) );
625 tls_offset += p->tls_size;
627 tls_offset += p->tls_size + p->tls_align - 1;
628 tls_offset -= (tls_offset + (uintptr_t)p->tls_image)
630 p->tls_offset = tls_offset;
/* Carve the per-thread new-DTV and new-TLS reservations out of the
 * tail of the dso allocation, pointer-aligned. */
632 p->new_dtv = (void *)(-sizeof(size_t) &
633 (uintptr_t)(p->name+strlen(p->name)+sizeof(size_t)));
634 p->new_tls = (void *)(p->new_dtv + n_th*(tls_cnt+1));
641 if (ldd_mode) dprintf(1, "\t%s => %s (%p)\n", name, pathname, p->base);
/* Breadth-first load of all DT_NEEDED dependencies of p and of every
 * DSO appended after it, recording direct dependencies in p->deps (a
 * NULL-terminated, realloc-grown array).  A failed load is fatal at
 * startup (message to stderr; the ldso_fail line is elided) or
 * longjmps to the dlopen error handler at runtime. */
646 static void load_deps(struct dso *p)
649 struct dso ***deps = &p->deps, **tmp, *dep;
650 for (; p; p=p->next) {
651 for (i=0; p->dynv[i]; i+=2) {
652 if (p->dynv[i] != DT_NEEDED) continue;
653 dep = load_library(p->strings + p->dynv[i+1], p);
655 snprintf(errbuf, sizeof errbuf,
656 "Error loading shared library %s: %m (needed by %s)",
657 p->strings + p->dynv[i+1], p->name);
658 if (runtime) longjmp(*rtld_fail, 1);
659 dprintf(2, "%s\n", errbuf);
664 tmp = realloc(*deps, sizeof(*tmp)*(ndeps+2));
665 if (!tmp) longjmp(*rtld_fail, 1);
/* Load each whitespace-separated library name in the LD_PRELOAD
 * string s.  (The tokenization terminator and the load_library call
 * are elided in this excerpt.) */
674 static void load_preload(char *s)
679 for ( ; *s && isspace(*s); s++);
680 for (z=s; *z && !isspace(*z); z++);
/* Mark p and every DSO after it in the chain as participating in
 * global symbol resolution (see find_sym's global check). */
688 static void make_global(struct dso *p)
690 for (; p; p=p->next) p->global = 1;
/* Apply all relocations for p and every DSO after it, skipping those
 * already relocated: arch-specific relocations first (if the port
 * needs them), then PLT/JMPREL (stride 2 or 3 depending on Rel vs
 * Rela), then DT_REL and DT_RELA tables.  (The #endif and the
 * relocated-flag update are elided in this excerpt.) */
693 static void reloc_all(struct dso *p)
695 size_t dyn[DYN_CNT] = {0};
696 for (; p; p=p->next) {
697 if (p->relocated) continue;
698 decode_vec(p->dynv, dyn, DYN_CNT);
699 #ifdef NEED_ARCH_RELOCS
700 do_arch_relocs(p, head);
702 do_relocs(p, (void *)(p->base+dyn[DT_JMPREL]), dyn[DT_PLTRELSZ],
703 2+(dyn[DT_PLTREL]==DT_RELA));
704 do_relocs(p, (void *)(p->base+dyn[DT_REL]), dyn[DT_RELSZ], 2);
705 do_relocs(p, (void *)(p->base+dyn[DT_RELA]), dyn[DT_RELASZ], 3);
/* Scan cnt program headers (each stride bytes apart) for PT_DYNAMIC
 * and return its p_vaddr.  (The return statements are elided in this
 * excerpt; presumably 0 when absent.) */
710 static size_t find_dyn(Phdr *ph, size_t cnt, size_t stride)
712 for (; cnt--; ph = (void *)((char *)ph + stride))
713 if (ph->p_type == PT_DYNAMIC)
/* Compute the page-aligned extent of all PT_LOAD segments and record
 * the mapping start and length in p (used for address-based lookups
 * such as dladdr and RTLD_NEXT). */
718 static void find_map_range(Phdr *ph, size_t cnt, size_t stride, struct dso *p)
720 size_t min_addr = -1, max_addr = 0;
721 for (; cnt--; ph = (void *)((char *)ph + stride)) {
722 if (ph->p_type != PT_LOAD) continue;
723 if (ph->p_vaddr < min_addr)
724 min_addr = ph->p_vaddr;
725 if (ph->p_vaddr+ph->p_memsz > max_addr)
726 max_addr = ph->p_vaddr+ph->p_memsz;
728 min_addr &= -PAGE_SIZE;
729 max_addr = (max_addr + PAGE_SIZE-1) & -PAGE_SIZE;
730 p->map = p->base + min_addr;
731 p->map_len = max_addr - min_addr;
/* Run destructors for every fully-constructed DSO on the fini list:
 * DT_FINI_ARRAY entries in reverse order, then the legacy DT_FINI
 * function.  The fini_head list was built in reverse construction
 * order by do_init_fini, so iteration here runs destructors opposite
 * to construction.  (The #endif is elided in this excerpt.) */
734 static void do_fini()
737 size_t dyn[DYN_CNT] = {0};
738 for (p=fini_head; p; p=p->fini_next) {
739 if (!p->constructed) continue;
740 decode_vec(p->dynv, dyn, DYN_CNT);
741 if (dyn[0] & (1<<DT_FINI_ARRAY)) {
742 size_t n = dyn[DT_FINI_ARRAYSZ]/sizeof(size_t);
743 size_t *fn = (size_t *)(p->base + dyn[DT_FINI_ARRAY])+n;
744 while (n--) ((void (*)(void))*--fn)();
746 #ifndef NO_LEGACY_INITFINI
747 if ((dyn[0] & (1<<DT_FINI)) && dyn[DT_FINI])
748 ((void (*)(void))(p->base + dyn[DT_FINI]))();
/* Run constructors for p and everything before it in the chain
 * (walking prev so dependencies construct first), registering DSOs
 * that have destructors on the fini list as we go.  Legacy DT_INIT
 * runs before DT_INIT_ARRAY entries (forward order).  The recursive
 * init_fini_lock lets a constructor re-enter dlopen; if threads are
 * created by a constructor mid-walk, locking is picked up on the fly.
 * (Several lines — the constructed flag set, fini_head update, and
 * #endif — are elided in this excerpt.) */
753 static void do_init_fini(struct dso *p)
755 size_t dyn[DYN_CNT] = {0};
756 int need_locking = libc.threads_minus_1;
757 /* Allow recursive calls that arise when a library calls
758 * dlopen from one of its constructors, but block any
759 * other threads until all ctors have finished. */
760 if (need_locking) pthread_mutex_lock(&init_fini_lock);
761 for (; p; p=p->prev) {
762 if (p->constructed) continue;
764 decode_vec(p->dynv, dyn, DYN_CNT);
765 if (dyn[0] & ((1<<DT_FINI) | (1<<DT_FINI_ARRAY))) {
766 p->fini_next = fini_head;
769 #ifndef NO_LEGACY_INITFINI
770 if ((dyn[0] & (1<<DT_INIT)) && dyn[DT_INIT])
771 ((void (*)(void))(p->base + dyn[DT_INIT]))();
773 if (dyn[0] & (1<<DT_INIT_ARRAY)) {
774 size_t n = dyn[DT_INIT_ARRAYSZ]/sizeof(size_t);
775 size_t *fn = (void *)(p->base + dyn[DT_INIT_ARRAY]);
776 while (n--) ((void (*)(void))*fn++)();
778 if (!need_locking && libc.threads_minus_1) {
780 pthread_mutex_lock(&init_fini_lock);
783 if (need_locking) pthread_mutex_unlock(&init_fini_lock);
786 void _dl_debug_state(void)
792 pthread_t self = __pthread_self();
794 for (p=head; p; p=p->next) {
795 if (!p->tls_id || !self->dtv[p->tls_id]) continue;
796 memcpy(self->dtv[p->tls_id], p->tls_image, p->tls_len);
797 memset((char *)self->dtv[p->tls_id]+p->tls_len, 0,
798 p->tls_size - p->tls_len);
799 if (p->tls_id == (size_t)self->dtv[0]) break;
/* Lay out a new thread's static TLS area in the buffer mem: build the
 * DTV (slot 0 = module count), then copy each DSO's TLS image to its
 * assigned offset.  The two loops correspond to the TLS-above vs
 * TLS-below-thread-pointer ABI variants (the #ifdef/#else lines and
 * the returns are elided in this excerpt); offsets are added in one
 * variant and subtracted in the other. */
803 void *__copy_tls(unsigned char *mem)
808 if (!tls_cnt) return mem;
810 void **dtv = (void *)mem;
811 dtv[0] = (void *)tls_cnt;
814 mem += sizeof(void *) * (tls_cnt+1);
815 mem += -((uintptr_t)mem + sizeof(struct pthread)) & (tls_align-1);
817 mem += sizeof(struct pthread);
819 for (p=head; p; p=p->next) {
820 if (!p->tls_id) continue;
821 dtv[p->tls_id] = mem + p->tls_offset;
822 memcpy(dtv[p->tls_id], p->tls_image, p->tls_len);
825 mem += libc.tls_size - sizeof(struct pthread);
826 mem -= (uintptr_t)mem & (tls_align-1);
829 for (p=head; p; p=p->next) {
830 if (!p->tls_id) continue;
831 dtv[p->tls_id] = mem - p->tls_offset;
832 memcpy(dtv[p->tls_id], p->tls_image, p->tls_len);
/* Dynamic TLS accessor: v[0] is the module id, v[1] the offset within
 * that module's TLS block.  Fast path: the thread's DTV already covers
 * the module and the slot is installed.  Slow path (module loaded by
 * dlopen after this thread started): with signals blocked for
 * async-signal-safety, find the owning DSO, grab a pre-reserved DTV
 * extension and TLS block from its new_dtv/new_tls pools (claimed via
 * atomic fetch-add so concurrent threads get distinct slots), install,
 * and copy in the TLS image.  (The returns, the re-check's DTV-install
 * line, and the p->tls_size guard lines are partially elided in this
 * excerpt.) */
839 void *__tls_get_addr(size_t *v)
841 pthread_t self = __pthread_self();
842 if (v[0]<=(size_t)self->dtv[0] && self->dtv[v[0]])
843 return (char *)self->dtv[v[0]]+v[1];
845 /* Block signals to make accessing new TLS async-signal-safe */
847 pthread_sigmask(SIG_BLOCK, SIGALL_SET, &set);
848 if (v[0]<=(size_t)self->dtv[0] && self->dtv[v[0]]) {
849 pthread_sigmask(SIG_SETMASK, &set, 0);
850 return (char *)self->dtv[v[0]]+v[1];
853 /* This is safe without any locks held because, if the caller
854 * is able to request the Nth entry of the DTV, the DSO list
855 * must be valid at least that far out and it was synchronized
856 * at program startup or by an already-completed call to dlopen. */
858 for (p=head; p->tls_id != v[0]; p=p->next);
860 /* Get new DTV space from new DSO if needed */
861 if (v[0] > (size_t)self->dtv[0]) {
862 void **newdtv = p->new_dtv +
863 (v[0]+1)*sizeof(void *)*a_fetch_add(&p->new_dtv_idx,1);
864 memcpy(newdtv, self->dtv,
865 ((size_t)self->dtv[0]+1) * sizeof(void *));
866 newdtv[0] = (void *)v[0];
870 /* Get new TLS memory from new DSO */
871 unsigned char *mem = p->new_tls +
872 (p->tls_size + p->tls_align) * a_fetch_add(&p->new_tls_idx,1);
873 mem += ((uintptr_t)p->tls_image - (uintptr_t)mem) & (p->tls_align-1);
874 self->dtv[v[0]] = mem;
875 memcpy(mem, p->tls_image, p->tls_len);
876 pthread_sigmask(SIG_SETMASK, &set, 0);
/* Recompute libc.tls_size: DTV slots for every TLS module plus the
 * pthread structure plus the accumulated TLS block sizes, rounded per
 * ALIGN.  (Some summand lines are elided in this excerpt.) */
880 static void update_tls_size()
882 libc.tls_size = ALIGN(
883 (1+tls_cnt) * sizeof(void *) +
885 sizeof(struct pthread) +
/* Dynamic linker entry point, called from the crt startup with the
 * raw argc/argv.  Performs the entire program load sequence and
 * returns the application entry address for the caller to jump to.
 * Phases: parse env/aux vectors (ignoring LD_* when setuid/setgid per
 * AT_SECURE); locate the linker's own mapping and the main program's
 * phdrs/TLS; handle direct invocation ("ldso prog args" and ldd
 * mode); attach the vDSO; relocate the linker itself; load preloads
 * and DT_NEEDED libraries; relocate everything (app last, for COPY
 * relocations); install initial TLS; publish the debugger rendezvous
 * structure; then hand control to __init_libc and return AT_ENTRY.
 * NOTE(review): a large number of interior lines are elided in this
 * excerpt, including the relocate-self step, list-linking, and several
 * #ifdef branches. */
890 void *__dynlink(int argc, char **argv)
892 size_t aux[AUX_CNT] = {0};
896 static struct dso builtin_dsos[3];
897 struct dso *const app = builtin_dsos+0;
898 struct dso *const lib = builtin_dsos+1;
899 struct dso *const vdso = builtin_dsos+2;
903 char **envp = argv+argc+1;
905 /* Find aux vector just past environ[] */
906 for (i=argc+1; argv[i]; i++)
907 if (!memcmp(argv[i], "LD_LIBRARY_PATH=", 16))
908 env_path = argv[i]+16;
909 else if (!memcmp(argv[i], "LD_PRELOAD=", 11))
910 env_preload = argv[i]+11;
911 auxv = (void *)(argv+i+1);
913 decode_vec(auxv, aux, AUX_CNT);
915 /* Only trust user/env if kernel says we're not suid/sgid */
916 if ((aux[0]&0x7800)!=0x7800 || aux[AT_UID]!=aux[AT_EUID]
917 || aux[AT_GID]!=aux[AT_EGID] || aux[AT_SECURE]) {
922 /* If the dynamic linker was invoked as a program itself, AT_BASE
923 * will not be set. In that case, we assume the base address is
924 * the start of the page containing the PHDRs; I don't know any
925 * better approach... */
927 aux[AT_BASE] = aux[AT_PHDR] & -PAGE_SIZE;
928 aux[AT_PHDR] = aux[AT_PHENT] = aux[AT_PHNUM] = 0;
931 /* The dynamic linker load address is passed by the kernel
932 * in the AUX vector, so this is easy. */
933 lib->base = (void *)aux[AT_BASE];
934 lib->name = lib->shortname = "libc.so";
936 ehdr = (void *)lib->base;
937 lib->phnum = ehdr->e_phnum;
938 lib->phdr = (void *)(aux[AT_BASE]+ehdr->e_phoff);
939 find_map_range(lib->phdr, ehdr->e_phnum, ehdr->e_phentsize, lib);
940 lib->dynv = (void *)(lib->base + find_dyn(lib->phdr,
941 ehdr->e_phnum, ehdr->e_phentsize));
945 size_t interp_off = 0;
946 size_t tls_image = 0;
947 /* Find load address of the main program, via AT_PHDR vs PT_PHDR. */
948 app->phdr = phdr = (void *)aux[AT_PHDR];
949 app->phnum = aux[AT_PHNUM];
950 for (i=aux[AT_PHNUM]; i; i--, phdr=(void *)((char *)phdr + aux[AT_PHENT])) {
951 if (phdr->p_type == PT_PHDR)
952 app->base = (void *)(aux[AT_PHDR] - phdr->p_vaddr);
953 else if (phdr->p_type == PT_INTERP)
954 interp_off = (size_t)phdr->p_vaddr;
955 else if (phdr->p_type == PT_TLS) {
956 tls_image = phdr->p_vaddr;
957 app->tls_len = phdr->p_filesz;
958 app->tls_size = phdr->p_memsz;
959 app->tls_align = phdr->p_align;
962 if (app->tls_size) app->tls_image = (char *)app->base + tls_image;
963 if (interp_off) lib->name = (char *)app->base + interp_off;
965 app->dynv = (void *)(app->base + find_dyn(
966 (void *)aux[AT_PHDR], aux[AT_PHNUM], aux[AT_PHENT]));
967 find_map_range((void *)aux[AT_PHDR],
968 aux[AT_PHNUM], aux[AT_PHENT], app);
/* Invoked directly (or as "ldd"): consume our own argv slots and
 * load the target program from its pathname. */
971 char *ldname = argv[0];
972 size_t l = strlen(ldname);
973 if (l >= 3 && !strcmp(ldname+l-3, "ldd")) ldd_mode = 1;
974 *argv++ = (void *)-1;
975 if (argv[0] && !strcmp(argv[0], "--")) *argv++ = (void *)-1;
977 dprintf(2, "musl libc/dynamic program loader\n");
978 dprintf(2, "usage: %s pathname%s\n", ldname,
979 ldd_mode ? "" : " [args]");
982 fd = open(argv[0], O_RDONLY);
984 dprintf(2, "%s: cannot load %s: %s\n", ldname, argv[0], strerror(errno));
988 ehdr = (void *)map_library(fd, app);
990 dprintf(2, "%s: %s: Not a valid dynamic program\n", ldname, argv[0]);
997 aux[AT_ENTRY] = (size_t)app->base + ehdr->e_entry;
998 /* Find the name that would have been used for the dynamic
999 * linker had ldd not taken its place. */
1001 for (i=0; i<app->phnum; i++) {
1002 if (app->phdr[i].p_type == PT_INTERP)
1003 lib->name = (void *)(app->base
1004 + app->phdr[i].p_vaddr);
1006 dprintf(1, "\t%s (%p)\n", lib->name, lib->base);
/* The main program always gets TLS module id 1; the two offset
 * formulas correspond to the TLS-variant #ifdef branches (elided). */
1009 if (app->tls_size) {
1010 app->tls_id = tls_cnt = 1;
1012 app->tls_offset = 0;
1013 tls_offset = app->tls_size
1014 + ( -((uintptr_t)app->tls_image + app->tls_size)
1015 & (app->tls_align-1) );
1017 tls_offset = app->tls_offset = app->tls_size
1018 + ( -((uintptr_t)app->tls_image + app->tls_size)
1019 & (app->tls_align-1) );
1021 tls_align = MAXP2(tls_align, app->tls_align);
1026 /* Attach to vdso, if provided by the kernel */
1027 if (search_vec(auxv, &vdso_base, AT_SYSINFO_EHDR)) {
1028 ehdr = (void *)vdso_base;
1029 vdso->phdr = phdr = (void *)(vdso_base + ehdr->e_phoff);
1030 vdso->phnum = ehdr->e_phnum;
1031 for (i=ehdr->e_phnum; i; i--, phdr=(void *)((char *)phdr + ehdr->e_phentsize)) {
1032 if (phdr->p_type == PT_DYNAMIC)
1033 vdso->dynv = (void *)(vdso_base + phdr->p_offset);
1034 if (phdr->p_type == PT_LOAD)
1035 vdso->base = (void *)(vdso_base - phdr->p_vaddr + phdr->p_offset);
1038 vdso->shortname = "linux-gate.so.1";
1045 /* Initial dso chain consists only of the app. We temporarily
1046 * append the dynamic linker/libc so we can relocate it, then
1047 * restore the initial chain in preparation for loading third
1048 * party libraries (preload/needed). */
1055 /* PAST THIS POINT, ALL LIBC INTERFACES ARE FULLY USABLE. */
1057 /* Donate unused parts of app and library mapping to malloc */
1058 reclaim_gaps(app->base, (void *)aux[AT_PHDR], aux[AT_PHENT], aux[AT_PHNUM]);
1059 ehdr = (void *)lib->base;
1060 reclaim_gaps(lib->base, (void *)(lib->base+ehdr->e_phoff),
1061 ehdr->e_phentsize, ehdr->e_phnum);
1063 /* Load preload/needed libraries, add their symbols to the global
1064 * namespace, and perform all remaining relocations. The main
1065 * program must be relocated LAST since it may contain copy
1066 * relocations which depend on libraries' relocations. */
1067 if (env_preload) load_preload(env_preload);
1071 reloc_all(app->next);
/* Allocate and install the initial thread's static TLS. */
1076 void *mem = mmap(0, libc.tls_size, PROT_READ|PROT_WRITE,
1077 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
1078 if (mem==MAP_FAILED ||
1079 !__install_initial_tls(__copy_tls(mem))) {
1080 dprintf(2, "%s: Error getting %zu bytes thread-local storage: %m\n",
1081 argv[0], libc.tls_size);
1086 if (ldso_fail) _exit(127);
1087 if (ldd_mode) _exit(0);
1089 /* Switch to runtime mode: any further failures in the dynamic
1090 * linker are a reportable failure rather than a fatal startup
1091 * error. If the dynamic loader (dlopen) will not be used, free
1092 * all memory used by the dynamic linker. */
1095 #ifndef DYNAMIC_IS_RO
1096 for (i=0; app->dynv[i]; i+=2)
1097 if (app->dynv[i]==DT_DEBUG)
1098 app->dynv[i+1] = (size_t)&debug;
1101 debug.bp = _dl_debug_state;
1103 debug.base = lib->base;
1107 if (ssp_used) __init_ssp((void *)aux[AT_RANDOM]);
1108 __init_libc(envp, argv[0]);
1113 return (void *)aux[AT_ENTRY];
/* Runtime library loading.  NULL file returns the program handle
 * (head).  Takes the writer lock with cancellation disabled, snapshots
 * the TLS layout so a failed load can be rolled back, and installs a
 * setjmp target (rtld_fail) that undoes partially-loaded DSOs —
 * unmapping them and restoring tls_cnt/offset/align — before
 * reporting.  On success, applies RTLD_GLOBAL/local visibility via the
 * global flag (-1 marks "temporarily global during this load"), then
 * runs constructors outside the lock.  p is volatile because it is
 * read after longjmp.
 * NOTE(review): many interior lines (return paths, relocation and
 * update_tls_size calls, list surgery) are elided in this excerpt. */
1116 void *dlopen(const char *file, int mode)
1118 struct dso *volatile p, *orig_tail, *next;
1119 size_t orig_tls_cnt, orig_tls_offset, orig_tls_align;
1124 if (!file) return head;
1126 pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
1127 pthread_rwlock_wrlock(&lock);
1131 orig_tls_cnt = tls_cnt;
1132 orig_tls_offset = tls_offset;
1133 orig_tls_align = tls_align;
1135 noload = mode & RTLD_NOLOAD;
1138 if (setjmp(*rtld_fail)) {
1139 /* Clean up anything new that was (partially) loaded */
1140 if (p && p->deps) for (i=0; p->deps[i]; i++)
1141 if (p->deps[i]->global < 0)
1142 p->deps[i]->global = 0;
1143 for (p=orig_tail->next; p; p=next) {
1145 munmap(p->map, p->map_len);
1149 tls_cnt = orig_tls_cnt;
1150 tls_offset = orig_tls_offset;
1151 tls_align = orig_tls_align;
1157 } else p = load_library(file, 0);
1160 snprintf(errbuf, sizeof errbuf, noload ?
1161 "Library %s is not already loaded" :
1162 "Error loading shared library %s: %m",
1168 /* First load handling */
/* Temporarily mark p and its deps global (-1) so relocation can
 * resolve among them; demote back to 0 afterward unless RTLD_GLOBAL. */
1171 if (p->deps) for (i=0; p->deps[i]; i++)
1172 if (!p->deps[i]->global)
1173 p->deps[i]->global = -1;
1174 if (!p->global) p->global = -1;
1176 if (p->deps) for (i=0; p->deps[i]; i++)
1177 if (p->deps[i]->global < 0)
1178 p->deps[i]->global = 0;
1179 if (p->global < 0) p->global = 0;
1182 if (mode & RTLD_GLOBAL) {
1183 if (p->deps) for (i=0; p->deps[i]; i++)
1184 p->deps[i]->global = 1;
1190 if (ssp_used) __init_ssp(libc.auxv);
1197 pthread_rwlock_unlock(&lock);
1198 if (p) do_init_fini(orig_tail);
1199 pthread_setcancelstate(cs, 0);
/* Return 0 if h is a live DSO handle (present in the chain); otherwise
 * record an error message.  (The error-flag set and nonzero return are
 * elided in this excerpt.) */
1203 static int invalid_dso_handle(void *h)
1206 for (p=head; p; p=p->next) if (h==p) return 0;
1207 snprintf(errbuf, sizeof errbuf, "Invalid library handle %p", (void *)h);
/* Core of dlsym.  For the global handle / RTLD_DEFAULT, search the
 * whole global namespace with find_sym; for RTLD_NEXT, first locate
 * the DSO containing the caller's return address ra, then search from
 * its successor.  For an explicit handle, look in that DSO and then
 * linearly through its recorded dependencies.  TLS symbols are
 * resolved to a per-thread address via __tls_get_addr.
 * NOTE(review): several lines (handle-validation goto, failure
 * return/flag) are elided in this excerpt. */
1212 static void *do_dlsym(struct dso *p, const char *s, void *ra)
1215 uint32_t h = 0, gh = 0;
1217 if (p == head || p == RTLD_DEFAULT || p == RTLD_NEXT) {
1218 if (p == RTLD_DEFAULT) {
1220 } else if (p == RTLD_NEXT) {
1221 for (p=head; p && (unsigned char *)ra-p->map>p->map_len; p=p->next);
1225 struct symdef def = find_sym(p, s, 0);
1226 if (!def.sym) goto failed;
1227 if ((def.sym->st_info&0xf) == STT_TLS)
1228 return __tls_get_addr((size_t []){def.dso->tls_id, def.sym->st_value});
1229 return def.dso->base + def.sym->st_value;
1231 if (p != RTLD_DEFAULT && p != RTLD_NEXT && invalid_dso_handle(p))
1235 sym = gnu_lookup(s, gh, p);
1238 sym = sysv_lookup(s, h, p);
1240 if (sym && (sym->st_info&0xf) == STT_TLS)
1241 return __tls_get_addr((size_t []){p->tls_id, sym->st_value});
1242 if (sym && sym->st_value && (1<<(sym->st_info&0xf) & OK_TYPES))
1243 return p->base + sym->st_value;
1244 if (p->deps) for (i=0; p->deps[i]; i++) {
1245 if (p->deps[i]->ghashtab) {
1246 if (!gh) gh = gnu_hash(s);
1247 sym = gnu_lookup(s, gh, p->deps[i]);
1249 if (!h) h = sysv_hash(s);
1250 sym = sysv_lookup(s, h, p->deps[i]);
1252 if (sym && (sym->st_info&0xf) == STT_TLS)
1253 return __tls_get_addr((size_t []){p->deps[i]->tls_id, sym->st_value});
1254 if (sym && sym->st_value && (1<<(sym->st_info&0xf) & OK_TYPES))
1255 return p->deps[i]->base + sym->st_value;
1259 snprintf(errbuf, sizeof errbuf, "Symbol not found: %s", s);
/* dladdr backend: find the DSO whose mapping contains addr (under the
 * read lock), then scan its dynamic symbol table for the greatest
 * symbol address not exceeding addr.  The symbol count comes from the
 * SysV hash table when present; otherwise it is reconstructed from
 * the GNU hash table by walking the highest bucket's chain to its
 * end-marker (low bit set).  Fills in Dl_info on success.
 * NOTE(review): several lines (the no-DSO early return, loop details,
 * success return) are elided in this excerpt. */
1263 int __dladdr(void *addr, Dl_info *info)
1273 pthread_rwlock_rdlock(&lock);
1274 for (p=head; p && (unsigned char *)addr-p->map>p->map_len; p=p->next);
1275 pthread_rwlock_unlock(&lock);
1280 strings = p->strings;
1282 nsym = p->hashtab[1];
1286 buckets = p->ghashtab + 4 + (p->ghashtab[2]*sizeof(size_t)/4);
1287 sym += p->ghashtab[1];
1288 for (i = 0; i < p->ghashtab[0]; i++) {
1289 if (buckets[i] > nsym)
1293 nsym -= p->ghashtab[1];
1294 hashval = buckets + p->ghashtab[0] + nsym;
1296 while (!(*hashval++ & 1));
1300 for (; nsym; nsym--, sym++) {
1302 && (1<<(sym->st_info&0xf) & OK_TYPES)
1303 && (1<<(sym->st_info>>4) & OK_BINDS)) {
1304 void *symaddr = p->base + sym->st_value;
1305 if (symaddr > addr || symaddr < best)
1308 bestname = strings + sym->st_name;
1309 if (addr == symaddr)
1314 if (!best) return 0;
1316 info->dli_fname = p->name;
1317 info->dli_fbase = p->base;
1318 info->dli_sname = bestname;
1319 info->dli_saddr = best;
/* Public dlsym entry: serialize against loading via the read lock and
 * delegate to do_dlsym (ra is the caller's return address, used for
 * RTLD_NEXT).  The result return line is elided in this excerpt. */
1324 void *__dlsym(void *restrict p, const char *restrict s, void *restrict ra)
1327 pthread_rwlock_rdlock(&lock);
1328 res = do_dlsym(p, s, ra);
1329 pthread_rwlock_unlock(&lock);
/* Invoke callback for each loaded DSO with its base, name, phdrs and
 * TLS info, stopping early on a nonzero return.  The lock is held only
 * while advancing to the next chain entry, not during the callback
 * (so the callback itself may use dl functions).  dlpi_adds reports
 * the load generation counter; dlpi_subs (elided here) presumably
 * mirrors it.  The final return line is elided in this excerpt. */
1333 int dl_iterate_phdr(int(*callback)(struct dl_phdr_info *info, size_t size, void *data), void *data)
1335 struct dso *current;
1336 struct dl_phdr_info info;
1338 for(current = head; current;) {
1339 info.dlpi_addr = (uintptr_t)current->base;
1340 info.dlpi_name = current->name;
1341 info.dlpi_phdr = current->phdr;
1342 info.dlpi_phnum = current->phnum;
1343 info.dlpi_adds = gencnt;
1345 info.dlpi_tls_modid = current->tls_id;
1346 info.dlpi_tls_data = current->tls_image;
1348 ret = (callback)(&info, sizeof (info), data);
1350 if (ret != 0) break;
1352 pthread_rwlock_rdlock(&lock);
1353 current = current->next;
1354 pthread_rwlock_unlock(&lock);
/* Static-linked (no-dlopen) stub: every handle is invalid; just record
 * the error message.  (The return line is elided in this excerpt.) */
1359 static int invalid_dso_handle(void *h)
1361 snprintf(errbuf, sizeof errbuf, "Invalid library handle %p", (void *)h);
1365 void *dlopen(const char *file, int mode)
1369 void *__dlsym(void *restrict p, const char *restrict s, void *restrict ra)
1373 int __dladdr (void *addr, Dl_info *info)
/* dlinfo backend: only RTLD_DI_LINKMAP is supported — store the dso
 * handle (which doubles as its link_map) through res.  Unsupported
 * requests and bad handles record an error and fail.  (Locking and the
 * error/success return lines are elided in this excerpt.) */
1379 int __dlinfo(void *dso, int req, void *res)
1381 if (invalid_dso_handle(dso)) return -1;
1382 if (req != RTLD_DI_LINKMAP) {
1383 snprintf(errbuf, sizeof errbuf, "Unsupported request %d", req);
1387 *(struct link_map **)res = dso;
1393 if (!errflag) return 0;
/* Libraries are never actually unloaded: dlclose only validates the
 * handle (returning 0 for a valid one, nonzero otherwise), which is a
 * POSIX-conforming implementation. */
1398 int dlclose(void *p)
1400 return invalid_dso_handle(p);