20 #include "pthread_impl.h"
/* Static buffer used to format error text (consumed by dlerror-style
 * reporting; see the snprintf calls throughout this file). */
25 static char errbuf[128];
/* Select 32- or 64-bit ELF structure flavors by native word width.
 * NOTE(review): the #else/#endif lines of this conditional are not
 * visible in this extract. */
29 #if ULONG_MAX == 0xffffffff
30 typedef Elf32_Ehdr Ehdr;
31 typedef Elf32_Phdr Phdr;
32 typedef Elf32_Sym Sym;
/* ELF32 r_info packing: low 8 bits = reloc type, high 24 = symbol index. */
33 #define R_TYPE(x) ((x)&255)
34 #define R_SYM(x) ((x)>>8)
36 typedef Elf64_Ehdr Ehdr;
37 typedef Elf64_Phdr Phdr;
38 typedef Elf64_Sym Sym;
/* ELF64 r_info packing: low 32 bits = reloc type, high 32 = symbol index. */
39 #define R_TYPE(x) ((x)&0xffffffff)
40 #define R_SYM(x) ((x)>>32)
/* For powers of two a and b, MAXP2 yields the larger: -a has all bits
 * >= log2(a) set, so ANDing the negations and negating again gives
 * max(a,b).  ALIGN rounds x up to a multiple of the power-of-two y;
 * note & binds looser than +, so it parses as (((x)+(y)-1) & -(y)). */
43 #define MAXP2(a,b) (-(-(a)&-(b)))
44 #define ALIGN(x,y) ((x)+(y)-1 & -(y))
/* Fragment of the struct dso definition; the struct header and many
 * fields fall outside this extract. */
/* Chain pointers for the global DSO list (see head/tail globals). */
58 struct dso *next, *prev;
/* TLS bookkeeping: initialized-image length (p_filesz), total block
 * size (p_memsz), alignment, 1-based module id, and this module's
 * offset within the static TLS area. */
74 size_t tls_len, tls_size, tls_align, tls_id, tls_offset;
/* Storage reserved at load time so threads that already existed when
 * this DSO was dlopen'd can lazily obtain TLS blocks/DTVs for it. */
76 unsigned char *new_tls;
/* Reservation counters, bumped with a_fetch_add in __tls_get_addr. */
77 int new_dtv_idx, new_tls_idx;
/* Next DSO on the fini_head chain consumed by do_fini(). */
78 struct dso *fini_next;
/* Stack-protector and initial-TLS bootstrap hooks defined elsewhere in libc. */
90 void __init_ssp(size_t *);
91 void *__install_initial_tls(void *);
/* Global DSO list endpoints, the libc entry, and the chain of DSOs
 * that registered a DT_FINI destructor. */
93 static struct dso *head, *tail, *libc, *fini_head;
/* Library search paths: LD_LIBRARY_PATH, the system path file
 * contents, and the current DT_RPATH (set while loading deps). */
94 static char *env_path, *sys_path, *r_path;
/* Recovery point for failures during runtime (dlopen-time) loading. */
99 static jmp_buf rtld_fail;
/* Protects the DSO list: dlopen takes it for writing, dlsym/dladdr
 * for reading. */
100 static pthread_rwlock_t lock;
101 static struct debug debug;
/* Static-TLS layout accumulators; minimum alignment is 4 words. */
103 static size_t tls_cnt, tls_offset, tls_start, tls_align = 4*sizeof(size_t);
/* Recursive so a constructor may call dlopen and re-enter do_init_fini. */
104 static pthread_mutex_t init_fini_lock = { ._m_type = PTHREAD_MUTEX_RECURSIVE };
/* Published address of the debug struct for debuggers to find. */
106 struct debug *_dl_debug_addr = &debug;
/* Decode a tag/value vector (a PT_DYNAMIC section or the aux vector,
 * terminated by a zero tag) into a flat array: a[tag] = value for each
 * tag < cnt, with a[0] doubling as a presence bitmask of seen tags.
 * Tags >= cnt are ignored.
 *
 * Fix: the presence bit is computed with a size_t-width shift.  A
 * plain int `1<<tag` would be undefined behavior for tag >= 31 and
 * would lose bits on 64-bit targets where size_t is wider than int. */
static void decode_vec(size_t *v, size_t *a, size_t cnt)
{
	memset(a, 0, cnt*sizeof(size_t));
	for (; v[0]; v+=2) if (v[0]<cnt) {
		a[0] |= (size_t)1<<v[0];
		a[v[0]] = v[1];
	}
}
/* Scan a zero-tag-terminated tag/value vector for `key`.  On a hit,
 * store the associated value in *r and return 1; otherwise return 0
 * and leave *r untouched. */
static int search_vec(size_t *v, size_t *r, size_t key)
{
	while (v[0] != key) {
		if (!v[0]) return 0;
		v += 2;
	}
	*r = v[1];
	return 1;
}
/* Classic SysV ELF symbol hash (ELF gABI), written in the compact
 * overflow-folding form that avoids the gABI's separate `g` temporary;
 * the final mask keeps the standard 28 significant bits. */
static uint32_t sysv_hash(const char *s0)
{
	const unsigned char *p = (void *)s0;
	uint_fast32_t h = 0;
	for (; *p; p++) {
		h = 16*h + *p;
		h ^= h>>24 & 0xf0;
	}
	return h & 0xfffffff;
}
/* GNU hash used by DT_GNU_HASH sections: djb2 (h = h*33 + c, seeded
 * with 5381), truncated to 32 bits on return. */
static uint32_t gnu_hash(const char *s0)
{
	const unsigned char *p = (void *)s0;
	uint_fast32_t h = 5381;
	while (*p)
		h = h*33 + *p++;
	return h & 0xffffffff;
}
148 static Sym *sysv_lookup(const char *s, uint32_t h, struct dso *dso)
151 Sym *syms = dso->syms;
152 uint32_t *hashtab = dso->hashtab;
153 char *strings = dso->strings;
154 for (i=hashtab[2+h%hashtab[0]]; i; i=hashtab[2+hashtab[0]+i]) {
155 if (!strcmp(s, strings+syms[i].st_name))
/* GNU-hash lookup of symbol s (precomputed gnu_hash in h1) in one DSO.
 * NOTE(review): several lines of this function (declarations, the
 * bloom-filter step, chain termination, returns) are missing from this
 * extract; comments describe only the visible code. */
161 static Sym *gnu_lookup(const char *s, uint32_t h1, struct dso *dso)
165 uint32_t *hashtab = dso->ghashtab;
/* Header words: [0]=nbuckets, [1]=symbol offset, [2]=bloom word count
 * (hence the multiplication by size_t words expressed in u32 units). */
166 uint32_t nbuckets = hashtab[0];
167 uint32_t *buckets = hashtab + 4 + hashtab[2]*(sizeof(size_t)/4);
168 uint32_t n = buckets[h1 % nbuckets];
174 strings = dso->strings;
/* The hash-value array parallels the symbol table starting at symbol
 * hashtab[1], so index it by (n - hashtab[1]). */
176 hashval = buckets + nbuckets + (n - hashtab[1]);
/* Stored hashes use their low bit as an end-of-chain marker, so force
 * it on in h1 and compare against (h2|1). */
178 for (h1 |= 1; ; sym++) {
180 if ((h1 == (h2|1)) && !strcmp(s, strings + sym->st_name))
/* Symbol types and bindings accepted during resolution. */
188 #define OK_TYPES (1<<STT_NOTYPE | 1<<STT_OBJECT | 1<<STT_FUNC | 1<<STT_COMMON | 1<<STT_TLS)
189 #define OK_BINDS (1<<STB_GLOBAL | 1<<STB_WEAK)
/* Resolve symbol s against dso and every DSO after it on the list,
 * preferring GNU hash when a DSO has one.  When need_def is set,
 * entries with st_shndx==0 (undefined) are rejected.  NOTE(review):
 * some lines of this function are not visible in this extract. */
191 static struct symdef find_sym(struct dso *dso, const char *s, int need_def)
193 uint32_t h = 0, gh = 0;
194 struct symdef def = {0};
/* Record stack-protector use by matching the precomputed hash first
 * and only then the (more expensive) string compare. */
197 if (gh == 0x1f4039c9 && !strcmp(s, "__stack_chk_fail")) ssp_used = 1;
200 if (h == 0x595a4cc && !strcmp(s, "__stack_chk_fail")) ssp_used = 1;
202 for (; dso; dso=dso->next) {
/* Only DSOs in the global namespace participate in resolution. */
204 if (!dso->global) continue;
/* Hashes are computed lazily, at most once each per lookup. */
206 if (!gh) gh = gnu_hash(s);
207 sym = gnu_lookup(s, gh, dso);
209 if (!h) h = sysv_hash(s);
210 sym = sysv_lookup(s, h, dso);
/* Accept only nonzero-value symbols of sane type and binding (and,
 * if need_def, only defined ones). */
212 if (sym && (!need_def || sym->st_shndx) && sym->st_value
213 && (1<<(sym->st_info&0xf) & OK_TYPES)
214 && (1<<(sym->st_info>>4) & OK_BINDS)) {
/* A weak definition never replaces one already found... */
215 if (def.sym && sym->st_info>>4 == STB_WEAK) continue;
/* ...and a strong (global) definition ends the search. */
218 if (sym->st_info>>4 == STB_GLOBAL) break;
/* Apply one relocation table for dso: REL entries are 2 words wide,
 * RELA entries 3 (stride selects which).  NOTE(review): declarations,
 * some braces, and part of the error path are not visible in this
 * extract. */
224 static void do_relocs(struct dso *dso, size_t *rel, size_t rel_size, size_t stride)
226 unsigned char *base = dso->base;
227 Sym *syms = dso->syms;
228 char *strings = dso->strings;
236 for (; rel_size; rel+=stride, rel_size-=stride*sizeof(size_t)) {
/* rel[1] is r_info: packed relocation type and symbol index. */
237 type = R_TYPE(rel[1]);
238 sym_index = R_SYM(rel[1]);
240 sym = syms + sym_index;
241 name = strings + sym->st_name;
/* COPY relocations must resolve in a later DSO, never in the app
 * itself, so their search starts at head->next. */
242 ctx = IS_COPY(type) ? head->next : head;
243 def = find_sym(ctx, name, IS_PLT(type));
/* Unresolved non-weak symbol: recoverable (longjmp) at dlopen time,
 * reported and fatal at startup. */
244 if (!def.sym && sym->st_info>>4 != STB_WEAK) {
245 snprintf(errbuf, sizeof errbuf,
246 "Error relocating %s: %s: symbol not found",
248 if (runtime) longjmp(rtld_fail, 1);
249 dprintf(2, "%s\n", errbuf);
/* rel[0] is the reloc target offset; rel[2] the RELA addend (0 for REL). */
258 do_single_reloc(dso, base, (void *)(base + rel[0]), type,
259 stride>2 ? rel[2] : 0, sym, sym?sym->st_size:0, def,
260 def.sym?(size_t)(def.dso->base+def.sym->st_value):0);
264 /* A huge hack: to make up for the wastefulness of shared libraries
265 * needing at least a page of dirty memory even if they have no global
266 * data, we reclaim the gaps at the beginning and end of writable maps
267 * and "donate" them to the heap by setting up minimal malloc
268 * structures and then freeing them. */
/* NOTE(review): the local declarations, the remaining fake chunk-header
 * stores, and the final free() are not visible in this extract.  The
 * arithmetic below assumes musl's internal malloc chunk format —
 * confirm against malloc internals before modifying. */
270 static void reclaim(unsigned char *base, size_t start, size_t end)
/* Round start up (leaving room for header words) and end down, both to
 * 4-word granularity; & binds looser than +/- here. */
273 start = start + 6*sizeof(size_t)-1 & -4*sizeof(size_t);
274 end = (end & -4*sizeof(size_t)) - 2*sizeof(size_t);
/* Gap too small to be worth donating (or inverted after rounding). */
275 if (start>end || end-start < 4*sizeof(size_t)) return;
276 a = (size_t *)(base + start);
277 z = (size_t *)(base + end);
/* Fake chunk size words at both ends; the low bit presumably marks the
 * chunk in-use so free() will accept it — TODO confirm. */
279 a[-1] = z[0] = end-start + 2*sizeof(size_t) | 1;
284 static void reclaim_gaps(unsigned char *base, Phdr *ph, size_t phent, size_t phcnt)
286 for (; phcnt--; ph=(void *)((char *)ph+phent)) {
287 if (ph->p_type!=PT_LOAD) continue;
288 if ((ph->p_flags&(PF_R|PF_W))!=(PF_R|PF_W)) continue;
289 reclaim(base, ph->p_vaddr & -PAGE_SIZE, ph->p_vaddr);
290 reclaim(base, ph->p_vaddr+ph->p_memsz,
291 ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE);
/* mmap an ELF shared object from fd, filling in dso's base/map/TLS
 * fields.  Returns the mapping (or 0 on failure).  NOTE(review): many
 * lines — declarations, magic-number checks, error labels, the final
 * return — are missing from this extract. */
295 static void *map_library(int fd, struct dso *dso)
/* Scratch buffer: the ehdr plus room for ~896 bytes of phdrs. */
297 Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
299 size_t addr_min=SIZE_MAX, addr_max=0, map_len;
300 size_t this_min, this_max;
305 unsigned char *map, *base;
310 ssize_t l = read(fd, buf, sizeof buf);
311 if (l<sizeof *eh) return 0;
313 phsize = eh->e_phentsize * eh->e_phnum;
314 if (phsize + sizeof *eh > l) return 0;
/* Program headers beyond the first read: fetch them explicitly and
 * stash them right after the ehdr in the scratch buffer. */
315 if (eh->e_phoff + phsize > l) {
316 l = pread(fd, buf+1, phsize, eh->e_phoff);
317 if (l != phsize) return 0;
318 eh->e_phoff = sizeof *eh;
/* Pass 1: record PT_DYNAMIC and PT_TLS, and compute the PT_LOAD
 * address range to reserve. */
320 ph = (void *)((char *)buf + eh->e_phoff);
321 for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
322 if (ph->p_type == PT_DYNAMIC)
324 if (ph->p_type == PT_TLS) {
325 tls_image = ph->p_vaddr;
326 dso->tls_align = ph->p_align;
327 dso->tls_len = ph->p_filesz;
328 dso->tls_size = ph->p_memsz;
330 if (ph->p_type != PT_LOAD) continue;
/* The lowest-address segment supplies the file offset and prot of
 * the initial reservation mapping. */
331 if (ph->p_vaddr < addr_min) {
332 addr_min = ph->p_vaddr;
333 off_start = ph->p_offset;
334 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
335 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
336 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
338 if (ph->p_vaddr+ph->p_memsz > addr_max) {
339 addr_max = ph->p_vaddr+ph->p_memsz;
/* Page-align the whole range (& binds looser than + here). */
343 addr_max += PAGE_SIZE-1;
344 addr_max &= -PAGE_SIZE;
345 addr_min &= -PAGE_SIZE;
346 off_start &= -PAGE_SIZE;
347 map_len = addr_max - addr_min + off_start;
348 /* The first time, we map too much, possibly even more than
349 * the length of the file. This is okay because we will not
350 * use the invalid part; we just need to reserve the right
351 * amount of virtual address space to map over later. */
352 map = mmap((void *)addr_min, map_len, prot, MAP_PRIVATE, fd, off_start);
353 if (map==MAP_FAILED) return 0;
/* base translates the object's virtual addresses to real ones. */
354 base = map - addr_min;
/* Pass 2: map each remaining PT_LOAD over the reservation. */
355 ph = (void *)((char *)buf + eh->e_phoff);
356 for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
357 if (ph->p_type != PT_LOAD) continue;
358 /* Reuse the existing mapping for the lowest-address LOAD */
359 if ((ph->p_vaddr & -PAGE_SIZE) == addr_min) continue;
360 this_min = ph->p_vaddr & -PAGE_SIZE;
361 this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
362 off_start = ph->p_offset & -PAGE_SIZE;
363 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
364 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
365 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
366 if (mmap(base+this_min, this_max-this_min, prot, MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
/* BSS: zero the tail of the last file-backed page, then map anonymous
 * zero pages for whatever of p_memsz remains. */
368 if (ph->p_memsz > ph->p_filesz) {
369 size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
370 size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
371 memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
372 if (pgbrk-(size_t)base < this_max && mmap((void *)pgbrk, (size_t)base+this_max-pgbrk, prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
/* DT_TEXTREL objects patch their own text: make everything writable. */
376 for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
377 if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
378 if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC) < 0)
/* At startup only, donate unused page tails to malloc. */
382 if (!runtime) reclaim_gaps(base, (void *)((char *)buf + eh->e_phoff),
383 eh->e_phentsize, eh->e_phnum);
385 dso->map_len = map_len;
387 dso->dynv = (void *)(base+dyn);
388 if (dso->tls_size) dso->tls_image = (void *)(base+tls_image);
/* Error path: release the whole reservation. */
391 munmap(map, map_len);
395 static int path_open(const char *name, const char *search, char *buf, size_t buf_size)
397 const char *s=search, *z;
403 l = z ? z-s : strlen(s);
404 snprintf(buf, buf_size, "%.*s/%s", l, s, name);
405 if ((fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) return fd;
410 static void decode_dyn(struct dso *p)
412 size_t dyn[DYN_CNT] = {0};
413 decode_vec(p->dynv, dyn, DYN_CNT);
414 p->syms = (void *)(p->base + dyn[DT_SYMTAB]);
415 p->strings = (void *)(p->base + dyn[DT_STRTAB]);
416 if (dyn[0]&(1<<DT_HASH))
417 p->hashtab = (void *)(p->base + dyn[DT_HASH]);
418 if (search_vec(p->dynv, dyn, DT_GNU_HASH))
419 p->ghashtab = (void *)(p->base + *dyn);
/* Locate, open, map, and register a DSO by name or pathname, returning
 * the existing entry if the library is already loaded.  NOTE(review):
 * many lines of this function are not visible in this extract. */
422 static struct dso *load_library(const char *name)
424 char buf[2*NAME_MAX+2];
425 const char *pathname;
426 unsigned char *base, *map;
427 size_t dyno, map_len;
428 struct dso *p, temp_dso = {0};
434 /* Catch and block attempts to reload the implementation itself */
435 if (name[0]=='l' && name[1]=='i' && name[2]=='b') {
436 static const char *rp, reserved[] =
437 "c\0pthread\0rt\0m\0dl\0util\0xnet\0";
438 char *z = strchr(name, '.');
/* Compare the stem between "lib" and "." against each reserved name. */
441 for (rp=reserved; *rp && memcmp(name+3, rp, l-3); rp+=strlen(rp)+1);
446 tail = libc->next ? libc->next : libc;
/* Explicit pathname: open directly, no search. */
452 if (strchr(name, '/')) {
454 fd = open(name, O_RDONLY|O_CLOEXEC);
456 /* Search for the name to see if it's already loaded */
457 for (p=head->next; p; p=p->next) {
458 if (p->shortname && !strcmp(p->shortname, name)) {
463 if (strlen(name) > NAME_MAX) return 0;
/* Search order: DT_RPATH, then LD_LIBRARY_PATH, then the system path. */
465 if (r_path) fd = path_open(name, r_path, buf, sizeof buf);
466 if (fd < 0 && env_path) fd = path_open(name, env_path, buf, sizeof buf);
/* System path file is read lazily, once.  NOTE(review): this strips
 * the last character unconditionally — assumed trailing newline. */
469 FILE *f = fopen(ETC_LDSO_PATH, "rbe");
471 if (getline(&sys_path, (size_t[1]){0}, f) > 0)
472 sys_path[strlen(sys_path)-1]=0;
476 if (sys_path) fd = path_open(name, sys_path, buf, sizeof buf);
477 else fd = path_open(name, "/lib:/usr/local/lib:/usr/lib", buf, sizeof buf);
481 if (fd < 0) return 0;
482 if (fstat(fd, &st) < 0) {
/* Dedup by device/inode so one file loaded via two names shares a DSO. */
486 for (p=head->next; p; p=p->next) {
487 if (p->dev == st.st_dev && p->ino == st.st_ino) {
488 /* If this library was previously loaded with a
489 * pathname but a search found the same inode,
490 * setup its shortname so it can be found by name. */
491 if (!p->shortname && pathname != name)
492 p->shortname = strrchr(p->name, '/')+1;
498 map = map_library(fd, &temp_dso);
502 /* Allocate storage for the new DSO. When there is TLS, this
503 * storage must include a reservation for all pre-existing
504 * threads to obtain copies of both the new TLS, and an
505 * extended DTV capable of storing an additional slot for
506 * the newly-loaded DSO. */
507 alloc_size = sizeof *p + strlen(pathname) + 1;
508 if (runtime && temp_dso.tls_image) {
509 size_t per_th = temp_dso.tls_size + temp_dso.tls_align
510 + sizeof(void *) * (tls_cnt+3);
511 n_th = __libc.threads_minus_1 + 1;
/* Overflow guard before multiplying per-thread size by thread count. */
512 if (n_th > SSIZE_MAX / per_th) alloc_size = SIZE_MAX;
513 else alloc_size += n_th * per_th;
515 p = calloc(1, alloc_size);
/* Allocation failed: undo the mapping. */
517 munmap(map, map_len);
520 memcpy(p, &temp_dso, sizeof temp_dso);
526 strcpy(p->name, pathname);
527 /* Add a shortname only if name arg was not an explicit pathname. */
528 if (pathname != name) p->shortname = strrchr(p->name, '/')+1;
/* Assign a TLS module id and fold this module into the static-TLS
 * layout accumulators. */
530 p->tls_id = ++tls_cnt;
531 tls_align = MAXP2(tls_align, p->tls_align);
532 tls_offset += p->tls_size + p->tls_align - 1;
533 tls_offset -= (tls_offset + (uintptr_t)p->tls_image)
535 p->tls_offset = tls_offset;
/* Carve the pre-reserved DTV and TLS blocks for existing threads out
 * of the tail of the calloc'd block, word-aligned. */
536 p->new_dtv = (void *)(-sizeof(size_t) &
537 (uintptr_t)(p->name+strlen(p->name)+sizeof(size_t)));
538 p->new_tls = (void *)(p->new_dtv + n_th*(tls_cnt+1));
545 if (ldd_mode) dprintf(1, "\t%s => %s (%p)\n", name, pathname, base);
/* Load every DT_NEEDED dependency of p and, by continuing down the
 * list, of each DSO loaded along the way; record direct deps in
 * p->deps.  NOTE(review): declarations, the deps-array append, and
 * closing braces are not visible in this extract. */
550 static void load_deps(struct dso *p)
553 struct dso ***deps = &p->deps, **tmp, *dep;
554 for (; p; p=p->next) {
/* First scan: pick up DT_RPATH so it can direct the search for the
 * DT_NEEDED entries processed below. */
555 for (i=0; p->dynv[i]; i+=2) {
556 if (p->dynv[i] != DT_RPATH) continue;
557 r_path = (void *)(p->strings + p->dynv[i+1]);
559 for (i=0; p->dynv[i]; i+=2) {
560 if (p->dynv[i] != DT_NEEDED) continue;
561 dep = load_library(p->strings + p->dynv[i+1]);
/* Missing dependency: recoverable at dlopen time, fatal at startup. */
563 snprintf(errbuf, sizeof errbuf,
564 "Error loading shared library %s: %m (needed by %s)",
565 p->strings + p->dynv[i+1], p->name);
566 if (runtime) longjmp(rtld_fail, 1);
567 dprintf(2, "%s\n", errbuf);
/* Grow the deps array: one new slot plus the NULL terminator. */
572 tmp = realloc(*deps, sizeof(*tmp)*(ndeps+2));
573 if (!tmp) longjmp(rtld_fail, 1);
/* Tokenize an LD_PRELOAD-style string on whitespace; the two loops
 * isolate one name per iteration.  NOTE(review): the loading of each
 * token falls outside this extract. */
583 static void load_preload(char *s)
588 for ( ; *s && isspace(*s); s++);
589 for (z=s; *z && !isspace(*z); z++);
597 static void make_global(struct dso *p)
599 for (; p; p=p->next) p->global = 1;
/* Relocate every not-yet-relocated DSO from p to the end of the list:
 * arch-specific relocations first (where needed), then the PLT
 * (DT_JMPREL), REL, and RELA tables.  NOTE(review): the loop's closing
 * lines (marking p->relocated) are not visible in this extract. */
602 static void reloc_all(struct dso *p)
604 size_t dyn[DYN_CNT] = {0};
605 for (; p; p=p->next) {
606 if (p->relocated) continue;
607 decode_vec(p->dynv, dyn, DYN_CNT);
608 #ifdef NEED_ARCH_RELOCS
609 do_arch_relocs(p, head);
/* JMPREL entries are REL- or RELA-shaped depending on DT_PLTREL,
 * hence the 2- or 3-word stride. */
611 do_relocs(p, (void *)(p->base+dyn[DT_JMPREL]), dyn[DT_PLTRELSZ],
612 2+(dyn[DT_PLTREL]==DT_RELA));
613 do_relocs(p, (void *)(p->base+dyn[DT_REL]), dyn[DT_RELSZ], 2);
614 do_relocs(p, (void *)(p->base+dyn[DT_RELA]), dyn[DT_RELASZ], 3);
/* Free heap-allocated DSO records in a chain; the built-in libc and
 * head (app) entries are static and must not be freed.  NOTE(review):
 * the list-walking lines are not visible in this extract. */
619 static void free_all(struct dso *p)
624 if (p->map && p!=libc && p!=head) free(p);
629 static size_t find_dyn(Phdr *ph, size_t cnt, size_t stride)
631 for (; cnt--; ph = (void *)((char *)ph + stride))
632 if (ph->p_type == PT_DYNAMIC)
637 static void find_map_range(Phdr *ph, size_t cnt, size_t stride, struct dso *p)
639 size_t min_addr = -1, max_addr = 0;
640 for (; cnt--; ph = (void *)((char *)ph + stride)) {
641 if (ph->p_type != PT_LOAD) continue;
642 if (ph->p_vaddr < min_addr)
643 min_addr = ph->p_vaddr;
644 if (ph->p_vaddr+ph->p_memsz > max_addr)
645 max_addr = ph->p_vaddr+ph->p_memsz;
647 min_addr &= -PAGE_SIZE;
648 max_addr = (max_addr + PAGE_SIZE-1) & -PAGE_SIZE;
649 p->map = p->base + min_addr;
650 p->map_len = max_addr - min_addr;
/* Run DT_FINI destructors for every constructed DSO on the fini_head
 * chain (pushed by do_init_fini, so the most recently constructed DSO
 * runs first).  NOTE(review): declarations and closing braces are not
 * visible in this extract. */
653 static void do_fini()
656 size_t dyn[DYN_CNT] = {0};
657 for (p=fini_head; p; p=p->fini_next) {
658 if (!p->constructed) continue;
659 decode_vec(p->dynv, dyn, DYN_CNT);
660 ((void (*)(void))(p->base + dyn[DT_FINI]))();
/* Run DT_INIT constructors for p and everything before it on the list,
 * walking ->prev so that dependencies are constructed first; each DSO
 * with a DT_FINI is pushed onto the fini_head chain for do_fini().
 * NOTE(review): the lines marking p->constructed and completing the
 * chain push are not visible in this extract. */
664 static void do_init_fini(struct dso *p)
666 size_t dyn[DYN_CNT] = {0};
667 int need_locking = __libc.threads_minus_1;
668 /* Allow recursive calls that arise when a library calls
669 * dlopen from one of its constructors, but block any
670 * other threads until all ctors have finished. */
671 if (need_locking) pthread_mutex_lock(&init_fini_lock);
672 for (; p; p=p->prev) {
673 if (p->constructed) continue;
675 decode_vec(p->dynv, dyn, DYN_CNT);
676 if (dyn[0] & (1<<DT_FINI)) {
/* Register for do_fini(): most recently constructed runs first. */
677 p->fini_next = fini_head;
680 if (dyn[0] & (1<<DT_INIT))
681 ((void (*)(void))(p->base + dyn[DT_INIT]))();
683 if (need_locking) pthread_mutex_unlock(&init_fini_lock);
686 void _dl_debug_state(void)
/* Lay out a thread's DTV and static TLS inside the block at mem,
 * copying each DSO's TLS image into place, and return a pointer for
 * the thread structure.  NOTE(review): the final pointer computation
 * and some declarations are not visible in this extract. */
690 void *__copy_tls(unsigned char *mem)
695 if (!tls_cnt) return mem;
/* The DTV sits at the start of the block; slot 0 holds the count. */
697 void **dtv = (void *)mem;
698 dtv[0] = (void *)tls_cnt;
/* The pthread struct sits at the top of the area, aligned down. */
700 mem += __libc.tls_size - sizeof(struct pthread);
701 mem -= (uintptr_t)mem & (tls_align-1);
705 for (p=head; p; p=p->next) {
706 if (!p->tls_id) continue;
/* Offsets are subtracted from mem, i.e. TLS blocks live below the
 * thread pointer (variant-II-style layout, per the visible math). */
707 dtv[p->tls_id] = mem - p->tls_offset;
708 memcpy(dtv[p->tls_id], p->tls_image, p->tls_len);
/* Return the calling thread's address for the TLS object identified by
 * module id v[0] and offset v[1], lazily installing DTV entries and
 * TLS blocks for modules dlopen'd after this thread was created.
 * NOTE(review): signal-set declarations, the dtv-pointer publication,
 * and closing braces are not visible in this extract. */
714 void *__tls_get_addr(size_t *v)
716 pthread_t self = __pthread_self();
/* Fast path: this thread's DTV already covers the module. */
717 if (self->dtv && v[0]<=(size_t)self->dtv[0] && self->dtv[v[0]])
718 return (char *)self->dtv[v[0]]+v[1];
720 /* Block signals to make accessing new TLS async-signal-safe */
723 pthread_sigmask(SIG_BLOCK, &set, &set);
/* Re-check under blocked signals before doing the slow path. */
724 if (self->dtv && v[0]<=(size_t)self->dtv[0] && self->dtv[v[0]]) {
725 pthread_sigmask(SIG_SETMASK, &set, 0);
726 return (char *)self->dtv[v[0]]+v[1];
729 /* This is safe without any locks held because, if the caller
730 * is able to request the Nth entry of the DTV, the DSO list
731 * must be valid at least that far out and it was synchronized
732 * at program startup or by an already-completed call to dlopen. */
734 for (p=head; p->tls_id != v[0]; p=p->next);
736 /* Get new DTV space from new DSO if needed */
737 if (!self->dtv || v[0] > (size_t)self->dtv[0]) {
/* Claim one of the DTV slabs pre-reserved in load_library. */
738 void **newdtv = p->new_dtv +
739 (v[0]+1)*sizeof(void *)*a_fetch_add(&p->new_dtv_idx,1);
740 if (self->dtv) memcpy(newdtv, self->dtv,
741 ((size_t)self->dtv[0]+1) * sizeof(void *));
742 newdtv[0] = (void *)v[0];
746 /* Get new TLS memory from new DSO */
747 unsigned char *mem = p->new_tls +
748 (p->tls_size + p->tls_align) * a_fetch_add(&p->new_tls_idx,1);
/* Align the block to match the module image's alignment phase. */
749 mem += ((uintptr_t)p->tls_image - (uintptr_t)mem) & (p->tls_align-1);
750 self->dtv[v[0]] = mem;
751 memcpy(mem, p->tls_image, p->tls_len);
752 pthread_sigmask(SIG_SETMASK, &set, 0);
756 static void update_tls_size()
758 size_t below_tp = (1+tls_cnt) * sizeof(void *) + tls_offset;
759 size_t above_tp = sizeof(struct pthread) + tls_start + tls_align;
760 __libc.tls_size = ALIGN(below_tp + above_tp, tls_align);
/* Dynamic linker entry point: parse the environment and aux vector,
 * set up DSO records for the app, libc, and the vdso, load and
 * relocate all dependencies, set up TLS and debugger support, and
 * return the application entry point to jump to.  NOTE(review): a
 * large number of lines are missing from this extract; comments apply
 * only to visible code. */
763 void *__dynlink(int argc, char **argv)
765 size_t aux[AUX_CNT] = {0};
/* Statically allocated records for the three built-in DSOs. */
769 static struct dso builtin_dsos[3];
770 struct dso *const app = builtin_dsos+0;
771 struct dso *const lib = builtin_dsos+1;
772 struct dso *const vdso = builtin_dsos+2;
776 /* Find aux vector just past environ[] */
777 for (i=argc+1; argv[i]; i++)
778 if (!memcmp(argv[i], "LD_LIBRARY_PATH=", 16))
779 env_path = argv[i]+16;
780 else if (!memcmp(argv[i], "LD_PRELOAD=", 11))
781 env_preload = argv[i]+11;
782 auxv = (void *)(argv+i+1);
784 decode_vec(auxv, aux, AUX_CNT);
786 /* Only trust user/env if kernel says we're not suid/sgid */
/* 0x7800 = presence bits 11-14 in aux[0]'s tag mask, i.e. all four of
 * AT_UID/AT_EUID/AT_GID/AT_EGID must have been supplied. */
787 if ((aux[0]&0x7800)!=0x7800 || aux[AT_UID]!=aux[AT_EUID]
788 || aux[AT_GID]!=aux[AT_EGID] || aux[AT_SECURE]) {
793 /* If the dynamic linker was invoked as a program itself, AT_BASE
794 * will not be set. In that case, we assume the base address is
795 * the start of the page containing the PHDRs; I don't know any
796 * better approach... */
798 aux[AT_BASE] = aux[AT_PHDR] & -PAGE_SIZE;
799 aux[AT_PHDR] = aux[AT_PHENT] = aux[AT_PHNUM] = 0;
802 /* The dynamic linker load address is passed by the kernel
803 * in the AUX vector, so this is easy. */
804 lib->base = (void *)aux[AT_BASE];
805 lib->name = lib->shortname = "libc.so";
807 ehdr = (void *)lib->base;
808 find_map_range((void *)(aux[AT_BASE]+ehdr->e_phoff),
809 ehdr->e_phnum, ehdr->e_phentsize, lib);
810 lib->dynv = (void *)(lib->base + find_dyn(
811 (void *)(aux[AT_BASE]+ehdr->e_phoff),
812 ehdr->e_phnum, ehdr->e_phentsize));
816 size_t interp_off = 0;
817 size_t tls_image = 0;
818 /* Find load address of the main program, via AT_PHDR vs PT_PHDR. */
819 phdr = (void *)aux[AT_PHDR];
820 for (i=aux[AT_PHNUM]; i; i--, phdr=(void *)((char *)phdr + aux[AT_PHENT])) {
821 if (phdr->p_type == PT_PHDR)
822 app->base = (void *)(aux[AT_PHDR] - phdr->p_vaddr);
823 else if (phdr->p_type == PT_INTERP)
824 interp_off = (size_t)phdr->p_vaddr;
825 else if (phdr->p_type == PT_TLS) {
826 tls_image = phdr->p_vaddr;
827 app->tls_len = phdr->p_filesz;
828 app->tls_size = phdr->p_memsz;
829 app->tls_align = phdr->p_align;
832 if (app->tls_size) app->tls_image = (char *)app->base + tls_image;
/* Name libc after the app's PT_INTERP string when present. */
833 if (interp_off) lib->name = (char *)app->base + interp_off;
835 app->dynv = (void *)(app->base + find_dyn(
836 (void *)aux[AT_PHDR], aux[AT_PHNUM], aux[AT_PHENT]));
837 find_map_range((void *)aux[AT_PHDR],
838 aux[AT_PHNUM], aux[AT_PHENT], app);
/* Invoked directly (or as "ldd"): consume the loader's own argv. */
841 char *ldname = argv[0];
842 size_t dyno, l = strlen(ldname);
843 if (l >= 3 && !strcmp(ldname+l-3, "ldd")) ldd_mode = 1;
844 *argv++ = (void *)-1;
845 if (argv[0] && !strcmp(argv[0], "--")) *argv++ = (void *)-1;
847 dprintf(2, "musl libc/dynamic program loader\n");
848 dprintf(2, "usage: %s pathname%s\n", ldname,
849 ldd_mode ? "" : " [args]");
852 fd = open(argv[0], O_RDONLY);
854 dprintf(2, "%s: cannot load %s: %s\n", ldname, argv[0], strerror(errno));
858 ehdr = (void *)map_library(fd, app);
860 dprintf(2, "%s: %s: Not a valid dynamic program\n", ldname, argv[0]);
867 aux[AT_ENTRY] = ehdr->e_entry;
/* The app, if it has TLS, always gets module id 1 and sets the
 * initial static-TLS layout parameters. */
870 app->tls_id = tls_cnt = 1;
871 tls_offset = app->tls_offset = app->tls_size;
872 tls_start = -((uintptr_t)app->tls_image + app->tls_size)
873 & (app->tls_align-1);
874 tls_align = MAXP2(tls_align, app->tls_align);
/* The app's constructors run via the crt startup path, not here. */
877 app->constructed = 1;
880 /* Attach to vdso, if provided by the kernel */
881 if (search_vec(auxv, &vdso_base, AT_SYSINFO_EHDR)) {
882 ehdr = (void *)vdso_base;
883 phdr = (void *)(vdso_base + ehdr->e_phoff);
884 for (i=ehdr->e_phnum; i; i--, phdr=(void *)((char *)phdr + ehdr->e_phentsize)) {
885 if (phdr->p_type == PT_DYNAMIC)
886 vdso->dynv = (void *)(vdso_base + phdr->p_offset);
887 if (phdr->p_type == PT_LOAD)
888 vdso->base = (void *)(vdso_base - phdr->p_vaddr + phdr->p_offset);
890 vdso->name = vdso->shortname = "linux-gate.so.1";
897 /* Initial dso chain consists only of the app. We temporarily
898 * append the dynamic linker/libc so we can relocate it, then
899 * restore the initial chain in preparation for loading third
900 * party libraries (preload/needed). */
907 /* PAST THIS POINT, ALL LIBC INTERFACES ARE FULLY USABLE. */
909 /* Donate unused parts of app and library mapping to malloc */
910 reclaim_gaps(app->base, (void *)aux[AT_PHDR], aux[AT_PHENT], aux[AT_PHNUM]);
911 ehdr = (void *)lib->base;
912 reclaim_gaps(lib->base, (void *)(lib->base+ehdr->e_phoff),
913 ehdr->e_phentsize, ehdr->e_phnum);
915 /* Load preload/needed libraries, add their symbols to the global
916 * namespace, and perform all remaining relocations. The main
917 * program must be relocated LAST since it may contain copy
918 * relocations which depend on libraries' relocations. */
919 if (env_preload) load_preload(env_preload);
923 reloc_all(app->next);
/* Allocate and install the initial thread's TLS/DTV block. */
929 void *mem = mmap(0, __libc.tls_size, PROT_READ|PROT_WRITE,
930 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
931 if (mem==MAP_FAILED ||
932 !__install_initial_tls(__copy_tls(mem))) {
933 dprintf(2, "%s: Error getting %zu bytes thread-local storage: %m\n",
934 argv[0], __libc.tls_size);
939 if (ldso_fail) _exit(127);
940 if (ldd_mode) _exit(0);
942 /* Switch to runtime mode: any further failures in the dynamic
943 * linker are a reportable failure rather than a fatal startup
944 * error. If the dynamic loader (dlopen) will not be used, free
945 * all memory used by the dynamic linker. */
/* Patch DT_DEBUG in the app's dynamic section (skipped where the
 * dynamic section is read-only) so debuggers can find our struct. */
948 #ifndef DYNAMIC_IS_RO
949 for (i=0; app->dynv[i]; i+=2)
950 if (app->dynv[i]==DT_DEBUG)
951 app->dynv[i+1] = (size_t)&debug;
954 debug.bp = _dl_debug_state;
956 debug.base = lib->base;
960 if (ssp_used) __init_ssp(auxv);
/* Caller (crt glue) jumps to the program entry point we return. */
966 return (void *)aux[AT_ENTRY];
/* POSIX dlopen: load file (and dependencies), relocate, and run
 * constructors, with full rollback of partially loaded state on
 * failure via rtld_fail.  NOTE(review): many lines (load_deps call,
 * relocation, unmap loop bodies) are not visible in this extract. */
969 void *dlopen(const char *file, int mode)
971 struct dso *volatile p, *orig_tail, *next;
972 size_t orig_tls_cnt, orig_tls_offset, orig_tls_align;
/* dlopen(NULL, ...) yields a handle for the main program. */
976 if (!file) return head;
978 pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
979 pthread_rwlock_wrlock(&lock);
/* Snapshot the static-TLS layout so it can be rolled back. */
983 orig_tls_cnt = tls_cnt;
984 orig_tls_offset = tls_offset;
985 orig_tls_align = tls_align;
988 if (setjmp(rtld_fail)) {
989 /* Clean up anything new that was (partially) loaded */
/* global == -1 marks provisional namespace membership (set below);
 * clear those marks on failure. */
990 if (p && p->deps) for (i=0; p->deps[i]; i++)
991 if (p->deps[i]->global < 0)
992 p->deps[i]->global = 0;
993 for (p=orig_tail->next; p; p=next) {
995 munmap(p->map, p->map_len);
999 tls_cnt = orig_tls_cnt;
1000 tls_offset = orig_tls_offset;
1001 tls_align = orig_tls_align;
1007 } else p = load_library(file);
1010 snprintf(errbuf, sizeof errbuf,
1011 "Error loading shared library %s: %m", file);
1016 /* First load handling */
/* Temporarily mark the new DSO and its deps global (-1) so their
 * symbols are visible while relocating them... */
1019 if (p->deps) for (i=0; p->deps[i]; i++)
1020 if (!p->deps[i]->global)
1021 p->deps[i]->global = -1;
1022 if (!p->global) p->global = -1;
/* ...then withdraw the provisional marks afterwards. */
1024 if (p->deps) for (i=0; p->deps[i]; i++)
1025 if (p->deps[i]->global < 0)
1026 p->deps[i]->global = 0;
1027 if (p->global < 0) p->global = 0;
/* RTLD_GLOBAL: membership becomes permanent instead. */
1030 if (mode & RTLD_GLOBAL) {
1031 if (p->deps) for (i=0; p->deps[i]; i++)
1032 p->deps[i]->global = 1;
1038 if (ssp_used) __init_ssp(auxv);
/* Constructors run after dropping the lock (see init_fini_lock). */
1044 pthread_rwlock_unlock(&lock);
1045 if (p) do_init_fini(orig_tail);
1046 pthread_setcancelstate(cs, 0);
/* Core of dlsym: resolve s in the given handle, in the global
 * namespace (RTLD_DEFAULT / main handle), or in the DSOs after the
 * caller's (RTLD_NEXT, located via return address ra).  NOTE(review):
 * declarations, some braces, and the failure label are not visible in
 * this extract. */
1050 static void *do_dlsym(struct dso *p, const char *s, void *ra)
1053 uint32_t h = 0, gh = 0;
1055 if (p == head || p == RTLD_DEFAULT || p == RTLD_NEXT) {
1056 if (p == RTLD_NEXT) {
/* Identify the caller's DSO: the one whose mapping contains ra. */
1057 for (p=head; p && (unsigned char *)ra-p->map>p->map_len; p=p->next);
1060 struct symdef def = find_sym(p->next, s, 0);
1061 if (!def.sym) goto failed;
1062 return def.dso->base + def.sym->st_value;
/* Handle-scoped lookup: first the DSO itself... */
1066 sym = gnu_lookup(s, gh, p);
1069 sym = sysv_lookup(s, h, p);
1071 if (sym && sym->st_value && (1<<(sym->st_info&0xf) & OK_TYPES))
1072 return p->base + sym->st_value;
/* ...then its dependencies, in load order, reusing cached hashes. */
1073 if (p->deps) for (i=0; p->deps[i]; i++) {
1074 if (p->deps[i]->ghashtab) {
1075 if (!gh) gh = gnu_hash(s);
1076 sym = gnu_lookup(s, gh, p->deps[i]);
1078 if (!h) h = sysv_hash(s);
1079 sym = sysv_lookup(s, h, p->deps[i]);
1081 if (sym && sym->st_value && (1<<(sym->st_info&0xf) & OK_TYPES))
1082 return p->deps[i]->base + sym->st_value;
1086 snprintf(errbuf, sizeof errbuf, "Symbol not found: %s", s);
/* dladdr: find the DSO whose mapping contains addr, then the nearest
 * symbol at or below addr, filling in info.  Returns 0 if nothing
 * matches.  NOTE(review): declarations and several control-flow lines
 * are not visible in this extract. */
1090 int __dladdr(void *addr, Dl_info *info)
/* Containing-DSO search runs under the read lock. */
1100 pthread_rwlock_rdlock(&lock);
1101 for (p=head; p && (unsigned char *)addr-p->map>p->map_len; p=p->next);
1102 pthread_rwlock_unlock(&lock);
1107 strings = p->strings;
/* With a SysV table, the chain count (word 1) is the symbol count. */
1109 nsym = p->hashtab[1];
/* GNU-hash only: recover the symbol count by finding the highest
 * bucket head and scanning its chain to the terminator bit. */
1113 buckets = p->ghashtab + 4 + (p->ghashtab[2]*sizeof(size_t)/4);
1114 sym += p->ghashtab[1];
1115 for (i = 0; i < p->ghashtab[0]; i++) {
1116 if (buckets[i] > nsym)
1120 nsym -= p->ghashtab[1];
1121 hashval = buckets + p->ghashtab[0] + nsym;
/* Low bit of a stored hash value marks the end of a chain. */
1123 while (!(*hashval++ & 1));
/* Linear best-match scan over the defined, acceptable symbols. */
1127 for (; nsym; nsym--, sym++) {
1128 if (sym->st_shndx && sym->st_value
1129 && (1<<(sym->st_info&0xf) & OK_TYPES)
1130 && (1<<(sym->st_info>>4) & OK_BINDS)) {
1131 void *symaddr = p->base + sym->st_value;
/* Skip symbols above addr or not improving the current best. */
1132 if (symaddr > addr || symaddr < best)
1135 bestname = strings + sym->st_name;
/* Exact hit: no closer symbol can exist. */
1136 if (addr == symaddr)
1141 if (!best) return 0;
1143 info->dli_fname = p->name;
1144 info->dli_fbase = p->base;
1145 info->dli_sname = bestname;
1146 info->dli_saddr = best;
1151 void *__dlsym(void *restrict p, const char *restrict s, void *restrict ra)
1154 pthread_rwlock_rdlock(&lock);
1155 res = do_dlsym(p, s, ra);
1156 pthread_rwlock_unlock(&lock);
/* Stub implementations used when dynamic loading is unavailable
 * (static-linked configuration).  NOTE(review): the stub bodies are
 * not visible in this extract. */
1160 void *dlopen(const char *file, int mode)
1164 void *__dlsym(void *restrict p, const char *restrict s, void *restrict ra)
1168 int __dladdr (void *addr, Dl_info *info)
/* dlerror: no error pending means return NULL. */
1176 if (!errflag) return 0;
1181 int dlclose(void *p)