X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ldso%2Fdynlink.c;h=db543c19168c1ad6691dd66a282b9aaeafc3419f;hb=94751d8ee40a866fa81e392d5c525ea144d2ee20;hp=e499b40e623b37f3772d0241adac5ea020ed5570;hpb=71db5dfaa9ddcb65e18ff128c9ed122790d72e2f;p=musl

diff --git a/ldso/dynlink.c b/ldso/dynlink.c
index e499b40e..db543c19 100644
--- a/ldso/dynlink.c
+++ b/ldso/dynlink.c
@@ -1,4 +1,5 @@
 #define _GNU_SOURCE
+#define SYSCALL_NO_TLS 1
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdarg.h>
@@ -30,6 +31,7 @@ static void error(const char *, ...);
 #define ALIGN(x,y) ((x)+(y)-1 & -(y))
 
 #define container_of(p,t,m) ((t*)((char *)(p)-offsetof(t,m)))
+#define countof(a) ((sizeof (a))/(sizeof (a)[0]))
 
 struct debug {
 	int ver;
@@ -71,14 +73,19 @@ struct dso {
 	char relocated;
 	char constructed;
 	char kernel_mapped;
+	char mark;
+	char bfs_built;
+	char runtime_loaded;
 	struct dso **deps, *needed_by;
+	size_t ndeps_direct;
+	size_t next_dep;
+	int ctor_visitor;
 	char *rpath_orig, *rpath;
 	struct tls_module tls;
 	size_t tls_id;
 	size_t relro_start, relro_end;
 	uintptr_t *new_dtv;
 	unsigned char *new_tls;
-	volatile int new_dtv_idx, new_tls_idx;
 	struct td_index *td_index;
 	struct dso *fini_next;
 	char *shortname;
@@ -118,16 +125,21 @@ static int runtime;
 static int ldd_mode;
 static int ldso_fail;
 static int noload;
+static int shutting_down;
 static jmp_buf *rtld_fail;
 static pthread_rwlock_t lock;
 static struct debug debug;
 static struct tls_module *tls_tail;
 static size_t tls_cnt, tls_offset, tls_align = MIN_TLS_ALIGN;
 static size_t static_tls_cnt;
-static pthread_mutex_t init_fini_lock = { ._m_type = PTHREAD_MUTEX_RECURSIVE };
+static pthread_mutex_t init_fini_lock;
+static pthread_cond_t ctor_cond;
+static struct dso *builtin_deps[2];
+static struct dso *const no_deps[1];
+static struct dso *builtin_ctor_queue[4];
+static struct dso **main_ctor_queue;
 static struct fdpic_loadmap *app_loadmap;
 static struct fdpic_dummy_loadmap app_dummy_loadmap;
-static struct dso *const nodeps_dummy;
 
 struct debug *_dl_debug_addr = &debug;
 
@@ -908,7 +920,7 @@ static void *dl_mmap(size_t n)
 #else
 	p = (void *)__syscall(SYS_mmap, 0, n, prot, flags, -1, 0);
 #endif
-	return p == MAP_FAILED ? 0 : p;
+	return (unsigned long)p > -4096UL ? 0 : p;
 }
 
 static void makefuncdescs(struct dso *p)
@@ -1105,6 +1117,7 @@ static struct dso *load_library(const char *name, struct dso *needed_by)
 	p->ino = st.st_ino;
 	p->needed_by = needed_by;
 	p->name = p->buf;
+	p->runtime_loaded = runtime;
 	strcpy(p->name, pathname);
 	/* Add a shortname only if name arg was not an explicit pathname. */
 	if (pathname != name) p->shortname = strrchr(p->name, '/')+1;
@@ -1112,9 +1125,9 @@ static struct dso *load_library(const char *name, struct dso *needed_by)
 		p->tls_id = ++tls_cnt;
 		tls_align = MAXP2(tls_align, p->tls.align);
 #ifdef TLS_ABOVE_TP
-		p->tls.offset = tls_offset + ( (tls_align-1) &
-			-(tls_offset + (uintptr_t)p->tls.image) );
-		tls_offset += p->tls.size;
+		p->tls.offset = tls_offset + ( (p->tls.align-1) &
+			(-tls_offset + (uintptr_t)p->tls.image) );
+		tls_offset = p->tls.offset + p->tls.size;
 #else
 		tls_offset += p->tls.size + p->tls.align - 1;
 		tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
@@ -1140,30 +1153,99 @@ static struct dso *load_library(const char *name, struct dso *needed_by)
 	return p;
 }
 
+static void load_direct_deps(struct dso *p)
+{
+	size_t i, cnt=0;
+
+	if (p->deps) return;
+	/* For head, all preloads are direct pseudo-dependencies.
+	 * Count and include them now to avoid realloc later. */
+	if (p==head) for (struct dso *q=p->next; q; q=q->next)
+		cnt++;
+	for (i=0; p->dynv[i]; i+=2)
+		if (p->dynv[i] == DT_NEEDED) cnt++;
+	/* Use builtin buffer for apps with no external deps, to
+	 * preserve property of no runtime failure paths. */
+	p->deps = (p==head && cnt<2) ? builtin_deps :
+		calloc(cnt+1, sizeof *p->deps);
+	if (!p->deps) {
+		error("Error loading dependencies for %s", p->name);
+		if (runtime) longjmp(*rtld_fail, 1);
+	}
+	cnt=0;
+	if (p==head) for (struct dso *q=p->next; q; q=q->next)
+		p->deps[cnt++] = q;
+	for (i=0; p->dynv[i]; i+=2) {
+		if (p->dynv[i] != DT_NEEDED) continue;
+		struct dso *dep = load_library(p->strings + p->dynv[i+1], p);
+		if (!dep) {
+			error("Error loading shared library %s: %m (needed by %s)",
+				p->strings + p->dynv[i+1], p->name);
+			if (runtime) longjmp(*rtld_fail, 1);
+			continue;
+		}
+		p->deps[cnt++] = dep;
+	}
+	p->deps[cnt] = 0;
+	p->ndeps_direct = cnt;
+}
+
 static void load_deps(struct dso *p)
 {
-	size_t i, ndeps=0;
-	struct dso ***deps = &p->deps, **tmp, *dep;
-	for (; p; p=p->next) {
-		for (i=0; p->dynv[i]; i+=2) {
-			if (p->dynv[i] != DT_NEEDED) continue;
-			dep = load_library(p->strings + p->dynv[i+1], p);
-			if (!dep) {
-				error("Error loading shared library %s: %m (needed by %s)",
-					p->strings + p->dynv[i+1], p->name);
-				if (runtime) longjmp(*rtld_fail, 1);
-				continue;
-			}
-			if (runtime) {
-				tmp = realloc(*deps, sizeof(*tmp)*(ndeps+2));
-				if (!tmp) longjmp(*rtld_fail, 1);
-				tmp[ndeps++] = dep;
-				tmp[ndeps] = 0;
-				*deps = tmp;
-			}
+	if (p->deps) return;
+	for (; p; p=p->next)
+		load_direct_deps(p);
+}
+
+static void extend_bfs_deps(struct dso *p)
+{
+	size_t i, j, cnt, ndeps_all;
+	struct dso **tmp;
+
+	/* Can't use realloc if the original p->deps was allocated at
+	 * program entry and malloc has been replaced, or if it's
+	 * the builtin non-allocated trivial main program deps array. */
+	int no_realloc = (__malloc_replaced && !p->runtime_loaded)
+		|| p->deps == builtin_deps;
+
+	if (p->bfs_built) return;
+	ndeps_all = p->ndeps_direct;
+
+	/* Mark existing (direct) deps so they won't be duplicated. */
+	for (i=0; p->deps[i]; i++)
+		p->deps[i]->mark = 1;
+
+	/* For each dependency already in the list, copy its list of direct
+	 * dependencies to the list, excluding any items already in the
+	 * list. Note that the list this loop iterates over will grow during
+	 * the loop, but since duplicates are excluded, growth is bounded. */
+	for (i=0; p->deps[i]; i++) {
+		struct dso *dep = p->deps[i];
+		for (j=cnt=0; j<dep->ndeps_direct; j++)
+			if (!dep->deps[j]->mark) cnt++;
+		tmp = no_realloc ?
+			malloc(sizeof(*tmp) * (ndeps_all+cnt+1)) :
+			realloc(p->deps, sizeof(*tmp) * (ndeps_all+cnt+1));
+		if (!tmp) {
+			error("Error recording dependencies for %s", p->name);
+			if (runtime) longjmp(*rtld_fail, 1);
+			continue;
+		}
+		if (no_realloc) {
+			memcpy(tmp, p->deps, sizeof(*tmp) * (ndeps_all+1));
+			no_realloc = 0;
+		}
+		p->deps = tmp;
+		for (j=0; j<dep->ndeps_direct; j++) {
+			if (dep->deps[j]->mark) continue;
+			dep->deps[j]->mark = 1;
+			p->deps[ndeps_all++] = dep->deps[j];
 		}
+		p->deps[ndeps_all] = 0;
 	}
-	if (!*deps) *deps = (struct dso **)&nodeps_dummy;
+	p->bfs_built = 1;
+	for (p=head; p; p=p->next)
+		p->mark = 0;
 }
 
 static void load_preload(char *s)
@@ -1279,7 +1361,18 @@ void __libc_exit_fini()
 {
 	struct dso *p;
 	size_t dyn[DYN_CNT];
+	int self = __pthread_self()->tid;
+
+	/* Take both locks before setting shutting_down, so that
+	 * either lock is sufficient to read its value. The lock
+	 * order matches that in dlopen to avoid deadlock. */
+	pthread_rwlock_wrlock(&lock);
+	pthread_mutex_lock(&init_fini_lock);
+	shutting_down = 1;
+	pthread_rwlock_unlock(&lock);
 	for (p=fini_head; p; p=p->fini_next) {
+		while (p->ctor_visitor && p->ctor_visitor!=self)
+			pthread_cond_wait(&ctor_cond, &init_fini_lock);
 		if (!p->constructed) continue;
 		decode_vec(p->dynv, dyn, DYN_CNT);
 		if (dyn[0] & (1<<DT_FINI_ARRAY)) {
@@ -1294,42 +1387,114 @@ void __libc_exit_fini()
 	}
 }
 
-static void do_init_fini(struct dso *p)
+static struct dso **queue_ctors(struct dso *dso)
 {
-	size_t dyn[DYN_CNT];
-	int need_locking = libc.threads_minus_1;
-	/* Allow recursive calls that arise when a library calls
-	 * dlopen from one of its constructors, but block any
-	 * other threads until all ctors have finished. */
-	if (need_locking) pthread_mutex_lock(&init_fini_lock);
-	for (; p; p=p->prev) {
-		if (p->constructed) continue;
-		p->constructed = 1;
+	size_t cnt, qpos, spos, i;
+	struct dso *p, **queue, **stack;
+
+	if (ldd_mode) return 0;
+
+	/* Bound on queue size is the total number of indirect deps.
+	 * If a bfs deps list was built, we can use it. Otherwise,
+	 * bound by the total number of DSOs, which is always safe and
+	 * is reasonable we use it (for main app at startup). */
+	if (dso->bfs_built) {
+		for (cnt=0; dso->deps[cnt]; cnt++)
+			dso->deps[cnt]->mark = 0;
+		cnt++; /* self, not included in deps */
+	} else {
+		for (cnt=0, p=head; p; cnt++, p=p->next)
+			p->mark = 0;
+	}
+	cnt++; /* termination slot */
+	if (dso==head && cnt <= countof(builtin_ctor_queue))
+		queue = builtin_ctor_queue;
+	else
+		queue = calloc(cnt, sizeof *queue);
+
+	if (!queue) {
+		error("Error allocating constructor queue: %m\n");
+		if (runtime) longjmp(*rtld_fail, 1);
+		return 0;
+	}
+
+	/* Opposite ends of the allocated buffer serve as an output queue
+	 * and a working stack. Setup initial stack with just the argument
+	 * dso and initial queue empty... */
+	stack = queue;
+	qpos = 0;
+	spos = cnt;
+	stack[--spos] = dso;
+	dso->next_dep = 0;
+	dso->mark = 1;
+
+	/* Then perform pseudo-DFS sort, but ignoring circular deps. */
+	while (spos<cnt) {
+		p = stack[spos++];
+		while (p->next_dep < p->ndeps_direct) {
+			if (p->deps[p->next_dep]->mark) {
+				p->next_dep++;
+			} else {
+				stack[--spos] = p;
+				p = p->deps[p->next_dep];
+				p->next_dep = 0;
+				p->mark = 1;
+			}
+		}
+		queue[qpos++] = p;
+	}
+	queue[qpos] = 0;
+	for (i=0; i<qpos; i++) queue[i]->mark = 0;
+
+	return queue;
+}
+
+static void do_init_fini(struct dso **queue)
+{
+	struct dso *p;
+	size_t dyn[DYN_CNT], i;
+	int self = __pthread_self()->tid;
+
+	pthread_mutex_lock(&init_fini_lock);
+	for (i=0; (p=queue[i]); i++) {
+		while ((p->ctor_visitor && p->ctor_visitor!=self) || shutting_down)
+			pthread_cond_wait(&ctor_cond, &init_fini_lock);
+		if (p->ctor_visitor || p->constructed)
+			continue;
+		p->ctor_visitor = self;
+
 		decode_vec(p->dynv, dyn, DYN_CNT);
 		if (dyn[0] & ((1<<DT_FINI) | (1<<DT_FINI_ARRAY))) {
 			p->fini_next = fini_head;
 			fini_head = p;
 		}
+
+		pthread_mutex_unlock(&init_fini_lock);
+
 #ifndef NO_LEGACY_INITFINI
 		if ((dyn[0] & (1<<DT_INIT)) && dyn[DT_INIT])
 			fpaddr(p, dyn[DT_INIT])();
 #endif
 		if (dyn[0] & (1<<DT_INIT_ARRAY)) {
 			size_t n = dyn[DT_INIT_ARRAYSZ]/sizeof(size_t);
 			size_t *fn = laddr(p, dyn[DT_INIT_ARRAY]);
 			while (n--) ((void (*)(void))*fn++)();
 		}
-		if (!need_locking && libc.threads_minus_1) {
-			need_locking = 1;
-			pthread_mutex_lock(&init_fini_lock);
-		}
+
+		pthread_mutex_lock(&init_fini_lock);
+		p->ctor_visitor = 0;
+		p->constructed = 1;
+		pthread_cond_broadcast(&ctor_cond);
 	}
-	if (need_locking) pthread_mutex_unlock(&init_fini_lock);
+	pthread_mutex_unlock(&init_fini_lock);
 }
 
 void __libc_start_init(void)
 {
-	do_init_fini(tail);
+	do_init_fini(main_ctor_queue);
+	if (!__malloc_replaced && main_ctor_queue != builtin_ctor_queue)
+		free(main_ctor_queue);
+	main_ctor_queue = 0;
 }
 
 static void dl_debug_state(void)
@@ -1479,7 +1644,7 @@ hidden void __dls2(unsigned char *base, size_t *sp)
  * so that loads of the thread pointer and &errno can be pure/const and
  * thereby hoistable. */
 
-_Noreturn void __dls2b(size_t *sp)
+void __dls2b(size_t *sp)
 {
 	/* Setup early thread pointer in builtin_tls for ldso/libc itself to
 	 * use during dynamic linking. If possible it will also serve as the
@@ -1500,7 +1665,7 @@ _Noreturn void __dls2b(size_t *sp)
  * process dependencies and relocations for the main application and
 * transfer control to its entry point. */
 
-_Noreturn void __dls3(size_t *sp)
+void __dls3(size_t *sp)
 {
 	static struct dso app, vdso;
 	size_t aux[AUX_CNT], *auxv;
@@ -1520,6 +1685,8 @@ _Noreturn void __dls3(size_t *sp)
 	libc.auxv = auxv = (void *)(argv+i+1);
 	decode_vec(auxv, aux, AUX_CNT);
 	__hwcap = aux[AT_HWCAP];
+	search_vec(auxv, &__sysinfo, AT_SYSINFO);
+	__pthread_self()->sysinfo = __sysinfo;
 	libc.page_size = aux[AT_PAGESZ];
 	libc.secure = ((aux[0]&0x7800)!=0x7800 || aux[AT_UID]!=aux[AT_EUID]
 		|| aux[AT_GID]!=aux[AT_EGID] || aux[AT_SECURE]);
@@ -1629,10 +1796,9 @@ _Noreturn void __dls3(size_t *sp)
 		app.tls_id = tls_cnt = 1;
 #ifdef TLS_ABOVE_TP
 		app.tls.offset = GAP_ABOVE_TP;
-		app.tls.offset += -GAP_ABOVE_TP & (app.tls.align-1);
-		tls_offset = app.tls.offset + app.tls.size
-			+ ( -((uintptr_t)app.tls.image + app.tls.size)
-			& (app.tls.align-1) );
+		app.tls.offset += (-GAP_ABOVE_TP + (uintptr_t)app.tls.image)
+			& (app.tls.align-1);
+		tls_offset = app.tls.offset + app.tls.size;
 #else
 		tls_offset = app.tls.offset = app.tls.size
 			+ ( -((uintptr_t)app.tls.image + app.tls.size)
@@ -1662,6 +1828,7 @@ _Noreturn void __dls3(size_t *sp)
 	reclaim_gaps(&ldso);
 
 	/* Load preload/needed libraries, add symbols to global namespace. */
+	ldso.deps = (struct dso **)no_deps;
 	if (env_preload) load_preload(env_preload);
 	load_deps(&app);
 	for (struct dso *p=head; p; p=p->next)
@@ -1683,6 +1850,7 @@ _Noreturn void __dls3(size_t *sp)
 		vdso.name = "";
 		vdso.shortname = "linux-gate.so.1";
 		vdso.relocated = 1;
+		vdso.deps = (struct dso **)no_deps;
 		decode_dyn(&vdso);
 		vdso.prev = tail;
 		tail->next = &vdso;
@@ -1698,6 +1866,14 @@ _Noreturn void __dls3(size_t *sp)
 		}
 	}
 
+	/* This must be done before final relocations, since it calls
+	 * malloc, which may be provided by the application. Calling any
+	 * application code prior to the jump to its entry point is not
+	 * valid in our model and does not work with FDPIC, where there
+	 * are additional relocation-like fixups that only the entry point
+	 * code can see to perform. */
+	main_ctor_queue = queue_ctors(&app);
+
 	/* The main program must be relocated LAST since it may contin
 	 * copy relocations which depend on libraries' relocations. */
 	reloc_all(app.next);
@@ -1785,6 +1961,7 @@ void *dlopen(const char *file, int mode)
 	size_t i;
 	int cs;
 	jmp_buf jb;
+	struct dso **volatile ctor_queue = 0;
 
 	if (!file) return head;
 
@@ -1793,6 +1970,10 @@ void *dlopen(const char *file, int mode)
 	__inhibit_ptc();
 
 	p = 0;
+	if (shutting_down) {
+		error("Cannot dlopen while program is exiting.");
+		goto end;
+	}
 	orig_tls_tail = tls_tail;
 	orig_tls_cnt = tls_cnt;
 	orig_tls_offset = tls_offset;
@@ -1816,11 +1997,12 @@ void *dlopen(const char *file, int mode)
 			free(p->funcdescs);
 			if (p->rpath != p->rpath_orig)
 				free(p->rpath);
-			if (p->deps != &nodeps_dummy)
-				free(p->deps);
+			free(p->deps);
 			unmap_library(p);
 			free(p);
 		}
+		free(ctor_queue);
+		ctor_queue = 0;
 		if (!orig_tls_tail) libc.tls_head = 0;
 		tls_tail = orig_tls_tail;
 		if (tls_tail) tls_tail->next = 0;
@@ -1843,24 +2025,25 @@ void *dlopen(const char *file, int mode)
 	}
 
 	/* First load handling */
-	int first_load = !p->deps;
-	if (first_load) {
-		load_deps(p);
-		if (!p->relocated && (mode & RTLD_LAZY)) {
-			prepare_lazy(p);
-			for (i=0; p->deps[i]; i++)
-				if (!p->deps[i]->relocated)
-					prepare_lazy(p->deps[i]);
-		}
+	load_deps(p);
+	extend_bfs_deps(p);
+	pthread_mutex_lock(&init_fini_lock);
+	if (!p->constructed) ctor_queue = queue_ctors(p);
+	pthread_mutex_unlock(&init_fini_lock);
+	if (!p->relocated && (mode & RTLD_LAZY)) {
+		prepare_lazy(p);
+		for (i=0; p->deps[i]; i++)
+			if (!p->deps[i]->relocated)
+				prepare_lazy(p->deps[i]);
 	}
 
-	if (first_load || (mode & RTLD_GLOBAL)) {
+	if (!p->relocated || (mode & RTLD_GLOBAL)) {
 		/* Make new symbols global, at least temporarily, so we can do
 		 * relocations. If not RTLD_GLOBAL, this is reverted below. */
 		add_syms(p);
 		for (i=0; p->deps[i]; i++) add_syms(p->deps[i]);
 	}
-	if (first_load) {
+	if (!p->relocated) {
 		reloc_all(p);
 	}
 
@@ -1884,7 +2067,10 @@ end:
 	__release_ptc();
 	if (p) gencnt++;
 	pthread_rwlock_unlock(&lock);
-	if (p) do_init_fini(orig_tail);
+	if (ctor_queue) {
+		do_init_fini(ctor_queue);
+		free(ctor_queue);
+	}
 	pthread_setcancelstate(cs, 0);
 	return p;
 }