+static void find_map_range(Phdr *ph, size_t cnt, size_t stride, struct dso *p)
+{
+ size_t min_addr = -1, max_addr = 0;
+ for (; cnt--; ph = (void *)((char *)ph + stride)) {
+ if (ph->p_type != PT_LOAD) continue;
+ if (ph->p_vaddr < min_addr)
+ min_addr = ph->p_vaddr;
+ if (ph->p_vaddr+ph->p_memsz > max_addr)
+ max_addr = ph->p_vaddr+ph->p_memsz;
+ }
+ min_addr &= -PAGE_SIZE;
+ max_addr = (max_addr + PAGE_SIZE-1) & -PAGE_SIZE;
+ p->map = p->base + min_addr;
+ p->map_len = max_addr - min_addr;
+}
+
+static void do_fini()
+{
+ struct dso *p;
+ size_t dyn[DYN_CNT] = {0};
+ for (p=fini_head; p; p=p->fini_next) {
+ if (!p->constructed) continue;
+ decode_vec(p->dynv, dyn, DYN_CNT);
+ ((void (*)(void))(p->base + dyn[DT_FINI]))();
+ }
+}
+
+static void do_init_fini(struct dso *p)
+{
+ size_t dyn[DYN_CNT] = {0};
+ int need_locking = libc.threads_minus_1;
+ /* Allow recursive calls that arise when a library calls
+ * dlopen from one of its constructors, but block any
+ * other threads until all ctors have finished. */
+ if (need_locking) pthread_mutex_lock(&init_fini_lock);
+ for (; p; p=p->prev) {
+ if (p->constructed) continue;
+ p->constructed = 1;
+ decode_vec(p->dynv, dyn, DYN_CNT);
+ if (dyn[0] & (1<<DT_FINI)) {
+ p->fini_next = fini_head;
+ fini_head = p;
+ }
+ if (dyn[0] & (1<<DT_INIT))
+ ((void (*)(void))(p->base + dyn[DT_INIT]))();
+ }
+ if (need_locking) pthread_mutex_unlock(&init_fini_lock);
+}
+
/* Deliberately empty, non-static hook. Presumably a debugger sets a
 * breakpoint on this symbol to be notified when the shared-library
 * list changes (the conventional r_debug/_dl_debug_state protocol) --
 * NOTE(review): confirm against the call sites; nothing in this chunk
 * shows where it is invoked. Must not be optimized away or inlined. */
void _dl_debug_state(void)
{
}
+
/* Lay out a new thread's DTV and initial TLS images inside the
 * caller-supplied memory block `mem`, and return the location of the
 * thread's pthread structure within that block (which serves as the
 * thread pointer). Assumes mem is large enough per libc.tls_size --
 * TODO confirm against the caller's allocation. */
void *__copy_tls(unsigned char *mem)
{
	pthread_t td;
	struct dso *p;

	/* No modules use TLS: the block needs no setup; the pthread
	 * struct is simply at the start. */
	if (!tls_cnt) return mem;

	/* The DTV occupies the start of the block: dtv[0] holds the
	 * module count, dtv[id] will point at module id's TLS image. */
	void **dtv = (void *)mem;
	dtv[0] = (void *)tls_cnt;

#ifdef TLS_ABOVE_TP
	/* TLS sits above the thread pointer: place the pthread struct
	 * after the DTV (aligned so the TLS area that follows it meets
	 * tls_align), then each module's image at its positive
	 * tls_offset from the end of the pthread struct. */
	mem += sizeof(void *) * (tls_cnt+1);
	mem += -((uintptr_t)mem + sizeof(struct pthread)) & (tls_align-1);
	td = (pthread_t)mem;
	mem += sizeof(struct pthread);

	for (p=head; p; p=p->next) {
		if (!p->tls_id) continue;	/* module has no TLS */
		dtv[p->tls_id] = mem + p->tls_offset;
		memcpy(dtv[p->tls_id], p->tls_image, p->tls_len);
	}
#else
	/* TLS sits below the thread pointer: place the pthread struct
	 * at the (aligned-down) top of the block, then each module's
	 * image at its negative tls_offset from the thread pointer. */
	mem += libc.tls_size - sizeof(struct pthread);
	mem -= (uintptr_t)mem & (tls_align-1);
	td = (pthread_t)mem;

	for (p=head; p; p=p->next) {
		if (!p->tls_id) continue;	/* module has no TLS */
		dtv[p->tls_id] = mem - p->tls_offset;
		memcpy(dtv[p->tls_id], p->tls_image, p->tls_len);
	}
#endif
	/* Note: only tls_len bytes are copied per module; any
	 * zero-fill of the remainder must come from the caller's
	 * allocation -- TODO confirm. */
	td->dtv = dtv;
	return td;
}
+
/* Resolve a thread-local variable address. v[0] is the TLS module id,
 * v[1] the offset within that module's TLS block. On first access to a
 * module not yet covered by this thread's DTV (i.e. one loaded by
 * dlopen after the thread started), lazily installs a larger DTV and a
 * fresh TLS image from storage pre-reserved in the DSO. */
void *__tls_get_addr(size_t *v)
{
	pthread_t self = __pthread_self();
	/* Fast path: DTV already covers this module and the slot is
	 * populated. */
	if (self->dtv && v[0]<=(size_t)self->dtv[0] && self->dtv[v[0]])
		return (char *)self->dtv[v[0]]+v[1];

	/* Block signals to make accessing new TLS async-signal-safe */
	sigset_t set;
	pthread_sigmask(SIG_BLOCK, SIGALL_SET, &set);
	/* Re-check after masking: a signal handler interrupted between
	 * the first check and the mask may already have installed it. */
	if (self->dtv && v[0]<=(size_t)self->dtv[0] && self->dtv[v[0]]) {
		pthread_sigmask(SIG_SETMASK, &set, 0);
		return (char *)self->dtv[v[0]]+v[1];
	}

	/* This is safe without any locks held because, if the caller
	 * is able to request the Nth entry of the DTV, the DSO list
	 * must be valid at least that far out and it was synchronized
	 * at program startup or by an already-completed call to dlopen. */
	struct dso *p;
	for (p=head; p->tls_id != v[0]; p=p->next);

	/* Get new DTV space from new DSO if needed */
	if (!self->dtv || v[0] > (size_t)self->dtv[0]) {
		/* NOTE(review): the sizeof(void *) factor looks suspect --
		 * pointer arithmetic on new_dtv (void **) already scales by
		 * the element size, so this offset may step element-size
		 * times too far per slot; confirm against the new_dtv
		 * allocation in the loader (not visible in this chunk). */
		void **newdtv = p->new_dtv +
			(v[0]+1)*sizeof(void *)*a_fetch_add(&p->new_dtv_idx,1);
		/* Carry forward all existing entries, then publish the new
		 * count and swap the DTV in (old DTV is abandoned, not
		 * freed -- storage is pre-reserved). */
		if (self->dtv) memcpy(newdtv, self->dtv,
			((size_t)self->dtv[0]+1) * sizeof(void *));
		newdtv[0] = (void *)v[0];
		self->dtv = newdtv;
	}

	/* Get new TLS memory from new DSO */
	unsigned char *mem = p->new_tls +
		(p->tls_size + p->tls_align) * a_fetch_add(&p->new_tls_idx,1);
	/* Align mem to the same residue mod tls_align as the TLS image
	 * so image-relative alignment is preserved. */
	mem += ((uintptr_t)p->tls_image - (uintptr_t)mem) & (p->tls_align-1);
	self->dtv[v[0]] = mem;
	memcpy(mem, p->tls_image, p->tls_len);
	pthread_sigmask(SIG_SETMASK, &set, 0);
	return mem + v[1];
}
+
+static void update_tls_size()
+{
+ libc.tls_size = ALIGN(
+ (1+tls_cnt) * sizeof(void *) +
+ tls_offset +
+ sizeof(struct pthread) +
+ tls_align * 2,
+ tls_align);
+}
+