fix stale locks left behind when pthread_create fails
src/thread/pthread_create.c
#include "pthread_impl.h"
#include "stdio_impl.h"
#include <sys/mman.h>

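/* No-op fallbacks: the weak aliases below are overridden by strong
 * definitions elsewhere in libc when the corresponding functionality
 * (TSD destructors, the thread-creation lock) is actually linked in. */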
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);

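/* Terminate the calling thread: run pending cancellation cleanup
 * handlers and TSD destructors, mark the thread dead, drop the global
 * thread count (exiting the whole process if this was the last
 * thread), and, for detached threads, unmap the thread's own stack
 * before making the final exit syscall. */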
_Noreturn void pthread_exit(void *result)
{
        pthread_t self = pthread_self();
        int n;

        self->result = result;

        while (self->cancelbuf) {
                void (*f)(void *) = self->cancelbuf->__f;
                void *x = self->cancelbuf->__x;
                self->cancelbuf = self->cancelbuf->__next;
                f(x);
        }

        __pthread_tsd_run_dtors();

        __lock(self->exitlock);

        /* Mark this thread dead before decrementing count */
        __lock(self->killlock);
        self->dead = 1;
        __unlock(self->killlock);

        do n = libc.threads_minus_1;
        while (n && a_cas(&libc.threads_minus_1, n, n-1)!=n);
        if (!n) exit(0);

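        /* A detached thread frees its own stack. All signals are blocked
         * first because no usable stack remains once __unmapself runs;
         * detached==2 (set in start() when the parent's setup failed)
         * additionally resets the kernel's child-tid clear address so
         * the soon-to-be-unmapped memory is never written to. */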
        if (self->detached && self->map_base) {
                if (self->detached == 2)
                        __syscall(SYS_set_tid_address, 0);
                __syscall(SYS_rt_sigprocmask, SIG_BLOCK,
                        SIGALL_SET, 0, __SYSCALL_SSLEN);
                __unmapself(self->map_base, self->map_size);
        }

        for (;;) __syscall(SYS_exit, 0);
}

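/* Maintain the per-thread stack of cancellation cleanup handlers used
 * by the pthread_cleanup_push/pthread_cleanup_pop macros; the handlers
 * are run by pthread_exit above. */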
void __do_cleanup_push(struct __ptcb *cb)
{
        struct pthread *self = pthread_self();
        cb->__next = self->cancelbuf;
        self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
        __pthread_self()->cancelbuf = cb->__next;
}

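/* Thread trampoline passed to __clone. If pthread_create requested
 * explicit scheduling, wait on the startlock until the parent has
 * applied it (aborting via pthread_exit if that failed), then restore
 * the signal mask saved by the parent. Finally run the application's
 * start routine and pass its return value to pthread_exit. */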
static int start(void *p)
{
        pthread_t self = p;
        if (self->startlock[0]) {
                __wait(self->startlock, 0, 1, 1);
                if (self->startlock[0]) {
                        self->detached = 2;
                        pthread_exit(0);
                }
                __syscall(SYS_rt_sigprocmask, SIG_SETMASK,
                        self->sigmask, 0, __SYSCALL_SSLEN);
        }
        if (self->unblock_cancel)
                __syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
                        SIGPT_SET, 0, __SYSCALL_SSLEN);
        pthread_exit(self->start(self->start_arg));
        return 0;
}

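/* Round a size up to a whole number of pages. */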
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static const size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);

static FILE *const dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

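/* A negative stdio lock value means locking is disabled for
 * single-threaded use; reset it to zero so the stream participates in
 * locking once the process becomes multi-threaded. */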
static void init_file_lock(FILE *f)
{
        if (f && f->lock<0) f->lock = 0;
}

void *__copy_tls(unsigned char *);

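/* Create a new thread: allocate (or adopt) a stack with room for the
 * TLS image and thread-specific data, initialize the new thread
 * descriptor, and start the thread via __clone. Returns 0 on success
 * or an error number. */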
int pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
        int ret;
        size_t size, guard;
        struct pthread *self = pthread_self(), *new;
        unsigned char *map = 0, *stack = 0, *tsd = 0;
        unsigned flags = 0x7d8f00;
        int do_sched = 0;
        pthread_attr_t attr = {0};

        if (!self) return ENOSYS;
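        /* On the first thread creation, switch stdio into
         * multi-threaded mode by enabling locking on every open stream. */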
        if (!libc.threaded) {
                for (FILE *f=libc.ofl_head; f; f=f->next)
                        init_file_lock(f);
                init_file_lock(__stdin_used);
                init_file_lock(__stdout_used);
                init_file_lock(__stderr_used);
                libc.threaded = 1;
        }
        if (attrp) attr = *attrp;

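        /* Take the thread-creation lock (a no-op unless a strong
         * definition is linked in) for the rest of setup; every return
         * path below must release it. */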
        __acquire_ptc();

        if (attr._a_stackaddr) {
                size_t need = libc.tls_size + __pthread_tsd_size;
                size = attr._a_stacksize + DEFAULT_STACK_SIZE;
                stack = (void *)(attr._a_stackaddr & -16);
                /* Use application-provided stack for TLS only when
                 * it does not take more than ~12% or 2k of the
                 * application's stack space. */
                if (need < size/8 && need < 2048) {
                        tsd = stack - __pthread_tsd_size;
                        stack = tsd - libc.tls_size;
                } else {
                        size = ROUND(need);
                        guard = 0;
                }
        } else {
                guard = ROUND(DEFAULT_GUARD_SIZE + attr._a_guardsize);
                size = guard + ROUND(DEFAULT_STACK_SIZE + attr._a_stacksize
                        + libc.tls_size + __pthread_tsd_size);
        }

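        /* Unless the caller-provided stack is also hosting TLS/TSD, map
         * a fresh region. With a guard, the whole region is mapped
         * PROT_NONE and then everything above the guard is made
         * readable and writable, leaving the guard pages inaccessible;
         * otherwise a plain read/write mapping suffices. */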
        if (!tsd) {
                if (guard) {
                        map = mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
                        if (map == MAP_FAILED) goto fail;
                        if (mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)) {
                                munmap(map, size);
                                goto fail;
                        }
                } else {
                        map = mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
                        if (map == MAP_FAILED) goto fail;
                }
                tsd = map + size - __pthread_tsd_size;
                if (!stack) stack = tsd - libc.tls_size;
        }

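        /* The TLS area sits just below the TSD block at the top of the
         * region; as used here, __copy_tls installs the TLS
         * initialization image there and returns the new thread
         * descriptor, whose fields are then filled in. */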
        new = __copy_tls(tsd - libc.tls_size);
        new->map_base = map;
        new->map_size = size;
        new->pid = self->pid;
        new->errno_ptr = &new->errno_val;
        new->start = entry;
        new->start_arg = arg;
        new->self = new;
        new->tsd = (void *)tsd;
        if (attr._a_detach) {
                new->detached = 1;
                flags -= 0x200000;
        }
        if (attr._a_sched) {
                do_sched = new->startlock[0] = 1;
                __syscall(SYS_rt_sigprocmask, SIG_BLOCK,
                        SIGALL_SET, self->sigmask, __SYSCALL_SSLEN);
        }
        new->unblock_cancel = self->cancel;
        new->canary = self->canary;

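        /* Count the new thread before cloning so a concurrent exit
         * cannot see it missing, then start it. The raw constant
         * 0x7d8f00 appears to pack the usual thread clone flags
         * (CLONE_VM, CLONE_THREAD, CLONE_SETTLS, CLONE_PARENT_SETTID,
         * CLONE_CHILD_CLEARTID, among others); the detached case above
         * dropped 0x200000 (CLONE_CHILD_CLEARTID), presumably so the
         * kernel never writes into a mapping the thread unmaps itself. */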
        a_inc(&libc.threads_minus_1);
        ret = __clone(start, stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);

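        /* Release the creation lock unconditionally, before the failure
         * check below, so a failed clone cannot leave it held; this
         * appears to be the stale-lock problem named in the commit
         * title. */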
        __release_ptc();

        if (do_sched) {
                __syscall(SYS_rt_sigprocmask, SIG_SETMASK,
                        new->sigmask, 0, __SYSCALL_SSLEN);
        }

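        /* Clone failed: roll back the thread count and free the new
         * thread's mapping. */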
        if (ret < 0) {
                a_dec(&libc.threads_minus_1);
                if (map) munmap(map, size);
                return EAGAIN;
        }

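        /* Apply the requested scheduling to the new thread, then
         * release it by waking the startlock; storing 2 tells start()
         * to abandon the thread (the detached==2 path above). */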
        if (do_sched) {
                ret = __syscall(SYS_sched_setscheduler, new->tid,
                        attr._a_policy, &attr._a_prio);
                a_store(new->startlock, ret<0 ? 2 : 0);
                __wake(new->startlock, 1, 1);
                if (ret < 0) return -ret;
        }

        *res = new;
        return 0;
fail:
        __release_ptc();
        return EAGAIN;
}