A cheap trick to further optimize locking normal mutexes: since EBUSY is the only failure code trylock can return for a normal mutex, atomically swapping EBUSY into the lock word produces the correct return value with no branch at all. The swap returns the old value: 0 when the lock was free (and is now held), EBUSY when it was already taken.
musl/src/thread/pthread_mutex_trylock.c
#include "pthread_impl.h"

int pthread_mutex_trylock(pthread_mutex_t *m)
{
        int tid;
        int own;
        pthread_t self;

        /* Fast path for normal mutexes: swap EBUSY into the lock word.
         * The old value is already the right return code: 0 if the lock
         * was free (and we now hold it), EBUSY if it was held. */
        if (m->_m_type == PTHREAD_MUTEX_NORMAL)
                return a_swap(&m->_m_lock, EBUSY);

        /* All other types track an owner: the thread id, with bit 31
         * (the kernel's FUTEX_WAITERS flag) always set. */
        self = pthread_self();
        tid = self->tid | 0x80000000;

        /* Types >= 4 are robust. Lazily register this thread's robust
         * list with the kernel on first use, then record this mutex as
         * "pending" so the kernel can recover it if we die mid-acquire. */
        if (m->_m_type >= 4) {
                if (!self->robust_list.off)
                        syscall(SYS_set_robust_list,
                                &self->robust_list, 3*sizeof(long));
                /* Offset from the list node (_m_next) to the lock word,
                 * as the kernel's robust_list_head protocol requires. */
                self->robust_list.off = (char*)&m->_m_lock-(char *)&m->_m_next;
                self->robust_list.pending = &m->_m_next;
        }

        /* Relock by the current owner: for recursive mutexes, just bump
         * the count; overflow yields EAGAIN. */
        if (m->_m_lock == tid && (m->_m_type&3) == PTHREAD_MUTEX_RECURSIVE) {
                if ((unsigned)m->_m_count >= INT_MAX) return EAGAIN;
                m->_m_count++;
                return 0;
        }

        /* The lock can be taken if it is free, or if its owner died (the
         * kernel set FUTEX_OWNER_DIED, bit 30, in the lock word). In any
         * other case, or if the CAS loses a race, fail with EBUSY. */
        own = m->_m_lock;
        if ((own && !(own & 0x40000000)) || a_cas(&m->_m_lock, own, tid)!=own)
                return EBUSY;

        m->_m_count = 1;

        /* Non-robust types are done. */
        if (m->_m_type < 4) return 0;

        /* Types >= 8 were flagged when a previous owner died and were
         * never made consistent again: release the lock and give up. */
        if (m->_m_type >= 8) {
                m->_m_lock = 0;
                return ENOTRECOVERABLE;
        }
        /* Link the mutex into the front of the thread's robust list.
         * The list is threaded through the _m_next fields; head[-1] is
         * the first node's _m_prev, which sits directly before _m_next. */
        m->_m_next = self->robust_list.head;
        m->_m_prev = &self->robust_list.head;
        if (self->robust_list.head)
                self->robust_list.head[-1] = &m->_m_next;
        self->robust_list.head = &m->_m_next;
        self->robust_list.pending = 0;
        /* own nonzero here means we took the lock from a dead owner:
         * flag the mutex and tell the caller to restore consistency. */
        if (own) {
                m->_m_type += 8;
                return EOWNERDEAD;
        }

        return 0;
}
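
The trick in the normal-mutex path is worth spelling out with standard C11 atomics. Below is a minimal, self-contained sketch: `toy_trylock` and `toy_unlock` are hypothetical names, and `atomic_exchange` stands in for musl's internal `a_swap`.

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int lock;   /* 0 = free, EBUSY = held */

static int toy_trylock(atomic_int *l)
{
        /* The previous value is the return code: 0 on success
         * (lock was free, now held), EBUSY on failure. */
        return atomic_exchange(l, EBUSY);
}

static void toy_unlock(atomic_int *l)
{
        atomic_store(l, 0);
}

int main(void)
{
        printf("%d\n", toy_trylock(&lock)); /* 0: acquired */
        printf("%d\n", toy_trylock(&lock)); /* EBUSY: already held */
        toy_unlock(&lock);
        printf("%d\n", toy_trylock(&lock)); /* 0 again after unlock */
        return 0;
}

Note that the swap writes to the lock word even when the lock is already held; later versions of musl changed this path to a compare-and-swap, a_cas(&m->_m_lock, 0, EBUSY) & EBUSY, which does not modify an already-held lock.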
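
The robust-list insertion relies on a compact intrusive-list trick: the list is threaded through the `_m_next` fields, and because `_m_prev` is laid out immediately before `_m_next`, the expression `head[-1]` reaches the first node's prev pointer. Here is a sketch of the same layout under assumed illustrative names (`node`, `list_head`, `push_front`); none of these are musl identifiers.

#include <stddef.h>
#include <stdio.h>

struct node {
        void **prev;    /* points at whatever points at our next field */
        void  *next;    /* points at the next node's next field */
        int    payload;
};

static void *list_head;  /* points at the first node's next field */

static void push_front(struct node *n)
{
        n->next = list_head;
        n->prev = &list_head;
        if (list_head)
                /* list_head points at the old first node's next field;
                 * [-1] steps back to its prev field, as in musl's
                 * self->robust_list.head[-1] = &m->_m_next; */
                ((void **)list_head)[-1] = &n->next;
        list_head = &n->next;
}

int main(void)
{
        struct node a = { .payload = 1 }, b = { .payload = 2 };
        push_front(&a);
        push_front(&b);
        /* walk: head -> &b.next -> &a.next -> NULL; prints 2 then 1 */
        for (void *p = list_head; p; p = *(void **)p) {
                struct node *n =
                        (struct node *)((char *)p - offsetof(struct node, next));
                printf("%d\n", n->payload);
        }
        return 0;
}

The payoff is that the head can point directly at a node's embedded link, matching the shape the kernel's robust-futex list expects, while userspace still gets O(1) insertion and removal.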