From: Rich Felker
Date: Mon, 24 Mar 2014 03:19:30 +0000 (-0400)
Subject: reduce static linking overhead from TLS support by inlining mmap syscall
X-Git-Url: http://nsz.repo.hu/git/?a=commitdiff_plain;ds=sidebyside;h=98221c36119d2abfc55fe1d919705f625709fe3b;p=musl

reduce static linking overhead from TLS support by inlining mmap syscall

the external mmap function is heavy because it has to handle error
reporting that the kernel cannot do, and has to do some locking for
arcane race-condition-avoidance purposes. for allocating initial TLS,
we do not need any of that; the raw syscall suffices.

on i386, this change shaves off 13% of the size of .text for the
empty program.
---

diff --git a/src/env/__init_tls.c b/src/env/__init_tls.c
index 5c176811..dbfe62e7 100644
--- a/src/env/__init_tls.c
+++ b/src/env/__init_tls.c
@@ -5,6 +5,7 @@
 #include "pthread_impl.h"
 #include "libc.h"
 #include "atomic.h"
+#include "syscall.h"
 
 #ifndef SHARED
 
@@ -87,8 +88,15 @@ void __init_tls(size_t *aux)
 	libc.tls_size = 2*sizeof(void *)+T.size+T.align+sizeof(struct pthread);
 
-	mem = __mmap(0, libc.tls_size, PROT_READ|PROT_WRITE,
+	mem = (void *)__syscall(
+#ifdef SYS_mmap2
+		SYS_mmap2,
+#else
+		SYS_mmap,
+#endif
+		0, libc.tls_size, PROT_READ|PROT_WRITE,
 		MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+
 	if (!__install_initial_tls(__copy_tls(mem))) a_crash();
 }
 
 #else
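
For reference, a minimal standalone sketch of the inlined-mmap pattern used in
the hunk above (not part of the patch): it assumes musl's internal syscall.h
macros (__syscall, SYS_mmap/SYS_mmap2) that the patch includes, and adds an
illustrative failure check that __init_tls itself omits, since a raw syscall
reports errors as a negated errno value rather than MAP_FAILED with errno set.

#include <stddef.h>
#include <sys/mman.h>
#include "syscall.h" /* musl-internal; assumed available as in the patch */

/* Sketch only: anonymous mapping via the raw syscall. 32-bit archs such as
 * i386 provide SYS_mmap2 (offset counted in pages, zero here); 64-bit archs
 * provide plain SYS_mmap. */
static void *raw_anon_mmap(size_t len)
{
	long ret = __syscall(
#ifdef SYS_mmap2
		SYS_mmap2,
#else
		SYS_mmap,
#endif
		0, len, PROT_READ|PROT_WRITE,
		MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	/* raw syscalls return -errno in (-4096,0) on failure, not MAP_FAILED */
	if ((unsigned long)ret > -4096UL) return 0;
	return (void *)ret;
}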