#include "asan_impl.h"
#include "futex_impl.h"
#include "libc.h"
#include "threads_impl.h"
#include "zircon_impl.h"
#include "stdio_impl.h"

#include <zircon/process.h>
#include <zircon/syscalls.h>
#include <pthread.h>
#include <runtime/thread.h>
#include <runtime/tls.h>
#include <stdatomic.h>
#include <stddef.h>
#include <string.h>
#include <sys/mman.h>
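// First code to run on a new thread: install the thread pointer for this
// thread and tell the sanitizer runtime that the thread has started, then
// hand the pthread_t back to the entry trampoline.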
static inline pthread_t prestart(void* arg) {
    pthread_t self = arg;
    zxr_tp_set(zxr_thread_get_handle(&self->zxr_thread), pthread_to_tp(self));
    __sanitizer_thread_start_hook(self->sanitizer_hook, (thrd_t)self);
    return self;
}
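// Entry trampoline for pthread_create threads: the start routine's return
// value becomes the value passed to __pthread_exit.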
static void start_pthread(void* arg) {
    pthread_t self = prestart(arg);
    __pthread_exit(self->start(self->start_arg));
}
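// Entry trampoline for C11 thrd_create threads, whose start routines return
// int rather than void*; the result is widened before __pthread_exit.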
static void start_c11(void* arg) {
    pthread_t self = prestart(arg);
    int (*start)(void*) = (int (*)(void*))(uintptr_t)self->start;
    __pthread_exit((void*)(intptr_t)start(self->start_arg));
}
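// Unmaps one of the thread's regions (stack or TCB) from the root VMAR.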
static void deallocate_region(const struct iovec* region) {
    _zx_vmar_unmap(_zx_vmar_root_self(),
                   (uintptr_t)region->iov_base, region->iov_len);
}
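// Allocates the thread's stacks and TCB, creates the zircon thread, publishes
// the pthread_t, and finally starts the thread.  On any failure the partially
// created thread is torn down again and an errno value is returned.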
int __pthread_create(pthread_t* restrict res, const pthread_attr_t* restrict attrp,
                     void* (*entry)(void*), void* restrict arg) {
    pthread_attr_t attr = attrp == NULL ? DEFAULT_PTHREAD_ATTR : *attrp;

    // We do not support providing a stack via pthread attributes.
    if (attr._a_stackaddr != NULL)
        return ENOTSUP;

    char thread_name[ZX_MAX_NAME_LEN];
    thrd_t new = __allocate_thread(attr._a_guardsize,
                                   attr._a_stacksize,
                                   attr.__name != NULL ? attr.__name :
                                   attr.__c11 ? "thrd_t" : "pthread_t",
                                   thread_name);
    if (new == NULL)
        return EAGAIN;

    const char* name = attr.__name != NULL ? attr.__name : thread_name;
    zx_status_t status =
        zxr_thread_create(_zx_process_self(), name, attr._a_detach,
                          &new->zxr_thread);
    if (status != ZX_OK)
        goto fail_after_alloc;

    zxr_thread_entry_t start = attr.__c11 ? start_c11 : start_pthread;

    new->start = entry;
    new->start_arg = arg;

    void* sanitizer_hook = __sanitizer_before_thread_create_hook(
        (thrd_t)new, attr._a_detach, name,
        new->safe_stack.iov_base, new->safe_stack.iov_len);
    new->sanitizer_hook = sanitizer_hook;

    // We have to publish the pointer now, and make sure it is
    // visible, as in C11 the end of thrd_create synchronizes with the
    // entry point of the new thread.
    *res = new;
    atomic_thread_fence(memory_order_release);

    atomic_fetch_add(&libc.thread_count, 1);

    // This will (hopefully) start the new thread. It could instantly
    // run to completion and deallocate itself. As such, we can't
    // access new->anything after this point.
    status = zxr_thread_start(&new->zxr_thread,
                              (uintptr_t)new->safe_stack.iov_base,
                              new->safe_stack.iov_len, start, new);

    if (status == ZX_OK) {
        __sanitizer_thread_create_hook(sanitizer_hook,
                                       (thrd_t)new, thrd_success);
        return 0;
    }

    *res = NULL;
    atomic_fetch_sub(&libc.thread_count, 1);

    __sanitizer_thread_create_hook(
        sanitizer_hook, (thrd_t)new,
        status == ZX_ERR_ACCESS_DENIED ? thrd_error : thrd_nomem);

fail_after_alloc:
    deallocate_region(&new->safe_stack_region);
    deallocate_region(&new->unsafe_stack_region);
    deallocate_region(&new->tcb_region);
    return status == ZX_ERR_ACCESS_DENIED ? EPERM : EAGAIN;
}
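// final_exit is reached only via the assembly in finish_exit (hence the
// explicit "final_exit" symbol name and the used attribute).  By then the
// thread is running on the temporary stack carved out of the TCB region,
// so the function must avoid safe-stack and ASan instrumentation while it
// unmaps the stacks it is no longer using.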
static _Noreturn void final_exit(pthread_t self)
    __asm__("final_exit") __attribute__((used));

static __NO_SAFESTACK NO_ASAN void final_exit(pthread_t self) {
    deallocate_region(&self->safe_stack_region);
    deallocate_region(&self->unsafe_stack_region);

    // This deallocates the TCB region too for the detached case.
    // If not detached, pthread_join will deallocate it.
    zxr_thread_exit_unmap_if_detached(&self->zxr_thread, _zx_vmar_root_self(),
                                      (uintptr_t)self->tcb_region.iov_base,
                                      self->tcb_region.iov_len);
}
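// Last code to run on the thread's normal stack: report the exit to the
// sanitizer runtime, turn the exit of the last thread into a process exit,
// and then switch to a temporary stack in the TCB region before jumping to
// final_exit so the normal stacks can be unmapped.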
static NO_ASAN _Noreturn void finish_exit(pthread_t self) {
    __sanitizer_thread_exit_hook(self->sanitizer_hook, (thrd_t)self);

    /* It's impossible to determine whether this is "the last thread"
     * until performing the atomic decrement, since multiple threads
     * could exit at the same time. For the last thread, revert the
     * decrement and unblock signals to give the atexit handlers and
     * stdio cleanup code a consistent state. */
    if (atomic_fetch_sub(&libc.thread_count, 1) == -1) {
        atomic_store(&libc.thread_count, 0);
        exit(0);
    }

    // Switch off the thread's normal stack so it can be freed.  The TCB
    // region stays alive so the pthread_t is still valid for pthread_join.
    // The rest of the region is no longer used for TLS, so it can serve
    // as the small amount of temporary stack needed for the exit calls.

#ifdef __x86_64__
    // The thread descriptor is at the end of the region, so the space
    // before it is available as the temporary stack.
    // The x86-64 ABI requires %rsp % 16 = 8 on entry.
    __asm__("mov %[self], %%rsp\n"
            "and $-16, %%rsp\n"
            "call final_exit\n"
            "# Target receives %[self]" : :
            [self]"D"(self));
#elif defined(__aarch64__)
    // The thread descriptor is at the start of the region, so the rest of
    // the space up to the guard page is available as the temporary stack.
    __asm__("add sp, %[base], %[len]\n"
            "mov x0, %[self]\n"
            "bl final_exit" : :
            [base]"r"(self->tcb_region.iov_base),
            [len]"r"(self->tcb_region.iov_len - PAGE_SIZE),
            [self]"r"(self));
#else
#error what architecture?
#endif
    __builtin_unreachable();
}
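// Runs TLS destructors, thread-specific-data destructors, and dynamic linker
// per-thread cleanup while still on the normal stack, then hands off to
// finish_exit for the stack switch and final teardown.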
_Noreturn void __pthread_exit(void* result) {
    pthread_t self = __pthread_self();

    self->result = result;

    __tls_run_dtors();

    __thread_tsd_run_dtors();

    __dl_thread_cleanup();

    // After this point the sanitizer runtime will tear down its state,
    // so we cannot run any more sanitized code.
    finish_exit(self);
}

weak_alias(__pthread_create, pthread_create);
weak_alias(__pthread_exit, pthread_exit);