1 #pragma once
2
3 #include "atomic.h"
4 #include "libc.h"
5 #include "pthread_arch.h"
6 #include <assert.h>
7 #include <errno.h>
8 #include <limits.h>
9 #include <locale.h>
10 #include <pthread.h>
11 #include <signal.h>
12 #include <sys/uio.h>
13 #include <threads.h>
14
15 #include <zircon/stack.h>
16 #include <zircon/tls.h>
17 #include <runtime/thread.h>
18 #include <runtime/tls.h>
19
20 #define pthread __pthread
21
// This is what the thread pointer points to directly. On TLS_ABOVE_TP
// machines, the size of this is part of the ABI known to the compiler
// and linker.
typedef struct {
    // The position of this pointer is part of the ABI on x86.
    // It has the same value as the thread pointer itself.
    uintptr_t tp;
    // Dynamic thread vector used for TLS access (see __tls_get_new below).
    // The tlsdesc assembly relies on its offset; see the static_assert later
    // in this file.
    void** dtv;
} tcbhead_t;
31
// The locations of these fields are part of the ABI known to the compiler.
typedef struct {
    // Sits at ZX_TLS_STACK_GUARD_OFFSET from the thread pointer
    // (enforced by a static_assert later in this file).
    uintptr_t stack_guard;
    // Sits at ZX_TLS_UNSAFE_SP_OFFSET from the thread pointer; holds the
    // thread's unsafe stack pointer (see unsafe_stack in struct pthread).
    uintptr_t unsafe_sp;
} tp_abi_t;

// Opaque node type for the per-thread TLS destructor list (defined elsewhere).
struct tls_dtor;
39
// The libc-internal thread descriptor. The ABI portions (tcbhead_t and
// tp_abi_t) must sit at fixed offsets from the thread pointer, so they are
// placed at the very start or the very end of the struct depending on
// whether TLS is laid out above the thread pointer on this machine.
struct pthread {
#ifndef TLS_ABOVE_TP
    // These must be the very first members.
    tcbhead_t head;
    tp_abi_t abi;
#endif

    // Underlying Zircon runtime thread state.
    zxr_thread_t zxr_thread;

    // The *_region fields describe whole memory regions reserved,
    // including guard pages (for deallocation). safe_stack and
    // unsafe_stack describe just the actual stack block between the
    // guards.
    struct iovec tcb_region;
    struct iovec safe_stack, safe_stack_region;
    struct iovec unsafe_stack, unsafe_stack_region;

    // Registered TLS destructors for this thread.
    struct tls_dtor* tls_dtors;
    // Per-thread values for pthread_key_create keys.
    void* tsd[PTHREAD_KEYS_MAX];
    // NOTE(review): presumably flags whether any tsd[] slot has been
    // written, so destructors can be skipped otherwise — confirm at use site.
    int tsd_used;
    // Storage backing this thread's errno.
    int errno_value;

    void* sanitizer_hook;
    // Entry point and argument from pthread_create; result is the value
    // reported to pthread_join.
    void* start_arg;
    void* (*start)(void*);
    void* result;
    locale_t locale;
    // Per-thread dlerror() message buffer and pending-error flag.
    char* dlerror_buf;
    int dlerror_flag;

#ifdef TLS_ABOVE_TP
    // These must be the very last members.
    tp_abi_t abi;
    tcbhead_t head;
#endif
};
76
// Byte offset from the start of struct pthread to the location the thread
// pointer addresses: the trailing tcbhead_t on TLS_ABOVE_TP machines,
// the start of the struct otherwise.
#ifdef TLS_ABOVE_TP
#define PTHREAD_TP_OFFSET offsetof(struct pthread, head)
#else
#define PTHREAD_TP_OFFSET 0
#endif

// Signed offset of a struct pthread member from the thread pointer itself.
#define TP_OFFSETOF(field) \
    ((ptrdiff_t)offsetof(struct pthread, field) - PTHREAD_TP_OFFSET)
85
// The thread pointer must point exactly at the tcbhead_t.
static_assert(TP_OFFSETOF(head) == 0,
              "ABI tcbhead_t misplaced in struct pthread");

#ifdef ABI_TCBHEAD_SIZE
// On machines where the tail TCB size is ABI, head through the end of the
// struct must be exactly that size.
static_assert((sizeof(struct pthread) -
               offsetof(struct pthread, head)) == ABI_TCBHEAD_SIZE,
              "ABI tcbhead_t misplaced in struct pthread");
#endif

#if defined(__x86_64__) || defined(__aarch64__)
// The tlsdesc.s assembly code assumes this, though it's not part of the ABI.
static_assert(TP_OFFSETOF(head.dtv) == 8, "dtv misplaced in struct pthread");
#endif

// The compiler emits accesses at these fixed offsets from the thread
// pointer for stack-protector and SafeStack support.
static_assert(TP_OFFSETOF(abi.stack_guard) == ZX_TLS_STACK_GUARD_OFFSET,
              "stack_guard not at ABI-mandated offset from thread pointer");
static_assert(TP_OFFSETOF(abi.unsafe_sp) == ZX_TLS_UNSAFE_SP_OFFSET,
              "unsafe_sp not at ABI-mandated offset from thread pointer");
104
pthread_to_tp(struct pthread * thread)105 static inline void* pthread_to_tp(struct pthread* thread) {
106 return (void*)((char*)thread + PTHREAD_TP_OFFSET);
107 }
108
tp_to_pthread(void * tp)109 static inline struct pthread* tp_to_pthread(void* tp) {
110 return (struct pthread*)((char*)tp - PTHREAD_TP_OFFSET);
111 }
112
// A sigset_t with every signal bit set (two 64-bit words of all-ones).
#define SIGALL_SET ((sigset_t*)(const unsigned long long[2]){-1, -1})

// Bits selecting the mutex type in the mutex state word.
#define PTHREAD_MUTEX_MASK (PTHREAD_MUTEX_RECURSIVE | PTHREAD_MUTEX_ERRORCHECK)
// The bit used in the recursive and errorchecking cases, which track thread owners.
#define PTHREAD_MUTEX_OWNED_LOCK_BIT 0x80000000
// Mask for the owner id stored alongside the owned-lock bit; this works
// because thread ids never have the high bit set (see __thread_get_tid).
#define PTHREAD_MUTEX_OWNED_LOCK_MASK 0x7fffffff

// TSD storage for the initial (main) thread.
extern void* __pthread_tsd_main[];
// NOTE(review): size bookkeeping for TSD — exact semantics live at the
// definition site; confirm before relying on it.
extern volatile size_t __pthread_tsd_size;

// Slow path for dynamic TLS access when the DTV must be grown.
void* __tls_get_new(size_t*) ATTR_LIBC_VISIBILITY;
124
__pthread_self(void)125 static inline pthread_t __pthread_self(void) {
126 return tp_to_pthread(zxr_tp_get());
127 }
128
__thrd_current(void)129 static inline thrd_t __thrd_current(void) {
130 return (thrd_t)__pthread_self();
131 }
132
__thread_get_tid(void)133 static inline pid_t __thread_get_tid(void) {
134 // We rely on the fact that the high bit is not set. For now,
135 // let's incur the cost of this check, until we consider the
136 // userspace handle value representation completely baked.
137 pid_t id = zxr_thread_get_handle(&__pthread_self()->zxr_thread);
138 if (id & PTHREAD_MUTEX_OWNED_LOCK_BIT) {
139 __builtin_trap();
140 }
141 return id;
142 }
143
// Internal entry points for the core pthread lifecycle operations.
int __pthread_create(pthread_t* __restrict, const pthread_attr_t* __restrict,
                     void* (*)(void*), void* __restrict) ATTR_LIBC_VISIBILITY;
int __pthread_detach(pthread_t t) ATTR_LIBC_VISIBILITY;
_Noreturn void __pthread_exit(void* result) ATTR_LIBC_VISIBILITY;
int __pthread_join(pthread_t t, void** result) ATTR_LIBC_VISIBILITY;

// Signal n (or all, for -1) threads on a pthread_cond_t or cnd_t.
void __private_cond_signal(void* condvar, int n) ATTR_LIBC_VISIBILITY;

// This is guaranteed to only return 0, EINVAL, or ETIMEDOUT.
int __timedwait(atomic_int*, int, clockid_t, const struct timespec*)
    ATTR_LIBC_VISIBILITY;

// Loading a library can introduce more thread_local variables. Thread
// allocation bases bookkeeping decisions based on the current state
// of thread_locals in the program, so thread creation needs to be
// inhibited by a concurrent dlopen. This lock implements that
// exclusion.
void __thread_allocation_inhibit(void) ATTR_LIBC_VISIBILITY;
void __thread_allocation_release(void) ATTR_LIBC_VISIBILITY;

// Run the destructors for this thread's TSD slots (see tsd[] above).
void __thread_tsd_run_dtors(void) ATTR_LIBC_VISIBILITY;

// Attributes used when pthread_create is given a NULL attr.
#define DEFAULT_PTHREAD_ATTR \
    ((pthread_attr_t){ \
        ._a_stacksize = ZIRCON_DEFAULT_STACK_SIZE, \
        ._a_guardsize = PAGE_SIZE, \
    })

// Allocate the memory for a new thread (TCB and stack regions; see the
// *_region fields in struct pthread). thread_name must be non-NULL,
// enforced via the nonnull attribute.
thrd_t __allocate_thread(size_t guard_size,
                         size_t stack_size,
                         const char* thread_name,
                         char default_name[ZX_MAX_NAME_LEN])
    __attribute__((nonnull(3))) ATTR_LIBC_VISIBILITY;

// Set up the struct pthread for the initial (main) thread.
pthread_t __init_main_thread(zx_handle_t thread_self) ATTR_LIBC_VISIBILITY;

int __clock_gettime(clockid_t, struct timespec*) ATTR_LIBC_VISIBILITY;
182