1 /* Linuxthreads - a simple clone()-based implementation of Posix */
2 /* threads for Linux. */
3 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
4 /* and Pavel Krauz (krauz@fsid.cvut.cz). */
5 /* */
6 /* This program is free software; you can redistribute it and/or */
7 /* modify it under the terms of the GNU Library General Public License */
8 /* as published by the Free Software Foundation; either version 2 */
9 /* of the License, or (at your option) any later version. */
10 /* */
11 /* This program is distributed in the hope that it will be useful, */
12 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
13 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
14 /* GNU Library General Public License for more details. */
15
16 /* Condition variables */
17
18 #include <errno.h>
19 #include <sched.h>
20 #include <stddef.h>
21 #include <string.h>
22 #include <sys/time.h>
23 #include "pthread.h"
24 #include "internals.h"
25 #include "spinlock.h"
26 #include "queue.h"
27 #include "restart.h"
28
pthread_cond_init(pthread_cond_t * cond,const pthread_condattr_t * cond_attr attribute_unused)29 int pthread_cond_init(pthread_cond_t *cond,
30 const pthread_condattr_t *cond_attr attribute_unused)
31 {
32 __pthread_init_lock(&cond->__c_lock);
33 cond->__c_waiting = NULL;
34 return 0;
35 }
libpthread_hidden_def(pthread_cond_init)36 libpthread_hidden_def(pthread_cond_init)
37
38 int pthread_cond_destroy(pthread_cond_t *cond)
39 {
40 if (cond->__c_waiting != NULL) return EBUSY;
41 return 0;
42 }
libpthread_hidden_def(pthread_cond_destroy)43 libpthread_hidden_def(pthread_cond_destroy)
44
45 /* Function called by pthread_cancel to remove the thread from
46 waiting on a condition variable queue. */
47
48 static int cond_extricate_func(void *obj, pthread_descr th)
49 {
50 volatile pthread_descr self = thread_self();
51 pthread_cond_t *cond = obj;
52 int did_remove = 0;
53
54 __pthread_lock(&cond->__c_lock, self);
55 did_remove = remove_from_queue(&cond->__c_waiting, th);
56 __pthread_unlock(&cond->__c_lock);
57
58 return did_remove;
59 }
60
/* Wait on COND.  MUTEX must be held by the caller; it is released
   while the thread sleeps and re-acquired before returning.  This is
   a cancellation point: if cancelled while waiting, the thread
   re-acquires MUTEX and exits with PTHREAD_CANCELED. */
int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
  volatile pthread_descr self = thread_self();
  pthread_extricate_if extr;
  int already_canceled = 0;
  int spurious_wakeup_count;

  /* Check whether the mutex is locked and owned by this thread.
     Note the ownership test only applies to mutex kinds other than
     "timed" and "adaptive" -- those kinds are exempted from the
     check here. */
  if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP
      && mutex->__m_kind != PTHREAD_MUTEX_ADAPTIVE_NP
      && mutex->__m_owner != self)
    return EINVAL;

  /* Set up extrication interface: lets pthread_cancel pull this
     thread off the condvar queue safely. */
  extr.pu_object = cond;
  extr.pu_extricate_func = cond_extricate_func;

  /* Register extrication interface.  p_condvar_avail is cleared
     first: a signaller sets it to 1 to mark a wakeup that belongs to
     us, distinguishing real wakeups from spurious restarts. */
  THREAD_SETMEM(self, p_condvar_avail, 0);
  __pthread_set_own_extricate_if(self, &extr);

  /* Atomically enqueue thread for waiting, but only if it is not
     canceled. If the thread is canceled, then it will fall through the
     suspend call below, and then call pthread_exit without
     having to worry about whether it is still on the condition variable queue.
     This depends on pthread_cancel setting p_canceled before calling the
     extricate function. */

  __pthread_lock(&cond->__c_lock, self);
  if (!(THREAD_GETMEM(self, p_canceled)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
    enqueue(&cond->__c_waiting, self);
  else
    already_canceled = 1;
  __pthread_unlock(&cond->__c_lock);

  if (already_canceled) {
    __pthread_set_own_extricate_if(self, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  /* Release the user's mutex only after we are safely on the queue,
     so a concurrent signal cannot be missed. */
  __pthread_mutex_unlock(mutex);

  /* Sleep until a wakeup that is really ours arrives: either a
     signaller set p_condvar_avail, or we were woken for cancellation
     with cancellation enabled.  All other restarts are spurious and
     are counted so they can be re-posted below. */
  spurious_wakeup_count = 0;
  while (1)
    {
      suspend(self);
      if (THREAD_GETMEM(self, p_condvar_avail) == 0
	  && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
	      || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
	{
	  /* Count resumes that don't belong to us. */
	  spurious_wakeup_count++;
	  continue;
	}
      break;
    }

  /* Off the queue now; the extricate hook is no longer needed. */
  __pthread_set_own_extricate_if(self, 0);

  /* Check for cancellation again, to provide correct cancellation
     point behavior */

  if (THREAD_GETMEM(self, p_woken_by_cancel)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    THREAD_SETMEM(self, p_woken_by_cancel, 0);
    /* POSIX requires the mutex to be re-acquired before acting on
       cancellation. */
    __pthread_mutex_lock(mutex);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  /* Put back any resumes we caught that don't belong to us. */
  while (spurious_wakeup_count--)
    restart(self);

  __pthread_mutex_lock(mutex);
  return 0;
}
libpthread_hidden_def(pthread_cond_wait)
139
/* Timed variant of the wait: like pthread_cond_wait, but gives up and
   returns ETIMEDOUT once ABSTIME has passed.  (Despite the function's
   name, ABSTIME is an absolute deadline; timedsuspend consumes it
   directly -- TODO confirm against timedsuspend's definition.) */
static int
pthread_cond_timedwait_relative(pthread_cond_t *cond,
				pthread_mutex_t *mutex,
				const struct timespec * abstime)
{
  volatile pthread_descr self = thread_self();
  int already_canceled = 0;
  pthread_extricate_if extr;
  int spurious_wakeup_count;

  /* Check whether the mutex is locked and owned by this thread.
     The ownership test is skipped for the "timed" and "adaptive"
     mutex kinds, matching pthread_cond_wait. */
  if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP
      && mutex->__m_kind != PTHREAD_MUTEX_ADAPTIVE_NP
      && mutex->__m_owner != self)
    return EINVAL;

  /* Set up extrication interface so pthread_cancel can dequeue us. */
  extr.pu_object = cond;
  extr.pu_extricate_func = cond_extricate_func;

  /* Register extrication interface.  Clear p_condvar_avail first; a
     signaller sets it to mark a wakeup that belongs to us. */
  THREAD_SETMEM(self, p_condvar_avail, 0);
  __pthread_set_own_extricate_if(self, &extr);

  /* Enqueue to wait on the condition and check for cancellation.
     Enqueue only if not already cancelled; this relies on
     pthread_cancel setting p_canceled before running the extricate
     function (see pthread_cond_wait). */
  __pthread_lock(&cond->__c_lock, self);
  if (!(THREAD_GETMEM(self, p_canceled)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
    enqueue(&cond->__c_waiting, self);
  else
    already_canceled = 1;
  __pthread_unlock(&cond->__c_lock);

  if (already_canceled) {
    __pthread_set_own_extricate_if(self, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  /* Release the user's mutex only after we are safely queued. */
  __pthread_mutex_unlock(mutex);

  spurious_wakeup_count = 0;
  while (1)
    {
      /* timedsuspend returns 0 on timeout, nonzero on restart. */
      if (!timedsuspend(self, abstime)) {
	int was_on_queue;

	/* __pthread_lock will queue back any spurious restarts that
	   may happen to it. */

	__pthread_lock(&cond->__c_lock, self);
	was_on_queue = remove_from_queue(&cond->__c_waiting, self);
	__pthread_unlock(&cond->__c_lock);

	if (was_on_queue) {
	  /* We timed out before anyone signalled us: clean timeout. */
	  __pthread_set_own_extricate_if(self, 0);
	  __pthread_mutex_lock(mutex);
	  return ETIMEDOUT;
	}

	/* We were already dequeued, so a signaller committed to waking
	   us between the timeout and the remove_from_queue above.
	   Eat the outstanding restart() from the signaller */
	suspend(self);
      }

      if (THREAD_GETMEM(self, p_condvar_avail) == 0
	  && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
	      || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
	{
	  /* Count resumes that don't belong to us. */
	  spurious_wakeup_count++;
	  continue;
	}
      break;
    }

  /* Off the queue; extricate hook no longer needed. */
  __pthread_set_own_extricate_if(self, 0);

  /* The remaining logic is the same as in other cancellable waits,
     such as pthread_join sem_wait or pthread_cond wait. */

  if (THREAD_GETMEM(self, p_woken_by_cancel)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    THREAD_SETMEM(self, p_woken_by_cancel, 0);
    /* Re-acquire the mutex before exiting, as POSIX requires. */
    __pthread_mutex_lock(mutex);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  /* Put back any resumes we caught that don't belong to us. */
  while (spurious_wakeup_count--)
    restart(self);

  __pthread_mutex_lock(mutex);
  return 0;
}
233
/* Wait on COND with an absolute deadline ABSTIME; MUTEX must be held
   by the caller.  Returns 0 on signal, ETIMEDOUT on timeout, EINVAL
   on a bad mutex.  This is a cancellation point. */
int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
			   const struct timespec * abstime)
{
  /* Plain direct call; the stale "indirect call through pointer"
     comment that used to sit here no longer applied. */
  return pthread_cond_timedwait_relative(cond, mutex, abstime);
}
libpthread_hidden_def(pthread_cond_timedwait)
241
242 int pthread_cond_signal(pthread_cond_t *cond)
243 {
244 pthread_descr th;
245
246 __pthread_lock(&cond->__c_lock, NULL);
247 th = dequeue(&cond->__c_waiting);
248 __pthread_unlock(&cond->__c_lock);
249 if (th != NULL) {
250 th->p_condvar_avail = 1;
251 WRITE_MEMORY_BARRIER();
252 restart(th);
253 }
254 return 0;
255 }
libpthread_hidden_def(pthread_cond_signal)256 libpthread_hidden_def(pthread_cond_signal)
257
258 int pthread_cond_broadcast(pthread_cond_t *cond)
259 {
260 pthread_descr tosignal, th;
261
262 __pthread_lock(&cond->__c_lock, NULL);
263 /* Copy the current state of the waiting queue and empty it */
264 tosignal = cond->__c_waiting;
265 cond->__c_waiting = NULL;
266 __pthread_unlock(&cond->__c_lock);
267 /* Now signal each process in the queue */
268 while ((th = dequeue(&tosignal)) != NULL) {
269 th->p_condvar_avail = 1;
270 WRITE_MEMORY_BARRIER();
271 restart(th);
272 }
273 return 0;
274 }
libpthread_hidden_def(pthread_cond_broadcast)275 libpthread_hidden_def(pthread_cond_broadcast)
276
277 int pthread_condattr_init(pthread_condattr_t *attr)
278 {
279 memset (attr, '\0', sizeof (*attr));
280 return 0;
281 }
libpthread_hidden_def(pthread_condattr_init)282 libpthread_hidden_def(pthread_condattr_init)
283
/* Destroy ATTR.  Condition attributes hold no resources, so there is
   nothing to release. */
int pthread_condattr_destroy(pthread_condattr_t *attr attribute_unused)
{
  return 0;
}
libpthread_hidden_def(pthread_condattr_destroy)
289
/* Report the process-shared setting of ATTR.  Only process-private
   condition variables are supported (see pthread_condattr_setpshared),
   so that is always what is returned. */
int pthread_condattr_getpshared (const pthread_condattr_t *attr attribute_unused, int *pshared)
{
  *pshared = PTHREAD_PROCESS_PRIVATE;
  return 0;
}
295
/* Set the process-shared attribute of ATTR.  Only
   PTHREAD_PROCESS_PRIVATE is implemented; PTHREAD_PROCESS_SHARED is a
   valid request that is not supported, and anything else is invalid. */
int pthread_condattr_setpshared (pthread_condattr_t *attr attribute_unused, int pshared)
{
  switch (pshared) {
  case PTHREAD_PROCESS_PRIVATE:
    return 0;
  case PTHREAD_PROCESS_SHARED:
    /* For now it is not possible to share a condition variable. */
    return ENOSYS;
  default:
    return EINVAL;
  }
}
307
/* Return in *CLOCK_ID the clock stored in ATTR.  The clock ID lives
   in COND_NWAITERS_SHIFT bits starting at bit 1 of the attribute's
   value word, matching the encoding written by
   pthread_condattr_setclock (bit 0 appears reserved for other
   attribute state -- the setter masks around it). */
int pthread_condattr_getclock (const pthread_condattr_t *attr, clockid_t *clock_id)
{
  *clock_id = (((((const struct pthread_condattr *) attr)->value) >> 1)
	       & ((1 << COND_NWAITERS_SHIFT) - 1));
  return 0;
}
314
/* Store CLOCK_ID (the clock used for pthread_cond_timedwait deadlines)
   in ATTR.  Returns EINVAL for unsupported clocks. */
int pthread_condattr_setclock (pthread_condattr_t *attr, clockid_t clock_id)
{
  /* Only a few clocks are allowed.  CLOCK_REALTIME is always allowed.
     CLOCK_MONOTONIC only if the kernel has the necessary support.  */
  if (clock_id == CLOCK_MONOTONIC)
    {
#ifndef __ASSUME_POSIX_TIMERS
# ifdef __NR_clock_getres
      /* Check whether the clock is available.
	 avail: 0 = not yet probed, 1 = available, -1 = unavailable.
	 The probe result is cached in a static for later calls. */
      static int avail;

      if (avail == 0)
	{
	  struct timespec ts;

	  INTERNAL_SYSCALL_DECL (err);
	  int val;
#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_clock_getres_time64)
	  val = INTERNAL_SYSCALL (clock_getres_time64, err, 2, CLOCK_MONOTONIC, &ts);
#else
	  val = INTERNAL_SYSCALL (clock_getres, err, 2, CLOCK_MONOTONIC, &ts);
#endif
	  avail = INTERNAL_SYSCALL_ERROR_P (val, err) ? -1 : 1;
	}

      if (avail < 0)
# endif
	/* Not available.  (Without __NR_clock_getres this return is
	   unconditional: the clock cannot be probed at all.)  */
	return EINVAL;
#endif
    }
  else if (clock_id != CLOCK_REALTIME)
    /* If more clocks are allowed some day the storing of the clock ID
       in the pthread_cond_t structure needs to be adjusted.  */
    return EINVAL;

  /* Encode the clock into bits 1..COND_NWAITERS_SHIFT of the value
     word, preserving all other bits (same layout as read back by
     pthread_condattr_getclock).  */
  int *valuep = &((struct pthread_condattr *) attr)->value;

  *valuep = ((*valuep & ~(((1 << COND_NWAITERS_SHIFT) - 1) << 1))
	     | (clock_id << 1));

  return 0;
}
358