1 /*
2 * Copyright (c) 2006-2023, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2021/01/02 bernard the first version
9 * 2023-07-25 Shell Remove usage of rt_hw_interrupt API in the lwp
10 * Coding style: remove multiple `return` in a routine
11 * 2023-08-08 Shell Fix return value of futex(wait); Fix ops that only
12 * FUTEX_PRIVATE is supported currently
13 * 2023-11-03 Shell Add Support for ~FUTEX_PRIVATE
14 * 2023-11-16 xqyjlj Add Support for futex requeue and futex pi
15 */
16 #define __RT_IPC_SOURCE__
17
18 #include "lwp_futex_internal.h"
19 #include "sys/time.h"
20 #include <stdatomic.h>
21
22 struct rt_mutex _glob_futex;
23
lwp_futex_init(void)24 rt_err_t lwp_futex_init(void)
25 {
26 return rt_mutex_init(&_glob_futex, "glob_ftx", RT_IPC_FLAG_PRIO);
27 }
28
/**
 * Acquire the lock guarding a futex's state.
 *
 * Private futexes are serialized by the per-process lwp lock; shared
 * futexes by the global futex mutex. Failure to take the global mutex is
 * treated as fatal.
 */
static void _futex_lock(rt_lwp_t lwp, int op_flags)
{
    if (!(op_flags & FUTEX_PRIVATE))
    {
        rt_err_t rc = lwp_mutex_take_safe(&_glob_futex, RT_WAITING_FOREVER, 0);
        if (rc)
        {
            LOG_E("%s: Should not failed", __func__);
            RT_ASSERT(0);
        }
    }
    else
    {
        LWP_LOCK(lwp);
    }
}
46
/**
 * Release the lock taken by _futex_lock() for the same op_flags.
 */
static void _futex_unlock(rt_lwp_t lwp, int op_flags)
{
    if (!(op_flags & FUTEX_PRIVATE))
    {
        rt_err_t rc = lwp_mutex_release_safe(&_glob_futex);
        if (rc)
        {
            LOG_E("%s: Should not failed", __func__);
            RT_ASSERT(0);
        }
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}
64
65 /**
66 * Destroy a Private FuTeX (pftx)
67 * Note: must have futex address_search_head taken
68 */
_pftx_destroy_locked(void * data)69 static rt_err_t _pftx_destroy_locked(void *data)
70 {
71 rt_err_t ret = -1;
72 rt_futex_t futex = (rt_futex_t)data;
73
74 if (futex)
75 {
76 /**
77 * Brief: Delete the futex from lwp address_search_head
78 *
79 * Note: Critical Section
80 * - the lwp (READ. share by thread)
81 * - the lwp address_search_head (RW. protected by caller. for destroy
82 * routine, it's always safe because it has already taken a write lock
83 * to the lwp.)
84 */
85 lwp_avl_remove(&futex->node,
86 (struct lwp_avl_struct **)futex->node.data);
87
88 /* release object */
89 if (futex->mutex)
90 {
91 rt_mutex_delete(futex->mutex);
92 futex->mutex = RT_NULL;
93 }
94 rt_free(futex);
95 ret = 0;
96 }
97 return ret;
98 }
99
100 /**
101 * Create a Private FuTeX (pftx)
102 * Note: must have futex address_search_head taken
103 */
static rt_futex_t _pftx_create_locked(int *uaddr, struct rt_lwp *lwp)
{
    rt_futex_t futex = RT_NULL;
    struct rt_object *obj = RT_NULL;

    /**
     * Brief: Create a futex under current lwp
     *
     * Note: Critical Section
     * - lwp (READ; share with thread)
     */
    if (lwp)
    {
        futex = (rt_futex_t)rt_malloc(sizeof(struct rt_futex));
        if (futex)
        {
            /* Create a Private FuTeX (pftx) */
            obj = rt_custom_object_create("pftx", (void *)futex,
                                          _pftx_destroy_locked);
            if (!obj)
            {
                rt_free(futex);
                futex = RT_NULL;
            }
            else
            {
                /**
                 * Brief: Add futex to user object tree for resource recycling
                 *
                 * Note: Critical Section
                 * - lwp user object tree (RW; protected by API)
                 * - futex (if the adding is successful, others can find the
                 *   unready futex. However, only the lwp_free will do this,
                 *   and this is protected by the ref taken by the lwp thread
                 *   that the lwp_free will never execute at the same time)
                 */
                if (lwp_user_object_add(lwp, obj))
                {
                    /* this will call a _pftx_destroy_locked, but that's okay */
                    /* NOTE(review): if rt_object_delete() really invokes
                     * _pftx_destroy_locked, the futex is freed there and the
                     * rt_free() below becomes a double free — and futex->node
                     * is still uninitialized at this point, so the AVL remove
                     * in the destroy callback would read garbage. Confirm the
                     * custom-object delete semantics. */
                    rt_object_delete(obj);
                    rt_free(futex);
                    futex = RT_NULL;
                }
                else
                {
                    /* key the node by user address; node.data points back at
                     * the AVL root so the destroy callback can remove it */
                    futex->node.avl_key = (avl_key_t)uaddr;
                    futex->node.data = &lwp->address_search_head;
                    futex->custom_obj = obj;
                    futex->mutex = RT_NULL;
                    rt_list_init(&(futex->waiting_thread));

                    /**
                     * Brief: Insert into futex head
                     *
                     * Note: Critical Section
                     * - lwp address_search_head (RW; protected by caller)
                     */
                    lwp_avl_insert(&futex->node, &lwp->address_search_head);
                }
            }
        }
    }
    return futex;
}
168
169 /**
170 * Get a Private FuTeX (pftx) match the (lwp, uaddr, op)
171 */
_pftx_get(void * uaddr,struct rt_lwp * lwp,int op,rt_err_t * rc)172 static rt_futex_t _pftx_get(void *uaddr, struct rt_lwp *lwp, int op,
173 rt_err_t *rc)
174 {
175 struct lwp_avl_struct *node = RT_NULL;
176 rt_futex_t futex = RT_NULL;
177 rt_err_t error = -1;
178
179 LWP_LOCK(lwp);
180
181 /**
182 * Note: Critical Section
183 * protect lwp address_search_head (READ)
184 */
185 node = lwp_avl_find((avl_key_t)uaddr, lwp->address_search_head);
186 if (node)
187 {
188 futex = rt_container_of(node, struct rt_futex, node);
189 error = 0;
190 }
191 else
192 {
193 /* create a futex according to this uaddr */
194 futex = _pftx_create_locked(uaddr, lwp);
195
196 if (!futex)
197 error = -ENOMEM;
198 else
199 error = 0;
200 }
201 LWP_UNLOCK(lwp);
202
203 *rc = error;
204 return futex;
205 }
206
207 /**
208 * Destroy a Shared FuTeX (pftx)
209 * Note: must have futex address_search_head taken
210 */
_sftx_destroy(void * data)211 static rt_err_t _sftx_destroy(void *data)
212 {
213 rt_err_t ret = -1;
214 rt_futex_t futex = (rt_futex_t)data;
215
216 if (futex)
217 {
218 /* delete it even it's not in the table */
219 futex_global_table_delete(&futex->entry.key);
220 if (futex->mutex)
221 {
222 rt_mutex_delete(futex->mutex);
223 futex->mutex = RT_NULL;
224 }
225 rt_free(futex);
226 ret = 0;
227 }
228 return ret;
229 }
230
231 /**
232 * Create a Shared FuTeX (sftx)
233 */
_sftx_create(struct shared_futex_key * key,struct rt_lwp * lwp)234 static rt_futex_t _sftx_create(struct shared_futex_key *key, struct rt_lwp *lwp)
235 {
236 rt_futex_t futex = RT_NULL;
237 struct rt_object *obj = RT_NULL;
238
239 if (lwp)
240 {
241 futex = (rt_futex_t)rt_calloc(1, sizeof(struct rt_futex));
242 if (futex)
243 {
244 /* create a Shared FuTeX (sftx) */
245 obj = rt_custom_object_create("sftx", (void *)futex, _sftx_destroy);
246 if (!obj)
247 {
248 rt_free(futex);
249 futex = RT_NULL;
250 }
251 else
252 {
253 if (futex_global_table_add(key, futex))
254 {
255 rt_object_delete(obj);
256 rt_free(futex);
257 futex = RT_NULL;
258 }
259 else
260 {
261 futex->mutex = RT_NULL;
262 rt_list_init(&(futex->waiting_thread));
263 futex->custom_obj = obj;
264 }
265 }
266 }
267 }
268 return futex;
269 }
270
271 /**
272 * Get a Shared FuTeX (sftx) match the (lwp, uaddr, op)
273 */
static rt_futex_t _sftx_get(void *uaddr, struct rt_lwp *lwp, int op,
                            rt_err_t *rc)
{
    rt_futex_t futex = RT_NULL;
    struct shared_futex_key key;
    rt_varea_t varea;
    rt_err_t error = -1;

    /* resolve uaddr to a (memory object, offset) key so that different
     * mappings of the same backing page share one futex */
    RD_LOCK(lwp->aspace);
    varea = rt_aspace_query(lwp->aspace, uaddr);
    if (varea)
    {
        key.mobj = varea->mem_obj;
        /* page-granular varea offset combined with the in-page offset */
        key.offset = ((varea->offset) << MM_PAGE_SHIFT) |
                     ((long)uaddr & ((1 << MM_PAGE_SHIFT) - 1));
        RD_UNLOCK(lwp->aspace);

        /* query for the key; FUTEX_PRIVATE is masked out so the global
         * futex mutex is taken */
        _futex_lock(lwp, op & ~FUTEX_PRIVATE);
        error = futex_global_table_find(&key, &futex);
        if (error != RT_EOK)
        {
            /* not found, do allocation */
            futex = _sftx_create(&key, lwp);
            if (!futex)
                error = -ENOMEM;
            else
                error = 0;
        }
        _futex_unlock(lwp, op & ~FUTEX_PRIVATE);
    }
    else
    {
        /* uaddr is not mapped in this address space; error stays -1 */
        RD_UNLOCK(lwp->aspace);
    }

    *rc = error;
    return futex;
}
313
314 /* must have futex address_search_head taken */
_futex_get(void * uaddr,struct rt_lwp * lwp,int op_flags,rt_err_t * rc)315 static rt_futex_t _futex_get(void *uaddr, struct rt_lwp *lwp, int op_flags,
316 rt_err_t *rc)
317 {
318 rt_futex_t futex = RT_NULL;
319
320 if (op_flags & FUTEX_PRIVATE)
321 {
322 futex = _pftx_get(uaddr, lwp, op_flags, rc);
323 }
324 else
325 {
326 futex = _sftx_get(uaddr, lwp, op_flags, rc);
327 }
328
329 return futex;
330 }
331
_suspend_thread_timeout_locked(rt_thread_t thread,rt_futex_t futex,rt_tick_t timeout)332 static rt_err_t _suspend_thread_timeout_locked(rt_thread_t thread,
333 rt_futex_t futex,
334 rt_tick_t timeout)
335 {
336 rt_err_t rc;
337
338 /**
339 * Brief: Add current thread into futex waiting thread list
340 *
341 * Note: Critical Section
342 * - the futex waiting_thread list (RW)
343 */
344 rc = rt_thread_suspend_to_list(thread, &futex->waiting_thread,
345 RT_IPC_FLAG_FIFO, RT_INTERRUPTIBLE);
346
347 if (rc == RT_EOK)
348 {
349 /* start the timer of thread */
350 rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME,
351 &timeout);
352 rt_timer_start(&(thread->thread_timer));
353 rt_set_errno(ETIMEDOUT);
354 }
355
356 return rc;
357 }
358
_suspend_thread_locked(rt_thread_t thread,rt_futex_t futex)359 static rt_err_t _suspend_thread_locked(rt_thread_t thread, rt_futex_t futex)
360 {
361 /**
362 * Brief: Add current thread into futex waiting thread list
363 *
364 * Note: Critical Section
365 * - the futex waiting_thread list (RW)
366 */
367 return rt_thread_suspend_to_list(thread, &futex->waiting_thread,
368 RT_IPC_FLAG_FIFO, RT_INTERRUPTIBLE);
369 }
370
_futex_cmpxchg_value(int * curval,int * uaddr,int uval,int newval)371 rt_inline int _futex_cmpxchg_value(int *curval, int *uaddr, int uval,
372 int newval)
373 {
374 int err = 0;
375
376 if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
377 {
378 err = -EFAULT;
379 goto exit;
380 }
381
382 if (!atomic_compare_exchange_strong(uaddr, &uval, newval))
383 {
384 *curval = uval;
385 err = -EAGAIN;
386 }
387
388 exit:
389 return err;
390 }
391
static int _futex_wait(rt_futex_t futex, struct rt_lwp *lwp, int *uaddr,
                       int value, const struct timespec *timeout, int op_flags)
{
    rt_tick_t to;
    rt_thread_t thread;
    rt_err_t rc = -RT_EINTR;

    /**
     * Brief: Remove current thread from scheduler, besides appends it to
     * the waiting thread list of the futex. If the timeout is specified
     * a timer will be setup for current thread
     *
     * Note: Critical Section
     * - futex.waiting (RW; Protected by lwp_lock)
     * - the local cpu
     */
    _futex_lock(lwp, op_flags);
    /* *uaddr is read directly; sys_futex() has already verified that the
     * address is user-accessible */
    if (*uaddr == value)
    {
        thread = rt_thread_self();

        if (timeout)
        {
            /* convert the relative timespec into system ticks */
            to = timeout->tv_sec * RT_TICK_PER_SECOND;
            to +=
                (timeout->tv_nsec * RT_TICK_PER_SECOND) / NANOSECOND_PER_SECOND;

            /* NOTE(review): if rt_tick_t is an unsigned type this negative
             * check can never fire — confirm the typedef */
            if (to < 0)
            {
                rc = -EINVAL;
                _futex_unlock(lwp, op_flags);
            }
            else
            {
                /* disable preemption so the wakeup cannot race with the
                 * suspend; futex lock is dropped before leaving the
                 * critical section */
                rt_enter_critical();
                rc = _suspend_thread_timeout_locked(thread, futex, to);
                _futex_unlock(lwp, op_flags);
                rt_exit_critical();
            }
        }
        else
        {
            rt_enter_critical();
            rc = _suspend_thread_locked(thread, futex);
            _futex_unlock(lwp, op_flags);
            rt_exit_critical();
        }

        if (rc == RT_EOK)
        {
            /* do schedule */
            rt_schedule();
            /* check errno: the waker/timer records the wakeup reason there;
             * normalize positive errno values to negative return codes */
            rc = rt_get_errno();
            rc = rc > 0 ? -rc : rc;
        }
    }
    else
    {
        /* futex word changed before we could block: report EAGAIN */
        _futex_unlock(lwp, op_flags);
        rc = -EAGAIN;
        rt_set_errno(EAGAIN);
    }

    return rc;
}
458
/**
 * Wake up to `number` threads blocked on the futex.
 *
 * The futex lock is re-taken per dequeue so wakers never hold it across
 * the whole batch.
 *
 * @return the count of threads actually woken
 */
static long _futex_wake(rt_futex_t futex, struct rt_lwp *lwp, int number,
                        int op_flags)
{
    long woken_cnt = 0;

    /**
     * Brief: Wakeup a suspended thread on the futex waiting thread list
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    while (number)
    {
        int dequeued;

        _futex_lock(lwp, op_flags);
        dequeued = (rt_susp_list_dequeue(&futex->waiting_thread, RT_EOK) != 0);
        _futex_unlock(lwp, op_flags);

        if (!dequeued)
            break;

        number--;
        woken_cnt++;
    }

    /* do schedule */
    rt_schedule();
    return woken_cnt;
}
491
492 /**
493 * Brief: Wake up to nr_wake futex1 threads.
494 * If there are more waiters waiting on futex1 than nr_wake,
495 * insert the remaining at most nr_requeue waiters waiting
496 * on futex1 into the waiting queue of futex2.
497 */
static long _futex_requeue(rt_futex_t futex1, rt_futex_t futex2,
                           struct rt_lwp *lwp, int nr_wake, int nr_requeue,
                           int opflags)
{
    long rtn;
    long woken_cnt = 0;
    int is_empty = 0;
    rt_thread_t thread;

    /* requeueing a futex onto itself is invalid */
    if (futex1 == futex2)
    {
        return -EINVAL;
    }

    /**
     * Brief: Wakeup a suspended thread on the futex waiting thread list
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW; the caller holds the futex lock
     *   around this whole routine — see lwp_futex())
     */
    while (nr_wake && !is_empty)
    {
        if (rt_susp_list_dequeue(&futex1->waiting_thread, RT_EOK))
        {
            nr_wake--;
            woken_cnt++;
            is_empty = RT_FALSE;
        }
        else
        {
            is_empty = RT_TRUE;
        }
    }
    rtn = woken_cnt;

    /**
     * Brief: Requeue
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    while (!is_empty && nr_requeue)
    {
        /* the scheduler lock is taken per moved thread so other cores are
         * not locked out for the whole batch */
        rt_sched_lock_level_t slvl;
        rt_sched_lock(&slvl);

        /* moving from one susp list to another */
        is_empty = rt_list_isempty(&(futex1->waiting_thread));

        if (!is_empty)
        {
            /* detach futex1's head waiter and append it to futex2's list,
             * without waking it */
            thread = RT_THREAD_LIST_NODE_ENTRY(futex1->waiting_thread.next);
            rt_list_remove(&RT_THREAD_LIST_NODE(thread));
            rt_list_insert_before(&(futex2->waiting_thread),
                                  &RT_THREAD_LIST_NODE(thread));
            nr_requeue--;
            rtn++;
        }
        rt_sched_unlock(slvl);
    }

    /* do schedule */
    rt_schedule();

    /* return value counts woken plus requeued threads */
    return rtn;
}
564
565 /* timeout argument measured against the CLOCK_REALTIME clock. */
static long _futex_lock_pi(rt_futex_t futex, struct rt_lwp *lwp, int *uaddr,
                           const struct timespec *timeout, int op_flags,
                           rt_bool_t trylock)
{
    int word = 0, nword, cword;
    int tid = 0;
    rt_err_t err = 0;
    rt_thread_t thread = RT_NULL, current_thread = RT_NULL;
    rt_tick_t to = RT_WAITING_FOREVER;

    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
    {
        return -EFAULT;
    }

    current_thread = rt_thread_self();

    _futex_lock(lwp, op_flags);

    /* fetch the futex word; its low bits (FUTEX_TID_MASK) hold the owner
     * TID per the PI-futex protocol */
    lwp_get_from_user(&word, (void *)uaddr, sizeof(int));
    tid = word & FUTEX_TID_MASK;
    if (word == 0)
    {
        /* If the value is 0, then the kernel tries
           to atomically set the futex value to the caller's TID. */
        nword = current_thread->tid;
        if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
        {
            /* lost the race to another locker */
            _futex_unlock(lwp, op_flags);
            return -EAGAIN;
        }
        _futex_unlock(lwp, op_flags);
        return 0;
    }
    else
    {
        /* futex is held: verify the recorded owner TID names a live thread */
        thread = lwp_tid_get_thread_and_inc_ref(tid);
        if (thread == RT_NULL)
        {
            _futex_unlock(lwp, op_flags);
            return -ESRCH;
        }
        /* NOTE(review): the reference is dropped here, yet `thread` is
         * still stored as mutex owner below — confirm the owner cannot
         * exit in between */
        lwp_tid_dec_ref(thread);

        /* advertise that at least one waiter exists */
        nword =
            word | FUTEX_WAITERS;
        if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
        {
            _futex_unlock(lwp, op_flags);
            return -EAGAIN;
        }
        word = nword;
    }

    if (futex->mutex == RT_NULL)
    {
        /* lazily create the kernel mutex backing this PI futex */
        futex->mutex = rt_mutex_create("futexpi", RT_IPC_FLAG_PRIO);
        if (futex->mutex == RT_NULL)
        {
            _futex_unlock(lwp, op_flags);
            return -ENOMEM;
        }

        /* set mutex->owner so priority inheritance boosts the holder */
        rt_spin_lock(&(futex->mutex->spinlock));
        futex->mutex->owner = thread;
        futex->mutex->hold = 1;
        rt_spin_unlock(&(futex->mutex->spinlock));
    }
    if (timeout)
    {
        to = rt_timespec_to_tick(timeout);
    }

    if (trylock)
    {
        /* FUTEX_TRYLOCK_PI never blocks, regardless of any timeout */
        to = RT_WAITING_NO;
    }
    _futex_unlock(lwp, op_flags);

    /* block on the PI mutex outside the futex lock */
    err = rt_mutex_take_interruptible(futex->mutex, to);
    if (err == -RT_ETIMEOUT)
    {
        err = -EDEADLK;
    }

    /* mutex acquired (or failed): publish our TID, keeping WAITERS set;
     * `word` still holds the value we last wrote */
    _futex_lock(lwp, op_flags);
    nword = current_thread->tid | FUTEX_WAITERS;
    if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
    {
        err = -EAGAIN;
    }
    _futex_unlock(lwp, op_flags);

    return err;
}
662
/**
 * Release the PI mutex backing the futex (FUTEX_UNLOCK_PI).
 *
 * @return 0 on success, -EPERM if no PI mutex was ever created for this
 *         futex, otherwise the error from rt_mutex_release()
 */
static long _futex_unlock_pi(rt_futex_t futex, struct rt_lwp *lwp, int op_flags)
{
    rt_bool_t has_mutex;

    /* check for the lazily-created mutex under the futex lock */
    _futex_lock(lwp, op_flags);
    has_mutex = (futex->mutex != RT_NULL);
    _futex_unlock(lwp, op_flags);

    if (!has_mutex)
        return -EPERM;

    return rt_mutex_release(futex->mutex);
}
677
678 #include <syscall_generic.h>
679
_timeout_ignored(int op)680 rt_inline rt_bool_t _timeout_ignored(int op)
681 {
682 /**
683 * if (op &
684 * (FUTEX_WAKE|FUTEX_FD|FUTEX_WAKE_BITSET|FUTEX_TRYLOCK_PI|FUTEX_UNLOCK_PI))
685 * was TRUE `timeout` should be ignored by implementation, according to
686 * POSIX futex(2) manual. since only FUTEX_WAKE is implemented in rt-smart,
687 * only FUTEX_WAKE was omitted currently
688 */
689 return ((op & (FUTEX_WAKE)) || (op & (FUTEX_REQUEUE)) ||
690 (op & (FUTEX_CMP_REQUEUE)) || (op & (FUTEX_UNLOCK_PI)) ||
691 (op & (FUTEX_TRYLOCK_PI)));
692 }
693
sys_futex(int * uaddr,int op,int val,const struct timespec * timeout,int * uaddr2,int val3)694 sysret_t sys_futex(int *uaddr, int op, int val, const struct timespec *timeout,
695 int *uaddr2, int val3)
696 {
697 struct rt_lwp *lwp = RT_NULL;
698 sysret_t ret = 0;
699
700 if (!lwp_user_accessable(uaddr, sizeof(int)))
701 {
702 ret = -EFAULT;
703 }
704 else if (timeout && !_timeout_ignored(op) &&
705 !lwp_user_accessable((void *)timeout, sizeof(struct timespec)))
706 {
707 ret = -EINVAL;
708 }
709 else
710 {
711 lwp = lwp_self();
712 ret = lwp_futex(lwp, uaddr, op, val, timeout, uaddr2, val3);
713 }
714
715 return ret;
716 }
717
718 #define FUTEX_FLAGS (FUTEX_PRIVATE | FUTEX_CLOCK_REALTIME)
lwp_futex(struct rt_lwp * lwp,int * uaddr,int op,int val,const struct timespec * timeout,int * uaddr2,int val3)719 rt_err_t lwp_futex(struct rt_lwp *lwp, int *uaddr, int op, int val,
720 const struct timespec *timeout, int *uaddr2, int val3)
721 {
722 rt_futex_t futex, futex2;
723 rt_err_t rc = 0;
724 int op_type = op & ~FUTEX_FLAGS;
725 int op_flags = op & FUTEX_FLAGS;
726
727 futex = _futex_get(uaddr, lwp, op_flags, &rc);
728 if (!rc)
729 {
730 switch (op_type)
731 {
732 case FUTEX_WAIT:
733 rc = _futex_wait(futex, lwp, uaddr, val, timeout, op_flags);
734 break;
735 case FUTEX_WAKE:
736 rc = _futex_wake(futex, lwp, val, op_flags);
737 break;
738 case FUTEX_REQUEUE:
739 futex2 = _futex_get(uaddr2, lwp, op_flags, &rc);
740 if (!rc)
741 {
742 _futex_lock(lwp, op_flags);
743 rc = _futex_requeue(futex, futex2, lwp, val, (long)timeout,
744 op_flags);
745 _futex_unlock(lwp, op_flags);
746 }
747 break;
748 case FUTEX_CMP_REQUEUE:
749 futex2 = _futex_get(uaddr2, lwp, op_flags, &rc);
750 _futex_lock(lwp, op_flags);
751 if (*uaddr == val3)
752 {
753 rc = 0;
754 }
755 else
756 {
757 rc = -EAGAIN;
758 }
759 if (rc == 0)
760 {
761 rc = _futex_requeue(futex, futex2, lwp, val,
762 (long)timeout, op_flags);
763 }
764 _futex_unlock(lwp, op_flags);
765 break;
766 case FUTEX_LOCK_PI:
767 rc = _futex_lock_pi(futex, lwp, uaddr, timeout, op_flags,
768 RT_FALSE);
769 break;
770 case FUTEX_UNLOCK_PI:
771 rc = _futex_unlock_pi(futex, lwp, op_flags);
772 break;
773 case FUTEX_TRYLOCK_PI:
774 rc = _futex_lock_pi(futex, lwp, uaddr, 0, op_flags, RT_TRUE);
775 break;
776 default:
777 LOG_W("User require op=%d which is not implemented", op);
778 rc = -ENOSYS;
779 break;
780 }
781 }
782
783 return rc;
784 }
785
_fetch_robust_entry(struct robust_list ** entry,struct robust_list ** head,rt_bool_t * is_pi)786 rt_inline int _fetch_robust_entry(struct robust_list **entry,
787 struct robust_list **head, rt_bool_t *is_pi)
788 {
789 unsigned long uentry;
790
791 if (!lwp_user_accessable((void *)head, sizeof(*head)))
792 {
793 return -EFAULT;
794 }
795
796 if (lwp_get_from_user(&uentry, (void *)head, sizeof(*head)) !=
797 sizeof(*head))
798 {
799 return -EFAULT;
800 }
801
802 *entry = (void *)(uentry & ~1UL);
803 *is_pi = uentry & 1;
804
805 return 0;
806 }
807
static int _handle_futex_death(int *uaddr, rt_thread_t thread, rt_bool_t is_pi,
                               rt_bool_t is_pending_op)
{
    int word, cword = 0, nword;
    rt_err_t rc;
    struct rt_lwp *lwp;
    rt_futex_t futex;

    /* Futex address must be 32bit aligned */
    if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
        return -1;

    lwp = thread->lwp;
retry:

    /* the futex word lives in user memory: re-validate on every retry */
    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
    {
        return -1;
    }

    if (lwp_get_from_user(&word, (void *)uaddr, sizeof(*uaddr)) !=
        sizeof(*uaddr))
    {
        return -1;
    }

    /* NOTE(review): rc from _futex_get is never checked — a failed lookup
     * would pass a NULL futex to _futex_wake below; confirm */
    futex = _futex_get(uaddr, lwp, FUTEX_PRIVATE, &rc);
    if (is_pending_op && !is_pi && !word)
    {
        /* an operation was pending on this futex when the thread died:
         * wake one possible waiter and stop */
        _futex_wake(futex, lwp, 1, FUTEX_PRIVATE);
        return 0;
    }

    /* only locks actually owned by the dying thread are marked dead */
    if ((word & FUTEX_TID_MASK) != thread->tid)
        return 0;

    /* keep the WAITERS bit, clear the owner TID, set OWNER_DIED */
    nword = (word & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

    if ((rc = _futex_cmpxchg_value(&cword, uaddr, word, nword)))
    {
        switch (rc)
        {
        case -EFAULT:
            return -1;
        case -EAGAIN:
            /* the word changed under us: yield and re-read */
            rt_schedule();
            goto retry;
        default:
            LOG_W("unknown errno: %d in '%s'", rc, __FUNCTION__);
            return rc;
        }
    }

    /* cmpxchg observed a different value than we read: start over */
    if (cword != word)
        goto retry;

    /* for a non-PI futex with waiters, wake one so it can observe
     * FUTEX_OWNER_DIED */
    if (!is_pi && (word & FUTEX_WAITERS))
        _futex_wake(futex, lwp, 1, FUTEX_PRIVATE);

    return 0;
}
869
870 /**
871 * Brief: Walk thread->robust_list mark
872 * any locks found there dead, and notify any waiters.
873 *
874 * note: very carefully, it's a userspace list!
875 */
void lwp_futex_exit_robust_list(rt_thread_t thread)
{
    struct robust_list *entry = RT_NULL;
    struct robust_list *next_entry = RT_NULL;
    struct robust_list *pending = RT_NULL;
    struct robust_list_head *head;
    /* hard bound on traversal: the user list could be corrupt or cyclic */
    unsigned int limit = 2048;
    rt_bool_t pi, pip, next_pi;
    unsigned long futex_offset;
    int rc;

    head = thread->robust_list;

    /* nothing registered via set_robust_list */
    if (head == RT_NULL)
        return;

    /* fetch the first node (and its PI tag bit) from user memory */
    if (_fetch_robust_entry(&entry, &head->list.next, &pi))
        return;

    if (!lwp_user_accessable((void *)&head->futex_offset,
                             sizeof(head->futex_offset)))
    {
        return;
    }

    /* byte offset from a robust_list node to its futex word */
    if (lwp_get_from_user(&futex_offset, (void *)&head->futex_offset,
                          sizeof(head->futex_offset)) !=
        sizeof(head->futex_offset))
    {
        return;
    }

    /* a lock/unlock op may have been interrupted mid-way; its node is
     * recorded in list_op_pending and handled separately below */
    if (_fetch_robust_entry(&pending, &head->list_op_pending, &pip))
    {
        return;
    }

    while (entry != &head->list)
    {
        /* fetch the next link before this entry's futex is touched */
        rc = _fetch_robust_entry(&next_entry, &entry->next, &next_pi);
        if (entry != pending)
        {
            if (_handle_futex_death((int *)((size_t)entry + futex_offset), thread, pi,
                                    RT_FALSE))
                return;
        }
        if (rc)
            return;
        entry = next_entry;
        pi = next_pi;

        if (!--limit)
            break;
    }

    /* finally process the pending operation, if any */
    if (pending)
    {
        _handle_futex_death((void *)pending + futex_offset, thread, pip,
                            RT_TRUE);
    }
}
937