/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-11-12     Jesven       first version
 * 2023-02-23     Shell        Support sigtimedwait
 * 2023-07-04     Shell        Support siginfo, sigqueue;
 *                             remove lwp_signal_backup/restore() to reduce architecture code;
 *                             update the generation, pending and delivery routines
 * 2023-11-22     Shell        Support job control signals. Fix signal catching when
 *                             some signals are blocked but no further dequeue is applied.
 *                             Add itimer support
 */
#define __RT_IPC_SOURCE__
#define DBG_TAG "lwp.signal"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <rthw.h>
#include <rtthread.h>
#include <string.h>

#include "lwp_internal.h"
#include "sys/signal.h"
#include "syscall_generic.h"

rt_inline rt_err_t valid_signo_check(unsigned long sig)
{
    return sig <= _LWP_NSIG ? 0 : -RT_EINVAL;
}

static lwp_siginfo_t siginfo_create(rt_thread_t current, int signo, int code, lwp_siginfo_ext_t ext)
{
    lwp_siginfo_t siginfo;
    struct rt_lwp *self_lwp;
    rt_thread_t self_thr;

    siginfo = rt_malloc(sizeof(*siginfo));
    if (siginfo)
    {
        siginfo->ksiginfo.signo = signo;
        siginfo->ksiginfo.code = code;
        siginfo->ext = ext;

        self_thr = current;
        self_lwp = current->lwp;
        if (self_lwp)
        {
            siginfo->ksiginfo.from_pid = self_lwp->pid;
            siginfo->ksiginfo.from_tid = self_thr->tid;
        }
        else
        {
            siginfo->ksiginfo.from_pid = 0;
            siginfo->ksiginfo.from_tid = 0;
        }
    }

    return siginfo;
}

rt_inline void siginfo_delete(lwp_siginfo_t siginfo)
{
    if (siginfo->ext)
    {
        rt_free(siginfo->ext);
        siginfo->ext = RT_NULL;
    }

    rt_free(siginfo);
}

rt_inline void _sigorsets(lwp_sigset_t *dset, const lwp_sigset_t *set0, const lwp_sigset_t *set1)
{
    switch (_LWP_NSIG_WORDS)
    {
    case 4:
        dset->sig[3] = set0->sig[3] | set1->sig[3];
        dset->sig[2] = set0->sig[2] | set1->sig[2];
    case 2:
        dset->sig[1] = set0->sig[1] | set1->sig[1];
    case 1:
        dset->sig[0] = set0->sig[0] | set1->sig[0];
    default:
        return;
    }
}

rt_inline void _sigandsets(lwp_sigset_t *dset, const lwp_sigset_t *set0, const lwp_sigset_t *set1)
{
    switch (_LWP_NSIG_WORDS)
    {
    case 4:
        dset->sig[3] = set0->sig[3] & set1->sig[3];
        dset->sig[2] = set0->sig[2] & set1->sig[2];
    case 2:
        dset->sig[1] = set0->sig[1] & set1->sig[1];
    case 1:
        dset->sig[0] = set0->sig[0] & set1->sig[0];
    default:
        return;
    }
}

rt_inline void _signotsets(lwp_sigset_t *dset, const lwp_sigset_t *set)
{
    switch (_LWP_NSIG_WORDS)
    {
    case 4:
        dset->sig[3] = ~set->sig[3];
        dset->sig[2] = ~set->sig[2];
    case 2:
        dset->sig[1] = ~set->sig[1];
    case 1:
        dset->sig[0] = ~set->sig[0];
    default:
        return;
    }
}

rt_inline void _sigaddset(lwp_sigset_t *set, int _sig)
{
    unsigned long sig = _sig - 1;

    if (_LWP_NSIG_WORDS == 1)
    {
        set->sig[0] |= 1UL << sig;
    }
    else
    {
        set->sig[sig / _LWP_NSIG_BPW] |= 1UL << (sig % _LWP_NSIG_BPW);
    }
}

rt_inline void _sigdelset(lwp_sigset_t *set, int _sig)
{
    unsigned long sig = _sig - 1;

    if (_LWP_NSIG_WORDS == 1)
    {
        set->sig[0] &= ~(1UL << sig);
    }
    else
    {
        set->sig[sig / _LWP_NSIG_BPW] &= ~(1UL << (sig % _LWP_NSIG_BPW));
    }
}

rt_inline int _sigisemptyset(lwp_sigset_t *set)
{
    switch (_LWP_NSIG_WORDS)
    {
    case 4:
        return (set->sig[3] | set->sig[2] |
                set->sig[1] | set->sig[0]) == 0;
    case 2:
        return (set->sig[1] | set->sig[0]) == 0;
    case 1:
        return set->sig[0] == 0;
    default:
        return 1;
    }
}

rt_inline int _sigismember(lwp_sigset_t *set, int _sig)
{
    unsigned long sig = _sig - 1;

    if (_LWP_NSIG_WORDS == 1)
    {
        return 1 & (set->sig[0] >> sig);
    }
    else
    {
        return 1 & (set->sig[sig / _LWP_NSIG_BPW] >> (sig % _LWP_NSIG_BPW));
    }
}
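
/*
 * Illustrative sketch (not compiled into this file): how a signal number maps
 * onto the sig[] words handled by the helpers above. Assuming
 * _LWP_NSIG_BPW == 32, signo 33 lands in word (33 - 1) / 32 == 1 at
 * bit (33 - 1) % 32 == 0. Only helpers defined in this file are used.
 */
#if 0
static void _sigset_mapping_example(void)
{
    lwp_sigset_t set;

    memset(&set, 0, sizeof(set));
    _sigaddset(&set, 33);              /* sets sig[1] bit 0 when words are 32-bit */
    RT_ASSERT(_sigismember(&set, 33)); /* the member test reads back the same bit */
    _sigdelset(&set, 33);
    RT_ASSERT(_sigisemptyset(&set));
}
#endif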

rt_inline int _next_signal(lwp_sigset_t *pending, lwp_sigset_t *mask)
{
    unsigned long i, *s, *m, x;
    int sig = 0;

    s = pending->sig;
    m = mask->sig;

    x = *s & ~*m;
    if (x)
    {
        sig = rt_hw_ffz(~x) + 1;
        return sig;
    }

    switch (_LWP_NSIG_WORDS)
    {
    default:
        for (i = 1; i < _LWP_NSIG_WORDS; ++i)
        {
            x = *++s & ~*++m;
            if (!x)
                continue;
            sig = rt_hw_ffz(~x) + i * _LWP_NSIG_BPW + 1;
            break;
        }
        break;

    case 2:
        x = s[1] & ~m[1];
        if (!x)
            break;
        sig = rt_hw_ffz(~x) + _LWP_NSIG_BPW + 1;
        break;

    case 1:
        /* Nothing to do */
        break;
    }

    return sig;
}
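
/*
 * Illustrative sketch (not compiled into this file): _next_signal() returns
 * the lowest-numbered signal that is pending and not present in `mask`, or 0
 * if none qualifies. With SIGINT < SIGTERM, a pending set {SIGINT, SIGTERM}
 * and a mask containing SIGINT yields SIGTERM.
 */
#if 0
static void _next_signal_example(void)
{
    lwp_sigset_t pending, mask;

    memset(&pending, 0, sizeof(pending));
    memset(&mask, 0, sizeof(mask));

    _sigaddset(&pending, SIGINT);
    _sigaddset(&pending, SIGTERM);
    _sigaddset(&mask, SIGINT);      /* SIGINT is masked, so it is skipped */

    RT_ASSERT(_next_signal(&pending, &mask) == SIGTERM);
}
#endif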

#define _SIGQ(tp) (&(tp)->signal.sig_queue)

rt_inline int sigqueue_isempty(lwp_sigqueue_t sigqueue)
{
    return _sigisemptyset(&sigqueue->sigset_pending);
}

rt_inline int sigqueue_ismember(lwp_sigqueue_t sigqueue, int signo)
{
    return _sigismember(&sigqueue->sigset_pending, signo);
}

rt_inline int sigqueue_peek(lwp_sigqueue_t sigqueue, lwp_sigset_t *mask)
{
    return _next_signal(&sigqueue->sigset_pending, mask);
}

rt_inline int sigqueue_examine(lwp_sigqueue_t sigqueue, lwp_sigset_t *pending)
{
    int is_empty = sigqueue_isempty(sigqueue);
    if (!is_empty)
    {
        /* merge the pending set of this queue into the caller's set */
        _sigorsets(pending, pending, &sigqueue->sigset_pending);
    }
    return is_empty;
}

static void sigqueue_enqueue(lwp_sigqueue_t sigqueue, lwp_siginfo_t siginfo)
{
    lwp_siginfo_t idx;
    rt_bool_t inserted = RT_FALSE;
    rt_list_for_each_entry(idx, &sigqueue->siginfo_list, node)
    {
        if (idx->ksiginfo.signo >= siginfo->ksiginfo.signo)
        {
            rt_list_insert_after(&idx->node, &siginfo->node);
            inserted = RT_TRUE;
            break;
        }
    }

    if (!inserted)
        rt_list_insert_before(&sigqueue->siginfo_list, &siginfo->node);

    _sigaddset(&sigqueue->sigset_pending, siginfo->ksiginfo.signo);
    return;
}

/**
 * Dequeue a siginfo matching `signo`, which is expected to exist, and
 * test whether any sibling with the same signo remains queued
 */
static lwp_siginfo_t sigqueue_dequeue(lwp_sigqueue_t sigqueue, int signo)
{
    lwp_siginfo_t found;
    lwp_siginfo_t candidate;
    lwp_siginfo_t next;
    rt_bool_t is_empty;

    found = RT_NULL;
    is_empty = RT_TRUE;
    rt_list_for_each_entry_safe(candidate, next, &sigqueue->siginfo_list, node)
    {
        if (candidate->ksiginfo.signo == signo)
        {
            if (found)
            {
                /* already found */
                is_empty = RT_FALSE;
                break;
            }
            else
            {
                /* found first */
                found = candidate;
                rt_list_remove(&found->node);
            }
        }
        else if (candidate->ksiginfo.signo > signo)
            break;
    }

    if (found && is_empty)
        _sigdelset(&sigqueue->sigset_pending, signo);

    return found;
}

/**
 * Discard all the queued signals matching `signo` in sigqueue
 */
static void sigqueue_discard(lwp_sigqueue_t sigqueue, int signo)
{
    lwp_siginfo_t queuing_si;
    while (sigqueue_ismember(sigqueue, signo))
    {
        queuing_si = sigqueue_dequeue(sigqueue, signo);
        siginfo_delete(queuing_si);
    }
}

/**
 * Discard all the queued signals that are members of sigset
 */
static void sigqueue_discard_sigset(lwp_sigqueue_t sigqueue, lwp_sigset_t *sigset)
{
    lwp_siginfo_t queuing_si;
    lwp_sigset_t mask;
    int signo;

    _signotsets(&mask, sigset);
    while ((signo = sigqueue_peek(sigqueue, &mask)) != 0)
    {
        queuing_si = sigqueue_dequeue(sigqueue, signo);
        siginfo_delete(queuing_si);
    }
}

/* assuming that (void *) is the same width as long */
RT_STATIC_ASSERT(lp_width_same, sizeof(void *) == sizeof(long));

/** translate kernel lwp siginfo to user siginfo_t */
rt_inline void siginfo_k2u(lwp_siginfo_t ksigi, siginfo_t *usigi)
{
    int signo = ksigi->ksiginfo.signo;
    usigi->si_code = ksigi->ksiginfo.code;
    usigi->si_signo = signo;
    usigi->si_pid = ksigi->ksiginfo.from_pid;

    if (ksigi->ext)
    {
        if (signo == SIGCHLD)
        {
            usigi->si_status = ksigi->ext->sigchld.status;
            usigi->si_utime = ksigi->ext->sigchld.utime;
            usigi->si_stime = ksigi->ext->sigchld.stime;
        }
    }

    /* deprecated field */
    usigi->si_errno = 0;
}

/* must be called in a locked context */
rt_inline lwp_sighandler_t _get_sighandler_locked(struct rt_lwp *lwp, int signo)
{
    return lwp->signal.sig_action[signo - 1];
}

static lwp_sigset_t *_mask_block_fn(rt_thread_t thread, const lwp_sigset_t *sigset, lwp_sigset_t *new_set)
{
    _sigorsets(new_set, &thread->signal.sigset_mask, sigset);
    return new_set;
}

static lwp_sigset_t *_mask_unblock_fn(rt_thread_t thread, const lwp_sigset_t *sigset, lwp_sigset_t *new_set)
{
    lwp_sigset_t complement;
    _signotsets(&complement, sigset);
    _sigandsets(new_set, &thread->signal.sigset_mask, &complement);
    return new_set;
}

static lwp_sigset_t *_mask_set_fn(rt_thread_t thread, const lwp_sigset_t *sigset, lwp_sigset_t *new_set)
{
    memcpy(new_set, sigset, sizeof(*sigset));
    return new_set;
}

static lwp_sigset_t *(*_sig_mask_fn[__LWP_SIG_MASK_CMD_WATERMARK])
    (rt_thread_t thread, const lwp_sigset_t *sigset, lwp_sigset_t *new_set) = {
    [LWP_SIG_MASK_CMD_BLOCK] = _mask_block_fn,
    [LWP_SIG_MASK_CMD_UNBLOCK] = _mask_unblock_fn,
    [LWP_SIG_MASK_CMD_SET_MASK] = _mask_set_fn,
};

static void _thread_signal_mask(rt_thread_t thread, lwp_sig_mask_cmd_t how,
                                const lwp_sigset_t *sigset, lwp_sigset_t *oset)
{
    lwp_sigset_t new_set;

    /**
     * Note: POSIX wants this API to be capable of querying the current mask
     * by passing NULL in `sigset`
     */
    if (oset)
        memcpy(oset, &thread->signal.sigset_mask, sizeof(lwp_sigset_t));

    if (sigset)
    {
        _sig_mask_fn[how](thread, sigset, &new_set);

        /* remove un-maskable signals from the set */
        _sigdelset(&new_set, SIGKILL);
        _sigdelset(&new_set, SIGSTOP);

        memcpy(&thread->signal.sigset_mask, &new_set, sizeof(lwp_sigset_t));
    }
}
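
/*
 * Illustrative sketch (not compiled into this file): the `how` command selects
 * one of the _sig_mask_fn[] helpers above, mirroring the semantics of
 * sigprocmask(SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK). Blocking SIGTERM, unblocking
 * it and then restoring the saved set leaves the mask unchanged.
 */
#if 0
static void _thread_signal_mask_example(rt_thread_t thr)
{
    lwp_sigset_t one, old;

    memset(&one, 0, sizeof(one));
    _sigaddset(&one, SIGTERM);

    _thread_signal_mask(thr, LWP_SIG_MASK_CMD_BLOCK, &one, &old);       /* mask |= {SIGTERM} */
    _thread_signal_mask(thr, LWP_SIG_MASK_CMD_UNBLOCK, &one, RT_NULL);  /* mask &= ~{SIGTERM} */
    _thread_signal_mask(thr, LWP_SIG_MASK_CMD_SET_MASK, &old, RT_NULL); /* mask = old */
}
#endif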

void lwp_sigqueue_clear(lwp_sigqueue_t sigq)
{
    lwp_siginfo_t this, next;
    if (!sigqueue_isempty(sigq))
    {
        rt_list_for_each_entry_safe(this, next, &sigq->siginfo_list, node)
        {
            siginfo_delete(this);
        }
    }
}

static void lwp_signal_notify(rt_slist_t *list_head, lwp_siginfo_t siginfo)
{
    rt_slist_t *node;

    rt_slist_for_each(node, list_head)
    {
        struct rt_lwp_notify *n = rt_slist_entry(node, struct rt_lwp_notify, list_node);
        if (n->notify)
        {
            n->notify(n->signalfd_queue, siginfo->ksiginfo.signo);
        }
    }
}

rt_err_t lwp_signal_init(struct lwp_signal *sig)
{
    rt_err_t rc = RT_EOK;

    sig->real_timer = LWP_SIG_INVALID_TIMER;

    memset(&sig->sig_dispatch_thr, 0, sizeof(sig->sig_dispatch_thr));

    memset(&sig->sig_action, 0, sizeof(sig->sig_action));
    memset(&sig->sig_action_nodefer, 0, sizeof(sig->sig_action_nodefer));
    memset(&sig->sig_action_onstack, 0, sizeof(sig->sig_action_onstack));
    memset(&sig->sig_action_restart, 0, sizeof(sig->sig_action_restart));
    memset(&sig->sig_action_siginfo, 0, sizeof(sig->sig_action_siginfo));
    memset(&sig->sig_action_nocldstop, 0, sizeof(sig->sig_action_nocldstop));
    memset(&sig->sig_action_nocldwait, 0, sizeof(sig->sig_action_nocldwait));
    lwp_sigqueue_init(&sig->sig_queue);

    return rc;
}

rt_err_t lwp_signal_detach(struct lwp_signal *signal)
{
    rt_err_t ret = RT_EOK;

    timer_delete(signal->real_timer);
    lwp_sigqueue_clear(&signal->sig_queue);

    return ret;
}

int lwp_thread_signal_suspend_check(rt_thread_t thread, int suspend_flag)
{
    struct rt_lwp *lwp = (struct rt_lwp *)thread->lwp;
    lwp_sigset_t sigmask = thread->signal.sigset_mask;
    int ret = 0;

    _sigaddset(&sigmask, SIGCONT);
    switch (suspend_flag)
    {
    case RT_INTERRUPTIBLE:
        if (sigqueue_peek(_SIGQ(thread), &sigmask))
        {
            break;
        }
        if (thread->lwp && sigqueue_peek(_SIGQ(lwp), &sigmask))
        {
            break;
        }
        ret = 1;
        break;
    case RT_KILLABLE:
        if (sigqueue_ismember(_SIGQ(thread), SIGKILL))
        {
            break;
        }
        if (thread->lwp && sigqueue_ismember(_SIGQ(lwp), SIGKILL))
        {
            break;
        }
        ret = 1;
        break;
    case RT_UNINTERRUPTIBLE:
        ret = 1;
        break;
    default:
        RT_ASSERT(0);
        break;
    }
    return ret;
}

rt_inline rt_bool_t _is_jobctl_signal(rt_lwp_t lwp, int signo)
{
    lwp_sigset_t jobctl_sigset = lwp_sigset_init(LWP_SIG_JOBCTL_SET);

    return lwp_sigismember(&jobctl_sigset, signo);
}

rt_inline rt_bool_t _is_stop_signal(rt_lwp_t lwp, int signo)
{
    lwp_sigset_t stop_sigset = lwp_sigset_init(LWP_SIG_STOP_SET);

    return lwp_sigismember(&stop_sigset, signo);
}

rt_inline rt_bool_t _need_notify_status_changed(rt_lwp_t lwp, int signo)
{
    RT_ASSERT(lwp_sigismember(&lwp_sigset_init(LWP_SIG_JOBCTL_SET), signo));
    return !lwp_sigismember(&lwp->signal.sig_action_nocldstop, SIGCHLD);
}

/**
 * Wake up the waitpid() waiters if any, and try to generate SIGCHLD if it is
 * not explicitly disabled by the user.
 *
 * TODO: This event is always per-process and doesn't make a whole lot of
 * sense for ptracers, who shouldn't consume the state via wait(2) either,
 * but, for backward compatibility, notify the ptracer of the group leader
 * too unless it's gonna be a duplicate.
 */
static void _notify_parent_and_leader(rt_lwp_t child_lwp, rt_thread_t child_thr, int trig_signo, rt_bool_t is_stop)
{
    int si_code;
    lwp_siginfo_ext_t ext;
    rt_lwp_t parent_lwp = child_lwp->parent;

    if (!parent_lwp)
        return;

    /* prepare the event data for the parent to query */
    if (is_stop)
    {
        si_code = CLD_STOPPED;
        child_lwp->lwp_status = LWP_CREATE_STAT_STOPPED(trig_signo);
    }
    else
    {
        si_code = CLD_CONTINUED;
        child_lwp->lwp_status = LWP_CREATE_STAT_CONTINUED;
    }

    /* wake up waiters on waitpid(2) */
    lwp_waitpid_kick(parent_lwp, child_lwp);

    if (_need_notify_status_changed(parent_lwp, trig_signo))
    {
        ext = rt_malloc(sizeof(struct lwp_siginfo_ext));
        if (ext)
        {
            ext->sigchld.status = trig_signo;

            /* TODO: signal usage is not supported */
            ext->sigchld.stime = child_thr->system_time;
            ext->sigchld.utime = child_thr->user_time;
        }

        /* generate SIGCHLD for the parent */
        lwp_signal_kill(parent_lwp, SIGCHLD, si_code, ext);
    }
}

static int _do_signal_wakeup(rt_thread_t thread, int sig);
static rt_err_t _stop_thread_locked(rt_lwp_t self_lwp, rt_thread_t cur_thr, int signo,
                                    lwp_siginfo_t si, lwp_sigqueue_t sq)
{
    rt_err_t error;
    int jobctl_stopped = self_lwp->jobctl_stopped;
    rt_thread_t iter;

    /* race to set up the jobctl stopped flags */
    if (!jobctl_stopped)
    {
        self_lwp->jobctl_stopped = RT_TRUE;
        self_lwp->wait_reap_stp = RT_FALSE;
        rt_list_for_each_entry(iter, &self_lwp->t_grp, sibling)
        {
            if (iter != cur_thr)
                _do_signal_wakeup(iter, signo);
        }
    }

    /**
     * raise the event again so the siblings are able to catch it too.
     * `si` will be discarded when SIGCONT is generated
     */
    sigqueue_enqueue(sq, si);

    /* release the lwp lock so we can happily suspend */
    LWP_UNLOCK(self_lwp);

    rt_set_errno(RT_EOK);

    /* after suspension, only SIGKILL and SIGCONT will wake this thread up */
    error = rt_thread_suspend_with_flag(cur_thr, RT_KILLABLE);
    if (error == RT_EOK)
    {
        rt_schedule();
        error = rt_get_errno();
        error = error > 0 ? -error : error;
    }

    if (!jobctl_stopped &&
        (sigqueue_ismember(_SIGQ(self_lwp), SIGCONT) ||
         sigqueue_ismember(_SIGQ(cur_thr), SIGCONT)))
    {
        /**
         * if we are resumed by a SIGCONT and we are the winner of the race,
         * notify the parent of the incoming event
         */
        _notify_parent_and_leader(self_lwp, cur_thr, SIGCONT, RT_FALSE);
    }

    /* reacquire the lock since we released it before */
    LWP_LOCK(self_lwp);

    return error;
}

static void _catch_signal_locked(rt_lwp_t lwp, rt_thread_t thread, int signo,
                                 lwp_siginfo_t siginfo, lwp_sighandler_t handler,
                                 void *exp_frame)
{
    lwp_sigset_t new_sig_mask;
    lwp_sigset_t save_sig_mask;
    siginfo_t usiginfo;
    siginfo_t *p_usi;

    /* siginfo is needed by the signal action */
    if (_sigismember(&lwp->signal.sig_action_siginfo, signo))
    {
        siginfo_k2u(siginfo, &usiginfo);
        p_usi = &usiginfo;
    }
    else
    {
        p_usi = RT_NULL;
    }

    /**
     * The lock is acquired by the caller. Release it so that we can happily
     * go to the signal handler in user space
     */
    LWP_UNLOCK(lwp);

    siginfo_delete(siginfo);

    /* signal default handler */
    if (handler == LWP_SIG_ACT_DFL)
    {
        lwp_sigset_t ign_sigset;

        ign_sigset = lwp_sigset_init(LWP_SIG_IGNORE_SET);
        if (signo == SIGCONT)
        {
            arch_syscall_set_errno(exp_frame, EINTR, ERESTART);
            arch_thread_signal_enter(signo, p_usi, exp_frame, 0, &thread->signal.sigset_mask);
        }
        else if (!lwp_sigismember(&ign_sigset, signo) && !lwp->sig_protected)
        {
            /* for those signals whose default action is to terminate the process */
            LOG_D("%s: default handler; and exit", __func__);

            /* TODO: coredump if necessary */
            lwp_exit(lwp, LWP_CREATE_STAT_SIGNALED(signo, 0));
        }
        /**
         * otherwise the default action is to ignore the signal,
         * -> then reacquire the lock and return
         */
    }
    else if (handler == LWP_SIG_ACT_IGN)
    {
        /* do nothing */
    }
    else
    {
        /* copy the blocked signal mask from the registered signal action */
        memcpy(&new_sig_mask, &lwp->signal.sig_action_mask[signo - 1], sizeof(new_sig_mask));

        if (!_sigismember(&lwp->signal.sig_action_nodefer, signo))
            _sigaddset(&new_sig_mask, signo);

        _thread_signal_mask(thread, LWP_SIG_MASK_CMD_BLOCK, &new_sig_mask, &save_sig_mask);

        if (_sigismember(&lwp->signal.sig_action_restart, signo))
        {
            arch_syscall_set_errno(exp_frame, EINTR, ERESTART);
        }

        /**
         * enter the user's signal action.
         * Note: p_usi is released before entering the signal action by
         * resetting the kernel sp.
         */
        LOG_D("%s: enter signal handler(signo=%d) at %p", __func__, signo, handler);
        arch_thread_signal_enter(signo, p_usi, exp_frame, handler, &save_sig_mask);

        /* arch_thread_signal_enter() never returns */
        RT_ASSERT(0);
    }

    /* reacquire the lock because we released it before */
    LWP_LOCK(lwp);
}

void lwp_thread_signal_catch(void *exp_frame)
{
    struct rt_thread *thread;
    struct rt_lwp *lwp;
    lwp_sigqueue_t pending;
    lwp_sigset_t *sig_mask;
    int retry_signal_catch;
    int signo;

    thread = rt_thread_self();
    lwp = (struct rt_lwp *)thread->lwp;

    RT_ASSERT(!!lwp);
    LWP_LOCK(lwp);

    do {
        /* if a stopped process resumes, we will retry to catch the signal */
        retry_signal_catch = 0;
        signo = 0;

        /* try to peek a signal which is pending and not blocked by this thread */
        if (!sigqueue_isempty(_SIGQ(thread)))
        {
            pending = _SIGQ(thread);
            sig_mask = &thread->signal.sigset_mask;
            signo = sigqueue_peek(pending, sig_mask);
        }
        if (!signo && !sigqueue_isempty(_SIGQ(lwp)))
        {
            pending = _SIGQ(lwp);
            sig_mask = &thread->signal.sigset_mask;
            signo = sigqueue_peek(pending, sig_mask);
        }

        if (signo)
        {
            lwp_siginfo_t siginfo;
            lwp_sighandler_t handler;

            LOG_D("%s(signo=%d)", __func__, signo);

            siginfo = sigqueue_dequeue(pending, signo);
            RT_ASSERT(siginfo != RT_NULL);
            handler = _get_sighandler_locked(lwp, signo);

            if (_is_stop_signal(lwp, signo) && handler == LWP_SIG_ACT_DFL)
            {
                /* notify the parent process of the status change */
                _notify_parent_and_leader(lwp, thread, signo, RT_TRUE);

                LOG_D("%s: pid=%d stopped", __func__, lwp->pid);
                _stop_thread_locked(lwp, thread, signo, siginfo, pending);
                LOG_D("%s: pid=%d continued", __func__, lwp->pid);

                /* woken up; retry to catch the signals sent to us */
                retry_signal_catch = 1;
            }
            else
            {
                /* do a normal, non-jobctl signal handling */
                _catch_signal_locked(lwp, thread, signo, siginfo, handler, exp_frame);
            }
        }
    } while (retry_signal_catch);

    LWP_UNLOCK(lwp);
}

static int _do_signal_wakeup(rt_thread_t thread, int sig)
{
    int need_schedule;
    rt_sched_lock_level_t slvl;
    if (!_sigismember(&thread->signal.sigset_mask, sig))
    {
        int stat;
        rt_sched_lock(&slvl);
        stat = rt_sched_thread_get_stat(thread);
        if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            if ((stat & RT_SIGNAL_COMMON_WAKEUP_MASK) != RT_SIGNAL_COMMON_WAKEUP_MASK)
            {
                thread->error = RT_EINTR;
                rt_sched_unlock(slvl);

                rt_thread_wakeup(thread);
                need_schedule = 1;
            }
            else if ((sig == SIGKILL || sig == SIGSTOP) &&
                     ((stat & RT_SIGNAL_KILL_WAKEUP_MASK) != RT_SIGNAL_KILL_WAKEUP_MASK))
            {
                thread->error = RT_EINTR;
                rt_sched_unlock(slvl);

                rt_thread_wakeup(thread);
                need_schedule = 1;
            }
            else
            {
                rt_sched_unlock(slvl);
                need_schedule = 0;
            }
        }
        else
        {
            rt_sched_unlock(slvl);
            need_schedule = 0;
        }

        RT_SCHED_DEBUG_IS_UNLOCKED;
    }
    else
        need_schedule = 0;

    return need_schedule;
}

/** find a candidate thread to be notified of the arrival */
static rt_thread_t _signal_find_catcher(struct rt_lwp *lwp, int signo)
{
    rt_thread_t catcher = RT_NULL;
    rt_thread_t candidate;

    candidate = lwp->signal.sig_dispatch_thr[signo - 1];
    if (candidate != RT_NULL && !_sigismember(&candidate->signal.sigset_mask, signo))
    {
        catcher = candidate;
    }
    else
    {
        candidate = rt_thread_self();

        /** Note: lwp of current is a const value that can be safely read */
        if (candidate->lwp == lwp &&
            !_sigismember(&candidate->signal.sigset_mask, signo))
        {
            catcher = candidate;
        }
        else
        {
            rt_list_for_each_entry(candidate, &lwp->t_grp, sibling)
            {
                if (!_sigismember(&candidate->signal.sigset_mask, signo))
                {
                    catcher = candidate;
                    break;
                }
            }

            /* fall back to the main thread */
            if (catcher == RT_NULL)
                catcher = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
        }

        /* reset the cached thread to catcher (even if catcher is the main thread) */
        lwp->signal.sig_dispatch_thr[signo - 1] = catcher;
    }

    return catcher;
}

static int _siginfo_deliver_to_lwp(struct rt_lwp *lwp, lwp_siginfo_t siginfo)
{
    rt_thread_t catcher;

    catcher = _signal_find_catcher(lwp, siginfo->ksiginfo.signo);

    sigqueue_enqueue(&lwp->signal.sig_queue, siginfo);
    return _do_signal_wakeup(catcher, siginfo->ksiginfo.signo);
}

static int _siginfo_deliver_to_thread(rt_thread_t thread, lwp_siginfo_t siginfo)
{
    sigqueue_enqueue(_SIGQ(thread), siginfo);
    return _do_signal_wakeup(thread, siginfo->ksiginfo.signo);
}

rt_inline rt_bool_t _sighandler_is_ignored(struct rt_lwp *lwp, int signo)
{
    rt_bool_t is_ignored;
    lwp_sighandler_t action;
    lwp_sigset_t ign_set = lwp_sigset_init(LWP_SIG_IGNORE_SET);

    action = _get_sighandler_locked(lwp, signo);

    if (action == LWP_SIG_ACT_IGN)
        is_ignored = RT_TRUE;
    else if (action == LWP_SIG_ACT_DFL && _sigismember(&ign_set, signo))
        is_ignored = RT_TRUE;
    else
        is_ignored = RT_FALSE;

    return is_ignored;
}

rt_inline rt_bool_t _sighandler_cannot_caught(struct rt_lwp *lwp, int signo)
{
    return signo == SIGKILL || signo == SIGSTOP;
}

/* called before the signal is delivered to the target process/thread */
static void _before_sending_jobctl_signal(int signo, rt_lwp_t target_lwp, lwp_siginfo_t si)
{
    rt_thread_t thr_iter;
    rt_sched_lock_level_t slvl;
    lwp_sigset_t jobctl_sigset = lwp_sigset_init(LWP_SIG_JOBCTL_SET);

    LWP_ASSERT_LOCKED(target_lwp);

    /**
     * dequeue all the pending jobctl signals (including
     * the one we are adding, since we don't want to pend it)
     */
    sigqueue_discard_sigset(_SIGQ(target_lwp), &jobctl_sigset);

    if (signo == SIGCONT)
    {
        target_lwp->jobctl_stopped = RT_FALSE;
        rt_list_for_each_entry(thr_iter, &target_lwp->t_grp, sibling)
        {
            rt_base_t stat;
            sigqueue_discard_sigset(_SIGQ(thr_iter), &jobctl_sigset);

            /**
             * Note: all stopped threads will be resumed
             */
            rt_sched_lock(&slvl);
            stat = rt_sched_thread_get_stat(thr_iter);
            if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK &&
                (stat & RT_SIGNAL_KILL_WAKEUP_MASK) == 0)
            {
                thr_iter->error = RT_EINTR;

                /**
                 * it doesn't matter if we fail to resume the thread, since we
                 * only care about the event passing, not the ordering, here
                 */
                rt_sched_unlock(slvl);
                rt_thread_wakeup(thr_iter);
            }
            else
            {
                rt_sched_unlock(slvl);
            }
        }
    }
    else
    {
        rt_list_for_each_entry(thr_iter, &target_lwp->t_grp, sibling)
        {
            sigqueue_discard_sigset(_SIGQ(thr_iter), &jobctl_sigset);
        }
    }
}

rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code, lwp_siginfo_ext_t value)
{
    rt_err_t ret = -1;

    lwp_siginfo_t siginfo;
    rt_bool_t terminated;
    rt_bool_t need_schedule;

    /** must be able to be suspended */
    RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);

    if (!lwp || signo < 0 || signo > _LWP_NSIG)
    {
        ret = -RT_EINVAL;
    }
    else if (signo == 0)
    {
        /* the process exists and the current process has privileges */
        ret = 0;
    }
    else
    {
        LOG_D("%s(lwp=%p \"%s\",signo=%ld,code=%ld,value=%ld)",
              __func__, lwp, lwp->cmd, signo, code, value);

        need_schedule = RT_FALSE;

        LWP_LOCK(lwp);
        terminated = lwp->terminated;

        /* short-circuit code for an inactive task or ignored signals */
        if (terminated)
        {
            /* nobody relies on this siginfo, so free the resource */
            if (value)
                rt_free(value);
            ret = 0;
        }
        else
        {
            siginfo = siginfo_create(rt_thread_self(), signo, code, value);

            if (siginfo)
            {
                if (_is_jobctl_signal(lwp, signo))
                    _before_sending_jobctl_signal(signo, lwp, siginfo);

                need_schedule = _siginfo_deliver_to_lwp(lwp, siginfo);
                lwp_signal_notify(&lwp->signalfd_notify_head, siginfo);
                ret = 0;
            }
            else
            {
                LOG_I("%s: siginfo malloc failed", __func__);
                ret = -RT_ENOMEM;
            }
        }

        LWP_UNLOCK(lwp);

        if (need_schedule)
            rt_schedule();
    }
    return ret;
}
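
/*
 * Illustrative sketch (not compiled into this file): a kill(2)-style,
 * process-directed send through lwp_signal_kill(). The caller is assumed to
 * have already resolved `lwp` from a pid; pid lookup and its locking are
 * outside the scope of this file. SI_USER marks the signal as raised by a
 * user process, and passing RT_NULL means there is no extra siginfo payload.
 */
#if 0
static rt_err_t example_process_kill(struct rt_lwp *lwp, int signo)
{
    /* signo == 0 only checks existence/privileges, mirroring kill(pid, 0) */
    return lwp_signal_kill(lwp, signo, SI_USER, RT_NULL);
}
#endif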

static void _signal_action_flag_k2u(int signo, struct lwp_signal *signal, struct lwp_sigaction *act)
{
    long flags = 0;
    if (_sigismember(&signal->sig_action_nodefer, signo))
        flags |= SA_NODEFER;
    if (_sigismember(&signal->sig_action_onstack, signo))
        flags |= SA_ONSTACK;
    if (_sigismember(&signal->sig_action_restart, signo))
        flags |= SA_RESTART;
    if (_sigismember(&signal->sig_action_siginfo, signo))
        flags |= SA_SIGINFO;
    if (_sigismember(&signal->sig_action_nocldstop, signo))
        flags |= SA_NOCLDSTOP;
    if (_sigismember(&signal->sig_action_nocldwait, signo))
        flags |= SA_NOCLDWAIT;

    act->sa_flags = flags;
}

static void _signal_action_flag_u2k(int signo, struct lwp_signal *signal, const struct lwp_sigaction *act)
{
    long flags = act->sa_flags;
    if (flags & SA_NODEFER)
        _sigaddset(&signal->sig_action_nodefer, signo);
    if (flags & SA_ONSTACK)
        _sigaddset(&signal->sig_action_onstack, signo);
    if (flags & SA_RESTART)
        _sigaddset(&signal->sig_action_restart, signo);
    if (flags & SA_SIGINFO)
        _sigaddset(&signal->sig_action_siginfo, signo);
    if (signo == SIGCHLD)
    {
        /* these flags are meaningful only when establishing a handler for SIGCHLD */
        if (flags & SA_NOCLDSTOP)
            _sigaddset(&signal->sig_action_nocldstop, signo);
        if (flags & SA_NOCLDWAIT)
            _sigaddset(&signal->sig_action_nocldwait, signo);
    }

#define _HANDLE_FLAGS (SA_RESTORER | SA_NODEFER | SA_ONSTACK | SA_RESTART | SA_SIGINFO | SA_NOCLDSTOP | SA_NOCLDWAIT)
    if (flags & ~_HANDLE_FLAGS)
        LOG_W("Unhandled flags: 0x%lx", flags & ~_HANDLE_FLAGS);
}

rt_bool_t lwp_sigisign(struct rt_lwp *lwp, int _sig)
{
    unsigned long sig = _sig - 1;

    return lwp->signal.sig_action[sig] == LWP_SIG_ACT_IGN;
}

rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
                           const struct lwp_sigaction *restrict act,
                           struct lwp_sigaction *restrict oact)
{
    lwp_sighandler_t prev_handler;
    lwp_sigqueue_t thread_sigq;
    rt_list_t *thread_list;
    rt_err_t ret = RT_EOK;

    if (lwp)
    {
        /** acquire READ access to lwp */
        LWP_LOCK(lwp);

        if (oact)
        {
            oact->sa_mask = lwp->signal.sig_action_mask[signo - 1];
            oact->__sa_handler._sa_handler = lwp->signal.sig_action[signo - 1];
            oact->sa_restorer = RT_NULL;
            _signal_action_flag_k2u(signo, &lwp->signal, oact);
        }

        if (act)
        {
            /**
             * Note: POSIX.1-2017 requires calls to sigaction() that supply a NULL act
             * argument to succeed, even for signals that cannot be caught or ignored
             */
            if (_sighandler_cannot_caught(lwp, signo))
                ret = -EINVAL;
            else
            {
                prev_handler = _get_sighandler_locked(lwp, signo);
                lwp->signal.sig_action_mask[signo - 1] = act->sa_mask;
                if (act->__sa_handler._sa_handler == SIG_IGN)
                {
                    lwp_sigset_t no_ign_set = lwp_sigset_init(LWP_SIG_NO_IGN_SET);
                    if (!lwp_sigismember(&no_ign_set, signo))
                    {
                        /* except for the unignorable signals, discard them for the process */
                        lwp->signal.sig_action[signo - 1] = LWP_SIG_ACT_IGN;
                    }
                    else
                    {
                        /* POSIX.1: SIG_IGN and SIG_DFL are equivalent for SIGCONT */
                        lwp->signal.sig_action[signo - 1] = LWP_SIG_ACT_DFL;
                    }
                }
                else
                {
                    lwp->signal.sig_action[signo - 1] = act->__sa_handler._sa_handler;
                }

                _signal_action_flag_u2k(signo, &lwp->signal, act);

                /**
                 * Brief: Discard the pending signal if the signal action is set to SIG_IGN
                 *
                 * Note: POSIX.1-2017: Setting a signal action to SIG_IGN for a signal
                 * that is pending shall cause the pending signal to be discarded,
                 * whether or not it is blocked.
                 */
                if (prev_handler != LWP_SIG_ACT_IGN &&
                    _get_sighandler_locked(lwp, signo) == LWP_SIG_ACT_IGN)
                {
                    sigqueue_discard(_SIGQ(lwp), signo);
                    for (thread_list = lwp->t_grp.next;
                         thread_list != &lwp->t_grp;
                         thread_list = thread_list->next)
                    {
                        thread_sigq = _SIGQ(rt_list_entry(thread_list, struct rt_thread, sibling));
                        sigqueue_discard(thread_sigq, signo);
                    }
                }
            }
        }

        LWP_UNLOCK(lwp);
    }
    else
        ret = -EINVAL;

    return ret;
}
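
/*
 * Illustrative sketch (not compiled into this file): installing a SIGINT
 * handler for a process via lwp_signal_action(), roughly what a
 * sigaction(2)-style syscall does after copying the user's struct in.
 * `example_sigint_handler` is a hypothetical user-space handler address.
 */
#if 0
static rt_err_t example_install_sigint(struct rt_lwp *lwp, lwp_sighandler_t example_sigint_handler)
{
    struct lwp_sigaction act, oact;

    memset(&act, 0, sizeof(act));
    act.__sa_handler._sa_handler = example_sigint_handler;
    act.sa_flags = SA_RESTART | SA_SIGINFO; /* restart syscalls, pass siginfo */

    return lwp_signal_action(lwp, SIGINT, &act, &oact);
}
#endif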

rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, lwp_siginfo_ext_t value)
{
    rt_err_t ret = -1;

    struct rt_lwp *lwp;
    lwp_siginfo_t siginfo;
    rt_bool_t need_schedule;

    /** must be able to be suspended */
    RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);

    LOG_D("%s(signo=%d)", __func__, signo);

    if (!thread || signo < 0 || signo >= _LWP_NSIG)
    {
        ret = -RT_EINVAL;
    }
    else if (signo == 0)
    {
        /* the thread exists and the current thread has privileges */
        ret = 0;
    }
    else
    {
        lwp = thread->lwp;
        need_schedule = RT_FALSE;

        RT_ASSERT(lwp);

        LWP_LOCK(lwp);

        if (!lwp)
            ret = -RT_EPERM;
        else if (lwp->terminated || _sighandler_is_ignored(lwp, signo))
            ret = 0;
        else
        {
            siginfo = siginfo_create(rt_thread_self(), signo, code, value);

            if (siginfo)
            {
                need_schedule = _siginfo_deliver_to_thread(thread, siginfo);
                lwp_signal_notify(&lwp->signalfd_notify_head, siginfo);
                ret = 0;
            }
            else
            {
                LOG_I("%s: siginfo malloc failed", __func__);
                ret = -RT_ENOMEM;
            }
        }

        LWP_UNLOCK(lwp);

        if (need_schedule)
            rt_schedule();
    }

    return ret;
}
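
/*
 * Illustrative sketch (not compiled into this file): thread-directed delivery,
 * as a pthread_kill(3)/tgkill(2)-style path would use it. Unlike
 * lwp_signal_kill(), the siginfo is queued on the target thread's own queue.
 */
#if 0
static rt_err_t example_thread_kill(rt_thread_t target)
{
    /* no extra payload, so value is RT_NULL */
    return lwp_thread_signal_kill(target, SIGUSR1, SI_USER, RT_NULL);
}
#endif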

#ifndef ARCH_MM_MMU
void lwp_thread_sighandler_set(int sig, lwp_sighandler_t func)
{
    rt_base_t level;

    if (sig == 0 || sig > _LWP_NSIG)
        return;
    level = rt_hw_interrupt_disable();
    rt_thread_self()->signal_handler[sig - 1] = func;
    rt_hw_interrupt_enable(level);
}
#endif

rt_err_t lwp_thread_signal_mask(rt_thread_t thread, lwp_sig_mask_cmd_t how,
                                const lwp_sigset_t *sigset, lwp_sigset_t *oset)
{
    rt_err_t ret = -1;
    struct rt_lwp *lwp;

    if (thread)
    {
        lwp = (struct rt_lwp *)thread->lwp;
        LWP_LOCK(lwp);

        if (!lwp)
        {
            ret = -RT_EPERM;
        }
        else
        {
            ret = 0;
            _thread_signal_mask(thread, how, sigset, oset);
        }

        LWP_UNLOCK(lwp);
    }
    else
        ret = -RT_EINVAL;

    return ret;
}
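
/*
 * Illustrative sketch (not compiled into this file): a sigprocmask(2)-style
 * call path that blocks SIGUSR1 for the calling thread and returns the old
 * mask through `oldset`.
 */
#if 0
static rt_err_t example_block_sigusr1(lwp_sigset_t *oldset)
{
    lwp_sigset_t newset;

    memset(&newset, 0, sizeof(newset));
    _sigaddset(&newset, SIGUSR1);

    /* SIGKILL/SIGSTOP would be silently dropped from the resulting mask */
    return lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_BLOCK,
                                  &newset, oldset);
}
#endif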

static int _dequeue_signal(rt_thread_t thread, lwp_sigset_t *mask, siginfo_t *usi)
{
    int signo;
    lwp_siginfo_t si;
    struct rt_lwp *lwp;
    lwp_sigset_t *pending;
    lwp_sigqueue_t sigqueue;

    lwp = thread->lwp;
    RT_ASSERT(lwp);

    sigqueue = _SIGQ(thread);
    pending = &sigqueue->sigset_pending;
    signo = _next_signal(pending, mask);
    if (!signo)
    {
        sigqueue = _SIGQ(lwp);
        pending = &sigqueue->sigset_pending;
        signo = _next_signal(pending, mask);
    }

    if (!signo)
        return signo;

    si = sigqueue_dequeue(sigqueue, signo);
    RT_ASSERT(!!si);

    siginfo_k2u(si, usi);
    siginfo_delete(si);

    return signo;
}

rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
                                     siginfo_t *usi, struct timespec *timeout)
{
    rt_err_t ret;
    lwp_sigset_t saved_sigset;
    lwp_sigset_t blocked_sigset;
    lwp_sigset_t dontwait_sigset;
    int sig;
    struct rt_lwp *lwp = thread->lwp;

    /**
     * Brief: POSIX
     * If one of the signals in set is already pending for the calling thread,
     * sigwaitinfo() will return immediately
     */

    /* create a mask of the signals the user doesn't want or cannot catch */
    _sigdelset(sigset, SIGKILL);
    _sigdelset(sigset, SIGSTOP);
    _signotsets(&dontwait_sigset, sigset);

    LWP_LOCK(lwp);
    sig = _dequeue_signal(thread, &dontwait_sigset, usi);
    LWP_UNLOCK(lwp);
    if (sig)
        return sig;

    /**
     * Brief: POSIX
     * If none of the signals specified by set are pending, sigtimedwait() shall
     * wait for the time interval specified in the timespec structure referenced
     * by timeout.
     *
     * Note: If the pending signal arrives before the thread suspends, the suspend
     * operation will return a failure
     */
    _sigandsets(&blocked_sigset, &thread->signal.sigset_mask, &dontwait_sigset);
    _thread_signal_mask(thread, LWP_SIG_MASK_CMD_SET_MASK, &blocked_sigset, &saved_sigset);
    if (timeout)
    {
        rt_tick_t time;
        time = (timeout->tv_sec * RT_TICK_PER_SECOND) + ((timeout->tv_nsec * RT_TICK_PER_SECOND) / NANOSECOND_PER_SECOND);
        /**
         * Brief: POSIX
         * If the timespec structure pointed to by timeout is zero-valued and
         * if none of the signals specified by set are pending, then
         * sigtimedwait() shall return immediately with an error
         */
        if (time == 0)
        {
            /* restore the original mask before returning with an error */
            _thread_signal_mask(thread, LWP_SIG_MASK_CMD_SET_MASK, &saved_sigset, RT_NULL);
            return -EAGAIN;
        }

        rt_enter_critical();
        ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
        rt_timer_control(&(thread->thread_timer),
                         RT_TIMER_CTRL_SET_TIME,
                         &time);
        rt_timer_start(&(thread->thread_timer));
        rt_exit_critical();
    }
    else
    {
        /* suspend in the kernel forever until a signal is received */
        ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
    }

    if (ret == RT_EOK)
    {
        rt_schedule();
        /* is thread->error reliable? */
        if (thread->error == RT_EINTR)
            ret = -EINTR;
        else
            ret = -EAGAIN;
    }
    /* else ret == -EINTR */
    _thread_signal_mask(thread, LWP_SIG_MASK_CMD_SET_MASK, &saved_sigset, RT_NULL);

    LWP_LOCK(lwp);
    sig = _dequeue_signal(thread, &dontwait_sigset, usi);
    LWP_UNLOCK(lwp);

    return sig ? sig : ret;
}
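
/*
 * Illustrative sketch (not compiled into this file): waiting up to one second
 * for SIGUSR1 with lwp_thread_signal_timedwait(). A positive return value is
 * the dequeued signal number; -EAGAIN means the timeout expired, and -EINTR
 * means the wait was interrupted without one of the requested signals.
 */
#if 0
static int example_wait_sigusr1(void)
{
    lwp_sigset_t set;
    siginfo_t info;
    struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

    memset(&set, 0, sizeof(set));
    _sigaddset(&set, SIGUSR1);

    return lwp_thread_signal_timedwait(rt_thread_self(), &set, &info, &ts);
}
#endif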

void lwp_thread_signal_pending(rt_thread_t thread, lwp_sigset_t *pending)
{
    struct rt_lwp *lwp;
    lwp = thread->lwp;

    if (lwp)
    {
        memset(pending, 0, sizeof(*pending));

        LWP_LOCK(lwp);
        sigqueue_examine(_SIGQ(thread), pending);
        sigqueue_examine(_SIGQ(lwp), pending);
        LWP_UNLOCK(lwp);

        _sigandsets(pending, pending, &thread->signal.sigset_mask);
    }
}
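
/*
 * Illustrative sketch (not compiled into this file): a sigpending(2)-style
 * query. The result is the union of the thread and process pending sets,
 * restricted to the signals currently blocked by the calling thread.
 */
#if 0
static void example_query_pending(void)
{
    lwp_sigset_t pending;

    lwp_thread_signal_pending(rt_thread_self(), &pending);
    if (_sigismember(&pending, SIGUSR1))
    {
        LOG_D("SIGUSR1 is blocked and pending");
    }
}
#endif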

rt_err_t lwp_pgrp_signal_kill(rt_processgroup_t pgrp, long signo, long code,
                              lwp_siginfo_ext_t value)
{
    struct rt_lwp *lwp;
    rt_err_t rc = 0;

    PGRP_ASSERT_LOCKED(pgrp);

    rc = valid_signo_check(signo);
    if (pgrp && !rc)
    {
        rt_list_for_each_entry(lwp, &pgrp->process, pgrp_node)
        {
            lwp_signal_kill(lwp, signo, code, value);
        }
    }

    return rc;
}

struct kill_all_param
{
    long signo;
    long code;
    lwp_siginfo_ext_t value;
};

static int _kill_each(pid_t pid, void *data)
{
    struct kill_all_param *param = data;
    rt_lwp_t lwp;
    rt_err_t error;

    lwp = lwp_from_pid_locked(pid);
    if (lwp && !lwp->sig_protected)
    {
        error = lwp_signal_kill(lwp, param->signo, param->code, param->value);
    }
    else
    {
        error = RT_EOK;
    }

    return error;
}

rt_err_t lwp_signal_kill_all(long signo, long code, lwp_siginfo_ext_t value)
{
    struct kill_all_param buf =
    {
        .signo = signo,
        .code = code,
        .value = value,
    };

    return lwp_pid_for_each(_kill_each, &buf);
}