1 /*
2 * Copyright (c) 2006-2023, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date           Author       Notes
8 * 2019-10-16     zhangjun     first version
9 * 2021-02-20     lizhirui     fix warning
10 * 2023-06-26     shell        clear ref to parent on waitpid()
11 *                             Remove recycling of lwp on waitpid() and leave it to defunct routine
12 * 2023-07-27     shell        Move the detach of children process on parent exit to lwp_terminate.
13 *                             Make lwp_from_pid locked by caller to avoid possible use-after-free
14 *                             error
15 * 2023-10-27     shell        Format codes of sys_exit(). Fix the data racing where lock is missed
16 *                             Add reference on pid/tid, so the resource is not freed while using.
17 *                             Add support for waitpid(options=WNOHANG)
18 * 2023-11-16     xqyjlj       Fix the case where pid is 0
19 * 2023-11-17     xqyjlj       add process group and session support
20 * 2023-11-24     shell        Support of waitpid(options=WNOTRACED|WCONTINUED);
21 *                             Reimplement the waitpid with a wait queue method, and fixup problem
22 *                             with waitpid(pid=-1)/waitpid(pid=-pgid)/waitpid(pid=0) that only one
23 *                             process can be traced while waiter suspend
24 * 2024-01-25     shell        porting to new sched API
25 */
26
27 /* includes scheduler related API */
28 #define __RT_IPC_SOURCE__
29
30 /* for waitpid, we are compatible with the GNU extension */
31 #define _GNU_SOURCE
32
33 #define DBG_TAG "lwp.pid"
34 #define DBG_LVL DBG_INFO
35 #include <rtdbg.h>
36
37 #include "lwp_internal.h"
38
39 #include <rthw.h>
40 #include <rtthread.h>
41 #include <dfs_file.h>
42 #include <unistd.h>
43 #include <stdio.h> /* rename() */
44 #include <stdlib.h>
45 #include <sys/stat.h>
46 #include <sys/statfs.h> /* statfs() */
47 #include <stdatomic.h>
48
49 #ifdef ARCH_MM_MMU
50 #include "lwp_user_mm.h"
51 #endif
52
53 #ifdef RT_USING_DFS_PROCFS
54 #include "proc.h"
55 #include "procfs.h"
56 #endif
57
58 #define PID_MAX 10000
59
60 #define PID_CT_ASSERT(name, x) \
61 struct assert_##name {char ary[2 * (x) - 1];}
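/*
 * How PID_CT_ASSERT works: when the condition `x` is false, the array size
 * evaluates to -1 and the struct declaration fails to compile; when it is
 * true, the struct simply carries a one-byte array. The two asserts below
 * pin RT_LWP_MAX_NR inside (1, PID_MAX).
 */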
62
63 PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
64 PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);
65
66 static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
67 static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
68 static int lwp_pid_ary_alloced = 0;
69 static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
70 static pid_t current_pid = 0;
71 static struct rt_mutex pid_mtx;
72 static struct rt_wqueue _pid_emptyq;
73
74 int lwp_pid_init(void)
75 {
76 rt_wqueue_init(&_pid_emptyq);
77 rt_mutex_init(&pid_mtx, "pidmtx", RT_IPC_FLAG_PRIO);
78 return 0;
79 }
80
81 int lwp_pid_wait_for_empty(int wait_flags, rt_tick_t to)
82 {
83 int error;
84
85 if (wait_flags == RT_INTERRUPTIBLE)
86 {
87 error = rt_wqueue_wait_interruptible(&_pid_emptyq, 0, to);
88 }
89 else
90 {
91 error = rt_wqueue_wait_killable(&_pid_emptyq, 0, to);
92 }
93 return error;
94 }
95
96 void lwp_pid_lock_take(void)
97 {
98 LWP_DEF_RETURN_CODE(rc);
99
100 rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
101 /* should never fail */
102 RT_ASSERT(rc == RT_EOK);
103 RT_UNUSED(rc);
104 }
105
106 void lwp_pid_lock_release(void)
107 {
108 /* should never fail */
109 if (lwp_mutex_release_safe(&pid_mtx) != RT_EOK)
110 RT_ASSERT(0);
111 }
112
113 struct pid_foreach_param
114 {
115 int (*cb)(pid_t pid, void *data);
116 void *data;
117 };
118
119 static int _before_cb(struct lwp_avl_struct *node, void *data)
120 {
121 struct pid_foreach_param *param = data;
122 pid_t pid = node->avl_key;
123 return param->cb(pid, param->data);
124 }
125
126 int lwp_pid_for_each(int (*cb)(pid_t pid, void *data), void *data)
127 {
128 int error;
129 struct pid_foreach_param buf =
130 {
131 .cb = cb,
132 .data = data,
133 };
134
135 lwp_pid_lock_take();
136 error = lwp_avl_traversal(lwp_pid_root, _before_cb, &buf);
137 lwp_pid_lock_release();
138
139 return error;
140 }
141
142 struct lwp_avl_struct *lwp_get_pid_ary(void)
143 {
144 return lwp_pid_ary;
145 }
146
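/*
 * Allocate a pid slot with the pid lock held. A free AVL node is taken from
 * the recycle list, or from the static array while unused slots remain; the
 * pid value itself is searched upwards from current_pid and wraps back to 1,
 * so recently released pids are not reused immediately. Returns 0 when no
 * pid is available.
 */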
147 static pid_t lwp_pid_get_locked(void)
148 {
149 struct lwp_avl_struct *p;
150 pid_t pid = 0;
151
152 p = lwp_pid_free_head;
153 if (p)
154 {
155 lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
156 }
157 else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
158 {
159 p = lwp_pid_ary + lwp_pid_ary_alloced;
160 lwp_pid_ary_alloced++;
161 }
162 if (p)
163 {
164 int found_noused = 0;
165
166 RT_ASSERT(p->data == RT_NULL);
167 for (pid = current_pid + 1; pid < PID_MAX; pid++)
168 {
169 if (!lwp_avl_find(pid, lwp_pid_root))
170 {
171 found_noused = 1;
172 break;
173 }
174 }
175 if (!found_noused)
176 {
177 for (pid = 1; pid <= current_pid; pid++)
178 {
179 if (!lwp_avl_find(pid, lwp_pid_root))
180 {
181 found_noused = 1;
182 break;
183 }
184 }
185 }
186 p->avl_key = pid;
187 lwp_avl_insert(p, &lwp_pid_root);
188 current_pid = pid;
189 }
190 return pid;
191 }
192
193 static void lwp_pid_put_locked(pid_t pid)
194 {
195 struct lwp_avl_struct *p;
196
197 if (pid == 0)
198 {
199 return;
200 }
201
202 p = lwp_avl_find(pid, lwp_pid_root);
203 if (p)
204 {
205 p->data = RT_NULL;
206 lwp_avl_remove(p, &lwp_pid_root);
207 p->avl_right = lwp_pid_free_head;
208 lwp_pid_free_head = p;
209 }
210 }
211
212 #ifdef RT_USING_DFS_PROCFS
213 rt_inline void _free_proc_dentry(rt_lwp_t lwp)
214 {
215 char pid_str[64] = {0};
216
217 rt_snprintf(pid_str, 64, "%d", lwp->pid);
218 pid_str[63] = 0;
219 proc_remove_dentry(pid_str, 0);
220 }
221 #else
222 #define _free_proc_dentry(lwp)
223 #endif
224
225 void lwp_pid_put(struct rt_lwp *lwp)
226 {
227 _free_proc_dentry(lwp);
228
229 lwp_pid_lock_take();
230 lwp_pid_put_locked(lwp->pid);
231 if (lwp_pid_root == AVL_EMPTY)
232 {
233 rt_wqueue_wakeup_all(&_pid_emptyq, RT_NULL);
234 /* refuse any new pid allocation now */
235 }
236 else
237 {
238 lwp_pid_lock_release();
239 }
240
241 /* reset pid field */
242 lwp->pid = 0;
243 /* clear reference */
244 lwp_ref_dec(lwp);
245 }
246
247 static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
248 {
249 struct lwp_avl_struct *p;
250
251 p = lwp_avl_find(pid, lwp_pid_root);
252 if (p)
253 {
254 p->data = lwp;
255 lwp_ref_inc(lwp);
256
257 #ifdef RT_USING_DFS_PROCFS
258 if (pid)
259 {
260 proc_pid(pid);
261 }
262 #endif
263 }
264 }
265
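/* close every file descriptor the process still holds, from the highest fd
 * downwards, releasing each fd table slot after the close */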
266 static void __exit_files(struct rt_lwp *lwp)
267 {
268 int fd = lwp->fdt.maxfd - 1;
269
270 while (fd >= 0)
271 {
272 struct dfs_file *d;
273
274 d = lwp->fdt.fds[fd];
275 if (d)
276 {
277 dfs_file_close(d);
278 fdt_fd_release(&lwp->fdt, fd);
279 }
280 fd--;
281 }
282 }
283
284 void lwp_user_object_lock_init(struct rt_lwp *lwp)
285 {
286 rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
287 }
288
289 void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
290 {
291 rt_mutex_detach(&lwp->object_mutex);
292 }
293
294 void lwp_user_object_lock(struct rt_lwp *lwp)
295 {
296 if (lwp)
297 {
298 rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
299 }
300 else
301 {
302 RT_ASSERT(0);
303 }
304 }
305
306 void lwp_user_object_unlock(struct rt_lwp *lwp)
307 {
308 if (lwp)
309 {
310 rt_mutex_release(&lwp->object_mutex);
311 }
312 else
313 {
314 RT_ASSERT(0);
315 }
316 }
317
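/*
 * Track a kernel object owned by this process. Each lwp keeps an AVL tree
 * keyed by the object address; adding the object bumps its per-lwp reference
 * count so that lwp_user_object_clear() can reclaim it on process exit.
 */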
318 int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
319 {
320 int ret = -1;
321
322 if (lwp && object)
323 {
324 lwp_user_object_lock(lwp);
325 if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
326 {
327 struct lwp_avl_struct *node;
328
329 node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
330 if (node)
331 {
332 rt_atomic_add(&object->lwp_ref_count, 1);
333 node->avl_key = (avl_key_t)object;
334 lwp_avl_insert(node, &lwp->object_root);
335 ret = 0;
336 }
337 }
338 lwp_user_object_unlock(lwp);
339 }
340 return ret;
341 }
342
343 static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
344 {
345 rt_err_t ret = -1;
346 rt_object_t object;
347
348 if (!lwp || !node)
349 {
350 return ret;
351 }
352 object = (rt_object_t)node->avl_key;
353 object->lwp_ref_count--;
354 if (object->lwp_ref_count == 0)
355 {
356 /* remove from kernel object list */
357 switch (object->type)
358 {
359 case RT_Object_Class_Semaphore:
360 ret = rt_sem_delete((rt_sem_t)object);
361 break;
362 case RT_Object_Class_Mutex:
363 ret = rt_mutex_delete((rt_mutex_t)object);
364 break;
365 case RT_Object_Class_Event:
366 ret = rt_event_delete((rt_event_t)object);
367 break;
368 case RT_Object_Class_MailBox:
369 ret = rt_mb_delete((rt_mailbox_t)object);
370 break;
371 case RT_Object_Class_MessageQueue:
372 ret = rt_mq_delete((rt_mq_t)object);
373 break;
374 case RT_Object_Class_Timer:
375 ret = rt_timer_delete((rt_timer_t)object);
376 break;
377 case RT_Object_Class_Custom:
378 ret = rt_custom_object_destroy(object);
379 break;
380 default:
381 LOG_E("input object type(%d) error", object->type);
382 break;
383 }
384 }
385 else
386 {
387 ret = 0;
388 }
389 lwp_avl_remove(node, &lwp->object_root);
390 rt_free(node);
391 return ret;
392 }
393
394 rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
395 {
396 rt_err_t ret = -1;
397
398 if (lwp && object)
399 {
400 struct lwp_avl_struct *node;
401
402 lwp_user_object_lock(lwp);
403 node = lwp_avl_find((avl_key_t)object, lwp->object_root);
404 ret = _object_node_delete(lwp, node);
405 lwp_user_object_unlock(lwp);
406 }
407 return ret;
408 }
409
410 void lwp_user_object_clear(struct rt_lwp *lwp)
411 {
412 struct lwp_avl_struct *node;
413
414 lwp_user_object_lock(lwp);
415 while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
416 {
417 _object_node_delete(lwp, node);
418 }
419 lwp_user_object_unlock(lwp);
420 }
421
422 static int _object_dup(struct lwp_avl_struct *node, void *arg)
423 {
424 rt_object_t object;
425 struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;
426
427 object = (rt_object_t)node->avl_key;
428 lwp_user_object_add(dst_lwp, object);
429 return 0;
430 }
431
432 void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
433 {
434 lwp_user_object_lock(src_lwp);
435 lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
436 lwp_user_object_unlock(src_lwp);
437 }
438
439 rt_lwp_t lwp_create(rt_base_t flags)
440 {
441 pid_t pid;
442 rt_lwp_t new_lwp = rt_calloc(1, sizeof(struct rt_lwp));
443
444 if (new_lwp)
445 {
446 /* minimal setup of lwp object */
447 new_lwp->ref = 1;
448 #ifdef RT_USING_SMP
449 new_lwp->bind_cpu = RT_CPUS_NR;
450 #endif
451 new_lwp->exe_file = RT_NULL;
452 rt_list_init(&new_lwp->t_grp);
453 rt_list_init(&new_lwp->pgrp_node);
454 rt_list_init(&new_lwp->timer);
455 lwp_user_object_lock_init(new_lwp);
456 rt_wqueue_init(&new_lwp->wait_queue);
457 rt_wqueue_init(&new_lwp->waitpid_waiters);
458 lwp_signal_init(&new_lwp->signal);
459 rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);
460
461 if (flags & LWP_CREATE_FLAG_NOTRACE_EXEC)
462 new_lwp->did_exec = RT_TRUE;
463
464 /* lwp with pid */
465 if (flags & LWP_CREATE_FLAG_ALLOC_PID)
466 {
467 lwp_pid_lock_take();
468 pid = lwp_pid_get_locked();
469 if (pid == 0)
470 {
471 lwp_user_object_lock_destroy(new_lwp);
472 rt_free(new_lwp);
473 new_lwp = RT_NULL;
474 LOG_E("%s: pid slots are full", __func__);
475 }
476 else
477 {
478 new_lwp->pid = pid;
479 lwp_pid_set_lwp_locked(pid, new_lwp);
480 }
481 lwp_pid_lock_release();
482 }
483 rt_memset(&new_lwp->rt_rusage, 0, sizeof(new_lwp->rt_rusage));
484
485 if (flags & LWP_CREATE_FLAG_INIT_USPACE)
486 {
487 rt_err_t error = lwp_user_space_init(new_lwp, 0);
488 if (error)
489 {
490 lwp_pid_put(new_lwp);
491 lwp_user_object_lock_destroy(new_lwp);
492 rt_free(new_lwp);
493 new_lwp = RT_NULL;
494 LOG_E("%s: failed to initialize user space", __func__);
495 }
496 }
497 }
498
499 LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
500 return new_lwp;
501 }
502
503 /** when the reference count reaches 0, the lwp can be released */
504 void lwp_free(struct rt_lwp* lwp)
505 {
506 rt_processgroup_t group = RT_NULL;
507
508 if (lwp == RT_NULL)
509 {
510 return;
511 }
512
513 /**
514 * Brief: Recycle the lwp when reference is cleared
515 *
516 * Note: Critical Section
517 * - lwp (RW. there is no other writer/reader competing with lwp_free, since
518 * all references are cleared)
519 */
520 LOG_D("lwp free: %p", lwp);
521 rt_free(lwp->exe_file);
522 group = lwp_pgrp_find(lwp_pgid_get_byprocess(lwp));
523 if (group)
524 lwp_pgrp_remove(group, lwp);
525
526 LWP_LOCK(lwp);
527
528 if (lwp->args != RT_NULL)
529 {
530 #ifndef ARCH_MM_MMU
531 lwp->args_length = RT_NULL;
532 #ifndef ARCH_MM_MPU
533 rt_free(lwp->args);
534 #endif /* not defined ARCH_MM_MPU */
535 #endif /* ARCH_MM_MMU */
536 lwp->args = RT_NULL;
537 }
538
539 lwp_user_object_clear(lwp);
540 lwp_user_object_lock_destroy(lwp);
541
542 /* free data section */
543 if (lwp->data_entry != RT_NULL)
544 {
545 #ifdef ARCH_MM_MMU
546 rt_free_align(lwp->data_entry);
547 #else
548 #ifdef ARCH_MM_MPU
549 rt_lwp_umap_user(lwp, lwp->text_entry, 0);
550 rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
551 #else
552 rt_free_align(lwp->data_entry);
553 #endif /* ARCH_MM_MPU */
554 #endif /* ARCH_MM_MMU */
555 lwp->data_entry = RT_NULL;
556 }
557
558 /* free text section */
559 if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
560 {
561 if (lwp->text_entry)
562 {
563 LOG_D("lwp text free: %p", lwp->text_entry);
564 #ifndef ARCH_MM_MMU
565 rt_free((void*)lwp->text_entry);
566 #endif /* not defined ARCH_MM_MMU */
567 lwp->text_entry = RT_NULL;
568 }
569 }
570
571 #ifdef ARCH_MM_MMU
572 lwp_unmap_user_space(lwp);
573 #endif
574 timer_list_free(&lwp->timer);
575
576 LWP_UNLOCK(lwp);
577 RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
578 rt_mutex_detach(&lwp->lwp_lock);
579
580 /**
581 * the pid must have been released before entering lwp_free(),
582 * otherwise this is a data race
583 */
584 RT_ASSERT(lwp->pid == 0);
585 rt_free(lwp);
586 }
587
588 rt_inline rt_noreturn
589 void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
590 {
591 LWP_LOCK(lwp);
592 lwp->rt_rusage.ru_stime.tv_sec += thread->system_time / RT_TICK_PER_SECOND;
593 lwp->rt_rusage.ru_stime.tv_usec += thread->system_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
594 lwp->rt_rusage.ru_utime.tv_sec += thread->user_time / RT_TICK_PER_SECOND;
595 lwp->rt_rusage.ru_utime.tv_usec += thread->user_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
596 rt_list_remove(&thread->sibling);
597 LWP_UNLOCK(lwp);
598 lwp_futex_exit_robust_list(thread);
599
600 /**
601 * Note: the tid tree always holds a reference to the thread, hence the tid must
602 * be released before the cleanup of the thread
603 */
604 lwp_tid_put(thread->tid);
605 thread->tid = 0;
606
607 rt_thread_delete(thread);
608 rt_schedule();
609 while (1) ;
610 }
611
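/*
 * Honour the clear_child_tid contract (typically set up via a Linux-style
 * set_tid_address/clone(CLONE_CHILD_CLEARTID) in user space): write 0 to the
 * registered user address and wake any futex waiter on it, which is what a
 * user-space pthread_join() usually blocks on.
 */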
612 rt_inline void _clear_child_tid(rt_thread_t thread)
613 {
614 if (thread->clear_child_tid)
615 {
616 int t = 0;
617 int *clear_child_tid = thread->clear_child_tid;
618
619 thread->clear_child_tid = RT_NULL;
620 lwp_put_to_user(clear_child_tid, &t, sizeof t);
621 sys_futex(clear_child_tid, FUTEX_WAKE, 1, RT_NULL, RT_NULL, 0);
622 }
623 }
624
625 void lwp_exit(rt_lwp_t lwp, lwp_status_t status)
626 {
627 rt_thread_t thread;
628
629 if (!lwp)
630 {
631 LOG_W("%s: lwp should not be null", __func__);
632 return ;
633 }
634
635 thread = rt_thread_self();
636 RT_ASSERT((struct rt_lwp *)thread->lwp == lwp);
637 LOG_D("process(lwp.pid=%d) exit", lwp->pid);
638
639 #ifdef ARCH_MM_MMU
640 _clear_child_tid(thread);
641
642 LWP_LOCK(lwp);
643 /**
644 * Brief: only one thread should call exit_group(),
645 * but we cannot ensure that at run-time
646 */
647 lwp->lwp_status = status;
648 LWP_UNLOCK(lwp);
649
650 lwp_terminate(lwp);
651 #else
652 rt_thread_t main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
653 if (main_thread == thread)
654 {
655 rt_thread_t sub_thread;
656 rt_list_t *list;
657
658 lwp_terminate(lwp);
659
660 /* delete all subthreads */
661 while ((list = thread->sibling.prev) != &lwp->t_grp)
662 {
663 sub_thread = rt_list_entry(list, struct rt_thread, sibling);
664 rt_list_remove(&sub_thread->sibling);
665 rt_thread_delete(sub_thread);
666 }
667 lwp->lwp_ret = status;
668 }
669 #endif /* ARCH_MM_MMU */
670
671 _thread_exit(lwp, thread);
672 }
673
674 void lwp_thread_exit(rt_thread_t thread, int status)
675 {
676 rt_thread_t header_thr;
677 struct rt_lwp *lwp;
678
679 LOG_D("%s", __func__);
680
681 RT_ASSERT(thread == rt_thread_self());
682 lwp = (struct rt_lwp *)thread->lwp;
683 RT_ASSERT(lwp != RT_NULL);
684
685 #ifdef ARCH_MM_MMU
686 _clear_child_tid(thread);
687
688 LWP_LOCK(lwp);
689 header_thr = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
690 if (header_thr == thread && thread->sibling.prev == &lwp->t_grp)
691 {
692 /**
693 * if this is the last thread in the process, treat its exit as a normal process exit.
694 * This is reasonable since a trap event exits through lwp_exit()
695 */
696 lwp->lwp_status = LWP_CREATE_STAT_EXIT(status);
697 LWP_UNLOCK(lwp);
698
699 lwp_terminate(lwp);
700 }
701 else
702 {
703 LWP_UNLOCK(lwp);
704 }
705 #endif /* ARCH_MM_MMU */
706
707 _thread_exit(lwp, thread);
708 }
709
710 /** @note the reference is not for synchronization, but for the release of resources. The synchronization is done through the lwp & pid locks */
711 int lwp_ref_inc(struct rt_lwp *lwp)
712 {
713 int ref;
714 ref = rt_atomic_add(&lwp->ref, 1);
715 LOG_D("%s(%p(%s)): before %d", __func__, lwp, lwp->cmd, ref);
716
717 return ref;
718 }
719
720 int lwp_ref_dec(struct rt_lwp *lwp)
721 {
722 int ref;
723
724 ref = rt_atomic_add(&lwp->ref, -1);
725 LOG_D("%s(lwp=%p,lwp->cmd=%s): before ref=%d", __func__, lwp, lwp->cmd, ref);
726
727 if (ref == 1)
728 {
729 struct rt_channel_msg msg;
730
731 if (lwp->debug)
732 {
733 memset(&msg, 0, sizeof msg);
734 rt_raw_channel_send(gdb_server_channel(), &msg);
735 }
736
737 #ifndef ARCH_MM_MMU
738 #ifdef RT_LWP_USING_SHM
739 lwp_shm_lwp_free(lwp);
740 #endif /* RT_LWP_USING_SHM */
741 #endif /* not defined ARCH_MM_MMU */
742 lwp_free(lwp);
743 }
744 else
745 {
746 /* reference must be a positive integer */
747 RT_ASSERT(ref > 1);
748 }
749
750 return ref;
751 }
752
753 struct rt_lwp* lwp_from_pid_raw_locked(pid_t pid)
754 {
755 struct lwp_avl_struct *p;
756 struct rt_lwp *lwp = RT_NULL;
757
758 p = lwp_avl_find(pid, lwp_pid_root);
759 if (p)
760 {
761 lwp = (struct rt_lwp *)p->data;
762 }
763
764 return lwp;
765 }
766
767 struct rt_lwp* lwp_from_pid_locked(pid_t pid)
768 {
769 struct rt_lwp* lwp;
770 lwp = pid ? lwp_from_pid_raw_locked(pid) : lwp_self();
771 return lwp;
772 }
773
774 pid_t lwp_to_pid(struct rt_lwp* lwp)
775 {
776 if (!lwp)
777 {
778 return 0;
779 }
780 return lwp->pid;
781 }
782
783 char* lwp_pid2name(int32_t pid)
784 {
785 struct rt_lwp *lwp;
786 char* process_name = RT_NULL;
787
788 lwp_pid_lock_take();
789 lwp = lwp_from_pid_locked(pid);
790 if (lwp)
791 {
792 process_name = strrchr(lwp->cmd, '/');
793 process_name = process_name? process_name + 1: lwp->cmd;
794 }
795 lwp_pid_lock_release();
796
797 return process_name;
798 }
799
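/*
 * Resolve a process name to its pid by scanning the static pid slot array
 * and comparing `name` with the basename of each process's executable path.
 * Processes whose main thread is already in the CLOSE state are skipped;
 * returns 0 when nothing matches.
 */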
800 pid_t lwp_name2pid(const char *name)
801 {
802 int idx;
803 pid_t pid = 0;
804 rt_thread_t main_thread;
805 char* process_name = RT_NULL;
806 rt_sched_lock_level_t slvl;
807
808 lwp_pid_lock_take();
809 for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
810 {
811 /* 0 is reserved */
812 struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;
813
814 if (lwp)
815 {
816 process_name = strrchr(lwp->exe_file, '/');
817 process_name = process_name? process_name + 1: lwp->cmd;
818 if (!rt_strncmp(name, process_name, RT_NAME_MAX))
819 {
820 main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
821 rt_sched_lock(&slvl);
822 if (!(rt_sched_thread_get_stat(main_thread) == RT_THREAD_CLOSE))
823 {
824 pid = lwp->pid;
825 }
826 rt_sched_unlock(slvl);
827 }
828 }
829 }
830 lwp_pid_lock_release();
831 return pid;
832 }
833
834 int lwp_getpid(void)
835 {
836 rt_lwp_t lwp = lwp_self();
837 return lwp ? lwp->pid : 1;
838 // return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
839 }
840
841 rt_inline void _update_ru(struct rt_lwp *child, struct rt_lwp *self_lwp, struct rusage *uru)
842 {
843 struct rusage rt_rusage;
844 if (uru != RT_NULL)
845 {
846 rt_rusage.ru_stime.tv_sec = child->rt_rusage.ru_stime.tv_sec;
847 rt_rusage.ru_stime.tv_usec = child->rt_rusage.ru_stime.tv_usec;
848 rt_rusage.ru_utime.tv_sec = child->rt_rusage.ru_utime.tv_sec;
849 rt_rusage.ru_utime.tv_usec = child->rt_rusage.ru_utime.tv_usec;
850 lwp_data_put(self_lwp, uru, &rt_rusage, sizeof(*uru));
851 }
852 }
853
854 /* do statistical summary and reap the child if necessary */
855 static rt_err_t _stats_and_reap_child(rt_lwp_t child, rt_thread_t cur_thr,
856 struct rt_lwp *self_lwp, int *ustatus,
857 int options, struct rusage *uru)
858 {
859 int lwp_stat = child->lwp_status;
860
861 /* report statistical data to process */
862 _update_ru(child, self_lwp, uru);
863
864 if (child->terminated && !(options & WNOWAIT))
865 {
866 /** Reap the child process if it has exited */
867 LOG_D("func %s: child detached", __func__);
868 lwp_pid_put(child);
869 lwp_children_unregister(self_lwp, child);
870 }
871
872 if (ustatus)
873 lwp_data_put(self_lwp, ustatus, &lwp_stat, sizeof(*ustatus));
874
875 return RT_EOK;
876 }
877
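/* sentinel used by the reap helpers below: a matching child exists but has
 * not reported any event yet, so the caller may block, or return 0 under
 * WNOHANG */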
878 #define HAS_CHILD_BUT_NO_EVT (-1024)
879
880 /* check whether the process has already terminated */
881 static sysret_t _query_event_from_lwp(rt_lwp_t child, rt_thread_t cur_thr, rt_lwp_t self_lwp,
882 int options, int *status)
883 {
884 sysret_t rc;
885
886 LWP_LOCK(child);
887 if (child->terminated)
888 {
889 rc = child->pid;
890 }
891 else if ((options & WSTOPPED) && child->jobctl_stopped && !child->wait_reap_stp)
892 {
893 child->wait_reap_stp = 1;
894 rc = child->pid;
895 }
896 else
897 {
898 rc = HAS_CHILD_BUT_NO_EVT;
899 }
900 LWP_UNLOCK(child);
901
902 LOG_D("%s(child_pid=%d ('%s'), stopped=%d) => %d", __func__, child->pid, child->cmd, child->jobctl_stopped, rc);
903 return rc;
904 }
905
906 /* verify that the process is a child of the caller, and reap it */
907 static pid_t _verify_child_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp,
908 pid_t wait_pid, int options, int *ustatus,
909 struct rusage *uru)
910 {
911 sysret_t rc;
912 struct rt_lwp *child;
913
914 /* check whether pid refers to a valid child */
915 lwp_pid_lock_take();
916 child = lwp_from_pid_locked(wait_pid);
917 if (!child)
918 rc = -EINVAL;
919 else if (child->parent != self_lwp)
920 rc = -ESRCH;
921 else
922 rc = wait_pid;
923
924 lwp_pid_lock_release();
925
926 if (rc > 0)
927 {
928 rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
929 if (rc > 0)
930 {
931 _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
932 }
933 }
934 return rc;
935 }
936
937 /* try to reap any child */
938 static pid_t _reap_any_child_pid(rt_thread_t cur_thr, rt_lwp_t self_lwp, pid_t pair_pgid,
939 int options, int *ustatus, struct rusage *uru)
940 {
941 sysret_t rc = -ECHILD;
942 struct rt_lwp *child;
943
944 LWP_LOCK(self_lwp);
945 child = self_lwp->first_child;
946
947 /* find an exited child, if any */
948 while (child)
949 {
950 if (pair_pgid && child->pgid != pair_pgid)
951 { child = child->sibling; continue; } /* skip children of other process groups without stalling the loop */
952
953 rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
954 if (rc > 0)
955 break;
956
957 child = child->sibling;
958 }
959 LWP_UNLOCK(self_lwp);
960
961 if (rc > 0)
962 {
963 _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
964 }
965 return rc;
966 }
967
968 rt_err_t lwp_waitpid_kick(rt_lwp_t parent, rt_lwp_t self_lwp)
969 {
970 /* the waker provides the message mainly through its lwp_status */
971 rt_wqueue_wakeup(&parent->waitpid_waiters, self_lwp);
972 return RT_EOK;
973 }
974
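/* per-waiter context: the wait queue node plus the caller's waitpid()
 * options and, once woken, the child (waker) lwp that produced the event */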
975 struct waitpid_handle {
976 struct rt_wqueue_node wq_node;
977 int options;
978 rt_lwp_t waker_lwp;
979 };
980
981 /* the IPC message is set up and the parent is notified */
982 static int _waitq_filter(struct rt_wqueue_node *wait_node, void *key)
983 {
984 int can_accept_evt = 0;
985 rt_thread_t waiter = wait_node->polling_thread;
986 pid_t destiny = (pid_t)wait_node->key;
987 rt_lwp_t waker_lwp = key;
988 struct waitpid_handle *handle;
989 rt_ubase_t options;
990
991 handle = rt_container_of(wait_node, struct waitpid_handle, wq_node);
992
993 RT_ASSERT(waiter != RT_NULL);
994 options = handle->options;
995
996 /* filter out if the waker is not the one being waited for */
997 if (destiny > 0)
998 {
999 /**
1000 * in the waitpid immediate-return routine, we have already checked
1001 * that pid is one of the child processes of the waiting thread
1002 */
1003 can_accept_evt = waker_lwp->pid == destiny;
1004 }
1005 else if (destiny == -1)
1006 {
1007 can_accept_evt = waker_lwp->parent == waiter->lwp;
1008 }
1009 else
1010 {
1011 /* destiny == 0 || destiny == -pgid */
1012 pid_t waiter_pgid;
1013 if (destiny == 0)
1014 {
1015 waiter_pgid = lwp_pgid_get_byprocess(waiter->lwp);
1016 }
1017 else
1018 {
1019 waiter_pgid = -destiny;
1020 }
1021 can_accept_evt = waiter_pgid == lwp_pgid_get_byprocess(waker_lwp);
1022 }
1023
1024 /* filter out if event is not desired */
1025 if (can_accept_evt)
1026 {
1027 if ((options & WEXITED) && waker_lwp->terminated)
1028 can_accept_evt = 1;
1029 else if ((options & WSTOPPED) && WIFSTOPPED(waker_lwp->lwp_status))
1030 can_accept_evt = 1;
1031 else if ((options & WCONTINUED) && WIFCONTINUED(waker_lwp->lwp_status))
1032 can_accept_evt = 1;
1033 else
1034 can_accept_evt = 0;
1035 }
1036
1037 /* setup message for waiter if accepted */
1038 if (can_accept_evt)
1039 handle->waker_lwp = waker_lwp;
1040
1041 /* 0 if event is accepted, otherwise discard */
1042 return !can_accept_evt;
1043 }
1044
1045 /* the waiter cleans up the IPC message and waits for the desired event here */
1046 static rt_err_t _wait_for_event(rt_thread_t cur_thr, rt_lwp_t self_lwp,
1047 struct waitpid_handle *handle, pid_t destiny)
1048 {
1049 rt_err_t ret;
1050
1051 /* current context checking */
1052 RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);
1053
1054 handle->wq_node.polling_thread = cur_thr;
1055 handle->wq_node.key = destiny;
1056 handle->wq_node.wakeup = _waitq_filter;
1057 handle->wq_node.wqueue = &self_lwp->waitpid_waiters;
1058 rt_list_init(&handle->wq_node.list);
1059
1060 cur_thr->error = RT_EOK;
1061
1062 LOG_D("%s(self_lwp=%d) wait for event", __func__, self_lwp->pid);
1063
1064 rt_enter_critical();
1065 ret = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
1066 if (ret == RT_EOK)
1067 {
1068 rt_wqueue_add(handle->wq_node.wqueue, &handle->wq_node);
1069 rt_exit_critical();
1070
1071 rt_schedule();
1072
1073 ret = cur_thr->error;
1074
1075 /**
1076 * cur_thr->error is a positive value, but some legacy implementations
1077 * use a negative one, so normalize it here to avoid errors
1078 */
1079 ret = ret > 0 ? -ret : ret;
1080
1081 /**
1082 * we don't actually rely on this, but we clean it up since the wakeup API
1083 * sets it during operation, and leaving it would cause a messy condition
1084 */
1085 handle->wq_node.wqueue->flag = RT_WQ_FLAG_CLEAN;
1086 rt_wqueue_remove(&handle->wq_node);
1087 }
1088 else
1089 {
1090 /* failed to suspend, return immediately with failure */
1091 rt_exit_critical();
1092 }
1093
1094 return ret;
1095 }
1096
1097 /* wait for the IPC event and do the cleanup if necessary */
1098 static sysret_t _wait_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp, const pid_t pid,
1099 int options, int *ustatus, struct rusage *uru)
1100 {
1101 sysret_t rc;
1102 struct waitpid_handle handle;
1103 rt_lwp_t waker;
1104
1105 /* wait for SIGCHLD or other async events */
1106 handle.options = options;
1107 handle.waker_lwp = 0;
1108 rc = _wait_for_event(cur_thr, self_lwp, &handle, pid);
1109
1110 waker = handle.waker_lwp;
1111 if (waker != RT_NULL)
1112 {
1113 rc = waker->pid;
1114
1115 /* check whether any process exited */
1116 LOG_D("%s: woken up by lwp=%d", __func__, waker->pid);
1117 _stats_and_reap_child(waker, cur_thr, self_lwp, ustatus, options, uru);
1118 }
1119 /**
1120 * else if (rc != RT_EOK)
1121 * unable to suspend, or woken up unexpectedly
1122 * -> return a failure
1123 */
1124
1125 return rc;
1126 }
1127
1128 pid_t lwp_waitpid(const pid_t pid, int *status, int options, struct rusage *ru)
1129 {
1130 pid_t rc = -1;
1131 struct rt_thread *cur_thr;
1132 struct rt_lwp *self_lwp;
1133
1134 cur_thr = rt_thread_self();
1135 self_lwp = lwp_self();
1136
1137 if (!cur_thr || !self_lwp)
1138 {
1139 rc = -EINVAL;
1140 }
1141 else
1142 {
1143 /* check if able to reap desired child immediately */
1144 if (pid > 0)
1145 {
1146 /* if pid is child then try to reap it */
1147 rc = _verify_child_and_reap(cur_thr, self_lwp, pid, options, status, ru);
1148 }
1149 else if (pid == -1)
1150 {
1151 /* any terminated child */
1152 rc = _reap_any_child_pid(cur_thr, self_lwp, 0, options, status, ru);
1153 }
1154 else
1155 {
1156 /**
1157 * (pid < -1 || pid == 0)
1158 * any terminated child with matched pgid
1159 */
1160
1161 pid_t pair_pgid;
1162 if (pid == 0)
1163 {
1164 pair_pgid = lwp_pgid_get_byprocess(self_lwp);
1165 }
1166 else
1167 {
1168 pair_pgid = -pid;
1169 }
1170 rc = _reap_any_child_pid(cur_thr, self_lwp, pair_pgid, options, status, ru);
1171 }
1172
1173 if (rc == HAS_CHILD_BUT_NO_EVT)
1174 {
1175 if (!(options & WNOHANG))
1176 {
1177 /* otherwise, arrange a suspend and wait for async event */
1178 options |= WEXITED;
1179 rc = _wait_and_reap(cur_thr, self_lwp, pid, options, status, ru);
1180 }
1181 else
1182 {
1183 /**
1184 * POSIX.1: If waitpid() was invoked with WNOHANG set in options,
1185 * it has at least one child process specified by pid for which
1186 * status is not available, and status is not available for any
1187 * process specified by pid, 0 is returned
1188 */
1189 rc = 0;
1190 }
1191 }
1192 else
1193 {
1194 RT_ASSERT(rc != 0);
1195 }
1196 }
1197
1198 LOG_D("waitpid() => %d, *status=0x%x", rc, status ? *status:0);
1199 return rc;
1200 }
1201
1202 pid_t waitpid(pid_t pid, int *status, int options)
1203 {
1204 return lwp_waitpid(pid, status, options, RT_NULL);
1205 }
1206
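/*
 * Illustrative sketch (not part of this file's API): typical caller-side use
 * of waitpid() with WNOHANG, where `child_pid` is a hypothetical pid obtained
 * from a previous fork/exec.
 *
 *     int status;
 *     pid_t ret = waitpid(child_pid, &status, WNOHANG);
 *     if (ret == 0)
 *         ;    // child exists but has reported no event yet
 *     else if (ret == child_pid && WIFEXITED(status))
 *         ;    // child exited; exit code is WEXITSTATUS(status)
 */
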
1207 #ifdef RT_USING_FINSH
1208 /* copy from components/finsh/cmd.c */
1209 static void object_split(int len)
1210 {
1211 while (len--)
1212 {
1213 rt_kprintf("-");
1214 }
1215 }
1216
1217 static void print_thread_info(struct rt_thread* thread, int maxlen)
1218 {
1219 rt_uint8_t *ptr;
1220 rt_uint8_t stat;
1221
1222 #ifdef RT_USING_SMP
1223 if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
1224 rt_kprintf("%3d %3d ", RT_SCHED_CTX(thread).oncpu, RT_SCHED_PRIV(thread).current_priority);
1225 else
1226 rt_kprintf("N/A %3d ", RT_SCHED_PRIV(thread).current_priority);
1227 #else
1228 rt_kprintf("%3d ", RT_SCHED_PRIV(thread).current_priority);
1229 #endif /*RT_USING_SMP*/
1230
1231 stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
1232 if (stat == RT_THREAD_READY) rt_kprintf(" ready ");
1233 else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
1234 else if (stat == RT_THREAD_INIT) rt_kprintf(" init ");
1235 else if (stat == RT_THREAD_CLOSE) rt_kprintf(" close ");
1236 else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");
1237
1238 #if defined(ARCH_CPU_STACK_GROWS_UPWARD)
1239 ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
1240 while (*ptr == '#')ptr--;
1241
1242 rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
1243 ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
1244 thread->stack_size,
1245 ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
1246 thread->remaining_tick,
1247 thread->error);
1248 #else
1249 ptr = (rt_uint8_t *)thread->stack_addr;
1250 while (*ptr == '#')ptr++;
1251
1252 rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d",
1253 (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
1254 thread->stack_size,
1255 (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
1256 / thread->stack_size,
1257 RT_SCHED_PRIV(thread).remaining_tick,
1258 thread->error);
1259 #endif
1260 rt_kprintf(" %-.*s\n",rt_strlen(thread->parent.name), thread->parent.name);
1261 }
1262
1263 long list_process(void)
1264 {
1265 int index;
1266 int maxlen;
1267 rt_ubase_t level;
1268 struct rt_thread *thread;
1269 struct rt_list_node *node, *list;
1270 const char *item_title = "thread";
1271
1272 int count = 0;
1273 struct rt_thread **threads;
1274
1275 maxlen = RT_NAME_MAX;
1276 #ifdef RT_USING_SMP
1277 rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error %-*.s\n", 4, "PID", 4, "TID", maxlen, item_title, maxlen, "cmd");
1278 object_split(4);rt_kprintf(" ");object_split(4);rt_kprintf(" ");object_split(maxlen);rt_kprintf(" ");
1279 rt_kprintf( "--- --- ------- ---------- ---------- -------- ---------- -----");rt_kprintf(" ");object_split(maxlen);rt_kprintf("\n");
1280 #else
1281 rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", 4, "TID", maxlen, item_title, maxlen, "cmd");
1282 object_split(4);rt_kprintf(" ");object_split(4);rt_kprintf(" ");object_split(maxlen);rt_kprintf(" ");
1283 rt_kprintf( "--- ------- ---------- ---------- -------- ---------- -----");rt_kprintf(" ");object_split(maxlen);rt_kprintf("\n");
1284 #endif /*RT_USING_SMP*/
1285
1286 count = rt_object_get_length(RT_Object_Class_Thread);
1287 if (count > 0)
1288 {
1289 /* get thread pointers */
1290 threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
1291 if (threads)
1292 {
1293 index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);
1294
1295 if (index > 0)
1296 {
1297 for (index = 0; index <count; index++)
1298 {
1299 struct rt_thread th;
1300
1301 thread = threads[index];
1302
1303 level = rt_spin_lock_irqsave(&thread->spinlock);
1304 if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
1305 {
1306 rt_spin_unlock_irqrestore(&thread->spinlock, level);
1307 continue;
1308 }
1309
1310 rt_memcpy(&th, thread, sizeof(struct rt_thread));
1311 rt_spin_unlock_irqrestore(&thread->spinlock, level);
1312
1313 if (th.lwp == RT_NULL)
1314 {
1315 rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
1316 print_thread_info(&th, maxlen);
1317 }
1318 }
1319 }
1320 rt_free(threads);
1321 }
1322 }
1323
1324 for (index = 0; index < RT_LWP_MAX_NR; index++)
1325 {
1326 struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;
1327
1328 if (lwp)
1329 {
1330 list = &lwp->t_grp;
1331 for (node = list->next; node != list; node = node->next)
1332 {
1333 thread = rt_list_entry(node, struct rt_thread, sibling);
1334 rt_kprintf("%4d %4d %-*.*s ", lwp_to_pid(lwp), thread->tid, maxlen, RT_NAME_MAX, lwp->cmd);
1335 print_thread_info(thread, maxlen);
1336 }
1337 }
1338 }
1339 return 0;
1340 }
1341 MSH_CMD_EXPORT(list_process, list process);
1342
1343 static void cmd_kill(int argc, char** argv)
1344 {
1345 int pid;
1346 int sig = SIGKILL;
1347
1348 if (argc < 2)
1349 {
1350 rt_kprintf("kill pid or kill pid -s signal\n");
1351 return;
1352 }
1353
1354 pid = atoi(argv[1]);
1355 if (argc >= 4)
1356 {
1357 if (argv[2][0] == '-' && argv[2][1] == 's')
1358 {
1359 sig = atoi(argv[3]);
1360 }
1361 }
1362 lwp_pid_lock_take();
1363 lwp_signal_kill(lwp_from_pid_raw_locked(pid), sig, SI_USER, 0);
1364 lwp_pid_lock_release();
1365 }
1366 MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);
1367
1368 static void cmd_killall(int argc, char** argv)
1369 {
1370 int pid;
1371 if (argc < 2)
1372 {
1373 rt_kprintf("killall processes_name\n");
1374 return;
1375 }
1376
1377 while((pid = lwp_name2pid(argv[1])) > 0)
1378 {
1379 lwp_pid_lock_take();
1380 lwp_signal_kill(lwp_from_pid_raw_locked(pid), SIGKILL, SI_USER, 0);
1381 lwp_pid_lock_release();
1382 rt_thread_mdelay(100);
1383 }
1384 }
1385 MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
1386
1387 #endif
1388
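/*
 * Consume a pending exit request exactly once: atomically move the thread's
 * exit_request from TRIGGERED to IN_PROCESS, so only one caller acts on it.
 * Kernel threads (no lwp attached) never see an exit request.
 */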
1389 int lwp_check_exit_request(void)
1390 {
1391 rt_thread_t thread = rt_thread_self();
1392 rt_size_t expected = LWP_EXIT_REQUEST_TRIGGERED;
1393
1394 if (!thread->lwp)
1395 {
1396 return 0;
1397 }
1398
1399 return atomic_compare_exchange_strong(&thread->exit_request, &expected,
1400 LWP_EXIT_REQUEST_IN_PROCESS);
1401 }
1402
1403 static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
1404 static void _resr_cleanup(struct rt_lwp *lwp);
1405
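/*
 * Terminate the whole process: mark it terminated (stopping further signal
 * delivery), broadcast the exit request to every sibling thread and wait for
 * them to finish, then release the process resources (children, signals, fd
 * table and pid) in _resr_cleanup().
 */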
1406 void lwp_terminate(struct rt_lwp *lwp)
1407 {
1408 if (!lwp)
1409 {
1410 /* not supported for kernel threads */
1411 return;
1412 }
1413
1414 LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);
1415
1416 LWP_LOCK(lwp);
1417
1418 if (!lwp->terminated)
1419 {
1420 /* stop the receiving of signals */
1421 lwp->terminated = RT_TRUE;
1422 LWP_UNLOCK(lwp);
1423
1424 _wait_sibling_exit(lwp, rt_thread_self());
1425 _resr_cleanup(lwp);
1426 }
1427 else
1428 {
1429 LWP_UNLOCK(lwp);
1430 }
1431 }
1432
1433 static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
1434 {
1435 rt_sched_lock_level_t slvl;
1436 rt_list_t *list;
1437 rt_thread_t thread;
1438 rt_size_t expected = LWP_EXIT_REQUEST_NONE;
1439
1440 /* broadcast exit request for sibling threads */
1441 LWP_LOCK(lwp);
1442 for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
1443 {
1444 thread = rt_list_entry(list, struct rt_thread, sibling);
1445
1446 atomic_compare_exchange_strong(&thread->exit_request, &expected,
1447 LWP_EXIT_REQUEST_TRIGGERED);
1448
1449 rt_sched_lock(&slvl);
1450 /* don't release, otherwise the thread may have been freed */
1451 if (rt_sched_thread_is_suspended(thread))
1452 {
1453 thread->error = RT_EINTR;
1454 rt_sched_unlock(slvl);
1455
1456 rt_thread_wakeup(thread);
1457 }
1458 else
1459 {
1460 rt_sched_unlock(slvl);
1461 }
1462 }
1463 LWP_UNLOCK(lwp);
1464
1465 while (1)
1466 {
1467 int subthread_is_terminated;
1468 LOG_D("%s: wait for subthread exiting", __func__);
1469
1470 /**
1471 * Brief: wait for all *running* sibling threads to exit
1472 *
1473 * Note: Critical Section
1474 * - sibling list of lwp (RW. It will clear all siblings finally)
1475 */
1476 LWP_LOCK(lwp);
1477 subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
1478 if (!subthread_is_terminated)
1479 {
1480 rt_sched_lock_level_t slvl;
1481 rt_thread_t sub_thread;
1482 rt_list_t *list;
1483 int all_subthread_in_init = 1;
1484
1485 /* check whether all subthreads are in the init state */
1486 for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
1487 {
1488 rt_sched_lock(&slvl);
1489 sub_thread = rt_list_entry(list, struct rt_thread, sibling);
1490 if (rt_sched_thread_get_stat(sub_thread) != RT_THREAD_INIT)
1491 {
1492 rt_sched_unlock(slvl);
1493 all_subthread_in_init = 0;
1494 break;
1495 }
1496 else
1497 {
1498 rt_sched_unlock(slvl);
1499 }
1500 }
1501 if (all_subthread_in_init)
1502 {
1503 /* delete all subthreads */
1504 while ((list = curr_thread->sibling.prev) != &lwp->t_grp)
1505 {
1506 sub_thread = rt_list_entry(list, struct rt_thread, sibling);
1507 rt_list_remove(&sub_thread->sibling);
1508
1509 /**
1510 * Note: Critical Section
1511 * - thread control block (RW. Since it will free the thread
1512 * control block, it must ensure no one else can access
1513 * thread any more)
1514 */
1515 lwp_tid_put(sub_thread->tid);
1516 sub_thread->tid = 0;
1517 rt_thread_delete(sub_thread);
1518 }
1519 subthread_is_terminated = 1;
1520 }
1521 }
1522 LWP_UNLOCK(lwp);
1523
1524 if (subthread_is_terminated)
1525 {
1526 break;
1527 }
1528 rt_thread_mdelay(10);
1529 }
1530 }
1531
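/* deliver the exit event to the parent: wake any waitpid() waiter and send
 * SIGCHLD whose si_code (CLD_EXITED/CLD_KILLED/CLD_DUMPED) is derived from
 * this lwp's status */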
1532 static void _notify_parent(rt_lwp_t lwp)
1533 {
1534 int si_code;
1535 int signo_or_exitcode;
1536 lwp_siginfo_ext_t ext;
1537 lwp_status_t lwp_status = lwp->lwp_status;
1538 rt_lwp_t parent = lwp->parent;
1539
1540 if (WIFSIGNALED(lwp_status))
1541 {
1542 si_code = (lwp_status & LWP_COREDUMP_FLAG) ? CLD_DUMPED : CLD_KILLED;
1543 signo_or_exitcode = WTERMSIG(lwp_status);
1544 }
1545 else
1546 {
1547 si_code = CLD_EXITED;
1548 signo_or_exitcode = WEXITSTATUS(lwp->lwp_status);
1549 }
1550
1551 lwp_waitpid_kick(parent, lwp);
1552
1553 ext = rt_malloc(sizeof(struct lwp_siginfo));
1554
1555 if (ext)
1556 {
1557 rt_thread_t cur_thr = rt_thread_self();
1558 ext->sigchld.status = signo_or_exitcode;
1559 ext->sigchld.stime = cur_thr->system_time;
1560 ext->sigchld.utime = cur_thr->user_time;
1561 }
1562 lwp_signal_kill(parent, SIGCHLD, si_code, ext);
1563 }
1564
1565 static void _resr_cleanup(struct rt_lwp *lwp)
1566 {
1567 int need_cleanup_pid = RT_FALSE;
1568 lwp_jobctrl_on_exit(lwp);
1569
1570 LWP_LOCK(lwp);
1571 lwp_signal_detach(&lwp->signal);
1572
1573 /**
1574 * @brief Detach children from lwp
1575 *
1576 * @note Critical Section
1577 * - the lwp (RW. Release lwp)
1578 * - the pid resource manager (RW. Release the pid)
1579 */
1580 while (lwp->first_child)
1581 {
1582 struct rt_lwp *child;
1583
1584 child = lwp->first_child;
1585 lwp->first_child = child->sibling;
1586
1587 /** @note safe since the slist node is released */
1588 LWP_UNLOCK(lwp);
1589 LWP_LOCK(child);
1590 if (child->terminated)
1591 {
1592 lwp_pid_put(child);
1593 }
1594 else
1595 {
1596 child->sibling = RT_NULL;
1597 /* info: this may cause an orphan lwp */
1598 child->parent = RT_NULL;
1599 }
1600
1601 LWP_UNLOCK(child);
1602 lwp_ref_dec(child);
1603 lwp_ref_dec(lwp);
1604
1605 LWP_LOCK(lwp);
1606 }
1607 LWP_UNLOCK(lwp);
1608
1609 /**
1610 * @brief Wakeup parent if it's waiting for this lwp, otherwise a signal
1611 * will be sent to parent
1612 *
1613 * @note Critical Section
1614 * - the parent lwp (RW.)
1615 */
1616 LWP_LOCK(lwp);
1617 if (lwp->parent &&
1618 !lwp_sigismember(&lwp->parent->signal.sig_action_nocldwait, SIGCHLD))
1619 {
1620 /* if we successfully raced to set lwp->terminated before the parent detached */
1621 LWP_UNLOCK(lwp);
1622
1623 /**
1624 * Note: a child cannot detach itself and must wait for its parent to take
1625 * care of it
1626 */
1627 _notify_parent(lwp);
1628 }
1629 else
1630 {
1631 LWP_UNLOCK(lwp);
1632
1633 /**
1634 * if the process is an orphan, it has no parent to do the recycling.
1635 * Otherwise, its parent has set a flag to mask out the recycling event
1636 */
1637 need_cleanup_pid = RT_TRUE;
1638 }
1639
1640 LWP_LOCK(lwp);
1641 if (lwp->fdt.fds != RT_NULL)
1642 {
1643 struct dfs_file **fds;
1644
1645 /* auto clean fds */
1646 __exit_files(lwp);
1647 fds = lwp->fdt.fds;
1648 lwp->fdt.fds = RT_NULL;
1649 LWP_UNLOCK(lwp);
1650
1651 rt_free(fds);
1652 }
1653 else
1654 {
1655 LWP_UNLOCK(lwp);
1656 }
1657
1658 if (need_cleanup_pid)
1659 {
1660 lwp_pid_put(lwp);
1661 }
1662 }
1663
1664 static int _lwp_setaffinity(int tid, int cpu)
1665 {
1666 rt_thread_t thread;
1667 int ret = -1;
1668
1669 thread = lwp_tid_get_thread_and_inc_ref(tid);
1670
1671 if (thread)
1672 {
1673 #ifdef RT_USING_SMP
1674 rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_ubase_t)cpu);
1675 #endif
1676 ret = 0;
1677 }
1678 lwp_tid_dec_ref(thread);
1679 return ret;
1680 }
1681
1682 int lwp_setaffinity(int tid, int cpu)
1683 {
1684 int ret;
1685
1686 #ifdef RT_USING_SMP
1687 if (cpu < 0 || cpu > RT_CPUS_NR)
1688 {
1689 cpu = RT_CPUS_NR;
1690 }
1691 #endif
1692 ret = _lwp_setaffinity(tid, cpu);
1693 return ret;
1694 }
1695
1696 #ifdef RT_USING_SMP
1697 static void cmd_cpu_bind(int argc, char** argv)
1698 {
1699 int pid;
1700 int cpu;
1701
1702 if (argc < 3)
1703 {
1704 rt_kprintf("Useage: cpu_bind pid cpu\n");
1705 return;
1706 }
1707
1708 pid = atoi(argv[1]);
1709 cpu = atoi(argv[2]);
1710 lwp_setaffinity((pid_t)pid, cpu);
1711 }
1712 MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
1713 #endif
1714