/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-18     Bernard      the first version
 * 2006-04-26     Bernard      add semaphore APIs
 * 2006-08-10     Bernard      add version information
 * 2007-01-28     Bernard      rename RT_OBJECT_Class_Static to RT_Object_Class_Static
 * 2007-03-03     Bernard      clean up the definitions to rtdef.h
 * 2010-04-11     yi.qiu       add module feature
 * 2013-06-24     Bernard      add rt_kprintf re-define when not use RT_USING_CONSOLE.
 * 2016-08-09     ArdaFu       add new thread and interrupt hook.
 * 2018-11-22     Jesven       add all cpu's lock and ipi handler
 * 2021-02-28     Meco Man     add RT_KSERVICE_USING_STDLIB
 * 2021-11-14     Meco Man     add rtlegacy.h for compatibility
 * 2022-06-04     Meco Man     remove strnlen
 * 2023-05-20     Bernard      add rtatomic.h header file to included files.
 * 2023-06-30     ChuShicheng  move debug check from the rtdebug.h
 * 2023-10-16     Shell        Support a new backtrace framework
 * 2023-12-10     xqyjlj       fix spinlock in up
 * 2024-01-25     Shell        Add rt_susp_list for IPC primitives
 * 2024-03-10     Meco Man     move std libc related functions to rtklibc
 */
27
28 #ifndef __RT_THREAD_H__
29 #define __RT_THREAD_H__
30
31 #include <rtconfig.h>
32 #include <rtdef.h>
33 #include <rtservice.h>
34 #include <rtm.h>
35 #include <rtatomic.h>
36 #include <rtklibc.h>
37 #ifdef RT_USING_LEGACY
38 #include <rtlegacy.h>
39 #endif
40 #ifdef RT_USING_FINSH
41 #include <finsh.h>
42 #endif /* RT_USING_FINSH */
43
44 #ifdef __cplusplus
45 extern "C" {
46 #endif
47
48 #ifdef __GNUC__
49 int entry(void);
50 #endif
51
52 /**
53 * @addtogroup group_object_management
54 * @{
55 */
56
57 /*
58 * kernel object interface
59 */
60 struct rt_object_information *
61 rt_object_get_information(enum rt_object_class_type type);
62 int rt_object_get_length(enum rt_object_class_type type);
63 int rt_object_get_pointers(enum rt_object_class_type type, rt_object_t *pointers, int maxlen);
64
65 void rt_object_init(struct rt_object *object,
66 enum rt_object_class_type type,
67 const char *name);
68 void rt_object_detach(rt_object_t object);
69 #ifdef RT_USING_HEAP
70 rt_object_t rt_object_allocate(enum rt_object_class_type type, const char *name);
71 void rt_object_delete(rt_object_t object);
72 /* custom object */
73 rt_object_t rt_custom_object_create(const char *name, void *data, rt_err_t (*data_destroy)(void *));
74 rt_err_t rt_custom_object_destroy(rt_object_t obj);
75 #endif /* RT_USING_HEAP */
76 rt_bool_t rt_object_is_systemobject(rt_object_t object);
77 rt_uint8_t rt_object_get_type(rt_object_t object);
78 rt_err_t rt_object_for_each(rt_uint8_t type, rt_object_iter_t iter, void *data);
79 rt_object_t rt_object_find(const char *name, rt_uint8_t type);
80 rt_err_t rt_object_get_name(rt_object_t object, char *name, rt_uint8_t name_size);
81
82 #ifdef RT_USING_HOOK
83 void rt_object_attach_sethook(void (*hook)(struct rt_object *object));
84 void rt_object_detach_sethook(void (*hook)(struct rt_object *object));
85 void rt_object_trytake_sethook(void (*hook)(struct rt_object *object));
86 void rt_object_take_sethook(void (*hook)(struct rt_object *object));
87 void rt_object_put_sethook(void (*hook)(struct rt_object *object));
88 #endif /* RT_USING_HOOK */
89
90 /**@}*/
91
92 /**
93 * @addtogroup group_clock_management
94 * @{
95 */
96
97 /*
98 * clock & timer interface
99 */
100 rt_tick_t rt_tick_get(void);
101 rt_tick_t rt_tick_get_delta(rt_tick_t base);
102 void rt_tick_set(rt_tick_t tick);
103 void rt_tick_increase(void);
104 void rt_tick_increase_tick(rt_tick_t tick);
105 rt_tick_t rt_tick_from_millisecond(rt_int32_t ms);
106 rt_tick_t rt_tick_get_millisecond(void);
107 #ifdef RT_USING_HOOK
108 void rt_tick_sethook(void (*hook)(void));
109 #endif /* RT_USING_HOOK */
110
111 void rt_system_timer_init(void);
112 void rt_system_timer_thread_init(void);
113
114 void rt_timer_init(rt_timer_t timer,
115 const char *name,
116 void (*timeout)(void *parameter),
117 void *parameter,
118 rt_tick_t time,
119 rt_uint8_t flag);
120 rt_err_t rt_timer_detach(rt_timer_t timer);
121 #ifdef RT_USING_HEAP
122 rt_timer_t rt_timer_create(const char *name,
123 void (*timeout)(void *parameter),
124 void *parameter,
125 rt_tick_t time,
126 rt_uint8_t flag);
127 rt_err_t rt_timer_delete(rt_timer_t timer);
128 #endif /* RT_USING_HEAP */
129 rt_err_t rt_timer_start(rt_timer_t timer);
130 rt_err_t rt_timer_stop(rt_timer_t timer);
131 rt_err_t rt_timer_control(rt_timer_t timer, int cmd, void *arg);
132 rt_tick_t rt_timer_next_timeout_tick(void);
133 void rt_timer_check(void);
134 #ifdef RT_USING_HOOK
135 void rt_timer_enter_sethook(void (*hook)(struct rt_timer *timer));
136 void rt_timer_exit_sethook(void (*hook)(struct rt_timer *timer));
137 #endif /* RT_USING_HOOK */
138
139 /**@}*/
140
141 /**
142 * @addtogroup group_thread_management
143 * @{
144 */
145
146 /*
147 * thread interface
148 */
149 rt_err_t rt_thread_init(struct rt_thread *thread,
150 const char *name,
151 void (*entry)(void *parameter),
152 void *parameter,
153 void *stack_start,
154 rt_uint32_t stack_size,
155 rt_uint8_t priority,
156 rt_uint32_t tick);
157 rt_err_t rt_thread_detach(rt_thread_t thread);
158 #ifdef RT_USING_HEAP
159 rt_thread_t rt_thread_create(const char *name,
160 void (*entry)(void *parameter),
161 void *parameter,
162 rt_uint32_t stack_size,
163 rt_uint8_t priority,
164 rt_uint32_t tick);
165 rt_err_t rt_thread_delete(rt_thread_t thread);
166 #endif /* RT_USING_HEAP */
167 rt_err_t rt_thread_close(rt_thread_t thread);
168 rt_thread_t rt_thread_self(void);
169 rt_thread_t rt_thread_find(char *name);
170 rt_err_t rt_thread_startup(rt_thread_t thread);
171 rt_err_t rt_thread_yield(void);
172 rt_err_t rt_thread_delay(rt_tick_t tick);
173 rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick);
174 rt_err_t rt_thread_mdelay(rt_int32_t ms);
175 rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg);
176 rt_err_t rt_thread_suspend(rt_thread_t thread);
177 rt_err_t rt_thread_suspend_with_flag(rt_thread_t thread, int suspend_flag);
178 rt_err_t rt_thread_resume(rt_thread_t thread);
179 #ifdef RT_USING_SMART
180 rt_err_t rt_thread_wakeup(rt_thread_t thread);
181 void rt_thread_wakeup_set(struct rt_thread *thread, rt_wakeup_func_t func, void* user_data);
182 #endif /* RT_USING_SMART */
183 rt_err_t rt_thread_get_name(rt_thread_t thread, char *name, rt_uint8_t name_size);
184 #ifdef RT_USING_SIGNALS
185 void rt_thread_alloc_sig(rt_thread_t tid);
186 void rt_thread_free_sig(rt_thread_t tid);
187 int rt_thread_kill(rt_thread_t tid, int sig);
188 #endif /* RT_USING_SIGNALS */
189 #ifdef RT_USING_HOOK
190 void rt_thread_suspend_sethook(void (*hook)(rt_thread_t thread));
191 void rt_thread_resume_sethook (void (*hook)(rt_thread_t thread));
192
193 /**
194 * @brief Sets a hook function when a thread is initialized.
195 *
 * @param thread is the target thread being initialized
197 */
198 typedef void (*rt_thread_inited_hookproto_t)(rt_thread_t thread);
199 RT_OBJECT_HOOKLIST_DECLARE(rt_thread_inited_hookproto_t, rt_thread_inited);
200
201 #endif /* RT_USING_HOOK */
202
203 /*
204 * idle thread interface
205 */
206 void rt_thread_idle_init(void);
207 #if defined(RT_USING_HOOK) || defined(RT_USING_IDLE_HOOK)
208 rt_err_t rt_thread_idle_sethook(void (*hook)(void));
209 rt_err_t rt_thread_idle_delhook(void (*hook)(void));
210 #endif /* defined(RT_USING_HOOK) || defined(RT_USING_IDLE_HOOK) */
211 rt_thread_t rt_thread_idle_gethandler(void);
212
213 /*
214 * schedule service
215 */
216 void rt_system_scheduler_init(void);
217 void rt_system_scheduler_start(void);
218
219 void rt_schedule(void);
220 void rt_scheduler_do_irq_switch(void *context);
221
#ifdef RT_USING_OVERFLOW_CHECK
void rt_scheduler_stack_check(struct rt_thread *thread);

/* Check the given thread's stack for overflow before a context switch. */
#define RT_SCHEDULER_STACK_CHECK(thr) rt_scheduler_stack_check(thr)

#else /* !RT_USING_OVERFLOW_CHECK */

/* Overflow checking disabled: the macro expands to nothing. */
#define RT_SCHEDULER_STACK_CHECK(thr)

#endif /* RT_USING_OVERFLOW_CHECK */
232
233 rt_base_t rt_enter_critical(void);
234 void rt_exit_critical(void);
235 void rt_exit_critical_safe(rt_base_t critical_level);
236 rt_uint16_t rt_critical_level(void);
237
238 #ifdef RT_USING_HOOK
239 void rt_scheduler_sethook(void (*hook)(rt_thread_t from, rt_thread_t to));
240 void rt_scheduler_switch_sethook(void (*hook)(struct rt_thread *tid));
241 #endif /* RT_USING_HOOK */
242
243 #ifdef RT_USING_SMP
244 void rt_secondary_cpu_entry(void);
245 void rt_scheduler_ipi_handler(int vector, void *param);
246 #endif /* RT_USING_SMP */
247
248 /**@}*/
249
250 /**
251 * @addtogroup group_signal
252 * @{
253 */
254 #ifdef RT_USING_SIGNALS
255 void rt_signal_mask(int signo);
256 void rt_signal_unmask(int signo);
257 void *rt_signal_check(void* context);
258 rt_sighandler_t rt_signal_install(int signo, rt_sighandler_t handler);
259 int rt_signal_wait(const rt_sigset_t *set, rt_siginfo_t *si, rt_int32_t timeout);
260 int rt_system_signal_init(void);
261 #endif /* RT_USING_SIGNALS */
262 /**@}*/
263
264 /**
265 * @addtogroup group_memory_management
266 * @{
267 */
268
269 /*
270 * memory management interface
271 */
272 #ifdef RT_USING_MEMPOOL
273 /*
274 * memory pool interface
275 */
276 rt_err_t rt_mp_init(struct rt_mempool *mp,
277 const char *name,
278 void *start,
279 rt_size_t size,
280 rt_size_t block_size);
281 rt_err_t rt_mp_detach(struct rt_mempool *mp);
282 #ifdef RT_USING_HEAP
283 rt_mp_t rt_mp_create(const char *name,
284 rt_size_t block_count,
285 rt_size_t block_size);
286 rt_err_t rt_mp_delete(rt_mp_t mp);
287 #endif /* RT_USING_HEAP */
288 void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time);
289 void rt_mp_free(void *block);
290 #ifdef RT_USING_HOOK
291 void rt_mp_alloc_sethook(void (*hook)(struct rt_mempool *mp, void *block));
292 void rt_mp_free_sethook(void (*hook)(struct rt_mempool *mp, void *block));
293 #endif /* RT_USING_HOOK */
294
295 #endif /* RT_USING_MEMPOOL */
296
297 #ifdef RT_USING_HEAP
298 /*
299 * heap memory interface
300 */
301 void rt_system_heap_init(void *begin_addr, void *end_addr);
302 void rt_system_heap_init_generic(void *begin_addr, void *end_addr);
303
304 void *rt_malloc(rt_size_t size);
305 void rt_free(void *ptr);
306 void *rt_realloc(void *ptr, rt_size_t newsize);
307 void *rt_calloc(rt_size_t count, rt_size_t size);
308 void *rt_malloc_align(rt_size_t size, rt_size_t align);
309 void rt_free_align(void *ptr);
310
311 void rt_memory_info(rt_size_t *total,
312 rt_size_t *used,
313 rt_size_t *max_used);
314
315 #if defined(RT_USING_SLAB) && defined(RT_USING_SLAB_AS_HEAP)
316 void *rt_page_alloc(rt_size_t npages);
317 void rt_page_free(void *addr, rt_size_t npages);
318 #endif /* defined(RT_USING_SLAB) && defined(RT_USING_SLAB_AS_HEAP) */
319
320 /**
321 * @ingroup group_hook
322 * @{
323 */
324
325 #ifdef RT_USING_HOOK
326 void rt_malloc_sethook(void (*hook)(void **ptr, rt_size_t size));
327 void rt_realloc_set_entry_hook(void (*hook)(void **ptr, rt_size_t size));
328 void rt_realloc_set_exit_hook(void (*hook)(void **ptr, rt_size_t size));
329 void rt_free_sethook(void (*hook)(void **ptr));
330 #endif /* RT_USING_HOOK */
331 /**@}*/
332
333 #endif /* RT_USING_HEAP */
334
335 #ifdef RT_USING_SMALL_MEM
336 /**
337 * small memory object interface
338 */
339 rt_smem_t rt_smem_init(const char *name,
340 void *begin_addr,
341 rt_size_t size);
342 rt_err_t rt_smem_detach(rt_smem_t m);
343 void *rt_smem_alloc(rt_smem_t m, rt_size_t size);
344 void *rt_smem_realloc(rt_smem_t m, void *rmem, rt_size_t newsize);
345 void rt_smem_free(void *rmem);
346 #endif /* RT_USING_SMALL_MEM */
347
348 #ifdef RT_USING_MEMHEAP
349 /**
350 * memory heap object interface
351 */
352 rt_err_t rt_memheap_init(struct rt_memheap *memheap,
353 const char *name,
354 void *start_addr,
355 rt_size_t size);
356 rt_err_t rt_memheap_detach(struct rt_memheap *heap);
357 void *rt_memheap_alloc(struct rt_memheap *heap, rt_size_t size);
358 void *rt_memheap_realloc(struct rt_memheap *heap, void *ptr, rt_size_t newsize);
359 void rt_memheap_free(void *ptr);
360 void rt_memheap_info(struct rt_memheap *heap,
361 rt_size_t *total,
362 rt_size_t *used,
363 rt_size_t *max_used);
364 #endif /* RT_USING_MEMHEAP */
365
366 #ifdef RT_USING_MEMHEAP_AS_HEAP
367 /**
368 * memory heap as heap
369 */
370 void *_memheap_alloc(struct rt_memheap *heap, rt_size_t size);
371 void _memheap_free(void *rmem);
372 void *_memheap_realloc(struct rt_memheap *heap, void *rmem, rt_size_t newsize);
373 #endif
374
375 #ifdef RT_USING_SLAB
376 /**
377 * slab object interface
378 */
379 rt_slab_t rt_slab_init(const char *name, void *begin_addr, rt_size_t size);
380 rt_err_t rt_slab_detach(rt_slab_t m);
381 void *rt_slab_page_alloc(rt_slab_t m, rt_size_t npages);
382 void rt_slab_page_free(rt_slab_t m, void *addr, rt_size_t npages);
383 void *rt_slab_alloc(rt_slab_t m, rt_size_t size);
384 void *rt_slab_realloc(rt_slab_t m, void *ptr, rt_size_t size);
385 void rt_slab_free(rt_slab_t m, void *ptr);
386 #endif /* RT_USING_SLAB */
387
388 /**@}*/
389
390 /**
391 * @addtogroup group_thread_comm
392 * @{
393 */
394
395 /**
396 * Suspend list - A basic building block for IPC primitives which interacts with
397 * scheduler directly. Its API is similar to a FIFO list.
398 *
 * Note: do not use directly in application code
400 */
401 void rt_susp_list_print(rt_list_t *list);
402 /* reserve thread error while resuming it */
403 #define RT_THREAD_RESUME_RES_THR_ERR (-1)
404 struct rt_thread *rt_susp_list_dequeue(rt_list_t *susp_list, rt_err_t thread_error);
405 rt_err_t rt_susp_list_resume_all(rt_list_t *susp_list, rt_err_t thread_error);
406 rt_err_t rt_susp_list_resume_all_irq(rt_list_t *susp_list,
407 rt_err_t thread_error,
408 struct rt_spinlock *lock);
409
410 /* suspend and enqueue */
411 rt_err_t rt_thread_suspend_to_list(rt_thread_t thread, rt_list_t *susp_list, int ipc_flags, int suspend_flag);
412 /* only for a suspended thread, and caller must hold the scheduler lock */
413 rt_err_t rt_susp_list_enqueue(rt_list_t *susp_list, rt_thread_t thread, int ipc_flags);
414
415 /**
416 * @addtogroup group_semaphore Semaphore
417 * @{
418 */
419
420 #ifdef RT_USING_SEMAPHORE
421 /*
422 * semaphore interface
423 */
424 rt_err_t rt_sem_init(rt_sem_t sem,
425 const char *name,
426 rt_uint32_t value,
427 rt_uint8_t flag);
428 rt_err_t rt_sem_detach(rt_sem_t sem);
429 #ifdef RT_USING_HEAP
430 rt_sem_t rt_sem_create(const char *name, rt_uint32_t value, rt_uint8_t flag);
431 rt_err_t rt_sem_delete(rt_sem_t sem);
432 #endif /* RT_USING_HEAP */
433
434 rt_err_t rt_sem_take(rt_sem_t sem, rt_int32_t timeout);
435 rt_err_t rt_sem_take_interruptible(rt_sem_t sem, rt_int32_t timeout);
436 rt_err_t rt_sem_take_killable(rt_sem_t sem, rt_int32_t timeout);
437 rt_err_t rt_sem_trytake(rt_sem_t sem);
438 rt_err_t rt_sem_release(rt_sem_t sem);
439 rt_err_t rt_sem_control(rt_sem_t sem, int cmd, void *arg);
440 #endif /* RT_USING_SEMAPHORE */
441
442 /**@}*/
443
444 /**
445 * @addtogroup group_mutex Mutex
446 * @{
447 */
448
449 #ifdef RT_USING_MUTEX
450 /*
451 * mutex interface
452 */
453 rt_err_t rt_mutex_init(rt_mutex_t mutex, const char *name, rt_uint8_t flag);
454 rt_err_t rt_mutex_detach(rt_mutex_t mutex);
455 #ifdef RT_USING_HEAP
456 rt_mutex_t rt_mutex_create(const char *name, rt_uint8_t flag);
457 rt_err_t rt_mutex_delete(rt_mutex_t mutex);
458 #endif /* RT_USING_HEAP */
459 void rt_mutex_drop_thread(rt_mutex_t mutex, rt_thread_t thread);
460 rt_uint8_t rt_mutex_setprioceiling(rt_mutex_t mutex, rt_uint8_t priority);
461 rt_uint8_t rt_mutex_getprioceiling(rt_mutex_t mutex);
462
463 rt_err_t rt_mutex_take(rt_mutex_t mutex, rt_int32_t timeout);
464 rt_err_t rt_mutex_trytake(rt_mutex_t mutex);
465 rt_err_t rt_mutex_take_interruptible(rt_mutex_t mutex, rt_int32_t time);
466 rt_err_t rt_mutex_take_killable(rt_mutex_t mutex, rt_int32_t time);
467 rt_err_t rt_mutex_release(rt_mutex_t mutex);
468 rt_err_t rt_mutex_control(rt_mutex_t mutex, int cmd, void *arg);
469
rt_mutex_get_owner(rt_mutex_t mutex)470 rt_inline rt_thread_t rt_mutex_get_owner(rt_mutex_t mutex)
471 {
472 return mutex->owner;
473 }
rt_mutex_get_hold(rt_mutex_t mutex)474 rt_inline rt_ubase_t rt_mutex_get_hold(rt_mutex_t mutex)
475 {
476 return mutex->hold;
477 }
478
479 #endif /* RT_USING_MUTEX */
480
481 /**@}*/
482
483 /**
484 * @addtogroup group_event Event
485 * @{
486 */
487
488 #ifdef RT_USING_EVENT
489 /*
490 * event interface
491 */
492 rt_err_t rt_event_init(rt_event_t event, const char *name, rt_uint8_t flag);
493 rt_err_t rt_event_detach(rt_event_t event);
494 #ifdef RT_USING_HEAP
495 rt_event_t rt_event_create(const char *name, rt_uint8_t flag);
496 rt_err_t rt_event_delete(rt_event_t event);
497 #endif /* RT_USING_HEAP */
498
499 rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set);
500 rt_err_t rt_event_recv(rt_event_t event,
501 rt_uint32_t set,
502 rt_uint8_t opt,
503 rt_int32_t timeout,
504 rt_uint32_t *recved);
505 rt_err_t rt_event_recv_interruptible(rt_event_t event,
506 rt_uint32_t set,
507 rt_uint8_t opt,
508 rt_int32_t timeout,
509 rt_uint32_t *recved);
510 rt_err_t rt_event_recv_killable(rt_event_t event,
511 rt_uint32_t set,
512 rt_uint8_t opt,
513 rt_int32_t timeout,
514 rt_uint32_t *recved);
515 rt_err_t rt_event_control(rt_event_t event, int cmd, void *arg);
516 #endif /* RT_USING_EVENT */
517
518 /**@}*/
519
520 /**
521 * @addtogroup group_mailbox MailBox
522 * @{
523 */
524
525 #ifdef RT_USING_MAILBOX
526 /*
527 * mailbox interface
528 */
529 rt_err_t rt_mb_init(rt_mailbox_t mb,
530 const char *name,
531 void *msgpool,
532 rt_size_t size,
533 rt_uint8_t flag);
534 rt_err_t rt_mb_detach(rt_mailbox_t mb);
535 #ifdef RT_USING_HEAP
536 rt_mailbox_t rt_mb_create(const char *name, rt_size_t size, rt_uint8_t flag);
537 rt_err_t rt_mb_delete(rt_mailbox_t mb);
538 #endif /* RT_USING_HEAP */
539
540 rt_err_t rt_mb_send(rt_mailbox_t mb, rt_ubase_t value);
541 rt_err_t rt_mb_send_interruptible(rt_mailbox_t mb, rt_ubase_t value);
542 rt_err_t rt_mb_send_killable(rt_mailbox_t mb, rt_ubase_t value);
543 rt_err_t rt_mb_send_wait(rt_mailbox_t mb,
544 rt_ubase_t value,
545 rt_int32_t timeout);
546 rt_err_t rt_mb_send_wait_interruptible(rt_mailbox_t mb,
547 rt_ubase_t value,
548 rt_int32_t timeout);
549 rt_err_t rt_mb_send_wait_killable(rt_mailbox_t mb,
550 rt_ubase_t value,
551 rt_int32_t timeout);
552 rt_err_t rt_mb_urgent(rt_mailbox_t mb, rt_ubase_t value);
553 rt_err_t rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout);
554 rt_err_t rt_mb_recv_interruptible(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout);
555 rt_err_t rt_mb_recv_killable(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout);
556 rt_err_t rt_mb_control(rt_mailbox_t mb, int cmd, void *arg);
557 #endif /* RT_USING_MAILBOX */
558
559 /**@}*/
560
561 /**
562 * @addtogroup group_messagequeue Message Queue
563 * @{
564 */
565 #ifdef RT_USING_MESSAGEQUEUE
566
567 struct rt_mq_message
568 {
569 struct rt_mq_message *next;
570 rt_ssize_t length;
571 #ifdef RT_USING_MESSAGEQUEUE_PRIORITY
572 rt_int32_t prio;
573 #endif /* RT_USING_MESSAGEQUEUE_PRIORITY */
574 };
575
/* Buffer size needed for a message pool holding max_msgs messages of
 * msg_size bytes each: every message is aligned to RT_ALIGN_SIZE and
 * carries a struct rt_mq_message header. */
#define RT_MQ_BUF_SIZE(msg_size, max_msgs) \
    ((RT_ALIGN((msg_size), RT_ALIGN_SIZE) + sizeof(struct rt_mq_message)) * (max_msgs))
578
579 /*
580 * message queue interface
581 */
582 rt_err_t rt_mq_init(rt_mq_t mq,
583 const char *name,
584 void *msgpool,
585 rt_size_t msg_size,
586 rt_size_t pool_size,
587 rt_uint8_t flag);
588 rt_err_t rt_mq_detach(rt_mq_t mq);
589 #ifdef RT_USING_HEAP
590 rt_mq_t rt_mq_create(const char *name,
591 rt_size_t msg_size,
592 rt_size_t max_msgs,
593 rt_uint8_t flag);
594 rt_err_t rt_mq_delete(rt_mq_t mq);
595 #endif /* RT_USING_HEAP */
596
597 rt_err_t rt_mq_send(rt_mq_t mq, const void *buffer, rt_size_t size);
598 rt_err_t rt_mq_send_interruptible(rt_mq_t mq, const void *buffer, rt_size_t size);
599 rt_err_t rt_mq_send_killable(rt_mq_t mq, const void *buffer, rt_size_t size);
600 rt_err_t rt_mq_send_wait(rt_mq_t mq,
601 const void *buffer,
602 rt_size_t size,
603 rt_int32_t timeout);
604 rt_err_t rt_mq_send_wait_interruptible(rt_mq_t mq,
605 const void *buffer,
606 rt_size_t size,
607 rt_int32_t timeout);
608 rt_err_t rt_mq_send_wait_killable(rt_mq_t mq,
609 const void *buffer,
610 rt_size_t size,
611 rt_int32_t timeout);
612 rt_err_t rt_mq_urgent(rt_mq_t mq, const void *buffer, rt_size_t size);
613 rt_ssize_t rt_mq_recv(rt_mq_t mq,
614 void *buffer,
615 rt_size_t size,
616 rt_int32_t timeout);
617 rt_ssize_t rt_mq_recv_interruptible(rt_mq_t mq,
618 void *buffer,
619 rt_size_t size,
620 rt_int32_t timeout);
621 rt_ssize_t rt_mq_recv_killable(rt_mq_t mq,
622 void *buffer,
623 rt_size_t size,
624 rt_int32_t timeout);
625 rt_err_t rt_mq_control(rt_mq_t mq, int cmd, void *arg);
626
627 #ifdef RT_USING_MESSAGEQUEUE_PRIORITY
628 rt_err_t rt_mq_send_wait_prio(rt_mq_t mq,
629 const void *buffer,
630 rt_size_t size,
631 rt_int32_t prio,
632 rt_int32_t timeout,
633 int suspend_flag);
634 rt_ssize_t rt_mq_recv_prio(rt_mq_t mq,
635 void *buffer,
636 rt_size_t size,
637 rt_int32_t *prio,
638 rt_int32_t timeout,
639 int suspend_flag);
640 #endif /* RT_USING_MESSAGEQUEUE_PRIORITY */
641 #endif /* RT_USING_MESSAGEQUEUE */
642
643 /**@}*/
644
645 /* defunct */
646 void rt_thread_defunct_init(void);
647 void rt_thread_defunct_enqueue(rt_thread_t thread);
648 rt_thread_t rt_thread_defunct_dequeue(void);
649 void rt_defunct_execute(void);
650
651 /*
652 * spinlock
653 */
654 struct rt_spinlock;
655
656 void rt_spin_lock_init(struct rt_spinlock *lock);
657 void rt_spin_lock(struct rt_spinlock *lock);
658 void rt_spin_unlock(struct rt_spinlock *lock);
659 rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock);
660 void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level);
661
662 /**@}*/
663
664 #ifdef RT_USING_DEVICE
665 /**
666 * @addtogroup group_device_driver
667 * @{
668 */
669
670 /*
671 * device (I/O) system interface
672 */
673 rt_device_t rt_device_find(const char *name);
674
675 rt_err_t rt_device_register(rt_device_t dev,
676 const char *name,
677 rt_uint16_t flags);
678 rt_err_t rt_device_unregister(rt_device_t dev);
679
680 #ifdef RT_USING_HEAP
681 rt_device_t rt_device_create(int type, int attach_size);
682 void rt_device_destroy(rt_device_t device);
683 #endif /* RT_USING_HEAP */
684
685 rt_err_t
686 rt_device_set_rx_indicate(rt_device_t dev,
687 rt_err_t (*rx_ind)(rt_device_t dev, rt_size_t size));
688 rt_err_t
689 rt_device_set_tx_complete(rt_device_t dev,
690 rt_err_t (*tx_done)(rt_device_t dev, void *buffer));
691
692 rt_err_t rt_device_init (rt_device_t dev);
693 rt_err_t rt_device_open (rt_device_t dev, rt_uint16_t oflag);
694 rt_err_t rt_device_close(rt_device_t dev);
695 rt_ssize_t rt_device_read(rt_device_t dev,
696 rt_off_t pos,
697 void *buffer,
698 rt_size_t size);
699 rt_ssize_t rt_device_write(rt_device_t dev,
700 rt_off_t pos,
701 const void *buffer,
702 rt_size_t size);
703 rt_err_t rt_device_control(rt_device_t dev, int cmd, void *arg);
704
705 /**@}*/
706 #endif /* RT_USING_DEVICE */
707
708 /*
709 * interrupt service
710 */
711
712 /*
713 * rt_interrupt_enter and rt_interrupt_leave only can be called by BSP
714 */
715 void rt_interrupt_enter(void);
716 void rt_interrupt_leave(void);
717
718 void rt_interrupt_context_push(rt_interrupt_context_t this_ctx);
719 void rt_interrupt_context_pop(void);
720 void *rt_interrupt_context_get(void);
721
722 /**
723 * CPU object
724 */
725 struct rt_cpu *rt_cpu_self(void);
726 struct rt_cpu *rt_cpu_index(int index);
727
728 #ifdef RT_USING_SMP
729
730 /*
731 * smp cpus lock service
732 */
733
734 rt_base_t rt_cpus_lock(void);
735 void rt_cpus_unlock(rt_base_t level);
736 void rt_cpus_lock_status_restore(struct rt_thread *thread);
737
738 #ifdef RT_USING_DEBUG
739 rt_base_t rt_cpu_get_id(void);
740 #else /* !RT_USING_DEBUG */
741 #define rt_cpu_get_id rt_hw_cpu_id
742 #endif /* RT_USING_DEBUG */
743
744 #else /* !RT_USING_SMP */
745 #define rt_cpu_get_id() (0)
746
747 #endif /* RT_USING_SMP */
748
749 /*
750 * the number of nested interrupts.
751 */
752 rt_uint8_t rt_interrupt_get_nest(void);
753
754 #ifdef RT_USING_HOOK
755 void rt_interrupt_enter_sethook(void (*hook)(void));
756 void rt_interrupt_leave_sethook(void (*hook)(void));
757 #endif /* RT_USING_HOOK */
758
759 #ifdef RT_USING_COMPONENTS_INIT
760 void rt_components_init(void);
761 void rt_components_board_init(void);
762 #endif /* RT_USING_COMPONENTS_INIT */
763
764 /**
765 * @addtogroup group_kernel_service
766 * @{
767 */
768
769 /*
770 * general kernel service
771 */
772 #ifndef RT_USING_CONSOLE
773 #define rt_kprintf(...)
774 #define rt_kputs(str)
775 #else
776 int rt_kprintf(const char *fmt, ...);
777 void rt_kputs(const char *str);
778 #endif /* RT_USING_CONSOLE */
779
780 rt_err_t rt_backtrace(void);
781 rt_err_t rt_backtrace_thread(rt_thread_t thread);
782 rt_err_t rt_backtrace_frame(rt_thread_t thread, struct rt_hw_backtrace_frame *frame);
783 rt_err_t rt_backtrace_formatted_print(rt_ubase_t *buffer, long buflen);
784 rt_err_t rt_backtrace_to_buffer(rt_thread_t thread, struct rt_hw_backtrace_frame *frame,
785 long skip, rt_ubase_t *buffer, long buflen);
786
787 #if defined(RT_USING_DEVICE) && defined(RT_USING_CONSOLE)
788 rt_device_t rt_console_set_device(const char *name);
789 rt_device_t rt_console_get_device(void);
790 #ifdef RT_USING_THREADSAFE_PRINTF
791 rt_thread_t rt_console_current_user(void);
792 #else
rt_console_current_user(void)793 rt_inline void *rt_console_current_user(void) { return RT_NULL; }
794 #endif /* RT_USING_THREADSAFE_PRINTF */
795 #endif /* defined(RT_USING_DEVICE) && defined(RT_USING_CONSOLE) */
796
797 int __rt_ffs(int value);
798 unsigned long __rt_ffsl(unsigned long value);
799 unsigned long __rt_clz(unsigned long value);
800
801 void rt_show_version(void);
802
#ifdef RT_DEBUGING_ASSERT
extern void (*rt_assert_hook)(const char *ex, const char *func, rt_size_t line);
void rt_assert_set_hook(void (*hook)(const char *ex, const char *func, rt_size_t line));
void rt_assert_handler(const char *ex, const char *func, rt_size_t line);

/* Assertion check: when EX evaluates to false, invoke rt_assert_handler()
 * with the stringized expression, the function name and the line number. */
#define RT_ASSERT(EX)                                                         \
if (!(EX))                                                                    \
{                                                                             \
    rt_assert_handler(#EX, __FUNCTION__, __LINE__);                           \
}
#else
/* Assertions compiled out: evaluate-and-discard EX to avoid unused warnings. */
#define RT_ASSERT(EX) {RT_UNUSED(EX);}
#endif /* RT_DEBUGING_ASSERT */
816
#ifdef RT_DEBUGING_CONTEXT
/* Macro to check current context: assert that the caller is not running
 * inside an interrupt service routine. */
#define RT_DEBUG_NOT_IN_INTERRUPT                                             \
do                                                                            \
{                                                                             \
    if (rt_interrupt_get_nest() != 0)                                         \
    {                                                                         \
        rt_kprintf("Function[%s] shall not be used in ISR\n", __FUNCTION__);  \
        RT_ASSERT(0)                                                          \
    }                                                                         \
}                                                                             \
while (0)

/* "In thread context" means:
 * 1) the scheduler has been started
 * 2) not in interrupt context.
 */
#define RT_DEBUG_IN_THREAD_CONTEXT                                            \
do                                                                            \
{                                                                             \
    if (rt_thread_self() == RT_NULL)                                          \
    {                                                                         \
        rt_kprintf("Function[%s] shall not be used before scheduler start\n", \
                   __FUNCTION__);                                             \
        RT_ASSERT(0)                                                          \
    }                                                                         \
    RT_DEBUG_NOT_IN_INTERRUPT;                                                \
}                                                                             \
while (0)

/* "scheduler available" means:
 * 1) the scheduler has been started.
 * 2) not in interrupt context.
 * 3) scheduler is not locked.
 */
#define RT_DEBUG_SCHEDULER_AVAILABLE(need_check)                              \
do                                                                            \
{                                                                             \
    if (need_check)                                                           \
    {                                                                         \
        if (rt_critical_level() != 0)                                         \
        {                                                                     \
            rt_kprintf("Function[%s]: scheduler is not available\n",          \
                       __FUNCTION__);                                         \
            RT_ASSERT(0)                                                      \
        }                                                                     \
        RT_DEBUG_IN_THREAD_CONTEXT;                                           \
    }                                                                         \
}                                                                             \
while (0)
#else
/* Context checking disabled: the macros expand to nothing. */
#define RT_DEBUG_NOT_IN_INTERRUPT
#define RT_DEBUG_IN_THREAD_CONTEXT
#define RT_DEBUG_SCHEDULER_AVAILABLE(need_check)
#endif /* RT_DEBUGING_CONTEXT */
872
rt_in_thread_context(void)873 rt_inline rt_bool_t rt_in_thread_context(void)
874 {
875 return rt_thread_self() != RT_NULL && rt_interrupt_get_nest() == 0;
876 }
877
878 /* is scheduler available */
rt_scheduler_is_available(void)879 rt_inline rt_bool_t rt_scheduler_is_available(void)
880 {
881 return rt_critical_level() == 0 && rt_in_thread_context();
882 }
883
#ifdef RT_USING_SMP
/**
 * @brief Check whether a thread is bound to a specific CPU core.
 *
 * @param thread the thread to query; RT_NULL means the current thread
 *
 * @return RT_TRUE when the thread is bound to a core (or when no current
 *         thread is available), RT_FALSE when it may run on any core
 */
rt_inline rt_bool_t rt_sched_thread_is_binding(rt_thread_t thread)
{
    if (thread == RT_NULL)
    {
        thread = rt_thread_self();
    }
    /* bind_cpu == RT_CPUS_NR marks an unbound thread */
    return !thread || RT_SCHED_CTX(thread).bind_cpu != RT_CPUS_NR;
}

#else
/* Single-core build: every thread is trivially "bound". */
#define rt_sched_thread_is_binding(thread) (RT_TRUE)
#endif
898
899 /**@}*/
900
901 #ifdef __cplusplus
902 }
903 #endif
904
905 #endif /* __RT_THREAD_H__ */
906