/*
 * Copyright (c) 2015, Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/arch/cpu.h>
#include <errno.h>
#include <stdio.h>
#include <malloc.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/posix/sys/stat.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/errno_private.h>
#include <zephyr/sys/heap_listener.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/init.h>
#include <zephyr/sys/sem.h>
#include <zephyr/sys/mutex.h>
#include <zephyr/kernel/mm.h>
#include <sys/time.h>

#ifdef CONFIG_XTENSA
#include <xtensa/config/core-isa.h>
#endif

int _fstat(int fd, struct stat *st);
int _read(int fd, void *buf, int nbytes);
int _write(int fd, const void *buf, int nbytes);
int _open(const char *name, int flags, ...);
int _close(int file);
int _lseek(int file, int ptr, int dir);
int _kill(int pid, int sig);
int _getpid(void);

#ifndef CONFIG_NEWLIB_LIBC_CUSTOM_SBRK

#define LIBC_BSS K_APP_BMEM(z_libc_partition)
#define LIBC_DATA K_APP_DMEM(z_libc_partition)

/*
 * End result of this thorny set of ifdefs is to define:
 *
 * - HEAP_BASE base address of the heap arena
 * - MAX_HEAP_SIZE size of the heap arena
 */

#ifdef CONFIG_MMU
#ifdef CONFIG_USERSPACE
struct k_mem_partition z_malloc_partition;
#endif

LIBC_BSS static unsigned char *heap_base;
LIBC_BSS static size_t max_heap_size;

#define HEAP_BASE heap_base
#define MAX_HEAP_SIZE max_heap_size
#define USE_MALLOC_PREPARE 1
#elif CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
/* Arena size expressed in Kconfig, due to power-of-two size/align
 * requirements of certain MPUs.
 *
 * We use an automatic memory partition instead of setting this up
 * in malloc_prepare().
 */
K_APPMEM_PARTITION_DEFINE(z_malloc_partition);
#define MALLOC_BSS K_APP_BMEM(z_malloc_partition)

/* Compiler will throw an error if the provided value isn't a
 * power of two
 */
MALLOC_BSS static unsigned char
	__aligned(CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE)
	heap_base[CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE];
#define MAX_HEAP_SIZE CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
#define HEAP_BASE heap_base
#else /* Not MMU or CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE */
#define USED_RAM_END_ADDR POINTER_TO_UINT(&_end)

#ifdef Z_MALLOC_PARTITION_EXISTS
/* Start of malloc arena needs to be aligned per MPU
 * requirements
 */
struct k_mem_partition z_malloc_partition;

#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
#define HEAP_BASE ROUND_UP(USED_RAM_END_ADDR, \
			   CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
#elif defined(CONFIG_ARC)
#define HEAP_BASE ROUND_UP(USED_RAM_END_ADDR, Z_ARC_MPU_ALIGN)
#elif defined(CONFIG_XTENSA)
#define HEAP_BASE ROUND_UP(USED_RAM_END_ADDR, XCHAL_MPU_ALIGN)
#else
#error "Unsupported platform"
#endif /* CONFIG_<arch> */
#define USE_MALLOC_PREPARE 1
#else
/* End of kernel image */
#define HEAP_BASE USED_RAM_END_ADDR
#endif

/* End of the malloc arena is the end of physical memory */
#if defined(CONFIG_XTENSA)
/* TODO: Why is xtensa a special case? */
extern char _heap_sentry[];
#define MAX_HEAP_SIZE (POINTER_TO_UINT(&_heap_sentry) - HEAP_BASE)
#else
#define MAX_HEAP_SIZE (KB(CONFIG_SRAM_SIZE) - (HEAP_BASE - \
					       CONFIG_SRAM_BASE_ADDRESS))
#endif /* CONFIG_XTENSA */
#endif

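/*
 * Prepare the newlib heap arena at boot: on MMU systems, map an anonymous
 * memory region to back it; when a malloc memory partition exists, describe
 * the arena in that partition so it can be granted to user mode.
 */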
static int malloc_prepare(void)
{

#ifdef USE_MALLOC_PREPARE
#ifdef CONFIG_MMU
	max_heap_size = MIN(CONFIG_NEWLIB_LIBC_MAX_MAPPED_REGION_SIZE,
			    k_mem_free_get());

	if (max_heap_size != 0) {
		heap_base = k_mem_map(max_heap_size, K_MEM_PERM_RW);
		__ASSERT(heap_base != NULL,
			 "failed to allocate heap of size %zu", max_heap_size);
	}
#endif /* CONFIG_MMU */

#ifdef Z_MALLOC_PARTITION_EXISTS
	z_malloc_partition.start = (uintptr_t)HEAP_BASE;
	z_malloc_partition.size = (size_t)MAX_HEAP_SIZE;
	z_malloc_partition.attr = K_MEM_PARTITION_P_RW_U_RW;
#endif /* Z_MALLOC_PARTITION_EXISTS */
#endif /* USE_MALLOC_PREPARE */

	/*
	 * Validate that the memory space available for the newlib heap is
	 * greater than the minimum required size.
	 */
	__ASSERT(MAX_HEAP_SIZE >= CONFIG_NEWLIB_LIBC_MIN_REQUIRED_HEAP_SIZE,
		 "memory space available for newlib heap is less than the "
		 "minimum required size specified by "
		 "CONFIG_NEWLIB_LIBC_MIN_REQUIRED_HEAP_SIZE");

	return 0;
}

SYS_INIT(malloc_prepare, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_LIBC);

/* Current offset from HEAP_BASE of unused memory */
LIBC_BSS static size_t heap_sz;
#endif /* CONFIG_NEWLIB_LIBC_CUSTOM_SBRK */

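/* Default stdout hook: discard the character and return EOF until a real
 * hook is installed via __stdout_hook_install().
 */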
static int _stdout_hook_default(int c)
{
	(void)(c); /* Prevent warning about unused argument */

	return EOF;
}

static int (*_stdout_hook)(int) = _stdout_hook_default;

void __stdout_hook_install(int (*hook)(int))
{
	_stdout_hook = hook;
}

static unsigned char _stdin_hook_default(void)
{
	return 0;
}

static unsigned char (*_stdin_hook)(void) = _stdin_hook_default;

void __stdin_hook_install(unsigned char (*hook)(void))
{
	_stdin_hook = hook;
}

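/* Read up to nbytes characters from the installed stdin hook, stopping once a
 * newline or carriage return has been stored in buf.
 */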
int z_impl_zephyr_read_stdin(char *buf, int nbytes)
{
	int i = 0;

	for (i = 0; i < nbytes; i++) {
		*(buf + i) = _stdin_hook();
		if ((*(buf + i) == '\n') || (*(buf + i) == '\r')) {
			i++;
			break;
		}
	}
	return i;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_zephyr_read_stdin(char *buf, int nbytes)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(buf, nbytes));
	return z_impl_zephyr_read_stdin((char *)buf, nbytes);
}
#include <zephyr/syscalls/zephyr_read_stdin_mrsh.c>
#endif

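/* Write nbytes characters from buffer to the installed stdout hook, emitting
 * a carriage return before every newline.
 */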
int z_impl_zephyr_write_stdout(const void *buffer, int nbytes)
{
	const char *buf = buffer;
	int i;

	for (i = 0; i < nbytes; i++) {
		if (*(buf + i) == '\n') {
			_stdout_hook('\r');
		}
		_stdout_hook(*(buf + i));
	}
	return nbytes;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_zephyr_write_stdout(const void *buf, int nbytes)
{
	K_OOPS(K_SYSCALL_MEMORY_READ(buf, nbytes));
	return z_impl_zephyr_write_stdout((const void *)buf, nbytes);
}
#include <zephyr/syscalls/zephyr_write_stdout_mrsh.c>
#endif

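/* Minimal syscall stubs used when the corresponding POSIX subsystems are not
 * enabled: reads and writes are routed through the stdin/stdout hooks above,
 * while the remaining operations fail or do nothing.
 */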
#ifndef CONFIG_POSIX_DEVICE_IO
int _read(int fd, void *buf, int nbytes)
{
	ARG_UNUSED(fd);

	return zephyr_read_stdin(buf, nbytes);
}
__weak FUNC_ALIAS(_read, read, int);

int _write(int fd, const void *buf, int nbytes)
{
	ARG_UNUSED(fd);

	return zephyr_write_stdout(buf, nbytes);
}
__weak FUNC_ALIAS(_write, write, int);

int _open(const char *name, int flags, ...)
{
	return -1;
}
__weak FUNC_ALIAS(_open, open, int);

int _close(int file)
{
	return -1;
}
__weak FUNC_ALIAS(_close, close, int);
#endif /* CONFIG_POSIX_DEVICE_IO */

#ifndef CONFIG_POSIX_FD_MGMT
int _lseek(int file, int ptr, int dir)
{
	return 0;
}
__weak FUNC_ALIAS(_lseek, lseek, int);
#endif /* CONFIG_POSIX_FD_MGMT */

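/* Report the standard streams (file descriptors 0-2) as terminals */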
int _isatty(int file)
{
	return file <= 2;
}
__weak FUNC_ALIAS(_isatty, isatty, int);

#ifndef CONFIG_POSIX_SIGNALS
int _kill(int i, int j)
{
	return 0;
}
__weak FUNC_ALIAS(_kill, kill, int);
#endif /* CONFIG_POSIX_SIGNALS */

#ifndef CONFIG_POSIX_FILE_SYSTEM
int _fstat(int file, struct stat *st)
{
	st->st_mode = S_IFCHR;
	return 0;
}
__weak FUNC_ALIAS(_fstat, fstat, int);
#endif /* CONFIG_POSIX_FILE_SYSTEM */

#ifndef CONFIG_POSIX_MULTI_PROCESS
int _getpid(void)
{
	return 0;
}
__weak FUNC_ALIAS(_getpid, getpid, int);
#endif /* CONFIG_POSIX_MULTI_PROCESS */

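/* Default exit handler: report the exit on stdout and spin forever */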
__weak void _exit(int status)
{
	_write(1, "exit\n", 5);
	while (1) {
		;
	}
}

#ifndef CONFIG_NEWLIB_LIBC_CUSTOM_SBRK
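/* Bump allocator backing newlib's malloc(): advance the break by count bytes
 * within the [HEAP_BASE, HEAP_BASE + MAX_HEAP_SIZE) arena and return the
 * previous break, or (void *)-1 if the request does not fit.
 */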
void *_sbrk(intptr_t count)
{
	void *ret, *ptr;

	ptr = ((char *)HEAP_BASE) + heap_sz;

	if ((heap_sz + count) < MAX_HEAP_SIZE) {
		heap_sz += count;
		ret = ptr;

#ifdef CONFIG_NEWLIB_LIBC_HEAP_LISTENER
		heap_listener_notify_resize(HEAP_ID_LIBC, ptr, (char *)ptr + count);
#endif
	} else {
		ret = (void *)-1;
	}

	return ret;
}
__weak FUNC_ALIAS(_sbrk, sbrk, void *);
#endif /* CONFIG_NEWLIB_LIBC_CUSTOM_SBRK */

#ifdef CONFIG_MULTITHREADING

/* Make sure _RETARGETABLE_LOCKING is enabled in the toolchain */
BUILD_ASSERT(IS_ENABLED(_RETARGETABLE_LOCKING), "Retargetable locking must be enabled");

/*
 * Newlib Retargetable Locking Interface Implementation
 *
 * When multithreading is enabled, the newlib retargetable locking interface is
 * defined below to override the default void implementation and provide the
 * Zephyr-side locks.
 *
 * NOTE: `k_mutex` and `k_sem` are used instead of `sys_mutex` and `sys_sem`
 * because the latter do not support dynamic allocation for now.
 */

/* Static locks */
K_MUTEX_DEFINE(__lock___sinit_recursive_mutex);
K_MUTEX_DEFINE(__lock___sfp_recursive_mutex);
K_MUTEX_DEFINE(__lock___atexit_recursive_mutex);
K_MUTEX_DEFINE(__lock___malloc_recursive_mutex);
K_MUTEX_DEFINE(__lock___env_recursive_mutex);
K_SEM_DEFINE(__lock___at_quick_exit_mutex, 1, 1);
K_SEM_DEFINE(__lock___tz_mutex, 1, 1);
K_SEM_DEFINE(__lock___dd_hash_mutex, 1, 1);
K_SEM_DEFINE(__lock___arc4random_mutex, 1, 1);

#ifdef CONFIG_USERSPACE
/* Grant public access to all static locks after boot */
static int newlib_locks_prepare(void)
{
	/* Initialise recursive locks */
	k_object_access_all_grant(&__lock___sinit_recursive_mutex);
	k_object_access_all_grant(&__lock___sfp_recursive_mutex);
	k_object_access_all_grant(&__lock___atexit_recursive_mutex);
	k_object_access_all_grant(&__lock___malloc_recursive_mutex);
	k_object_access_all_grant(&__lock___env_recursive_mutex);

	/* Initialise non-recursive locks */
	k_object_access_all_grant(&__lock___at_quick_exit_mutex);
	k_object_access_all_grant(&__lock___tz_mutex);
	k_object_access_all_grant(&__lock___dd_hash_mutex);
	k_object_access_all_grant(&__lock___arc4random_mutex);

	return 0;
}

SYS_INIT(newlib_locks_prepare, POST_KERNEL,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_USERSPACE */

/* Create a new dynamic non-recursive lock */
void __retarget_lock_init(_LOCK_T *lock)
{
	__ASSERT_NO_MSG(lock != NULL);

	/* Allocate semaphore object */
#ifndef CONFIG_USERSPACE
	*lock = malloc(sizeof(struct k_sem));
#else
	*lock = k_object_alloc(K_OBJ_SEM);
#endif /* !CONFIG_USERSPACE */
	__ASSERT(*lock != NULL, "non-recursive lock allocation failed");

	k_sem_init((struct k_sem *)*lock, 1, 1);
#ifdef CONFIG_USERSPACE
	k_object_access_all_grant(*lock);
#endif /* CONFIG_USERSPACE */
}

/* Create a new dynamic recursive lock */
void __retarget_lock_init_recursive(_LOCK_T *lock)
{
	__ASSERT_NO_MSG(lock != NULL);

	/* Allocate mutex object */
#ifndef CONFIG_USERSPACE
	*lock = malloc(sizeof(struct k_mutex));
#else
	*lock = k_object_alloc(K_OBJ_MUTEX);
#endif /* !CONFIG_USERSPACE */
	__ASSERT(*lock != NULL, "recursive lock allocation failed");

	k_mutex_init((struct k_mutex *)*lock);
#ifdef CONFIG_USERSPACE
	k_object_access_all_grant(*lock);
#endif /* CONFIG_USERSPACE */
}

/* Close dynamic non-recursive lock */
void __retarget_lock_close(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
#ifndef CONFIG_USERSPACE
	free(lock);
#else
	k_object_release(lock);
#endif /* !CONFIG_USERSPACE */
}

/* Close dynamic recursive lock */
void __retarget_lock_close_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
#ifndef CONFIG_USERSPACE
	free(lock);
#else
	k_object_release(lock);
#endif /* !CONFIG_USERSPACE */
}

/* Acquire non-recursive lock */
void __retarget_lock_acquire(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_sem_take((struct k_sem *)lock, K_FOREVER);
}

/* Acquire recursive lock */
void __retarget_lock_acquire_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_mutex_lock((struct k_mutex *)lock, K_FOREVER);
}

/* Try acquiring non-recursive lock */
int __retarget_lock_try_acquire(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	return !k_sem_take((struct k_sem *)lock, K_NO_WAIT);
}

/* Try acquiring recursive lock */
int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	return !k_mutex_lock((struct k_mutex *)lock, K_NO_WAIT);
}

/* Release non-recursive lock */
void __retarget_lock_release(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_sem_give((struct k_sem *)lock);
}

/* Release recursive lock */
void __retarget_lock_release_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_mutex_unlock((struct k_mutex *)lock);
}
#endif /* CONFIG_MULTITHREADING */

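/* Redirect newlib's errno accesses to Zephyr's per-thread errno value */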
__weak int *__errno(void)
{
	return z_errno();
}

/* This function is called when static buffer overflow detection is enabled
 * on the stdlib side (Newlib here) and such an overflow is detected. Newlib
 * provides an implementation that is not suitable for us, so we override it
 * here.
 */
__weak FUNC_NORETURN void __chk_fail(void)
{
	static const char chk_fail_msg[] = "* buffer overflow detected *\n";

	_write(2, chk_fail_msg, sizeof(chk_fail_msg) - 1);
	k_oops();
	CODE_UNREACHABLE;
}

#ifdef CONFIG_XTENSA
/* The Newlib shipped with the Xtensa toolchain is missing a few of the
 * reentrant versions of the syscalls, so provide them here.
 */
_ssize_t _read_r(struct _reent *r, int fd, void *buf, size_t nbytes)
{
	ARG_UNUSED(r);

	return _read(fd, (char *)buf, nbytes);
}

_ssize_t _write_r(struct _reent *r, int fd, const void *buf, size_t nbytes)
{
	ARG_UNUSED(r);

	return _write(fd, buf, nbytes);
}

int _open_r(struct _reent *r, const char *name, int flags, int mode)
{
	ARG_UNUSED(r);
	ARG_UNUSED(flags);

	return _open(name, flags, mode);
}

int _close_r(struct _reent *r, int file)
{
	ARG_UNUSED(r);

	return _close(file);
}

_off_t _lseek_r(struct _reent *r, int file, _off_t ptr, int dir)
{
	ARG_UNUSED(r);

	return _lseek(file, ptr, dir);
}

int _isatty_r(struct _reent *r, int file)
{
	ARG_UNUSED(r);

	return _isatty(file);
}

int _kill_r(struct _reent *r, int i, int j)
{
	ARG_UNUSED(r);

	return _kill(i, j);
}

int _getpid_r(struct _reent *r)
{
	ARG_UNUSED(r);

	return _getpid();
}

int _fstat_r(struct _reent *r, int file, struct stat *st)
{
	ARG_UNUSED(r);

	return _fstat(file, st);
}

void _exit_r(struct _reent *r, int status)
{
	ARG_UNUSED(r);

	_exit(status);
}

void *_sbrk_r(struct _reent *r, int count)
{
	ARG_UNUSED(r);

	return _sbrk(count);
}
#endif /* CONFIG_XTENSA */

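/* Stub backing newlib's gettimeofday(): forward to the POSIX implementation
 * when XSI single process support is enabled, otherwise fail.
 */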
int _gettimeofday(struct timeval *__tp, void *__tzp)
{
#ifdef CONFIG_XSI_SINGLE_PROCESS
	return gettimeofday(__tp, __tzp);
#else
	/* Non-POSIX systems should not call gettimeofday() here, as doing so
	 * would result in a recursive call loop and a stack overflow.
	 */
	return -1;
#endif
}