/arch/posix/

  Kconfig
    19: In bytes, stack size for Zephyr threads meant only for the POSIX
    22: thread stack; the real stack is the native underlying pthread stack.
    23: Therefore the allocated stack can be limited to this size.

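On the POSIX (native simulator) architecture, then, the Zephyr-side stack object is mostly bookkeeping: execution really happens on the host pthread's stack, so the Zephyr stack can stay small. A minimal sketch using Zephyr's documented thread APIs (the 1024-byte size is an illustrative value, not a recommendation from the source):

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

/* Illustrative size only: on the POSIX arch the underlying pthread
 * provides the real call stack, so this object mainly holds bookkeeping.
 */
#define DEMO_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(demo_stack, DEMO_STACK_SIZE);
static struct k_thread demo_thread;

static void demo_entry(void *p1, void *p2, void *p3)
{
    printk("running on the host pthread's stack\n");
}

void demo_start(void)
{
    k_thread_create(&demo_thread, demo_stack,
                    K_THREAD_STACK_SIZEOF(demo_stack),
                    demo_entry, NULL, NULL, NULL,
                    K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
}
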
/arch/arm/core/cortex_a_r/

  Kconfig
     40: int "Undefined Instruction and Abort stack size (in bytes)"
     43: This option specifies the size of the stack used by the undefined
     47: int "FIQ stack size (in bytes)"
     50: This option specifies the size of the stack used by the FIQ handler.
     53: int "SVC stack size (in bytes)"
     56: This option specifies the size of the stack used by the SVC handler.
     59: int "SYS stack size (in bytes)"
    191: int "Undefined Instruction and Abort stack size (in bytes)"
    198: int "FIQ stack size (in bytes)"
    204: int "SVC stack size (in bytes)"
    [all …]

  macro_priv.inc
    52: * Store r0-r3, r12, lr into the stack to construct an exception
    53: * stack frame.

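The two comment lines above describe the frame the exception-entry macro pushes: the AAPCS caller-saved registers plus the link register. A hedged C rendering of that layout (the struct name is illustrative; the source builds this in assembly, and the field order assumes an STMDB-style register-list store):

#include <stdint.h>

/* Illustrative view of the exception stack frame the macro constructs
 * by storing r0-r3, r12 and lr; lowest-numbered register at the lowest
 * address, as a register-list store would place them.
 */
struct demo_exc_frame {
    uint32_t r0;
    uint32_t r1;
    uint32_t r2;
    uint32_t r3;
    uint32_t r12;
    uint32_t lr;  /* return address into the interrupted context */
};
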
/arch/x86/core/intel64/

  thread.c
    28: void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
    39:         z_x86_set_stack_guard(stack);
    41:         ARG_UNUSED(stack);

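Lines 39 and 41 are the two arms of a conditional-compilation pattern: with stack protection compiled in, a guard is installed on the new thread's stack; otherwise the parameter is explicitly marked unused. A hedged sketch of that shape (the CONFIG_X86_STACK_PROTECTION guard symbol is an assumption here, for illustration):

#include <zephyr/kernel.h>

/* Arch-private helper from the file above. */
void z_x86_set_stack_guard(k_thread_stack_t *stack);

void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
                     char *stack_ptr, k_thread_entry_t entry,
                     void *p1, void *p2, void *p3)
{
    /* ... build the thread's initial frame at stack_ptr ... */

#ifdef CONFIG_X86_STACK_PROTECTION  /* assumed symbol, for illustration */
    /* make the region below the usable stack fault on access */
    z_x86_set_stack_guard(stack);
#else
    ARG_UNUSED(stack);  /* 'stack' is only needed to place the guard */
#endif
}
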
  cpu.c
    50: void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
    74:         x86_cpuboot[cpu_num].sp = (uint64_t) K_KERNEL_STACK_BUFFER(stack) + sz;
    88:         ARG_UNUSED(stack);

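Line 74 is an idiom that recurs in the arm64, RISC-V and ARC smp.c entries further down: stacks grow downward, so a secondary CPU's initial stack pointer is the usable buffer's base plus its size, i.e. just past the top of the stack. A minimal sketch using the same documented macro:

#include <zephyr/kernel.h>

/* Initial SP for a CPU's boot stack: stacks grow down, so start at the
 * end of the usable buffer (base + size).
 */
static inline uintptr_t demo_boot_sp(k_thread_stack_t *stack, size_t sz)
{
    return (uintptr_t)K_KERNEL_STACK_BUFFER(stack) + sz;
}
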
/arch/x86/core/

  Kconfig.intel64
    24: int "Size of the exception stack(s)"
    27: The exception stack(s) (one per CPU) are used both for exception
    69: Number of bytes from the ISR stack to reserve for each nested IRQ
    70: level. Must be a multiple of 16 to maintain stack alignment. Note that
    81: bounds of the current process stack are overflowed. This is done
    82: by preceding all stack areas with a 4K guard page.

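The multiple-of-16 rule at line 70 is the kind of constraint that can be pinned down at compile time. A hedged sketch with Zephyr's BUILD_ASSERT (CONFIG_ISR_SUBSTACK_SIZE is an assumed name for the per-level reservation):

#include <zephyr/kernel.h>

/* Refuse to build if the per-nesting-level ISR stack reservation would
 * break x86-64's 16-byte stack alignment. The config name is assumed
 * here for illustration.
 */
BUILD_ASSERT((CONFIG_ISR_SUBSTACK_SIZE % 16) == 0,
             "ISR per-level stack reservation must be a multiple of 16");
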
/arch/arc/core/mpu/

  Kconfig
    24: Enable thread stack guards via MPU. ARC supports built-in stack protection;
    25: if your core supports that, it is preferred over the MPU stack guard.

/arch/xtensa/core/

  README_WINDOWS.rst
     34: to WINDOWBASE, at the same time copying the old (now hidden) stack
     41: in two places. They have to be stored on the stack across potentially
     81: be spilled naturally into the stack by using the stack pointers
     83: some extent enforces a fairly complicated stack layout to make that
     87: stack frame. It lies in the 16 bytes below its CALLEE's stack
     89: on its behalf) can see its caller's potentially-spilled stack pointer
     90: register (A1) on the stack and be able to walk back up on return.
     91: Other architectures do this too by e.g. pushing the incoming stack
     92: pointer onto the stack as a standard "frame pointer" defined in the
    103: the top of the stack frame, immediately below the parent call's A0-A3

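Lines 87-90 are the key to unwinding windowed Xtensa stacks: a frame's spill area sits in the 16 bytes below its callee's stack pointer, and it holds the spilled A0 (return address) and A1 (the frame owner's stack pointer) first. A hedged walker built on exactly that layout (the word offsets are inferred from the prose, for illustration only):

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

/* Walk spilled window frames per the layout in the README: the 4 words
 * below a frame's SP hold the caller's spilled A0-A3, so saved A0 gives
 * the return address and saved A1 the next stack pointer. Offsets are
 * an assumption drawn from the prose.
 */
static void demo_walk_frames(uintptr_t sp, int max_depth)
{
    for (int d = 0; d < max_depth && sp != 0; d++) {
        const uint32_t *spill = (const uint32_t *)sp;
        uint32_t a0 = spill[-4];  /* spilled return address */
        uint32_t a1 = spill[-3];  /* spilled stack pointer  */

        printk("  #%d pc %08x sp %08x\n", d, a0, (uint32_t)sp);
        sp = a1;  /* step up one frame */
    }
}
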
  thread.c
    117: void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
    128:         __ASSERT((((size_t)stack) % XCHAL_DCACHE_LINESIZE) == 0, "");
    130:         sys_cache_data_flush_and_invd_range(stack, (char *)stack_ptr - (char *)stack);

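Line 130 writes the freshly initialized stack region back to memory and drops it from the data cache, so no stale lines for the new thread's stack linger; line 128 can demand cache-line alignment of the stack base because the flush operates on whole lines. The same pattern with Zephyr's documented cache API, as a standalone sketch:

#include <zephyr/cache.h>

/* After building a thread's initial frame through cached stores, flush
 * and invalidate the touched range so nothing stale remains cached when
 * the thread starts running (possibly in another cache domain).
 */
static void demo_sync_new_stack(char *stack, char *stack_ptr)
{
    sys_cache_data_flush_and_invd_range(stack, (size_t)(stack_ptr - stack));
}
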
  gdbstub.c
    435: static void copy_to_ctx(struct gdb_ctx *ctx, const struct arch_esf *stack)
    450:         const uint32_t *bsa = *(const int **)stack;
    452:         if (bsa - (const uint32_t *)stack > 12) {
    454:         } else if (bsa - (const uint32_t *)stack > 8) {
    456:         } else if (bsa - (const uint32_t *)stack > 4) {
    521: static void restore_from_ctx(struct gdb_ctx *ctx, const struct arch_esf *stack)
    526:         _xtensa_irq_bsa_t *bsa = (void *)*(const int **)stack;
    528:         if ((uint32_t *)bsa - (const uint32_t *)stack > 12) {
    530:         } else if ((uint32_t *)bsa - (const uint32_t *)stack > 8) {
    532:         } else if ((uint32_t *)bsa - (const uint32_t *)stack > 4) {

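Both functions start from the same trick: the first word of the exception frame is a pointer to the base save area (BSA), and the gap between the frame base and the BSA, measured in 32-bit words, reveals how many quads of high registers (A4-A15) were spilled into the frame. A hedged distillation of that repeated test (the helper name and return convention are illustrative, not from the source):

#include <stddef.h>
#include <stdint.h>

/* How many A4-A15 register quads does this exception frame hold? The
 * frame's first word points at the base save area; the distance to it
 * in words encodes the answer. Thresholds mirror the snippet above;
 * name and return convention are illustrative.
 */
static int demo_spilled_quads(const void *frame)
{
    const uint32_t *bsa = *(const uint32_t *const *)frame;
    ptrdiff_t gap = bsa - (const uint32_t *)frame;

    if (gap > 12) {
        return 3;  /* A4-A7, A8-A11 and A12-A15 present */
    } else if (gap > 8) {
        return 2;  /* A4-A7 and A8-A11 present */
    } else if (gap > 4) {
        return 1;  /* A4-A7 present */
    }
    return 0;
}
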
/arch/arm64/core/

  Kconfig
    140: bool "Enable the safe exception stack"
    142: The safe exception stack is used to check whether the kernel stack
    143: overflows when an exception is taken from EL1. This stack is not
    144: used for user stack overflow checking, because kernel stack support
    152: Internal config to enable runtime stack traces on fatal exceptions.
    163: int "The stack size of the safe exception stack"
    167: The stack size of the safe exception stack, which must be large
    168: enough to perform the stack overflow check.
    187: the bounds of the current process stack are overflowed. This is done
    188: by preceding all stack areas with a fixed guard region.

  thread.c
     85: void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
    149:         thread->arch.stack_limit = (uint64_t)stack + Z_ARM64_STACK_GUARD_SIZE;

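Line 149 records where the usable stack ends and the guard begins: the guard occupies the lowest Z_ARM64_STACK_GUARD_SIZE bytes of the stack object, so a descending stack pointer that drops below stack_limit has entered the guard. The layout, plus a hypothetical bounds check for illustration:

#include <zephyr/kernel.h>

/* Layout implied by line 149 (addresses grow upward):
 *
 *   stack + size -> +------------------+  <- initial SP
 *                   |   usable stack   |
 *   stack_limit  -> +------------------+  stack + Z_ARM64_STACK_GUARD_SIZE
 *                   |   guard region   |  (faults on access)
 *   stack        -> +------------------+  object base
 *
 * Hypothetical helper, for illustration only.
 */
static inline bool demo_sp_in_guard(const struct k_thread *t, uint64_t sp)
{
    return sp < t->arch.stack_limit;
}
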
  smp.c
    69: void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
    91:         arm64_cpu_boot_params.sp = K_KERNEL_STACK_BUFFER(stack) + sz;

/arch/riscv/core/

  smp.c
    31: void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
    37:         riscv_cpu_sp = K_KERNEL_STACK_BUFFER(stack) + sz;

/arch/xtensa/include/

  xtensa_internal.h
    28: void xtensa_dump_stack(const void *stack);

/arch/arc/

  Kconfig
    196: bool "Separate FIRQ stack"
    203: int "FIRQ stack size"
    207: The size of the FIRQ stack.
    215: checking stack accesses and raising an exception when a stack
    229: Use ARC STACK_CHECKING to do stack protection
    239: - the ARC stack checking, or
    240: - the MPU-based stack guard
    243: The two stack guard options are mutually exclusive. The
    244: selection of the ARC stack checking is
    245: prioritized over the MPU-based stack guard.
    [all …]

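The precedence spelled out at lines 243-245 usually takes the form of compile-time selection, sketched below. Both config names (CONFIG_ARC_STACK_CHECKING, CONFIG_MPU_STACK_GUARD) are assumptions for illustration:

#include <zephyr/kernel.h>

/* Prefer the core's hardware stack checking; fall back to an MPU guard
 * region if only that is configured. Config names are assumed.
 */
static void demo_protect_stack(struct k_thread *thread)
{
    ARG_UNUSED(thread);
#if defined(CONFIG_ARC_STACK_CHECKING)
    /* hardware checking: program the per-thread stack bounds */
#elif defined(CONFIG_MPU_STACK_GUARD)
    /* MPU guard: a no-access region below the stack traps overflows */
#endif
}
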
/arch/x86/core/ia32/

  thread.c
    84: void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
    95:         z_x86_set_stack_guard(stack);

/arch/arc/core/

  smp.c
    44: void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
    55:         arc_cpu_sp = K_KERNEL_STACK_BUFFER(stack) + sz;

/arch/mips/core/

  thread.c
    18: void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,

/arch/arc/include/

  kernel_arch_func.h
    68: void *p2, void *p3, uint32_t stack, uint32_t size,

/arch/rx/core/

  isr_exit.S
    19: push r2    ; Save old_thread to the stack

  thread.c
    21: void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *stack_ptr,

/arch/mips/

  Kconfig
    28: # Bump the kernel default stack size values.

/arch/arm/core/

  Kconfig
    155: the ARM. It wastes stack space. The option also enforces alignment
    156: of the stack upon exception entry on Cortex-M3 and Cortex-M4 (ARMv7-M).
    157: Note that for ARMv6-M, ARMv8-M, and Cortex-M7 MCUs stack alignment
    194: bool "Thread Stack Guards based on built-in ARM stack limit checking"
    209: - the MPU-based stack guard
    211: if the bounds of the current process stack are overflowed.
    212: The two stack guard options are mutually exclusive. The
    214: prioritized over the MPU-based stack guard. The developer
    216: stack guard, if this is desired.

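Lines 155-156 concern double-word (8-byte) stack alignment on exception entry, which AAPCS expects at public interfaces and which Cortex-M3/M4 hardware can enforce. What the adjustment amounts to, as an illustrative helper (not the hardware mechanism itself):

#include <stdint.h>

/* Round a stack pointer down to an 8-byte (double-word) boundary. */
static inline uintptr_t demo_align_sp(uintptr_t sp)
{
    return sp & ~(uintptr_t)7;
}
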
/arch/sparc/core/

  thread.c
    31: void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,