/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * Thread context switching for ARM64 Cortex-A (AArch64)
 *
 * This module implements the routines necessary for thread context switching
 * on ARM64 Cortex-A (AArch64)
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/syscall.h>
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

/*
 * Routine to handle context switches
 *
 * This function is directly called either by _isr_wrapper() in case of
 * preemption, or arch_switch() in case of cooperative switching.
 *
 * void z_arm64_context_switch(struct k_thread *new, struct k_thread *old);
 */

GTEXT(z_arm64_context_switch)
SECTION_FUNC(TEXT, z_arm64_context_switch)

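	/*
	 * x0 = new thread to switch in, x1 = thread being switched out
	 * (the two arguments of the prototype above). Only the AAPCS64
	 * callee-saved registers (x19-x28, x29/FP, SP, LR) are part of the
	 * per-thread context saved here; the caller-saved registers are
	 * preserved by the caller (arch_switch()) or by the exception
	 * entry code in case of preemption.
	 */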
#ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	/* Save the current SP_EL0 */
	mrs	x4, sp_el0
#endif

	stp	x19, x20, [x1, #_thread_offset_to_callee_saved_x19_x20]
	stp	x21, x22, [x1, #_thread_offset_to_callee_saved_x21_x22]
	stp	x23, x24, [x1, #_thread_offset_to_callee_saved_x23_x24]
	stp	x25, x26, [x1, #_thread_offset_to_callee_saved_x25_x26]
	stp	x27, x28, [x1, #_thread_offset_to_callee_saved_x27_x28]
#ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	stp	x29, x4,  [x1, #_thread_offset_to_callee_saved_x29_sp_el0]
#else
	str	x29,      [x1, #_thread_offset_to_callee_saved_x29_sp_el0]
#endif

	/* Save the current SP_ELx and return address */
	mov	x4, sp
	stp	x4, lr, [x1, #_thread_offset_to_callee_saved_sp_elx_lr]

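	/*
	 * The exception depth of the running thread lives in the upper bits
	 * of tpidrro_el0 (TPIDRROEL0_EXC_DEPTH field): stash it into the
	 * outgoing thread and load the incoming thread's depth so that
	 * exception nesting tracking follows the thread across the switch.
	 */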
	/* save current thread's exception depth */
	mrs	x4, tpidrro_el0
	lsr	x2, x4, #TPIDRROEL0_EXC_SHIFT
	strb	w2, [x1, #_thread_offset_to_exception_depth]

	/* retrieve next thread's exception depth */
	ldrb	w2, [x0, #_thread_offset_to_exception_depth]
	bic	x4, x4, #TPIDRROEL0_EXC_DEPTH
	orr	x4, x4, x2, lsl #TPIDRROEL0_EXC_SHIFT
	msr	tpidrro_el0, x4

#ifdef CONFIG_FPU_SHARING
	/*
	 * Do this after tpidrro_el0 is updated with the new exception
	 * depth value, and before old->switch_handle is updated (making
	 * it available for grab by another CPU) as we still use its stack.
	 */
	stp	x0, x1, [sp, #-16]!
	bl	z_arm64_fpu_thread_context_switch
	ldp	x0, x1, [sp], #16
#endif

#ifdef CONFIG_SMP
	/* Write barrier: ensure all preceding writes are visible
	 * before the switch handle is written
	 */
	dmb sy
#endif

	/* Save the old thread into its switch handle, which is required
	 * by z_sched_switch_spin()
	 */
	str	x1, [x1, #___thread_t_switch_handle_OFFSET]
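	/*
	 * From this point on the old thread can be picked up and resumed
	 * by another CPU, which is why the FPU hand-off and the barrier
	 * above are done before this store.
	 */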

#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Grab the TLS pointer */
	ldr	x2, [x0, #_thread_offset_to_tls]

	/* Store in the "Thread ID" register.
	 * This register is used as a base pointer to all
	 * thread variables with offsets added by toolchain.
	 */
	msr	tpidr_el0, x2
#endif

	ldp	x19, x20, [x0, #_thread_offset_to_callee_saved_x19_x20]
	ldp	x21, x22, [x0, #_thread_offset_to_callee_saved_x21_x22]
	ldp	x23, x24, [x0, #_thread_offset_to_callee_saved_x23_x24]
	ldp	x25, x26, [x0, #_thread_offset_to_callee_saved_x25_x26]
	ldp	x27, x28, [x0, #_thread_offset_to_callee_saved_x27_x28]
#ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	ldp	x29, x4,  [x0, #_thread_offset_to_callee_saved_x29_sp_el0]

	/* Restore SP_EL0 */
	msr	sp_el0, x4
#else
	ldr	x29,      [x0, #_thread_offset_to_callee_saved_x29_sp_el0]
#endif

	/* Restore SP_ELx and return address */
	ldp	x4, lr, [x0, #_thread_offset_to_callee_saved_sp_elx_lr]
	mov	sp, x4

#if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK)
	/* arch_curr_cpu()->arch.current_stack_limit = thread->arch.stack_limit */
	get_cpu	x4
	ldr	x2, [x0, #_thread_offset_to_stack_limit]
	str	x2, [x4, #_cpu_offset_to_current_stack_limit]
#endif

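	/*
	 * lr now holds the incoming thread's return address (restored
	 * above), so it must be preserved across the helper calls below;
	 * the 16-byte push also keeps the stack pointer properly aligned.
	 */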
#if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION)
	str	lr, [sp, #-16]!
	bl	z_arm64_swap_mem_domains
	ldr	lr, [sp], #16
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	str	lr, [sp, #-16]!
	bl	z_thread_mark_switched_in
	ldr	lr, [sp], #16
#endif

	/* Return to arch_switch() or _isr_wrapper() */
	ret

/*
 * Synchronous exception handler
 *
 * The service call (SVC) is used on the following occasions:
 * - Cooperative context switching
 * - IRQ offloading
 */

GTEXT(z_arm64_sync_exc)
SECTION_FUNC(TEXT, z_arm64_sync_exc)

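	/*
	 * Entered from the exception vector code with the ESF already
	 * saved on the stack (hence the sp-relative ESF accesses below).
	 */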
	mrs	x0, esr_el1
	lsr	x1, x0, #26
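	/* x1 now holds the exception class: ESR_EL1.EC is bits [31:26] */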

#ifdef CONFIG_FPU_SHARING
	cmp	x1, #0x07 /* Access to SIMD or floating-point */
	bne	1f
	mov	x0, sp
	bl	z_arm64_fpu_trap
	b	z_arm64_exit_exc_fpu_done
1:
#endif

	cmp	x1, #0x15 /* 0x15 = SVC */
	bne	inv

	/* Demux the SVC call */
	and	x1, x0, #0xff
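	/*
	 * For an AArch64 SVC the immediate is in ESR_EL1.ISS[15:0]; the
	 * SVC call IDs compared against below all fit in the low byte,
	 * hence the #0xff mask.
	 */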

	cmp	x1, #_SVC_CALL_RUNTIME_EXCEPT
	beq	oops

#ifdef CONFIG_USERSPACE
	cmp	x1, #_SVC_CALL_SYSTEM_CALL
	beq	z_arm64_do_syscall
#endif

#ifdef CONFIG_IRQ_OFFLOAD
	cmp	x1, #_SVC_CALL_IRQ_OFFLOAD
	beq	offload
	b	inv
offload:
	/*
	 * Retrieve the provided routine and argument from the stack.
	 * The routine pointer is in saved x0, the argument in saved x1,
	 * so we load them with x1/x0 (reversed).
	 */
	ldp	x1, x0, [sp, ___esf_t_x0_x1_OFFSET]

	/* ++_current_cpu->nested to be checked by arch_is_in_isr() */
	get_cpu	x2
	ldr	w3, [x2, #___cpu_t_nested_OFFSET]
	add	w4, w3, #1
	str	w4, [x2, #___cpu_t_nested_OFFSET]

	/* If not nested: switch to IRQ stack and save current sp on it. */
	cbnz	w3, 1f
	ldr	x3, [x2, #___cpu_t_irq_stack_OFFSET]
	mov	x4, sp
	mov	sp, x3
	str	x4, [sp, #-16]!
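	/*
	 * The outgoing SP is kept at the top of the IRQ stack so it can be
	 * restored on the way out; the 16-byte push keeps the stack
	 * pointer 16-byte aligned as required on AArch64.
	 */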
#if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK)
	/* update the current stack limit to the bottom of the IRQ stack */
	sub	x3, x3, #CONFIG_ISR_STACK_SIZE
	str	x3, [x2, #_cpu_offset_to_current_stack_limit]
#endif
1:
	/* Execute provided routine (argument is in x0 already). */
	blr	x1

	/* Exit through regular IRQ exit path */
	b	z_arm64_irq_done
#endif
	b	inv

oops:
	mov	x0, sp
	b	z_arm64_do_kernel_oops

inv:
	mov	x0, #0 /* K_ERR_CPU_EXCEPTION */
	mov	x1, sp
	bl	z_arm64_fatal_error

	/* Return here only in case of recoverable error */
	b	z_arm64_exit_exc
