// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
#include <linux/sched/debug.h> /* for show_regs */

#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/interrupt.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/tm.h>
#include <asm/unistd.h>

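/*
 * Per-CPU save area for the kernel's DBCR0 on 32-bit CPUs with the
 * BookE-style advanced debug registers: booke_load_dbcr0() saves the
 * current value here before loading the user value on exit to userspace.
 */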
#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
unsigned long global_dbcr0[NR_CPUS];
#endif

#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

static inline bool exit_must_hard_disable(void)
{
	return static_branch_unlikely(&interrupt_exit_not_reentrant);
}
#else
static inline bool exit_must_hard_disable(void)
{
	return true;
}
#endif

/*
 * local irqs must be disabled on entry. Returns false if the caller must
 * re-enable them, check for new work, and try again.
 *
 * If restartable is true then EE/RI can be left on because interrupts are
 * handled with a restart sequence.
 */
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
	bool must_hard_disable = (exit_must_hard_disable() || !restartable);

	/* This must be done with RI=1 because tracing may touch vmaps */
	trace_hardirqs_on();

	if (must_hard_disable)
		__hard_EE_RI_disable();

#ifdef CONFIG_PPC64
	/* This pattern matches prep_irq_for_idle */
	if (unlikely(lazy_irq_pending_nocheck())) {
		if (must_hard_disable) {
			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
			__hard_RI_enable();
		}
		trace_hardirqs_off();

		return false;
	}
#endif
	return true;
}

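/*
 * Load the thread's DBCR0 on exit to userspace when it has internal debug
 * mode set, first saving the kernel's value per-CPU on 32-bit (the low
 * level entry code is expected to restore it on the next kernel entry).
 */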
static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	/*
	 * Check whether dbcr0 is set up to debug, using the internal
	 * debug mode bit. If not, there is nothing to do.
	 */
	if (likely(!(dbcr0 & DBCR0_IDM)))
		return;

	mtmsr(mfmsr() & ~MSR_DE);
	if (IS_ENABLED(CONFIG_PPC32)) {
		isync();
		global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
	}
	mtspr(SPRN_DBCR0, dbcr0);
	mtspr(SPRN_DBSR, -1);
#endif
}

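/*
 * Sanity check that the SRRs (or HSRRs, for interrupts delivered via the
 * hypervisor save/restore registers) still hold the values this interrupt
 * will return with. The paca srr_valid/hsrr_valid flags track whether they
 * may have been clobbered since entry; a mismatch while the flag is still
 * set indicates a bug, so warn once and clear the flag so the registers
 * are reloaded on exit.
 */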
static void check_return_regs_valid(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long trap, srr0, srr1;
	static bool warned;
	u8 *validp;
	char *h;

	if (trap_is_scv(regs))
		return;

	trap = TRAP(regs);
	// EE in HV mode sets HSRRs like 0xea0
	if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
		trap = 0xea0;

	switch (trap) {
	case 0x980:
	case INTERRUPT_H_DATA_STORAGE:
	case 0xe20:
	case 0xe40:
	case INTERRUPT_HMI:
	case 0xe80:
	case 0xea0:
	case INTERRUPT_H_FAC_UNAVAIL:
	case 0x1200:
	case 0x1500:
	case 0x1600:
	case 0x1800:
		validp = &local_paca->hsrr_valid;
		if (!*validp)
			return;

		srr0 = mfspr(SPRN_HSRR0);
		srr1 = mfspr(SPRN_HSRR1);
		h = "H";

		break;
	default:
		validp = &local_paca->srr_valid;
		if (!*validp)
			return;

		srr0 = mfspr(SPRN_SRR0);
		srr1 = mfspr(SPRN_SRR1);
		h = "";
		break;
	}

	if (srr0 == regs->nip && srr1 == regs->msr)
		return;

	/*
	 * An NMI / soft-NMI interrupt may have come in after we found
	 * srr_valid and before we read the SRRs: it clobbers the SRRs and
	 * clears srr_valid, so the values read here fail the comparison
	 * above even though nothing is wrong.
	 *
	 * Test validity again after that, to catch such false positives.
	 *
	 * This test in general will have some window for false negatives
	 * and may not catch and fix all such cases if an NMI comes in
	 * later and clobbers SRRs without clearing srr_valid, but hopefully
	 * such things will get caught most of the time, statistically
	 * enough to be able to get a warning out.
	 */
	barrier();

	if (!*validp)
		return;

	if (!warned) {
		warned = true;
		printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
		printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
		show_regs(regs);
	}

	*validp = 0; /* fixup */
#endif
}

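/*
 * Common exit-to-user work: process TIF work (reschedule, signals, etc.),
 * restore FP/VEC/VSX and debug state, and prepare irq and soft-mask state
 * for the return. The returned flags tell the exit assembly what else to
 * restore (e.g. _TIF_RESTOREALL to reload all GPRs from the frame).
 */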
static notrace unsigned long
interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
{
	unsigned long ti_flags;

again:
	ti_flags = read_thread_flags();
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable();
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			/*
			 * SIGPENDING must restore signal handler function
			 * argument GPRs, and some non-volatiles (e.g., r1).
			 * Restore all for now. This could be made lighter.
			 */
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = read_thread_flags();
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
		    unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			/*
			 * If userspace MSR has all available FP bits set,
			 * then they are live and no need to restore. If not,
			 * it means the regs were given up and restore_math
			 * may decide to restore them (to avoid taking an FP
			 * fault).
			 */
			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	check_return_regs_valid(regs);

	user_enter_irqoff();
	if (!prep_irq_for_enabled_exit(true)) {
		user_exit_irqoff();
		local_irq_enable();
		local_irq_disable();
		goto again;
	}

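	/* Record the MSR being returned with; tm_scratch is a TM debugging aid */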
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	booke_load_dbcr0();

	account_cpu_user_exit();

	/* Restore user access locks last */
	kuap_user_restore(regs);

	return ret;
}

/*
 * This should be called after a syscall returns, with r3 the return value
 * from the syscall. If this function returns non-zero, the system call
 * exit assembly should additionally load all GPRs, as well as CTR and XER,
 * from the interrupt frame.
 *
 * The function graph tracer cannot trace the return side of this function,
 * because RI=0 and the soft mask state is "unreconciled", so it is marked
 * notrace.
 */
notrace unsigned long syscall_exit_prepare(unsigned long r3,
					   struct pt_regs *regs,
					   long scv)
{
	unsigned long ti_flags;
	unsigned long ret = 0;
	bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;

	CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap_assert_locked();

	regs->result = r3;

	/* Check whether the syscall is issued inside a restartable sequence */
	rseq_syscall(regs);

	ti_flags = read_thread_flags();

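	/*
	 * With the "sc" ABI, errors are reported by negating the return
	 * value and setting CR0[SO]; the "scv" ABI instead returns negative
	 * errnos directly, so this fixup is skipped for scv.
	 */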
	if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
		if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
			r3 = -r3;
			regs->ccr |= 0x10000000; /* Set SO bit in CR */
		}
	}

	if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
		if (ti_flags & _TIF_RESTOREALL)
			ret = _TIF_RESTOREALL;
		else
			regs->gpr[3] = r3;
		clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
	} else {
		regs->gpr[3] = r3;
	}

	if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
		do_syscall_trace_leave(regs);
		ret |= _TIF_RESTOREALL;
	}

	local_irq_disable();
	ret = interrupt_exit_user_prepare_main(ret, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}

#ifdef CONFIG_PPC64
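/*
 * Restart of the syscall exit path: reached when the prepared exit was
 * interrupted (e.g. a soft-masked interrupt became pending, or an
 * alternate-return interrupt hit), so the hard-disabled, kernel-AMR,
 * irqs-off state is re-established and the exit work is redone.
 */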
notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
{
	/*
	 * This is called when detecting a soft-pending interrupt as well as
	 * an alternate-return interrupt. So we can't just have the alternate
	 * return path clear SRR1[MSR] and set PACA_IRQ_HARD_DIS (unless
	 * the soft-pending case were to fix things up as well). RI might be
	 * disabled, in which case it gets re-enabled by __hard_irq_disable().
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);

	return regs->exit_result;
}
#endif

notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
{
	unsigned long ret;

	BUG_ON(regs_is_unrecoverable(regs));
	BUG_ON(arch_irq_disabled_regs(regs));
	CT_WARN_ON(ct_state() == CONTEXT_USER);

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * AMR can only have been unlocked if we interrupted the kernel.
	 */
	kuap_assert_locked();

	local_irq_disable();

	ret = interrupt_exit_user_prepare_main(0, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}

void preempt_schedule_irq(void);

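/*
 * Exit to kernel context: optionally reschedule if preemptible, replay any
 * pending soft-masked interrupts before returning with irqs enabled, and
 * handle the emulated stack store case (which cannot be restarted). The
 * return value tells the exit assembly whether the deferred stack store
 * needs to be performed.
 */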
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
{
	unsigned long flags;
	unsigned long ret = 0;
	unsigned long kuap;
	bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;

	if (regs_is_unrecoverable(regs))
		unrecoverable_exception(regs);
	/*
	 * CT_WARN_ON comes here via program_check_exception, so avoid
	 * recursion.
	 *
	 * Skip the assertion on PMIs on 64e to work around a problem caused
	 * by NMI PMIs incorrectly taking this interrupt return path: it is
	 * possible for this to be reached after interrupt exit to user has
	 * already switched context to user. See also the comment in the
	 * performance monitor handler in exceptions-64e.S.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64) &&
	    TRAP(regs) != INTERRUPT_PROGRAM &&
	    TRAP(regs) != INTERRUPT_PERFMON)
		CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap = kuap_get_and_assert_locked();

	local_irq_save(flags);

	if (!arch_irq_disabled_regs(regs)) {
		/* Returning to a kernel context with local irqs enabled. */
		WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
		if (IS_ENABLED(CONFIG_PREEMPT)) {
			/* Return to preemptible kernel context */
			if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
				if (preempt_count() == 0)
					preempt_schedule_irq();
			}
		}

		check_return_regs_valid(regs);

		/*
		 * Stack store exit can't be restarted because the interrupt
		 * stack frame might have been clobbered.
		 */
		if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
			/*
			 * Replay pending soft-masked interrupts now. Don't
			 * just local_irq_enable(); local_irq_disable(); because
			 * if we are returning from an asynchronous interrupt
			 * here, another one might hit after irqs are enabled,
			 * and it would exit via this same path allowing
			 * another to fire, and so on unbounded.
			 */
			hard_irq_disable();
			replay_soft_interrupts();
			/* Took an interrupt, may have more exit work to do. */
			goto again;
		}
#ifdef CONFIG_PPC64
		/*
		 * An interrupt may clear MSR[EE] and set this concurrently,
		 * but it will be marked pending and the exit will be retried.
		 * This leaves a racy window where MSR[EE]=0 and HARD_DIS is
		 * clear, until interrupt_exit_kernel_restart() calls
		 * hard_irq_disable(), which will set HARD_DIS again.
		 */
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	} else {
		check_return_regs_valid(regs);

		if (unlikely(stack_store))
			__hard_EE_RI_disable();
#endif /* CONFIG_PPC64 */
	}

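	/*
	 * A non-zero return tells the exit assembly to perform the stack
	 * store that was deferred via _TIF_EMULATE_STACK_STORE, once the
	 * interrupt frame is no longer live.
	 */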
	if (unlikely(stack_store)) {
		clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
		ret = 1;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	/*
	 * 64s does not want to mfspr(SPRN_AMR) here, because this comes after
	 * mtmsr, which would cause Read-After-Write stalls. Hence, take the
	 * AMR value from the check above.
	 */
	kuap_kernel_restore(regs, kuap);

	return ret;
}

#ifdef CONFIG_PPC64
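/*
 * Restart of the exit-to-user path for non-syscall interrupts: like
 * syscall_exit_restart(), re-establish the hard-disabled, kernel-AMR,
 * irqs-off state and redo the full exit-to-user preparation.
 */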
notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result |= interrupt_exit_user_prepare(regs);

	return regs->exit_result;
}

/*
 * No real need to return a value here because the stack store case does not
 * get restarted.
 */
notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	if (regs->softe == IRQS_ENABLED)
		trace_hardirqs_off();

	BUG_ON(user_mode(regs));

	return interrupt_exit_kernel_prepare(regs);
}
#endif