// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995 Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

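/*
 * The translation-exception identification (TEID) in regs->int_parm_long
 * carries the failing page address in its upper bits; __FAIL_ADDR_MASK
 * strips the low-order status bits. __SUBCODE_MASK and __PF_RES_FIELD
 * are used by the pfault code below.
 */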
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

/*
 * Allocate private vm_fault_reason codes from the top of the range.
 * Make sure they do not collide with any of the generic vm_fault_reason
 * values.
 */
#define VM_FAULT_BADCONTEXT     ((__force vm_fault_t)0x80000000)
#define VM_FAULT_BADMAP         ((__force vm_fault_t)0x40000000)
#define VM_FAULT_BADACCESS      ((__force vm_fault_t)0x20000000)
#define VM_FAULT_SIGNAL         ((__force vm_fault_t)0x10000000)
#define VM_FAULT_PFAULT         ((__force vm_fault_t)0x8000000)

enum fault_type {
        KERNEL_FAULT,
        USER_FAULT,
        GMAP_FAULT,
};

static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
        if (test_facility(75))
                store_indication = 0xc00;
        return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
        unsigned long trans_exc_code;

        trans_exc_code = regs->int_parm_long & 3;
        if (likely(trans_exc_code == 0)) {
                /* primary space exception */
                if (user_mode(regs))
                        return USER_FAULT;
                if (!IS_ENABLED(CONFIG_PGSTE))
                        return KERNEL_FAULT;
                if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
                        return GMAP_FAULT;
                return KERNEL_FAULT;
        }
        if (trans_exc_code == 2)
                return USER_FAULT;
        if (trans_exc_code == 1) {
                /* access register mode, not used in the kernel */
                return USER_FAULT;
        }
        /* home space exception -> access via kernel ASCE */
        return KERNEL_FAULT;
}

static unsigned long get_fault_address(struct pt_regs *regs)
{
        unsigned long trans_exc_code = regs->int_parm_long;

        return trans_exc_code & __FAIL_ADDR_MASK;
}

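/*
 * If the store-indication facility (75) is installed the TEID flags
 * whether the failing access was a fetch or a store. Without it
 * store_indication stays zero and this always returns false; write
 * faults are then only derived from protection exceptions, see
 * do_protection_exception() below.
 */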
static bool fault_is_write(struct pt_regs *regs)
{
        unsigned long trans_exc_code = regs->int_parm_long;

        return (trans_exc_code & store_indication) == 0x400;
}

static int bad_address(void *p)
{
        unsigned long dummy;

        return get_kernel_nofault(dummy, (unsigned long *)p);
}

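/*
 * Walk the page table rooted at the given ASCE and print one entry per
 * level (region 1/2/3, segment, page). The switch starts at the table
 * type encoded in the ASCE and deliberately falls through to the lower
 * levels, stopping early at an invalid entry or a large-page mapping.
 */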
static void dump_pagetable(unsigned long asce, unsigned long address)
{
        unsigned long *table = __va(asce & _ASCE_ORIGIN);

        pr_alert("AS:%016lx ", asce);
        switch (asce & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
                if (bad_address(table))
                        goto bad;
                pr_cont("R1:%016lx ", *table);
                if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = __va(*table & _REGION_ENTRY_ORIGIN);
                fallthrough;
        case _ASCE_TYPE_REGION2:
                table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
                if (bad_address(table))
                        goto bad;
                pr_cont("R2:%016lx ", *table);
                if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = __va(*table & _REGION_ENTRY_ORIGIN);
                fallthrough;
        case _ASCE_TYPE_REGION3:
                table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
                if (bad_address(table))
                        goto bad;
                pr_cont("R3:%016lx ", *table);
                if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
                        goto out;
                table = __va(*table & _REGION_ENTRY_ORIGIN);
                fallthrough;
        case _ASCE_TYPE_SEGMENT:
                table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
                if (bad_address(table))
                        goto bad;
                pr_cont("S:%016lx ", *table);
                if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
                        goto out;
                table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
        }
        table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
        if (bad_address(table))
                goto bad;
        pr_cont("P:%016lx ", *table);
out:
        pr_cont("\n");
        return;
bad:
        pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
        unsigned long asce;

        pr_alert("Failing address: %016lx TEID: %016lx\n",
                 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
        pr_alert("Fault in ");
        switch (regs->int_parm_long & 3) {
        case 3:
                pr_cont("home space ");
                break;
        case 2:
                pr_cont("secondary space ");
                break;
        case 1:
                pr_cont("access register ");
                break;
        case 0:
                pr_cont("primary space ");
                break;
        }
        pr_cont("mode while using ");
        switch (get_fault_type(regs)) {
        case USER_FAULT:
                asce = S390_lowcore.user_asce;
                pr_cont("user ");
                break;
        case GMAP_FAULT:
                asce = ((struct gmap *) S390_lowcore.gmap)->asce;
                pr_cont("gmap ");
                break;
        case KERNEL_FAULT:
                asce = S390_lowcore.kernel_asce;
                pr_cont("kernel ");
                break;
        default:
                unreachable();
        }
        pr_cont("ASCE.\n");
        dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
        if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
                return;
        if (!unhandled_signal(current, signr))
                return;
        if (!printk_ratelimit())
                return;
        printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
               regs->int_code & 0xffff, regs->int_code >> 17);
        print_vma_addr(KERN_CONT "in ", regs->psw.addr);
        printk(KERN_CONT "\n");
        if (is_mm_fault)
                dump_fault_info(regs);
        show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
        report_user_fault(regs, SIGSEGV, 1);
        force_sig_fault(SIGSEGV, si_code,
                        (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_no_context(struct pt_regs *regs, vm_fault_t fault)
{
        enum fault_type fault_type;
        unsigned long address;
        bool is_write;

        if (fixup_exception(regs))
                return;
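        /*
         * For a fault within the kernel address space give KFENCE a
         * chance to report an invalid access to one of its guard pages
         * before declaring an oops.
         */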
        fault_type = get_fault_type(regs);
        if ((fault_type == KERNEL_FAULT) && (fault == VM_FAULT_BADCONTEXT)) {
                address = get_fault_address(regs);
                is_write = fault_is_write(regs);
                if (kfence_handle_page_fault(address, is_write, regs))
                        return;
        }
        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        if (fault_type == KERNEL_FAULT)
                printk(KERN_ALERT "Unable to handle kernel pointer dereference"
                       " in virtual kernel address space\n");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request"
                       " in virtual user address space\n");
        dump_fault_info(regs);
        die(regs, "Oops");
}

static noinline void do_low_address(struct pt_regs *regs)
{
        /*
         * Low-address protection hit in kernel mode means NULL pointer
         * write access in kernel mode.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                /* Low-address protection hit in user mode 'cannot happen'. */
                die(regs, "Low-address protection");
        }

        do_no_context(regs, VM_FAULT_BADACCESS);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        force_sig_fault(SIGBUS, BUS_ADRERR,
                        (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
{
        int si_code;

        switch (fault) {
        case VM_FAULT_BADACCESS:
        case VM_FAULT_BADMAP:
                /* Bad memory access. Check if it is kernel or user space. */
                if (user_mode(regs)) {
                        /* User mode accesses just cause a SIGSEGV */
                        si_code = (fault == VM_FAULT_BADMAP) ?
                                SEGV_MAPERR : SEGV_ACCERR;
                        do_sigsegv(regs, si_code);
                        break;
                }
                fallthrough;
        case VM_FAULT_BADCONTEXT:
        case VM_FAULT_PFAULT:
                do_no_context(regs, fault);
                break;
        case VM_FAULT_SIGNAL:
                if (!user_mode(regs))
                        do_no_context(regs, fault);
                break;
        default: /* fault & VM_FAULT_ERROR */
                if (fault & VM_FAULT_OOM) {
                        if (!user_mode(regs))
                                do_no_context(regs, fault);
                        else
                                pagefault_out_of_memory();
                } else if (fault & VM_FAULT_SIGSEGV) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
                                do_no_context(regs, fault);
                        else
                                do_sigsegv(regs, SEGV_MAPERR);
                } else if (fault & VM_FAULT_SIGBUS) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
                                do_no_context(regs, fault);
                        else
                                do_sigbus(regs);
                } else
                        BUG();
                break;
        }
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *      04      Protection           -> Write-Protection (suppression)
 *      10      Segment translation  -> Not present      (nullification)
 *      11      Page translation     -> Not present      (nullification)
 *      3b      Region third trans.  -> Not present      (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
        struct gmap *gmap;
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        enum fault_type type;
        unsigned long address;
        unsigned int flags;
        vm_fault_t fault;
        bool is_write;

        tsk = current;
        /*
         * The instruction that caused the program check has
         * been nullified. Don't signal single step via SIGTRAP.
         */
        clear_thread_flag(TIF_PER_TRAP);

        if (kprobe_page_fault(regs, 14))
                return 0;

        mm = tsk->mm;
        address = get_fault_address(regs);
        is_write = fault_is_write(regs);

        /*
         * Verify that the fault happened in user space, that
         * we are not in an interrupt and that there is a
         * user context.
         */
        fault = VM_FAULT_BADCONTEXT;
        type = get_fault_type(regs);
        switch (type) {
        case KERNEL_FAULT:
                goto out;
        case USER_FAULT:
        case GMAP_FAULT:
                if (faulthandler_disabled() || !mm)
                        goto out;
                break;
        }

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        flags = FAULT_FLAG_DEFAULT;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (is_write)
                access = VM_WRITE;
        if (access == VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
        mmap_read_lock(mm);

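        /*
         * For a fault of a KVM guest the TEID contains a guest address;
         * translate it to the host user space address backing the guest
         * page before handling the fault. If the guest has pfault
         * enabled, do not wait for paging but report the fault back to
         * the guest instead (see the VM_FAULT_PFAULT handling below).
         */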
        gmap = NULL;
        if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
                gmap = (struct gmap *) S390_lowcore.gmap;
                current->thread.gmap_addr = address;
                current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
                current->thread.gmap_int_code = regs->int_code & 0xffff;
                address = __gmap_translate(gmap, address);
                if (address == -EFAULT) {
                        fault = VM_FAULT_BADMAP;
                        goto out_up;
                }
                if (gmap->pfault_enabled)
                        flags |= FAULT_FLAG_RETRY_NOWAIT;
        }

retry:
        fault = VM_FAULT_BADMAP;
        vma = find_vma(mm, address);
        if (!vma)
                goto out_up;

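        /*
         * The address lies below the start of the vma found; it can only
         * be valid if this is a stack vma that may grow downwards.
         */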
        if (unlikely(vma->vm_start > address)) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out_up;
                if (expand_stack(vma, address))
                        goto out_up;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it.
         */
        fault = VM_FAULT_BADACCESS;
        if (unlikely(!(vma->vm_flags & access)))
                goto out_up;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags, regs);
        if (fault_signal_pending(fault, regs)) {
                fault = VM_FAULT_SIGNAL;
                if (flags & FAULT_FLAG_RETRY_NOWAIT)
                        goto out_up;
                goto out;
        }

        /* The fault is fully completed (including releasing mmap lock) */
        if (fault & VM_FAULT_COMPLETED) {
                if (gmap) {
                        mmap_read_lock(mm);
                        goto out_gmap;
                }
                fault = 0;
                goto out;
        }

        if (unlikely(fault & VM_FAULT_ERROR))
                goto out_up;

        if (fault & VM_FAULT_RETRY) {
                if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
                    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
                        /*
                         * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
                         * not been released
                         */
                        current->thread.gmap_pfault = 1;
                        fault = VM_FAULT_PFAULT;
                        goto out_up;
                }
                flags &= ~FAULT_FLAG_RETRY_NOWAIT;
                flags |= FAULT_FLAG_TRIED;
                mmap_read_lock(mm);
                goto retry;
        }
out_gmap:
        if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
                address = __gmap_link(gmap, current->thread.gmap_addr,
                                      address);
                if (address == -EFAULT) {
                        fault = VM_FAULT_BADMAP;
                        goto out_up;
                }
                if (address == -ENOMEM) {
                        fault = VM_FAULT_OOM;
                        goto out_up;
                }
        }
        fault = 0;
out_up:
        mmap_read_unlock(mm);
out:
        return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
        unsigned long trans_exc_code;
        int access;
        vm_fault_t fault;

        trans_exc_code = regs->int_parm_long;
        /*
         * Protection exceptions are suppressing, decrement psw address.
         * The exception to this rule are aborted transactions, for these
         * the PSW already points to the correct location.
         */
        if (!(regs->int_code & 0x200))
                regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
        /*
         * Check for low-address protection. This needs to be treated
         * as a special case because the translation exception code
         * field is not guaranteed to contain valid data in this case.
         */
        if (unlikely(!(trans_exc_code & 4))) {
                do_low_address(regs);
                return;
        }
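        /*
         * With the instruction-execution-protection facility installed a
         * protection exception can also be caused by an instruction fetch
         * from a non-executable page. Report this as a failed VM_EXEC
         * access and substitute the page address from the PSW for the one
         * in the TEID before the fault is reported.
         */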
        if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
                regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
                                      (regs->psw.addr & PAGE_MASK);
                access = VM_EXEC;
                fault = VM_FAULT_BADACCESS;
        } else {
                access = VM_WRITE;
                fault = do_exception(regs, access);
        }
        if (unlikely(fault))
                do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

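/*
 * A DAT exception (segment, page or region translation) can be caused by
 * any type of access, so allow all of VM_ACCESS_FLAGS here; do_exception()
 * narrows this down to VM_WRITE if the TEID flags a store access.
 */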
void do_dat_exception(struct pt_regs *regs)
{
        int access;
        vm_fault_t fault;

        access = VM_ACCESS_FLAGS;
        fault = do_exception(regs, access);
        if (unlikely(fault))
                do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
        pfault_disable = 1;
        return 1;
}

__setup("nopfault", nopfault);

struct pfault_refbk {
        u16 refdiagc;
        u16 reffcode;
        u16 refdwlen;
        u16 refversn;
        u64 refgaddr;
        u64 refselmk;
        u64 refcmpmk;
        u64 reserved;
} __attribute__ ((packed, aligned(8)));

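/*
 * Request block for DIAG 0x258: function code 0 (TOKEN) arms pfault
 * handling and registers the contents of the LPP lowcore field, which
 * holds the pid of the current task (see pfault_interrupt()), as the
 * fault token; function code 1 (CANCEL) below disarms it again.
 */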
static struct pfault_refbk pfault_init_refbk = {
        .refdiagc = 0x258,
        .reffcode = 0,
        .refdwlen = 5,
        .refversn = 2,
        .refgaddr = __LC_LPP,
        .refselmk = 1ULL << 48,
        .refcmpmk = 1ULL << 48,
        .reserved = __PF_RES_FIELD
};

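/*
 * Arm pfault handling via DIAG 0x258. If the diagnose is not available
 * the resulting program check is fixed up through the exception table
 * and rc is forced to 8, which callers treat as failure.
 */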
int pfault_init(void)
{
        int rc;

        if (pfault_disable)
                return -1;
        diag_stat_inc(DIAG_STAT_X258);
        asm volatile(
                "       diag    %1,%0,0x258\n"
                "0:     j       2f\n"
                "1:     la      %0,8\n"
                "2:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc)
                : "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
        return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
        .refdiagc = 0x258,
        .reffcode = 1,
        .refdwlen = 5,
        .refversn = 2,
};

void pfault_fini(void)
{
        if (pfault_disable)
                return;
        diag_stat_inc(DIAG_STAT_X258);
        asm volatile(
                "       diag    %0,0,0x258\n"
                "0:     nopr    %%r7\n"
                EX_TABLE(0b,0b)
                : : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE     0x0080

/*
 * The mechanism of our pfault code: if Linux is running as a guest, runs a
 * user space process, and the user space process accesses a page that the
 * host has paged out, we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happen within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule(). It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
                             unsigned int param32, unsigned long param64)
{
        struct task_struct *tsk;
        __u16 subcode;
        pid_t pid;

        /*
         * Get the external interruption subcode & pfault initial/completion
         * signal bit. VM stores this in the 'cpu address' field associated
         * with the external interrupt.
         */
        subcode = ext_code.subcode;
        if ((subcode & 0xff00) != __SUBCODE_MASK)
                return;
        inc_irq_stat(IRQEXT_PFL);
        /* Get the token (= pid of the affected task). */
        pid = param64 & LPP_PID_MASK;
        rcu_read_lock();
        tsk = find_task_by_pid_ns(pid, &init_pid_ns);
        if (tsk)
                get_task_struct(tsk);
        rcu_read_unlock();
        if (!tsk)
                return;
        spin_lock(&pfault_lock);
        if (subcode & PF_COMPLETE) {
                /* signal bit is set -> a page has been swapped in by VM */
                if (tsk->thread.pfault_wait == 1) {
                        /*
                         * Initial interrupt was faster than the completion
                         * interrupt. pfault_wait is valid. Set pfault_wait
                         * back to zero and wake up the process. This can
                         * safely be done because the task is still sleeping
                         * and can't produce new pfaults.
                         */
                        tsk->thread.pfault_wait = 0;
                        list_del(&tsk->thread.list);
                        wake_up_process(tsk);
                        put_task_struct(tsk);
                } else {
                        /*
                         * Completion interrupt was faster than initial
                         * interrupt. Set pfault_wait to -1 so the initial
                         * interrupt doesn't put the task to sleep.
                         * If the task is not running, ignore the completion
                         * interrupt since it must be a leftover of a PFAULT
                         * CANCEL operation which didn't remove all pending
                         * completion interrupts.
                         */
                        if (task_is_running(tsk))
                                tsk->thread.pfault_wait = -1;
                }
        } else {
                /* signal bit not set -> a real page is missing. */
                if (WARN_ON_ONCE(tsk != current))
                        goto out;
                if (tsk->thread.pfault_wait == 1) {
                        /* Already on the list with a reference: put to sleep */
                        goto block;
                } else if (tsk->thread.pfault_wait == -1) {
                        /*
                         * Completion interrupt was faster than the initial
                         * interrupt (pfault_wait == -1). Set pfault_wait
                         * back to zero and exit.
                         */
                        tsk->thread.pfault_wait = 0;
                } else {
                        /*
                         * Initial interrupt arrived before completion
                         * interrupt. Let the task sleep.
                         * An extra task reference is needed since a different
                         * cpu may set the task state to TASK_RUNNING again
                         * before the scheduler is reached.
                         */
                        get_task_struct(tsk);
                        tsk->thread.pfault_wait = 1;
                        list_add(&tsk->thread.list, &pfault_list);
block:
                        /*
                         * Since this must be a userspace fault, there
                         * is no kernel task state to trample. Rely on the
                         * return to userspace schedule() to block.
                         */
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                        set_tsk_need_resched(tsk);
                        set_preempt_need_resched();
                }
        }
out:
        spin_unlock(&pfault_lock);
        put_task_struct(tsk);
}

static int pfault_cpu_dead(unsigned int cpu)
{
        struct thread_struct *thread, *next;
        struct task_struct *tsk;

        spin_lock_irq(&pfault_lock);
        list_for_each_entry_safe(thread, next, &pfault_list, list) {
                thread->pfault_wait = 0;
                list_del(&thread->list);
                tsk = container_of(thread, struct task_struct, thread);
                wake_up_process(tsk);
                put_task_struct(tsk);
        }
        spin_unlock_irq(&pfault_lock);
        return 0;
}

static int __init pfault_irq_init(void)
{
        int rc;

        rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
        if (rc)
                goto out_extint;
        rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
        if (rc)
                goto out_pfault;
        irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
        cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
                                  NULL, pfault_cpu_dead);
        return 0;

out_pfault:
        unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
        pfault_disable = 1;
        return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)

void do_secure_storage_access(struct pt_regs *regs)
{
        unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        struct page *page;
        struct gmap *gmap;
        int rc;

        /*
         * Bit 61 tells us if the address is valid, if it's not we
         * have a major problem and should stop the kernel or send a
         * SIGSEGV to the process. Unfortunately bit 61 is not
         * reliable without the misc UV feature, so we need to check
         * for that as well.
         */
        if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
            !test_bit_inv(61, &regs->int_parm_long)) {
                /*
                 * When this happens, userspace did something that it
                 * was not supposed to do, e.g. branching into secure
                 * memory. Trigger a segmentation fault.
                 */
                if (user_mode(regs)) {
                        send_sig(SIGSEGV, current, 0);
                        return;
                }

                /*
                 * The kernel should never run into this case and we
                 * have no way out of this situation.
                 */
                panic("Unexpected PGM 0x3d with TEID bit 61=0");
        }

        switch (get_fault_type(regs)) {
        case GMAP_FAULT:
                mm = current->mm;
                gmap = (struct gmap *)S390_lowcore.gmap;
                mmap_read_lock(mm);
                addr = __gmap_translate(gmap, addr);
                mmap_read_unlock(mm);
                if (IS_ERR_VALUE(addr)) {
                        do_fault_error(regs, VM_FAULT_BADMAP);
                        break;
                }
                fallthrough;
        case USER_FAULT:
                mm = current->mm;
                mmap_read_lock(mm);
                vma = find_vma(mm, addr);
                if (!vma) {
                        mmap_read_unlock(mm);
                        do_fault_error(regs, VM_FAULT_BADMAP);
                        break;
                }
                page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
                if (IS_ERR_OR_NULL(page)) {
                        mmap_read_unlock(mm);
                        break;
                }
                if (arch_make_page_accessible(page))
                        send_sig(SIGSEGV, current, 0);
                put_page(page);
                mmap_read_unlock(mm);
                break;
        case KERNEL_FAULT:
                page = phys_to_page(addr);
                if (unlikely(!try_get_page(page)))
                        break;
                rc = arch_make_page_accessible(page);
                put_page(page);
                if (rc)
                        BUG();
                break;
        default:
                do_fault_error(regs, VM_FAULT_BADMAP);
                WARN_ON_ONCE(1);
        }
}
NOKPROBE_SYMBOL(do_secure_storage_access);

void do_non_secure_storage_access(struct pt_regs *regs)
{
        unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
        struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

        if (get_fault_type(regs) != GMAP_FAULT) {
                do_fault_error(regs, VM_FAULT_BADMAP);
                WARN_ON_ONCE(1);
                return;
        }

        if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
                send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
        unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
        struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

        /*
         * If the VM has been rebooted, its address space might still contain
         * secure pages from the previous boot.
         * Clear the page so it can be reused.
         */
        if (!gmap_destroy_page(gmap, gaddr))
                return;
        /*
         * Either KVM messed up the secure guest mapping or the same
         * page is mapped into multiple secure guests.
         *
         * This exception is only triggered when a guest 2 is running
         * and can therefore never occur in kernel context.
         */
        printk_ratelimited(KERN_WARNING
                           "Secure storage violation in task: %s, pid %d\n",
                           current->comm, current->pid);
        send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */