1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kexec.c - kexec system call core code.
4 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
5 */
6
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9 #include <linux/btf.h>
10 #include <linux/capability.h>
11 #include <linux/mm.h>
12 #include <linux/file.h>
13 #include <linux/slab.h>
14 #include <linux/fs.h>
15 #include <linux/kexec.h>
16 #include <linux/mutex.h>
17 #include <linux/list.h>
18 #include <linux/highmem.h>
19 #include <linux/syscalls.h>
20 #include <linux/reboot.h>
21 #include <linux/ioport.h>
22 #include <linux/hardirq.h>
23 #include <linux/elf.h>
24 #include <linux/elfcore.h>
25 #include <linux/utsname.h>
26 #include <linux/numa.h>
27 #include <linux/suspend.h>
28 #include <linux/device.h>
29 #include <linux/freezer.h>
30 #include <linux/panic_notifier.h>
31 #include <linux/pm.h>
32 #include <linux/cpu.h>
33 #include <linux/uaccess.h>
34 #include <linux/io.h>
35 #include <linux/console.h>
36 #include <linux/vmalloc.h>
37 #include <linux/swap.h>
38 #include <linux/syscore_ops.h>
39 #include <linux/compiler.h>
40 #include <linux/hugetlb.h>
41 #include <linux/objtool.h>
42 #include <linux/kmsg_dump.h>
43
44 #include <asm/page.h>
45 #include <asm/sections.h>
46
47 #include <crypto/hash.h>
48 #include "kexec_internal.h"
49
50 atomic_t __kexec_lock = ATOMIC_INIT(0);
51
52 /* Per cpu memory for storing cpu states in case of system crash. */
53 note_buf_t __percpu *crash_notes;
54
55 /* Flag to indicate we are going to kexec a new kernel */
56 bool kexec_in_progress = false;
57
58
59 /* Location of the reserved area for the crash kernel */
60 struct resource crashk_res = {
61 .name = "Crash kernel",
62 .start = 0,
63 .end = 0,
64 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
65 .desc = IORES_DESC_CRASH_KERNEL
66 };
67 struct resource crashk_low_res = {
68 .name = "Crash kernel",
69 .start = 0,
70 .end = 0,
71 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
72 .desc = IORES_DESC_CRASH_KERNEL
73 };
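/*
 * For reference: on most architectures these resources are populated early
 * in boot from the "crashkernel=" command line (e.g. "crashkernel=256M"),
 * long before any kexec syscall can target the reserved region.
 */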
74
75 int kexec_should_crash(struct task_struct *p)
76 {
77 /*
78 * If crash_kexec_post_notifiers is enabled, don't run
79 * crash_kexec() here yet, which must be run after panic
80 * notifiers in panic().
81 */
82 if (crash_kexec_post_notifiers)
83 return 0;
84 /*
85 * There are 4 panic() calls in the make_task_dead() path, one
86 * corresponding to each of these 4 conditions.
87 */
88 if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
89 return 1;
90 return 0;
91 }
92
93 int kexec_crash_loaded(void)
94 {
95 return !!kexec_crash_image;
96 }
97 EXPORT_SYMBOL_GPL(kexec_crash_loaded);
98
99 /*
100 * When kexec transitions to the new kernel there is a one-to-one
101 * mapping between physical and virtual addresses. On processors
102 * where you can disable the MMU this is trivial, and easy. For
103 * others it is still a simple, predictable page table to set up.
104 *
105 * In that environment kexec copies the new kernel to its final
106 * resting place. This means I can only support memory whose
107 * physical address can fit in an unsigned long. In particular
108 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
109 * If the assembly stub has more restrictive requirements
110 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
111 * defined more restrictively in <asm/kexec.h>.
112 *
113 * The code for the transition from the current kernel to the
114 * new kernel is placed in the control_code_buffer, whose size
115 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
116 * page of memory is necessary, but some architectures require more.
117 * Because this memory must be identity mapped in the transition from
118 * virtual to physical addresses it must live in the range
119 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
120 * modifiable.
121 *
122 * The assembly stub in the control code buffer is passed a linked list
123 * of descriptor pages detailing the source pages of the new kernel,
124 * and the destination addresses of those source pages. As this data
125 * structure is not used in the context of the current OS, it must
126 * be self-contained.
127 *
128 * The code has been made to work with highmem pages and will use a
129 * destination page in its final resting place (if it happens
130 * to allocate it). The end product of this is that most of the
131 * physical address space, and most of RAM can be used.
132 *
133 * Future directions include:
134 * - allocating a page table with the control code buffer identity
135 * mapped, to simplify machine_kexec and make kexec_on_panic more
136 * reliable.
137 */
138
139 /*
140 * KIMAGE_NO_DEST is an impossible destination address, used for
141 * allocating pages whose destination address we do not care about.
142 */
143 #define KIMAGE_NO_DEST (-1UL)
144 #define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
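/* For example, with 4 KiB pages PAGE_COUNT(4096) == 1 and PAGE_COUNT(5000) == 2. */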
145
146 static struct page *kimage_alloc_page(struct kimage *image,
147 gfp_t gfp_mask,
148 unsigned long dest);
149
150 int sanity_check_segment_list(struct kimage *image)
151 {
152 int i;
153 unsigned long nr_segments = image->nr_segments;
154 unsigned long total_pages = 0;
155 unsigned long nr_pages = totalram_pages();
156
157 /*
158 * Verify we have good destination addresses. The caller is
159 * responsible for making certain we don't attempt to load
160 * the new image into invalid or reserved areas of RAM. This
161 * just verifies it is an address we can use.
162 *
163 * Since the kernel does everything in page size chunks ensure
164 * the destination addresses are page aligned. Too many
165 * special cases crop up when we don't do this. The most
166 * insidious is getting overlapping destination addresses
167 * simply because addresses are changed to page size
168 * granularity.
169 */
170 for (i = 0; i < nr_segments; i++) {
171 unsigned long mstart, mend;
172
173 mstart = image->segment[i].mem;
174 mend = mstart + image->segment[i].memsz;
175 if (mstart > mend)
176 return -EADDRNOTAVAIL;
177 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
178 return -EADDRNOTAVAIL;
179 if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
180 return -EADDRNOTAVAIL;
181 }
182
183 /* Verify our destination addresses do not overlap.
184 * If we allowed overlapping destination addresses
185 * through, very weird things can happen with no
186 * easy explanation as one segment stomps on another.
187 */
188 for (i = 0; i < nr_segments; i++) {
189 unsigned long mstart, mend;
190 unsigned long j;
191
192 mstart = image->segment[i].mem;
193 mend = mstart + image->segment[i].memsz;
194 for (j = 0; j < i; j++) {
195 unsigned long pstart, pend;
196
197 pstart = image->segment[j].mem;
198 pend = pstart + image->segment[j].memsz;
199 /* Do the segments overlap ? */
200 if ((mend > pstart) && (mstart < pend))
201 return -EINVAL;
202 }
203 }
204
205 /* Ensure our buffer sizes do not exceed
206 * our memory sizes. This should always be the case,
207 * and it is easier to check up front than to be surprised
208 * later on.
209 */
210 for (i = 0; i < nr_segments; i++) {
211 if (image->segment[i].bufsz > image->segment[i].memsz)
212 return -EINVAL;
213 }
214
215 /*
216 * Verify that no more than half of memory will be consumed. If the
217 * request from userspace is too large, a large amount of time will be
218 * wasted allocating pages, which can cause a soft lockup.
219 */
220 for (i = 0; i < nr_segments; i++) {
221 if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
222 return -EINVAL;
223
224 total_pages += PAGE_COUNT(image->segment[i].memsz);
225 }
226
227 if (total_pages > nr_pages / 2)
228 return -EINVAL;
229
230 /*
231 * Verify we have good destination addresses. Normally
232 * the caller is responsible for making certain we don't
233 * attempt to load the new image into invalid or reserved
234 * areas of RAM. But crash kernels are preloaded into a
235 * reserved area of ram. We must ensure the addresses
236 * are in the reserved area otherwise preloading the
237 * kernel could corrupt things.
238 */
239
240 if (image->type == KEXEC_TYPE_CRASH) {
241 for (i = 0; i < nr_segments; i++) {
242 unsigned long mstart, mend;
243
244 mstart = image->segment[i].mem;
245 mend = mstart + image->segment[i].memsz - 1;
246 /* Ensure we are within the crash kernel limits */
247 if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
248 (mend > phys_to_boot_phys(crashk_res.end)))
249 return -EADDRNOTAVAIL;
250 }
251 }
252
253 return 0;
254 }
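/*
 * Illustrative sketch (not part of the original file): a single segment
 * description that satisfies the checks above; the addresses are made up
 * for the example.
 *
 *	struct kexec_segment seg = {
 *		.buf   = user_buffer,	// userspace source buffer
 *		.bufsz = PAGE_SIZE,	// bufsz <= memsz
 *		.mem   = 0x100000,	// page-aligned destination
 *		.memsz = PAGE_SIZE,	// page-aligned size
 *	};
 */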
255
256 struct kimage *do_kimage_alloc_init(void)
257 {
258 struct kimage *image;
259
260 /* Allocate a controlling structure */
261 image = kzalloc(sizeof(*image), GFP_KERNEL);
262 if (!image)
263 return NULL;
264
265 image->head = 0;
266 image->entry = &image->head;
267 image->last_entry = &image->head;
268 image->control_page = ~0; /* By default this does not apply */
269 image->type = KEXEC_TYPE_DEFAULT;
270
271 /* Initialize the list of control pages */
272 INIT_LIST_HEAD(&image->control_pages);
273
274 /* Initialize the list of destination pages */
275 INIT_LIST_HEAD(&image->dest_pages);
276
277 /* Initialize the list of unusable pages */
278 INIT_LIST_HEAD(&image->unusable_pages);
279
280 return image;
281 }
282
283 int kimage_is_destination_range(struct kimage *image,
284 unsigned long start,
285 unsigned long end)
286 {
287 unsigned long i;
288
289 for (i = 0; i < image->nr_segments; i++) {
290 unsigned long mstart, mend;
291
292 mstart = image->segment[i].mem;
293 mend = mstart + image->segment[i].memsz;
294 if ((end > mstart) && (start < mend))
295 return 1;
296 }
297
298 return 0;
299 }
300
301 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
302 {
303 struct page *pages;
304
305 if (fatal_signal_pending(current))
306 return NULL;
307 pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
308 if (pages) {
309 unsigned int count, i;
310
311 pages->mapping = NULL;
312 set_page_private(pages, order);
313 count = 1 << order;
314 for (i = 0; i < count; i++)
315 SetPageReserved(pages + i);
316
317 arch_kexec_post_alloc_pages(page_address(pages), count,
318 gfp_mask);
319
320 if (gfp_mask & __GFP_ZERO)
321 for (i = 0; i < count; i++)
322 clear_highpage(pages + i);
323 }
324
325 return pages;
326 }
327
328 static void kimage_free_pages(struct page *page)
329 {
330 unsigned int order, count, i;
331
332 order = page_private(page);
333 count = 1 << order;
334
335 arch_kexec_pre_free_pages(page_address(page), count);
336
337 for (i = 0; i < count; i++)
338 ClearPageReserved(page + i);
339 __free_pages(page, order);
340 }
341
342 void kimage_free_page_list(struct list_head *list)
343 {
344 struct page *page, *next;
345
346 list_for_each_entry_safe(page, next, list, lru) {
347 list_del(&page->lru);
348 kimage_free_pages(page);
349 }
350 }
351
352 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
353 unsigned int order)
354 {
355 /* Control pages are special, they are the intermediaries
356 * that are needed while we copy the rest of the pages
357 * to their final resting place. As such they must
358 * not conflict with either the destination addresses
359 * or memory the kernel is already using.
360 *
361 * The only case where we really need more than one of
362 * these is for architectures where we cannot disable
363 * the MMU and must instead generate an identity mapped
364 * page table for all of the memory.
365 *
366 * At worst this runs in O(N) of the image size.
367 */
368 struct list_head extra_pages;
369 struct page *pages;
370 unsigned int count;
371
372 count = 1 << order;
373 INIT_LIST_HEAD(&extra_pages);
374
375 /* Loop while I can allocate a page and the page allocated
376 * is a destination page.
377 */
378 do {
379 unsigned long pfn, epfn, addr, eaddr;
380
381 pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
382 if (!pages)
383 break;
384 pfn = page_to_boot_pfn(pages);
385 epfn = pfn + count;
386 addr = pfn << PAGE_SHIFT;
387 eaddr = epfn << PAGE_SHIFT;
388 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
389 kimage_is_destination_range(image, addr, eaddr)) {
390 list_add(&pages->lru, &extra_pages);
391 pages = NULL;
392 }
393 } while (!pages);
394
395 if (pages) {
396 /* Remember the allocated page... */
397 list_add(&pages->lru, &image->control_pages);
398
399 /* Because the page is already in its destination
400 * location we will never allocate another page at
401 * that address. Therefore kimage_alloc_pages
402 * will not return it (again) and we don't need
403 * to give it an entry in image->segment[].
404 */
405 }
406 /* Deal with the destination pages I have inadvertently allocated.
407 *
408 * Ideally I would convert multi-page allocations into single
409 * page allocations, and add everything to image->dest_pages.
410 *
411 * For now it is simpler to just free the pages.
412 */
413 kimage_free_page_list(&extra_pages);
414
415 return pages;
416 }
417
418 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
419 unsigned int order)
420 {
421 /* Control pages are special, they are the intermediaries
422 * that are needed while we copy the rest of the pages
423 * to their final resting place. As such they must
424 * not conflict with either the destination addresses
425 * or memory the kernel is already using.
426 *
427 * Control pages are also the only pages we must allocate
428 * when loading a crash kernel. All of the other pages
429 * are specified by the segments and we just memcpy
430 * into them directly.
431 *
432 * The only case where we really need more than one of
433 * these is for architectures where we cannot disable
434 * the MMU and must instead generate an identity mapped
435 * page table for all of the memory.
436 *
437 * Given the low demand this implements a very simple
438 * allocator that finds the first hole of the appropriate
439 * size in the reserved memory region, and allocates all
440 * of the memory up to and including the hole.
441 */
442 unsigned long hole_start, hole_end, size;
443 struct page *pages;
444
445 pages = NULL;
446 size = (1 << order) << PAGE_SHIFT;
447 hole_start = (image->control_page + (size - 1)) & ~(size - 1);
448 hole_end = hole_start + size - 1;
449 while (hole_end <= crashk_res.end) {
450 unsigned long i;
451
452 cond_resched();
453
454 if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
455 break;
456 /* See if I overlap any of the segments */
457 for (i = 0; i < image->nr_segments; i++) {
458 unsigned long mstart, mend;
459
460 mstart = image->segment[i].mem;
461 mend = mstart + image->segment[i].memsz - 1;
462 if ((hole_end >= mstart) && (hole_start <= mend)) {
463 /* Advance the hole to the end of the segment */
464 hole_start = (mend + (size - 1)) & ~(size - 1);
465 hole_end = hole_start + size - 1;
466 break;
467 }
468 }
469 /* If I don't overlap any segments I have found my hole! */
470 if (i == image->nr_segments) {
471 pages = pfn_to_page(hole_start >> PAGE_SHIFT);
472 image->control_page = hole_end;
473 break;
474 }
475 }
476
477 /* Ensure that these pages are decrypted if SME is enabled. */
478 if (pages)
479 arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);
480
481 return pages;
482 }
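/*
 * Worked example of the hole search above: with order = 1 the hole size is
 * 2 * PAGE_SIZE = 0x2000, so an image->control_page of 0x10000100 rounds up
 * to hole_start = 0x10002000 and hole_end = 0x10003fff.
 */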
483
484
485 struct page *kimage_alloc_control_pages(struct kimage *image,
486 unsigned int order)
487 {
488 struct page *pages = NULL;
489
490 switch (image->type) {
491 case KEXEC_TYPE_DEFAULT:
492 pages = kimage_alloc_normal_control_pages(image, order);
493 break;
494 case KEXEC_TYPE_CRASH:
495 pages = kimage_alloc_crash_control_pages(image, order);
496 break;
497 }
498
499 return pages;
500 }
501
502 int kimage_crash_copy_vmcoreinfo(struct kimage *image)
503 {
504 struct page *vmcoreinfo_page;
505 void *safecopy;
506
507 if (image->type != KEXEC_TYPE_CRASH)
508 return 0;
509
510 /*
511 * For kdump, allocate one vmcoreinfo safe copy from the
512 * crash memory. Since we have arch_kexec_protect_crashkres()
513 * after the kexec syscall, it is naturally protected from write
514 * (even read) access under the kernel direct mapping. On the
515 * other hand, we still need to access it when a crash happens,
516 * to generate the vmcoreinfo note, so we rely on
517 * vmap for this purpose.
518 */
519 vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
520 if (!vmcoreinfo_page) {
521 pr_warn("Could not allocate vmcoreinfo buffer\n");
522 return -ENOMEM;
523 }
524 safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
525 if (!safecopy) {
526 pr_warn("Could not vmap vmcoreinfo buffer\n");
527 return -ENOMEM;
528 }
529
530 image->vmcoreinfo_data_copy = safecopy;
531 crash_update_vmcoreinfo_safecopy(safecopy);
532
533 return 0;
534 }
535
536 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
537 {
538 if (*image->entry != 0)
539 image->entry++;
540
541 if (image->entry == image->last_entry) {
542 kimage_entry_t *ind_page;
543 struct page *page;
544
545 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
546 if (!page)
547 return -ENOMEM;
548
549 ind_page = page_address(page);
550 *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
551 image->entry = ind_page;
552 image->last_entry = ind_page +
553 ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
554 }
555 *image->entry = entry;
556 image->entry++;
557 *image->entry = 0;
558
559 return 0;
560 }
561
562 static int kimage_set_destination(struct kimage *image,
563 unsigned long destination)
564 {
565 destination &= PAGE_MASK;
566
567 return kimage_add_entry(image, destination | IND_DESTINATION);
568 }
569
570
571 static int kimage_add_page(struct kimage *image, unsigned long page)
572 {
573 page &= PAGE_MASK;
574
575 return kimage_add_entry(image, page | IND_SOURCE);
576 }
577
578
579 static void kimage_free_extra_pages(struct kimage *image)
580 {
581 /* Walk through and free any extra destination pages I may have */
582 kimage_free_page_list(&image->dest_pages);
583
584 /* Walk through and free any unusable pages I have cached */
585 kimage_free_page_list(&image->unusable_pages);
586
587 }
588
589 void kimage_terminate(struct kimage *image)
590 {
591 if (*image->entry != 0)
592 image->entry++;
593
594 *image->entry = IND_DONE;
595 }
596
597 #define for_each_kimage_entry(image, ptr, entry) \
598 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
599 ptr = (entry & IND_INDIRECTION) ? \
600 boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
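/*
 * Illustrative sketch (not part of the original file): each entry walked by
 * the macro above is a page-aligned boot-view physical address ORed with one
 * of the IND_* flags from <linux/kexec.h>.  A hypothetical debug helper built
 * on top of it could look like:
 *
 *	static void kimage_dump_entries(struct kimage *image)
 *	{
 *		kimage_entry_t *ptr, entry;
 *
 *		for_each_kimage_entry(image, ptr, entry) {
 *			if (entry & IND_DESTINATION)
 *				pr_debug("dest %#lx\n", entry & PAGE_MASK);
 *			else if (entry & IND_SOURCE)
 *				pr_debug("src  %#lx\n", entry & PAGE_MASK);
 *		}
 *	}
 */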
601
602 static void kimage_free_entry(kimage_entry_t entry)
603 {
604 struct page *page;
605
606 page = boot_pfn_to_page(entry >> PAGE_SHIFT);
607 kimage_free_pages(page);
608 }
609
610 void kimage_free(struct kimage *image)
611 {
612 kimage_entry_t *ptr, entry;
613 kimage_entry_t ind = 0;
614
615 if (!image)
616 return;
617
618 if (image->vmcoreinfo_data_copy) {
619 crash_update_vmcoreinfo_safecopy(NULL);
620 vunmap(image->vmcoreinfo_data_copy);
621 }
622
623 kimage_free_extra_pages(image);
624 for_each_kimage_entry(image, ptr, entry) {
625 if (entry & IND_INDIRECTION) {
626 /* Free the previous indirection page */
627 if (ind & IND_INDIRECTION)
628 kimage_free_entry(ind);
629 /* Save this indirection page until we are
630 * done with it.
631 */
632 ind = entry;
633 } else if (entry & IND_SOURCE)
634 kimage_free_entry(entry);
635 }
636 /* Free the final indirection page */
637 if (ind & IND_INDIRECTION)
638 kimage_free_entry(ind);
639
640 /* Handle any machine specific cleanup */
641 machine_kexec_cleanup(image);
642
643 /* Free the kexec control pages... */
644 kimage_free_page_list(&image->control_pages);
645
646 /*
647 * Free up any temporary buffers allocated. This might be hit if
648 * an error occurred long after buffer allocation.
649 */
650 if (image->file_mode)
651 kimage_file_post_load_cleanup(image);
652
653 kfree(image);
654 }
655
656 static kimage_entry_t *kimage_dst_used(struct kimage *image,
657 unsigned long page)
658 {
659 kimage_entry_t *ptr, entry;
660 unsigned long destination = 0;
661
662 for_each_kimage_entry(image, ptr, entry) {
663 if (entry & IND_DESTINATION)
664 destination = entry & PAGE_MASK;
665 else if (entry & IND_SOURCE) {
666 if (page == destination)
667 return ptr;
668 destination += PAGE_SIZE;
669 }
670 }
671
672 return NULL;
673 }
674
675 static struct page *kimage_alloc_page(struct kimage *image,
676 gfp_t gfp_mask,
677 unsigned long destination)
678 {
679 /*
680 * Here we implement safeguards to ensure that a source page
681 * is not copied to its destination page before the data on
682 * the destination page is no longer useful.
683 *
684 * To do this we maintain the invariant that a source page is
685 * either its own destination page, or it is not a
686 * destination page at all.
687 *
688 * That is slightly stronger than required, but the proof
689 * that no problems will occur is trivial, and the
690 * implementation is simple to verify.
691 *
692 * When allocating all pages normally this algorithm will run
693 * in O(N) time, but in the worst case it will run in O(N^2)
694 * time. If the runtime is a problem the data structures can
695 * be fixed.
696 */
697 struct page *page;
698 unsigned long addr;
699
700 /*
701 * Walk through the list of destination pages, and see if I
702 * have a match.
703 */
704 list_for_each_entry(page, &image->dest_pages, lru) {
705 addr = page_to_boot_pfn(page) << PAGE_SHIFT;
706 if (addr == destination) {
707 list_del(&page->lru);
708 return page;
709 }
710 }
711 page = NULL;
712 while (1) {
713 kimage_entry_t *old;
714
715 /* Allocate a page, if we run out of memory give up */
716 page = kimage_alloc_pages(gfp_mask, 0);
717 if (!page)
718 return NULL;
719 /* If the page cannot be used file it away */
720 if (page_to_boot_pfn(page) >
721 (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
722 list_add(&page->lru, &image->unusable_pages);
723 continue;
724 }
725 addr = page_to_boot_pfn(page) << PAGE_SHIFT;
726
727 /* If it is the destination page we want use it */
728 if (addr == destination)
729 break;
730
731 /* If the page is not a destination page use it */
732 if (!kimage_is_destination_range(image, addr,
733 addr + PAGE_SIZE))
734 break;
735
736 /*
737 * I know that the page is someone's destination page.
738 * See if there is already a source page for this
739 * destination page. And if so swap the source pages.
740 */
741 old = kimage_dst_used(image, addr);
742 if (old) {
743 /* If so move it */
744 unsigned long old_addr;
745 struct page *old_page;
746
747 old_addr = *old & PAGE_MASK;
748 old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
749 copy_highpage(page, old_page);
750 *old = addr | (*old & ~PAGE_MASK);
751
752 /* The old page I have found cannot be a
753 * destination page, so return it if its
754 * gfp_flags honor the ones passed in.
755 */
756 if (!(gfp_mask & __GFP_HIGHMEM) &&
757 PageHighMem(old_page)) {
758 kimage_free_pages(old_page);
759 continue;
760 }
761 page = old_page;
762 break;
763 }
764 /* Place the page on the destination list, to be used later */
765 list_add(&page->lru, &image->dest_pages);
766 }
767
768 return page;
769 }
770
771 static int kimage_load_normal_segment(struct kimage *image,
772 struct kexec_segment *segment)
773 {
774 unsigned long maddr;
775 size_t ubytes, mbytes;
776 int result;
777 unsigned char __user *buf = NULL;
778 unsigned char *kbuf = NULL;
779
780 if (image->file_mode)
781 kbuf = segment->kbuf;
782 else
783 buf = segment->buf;
784 ubytes = segment->bufsz;
785 mbytes = segment->memsz;
786 maddr = segment->mem;
787
788 result = kimage_set_destination(image, maddr);
789 if (result < 0)
790 goto out;
791
792 while (mbytes) {
793 struct page *page;
794 char *ptr;
795 size_t uchunk, mchunk;
796
797 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
798 if (!page) {
799 result = -ENOMEM;
800 goto out;
801 }
802 result = kimage_add_page(image, page_to_boot_pfn(page)
803 << PAGE_SHIFT);
804 if (result < 0)
805 goto out;
806
807 ptr = kmap_local_page(page);
808 /* Start with a clear page */
809 clear_page(ptr);
810 ptr += maddr & ~PAGE_MASK;
811 mchunk = min_t(size_t, mbytes,
812 PAGE_SIZE - (maddr & ~PAGE_MASK));
813 uchunk = min(ubytes, mchunk);
814
815 /* For file based kexec, source pages are in kernel memory */
816 if (image->file_mode)
817 memcpy(ptr, kbuf, uchunk);
818 else
819 result = copy_from_user(ptr, buf, uchunk);
820 kunmap_local(ptr);
821 if (result) {
822 result = -EFAULT;
823 goto out;
824 }
825 ubytes -= uchunk;
826 maddr += mchunk;
827 if (image->file_mode)
828 kbuf += mchunk;
829 else
830 buf += mchunk;
831 mbytes -= mchunk;
832
833 cond_resched();
834 }
835 out:
836 return result;
837 }
838
839 static int kimage_load_crash_segment(struct kimage *image,
840 struct kexec_segment *segment)
841 {
842 /* For crash dump kernels we simply copy the data from
843 * user space to its destination.
844 * We do things a page at a time for the sake of kmap.
845 */
846 unsigned long maddr;
847 size_t ubytes, mbytes;
848 int result;
849 unsigned char __user *buf = NULL;
850 unsigned char *kbuf = NULL;
851
852 result = 0;
853 if (image->file_mode)
854 kbuf = segment->kbuf;
855 else
856 buf = segment->buf;
857 ubytes = segment->bufsz;
858 mbytes = segment->memsz;
859 maddr = segment->mem;
860 while (mbytes) {
861 struct page *page;
862 char *ptr;
863 size_t uchunk, mchunk;
864
865 page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
866 if (!page) {
867 result = -ENOMEM;
868 goto out;
869 }
870 arch_kexec_post_alloc_pages(page_address(page), 1, 0);
871 ptr = kmap_local_page(page);
872 ptr += maddr & ~PAGE_MASK;
873 mchunk = min_t(size_t, mbytes,
874 PAGE_SIZE - (maddr & ~PAGE_MASK));
875 uchunk = min(ubytes, mchunk);
876 if (mchunk > uchunk) {
877 /* Zero the trailing part of the page */
878 memset(ptr + uchunk, 0, mchunk - uchunk);
879 }
880
881 /* For file based kexec, source pages are in kernel memory */
882 if (image->file_mode)
883 memcpy(ptr, kbuf, uchunk);
884 else
885 result = copy_from_user(ptr, buf, uchunk);
886 kexec_flush_icache_page(page);
887 kunmap_local(ptr);
888 arch_kexec_pre_free_pages(page_address(page), 1);
889 if (result) {
890 result = -EFAULT;
891 goto out;
892 }
893 ubytes -= uchunk;
894 maddr += mchunk;
895 if (image->file_mode)
896 kbuf += mchunk;
897 else
898 buf += mchunk;
899 mbytes -= mchunk;
900
901 cond_resched();
902 }
903 out:
904 return result;
905 }
906
907 int kimage_load_segment(struct kimage *image,
908 struct kexec_segment *segment)
909 {
910 int result = -ENOMEM;
911
912 switch (image->type) {
913 case KEXEC_TYPE_DEFAULT:
914 result = kimage_load_normal_segment(image, segment);
915 break;
916 case KEXEC_TYPE_CRASH:
917 result = kimage_load_crash_segment(image, segment);
918 break;
919 }
920
921 return result;
922 }
923
924 struct kexec_load_limit {
925 /* Mutex protects the limit count. */
926 struct mutex mutex;
927 int limit;
928 };
929
930 static struct kexec_load_limit load_limit_reboot = {
931 .mutex = __MUTEX_INITIALIZER(load_limit_reboot.mutex),
932 .limit = -1,
933 };
934
935 static struct kexec_load_limit load_limit_panic = {
936 .mutex = __MUTEX_INITIALIZER(load_limit_panic.mutex),
937 .limit = -1,
938 };
939
940 struct kimage *kexec_image;
941 struct kimage *kexec_crash_image;
942 static int kexec_load_disabled;
943
944 #ifdef CONFIG_SYSCTL
945 static int kexec_limit_handler(struct ctl_table *table, int write,
946 void *buffer, size_t *lenp, loff_t *ppos)
947 {
948 struct kexec_load_limit *limit = table->data;
949 int val;
950 struct ctl_table tmp = {
951 .data = &val,
952 .maxlen = sizeof(val),
953 .mode = table->mode,
954 };
955 int ret;
956
957 if (write) {
958 ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
959 if (ret)
960 return ret;
961
962 if (val < 0)
963 return -EINVAL;
964
965 mutex_lock(&limit->mutex);
966 if (limit->limit != -1 && val >= limit->limit)
967 ret = -EINVAL;
968 else
969 limit->limit = val;
970 mutex_unlock(&limit->mutex);
971
972 return ret;
973 }
974
975 mutex_lock(&limit->mutex);
976 val = limit->limit;
977 mutex_unlock(&limit->mutex);
978
979 return proc_dointvec(&tmp, write, buffer, lenp, ppos);
980 }
981
982 static struct ctl_table kexec_core_sysctls[] = {
983 {
984 .procname = "kexec_load_disabled",
985 .data = &kexec_load_disabled,
986 .maxlen = sizeof(int),
987 .mode = 0644,
988 /* only handle a transition from default "0" to "1" */
989 .proc_handler = proc_dointvec_minmax,
990 .extra1 = SYSCTL_ONE,
991 .extra2 = SYSCTL_ONE,
992 },
993 {
994 .procname = "kexec_load_limit_panic",
995 .data = &load_limit_panic,
996 .mode = 0644,
997 .proc_handler = kexec_limit_handler,
998 },
999 {
1000 .procname = "kexec_load_limit_reboot",
1001 .data = &load_limit_reboot,
1002 .mode = 0644,
1003 .proc_handler = kexec_limit_handler,
1004 },
1005 { }
1006 };
1007
1008 static int __init kexec_core_sysctl_init(void)
1009 {
1010 register_sysctl_init("kernel", kexec_core_sysctls);
1011 return 0;
1012 }
1013 late_initcall(kexec_core_sysctl_init);
1014 #endif
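/*
 * Illustrative usage (not part of the original file): the sysctls registered
 * above appear under /proc/sys/kernel/, e.g.
 *
 *	# echo 1 > /proc/sys/kernel/kexec_load_disabled
 *	# echo 5 > /proc/sys/kernel/kexec_load_limit_reboot
 *
 * kexec_load_disabled only accepts the 0 -> 1 transition, and a load limit
 * may only ever be lowered once set (see kexec_limit_handler()).
 */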
1015
1016 bool kexec_load_permitted(int kexec_image_type)
1017 {
1018 struct kexec_load_limit *limit;
1019
1020 /*
1021 * The kexec syscall may only be used by the superuser, and only
1022 * if it has not been disabled.
1023 */
1024 if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
1025 return false;
1026
1027 /* Check the limit counter and decrease it. */
1028 limit = (kexec_image_type == KEXEC_TYPE_CRASH) ?
1029 &load_limit_panic : &load_limit_reboot;
1030 mutex_lock(&limit->mutex);
1031 if (!limit->limit) {
1032 mutex_unlock(&limit->mutex);
1033 return false;
1034 }
1035 if (limit->limit != -1)
1036 limit->limit--;
1037 mutex_unlock(&limit->mutex);
1038
1039 return true;
1040 }
1041
1042 /*
1043 * No panic_cpu check version of crash_kexec(). This function is called
1044 * only when panic_cpu holds the current CPU number; this is the only CPU
1045 * which processes crash_kexec routines.
1046 */
1047 void __noclone __crash_kexec(struct pt_regs *regs)
1048 {
1049 /* Take the kexec_lock here to prevent sys_kexec_load
1050 * running on one cpu from replacing the crash kernel
1051 * we are using after a panic on a different cpu.
1052 *
1053 * If the crash kernel was not located in a fixed area
1054 * of memory the xchg(&kexec_crash_image) would be
1055 * sufficient. But since I reuse the memory...
1056 */
1057 if (kexec_trylock()) {
1058 if (kexec_crash_image) {
1059 struct pt_regs fixed_regs;
1060
1061 crash_setup_regs(&fixed_regs, regs);
1062 crash_save_vmcoreinfo();
1063 machine_crash_shutdown(&fixed_regs);
1064 machine_kexec(kexec_crash_image);
1065 }
1066 kexec_unlock();
1067 }
1068 }
1069 STACK_FRAME_NON_STANDARD(__crash_kexec);
1070
1071 __bpf_kfunc void crash_kexec(struct pt_regs *regs)
1072 {
1073 int old_cpu, this_cpu;
1074
1075 /*
1076 * Only one CPU is allowed to execute the crash_kexec() code as with
1077 * panic(). Otherwise parallel calls of panic() and crash_kexec()
1078 * may stop each other. To exclude them, we use panic_cpu here too.
1079 */
1080 this_cpu = raw_smp_processor_id();
1081 old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
1082 if (old_cpu == PANIC_CPU_INVALID) {
1083 /* This is the 1st CPU which comes here, so go ahead. */
1084 __crash_kexec(regs);
1085
1086 /*
1087 * Reset panic_cpu to allow another panic()/crash_kexec()
1088 * call.
1089 */
1090 atomic_set(&panic_cpu, PANIC_CPU_INVALID);
1091 }
1092 }
1093
1094 ssize_t crash_get_memory_size(void)
1095 {
1096 ssize_t size = 0;
1097
1098 if (!kexec_trylock())
1099 return -EBUSY;
1100
1101 if (crashk_res.end != crashk_res.start)
1102 size = resource_size(&crashk_res);
1103
1104 kexec_unlock();
1105 return size;
1106 }
1107
1108 int crash_shrink_memory(unsigned long new_size)
1109 {
1110 int ret = 0;
1111 unsigned long start, end;
1112 unsigned long old_size;
1113 struct resource *ram_res;
1114
1115 if (!kexec_trylock())
1116 return -EBUSY;
1117
1118 if (kexec_crash_image) {
1119 ret = -ENOENT;
1120 goto unlock;
1121 }
1122 start = crashk_res.start;
1123 end = crashk_res.end;
1124 old_size = (end == 0) ? 0 : end - start + 1;
1125 if (new_size >= old_size) {
1126 ret = (new_size == old_size) ? 0 : -EINVAL;
1127 goto unlock;
1128 }
1129
1130 ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1131 if (!ram_res) {
1132 ret = -ENOMEM;
1133 goto unlock;
1134 }
1135
1136 start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1137 end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1138
1139 crash_free_reserved_phys_range(end, crashk_res.end);
1140
1141 if ((start == end) && (crashk_res.parent != NULL))
1142 release_resource(&crashk_res);
1143
1144 ram_res->start = end;
1145 ram_res->end = crashk_res.end;
1146 ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
1147 ram_res->name = "System RAM";
1148
1149 crashk_res.end = end - 1;
1150
1151 insert_resource(&iomem_resource, ram_res);
1152
1153 unlock:
1154 kexec_unlock();
1155 return ret;
1156 }
1157
1158 void crash_save_cpu(struct pt_regs *regs, int cpu)
1159 {
1160 struct elf_prstatus prstatus;
1161 u32 *buf;
1162
1163 if ((cpu < 0) || (cpu >= nr_cpu_ids))
1164 return;
1165
1166 /* Using ELF notes here is opportunistic.
1167 * I need a well defined structure format
1168 * for the data I pass, and I need tags
1169 * on the data to indicate what information I have
1170 * squirrelled away. ELF notes happen to provide
1171 * all of that, so there is no need to invent something new.
1172 */
1173 buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
1174 if (!buf)
1175 return;
1176 memset(&prstatus, 0, sizeof(prstatus));
1177 prstatus.common.pr_pid = current->pid;
1178 elf_core_copy_regs(&prstatus.pr_reg, regs);
1179 buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1180 &prstatus, sizeof(prstatus));
1181 final_note(buf);
1182 }
1183
1184 static int __init crash_notes_memory_init(void)
1185 {
1186 /* Allocate memory for saving cpu registers. */
1187 size_t size, align;
1188
1189 /*
1190 * crash_notes could be allocated across 2 vmalloc pages when percpu
1191 * is vmalloc based. vmalloc doesn't guarantee that 2 contiguous vmalloc
1192 * pages are also on 2 contiguous physical pages. In this case the
1193 * 2nd part of crash_notes in the 2nd page could be lost since only the
1194 * starting address and size of crash_notes are exported through sysfs.
1195 * Here round up the size of crash_notes to the nearest power of two
1196 * and pass it to __alloc_percpu as align value. This can make sure
1197 * crash_notes is allocated inside one physical page.
1198 */
1199 size = sizeof(note_buf_t);
1200 align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
1201
1202 /*
1203 * Break compile if size is bigger than PAGE_SIZE since crash_notes
1204 * definitely will be in 2 pages with that.
1205 */
1206 BUILD_BUG_ON(size > PAGE_SIZE);
1207
1208 crash_notes = __alloc_percpu(size, align);
1209 if (!crash_notes) {
1210 pr_warn("Memory allocation for saving cpu register states failed\n");
1211 return -ENOMEM;
1212 }
1213 return 0;
1214 }
1215 subsys_initcall(crash_notes_memory_init);
1216
1217
1218 /*
1219 * Move into place and start executing a preloaded standalone
1220 * executable. If nothing was preloaded return an error.
1221 */
1222 int kernel_kexec(void)
1223 {
1224 int error = 0;
1225
1226 if (!kexec_trylock())
1227 return -EBUSY;
1228 if (!kexec_image) {
1229 error = -EINVAL;
1230 goto Unlock;
1231 }
1232
1233 #ifdef CONFIG_KEXEC_JUMP
1234 if (kexec_image->preserve_context) {
1235 pm_prepare_console();
1236 error = freeze_processes();
1237 if (error) {
1238 error = -EBUSY;
1239 goto Restore_console;
1240 }
1241 suspend_console();
1242 error = dpm_suspend_start(PMSG_FREEZE);
1243 if (error)
1244 goto Resume_console;
1245 /* At this point, dpm_suspend_start() has been called,
1246 * but *not* dpm_suspend_end(). We *must* call
1247 * dpm_suspend_end() now. Otherwise, drivers for
1248 * some devices (e.g. interrupt controllers) become
1249 * desynchronized with the actual state of the
1250 * hardware at resume time, and evil weirdness ensues.
1251 */
1252 error = dpm_suspend_end(PMSG_FREEZE);
1253 if (error)
1254 goto Resume_devices;
1255 error = suspend_disable_secondary_cpus();
1256 if (error)
1257 goto Enable_cpus;
1258 local_irq_disable();
1259 error = syscore_suspend();
1260 if (error)
1261 goto Enable_irqs;
1262 } else
1263 #endif
1264 {
1265 kexec_in_progress = true;
1266 kernel_restart_prepare("kexec reboot");
1267 migrate_to_reboot_cpu();
1268
1269 /*
1270 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1271 * no further code needs to use CPU hotplug (which is true in
1272 * the reboot case). However, the kexec path depends on using
1273 * CPU hotplug again; so re-enable it here.
1274 */
1275 cpu_hotplug_enable();
1276 pr_notice("Starting new kernel\n");
1277 machine_shutdown();
1278 }
1279
1280 kmsg_dump(KMSG_DUMP_SHUTDOWN);
1281 machine_kexec(kexec_image);
1282
1283 #ifdef CONFIG_KEXEC_JUMP
1284 if (kexec_image->preserve_context) {
1285 syscore_resume();
1286 Enable_irqs:
1287 local_irq_enable();
1288 Enable_cpus:
1289 suspend_enable_secondary_cpus();
1290 dpm_resume_start(PMSG_RESTORE);
1291 Resume_devices:
1292 dpm_resume_end(PMSG_RESTORE);
1293 Resume_console:
1294 resume_console();
1295 thaw_processes();
1296 Restore_console:
1297 pm_restore_console();
1298 }
1299 #endif
1300
1301 Unlock:
1302 kexec_unlock();
1303 return error;
1304 }
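/*
 * Illustrative userspace sketch (not part of the original file): once an
 * image has been loaded with kexec_load(2) or kexec_file_load(2), the jump
 * into kernel_kexec() is normally triggered through the reboot syscall:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/reboot.h>
 *
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_KEXEC, NULL);
 */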
1305