/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 */

#ifndef _ARM64_KEXEC_H
#define _ARM64_KEXEC_H

/* Maximum physical address we can use pages from */

#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)

/* Maximum address we can reach in physical address mode */

#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)

/* Maximum address we can use for the control code buffer */

#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)

#define KEXEC_CONTROL_PAGE_SIZE 4096

#define KEXEC_ARCH KEXEC_ARCH_AARCH64

#ifndef __ASSEMBLY__

/**
 * crash_setup_regs() - save registers for the panic kernel
 *
 * @newregs: registers are saved here
 * @oldregs: registers to be saved (may be %NULL)
 */

static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
	} else {
		u64 tmp1, tmp2;

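		/*
		 * No previous register state: snapshot the calling CPU into
		 * @newregs using the pt_regs layout; x0-x29 are stored in
		 * pairs, then x30 and sp, and finally the pc of the label
		 * below together with a pstate value rebuilt from CurrentEL,
		 * SPSel, DAIF and NZCV.
		 */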
		__asm__ __volatile__ (
			"stp x0, x1, [%2, #16 * 0]\n"
			"stp x2, x3, [%2, #16 * 1]\n"
			"stp x4, x5, [%2, #16 * 2]\n"
			"stp x6, x7, [%2, #16 * 3]\n"
			"stp x8, x9, [%2, #16 * 4]\n"
			"stp x10, x11, [%2, #16 * 5]\n"
			"stp x12, x13, [%2, #16 * 6]\n"
			"stp x14, x15, [%2, #16 * 7]\n"
			"stp x16, x17, [%2, #16 * 8]\n"
			"stp x18, x19, [%2, #16 * 9]\n"
			"stp x20, x21, [%2, #16 * 10]\n"
			"stp x22, x23, [%2, #16 * 11]\n"
			"stp x24, x25, [%2, #16 * 12]\n"
			"stp x26, x27, [%2, #16 * 13]\n"
			"stp x28, x29, [%2, #16 * 14]\n"
			"mov %0, sp\n"
			"stp x30, %0, [%2, #16 * 15]\n"

			"/* faked current PSTATE */\n"
			"mrs %0, CurrentEL\n"
			"mrs %1, SPSEL\n"
			"orr %0, %0, %1\n"
			"mrs %1, DAIF\n"
			"orr %0, %0, %1\n"
			"mrs %1, NZCV\n"
			"orr %0, %0, %1\n"
			/* pc */
			"adr %1, 1f\n"
			"1:\n"
			"stp %1, %0, [%2, #16 * 16]\n"
			: "=&r" (tmp1), "=&r" (tmp2)
			: "r" (newregs)
			: "memory"
		);
	}
}
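
/*
 * A rough usage sketch (illustration only): the generic crash path in
 * kernel/kexec_core.c invokes the helper above along the lines of
 *
 *	struct pt_regs fixed_regs;
 *
 *	crash_setup_regs(&fixed_regs, regs);
 *	machine_crash_shutdown(&fixed_regs);
 *
 * where a NULL @regs makes crash_setup_regs() capture the calling CPU's
 * own register state instead of copying @regs.
 */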

#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_HIBERNATION)
extern bool crash_is_nosave(unsigned long pfn);
extern void crash_prepare_suspend(void);
extern void crash_post_resume(void);

void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#define crash_free_reserved_phys_range crash_free_reserved_phys_range
#else
static inline bool crash_is_nosave(unsigned long pfn) {return false; }
static inline void crash_prepare_suspend(void) {}
static inline void crash_post_resume(void) {}
#endif

struct kimage;

#if defined(CONFIG_KEXEC_CORE)
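/*
 * cpu_soft_restart() turns the MMU off and branches to the code at @entry,
 * passing @arg0-@arg2 in registers x0-x2. A non-zero @el2_switch requests a
 * switch back to EL2 before the jump (when the kernel booted at EL2 but is
 * running at EL1).
 */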
void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
		      unsigned long arg0, unsigned long arg1,
		      unsigned long arg2);

int machine_kexec_post_load(struct kimage *image);
#define machine_kexec_post_load machine_kexec_post_load

void arch_kexec_protect_crashkres(void);
#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres

void arch_kexec_unprotect_crashkres(void);
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
#endif

#define ARCH_HAS_KIMAGE_ARCH

struct kimage_arch {
	void *dtb;			/* virtual address of the DTB copy */
	phys_addr_t dtb_mem;		/* where that DTB is loaded in memory */
	phys_addr_t kern_reloc;		/* physical address of the relocation code */
	phys_addr_t el2_vectors;	/* EL2 exception vectors used while relocating */
	/* Transitional page-table state handed to the relocation code: */
	phys_addr_t ttbr0;
	phys_addr_t ttbr1;
	phys_addr_t zero_page;
	unsigned long phys_offset;
	unsigned long t0sz;
};

#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_image_ops;

int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup

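/*
 * Add the segments other than the kernel image itself (initrd and device
 * tree) to @image.
 */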
extern int load_other_segments(struct kimage *image,
			unsigned long kernel_load_addr, unsigned long kernel_size,
			char *initrd, unsigned long initrd_len,
			char *cmdline);
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ARM64_KEXEC_H */