// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/mem_detect.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata(__amode31_base);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
int __bootdata(is_full_image) = 1;
struct initrd_data __bootdata(initrd_data);

u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

struct machine_info machine;

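/*
 * Print an error message via the SCLP early console and stop in
 * disabled wait. Called when the boot code detects an inconsistency
 * it cannot recover from; this function does not return.
 */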
void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}

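/*
 * Detect machine facilities relevant for early setup: EDAT-1
 * (facility 8), EDAT-2 (facility 78) and instruction-execution
 * protection (facility 130). EDAT-1 and NX additionally need their
 * enablement bits set in control register 0 to take effect.
 */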
static void detect_facilities(void)
{
	if (test_facility(8)) {
		machine.has_edat1 = 1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))
		machine.has_edat2 = 1;
	if (!noexec_disabled && test_facility(130)) {
		machine.has_nx = 1;
		__ctl_set_bit(0, 20);
	}
}

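/*
 * Initialize the program-parameter field in the lowcore and load it
 * into the CPU if the load-program-parameter facility (40) is
 * installed. The value is picked up e.g. by the CPU measurement
 * sampling facility.
 */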
static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
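/*
 * For an uncompressed kernel the first safe address is simply the
 * first address past the linked kernel image, including its bss.
 */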
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif

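/*
 * Move the initrd above "safe_addr" if it would otherwise be
 * overwritten by the decompressed kernel, and return the first
 * address past the (possibly moved) initrd.
 */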
static unsigned long rescue_initrd(unsigned long safe_addr)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return safe_addr;
	if (!initrd_data.start || !initrd_data.size)
		return safe_addr;
	if (initrd_data.start < safe_addr) {
		memmove((void *)safe_addr, (void *)initrd_data.start, initrd_data.size);
		initrd_data.start = safe_addr;
	}
	return initrd_data.start + initrd_data.size;
}

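/*
 * Copy the .boot.data and .boot.preserved.data sections collected by
 * the decompressor into the corresponding sections of the decompressed
 * kernel. The sizes must match exactly; a mismatch indicates
 * inconsistently built images.
 */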
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

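/*
 * Apply the dynamic relocations recorded in the .rela.dyn section of
 * the decompressed kernel. "offset" is the KASLR displacement; both
 * the relocation target address and the symbol value are shifted by
 * it before arch_kexec_do_relocs() patches the location.
 */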
static void handle_relocs(unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might not all be online memory, but may also include standby
 * (offline) memory. "ident_map_size" could be lower than the actual amount
 * of standby or even online memory present, due to limiting factors. We
 * should never go above this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always <= end of the last online memory block (get_mem_detect_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

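/*
 * Decide between a 3- and 4-level kernel address space layout and carve
 * the virtual address range below the resulting limit into (from top to
 * bottom): the memcpy_real area, the absolute lowcore mapping, the
 * modules area, the vmalloc area, the vmemmap array and the identity
 * mapping. Returns the chosen asce_limit.
 */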
static unsigned long setup_kernel_memory_layout(void)
{
	unsigned long vmemmap_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) ||
	    vmalloc_size > _REGION2_SIZE ||
	    vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
		    _REGION2_SIZE) {
		asce_limit = _REGION1_SIZE;
		rte_size = _REGION2_SIZE;
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}
	/*
	 * Force modules and the vmalloc area under the ultravisor secure
	 * storage limit, so that any vmalloc allocation we do can be used
	 * to back secure guest storage.
	 */
	vmax = adjust_to_uv_max(asce_limit);
#ifdef CONFIG_KASAN
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	__memcpy_real_area = round_down(vmax - PAGE_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));
	MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
	vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
	/* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
	vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
	/* make sure identity map doesn't overlay with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlay with vmalloc area */
	VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
	vmemmap = (struct page *)vmemmap_start;

	return asce_limit;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel,
 * NOT the decompressor's.
 */
static void clear_bss_section(void)
{
	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set the vmalloc area size to an 8th of (potential) physical memory
 * size, unless the size has been set by a kernel command line
 * parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

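/*
 * Shift all position-dependent fields of the vmlinux info structure by
 * the KASLR offset so they describe the relocated kernel image.
 */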
static void offset_vmlinux_info(unsigned long offset)
{
	vmlinux.default_lma += offset;
	*(unsigned long *)(&vmlinux.entry) += offset;
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.rela_dyn_start += offset;
	vmlinux.rela_dyn_end += offset;
	vmlinux.dynsym_start += offset;
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
}

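/*
 * Reserve room for the AMODE31 section at the first page-aligned
 * address at or above "safe_addr" and return the new safe address
 * past it.
 */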
static unsigned long reserve_amode31(unsigned long safe_addr)
{
	__amode31_base = PAGE_ALIGN(safe_addr);
	return __amode31_base + vmlinux.amode31_size;
}

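/*
 * Main entry point of the boot code: gather memory and IPL
 * information, lay out the kernel virtual address space, optionally
 * relocate and decompress the kernel image, and finally jump to its
 * entry point with DAT enabled.
 */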
void startup_kernel(void)
{
	unsigned long max_physmem_end;
	unsigned long random_lma;
	unsigned long safe_addr;
	unsigned long asce_limit;
	void *img;
	psw_t psw;

	initrd_data.start = parmarea.initrd_start;
	initrd_data.size = parmarea.initrd_size;
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	setup_lpp();
	store_ipl_parmblock();
	safe_addr = mem_safe_offset();
	safe_addr = reserve_amode31(safe_addr);
	safe_addr = read_ipl_report(safe_addr);
	uv_query_info();
	safe_addr = rescue_initrd(safe_addr);
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	detect_facilities();
	sanitize_prot_virt_host();
	max_physmem_end = detect_memory(&safe_addr);
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout();
	mem_detect_set_usable_limit(ident_map_size);

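	/*
	 * With KASLR enabled, pick a random load address and shift the
	 * vmlinux info accordingly; the image itself is moved (or, for
	 * compressed kernels, decompressed) to that address below.
	 */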
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
		random_lma = get_random_base(safe_addr);
		if (random_lma) {
			__kaslr_offset = random_lma - vmlinux.default_lma;
			img = (void *)vmlinux.default_lma;
			offset_vmlinux_info(__kaslr_offset);
		}
	}

	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset)
		memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - handle_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by
	 *   setup_vmem() (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow handle_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section();
	handle_relocs(__kaslr_offset);
	setup_vmem(asce_limit);
	copy_bootdata();

	if (__kaslr_offset) {
		/*
		 * Save the KASLR offset for early dumps, before vmcore_info
		 * is set. Mark it as odd to distinguish it from a real
		 * vmcore_info pointer.
		 */
		S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
		/* Clear non-relocated kernel */
		if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
			memset(img, 0, vmlinux.image_size);
	}

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	__load_psw(psw);
}