// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>
#include <linux/kmemleak.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
#include <asm/xen/swiotlb-xen.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * If the corresponding config options are enabled, we create both ZONE_DMA
 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In that case, ZONE_DMA32 covers the rest of the 32-bit addressable memory;
 * otherwise it is empty.
 *
 * Memory reservation for the crash kernel is either done early or deferred,
 * depending on the DMA zone configs (ZONE_DMA/ZONE_DMA32):
 *
 * In the absence of ZONE_DMA and ZONE_DMA32, arm64_dma_phys_limit is
 * initialized here instead of in max_zone_phys(). This allows the crash
 * kernel memory, which depends on arm64_dma_phys_limit, to be reserved
 * early. Reserving the crash kernel memory early allows block mappings
 * (larger than page granularity) to be created in the linear map for all
 * the memory bank ranges, which gives a comparatively quicker boot.
 *
 * If ZONE_DMA or ZONE_DMA32 is configured, the crash kernel memory
 * reservation is delayed until the DMA zone memory range sizes have been
 * initialized in zone_sizes_init(). Deferring is necessary to steer clear
 * of the DMA zone memory range and avoid overlapping allocations. As a
 * consequence, the crash kernel memory boundaries are not known when all
 * the bank memory ranges are mapped, so the crash kernel range cannot be
 * excluded from block mappings and page-granularity mappings are created
 * for the entire memory range instead. Hence a slightly slower boot is
 * observed.
 *
 * Note: Page-granularity mappings are necessary for the crash kernel memory
 * range so that its size can be shrunk via the
 * /sys/kernel/kexec_crash_size interface.
 */
#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
phys_addr_t __ro_after_init arm64_dma_phys_limit;
#else
phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
#endif

/* Current arm64 boot protocol requires 2MB alignment */
#define CRASH_ALIGN			SZ_2M

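/*
 * Bounds for the crash kernel reservation: "low" allocations must fall below
 * arm64_dma_phys_limit so that they remain DMA-addressable, while "high"
 * allocations may go anywhere within the supported physical address range.
 */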
#define CRASH_ADDR_LOW_MAX		arm64_dma_phys_limit
#define CRASH_ADDR_HIGH_MAX		(PHYS_MASK + 1)

#define DEFAULT_CRASH_KERNEL_LOW_SIZE	(128UL << 20)

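/*
 * Reserve the "low" portion of the crash kernel memory below
 * CRASH_ADDR_LOW_MAX and register it as crashk_low_res. Used when the main
 * crash kernel region sits in high memory, so that the dump-capture kernel
 * still has some DMA-addressable memory to work with.
 */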
static int __init reserve_crashkernel_low(unsigned long long low_size)
{
	unsigned long long low_base;

	low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
	if (!low_base) {
		pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
		return -ENOMEM;
	}

	pr_info("crashkernel low memory reserved: 0x%08llx - 0x%08llx (%lld MB)\n",
		low_base, low_base + low_size, low_size >> 20);

	crashk_low_res.start = low_base;
	crashk_low_res.end = low_base + low_size - 1;
	insert_resource(&iomem_resource, &crashk_low_res);

	return 0;
}

/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area specified by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by the
 * dump-capture kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	unsigned long long crash_low_size = 0;
	unsigned long long crash_max = CRASH_ADDR_LOW_MAX;
	char *cmdline = boot_command_line;
	int ret;
	bool fixed_base = false;

	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	/* crashkernel=X[@offset] */
	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	if (ret == -ENOENT) {
		ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
		if (ret || !crash_size)
			return;

		/*
		 * crashkernel=Y,low is optional, but an invalid value is
		 * not allowed.
		 */
		ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
		if (ret == -ENOENT)
			crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
		else if (ret)
			return;

		crash_max = CRASH_ADDR_HIGH_MAX;
	} else if (ret || !crash_size) {
		/* The specified value is invalid */
		return;
	}

	crash_size = PAGE_ALIGN(crash_size);

	/* User specifies base address explicitly. */
	if (crash_base) {
		fixed_base = true;
		crash_max = crash_base + crash_size;
	}

retry:
	crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
					       crash_base, crash_max);
	if (!crash_base) {
		/*
		 * If the first attempt was for low memory, fall back to
		 * high memory; the minimum required low memory will be
		 * reserved later.
		 */
		if (!fixed_base && (crash_max == CRASH_ADDR_LOW_MAX)) {
			crash_max = CRASH_ADDR_HIGH_MAX;
			crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
			goto retry;
		}

		pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
			crash_size);
		return;
	}

	if ((crash_base > CRASH_ADDR_LOW_MAX - crash_low_size) &&
	     crash_low_size && reserve_crashkernel_low(crash_low_size)) {
		memblock_phys_free(crash_base, crash_size);
		return;
	}

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	/*
	 * The crashkernel memory will be removed from the kernel linear
	 * map. Inform kmemleak so that it won't try to access it.
	 */
	kmemleak_ignore_phys(crash_base);
	if (crashk_low_res.end)
		kmemleak_ignore_phys(crashk_low_res.start);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}

/*
 * Return the maximum physical address for a zone given its address-bits
 * limit. If DRAM starts above the 32-bit boundary, expand the zone to the
 * maximum available memory; otherwise cap it at 32 bits.
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
	phys_addr_t phys_start = memblock_start_of_DRAM();

	if (phys_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;
	else if (phys_start > zone_mask)
		zone_mask = U32_MAX;

	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}

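/*
 * Compute the zone boundaries. The ZONE_DMA limit is derived from the
 * narrowest DMA addressing constraint reported by firmware (ACPI IORT or
 * the device tree), capped at 32 bits.
 */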
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
	unsigned int __maybe_unused acpi_zone_dma_bits;
	unsigned int __maybe_unused dt_zone_dma_bits;
	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_pfn;

	free_area_init(max_zone_pfns);
}

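/*
 * Check whether a PFN is backed by memory covered by the kernel linear map,
 * i.e. memblock memory that is not marked MEMBLOCK_NOMAP.
 */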
int pfn_is_map_memory(unsigned long pfn)
{
	phys_addr_t addr = PFN_PHYS(pfn);

	/* avoid false positives for bogus PFNs, see comment in pfn_valid() */
	if (PHYS_PFN(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_is_map_memory);

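/* Physical memory limit requested via the "mem=" command line parameter. */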
static phys_addr_t memory_limit __ro_after_init = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

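/*
 * Establish the physical memory layout: select memstart_addr, trim memory
 * that the linear map or the supported physical address range cannot cover,
 * apply the "mem=" limit, and reserve the kernel image, the initrd and,
 * unless deferred, the crash kernel.
 */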
void __init arm64_memblock_init(void)
{
	s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);

	/*
	 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
	 * be limited in their ability to support a linear map that exceeds 51
	 * bits of VA space, depending on the placement of the ID map. Given
	 * that the placement of the ID map may be randomized, let's simply
	 * limit the kernel's linear map to 51 bits as well if we detect this
	 * configuration.
	 */
	if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
	    is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
		linear_region_size = min_t(u64, linear_region_size, BIT(51));
	}

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
		pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if it would otherwise
		 * result in the initrd becoming inaccessible via the linear
		 * mapping. Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_add(base, size);
			memblock_clear_nomap(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_stext), _end - _stext);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	if (!defer_reserve_crashkernel())
		reserve_crashkernel();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}

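/*
 * Complete the memory setup once memblock is fully populated: run the early
 * memtest, initialize NUMA, set up the sparse memory model and the zone
 * sizes, and perform the remaining CMA and crash kernel reservations.
 */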
void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arch_numa_init();

	/*
	 * Must be done after arch_numa_init(), which calls numa_init() to
	 * initialize node_online_map; that map is used by
	 * hugetlb_cma_reserve() when allocating the required CMA size across
	 * online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	dma_pernuma_cma_reserve();

	kvm_hyp_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so it must be
	 * done after the fixed reservations.
	 */
	sparse_init();
	zone_sizes_init();

	/*
	 * Reserve the CMA area after arm64_dma_phys_limit has been
	 * initialised.
	 */
	dma_contiguous_reserve(arm64_dma_phys_limit);

	/*
	 * request_standard_resources() depends on the crash kernel memory
	 * being reserved, so do it here.
	 */
	if (defer_reserve_crashkernel())
		reserve_crashkernel();

	memblock_dump_all();
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
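	/*
	 * Set up the SWIOTLB bounce buffer if some memory lies above
	 * arm64_dma_phys_limit and may therefore be out of reach for
	 * DMA-limited devices.
	 */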
	swiotlb_init(max_pfn > PFN_DOWN(arm64_dma_phys_limit), SWIOTLB_VERBOSE);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	/*
	 * Selected page table levels should match when derived from
	 * scratch using the virtual address range and page size.
	 */
	BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
		     CONFIG_PGTABLE_LEVELS);

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

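/*
 * Free the memory occupied by the kernel's .init sections once booting has
 * finished, and unmap the region so that stray references fault.
 */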
void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	vunmap_range((u64)__init_begin, (u64)__init_end);
}

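/* Report whether a "mem=" limit is in effect. */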
void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
}