Lines matching refs: kernel_map

42 struct kernel_mapping kernel_map __ro_after_init;
43 EXPORT_SYMBOL(kernel_map);
45 #define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map)) macro
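Taken together, the matches below touch roughly a dozen fields of kernel_map. A rough reconstruction of the structure, assembled only from the fields that appear in this listing (the authoritative definition lives in the RISC-V arch headers and may differ in types, ordering and config guards), looks like this:

    /* Hedged reconstruction from the fields referenced in this listing;
     * not the verbatim kernel definition. */
    struct kernel_mapping {
        unsigned long virt_addr;      /* virtual base of the kernel image */
        unsigned long virt_offset;    /* KASLR offset added to KERNEL_LINK_ADDR */
        unsigned long phys_addr;      /* physical load address of the kernel */
        unsigned long size;           /* _end - _start */
        unsigned long page_offset;    /* base of the linear mapping */
        unsigned long va_pa_offset;   /* linear-map VA minus RAM PA */
        unsigned long va_kernel_pa_offset;          /* kernel VA minus kernel PA */
        /* XIP kernels: text executes from flash, data is copied to RAM */
        unsigned long xiprom;         /* physical base of the XIP text */
        unsigned long xiprom_sz;      /* _exiprom - _xiprom */
        unsigned long va_kernel_xip_text_pa_offset;
        unsigned long va_kernel_xip_data_pa_offset;
    };

The XIP_FIXUP wrapper at line 45 is why the macro shadows the variable of the same name: on an execute-in-place kernel the early boot code still runs from flash, where the writable copy of kernel_map has a different address than its link-time one, so every kernel_map access is transparently redirected to the fixed-up location.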
158 print_ml("kernel", (unsigned long)kernel_map.virt_addr, in print_vm_layout()
251 kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base; in setup_bootmem()
462 BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT); in alloc_pmd_early()
526 BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT); in alloc_pud_early()
564 BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT); in alloc_p4d_early()
768 kernel_map.page_offset = PAGE_OFFSET_L4; in disable_pgtable_l5()
775 kernel_map.page_offset = PAGE_OFFSET_L3; in disable_pgtable_l4()
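Lines 768 and 775 rewrite page_offset when the boot code has to fall back from five-level to four- (or three-) level paging: the base of the linear mapping depends on how many virtual-address bits the selected SATP mode actually provides. A minimal sketch of that fallback pattern (pgtable_l5_enabled, satp_mode and SATP_MODE_48 are assumed from the usual RISC-V boot code; the real helper may differ):

    /* Sketch only: when Sv57 probing fails, drop to Sv48 and move the
     * linear-mapping base to match the smaller virtual address space. */
    static void disable_pgtable_l5(void)
    {
        pgtable_l5_enabled = false;
        kernel_map.page_offset = PAGE_OFFSET_L4;
        satp_mode = SATP_MODE_48;
    }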
890 uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR; in relocate_kernel()
895 uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr; in relocate_kernel()
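The two deltas computed at lines 890 and 895 are what let the early relocation pass run while the MMU is still off: relocation targets were linked against KERNEL_LINK_ADDR, so the slot holding each entry is located by subtracting va_kernel_link_pa_offset, and the value stored there is rebased by reloc_offset so it lands at the KASLR-chosen virtual base. A simplified sketch of handling one RELATIVE-style entry (names and structure are illustrative, not the kernel's exact loop):

    /* Illustrative only: patch a single relocation entry before paging is on. */
    static void patch_one_rela(Elf64_Rela *rela,
                               uintptr_t reloc_offset,
                               uintptr_t va_kernel_link_pa_offset)
    {
        /* r_offset is a link-time VA; convert it to a PA we can write through. */
        Elf64_Addr *slot = (Elf64_Addr *)(rela->r_offset - va_kernel_link_pa_offset);
        Elf64_Addr value = rela->r_addend;

        if (value >= KERNEL_LINK_ADDR)   /* only kernel addresses move */
            value += reloc_offset;
        *slot = value;
    }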
925 end_va = kernel_map.virt_addr + kernel_map.xiprom_sz; in create_kernel_page_table()
926 for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE) in create_kernel_page_table()
928 kernel_map.xiprom + (va - kernel_map.virt_addr), in create_kernel_page_table()
932 start_va = kernel_map.virt_addr + (uintptr_t)&_sdata - (uintptr_t)&_start; in create_kernel_page_table()
933 end_va = kernel_map.virt_addr + kernel_map.size; in create_kernel_page_table()
936 kernel_map.phys_addr + (va - start_va), in create_kernel_page_table()
944 end_va = kernel_map.virt_addr + kernel_map.size; in create_kernel_page_table()
945 for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE) in create_kernel_page_table()
947 kernel_map.phys_addr + (va - kernel_map.virt_addr), in create_kernel_page_table()
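Lines 925-947 are the two flavours of the kernel image mapping: on an XIP kernel, text is mapped PMD_SIZE at a time straight out of flash (xiprom) and the data section separately out of RAM, while a regular kernel maps the whole image from phys_addr in one loop. Reduced to its essentials, and presumably going through the same create_pgd_mapping() helper that appears in the trampoline block further down (protection selection and the intermediate-table allocator are omitted), the non-XIP loop amounts to:

    /* Sketch of the loop around lines 944-947: map the kernel image
     * PMD_SIZE at a time, kernel VA -> physical load address. */
    end_va = kernel_map.virt_addr + kernel_map.size;
    for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
        create_pgd_mapping(pgdir, va,
                           kernel_map.phys_addr + (va - kernel_map.virt_addr),
                           PMD_SIZE, PAGE_KERNEL_EXEC);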
1065 return kernel_map.virt_offset; in kaslr_offset()
1088 kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE; in setup_vm()
1092 kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset; in setup_vm()
1096 kernel_map.page_offset = PAGE_OFFSET_L3; in setup_vm()
1098 kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL); in setup_vm()
1100 kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR; in setup_vm()
1101 kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom); in setup_vm()
1104 kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE; in setup_vm()
1105 kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start); in setup_vm()
1107 kernel_map.va_kernel_xip_text_pa_offset = kernel_map.virt_addr - kernel_map.xiprom; in setup_vm()
1108 kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr in setup_vm()
1111 kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL); in setup_vm()
1112 kernel_map.phys_addr = (uintptr_t)(&_start); in setup_vm()
1113 kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr; in setup_vm()
1114 kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr; in setup_vm()
1134 kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ? in setup_vm()
1135 0UL : PAGE_OFFSET - kernel_map.phys_addr; in setup_vm()
1141 BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0); in setup_vm()
1148 BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K); in setup_vm()
1158 BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size); in setup_vm()
1181 create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr, in setup_vm()
1184 create_p4d_mapping(trampoline_p4d, kernel_map.virt_addr, in setup_vm()
1187 create_pud_mapping(trampoline_pud, kernel_map.virt_addr, in setup_vm()
1190 create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr, in setup_vm()
1191 kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC); in setup_vm()
1193 create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr, in setup_vm()
1194 kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC); in setup_vm()
1198 create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr, in setup_vm()
1199 kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC); in setup_vm()
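Everything above converges on a handful of offsets that later address translation relies on: va_pa_offset (lines 251 and 1134) maps RAM physical addresses into the linear mapping, while va_kernel_pa_offset (line 1114) maps the kernel image's physical addresses into its own, separately placed virtual window; the trampoline entries at lines 1181-1199 simply cover that window with a single huge mapping so paging can be switched on while still executing from kernel text. A hedged sketch of how the two offsets split physical-to-virtual conversion (illustrative helpers, not the exact macros from the arch headers):

    /* Illustrative only: which offset applies depends on whether the
     * physical address belongs to ordinary RAM or to the kernel image. */
    static inline void *ram_pa_to_linear_va(unsigned long pa)
    {
        return (void *)(pa + kernel_map.va_pa_offset);        /* linear mapping */
    }

    static inline void *kernel_pa_to_kernel_va(unsigned long pa)
    {
        return (void *)(pa + kernel_map.va_kernel_pa_offset); /* kernel image */
    }

Note the split visible in the listing itself: on 32-bit, line 1135 derives va_pa_offset directly from the kernel's own physical address, whereas the 64-bit path leaves it at 0 in setup_vm() and only fills it in from phys_ram_base in setup_bootmem() at line 251.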