/linux-6.3-rc2/include/asm-generic/

pgtable-nop4d.h
      9  typedef struct { pgd_t pgd; } p4d_t;    (member)
     21  static inline int pgd_none(pgd_t pgd) { return 0; }    (in pgd_none(), argument)
     22  static inline int pgd_bad(pgd_t pgd) { return 0; }    (in pgd_bad(), argument)
     23  static inline int pgd_present(pgd_t pgd) { return 1; }    (in pgd_present(), argument)
     24  static inline void pgd_clear(pgd_t *pgd) { }    (in pgd_clear(), argument)
     25  #define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd))
     27  #define pgd_populate(mm, pgd, p4d) do { } while (0)    (argument)
     37  return (p4d_t *)pgd;    (in p4d_offset())
     40  #define p4d_val(x) (pgd_val((x).pgd))
     43  #define pgd_page(pgd) (p4d_page((p4d_t){ pgd }))    (argument)
    [all …]
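
The pgtable-nop4d.h hits above are the generic "no p4d" fold. As a condensed sketch of what those matches add up to (paraphrased from the lines shown, not the verbatim header): a p4d_t is just a wrapper around pgd_t, the pgd-level predicates become constant stubs, and p4d_offset() hands back the pgd slot itself, so walkers written for five page-table levels still compile and work on configurations with fewer levels.

    /* Sketch of the p4d fold, reconstructed from the matches above. */
    typedef struct { pgd_t pgd; } p4d_t;

    /* At a folded level the pgd can never be "none" or "bad"... */
    static inline int  pgd_none(pgd_t pgd)    { return 0; }
    static inline int  pgd_bad(pgd_t pgd)     { return 0; }
    static inline int  pgd_present(pgd_t pgd) { return 1; }
    static inline void pgd_clear(pgd_t *pgd)  { }

    /* ...and "descending" to the p4d level is a no-op cast. */
    static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
    {
            return (p4d_t *)pgd;
    }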

/linux-6.3-rc2/arch/x86/mm/

kasan_init_64.c
    130  if (pgd_none(*pgd)) {    (in kasan_populate_pgd())
    145  pgd_t *pgd;    (in kasan_populate_shadow(), local)
    150  pgd = pgd_offset_k(addr);    (in kasan_populate_shadow())
    171  pgd_t *pgd;    (in clear_pgds(), local)
    182  pgd_clear(pgd);    (in clear_pgds())
    197  return (p4d_t *)pgd;    (in early_p4d_offset())
    212  if (pgd_none(*pgd)) {    (in kasan_early_p4d_populate())
    215  set_pgd(pgd, pgd_entry);    (in kasan_early_p4d_populate())
    238  pgd += pgd_index(addr);    (in kasan_map_early_shadow())
    267  pgd_t *pgd;    (in kasan_shallow_populate_pgds(), local)
    [all …]

pgtable.c
    139  pgd_list_add(pgd);    (in pgd_ctor())
    149  pgd_list_del(pgd);    (in pgd_dtor())
    264  pgd_t pgd = *pgdp;    (in mop_up_one_pmd(), local)
    423  pgd_t *pgd;    (in pgd_alloc(), local)
    429  if (pgd == NULL)    (in pgd_alloc())
    432  mm->pgd = pgd;    (in pgd_alloc())
    452  pgd_ctor(mm, pgd);    (in pgd_alloc())
    461  return pgd;    (in pgd_alloc())
    470  _pgd_free(pgd);    (in pgd_alloc())
    478  pgd_dtor(pgd);    (in pgd_free())
    [all …]

pti.c
    136  return pgd;    (in __pti_set_user_pgtbl())
    142  kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;    (in __pti_set_user_pgtbl())
    157  if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&    (in __pti_set_user_pgtbl())
    159  pgd.pgd |= _PAGE_NX;    (in __pti_set_user_pgtbl())
    162  return pgd;    (in __pti_set_user_pgtbl())
    181  if (pgd_none(*pgd)) {    (in pti_user_pagetable_walk_p4d())
    190  return p4d_offset(pgd, address);    (in pti_user_pagetable_walk_p4d())
    315  pgd_t *pgd;    (in pti_clone_pgtable(), local)
    323  pgd = pgd_offset_k(addr);    (in pti_clone_pgtable())
    324  if (WARN_ON(pgd_none(*pgd)))    (in pti_clone_pgtable())
    [all …]
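
The pti.c matches are all in __pti_set_user_pgtbl(), the hook through which kernel PGD writes are filtered under page-table isolation: the new value is mirrored into the user half of the PGD pair, and user-accessible present entries get _PAGE_NX in the kernel half. A rough sketch of that flow, condensed from the matches above; line 157 continues onto a line the listing does not show, so the NX-capability test here is our assumption, and the real function's early-out checks are simplified.

    /* Sketch only: mirror a PGD write into the user page table and
     * strip execute permission from the kernel copy. */
    static pgd_t pti_set_user_pgtbl_sketch(pgd_t *pgdp, pgd_t pgd)
    {
            /* Entries above the user range have no shadow to maintain. */
            if (!pgdp_maps_userspace(pgdp))
                    return pgd;

            /* Keep the user copy of this slot in sync (line 142 above). */
            kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

            /* Lines 157-159 above: user-visible present entries become
             * non-executable in the kernel copy. The NX check below is an
             * assumption about the elided half of line 157. */
            if ((pgd.pgd & (_PAGE_USER | _PAGE_PRESENT)) ==
                            (_PAGE_USER | _PAGE_PRESENT) &&
                (__supported_pte_mask & _PAGE_NX))
                    pgd.pgd |= _PAGE_NX;

            return pgd;
    }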

init_64.c
    150  pgd_t *pgd;    (in sync_global_pgds_l5(), local)
    161  if (pgd_none(*pgd))    (in sync_global_pgds_l5())
    191  pgd_t *pgd;    (in sync_global_pgds_l4(), local)
    251  if (pgd_none(*pgd)) {    (in fill_p4d())
    327  pgd_t *pgd;    (in set_pte_vaddr(), local)
    333  if (pgd_none(*pgd)) {    (in set_pte_vaddr())
    345  pgd_t *pgd;    (in populate_extra_pmd(), local)
    369  pgd_t *pgd;    (in __init_extra_mapping(), local)
    380  if (pgd_none(*pgd)) {    (in __init_extra_mapping())
   1224  pgd_t *pgd;    (in remove_pagetable(), local)
    [all …]

/linux-6.3-rc2/arch/riscv/include/asm/

pgtable-64.h
    316  *pgdp = pgd;    (in set_pgd())
    321  static inline int pgd_none(pgd_t pgd)    (in pgd_none(), argument)
    324  return (pgd_val(pgd) == 0);    (in pgd_none())
    329  static inline int pgd_present(pgd_t pgd)    (in pgd_present(), argument)
    332  return (pgd_val(pgd) & _PAGE_PRESENT);    (in pgd_present())
    337  static inline int pgd_bad(pgd_t pgd)    (in pgd_bad(), argument)
    340  return !pgd_present(pgd);    (in pgd_bad())
    348  set_pgd(pgd, __pgd(0));    (in pgd_clear())
    358  #define pgd_page_vaddr(pgd) ((unsigned long)pgd_pgtable(pgd))    (argument)
    364  #define pgd_page(pgd) pgd_page(pgd)    (argument)
    [all …]

pgalloc.h
     67  set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));    (in pgd_populate())
     77  set_pgd_safe(pgd,    (in pgd_populate_safe())
    130  static inline void sync_kernel_mappings(pgd_t *pgd)    (in sync_kernel_mappings(), argument)
    132  memcpy(pgd + USER_PTRS_PER_PGD,    (in sync_kernel_mappings())
    133  init_mm.pgd + USER_PTRS_PER_PGD,    (in sync_kernel_mappings())
    139  pgd_t *pgd;    (in pgd_alloc(), local)
    141  pgd = (pgd_t *)__get_free_page(GFP_KERNEL);    (in pgd_alloc())
    142  if (likely(pgd != NULL)) {    (in pgd_alloc())
    143  memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));    (in pgd_alloc())
    145  sync_kernel_mappings(pgd);    (in pgd_alloc())
    [all …]
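
Several entries in this listing (the riscv pgalloc.h above, and the um, sh and hexagon hits further down) share the same pgd_alloc() shape: grab a fresh page, zero the user portion, then copy the kernel portion from init_mm's reference table so kernel mappings show up in every address space. A generic sketch of that pattern, assuming the usual USER_PTRS_PER_PGD / PTRS_PER_PGD split; it is not the verbatim code of any one architecture. The riscv sync_kernel_mappings() hit above is exactly the memcpy() below factored into a helper.

    #include <linux/mm.h>
    #include <linux/gfp.h>
    #include <linux/string.h>

    static pgd_t *pgd_alloc_sketch(struct mm_struct *mm)
    {
            pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

            if (!pgd)
                    return NULL;

            /* User half starts out empty... */
            memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

            /* ...kernel half is copied from the master (init_mm) table. */
            memcpy(pgd + USER_PTRS_PER_PGD,
                   init_mm.pgd + USER_PTRS_PER_PGD,
                   (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

            return pgd;
    }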

/linux-6.3-rc2/arch/powerpc/include/asm/book3s/64/

pgalloc.h
     42  free_page((unsigned long)pgd);    (in radix__pgd_free())
     44  free_pages((unsigned long)pgd, 4);    (in radix__pgd_free())
     50  pgd_t *pgd;    (in pgd_alloc(), local)
     55  pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),    (in pgd_alloc())
     57  if (unlikely(!pgd))    (in pgd_alloc())
     58  return pgd;    (in pgd_alloc())
     65  kmemleak_no_scan(pgd);    (in pgd_alloc())
     76  memset(pgd, 0, PGD_TABLE_SIZE);    (in pgd_alloc())
     78  return pgd;    (in pgd_alloc())
     84  return radix__pgd_free(mm, pgd);    (in pgd_free())
    [all …]
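
Not every architecture takes its PGD from the page allocator: the book3s/64 pgd_alloc() above pulls it from a slab cache sized by PGD_INDEX_SIZE and, per the kmemleak_no_scan() hit, excludes the table from kmemleak scanning. A rough sketch of that allocation path, with the radix/hash branching and the gfp plumbing of the real function left out:

    static pgd_t *pgd_alloc_book3s64_sketch(struct mm_struct *mm)
    {
            pgd_t *pgd;

            /* PGDs come from a dedicated kmem cache, not whole pages. */
            pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
            if (unlikely(!pgd))
                    return pgd;

            /* Matches the kmemleak_no_scan() hit: skip leak-scanning it. */
            kmemleak_no_scan(pgd);

            memset(pgd, 0, PGD_TABLE_SIZE);
            return pgd;
    }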

/linux-6.3-rc2/arch/arm/mm/

pgd.c
     21  #define __pgd_free(pgd) kfree(pgd)    (argument)
     24  #define __pgd_free(pgd) free_pages((unsigned long)pgd, 2)    (argument)
    144  pgd_t *pgd;    (in pgd_free(), local)
    153  pgd = pgd_base + pgd_index(0);    (in pgd_free())
    154  if (pgd_none_or_clear_bad(pgd))    (in pgd_free())
    157  p4d = p4d_offset(pgd, 0);    (in pgd_free())
    181  pgd_clear(pgd);    (in pgd_free())
    188  for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {    (in pgd_free())
    189  if (pgd_none_or_clear_bad(pgd))    (in pgd_free())
    193  p4d = p4d_offset(pgd, 0);    (in pgd_free())
    [all …]

/linux-6.3-rc2/mm/kasan/

init.c
    250  pgd_populate(&init_mm, pgd,    (in kasan_populate_early_shadow())
    252  p4d = p4d_offset(pgd, addr);    (in kasan_populate_early_shadow())
    264  if (pgd_none(*pgd)) {    (in kasan_populate_early_shadow())
    339  pgd_clear(pgd);    (in kasan_free_p4d())
    443  pgd_t *pgd;    (in kasan_remove_zero_shadow(), local)
    457  pgd = pgd_offset_k(addr);    (in kasan_remove_zero_shadow())
    458  if (!pgd_present(*pgd))    (in kasan_remove_zero_shadow())
    461  if (kasan_p4d_table(*pgd)) {    (in kasan_remove_zero_shadow())
    464  pgd_clear(pgd);    (in kasan_remove_zero_shadow())
    469  p4d = p4d_offset(pgd, addr);    (in kasan_remove_zero_shadow())
    [all …]

/linux-6.3-rc2/arch/x86/power/

hibernate_32.c
     30  static pmd_t *resume_one_md_table_init(pgd_t *pgd)    (in resume_one_md_table_init(), argument)
     41  set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));    (in resume_one_md_table_init())
     42  p4d = p4d_offset(pgd, 0);    (in resume_one_md_table_init())
     47  p4d = p4d_offset(pgd, 0);    (in resume_one_md_table_init())
     84  pgd_t *pgd;    (in resume_physical_mapping_init(), local)
     90  pgd = pgd_base + pgd_idx;    (in resume_physical_mapping_init())
     93  for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {    (in resume_physical_mapping_init())
     94  pmd = resume_one_md_table_init(pgd);    (in resume_physical_mapping_init())
    147  pgd_t *pgd;    (in set_up_temporary_text_mapping(), local)
    151  pgd = pgd_base + pgd_index(restore_jump_address);    (in set_up_temporary_text_mapping())
    [all …]

hibernate_64.c
     28  static int set_up_temporary_text_mapping(pgd_t *pgd)    (in set_up_temporary_text_mapping(), argument)
     77  set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);    (in set_up_temporary_text_mapping())
     81  set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);    (in set_up_temporary_text_mapping())
    100  pgd_t *pgd;    (in set_up_temporary_mappings(), local)
    104  pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);    (in set_up_temporary_mappings())
    105  if (!pgd)    (in set_up_temporary_mappings())
    109  result = set_up_temporary_text_mapping(pgd);    (in set_up_temporary_mappings())
    118  result = kernel_ident_mapping_init(&info, pgd, mstart, mend);    (in set_up_temporary_mappings())
    123  temp_pgt = __pa(pgd);    (in set_up_temporary_mappings())

/linux-6.3-rc2/arch/sh/mm/

hugetlbpage.c
     27  pgd_t *pgd;    (in huge_pte_alloc(), local)
     33  pgd = pgd_offset(mm, addr);    (in huge_pte_alloc())
     34  if (pgd) {    (in huge_pte_alloc())
     35  p4d = p4d_alloc(mm, pgd, addr);    (in huge_pte_alloc())
     52  pgd_t *pgd;    (in huge_pte_offset(), local)
     58  pgd = pgd_offset(mm, addr);    (in huge_pte_offset())
     59  if (pgd) {    (in huge_pte_offset())
     60  p4d = p4d_offset(pgd, addr);    (in huge_pte_offset())

fault.c
     39  pgd_t *pgd;    (in show_pte(), local)
     42  pgd = mm->pgd;    (in show_pte())
     44  pgd = get_TTB();    (in show_pte())
     46  if (unlikely(!pgd))    (in show_pte())
     47  pgd = swapper_pg_dir;    (in show_pte())
     50  pr_alert("pgd = %p\n", pgd);    (in show_pte())
     51  pgd += pgd_index(addr);    (in show_pte())
     53  (u64)pgd_val(*pgd));    (in show_pte())
     61  if (pgd_none(*pgd))    (in show_pte())
     64  if (pgd_bad(*pgd)) {    (in show_pte())
    [all …]
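
The show_pte() hits above follow the canonical descent order of the generic page-table API. As a minimal sketch of a read-only lookup for a kernel virtual address (the helper name and the decision to ignore huge/large entries at the pud/pmd levels are ours):

    #include <linux/mm.h>

    /* Return the pte mapping a kernel address, or NULL if any level of
     * the walk is missing. Huge-entry short-circuits are omitted. */
    static pte_t *walk_kernel_address_sketch(unsigned long addr)
    {
            pgd_t *pgd = pgd_offset_k(addr);
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none(*pgd) || pgd_bad(*pgd))
                    return NULL;

            p4d = p4d_offset(pgd, addr);
            if (p4d_none(*p4d) || p4d_bad(*p4d))
                    return NULL;

            pud = pud_offset(p4d, addr);
            if (pud_none(*pud) || pud_bad(*pud))
                    return NULL;

            pmd = pmd_offset(pud, addr);
            if (pmd_none(*pmd) || pmd_bad(*pmd))
                    return NULL;

            return pte_offset_kernel(pmd, addr);
    }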

pgtable.c
     12  pgd_t *pgd = x;    (in pgd_ctor(), local)
     14  memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));    (in pgd_ctor())
     15  memcpy(pgd + USER_PTRS_PER_PGD,    (in pgd_ctor())
     37  void pgd_free(struct mm_struct *mm, pgd_t *pgd)    (in pgd_free(), argument)
     39  kmem_cache_free(pgd_cachep, pgd);    (in pgd_free())

/linux-6.3-rc2/arch/mips/mm/

hugetlbpage.c
     27  pgd_t *pgd;    (in huge_pte_alloc(), local)
     32  pgd = pgd_offset(mm, addr);    (in huge_pte_alloc())
     33  p4d = p4d_alloc(mm, pgd, addr);    (in huge_pte_alloc())
     44  pgd_t *pgd;    (in huge_pte_offset(), local)
     49  pgd = pgd_offset(mm, addr);    (in huge_pte_offset())
     50  if (pgd_present(*pgd)) {    (in huge_pte_offset())
     51  p4d = p4d_offset(pgd, addr);    (in huge_pte_offset())
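
The hugetlb helpers here, like the sh and loongarch copies elsewhere in this listing, open with the same allocate-as-you-descend pattern: pgd_offset() to find the slot, then *_alloc() calls that populate each lower level on demand. A hedged sketch of that shape; the level at which the walk stops and hands back a "huge PTE" depends on the architecture and huge-page size, so the PMD-sized tail below is illustrative only.

    #include <linux/mm.h>

    static pte_t *huge_pte_alloc_sketch(struct mm_struct *mm,
                                        unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            p4d_t *p4d;
            pud_t *pud;

            /* Each *_alloc() creates the next-level table if needed. */
            p4d = p4d_alloc(mm, pgd, addr);
            if (!p4d)
                    return NULL;

            pud = pud_alloc(mm, p4d, addr);
            if (!pud)
                    return NULL;

            /* For a PMD-sized huge page, the PMD slot itself is the pte. */
            return (pte_t *)pmd_alloc(mm, pud, addr);
    }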

/linux-6.3-rc2/arch/x86/kernel/

machine_kexec_32.c
     45  free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER);    (in machine_kexec_free_page_tables())
     46  image->arch.pgd = NULL;    (in machine_kexec_free_page_tables())
     69  if (!image->arch.pgd ||    (in machine_kexec_alloc_page_tables())
     80  pgd_t *pgd, pmd_t *pmd, pte_t *pte,    (in machine_kexec_page_table_set_one(), argument)
     86  pgd += pgd_index(vaddr);    (in machine_kexec_page_table_set_one())
     88  if (!(pgd_val(*pgd) & _PAGE_PRESENT))    (in machine_kexec_page_table_set_one())
     89  set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));    (in machine_kexec_page_table_set_one())
     91  p4d = p4d_offset(pgd, vaddr);    (in machine_kexec_page_table_set_one())
    110  image->arch.pgd, pmd, image->arch.pte0,    (in machine_kexec_prepare_page_tables())
    116  image->arch.pgd, pmd, image->arch.pte1,    (in machine_kexec_prepare_page_tables())
    [all …]

/linux-6.3-rc2/arch/parisc/include/asm/

pgalloc.h
     21  pgd_t *pgd;    (in pgd_alloc(), local)
     23  pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);    (in pgd_alloc())
     24  if (unlikely(pgd == NULL))    (in pgd_alloc())
     27  memset(pgd, 0, PAGE_SIZE << PGD_TABLE_ORDER);    (in pgd_alloc())
     29  return pgd;    (in pgd_alloc())
     32  static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)    (in pgd_free(), argument)
     34  free_pages((unsigned long)pgd, PGD_TABLE_ORDER);    (in pgd_free())

/linux-6.3-rc2/arch/hexagon/include/asm/

pgalloc.h
     23  pgd_t *pgd;    (in pgd_alloc(), local)
     25  pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);    (in pgd_alloc())
     35  memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t));    (in pgd_alloc())
     39  mm->context.ptbase = __pa(pgd);    (in pgd_alloc())
     41  return pgd;    (in pgd_alloc())
     83  pmdindex = (pgd_t *)pmd - mm->pgd;    (in pmd_populate_kernel())
     84  ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;    (in pmd_populate_kernel())

/linux-6.3-rc2/arch/loongarch/mm/

hugetlbpage.c
     19  pgd_t *pgd;    (in huge_pte_alloc(), local)
     24  pgd = pgd_offset(mm, addr);    (in huge_pte_alloc())
     25  p4d = p4d_alloc(mm, pgd, addr);    (in huge_pte_alloc())
     36  pgd_t *pgd;    (in huge_pte_offset(), local)
     41  pgd = pgd_offset(mm, addr);    (in huge_pte_offset())
     42  if (pgd_present(*pgd)) {    (in huge_pte_offset())
     43  p4d = p4d_offset(pgd, addr);    (in huge_pte_offset())

/linux-6.3-rc2/arch/s390/mm/

page-states.c
    131  static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)    (in mark_kernel_p4d(), argument)
    138  p4d = p4d_offset(pgd, addr);    (in mark_kernel_p4d())
    156  pgd_t *pgd;    (in mark_kernel_pgd(), local)
    160  pgd = pgd_offset_k(addr);    (in mark_kernel_pgd())
    163  if (pgd_none(*pgd))    (in mark_kernel_pgd())
    165  if (!pgd_folded(*pgd)) {    (in mark_kernel_pgd())
    166  page = phys_to_page(pgd_val(*pgd));    (in mark_kernel_pgd())
    170  mark_kernel_p4d(pgd, addr, next);    (in mark_kernel_pgd())
    171  } while (pgd++, addr = next, addr != MODULES_END);    (in mark_kernel_pgd())

/linux-6.3-rc2/arch/um/kernel/

mem.c
    114  pgd_t *pgd;    (in fixrange_init(), local)
    124  pgd = pgd_base + i;    (in fixrange_init())
    126  for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {    (in fixrange_init())
    127  p4d = p4d_offset(pgd, vaddr);    (in fixrange_init())
    204  pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);    (in pgd_alloc(), local)
    206  if (pgd) {    (in pgd_alloc())
    207  memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));    (in pgd_alloc())
    208  memcpy(pgd + USER_PTRS_PER_PGD,    (in pgd_alloc())
    212  return pgd;    (in pgd_alloc())

/linux-6.3-rc2/arch/x86/include/asm/

pgtable_64.h
    144  pgd_t pgd;    (in native_set_p4d(), local)
    151  pgd = native_make_pgd(native_p4d_val(p4d));    (in native_set_p4d())
    152  pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);    (in native_set_p4d())
    153  WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));    (in native_set_p4d())
    161  static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)    (in native_set_pgd(), argument)
    163  WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));    (in native_set_pgd())
    166  static inline void native_pgd_clear(pgd_t *pgd)    (in native_pgd_clear(), argument)
    168  native_set_pgd(pgd, native_make_pgd(0));    (in native_pgd_clear())
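
The pgtable_64.h hits are the glue between the 4- and 5-level layouts on x86-64: with a folded p4d, storing a p4d entry really stores a pgd entry, so native_set_p4d() round-trips the value through pgd form and the PTI user-page-table hook before the WRITE_ONCE(). A condensed sketch from the matches above; the real function's fast path (5-level paging enabled, or PTI compiled out) is omitted here.

    static inline void native_set_p4d_sketch(p4d_t *p4dp, p4d_t p4d)
    {
            pgd_t pgd;

            /* Folded p4d: this slot is a pgd slot, so let PTI see the write. */
            pgd = native_make_pgd(native_p4d_val(p4d));
            pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
            WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
    }

    /* Plain pgd writes take the same detour (lines 161-168 above). */
    static inline void native_set_pgd_sketch(pgd_t *pgdp, pgd_t pgd)
    {
            WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
    }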

/linux-6.3-rc2/arch/x86/platform/efi/

efi_64.c
     70  pgd_t *pgd, *efi_pgd;    (in efi_alloc_page_tables(), local)
     80  pgd = efi_pgd + pgd_index(EFI_VA_END);    (in efi_alloc_page_tables())
     81  p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);    (in efi_alloc_page_tables())
     89  efi_mm.pgd = efi_pgd;    (in efi_alloc_page_tables())
     97  free_page((unsigned long)pgd_page_vaddr(*pgd));    (in efi_alloc_page_tables())
    113  pgd_t *efi_pgd = efi_mm.pgd;    (in efi_sync_low_kernel_mappings())
    183  pgd_t *pgd = efi_mm.pgd;    (in efi_setup_page_tables(), local)
    218  if (sev_es_efi_map_ghcbs(pgd)) {    (in efi_setup_page_tables())
    243  if (kernel_unmap_pages_in_pgd(pgd, text, npages)) {    (in efi_setup_page_tables())
    274  pgd_t *pgd = efi_mm.pgd;    (in __map_region(), local)
    [all …]

/linux-6.3-rc2/arch/x86/xen/

mmu_pv.c
    368  return pte_mfn_to_pfn(pgd.pgd);    (in xen_pgd_val())
    382  pgd = pte_pfn_to_mfn(pgd);    (in xen_make_pgd())
    627  if (pgd_none(pgd[i]))    (in __xen_pgd_walk())
   1120  pgd_t *pgd;    (in xen_cleanmfnmap(), local)
   1126  pgd = pgd_offset_k(vaddr);    (in xen_cleanmfnmap())
   1127  p4d = p4d_offset(pgd, 0);    (in xen_cleanmfnmap())
   1395  pgd_t *pgd = mm->pgd;    (in xen_pgd_alloc(), local)
   1732  l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);    (in xen_setup_kernel_pagetable())
   1840  pgd_t pgd;    (in xen_early_virt_to_phys(), local)
   1848  if (!pgd_present(pgd))    (in xen_early_virt_to_phys())
    [all …]