Searched refs:pgd_page (Results 1 – 9 of 9) sorted by relevance
21 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
1132 #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd)) macro
436 struct page *pgd_page; in kvm_riscv_mmu_alloc_pgd() local
443 pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, in kvm_riscv_mmu_alloc_pgd()
445 if (!pgd_page) in kvm_riscv_mmu_alloc_pgd()
447 kvm->arch.pgd = page_to_virt(pgd_page); in kvm_riscv_mmu_alloc_pgd()
448 kvm->arch.pgd_phys = page_to_phys(pgd_page); in kvm_riscv_mmu_alloc_pgd()
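The riscv KVM hits above show the usual pattern for backing a guest page-table root with freshly zeroed pages. A minimal sketch of that pattern, assuming kernel context; the order parameter, error handling, and sketch_alloc_pgd name are illustrative assumptions, while the struct fields mirror the snippet:

/* Allocate a zeroed PGD and record both of its addresses: the hardware
 * walker needs the physical address, while the kernel edits the table
 * through the virtual mapping. */
static int sketch_alloc_pgd(struct kvm *kvm, unsigned int order)
{
        struct page *pgd_page;

        /* __GFP_ZERO so every top-level entry starts out invalid */
        pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!pgd_page)
                return -ENOMEM;

        kvm->arch.pgd = page_to_virt(pgd_page);
        kvm->arch.pgd_phys = page_to_phys(pgd_page);
        return 0;
}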
388 static inline struct page *pgd_page(pgd_t pgd) in pgd_page() function
392 #define pgd_page(pgd) pgd_page(pgd) macro
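This hit pairs an inline function with a self-referential macro, a common kernel header idiom: the inline function gives type checking, and the macro lets generic headers probe with #ifdef whether the architecture supplied its own implementation. A sketch of the idiom (the function body here is illustrative):

/* "#define pgd_page(pgd) pgd_page(pgd)" makes "#ifndef pgd_page" in
 * generic headers see an architecture override and skip the fallback. */
static inline struct page *pgd_page(pgd_t pgd)
{
        return pfn_to_page(pgd_pfn(pgd));       /* body is illustrative */
}
#define pgd_page(pgd) pgd_page(pgd)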
178 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, in kernel_ident_mapping_init() argument
194 pgd_t *pgd = pgd_page + pgd_index(addr); in kernel_ident_mapping_init()
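Note the pointer arithmetic at line 194: pgd_index() maps a virtual address to a slot in the top-level table, so entry lookup is plain addition. A hedged sketch of how such a walk steps through PGDIR_SIZE-sized chunks; sketch_walk_pgds and its loop bounds are assumptions, not kernel code:

/* Visit the pgd entry covering each PGDIR_SIZE chunk of [start, end). */
static void sketch_walk_pgds(pgd_t *pgd_page, unsigned long start,
                             unsigned long end)
{
        unsigned long addr, next;

        for (addr = start; addr < end; addr = next) {
                pgd_t *pgd = pgd_page + pgd_index(addr);

                /* advance to the next top-level table boundary */
                next = (addr & PGDIR_MASK) + PGDIR_SIZE;
                if (next > end)
                        next = end;
                /* ... populate or inspect *pgd for this chunk ... */
                (void)pgd;
        }
}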
1592 get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO); in register_page_bootmem_memmap()
508 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); in xen_get_user_pgd() local
509 unsigned offset = pgd - pgd_page; in xen_get_user_pgd()
513 struct page *page = virt_to_page(pgd_page); in xen_get_user_pgd()
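The Xen hit works backwards from a pointer to a single pgd entry: masking with PAGE_MASK finds the start of the page-aligned table, pointer subtraction recovers the entry's slot, and virt_to_page() yields the struct page backing the table. A sketch of just that address arithmetic; sketch_pgd_slot is a hypothetical name, and what Xen actually does with page->private is omitted:

/* Given a pointer to one pgd entry, recover the table base, the
 * entry's index within it, and the page backing the table. */
static unsigned int sketch_pgd_slot(pgd_t *pgd)
{
        pgd_t *pgd_page = (pgd_t *)((unsigned long)pgd & PAGE_MASK);
        unsigned int offset = pgd - pgd_page;   /* slot index */
        struct page *page = virt_to_page(pgd_page);

        (void)page;     /* xen_get_user_pgd() reads page->private here */
        return offset;
}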
1580 #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd)) macro
1204 #define pgd_page(pgd) pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd))) macro
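The three macro hits, two spelled pfn_to_page(pgd_pfn(pgd)) and one going through __pgd_to_phys(), are per-architecture spellings of the same operation: read the page frame number a pgd entry references and convert it to its struct page. A generic restatement, assuming the pgd_pfn() accessor seen in the results above:

/* pgd_page(): struct page of the next-level table referenced by a
 * pgd entry, i.e. entry -> pfn -> struct page. */
static inline struct page *sketch_pgd_page(pgd_t pgd)
{
        return pfn_to_page(pgd_pfn(pgd));
}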
Completed in 47 milliseconds