References to "zero_page" in linux-6.3-rc2, grouped by directory and file.
The number on the left is the kernel source line of the hit; the enclosing
function and usage kind (local, argument, member) are shown where the
indexer reported them.

/linux-6.3-rc2/include/trace/events/

  fs_dax.h
      65  struct page *zero_page,
      67  TP_ARGS(inode, vmf, zero_page, radix_entry),
      72  __field(struct page *, zero_page)
      81  __entry->zero_page = zero_page;
      91  __entry->zero_page,
      99  struct page *zero_page, void *radix_entry), \
     100  TP_ARGS(inode, vmf, zero_page, radix_entry))

/linux-6.3-rc2/arch/arm/mm/

  nommu.c
     158  void *zero_page;    (in paging_init(), local)
     164  zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);    (in paging_init())
     165  if (!zero_page)    (in paging_init())
     171  empty_zero_page = virt_to_page(zero_page);    (in paging_init())

  mmu.c
    1763  void *zero_page;    (in paging_init(), local)
    1786  zero_page = early_alloc(PAGE_SIZE);    (in paging_init())
    1790  empty_zero_page = virt_to_page(zero_page);    (in paging_init())

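Both paging_init() entries above follow the same boot-time pattern: allocate one
zero-filled page from the early allocator and publish its struct page as
empty_zero_page. A kernel-style sketch of that pattern, condensed from the lines
listed; setup_zero_page() is an invented name (the real code does this inline in
paging_init()), and the panic() on failure is an assumption about the error path.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>

struct page *empty_zero_page;   /* the arch's shared read-only zero page */

static void __init setup_zero_page(void)
{
        void *zero_page;

        /* memblock_alloc() returns zeroed memory, so no memset() is needed. */
        zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!zero_page)
                panic("%s: failed to allocate the zero page\n", __func__);

        empty_zero_page = virt_to_page(zero_page);
}
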
/linux-6.3-rc2/arch/arm64/kernel/

  hibernate.c
     403  void *zero_page;    (in swsusp_arch_resume(), local)
     428  zero_page = (void *)get_safe_page(GFP_ATOMIC);    (in swsusp_arch_resume())
     429  if (!zero_page) {    (in swsusp_arch_resume())
     465  resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));    (in swsusp_arch_resume())

  machine_kexec.c
     152  kimage->arch.zero_page = __pa_symbol(empty_zero_page);    (in machine_kexec_post_load())

  asm-offsets.c
     191  DEFINE(KIMAGE_ARCH_ZERO_PAGE, offsetof(struct kimage, arch.zero_page));    (in main())

/linux-6.3-rc2/mm/

  huge_memory.c
     156  struct page *zero_page;    (in get_huge_zero_page(), local)
     163  if (!zero_page) {    (in get_huge_zero_page())
     168  if (cmpxchg(&huge_zero_page, NULL, zero_page)) {    (in get_huge_zero_page())
     170  __free_pages(zero_page, compound_order(zero_page));    (in get_huge_zero_page())
     223  BUG_ON(zero_page == NULL);    (in shrink_huge_zero_page_scan())
     225  __free_pages(zero_page, compound_order(zero_page));    (in shrink_huge_zero_page_scan())
     770  struct page *zero_page)    (in set_huge_zero_page(), argument)
     775  entry = mk_pmd(zero_page, vma->vm_page_prot);    (in set_huge_zero_page())
     799  struct page *zero_page;    (in do_huge_pmd_anonymous_page(), local)
     805  if (unlikely(!zero_page)) {    (in do_huge_pmd_anonymous_page())
     [all …]

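The get_huge_zero_page() hits at lines 156-170 show a lock-free singleton pattern:
allocate a candidate page, try to publish it with cmpxchg(), and free the candidate
if another CPU won the race. A minimal userspace sketch of the same pattern, with
the kernel's alloc_pages()/__free_pages()/cmpxchg() replaced by calloc()/free()/C11
atomics; the names here are illustrative, not kernel code.

#include <stdatomic.h>
#include <stdlib.h>

#define ZERO_BUF_SIZE 4096

static _Atomic(void *) shared_zero_buf;   /* analogue of huge_zero_page */

static void *get_shared_zero_buf(void)
{
        void *cur = atomic_load(&shared_zero_buf);
        if (cur)
                return cur;                       /* already published */

        void *candidate = calloc(1, ZERO_BUF_SIZE);
        if (!candidate)
                return NULL;

        void *expected = NULL;
        if (!atomic_compare_exchange_strong(&shared_zero_buf,
                                            &expected, candidate)) {
                /* Another thread won the race: drop our copy, use theirs. */
                free(candidate);
                return expected;
        }
        return candidate;
}
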
/linux-6.3-rc2/arch/arm64/include/asm/

  kexec.h
     122  phys_addr_t zero_page;    (member)

  assembler.h
     487  .macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
     488  phys_to_ttbr \tmp, \zero_page

/linux-6.3-rc2/drivers/dma/

  bcm2835-dma.c
      50  dma_addr_t zero_page;    (member)
     750  if (buf_addr == od->zero_page && !c->is_lite_channel)    (in bcm2835_dma_prep_dma_cyclic())
     852  dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,    (in bcm2835_dma_free())
     935  od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,    (in bcm2835_dma_probe())
     938  if (dma_mapping_error(od->ddev.dev, od->zero_page)) {    (in bcm2835_dma_probe())

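In bcm2835-dma.c the driver maps the global ZERO_PAGE(0) once at probe time (line
935) so that later cyclic transfers sourced from the zero page can be recognised
(line 750), and unmaps it on teardown (line 852). A condensed kernel-style sketch
of just the map/unmap step, assuming a generic struct device *dev; the helper names
are invented for illustration and are not the driver's API.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map the shared zero page as a DMA source.  Its contents never change,
 * so per-transfer CPU cache maintenance can be skipped. */
static dma_addr_t map_zero_page_for_dma(struct device *dev)
{
        return dma_map_page_attrs(dev, ZERO_PAGE(0), 0, PAGE_SIZE,
                                  DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}

static void unmap_zero_page_for_dma(struct device *dev, dma_addr_t zero_dma)
{
        dma_unmap_page_attrs(dev, zero_dma, PAGE_SIZE, DMA_TO_DEVICE,
                             DMA_ATTR_SKIP_CPU_SYNC);
}

Callers are expected to check the returned address with dma_mapping_error() before
using it, as the probe-time hit at line 938 does.
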
/linux-6.3-rc2/fs/

  dax.c
    1197  struct page *zero_page;    (in dax_pmd_load_hole(), local)
    1202  zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);    (in dax_pmd_load_hole())
    1204  if (unlikely(!zero_page))    (in dax_pmd_load_hole())
    1207  pfn = page_to_pfn_t(zero_page);    (in dax_pmd_load_hole())
    1227  pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);    (in dax_pmd_load_hole())
    1231  trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);    (in dax_pmd_load_hole())
    1237  trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);    (in dax_pmd_load_hole())

/linux-6.3-rc2/include/target/

  target_core_fabric.h
     215  u32 length, bool zero_page, bool chainable);

/linux-6.3-rc2/drivers/nvdimm/

  pfn_devs.c
     368  void *zero_page = page_address(ZERO_PAGE(0));    (in nd_pfn_clear_memmap_errors(), local)
     406  rc = nvdimm_write_bytes(ndns, nsoff, zero_page,    (in nd_pfn_clear_memmap_errors())

  btt.c
     510  void *zero_page = page_address(ZERO_PAGE(0));    (in arena_clear_freelist_error(), local)
     520  ret = arena_write_bytes(arena, nsoff, zero_page,    (in arena_clear_freelist_error())

/linux-6.3-rc2/drivers/target/

  target_core_transport.c
    2729  bool zero_page, bool chainable)    (in target_alloc_sgl(), argument)
    2731  gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);    (in target_alloc_sgl())

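Here the zero_page argument does not name a page at all: target_alloc_sgl() (line
2731) treats it as a request for pre-zeroed backing pages by folding __GFP_ZERO
into the allocation mask. A minimal sketch of that flag selection for a single
page; alloc_sgl_page() is an invented helper, not the target core API.

#include <linux/gfp.h>
#include <linux/types.h>

static struct page *alloc_sgl_page(bool zero_page)
{
        /* Let the page allocator hand back already-zeroed memory when the
         * caller asks for it, mirroring the gfp selection at line 2731. */
        gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);

        return alloc_page(gfp);
}
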
/linux-6.3-rc2/virt/kvm/

  kvm_main.c
    3289  const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));    (in kvm_clear_guest(), local)
    3296  ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len);    (in kvm_clear_guest())

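kvm_clear_guest() (lines 3289-3296) never allocates a scratch buffer: it reuses the
kernel's read-only zero page as the write source and copies it into guest memory
one chunk at a time. A simplified kernel-style sketch of that loop, assuming a
page-aligned gpa for brevity (the real function also handles a leading partial
page); clear_guest_pages() is an invented name, not the KVM API.

#include <linux/kvm_host.h>
#include <linux/mm.h>

/* Zero-fill 'len' bytes of guest memory starting at page-aligned 'gpa'
 * by writing the shared zero page repeatedly. */
static int clear_guest_pages(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        const void *zero_page = (const void *)__va(page_to_phys(ZERO_PAGE(0)));
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int ret;

        while (len) {
                unsigned int chunk = min_t(unsigned long, len, PAGE_SIZE);

                ret = kvm_write_guest_page(kvm, gfn, zero_page, 0, chunk);
                if (ret)
                        return ret;
                gfn++;
                len -= chunk;
        }
        return 0;
}
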
/linux-6.3-rc2/arch/x86/kvm/vmx/

  vmx.c
    3826  const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));    (in init_rmode_tss(), local)
    3831  if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE))    (in init_rmode_tss())