Lines Matching refs:page

48 #define BACKING_PAGE(page) (is_device_private_page((page)) ? \  argument
49 (page)->zone_device_data : (page))
121 struct page *free_pages;
196 static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page) in dmirror_page_to_chunk() argument
198 return container_of(page_pgmap(page), struct dmirror_chunk, in dmirror_page_to_chunk()
202 static struct dmirror_device *dmirror_page_to_device(struct page *page) in dmirror_page_to_device() argument
205 return dmirror_page_to_chunk(page)->mdevice; in dmirror_page_to_device()
216 struct page *page; in dmirror_do_fault() local
226 page = hmm_pfn_to_page(*pfns); in dmirror_do_fault()
227 WARN_ON(!page); in dmirror_do_fault()
229 entry = page; in dmirror_do_fault()
371 struct page *page; in dmirror_do_read() local
374 page = xa_untag_pointer(entry); in dmirror_do_read()
375 if (!page) in dmirror_do_read()
378 memcpy_from_page(ptr, page, 0, PAGE_SIZE); in dmirror_do_read()
437 struct page *page; in dmirror_do_write() local
440 page = xa_untag_pointer(entry); in dmirror_do_write()
441 if (!page || xa_pointer_tag(entry) != DPT_XA_TAG_WRITE) in dmirror_do_write()
444 memcpy_to_page(page, 0, ptr, PAGE_SIZE); in dmirror_do_write()
495 struct page **ppage) in dmirror_allocate_chunk()
576 struct page *page = pfn_to_page(pfn); in dmirror_allocate_chunk() local
578 page->zone_device_data = mdevice->free_pages; in dmirror_allocate_chunk()
579 mdevice->free_pages = page; in dmirror_allocate_chunk()
601 static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice) in dmirror_devmem_alloc_page()
603 struct page *dpage = NULL; in dmirror_devmem_alloc_page()
604 struct page *rpage = NULL; in dmirror_devmem_alloc_page()
650 struct page *spage; in dmirror_migrate_alloc_and_copy()
651 struct page *dpage; in dmirror_migrate_alloc_and_copy()
652 struct page *rpage; in dmirror_migrate_alloc_and_copy()
710 static int dmirror_atomic_map(unsigned long addr, struct page *page, in dmirror_atomic_map() argument
718 entry = xa_tag_pointer(page, DPT_XA_TAG_ATOMIC); in dmirror_atomic_map()
743 struct page *dpage; in dmirror_migrate_finalize_and_map()
788 struct page *page; in dmirror_exclusive() local
790 page = make_device_exclusive(mm, addr, NULL, &folio); in dmirror_exclusive()
791 if (IS_ERR(page)) { in dmirror_exclusive()
792 ret = PTR_ERR(page); in dmirror_exclusive()
796 ret = dmirror_atomic_map(addr, page, dmirror); in dmirror_exclusive()
835 struct page *dpage, *spage; in dmirror_devmem_fault_alloc_and_copy()
1017 struct page *page; in dmirror_mkentry() local
1028 page = hmm_pfn_to_page(entry); in dmirror_mkentry()
1029 if (is_device_private_page(page)) { in dmirror_mkentry()
1031 if (dmirror->mdevice == dmirror_page_to_device(page)) in dmirror_mkentry()
1035 } else if (is_device_coherent_page(page)) { in dmirror_mkentry()
1037 if (dmirror->mdevice == dmirror_page_to_device(page)) in dmirror_mkentry()
1041 } else if (is_zero_pfn(page_to_pfn(page))) in dmirror_mkentry()
1209 struct page *dpage, *spage; in dmirror_device_evict_chunk()
1236 struct page *page; in dmirror_remove_free_pages() local
1238 for (page = mdevice->free_pages; page; page = page->zone_device_data) in dmirror_remove_free_pages()
1239 if (dmirror_page_to_chunk(page) == devmem) in dmirror_remove_free_pages()
1240 mdevice->free_pages = page->zone_device_data; in dmirror_remove_free_pages()
1350 struct page *page; in dmirror_fops_mmap() local
1353 page = alloc_page(GFP_KERNEL | __GFP_ZERO); in dmirror_fops_mmap()
1354 if (!page) in dmirror_fops_mmap()
1357 ret = vm_insert_page(vma, addr, page); in dmirror_fops_mmap()
1359 __free_page(page); in dmirror_fops_mmap()
1362 put_page(page); in dmirror_fops_mmap()
1377 static void dmirror_devmem_free(struct page *page) in dmirror_devmem_free() argument
1379 struct page *rpage = BACKING_PAGE(page); in dmirror_devmem_free()
1382 if (rpage != page) in dmirror_devmem_free()
1385 mdevice = dmirror_page_to_device(page); in dmirror_devmem_free()
1389 if (!dmirror_page_to_chunk(page)->remove) { in dmirror_devmem_free()
1391 page->zone_device_data = mdevice->free_pages; in dmirror_devmem_free()
1392 mdevice->free_pages = page; in dmirror_devmem_free()
1402 struct page *rpage; in dmirror_devmem_fault()
1411 rpage = vmf->page->zone_device_data; in dmirror_devmem_fault()
1422 args.fault_page = vmf->page; in dmirror_devmem_fault()
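A pattern that recurs throughout the matches above is the singly linked free-page list threaded through page->zone_device_data: lines 578-579 and 1391-1392 push a device page onto mdevice->free_pages, line 1238 walks the list, and the BACKING_PAGE() macro at line 48 shows the same field doubling as the pointer to the backing system page once a device-private page is allocated. The following is a minimal user-space sketch of that free-list pattern only, not kernel code: "struct page" here is a stand-in with just the fields needed for illustration, and free_page_push()/free_page_pop() are hypothetical helper names, not functions in the driver.

/*
 * User-space sketch of the free-page list pattern seen in the listing:
 * free device pages are chained through page->zone_device_data, so
 * push and pop are O(1) pointer updates on mdevice->free_pages.
 * "struct page" is a stand-in, not the kernel structure.
 */
#include <stdio.h>
#include <stddef.h>

struct page {
	void *zone_device_data;	/* next free page while on the free list */
	int id;			/* illustration only */
};

struct dmirror_device {
	struct page *free_pages;	/* head of the singly linked free list */
};

/* Push a page onto the free list (the pattern at lines 578-579, 1391-1392). */
static void free_page_push(struct dmirror_device *mdevice, struct page *page)
{
	page->zone_device_data = mdevice->free_pages;
	mdevice->free_pages = page;
}

/* Pop the head of the free list, or return NULL if it is empty. */
static struct page *free_page_pop(struct dmirror_device *mdevice)
{
	struct page *page = mdevice->free_pages;

	if (page)
		mdevice->free_pages = page->zone_device_data;
	return page;
}

int main(void)
{
	struct dmirror_device mdevice = { .free_pages = NULL };
	struct page pages[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct page *p;

	for (int i = 0; i < 3; i++)
		free_page_push(&mdevice, &pages[i]);

	while ((p = free_page_pop(&mdevice)))
		printf("popped page %d\n", p->id);
	return 0;
}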