/linux-6.3-rc2/fs/verity/
  verify.c
     68  return PageChecked(hpage);  in is_hash_block_verified()
     85  if (PageChecked(hpage)) {  in is_hash_block_verified()
     94  if (PageChecked(hpage)) {  in is_hash_block_verified()
    106  SetPageChecked(hpage);  in is_hash_block_verified()
    177  struct page *hpage;  in verify_data_block() local
    203  if (IS_ERR(hpage)) {  in verify_data_block()
    204  err = PTR_ERR(hpage);  in verify_data_block()
    213  put_page(hpage);  in verify_data_block()
    216  hblocks[level].page = hpage;  in verify_data_block()
    248  SetPageChecked(hpage);  in verify_data_block()
    [all …]

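The verify.c hits show fs/verity caching verification state on the hash pages
themselves: once a hash block has been checked against its parent, the page is
flagged so later reads can skip re-hashing it. A minimal sketch of that
flag-caching pattern, with hypothetical helper names (the real logic lives in
is_hash_block_verified() and verify_data_block()):

    #include <linux/mm.h>

    /* Sketch only: remember "already verified" in PG_checked, as above. */
    static bool hash_page_verified(struct page *hpage)
    {
            /* Set once the block's hash has matched its parent block. */
            return PageChecked(hpage);
    }

    static void mark_hash_page_verified(struct page *hpage)
    {
            SetPageChecked(hpage);
    }
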
/linux-6.3-rc2/mm/
  khugepaged.c
     992  struct page *hpage;  in collapse_huge_page() local
    1125  hpage = NULL;  in collapse_huge_page()
    1131  if (hpage) {  in collapse_huge_page()
    1133  put_page(hpage);  in collapse_huge_page()
    1391  get_page(hpage);  in set_huge_pmd()
    1488  if (!hpage)  in collapse_pte_mapped_thp()
    1599  put_page(hpage);  in collapse_pte_mapped_thp()
    2087  hpage = NULL;  in collapse_file()
    2129  if (hpage)  in collapse_file()
    2133  if (hpage) {  in collapse_file()
    [all …]

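The khugepaged.c hits are largely reference management around a preallocated
huge page: the collapse paths keep the candidate page in hpage and drop it on
every exit where it was not installed. A sketch of that cleanup shape, under
my assumption that the page is held with an extra reference:

    #include <linux/mm.h>

    /* Sketch: release a preallocated huge page that was not consumed. */
    static void release_unused_hpage(struct page **hpage)
    {
            if (*hpage) {
                    put_page(*hpage);
                    *hpage = NULL;
            }
    }
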
  hwpoison-inject.c
     18  struct page *hpage;  in hwpoison_inject() local
     28  hpage = compound_head(p);  in hwpoison_inject()
     33  shake_page(hpage);  in hwpoison_inject()
     37  if (!PageLRU(hpage) && !PageHuge(p) && !is_free_buddy_page(p))  in hwpoison_inject()
     45  err = hwpoison_filter(hpage);  in hwpoison_inject()

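hwpoison_inject() resolves the injected pfn's page to its compound head before
deciding whether injection is allowed, because flags like PG_lru are tracked on
the compound head. A hedged sketch of that check (not the real filter logic):

    #include <linux/hugetlb.h>
    #include <linux/mm.h>

    /* Sketch: state checks for poison injection apply to the compound head. */
    static bool page_accepts_injection(struct page *p)
    {
            struct page *hpage = compound_head(p);

            return PageLRU(hpage) || PageHuge(p) || is_free_buddy_page(p);
    }
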
  memory-failure.c
    1134  if (!PageHuge(hpage))  in me_huge_page()
    1142  unlock_page(hpage);  in me_huge_page()
    1144  unlock_page(hpage);  in me_huge_page()
    1150  put_page(hpage);  in me_huge_page()
    1509  if (!page_mapped(hpage))  in hwpoison_user_mappings()
    1532  SetPageDirty(hpage);  in hwpoison_user_mappings()
    1547  if (PageHuge(hpage) && !PageAnon(hpage)) {  in hwpoison_user_mappings()
    1575  shake_page(hpage);  in hwpoison_user_mappings()
    2081  struct page *hpage;  in memory_failure() local
    2130  hpage = compound_head(p);  in memory_failure()
    [all …]

  migrate.c
    1377  struct page *hpage, int force,  in unmap_and_move_huge_page() argument
    1381  struct folio *dst, *src = page_folio(hpage);  in unmap_and_move_huge_page()
    1394  new_hpage = get_new_page(hpage, private);  in unmap_and_move_huge_page()
    1438  mapping = hugetlb_page_mapping_lock_write(hpage);  in unmap_and_move_huge_page()

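unmap_and_move_huge_page() still takes a struct page but converts it to a folio
right away, which is the type the rest of the migration path works with. A
trivial sketch of that boundary conversion (hypothetical helper):

    #include <linux/mm.h>

    /* Sketch: page-based entry point converting to a folio at the boundary. */
    static long hugetlb_src_nr_pages(struct page *hpage)
    {
            struct folio *src = page_folio(hpage);

            return folio_nr_pages(src);
    }
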
  hugetlb.c
    2074  struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)  in hugetlb_page_mapping_lock_write() argument
    2076  struct address_space *mapping = page_mapping(hpage);  in hugetlb_page_mapping_lock_write()

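hugetlb_page_mapping_lock_write() hands back the page's address_space with its
i_mmap lock held for write, or NULL when that is not possible; the migrate.c
and memory-failure.c callers above rely on that contract. A usage sketch from
the caller's side (assumed caller shape, not kernel code):

    #include <linux/fs.h>
    #include <linux/hugetlb.h>

    /* Sketch: callers must unlock the returned mapping themselves. */
    static void walk_hugetlb_mapping(struct page *hpage)
    {
            struct address_space *mapping;

            mapping = hugetlb_page_mapping_lock_write(hpage);
            if (!mapping)
                    return;         /* unmapped, or lock not acquired */

            /* ... walk mapping->i_mmap while the lock is held ... */

            i_mmap_unlock_write(mapping);
    }
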
/linux-6.3-rc2/drivers/dma-buf/
  udmabuf.c
    210  struct page *page, *hpage = NULL;  in udmabuf_create() local
    268  if (!hpage) {  in udmabuf_create()
    269  hpage = find_get_page_flags(mapping, pgoff,  in udmabuf_create()
    271  if (!hpage) {  in udmabuf_create()
    276  page = hpage + subpgoff;  in udmabuf_create()
    280  put_page(hpage);  in udmabuf_create()
    281  hpage = NULL;  in udmabuf_create()
    297  if (hpage) {  in udmabuf_create()
    298  put_page(hpage);  in udmabuf_create()
    299  hpage = NULL;  in udmabuf_create()

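udmabuf_create() looks up a huge (compound) head page once and then addresses
individual subpages as hpage + subpgoff, dropping the head-page reference when
it moves on to the next huge page. A hedged sketch of that subpage-indexing
pattern; the FGP flag and error code here are my assumptions:

    #include <linux/errno.h>
    #include <linux/pagemap.h>

    /* Sketch: subpages of a compound page are contiguous in the mem_map. */
    static int touch_subpage(struct address_space *mapping,
                             pgoff_t head_index, pgoff_t subpgoff)
    {
            struct page *hpage, *page;

            hpage = find_get_page_flags(mapping, head_index, FGP_ACCESSED);
            if (!hpage)
                    return -EINVAL;

            page = hpage + subpgoff;
            /* ... use page; the reference pinning it was taken on hpage ... */

            put_page(hpage);
            return 0;
    }
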
/linux-6.3-rc2/include/trace/events/
  huge_memory.h
    207  TP_PROTO(struct mm_struct *mm, struct page *hpage, pgoff_t index,
    210  TP_ARGS(mm, hpage, index, addr, is_shmem, file, nr, result),
    224  __entry->hpfn = hpage ? page_to_pfn(hpage) : -1;

/linux-6.3-rc2/include/linux/
  hugetlb.h
     188  struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
     275  struct page *hpage)  in hugetlb_page_mapping_lock_write() argument
     757  static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)  in hugetlb_page_subpool() argument
     759  return hugetlb_folio_subpool(page_folio(hpage));  in hugetlb_page_subpool()
     768  static inline void hugetlb_set_page_subpool(struct page *hpage,  in hugetlb_set_page_subpool() argument
     771  hugetlb_set_folio_subpool(page_folio(hpage), subpool);  in hugetlb_set_page_subpool()
    1028  static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)

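The hugetlb.h hits show the page-based subpool accessors reduced to thin
wrappers over the folio variants, converting with page_folio(). Paraphrasing
the pattern visible in the hits (the real helpers are hugetlb_page_subpool()
and hugetlb_set_page_subpool()):

    #include <linux/hugetlb.h>

    /* Paraphrase of the wrapper pattern shown in the hits above. */
    static inline struct hugepage_subpool *page_subpool_example(struct page *hpage)
    {
            return hugetlb_folio_subpool(page_folio(hpage));
    }

    static inline void set_page_subpool_example(struct page *hpage,
                                                struct hugepage_subpool *subpool)
    {
            hugetlb_set_folio_subpool(page_folio(hpage), subpool);
    }
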
/linux-6.3-rc2/io_uring/
  rsrc.c
    1081  int nr_pages, struct page *hpage)  in headpage_already_acct() argument
    1089  if (compound_head(pages[i]) == hpage)  in headpage_already_acct()
    1100  if (compound_head(imu->bvec[j].bv_page) == hpage)  in headpage_already_acct()
    1119  struct page *hpage;  in io_buffer_account_pin() local
    1121  hpage = compound_head(pages[i]);  in io_buffer_account_pin()
    1122  if (hpage == *last_hpage)  in io_buffer_account_pin()
    1124  *last_hpage = hpage;  in io_buffer_account_pin()
    1125  if (headpage_already_acct(ctx, pages, i, hpage))  in io_buffer_account_pin()
    1127  imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;  in io_buffer_account_pin()

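io_buffer_account_pin() charges each compound (huge) page only once even when
many of the pinned pages are its subpages: it compares compound heads,
remembers the last head seen, and charges the full compound size. A sketch of
that idea in isolation (hypothetical helper; it omits io_uring's extra
headpage_already_acct() check for non-consecutive repeats):

    #include <linux/mm.h>

    /* Sketch: charge each distinct compound page once, at its full size. */
    static unsigned long pages_to_account(struct page **pages, int nr_pages)
    {
            struct page *last_head = NULL;
            unsigned long acct = 0;
            int i;

            for (i = 0; i < nr_pages; i++) {
                    struct page *head = compound_head(pages[i]);

                    if (head == last_head)
                            continue;       /* same huge page as previous entry */
                    last_head = head;

                    acct += page_size(head) >> PAGE_SHIFT;  /* whole compound */
            }
            return acct;
    }
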
/linux-6.3-rc2/arch/powerpc/kvm/
  book3s_pr.c
    647  struct page *hpage;  in kvmppc_patch_dcbz() local
    652  hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);  in kvmppc_patch_dcbz()
    653  if (is_error_page(hpage))  in kvmppc_patch_dcbz()
    660  get_page(hpage);  in kvmppc_patch_dcbz()
    661  page = kmap_atomic(hpage);  in kvmppc_patch_dcbz()
    669  put_page(hpage);  in kvmppc_patch_dcbz()

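kvmppc_patch_dcbz() maps the guest page it is working on, rewrites instructions
in place, then unmaps it and drops the reference. A simplified sketch of that
map-patch-release sequence (hypothetical helper; the reference handling in the
real function differs slightly):

    #include <linux/highmem.h>
    #include <linux/kvm_host.h>

    /* Sketch: temporarily map a guest page and patch one 32-bit word. */
    static void patch_guest_word(struct kvm *kvm, gfn_t gfn,
                                 unsigned int offset, u32 insn)
    {
            struct page *hpage;
            u32 *mem;

            hpage = gfn_to_page(kvm, gfn);
            if (is_error_page(hpage))
                    return;

            mem = kmap_atomic(hpage);
            mem[offset / sizeof(u32)] = insn;
            kunmap_atomic(mem);

            put_page(hpage);
    }
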
/linux-6.3-rc2/Documentation/virt/kvm/x86/
  running-nested-guests.rst
    171  .. note:: On s390x, the kernel parameter ``hpage`` is mutually exclusive
    173  ``nested``, the ``hpage`` parameter *must* be disabled.

/linux-6.3-rc2/arch/s390/kvm/
  kvm-s390.c
     193  static int hpage;  variable
     194  module_param(hpage, int, 0444);
     195  MODULE_PARM_DESC(hpage, "1m huge page backing support");
     583  if (hpage && !kvm_is_ucontrol(kvm))  in kvm_vm_ioctl_check_extension()
     837  else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))  in kvm_vm_ioctl_enable_cap()
    5763  if (nested && hpage) {  in kvm_s390_init()

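kvm-s390.c exposes hpage as a read-only module parameter and refuses to load
with both nesting and huge-page backing enabled, which matches the
documentation note above. A sketch of that parameter/init pattern outside of
KVM (names and descriptions are illustrative; the error path is simplified):

    #include <linux/errno.h>
    #include <linux/module.h>

    /* Illustrative parameters mirroring the ones in the hits above. */
    static int nested;
    module_param(nested, int, 0444);
    MODULE_PARM_DESC(nested, "nested (second-level) virtualization support");

    static int hpage;
    module_param(hpage, int, 0444);
    MODULE_PARM_DESC(hpage, "1m huge page backing support");

    static int __init example_init(void)
    {
            /* The two features are mutually exclusive, so refuse to load. */
            if (nested && hpage)
                    return -EINVAL;
            return 0;
    }
    module_init(example_init);

    MODULE_LICENSE("GPL");
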
/linux-6.3-rc2/Documentation/virt/kvm/
  api.rst
    7204  :Returns: 0 on success, -EINVAL if hpage module parameter was not set
    7212  hpage module parameter is not set to 1, -EINVAL is returned.
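
The api.rst hits describe a VM capability whose enablement fails with -EINVAL
unless the hpage module parameter is set to 1; from context this is presumably
KVM_CAP_S390_HPAGE_1M (the capability name does not appear in the hits, so
treat it as an assumption). A userspace sketch of enabling it on a VM fd:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /*
     * Sketch (userspace): returns 0 on success, or -1 with errno set to
     * EINVAL when the kvm module was loaded without hpage=1.
     */
    static int enable_hpage_1m(int vm_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_S390_HPAGE_1M,   /* assumed capability */
            };

            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }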