| /mm/ |
| A D | migrate_device.c |
|    69  pte_t *ptep;  in migrate_vma_collect_pmd() local
|   111  if (!ptep)  in migrate_vma_collect_pmd()
|   123  pte = ptep_get(ptep);  in migrate_vma_collect_pmd()
|   219  set_pte_at(mm, addr, ptep, pte);  in migrate_vma_collect_pmd()
|   291  pte_unmap_unlock(ptep - 1, ptl);  in migrate_vma_collect_pmd()
|   601  pte_t *ptep;  in migrate_vma_insert_page() local
|   656  if (!ptep)  in migrate_vma_insert_page()
|   658  orig_pte = ptep_get(ptep);  in migrate_vma_insert_page()
|   689  set_pte_at(mm, addr, ptep, entry);  in migrate_vma_insert_page()
|   692  pte_unmap_unlock(ptep, ptl);  in migrate_vma_insert_page()
|   [all …]
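
The references above follow the mm-wide convention for touching a range of PTEs: map and lock the PTE page once, read each entry with ptep_get(), install new values with set_pte_at(), and unlock through the last entry visited. A minimal sketch of that pattern, not the actual migrate_vma_collect_pmd(); the helper name and the trivial "modify" step are illustrative:

    #include <linux/mm.h>

    /* Illustrative only: visit one PTE per page in [addr, end) under the PTE lock. */
    static void walk_pte_range_sketch(struct mm_struct *mm, pmd_t *pmd,
                                      unsigned long addr, unsigned long end)
    {
        spinlock_t *ptl;
        pte_t *ptep;

        ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!ptep)                      /* PTE page went away; caller would retry */
            return;

        for (; addr < end; ptep++, addr += PAGE_SIZE) {
            pte_t pte = ptep_get(ptep); /* one coherent read of the entry */

            if (pte_none(pte))
                continue;
            /* ... compute an updated entry, then install it ... */
            set_pte_at(mm, addr, ptep, pte);
        }
        pte_unmap_unlock(ptep - 1, ptl); /* ptep now points one past the range */
    }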
|
| A D | mincore.c |
|   109  pte_t *ptep;  in mincore_pte_range() local
|   121  ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);  in mincore_pte_range()
|   122  if (!ptep) {  in mincore_pte_range()
|   126  for (; addr != end; ptep += step, addr += step * PAGE_SIZE) {  in mincore_pte_range()
|   127  pte_t pte = ptep_get(ptep);  in mincore_pte_range()
|   135  unsigned int batch = pte_batch_hint(ptep, pte);  in mincore_pte_range()
|   166  pte_unmap_unlock(ptep - 1, ptl);  in mincore_pte_range()
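
mincore_pte_range() layers batching on top of the same walk: pte_batch_hint() (1 on most architectures, larger for arm64 contpte mappings) lets the loop consume several identical entries per iteration. A hedged sketch, with the counting logic and helper name invented for illustration:

    #include <linux/mm.h>
    #include <linux/minmax.h>

    /* Illustrative only: count present PTEs in [addr, end), stepping by the batch hint. */
    static unsigned long count_present_sketch(struct mm_struct *mm, pmd_t *pmd,
                                              unsigned long addr, unsigned long end)
    {
        unsigned long present = 0;
        unsigned int step = 1;
        spinlock_t *ptl;
        pte_t *ptep;

        ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!ptep)
            return 0;

        for (; addr != end; ptep += step, addr += (unsigned long)step * PAGE_SIZE) {
            pte_t pte = ptep_get(ptep);
            unsigned int batch = pte_batch_hint(ptep, pte);

            /* never let a large batch overshoot the requested range */
            step = min_t(unsigned int, batch, (end - addr) >> PAGE_SHIFT);
            if (pte_present(pte))
                present += step;
        }
        pte_unmap_unlock(ptep - 1, ptl);
        return present;
    }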
|
| A D | page_table_check.c |
|   196  void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,  in __page_table_check_ptes_set() argument
|   207  __page_table_check_pte_clear(mm, ptep_get(ptep + i));  in __page_table_check_ptes_set()
|   263  pte_t *ptep = pte_offset_map(&pmd, addr);  in __page_table_check_pte_clear_range() local
|   266  if (WARN_ON(!ptep))  in __page_table_check_pte_clear_range()
|   269  __page_table_check_pte_clear(mm, ptep_get(ptep));  in __page_table_check_pte_clear_range()
|   271  ptep++;  in __page_table_check_pte_clear_range()
|   273  pte_unmap(ptep - PTRS_PER_PTE);  in __page_table_check_pte_clear_range()
|
| A D | pgtable-generic.c |
|    69  unsigned long address, pte_t *ptep,  in ptep_set_access_flags() argument
|    72  int changed = !pte_same(ptep_get(ptep), entry);  in ptep_set_access_flags()
|    74  set_pte_at(vma->vm_mm, address, ptep, entry);  in ptep_set_access_flags()
|    75  flush_tlb_fix_spurious_fault(vma, address, ptep);  in ptep_set_access_flags()
|    83  unsigned long address, pte_t *ptep)  in ptep_clear_flush_young() argument
|    86  young = ptep_test_and_clear_young(vma, address, ptep);  in ptep_clear_flush_young()
|    95  pte_t *ptep)  in ptep_clear_flush() argument
|    99  pte = ptep_get_and_clear(mm, address, ptep);  in ptep_clear_flush()
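
These are the generic fallbacks an architecture gets when it does not provide its own ptep_* helpers. Stripped of the __HAVE_ARCH_* guards of the real file, the ptep_clear_flush() references above amount to roughly this simplified sketch (not a drop-in replacement):

    #include <linux/mm.h>
    #include <asm/tlbflush.h>

    /* Simplified rendering of the generic clear-and-flush: atomically read and
     * clear the entry, then shoot down the TLB entry only if it could be cached. */
    static pte_t clear_flush_sketch(struct vm_area_struct *vma,
                                    unsigned long address, pte_t *ptep)
    {
        pte_t pte = ptep_get_and_clear(vma->vm_mm, address, ptep);

        if (pte_accessible(vma->vm_mm, pte))
            flush_tlb_page(vma, address);
        return pte;
    }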
|
| A D | debug_vm_pgtable.c |
|    53  pte_t *ptep;  member
|   128  if (WARN_ON(!args->ptep))  in pte_advanced_tests()
|   135  pte = ptep_get(args->ptep);  in pte_advanced_tests()
|   138  pte = ptep_get(args->ptep);  in pte_advanced_tests()
|   149  pte = ptep_get(args->ptep);  in pte_advanced_tests()
|   152  pte = ptep_get(args->ptep);  in pte_advanced_tests()
|   160  pte = ptep_get(args->ptep);  in pte_advanced_tests()
|   609  if (WARN_ON(!args->ptep))  in pte_clear_tests()
|   617  pte = ptep_get(args->ptep);  in pte_clear_tests()
|  1316  if (args.ptep)  in debug_vm_pgtable()
|   [all …]
|
| A D | hmm.c |
|   236  unsigned long end, pmd_t *pmdp, pte_t *ptep,  in hmm_vma_handle_pte() argument
|   243  pte_t pte = ptep_get(ptep);  in hmm_vma_handle_pte()
|   287  pte_unmap(ptep);  in hmm_vma_handle_pte()
|   294  pte_unmap(ptep);  in hmm_vma_handle_pte()
|   311  pte_unmap(ptep);  in hmm_vma_handle_pte()
|   324  pte_unmap(ptep);  in hmm_vma_handle_pte()
|   340  pte_t *ptep;  in hmm_vma_walk_pmd() local
|   392  ptep = pte_offset_map(pmdp, addr);  in hmm_vma_walk_pmd()
|   393  if (!ptep)  in hmm_vma_walk_pmd()
|   395  for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {  in hmm_vma_walk_pmd()
|   [all …]
|
| A D | mprotect.c |
|   108  static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,  in mprotect_folio_pte_batch() argument
|   118  return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags);  in mprotect_folio_pte_batch()
|   179  pte_t *ptep, pte_t oldpte, pte_t ptent, int nr_ptes,  in prot_commit_flush_ptes() argument
|   187  ptep += idx;  in prot_commit_flush_ptes()
|   194  modify_prot_commit_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes);  in prot_commit_flush_ptes()
|   231  struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep,  in commit_anon_folio_batch() argument
|   242  prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, len,  in commit_anon_folio_batch()
|   250  struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep,  in set_write_prot_commit_flush_ptes() argument
|   257  prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes,  in set_write_prot_commit_flush_ptes()
|   265  prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes,  in set_write_prot_commit_flush_ptes()
|   [all …]
|
| A D | internal.h |
|   268  struct vm_area_struct *vma, pte_t *ptep, pte_t *ptentp,  in folio_pte_batch_flags() argument
|   289  nr = pte_batch_hint(ptep, pte);  in folio_pte_batch_flags()
|   291  ptep = ptep + nr;  in folio_pte_batch_flags()
|   294  pte = ptep_get(ptep);  in folio_pte_batch_flags()
|   306  cur_nr = pte_batch_hint(ptep, pte);  in folio_pte_batch_flags()
|   308  ptep += cur_nr;  in folio_pte_batch_flags()
|   386  pte_t *ptep = start_ptep + 1;  in swap_pte_batch() local
|   394  while (ptep < end_ptep) {  in swap_pte_batch()
|   395  pte = ptep_get(ptep);  in swap_pte_batch()
|   402  ptep++;  in swap_pte_batch()
|   [all …]
|
| A D | hugetlb.c |
|  5849  pte_t *ptep;  in __unmap_hugepage_range() local
|  5873  if (!ptep) {  in __unmap_hugepage_range()
|  5878  ptl = huge_pte_lock(h, mm, ptep);  in __unmap_hugepage_range()
|  6399  ptl = huge_pte_lock(h, mm, ptep);  in hugetlb_pte_stable()
|  7093  pte_t *ptep;  in hugetlb_change_protection() local
|  7122  if (!ptep) {  in hugetlb_change_protection()
|  7132  if (!ptep) {  in hugetlb_change_protection()
|  7137  ptl = huge_pte_lock(h, mm, ptep);  in hugetlb_change_protection()
|  7900  pte_t *ptep;  in hugetlb_unshare_pmds() local
|  7924  if (!ptep)  in hugetlb_unshare_pmds()
|   [all …]
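
hugetlb uses its own lookup and locking helpers because a "PTE" here may actually sit at PMD or PUD level, and the covering lock differs accordingly. A hedged sketch of the lookup/lock/read sequence the __unmap_hugepage_range() and hugetlb_change_protection() references share; the helper name and empty body are placeholders, and the real callers also hold the VMA-level hugetlb locks:

    #include <linux/mm.h>
    #include <linux/hugetlb.h>

    static void hugetlb_inspect_one_sketch(struct hstate *h, struct mm_struct *mm,
                                           unsigned long addr)
    {
        unsigned long sz = huge_page_size(h);
        spinlock_t *ptl;
        pte_t *ptep, pte;

        ptep = huge_pte_offset(mm, addr, sz);
        if (!ptep)                      /* nothing mapped at this huge-page slot */
            return;

        ptl = huge_pte_lock(h, mm, ptep);
        pte = huge_ptep_get(mm, addr, ptep);
        if (!huge_pte_none(pte)) {
            /* ... inspect or rewrite the huge mapping here ... */
        }
        spin_unlock(ptl);
    }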
|
| A D | madvise.c |
|   192  pte_t *ptep = NULL;  in swapin_walk_pmd_entry() local
|   201  if (!ptep++) {  in swapin_walk_pmd_entry()
|   203  if (!ptep)  in swapin_walk_pmd_entry()
|   207  pte = ptep_get(ptep);  in swapin_walk_pmd_entry()
|   214  pte_unmap_unlock(ptep, ptl);  in swapin_walk_pmd_entry()
|   215  ptep = NULL;  in swapin_walk_pmd_entry()
|   223  if (ptep)  in swapin_walk_pmd_entry()
|   224  pte_unmap_unlock(ptep, ptl);  in swapin_walk_pmd_entry()
|   347  struct folio *folio, pte_t *ptep,  in madvise_folio_pte_batch() argument
|  1114  pte_t *ptep, struct mm_walk *walk)  in guard_install_set_pte() argument
|   [all …]
|
| A D | pagewalk.c |
|   872  pte_t *ptep, pte;  in folio_walk_start() local
|   970  ptep = pte_offset_map_lock(vma->vm_mm, pmdp, addr, &ptl);  in folio_walk_start()
|   971  if (!ptep)  in folio_walk_start()
|   973  pte = ptep_get(ptep);  in folio_walk_start()
|   977  fw->ptep = ptep;  in folio_walk_start()
|  1000  pte_unmap_unlock(ptep, ptl);  in folio_walk_start()
|
| A D | gup.c |
|   823  pte_t *ptep, pte;  in follow_page_pte() local
|   827  if (!ptep)  in follow_page_pte()
|   829  pte = ptep_get(ptep);  in follow_page_pte()
|   856  ret = follow_pfn_pte(vma, address, ptep, flags);  in follow_page_pte()
|   903  pte_unmap_unlock(ptep, ptl);  in follow_page_pte()
|   906  pte_unmap_unlock(ptep, ptl);  in follow_page_pte()
|  2858  pte_t *ptep, *ptem;  in gup_fast_pte_range() local
|  2860  ptem = ptep = pte_offset_map(&pmd, addr);  in gup_fast_pte_range()
|  2861  if (!ptep)  in gup_fast_pte_range()
|  2864  pte_t pte = ptep_get_lockless(ptep);  in gup_fast_pte_range()
|   [all …]
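
gup.c shows both access styles: follow_page_pte() reads under the PTE lock, while gup_fast_pte_range() maps the table with plain pte_offset_map() and reads each entry via ptep_get_lockless(), relying on interrupts being disabled to keep the PTE page from being freed underneath it. A sketch of the lockless read loop; the counting helper is illustrative and inherits gup_fast's requirement that the caller runs with IRQs disabled:

    #include <linux/mm.h>

    static int count_present_lockless_sketch(pmd_t pmd, unsigned long addr,
                                             unsigned long end)
    {
        pte_t *ptep, *ptem;
        int nr = 0;

        /* pmd is a copy read by the caller, as in gup_fast_pte_range() */
        ptem = ptep = pte_offset_map(&pmd, addr);
        if (!ptep)
            return 0;
        do {
            pte_t pte = ptep_get_lockless(ptep);

            if (pte_present(pte))
                nr++;
        } while (ptep++, addr += PAGE_SIZE, addr != end);
        pte_unmap(ptem);
        return nr;
    }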
|
| A D | memory.c |
|  4314  pte = ptep_get(ptep);  in can_swapin_thp()
|  4471  pte_t *ptep;  in do_swap_page() local
|  4692  ptep = folio_ptep;  in do_swap_page()
|  4699  ptep = vmf->pte;  in do_swap_page()
|  4721  ptep = folio_ptep;  in do_swap_page()
|  6474  args->ptep = ptep;  in pfnmap_args_setup()
|  6537  pte_t *ptep, pte;  in follow_pfnmap_start() local
|  6587  if (!ptep)  in follow_pfnmap_start()
|  6589  pte = ptep_get(ptep);  in follow_pfnmap_start()
|  6614  if (args->ptep)  in follow_pfnmap_end()
|   [all …]
|
| A D | memory-failure.c |
|   807  pte_t *ptep, *mapped_pte;  in hwpoison_pte_range() local
|   817  mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,  in hwpoison_pte_range()
|   819  if (!ptep)  in hwpoison_pte_range()
|   822  for (; addr != end; ptep++, addr += PAGE_SIZE) {  in hwpoison_pte_range()
|   823  ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT,  in hwpoison_pte_range()
|   835  static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,  in hwpoison_hugetlb_range() argument
|   845  ptl = huge_pte_lock(h, walk->mm, ptep);  in hwpoison_hugetlb_range()
|   846  pte = huge_ptep_get(walk->mm, addr, ptep);  in hwpoison_hugetlb_range()
|
| A D | migrate.c |
|   462  pte_t *ptep;  in migration_entry_wait() local
|   466  ptep = pte_offset_map_lock(mm, pmd, address, &ptl);  in migration_entry_wait()
|   467  if (!ptep)  in migration_entry_wait()
|   470  pte = ptep_get(ptep);  in migration_entry_wait()
|   471  pte_unmap(ptep);  in migration_entry_wait()
|   493  void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)  in migration_entry_wait_huge() argument
|   495  spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);  in migration_entry_wait_huge()
|   500  pte = huge_ptep_get(vma->vm_mm, addr, ptep);  in migration_entry_wait_huge()
|
| A D | ksm.c |
|  1358  pte_t *ptep;  in replace_page() local
|  1385  ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);  in replace_page()
|  1386  if (!ptep)  in replace_page()
|  1388  if (!pte_same(ptep_get(ptep), orig_pte)) {  in replace_page()
|  1389  pte_unmap_unlock(ptep, ptl);  in replace_page()
|  1421  flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep)));  in replace_page()
|  1428  ptep_clear_flush(vma, addr, ptep);  in replace_page()
|  1429  set_pte_at(mm, addr, ptep, newpte);  in replace_page()
|  1436  pte_unmap_unlock(ptep, ptl);  in replace_page()
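
replace_page() is the canonical "swap one mapped page for another" sequence: revalidate the entry against a previously sampled value, flush caches for the old page, clear-and-flush the PTE, then install the replacement, all under the PTE lock. A stripped-down sketch; the rmap and folio accounting of the real function are omitted, and newpte is assumed to have been prepared by the caller:

    #include <linux/mm.h>
    #include <linux/highmem.h>
    #include <linux/errno.h>

    static int replace_pte_sketch(struct vm_area_struct *vma, pmd_t *pmd,
                                  unsigned long addr, pte_t orig_pte, pte_t newpte)
    {
        struct mm_struct *mm = vma->vm_mm;
        spinlock_t *ptl;
        pte_t *ptep;

        ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!ptep)
            return -EAGAIN;

        if (!pte_same(ptep_get(ptep), orig_pte)) {
            /* the mapping changed since orig_pte was sampled; give up */
            pte_unmap_unlock(ptep, ptl);
            return -EAGAIN;
        }

        flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep)));
        ptep_clear_flush(vma, addr, ptep);
        set_pte_at(mm, addr, ptep, newpte);

        pte_unmap_unlock(ptep, ptl);
        return 0;
    }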
|
| A D | highmem.c |
|   518  #define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \  argument
|   519  set_pte_at(mm, vaddr, ptep, ptev)
|
| A D | mremap.c |
|   175  pte_t *ptep, pte_t pte, int max_nr)  in mremap_folio_pte_batch() argument
|   183  if (pte_batch_hint(ptep, pte) == 1)  in mremap_folio_pte_batch()
|   190  return folio_pte_batch(folio, ptep, pte, max_nr);  in mremap_folio_pte_batch()
|
| A D | util.c |
|  1278  unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,  in folio_pte_batch() argument
|  1281  return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr, 0);  in folio_pte_batch()
|
| A D | filemap.c |
|  3341  pte_t *ptep;  in filemap_fault_recheck_pte_none() local
|  3363  ptep = pte_offset_map_ro_nolock(vma->vm_mm, vmf->pmd, vmf->address,  in filemap_fault_recheck_pte_none()
|  3365  if (unlikely(!ptep))  in filemap_fault_recheck_pte_none()
|  3368  if (unlikely(!pte_none(ptep_get_lockless(ptep)))) {  in filemap_fault_recheck_pte_none()
|  3372  if (unlikely(!pte_none(ptep_get(ptep))))  in filemap_fault_recheck_pte_none()
|  3376  pte_unmap(ptep);  in filemap_fault_recheck_pte_none()
|
| A D | rmap.c |
|  2734  fw.pte = ptep_clear_flush(vma, addr, fw.ptep);  in make_device_exclusive()
|  2750  set_pte_at(mm, addr, fw.ptep, swp_pte);  in make_device_exclusive()
|
| A D | vmalloc.c |
|   787  pte_t *ptep, pte;  in vmalloc_to_page() local
|   826  ptep = pte_offset_kernel(pmd, addr);  in vmalloc_to_page()
|   827  pte = ptep_get(ptep);  in vmalloc_to_page()
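
vmalloc_to_page() walks init_mm's page tables, which are never freed under the walker, so the leaf level is reached with pte_offset_kernel() and read directly, with no map/lock/unmap step. A sketch of just that tail; the pgd/p4d/pud/pmd descent and the huge-mapping checks of the real function are omitted:

    #include <linux/mm.h>

    static struct page *kernel_pte_to_page_sketch(pmd_t *pmd, unsigned long addr)
    {
        pte_t *ptep = pte_offset_kernel(pmd, addr);  /* no kmap, no lock needed */
        pte_t pte = ptep_get(ptep);

        if (!pte_present(pte))
            return NULL;
        return pfn_to_page(pte_pfn(pte));
    }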
|
| /mm/kasan/ |
| A D | shadow.c |
|   300  static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,  in kasan_populate_vmalloc_pte() argument
|   308  if (likely(!pte_none(ptep_get(ptep))))  in kasan_populate_vmalloc_pte()
|   317  if (likely(pte_none(ptep_get(ptep)))) {  in kasan_populate_vmalloc_pte()
|   318  set_pte_at(&init_mm, addr, ptep, pte);  in kasan_populate_vmalloc_pte()
|   461  static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,  in kasan_depopulate_vmalloc_pte() argument
|   466  page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);  in kasan_depopulate_vmalloc_pte()
|   470  if (likely(!pte_none(ptep_get(ptep)))) {  in kasan_depopulate_vmalloc_pte()
|   471  pte_clear(&init_mm, addr, ptep);  in kasan_depopulate_vmalloc_pte()
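
kasan_populate_vmalloc_pte() installs shadow PTEs with an "only if still empty" protocol: an optimistic pte_none() check without any lock, then a re-check under init_mm.page_table_lock before set_pte_at(), so whichever CPU loses the race simply backs off. A hedged sketch; the backing-page parameter is illustrative, and the real function allocates the page itself and frees it if the race is lost:

    #include <linux/mm.h>

    static int populate_if_none_sketch(pte_t *ptep, unsigned long addr,
                                       struct page *backing)
    {
        pte_t pte = pfn_pte(page_to_pfn(backing), PAGE_KERNEL);

        if (!pte_none(ptep_get(ptep)))      /* already populated, nothing to do */
            return 0;

        spin_lock(&init_mm.page_table_lock);
        if (pte_none(ptep_get(ptep)))       /* re-check now that we hold the lock */
            set_pte_at(&init_mm, addr, ptep, pte);
        spin_unlock(&init_mm.page_table_lock);
        return 0;
    }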
|
| /mm/damon/ |
| A D | vaddr.c |
|   616  unsigned long addr, pte_t *ptep, pmd_t *pmdp)  in damos_va_filter_young_match() argument
|   620  if (ptep)  in damos_va_filter_young_match()
|   621  young = pte_young(ptep_get(ptep));  in damos_va_filter_young_match()
|   628  if (young && ptep)  in damos_va_filter_young_match()
|   629  damon_ptep_mkold(ptep, vma, addr);  in damos_va_filter_young_match()
|   638  pte_t *ptep, pmd_t *pmdp)  in damos_va_filter_out() argument
|   655  vma, addr, ptep, pmdp);  in damos_va_filter_out()
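
DAMON's young filter reads the accessed bit straight from the entry with pte_young(ptep_get(ptep)) and, once that information has been consumed, ages the entry again. A rough sketch using the generic ptep_test_and_clear_young() in place of DAMON's damon_ptep_mkold() wrapper (which also handles TLB and folio bookkeeping); the helper name is illustrative:

    #include <linux/mm.h>

    static bool pte_was_young_sketch(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep)
    {
        bool young = pte_young(ptep_get(ptep));

        if (young)      /* reset the accessed bit so the next pass sees fresh activity */
            ptep_test_and_clear_young(vma, addr, ptep);
        return young;
    }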
|