/mm/
swap.h
    34  #define swap_address_space(entry) \    argument
    35  (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
    41  static inline loff_t swap_dev_pos(swp_entry_t entry)    in swap_dev_pos() argument
    43  return ((loff_t)swp_offset(entry)) << PAGE_SHIFT;    in swap_dev_pos()
    52  return swp_offset(entry) & SWAP_ADDRESS_SPACE_MASK;    in swap_cache_index()
    56  void *get_shadow_from_swap_cache(swp_entry_t entry);
    60  swp_entry_t entry, void *shadow);
    65  struct folio *swap_cache_get_folio(swp_entry_t entry,
    95  unsigned long start = swp_offset(entry);    in swap_zeromap_batch()
   114  pgoff_t offset = swp_offset(entry);    in non_swapcache_batch()
   [all …]
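The swap.h hits above define the basic arithmetic on a swap entry: the slot offset shifted by PAGE_SHIFT gives the byte position on the swap device, and the offset masked by SWAP_ADDRESS_SPACE_MASK gives the index within one swap address_space. Below is a minimal userspace model of that arithmetic; the swp_entry_t bit layout and the SWP_TYPE_SHIFT / SWAP_ADDRESS_SPACE_SHIFT values are illustrative assumptions, not the kernel's arch-specific constants.

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT               12
#define SWP_TYPE_SHIFT           58   /* assumed type/offset split; arch-dependent in the kernel */
#define SWAP_ADDRESS_SPACE_SHIFT 14   /* assumed slots per swap address_space */
#define SWAP_ADDRESS_SPACE_MASK  ((1ULL << SWAP_ADDRESS_SPACE_SHIFT) - 1)

typedef struct { uint64_t val; } swp_entry_t;

static swp_entry_t swp_entry(unsigned type, uint64_t offset)
{
	return (swp_entry_t){ ((uint64_t)type << SWP_TYPE_SHIFT) | offset };
}

static unsigned swp_type(swp_entry_t e)   { return e.val >> SWP_TYPE_SHIFT; }
static uint64_t swp_offset(swp_entry_t e) { return e.val & ((1ULL << SWP_TYPE_SHIFT) - 1); }

/* Mirrors swap_dev_pos(): byte position of the slot on the swap device. */
static uint64_t swap_dev_pos(swp_entry_t e)
{
	return swp_offset(e) << PAGE_SHIFT;
}

/* Mirrors swap_cache_index(): page-cache index within one address_space. */
static uint64_t swap_cache_index(swp_entry_t e)
{
	return swp_offset(e) & SWAP_ADDRESS_SPACE_MASK;
}

int main(void)
{
	swp_entry_t e = swp_entry(1, 0x12345);

	printf("type=%u offset=0x%llx dev_pos=0x%llx cache_index=0x%llx\n",
	       swp_type(e),
	       (unsigned long long)swp_offset(e),
	       (unsigned long long)swap_dev_pos(e),
	       (unsigned long long)swap_cache_index(e));
	return 0;
}
```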
|
zswap.c
   696  return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;    in mem_cgroup_from_entry()
   791  if (!entry)    in zswap_entry_cache_alloc()
   793  return entry;    in zswap_entry_cache_alloc()
   808  zpool_free(entry->pool->zpool, entry->handle);    in zswap_entry_free()
   811  obj_cgroup_uncharge_zswap(entry->objcg, entry->length);    in zswap_entry_free()
  1045  entry->pool->tfm_name, entry->length, dlen);    in zswap_decompress()
  1122  if (entry->objcg)    in zswap_writeback_entry()
  1487  if (!entry) {    in zswap_store_page()
  1634  if (entry)    in zswap_store()
  1686  if (!entry)    in zswap_load()
  [all …]
|
shmem_quota.c
    97  struct quota_id *entry;    in shmem_free_file_info() local
   107  kfree(entry);    in shmem_free_file_info()
   131  if (id < entry->id)    in shmem_get_next_id()
   133  else if (id > entry->id)    in shmem_get_next_id()
   139  if (!entry) {    in shmem_get_next_id()
   144  if (id > entry->id) {    in shmem_get_next_id()
   184  if (id < entry->id)    in shmem_acquire_dquot()
   186  else if (id > entry->id)    in shmem_acquire_dquot()
   211  entry = new_entry;    in shmem_acquire_dquot()
   283  if (id < entry->id)    in shmem_release_dquot()
   [all …]
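The shmem_quota.c hits are a textbook keyed-tree descent: compare the requested id against entry->id and walk left or right until a match or a leaf. The kernel walks an rbtree; the sketch below uses a plain binary search tree purely to show the shape of that lookup, and the struct and function names are hypothetical.

```c
#include <stddef.h>

/* Hypothetical stand-in for the rbtree node keyed by quota id. */
struct quota_id_node {
	unsigned int id;
	struct quota_id_node *left, *right;
};

static struct quota_id_node *quota_lookup(struct quota_id_node *entry,
					  unsigned int id)
{
	while (entry) {
		if (id < entry->id)
			entry = entry->left;	/* smaller ids to the left */
		else if (id > entry->id)
			entry = entry->right;	/* larger ids to the right */
		else
			return entry;		/* exact match */
	}
	/* Not found: shmem_acquire_dquot() allocates and inserts here. */
	return NULL;
}
```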
|
swap_state.c
   105  folio->swap = entry;    in add_to_swap_cache()
   142  swp_entry_t entry, void *shadow)    in __delete_from_swap_cache() argument
   158  VM_BUG_ON_PAGE(entry != folio, entry);    in __delete_from_swap_cache()
   176  swp_entry_t entry = folio->swap;    in delete_from_swap_cache() local
   183  put_swap_folio(folio, entry);    in delete_from_swap_cache()
   288  folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));    in swap_cache_get_folio()
   383  swap_cache_index(entry));    in __read_swap_cache_async()
   448  memcg1_swapin(entry, 1);    in __read_swap_cache_async()
   462  put_swap_folio(new_folio, entry);    in __read_swap_cache_async()
   490  si = get_swap_device(entry);    in read_swap_cache_async()
   [all …]
|
swapfile.c
   244  entry = folio->swap;    in __try_to_reclaim_swap()
   245  if (offset < swp_offset(entry) || offset >= swp_offset(entry) + nr_pages) {    in __try_to_reclaim_swap()
  1270  swp_entry_t entry = {};    in folio_alloc_swap() local
  1302  if (!entry.val)    in folio_alloc_swap()
  1328  if (!entry.val)    in _swap_info_get()
  1443  if (!entry.val)    in get_swap_device()
  1877  return entry;    in get_swap_page_of_type()
  2075  swap_free(entry);    in unuse_pte()
  2098  swp_entry_t entry;    in unuse_pte_range() local
  2298  swp_entry_t entry;    in try_to_unuse() local
  [all …]
|
page_vma_mapped.c
    48  swp_entry_t entry;    in map_pte() local
    65  entry = pte_to_swp_entry(ptent);    in map_pte()
   110  swp_entry_t entry;    in check_pte() local
   113  entry = pte_to_swp_entry(ptent);    in check_pte()
   115  if (!is_migration_entry(entry))    in check_pte()
   118  pfn = swp_offset_pfn(entry);    in check_pte()
   120  swp_entry_t entry;    in check_pte() local
   123  entry = pte_to_swp_entry(ptent);    in check_pte()
   128  pfn = swp_offset_pfn(entry);    in check_pte()
   253  swp_entry_t entry;    in page_vma_mapped_walk() local
   [all …]
|
huge_memory.c
  1417  entry = pmd_mkspecial(entry);    in insert_pmd()
  1563  entry = pud_mkspecial(entry);    in insert_pud()
  2458  entry = pmd_mkuffd_wp(entry);    in change_huge_pmd()
  2465  entry = pmd_clear_uffd_wp(entry);    in change_huge_pmd()
  2470  entry = pmd_mkwrite(entry, vma);    in change_huge_pmd()
  2825  entry = pte_mkspecial(entry);    in __split_huge_zero_page_pmd()
  2827  entry = pte_mkuffd_wp(entry);    in __split_huge_zero_page_pmd()
  3031  entry = pte_mkwrite(entry, vma);    in __split_huge_pmd_locked()
  3033  entry = pte_mkold(entry);    in __split_huge_pmd_locked()
  3036  entry = pte_mkdirty(entry);    in __split_huge_pmd_locked()
  [all …]
|
memory.c
  2468  entry = pte_mkyoung(entry);    in insert_pfn()
  2480  entry = pte_mkyoung(entry);    in insert_pfn()
  3601  entry = pte_sw_mkyoung(entry);    in wp_page_copy()
  5071  entry = pte_sw_mkyoung(entry);    in do_anonymous_page()
  5104  entry = pte_mkuffd_wp(entry);    in do_anonymous_page()
  5295  entry = pte_mkold(entry);    in set_pte_range()
  5297  entry = pte_sw_mkyoung(entry);    in set_pte_range()
  5302  entry = pte_mkdirty(entry);    in set_pte_range()
  5304  entry = pte_mkuffd_wp(entry);    in set_pte_range()
  6070  entry = pte_mkdirty(entry);    in handle_pte_fault()
  [all …]
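Nearly every memory.c and huge_memory.c hit follows one idiom: page-table entry bits are changed through small pure helpers that take an entry and return the updated entry, so the final set_pte_at()/set_pmd_at() store is the only visible side effect. A userspace model of that helper style follows; the bit positions are illustrative, not any architecture's real layout, and prepare_anon_pte() is a hypothetical composition, not a kernel function.

```c
#include <stdint.h>

typedef uint64_t pte_t;	/* model only; the real pte_t is arch-specific */

#define PTE_WRITE    (1ULL << 1)   /* assumed bit positions */
#define PTE_ACCESSED (1ULL << 5)
#define PTE_DIRTY    (1ULL << 6)

static pte_t pte_mkyoung(pte_t e) { return e | PTE_ACCESSED; }
static pte_t pte_mkold(pte_t e)   { return e & ~PTE_ACCESSED; }
static pte_t pte_mkdirty(pte_t e) { return e | PTE_DIRTY; }
static pte_t pte_mkwrite(pte_t e) { return e | PTE_WRITE; }

/* Helpers compose exactly as in the listing: build the final entry value
 * first, then publish it to the page table with a single store. */
static pte_t prepare_anon_pte(pte_t e)
{
	return pte_mkdirty(pte_mkyoung(pte_mkwrite(pte_mkold(e))));
}
```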
|
shrinker_debug.c
   164  struct dentry *entry;    in shrinker_debugfs_add() local
   182  entry = debugfs_create_dir(buf, shrinker_debugfs_root);    in shrinker_debugfs_add()
   183  if (IS_ERR(entry)) {    in shrinker_debugfs_add()
   185  return PTR_ERR(entry);    in shrinker_debugfs_add()
   187  shrinker->debugfs_entry = entry;    in shrinker_debugfs_add()
   189  debugfs_create_file("count", 0440, entry, shrinker,    in shrinker_debugfs_add()
   191  debugfs_create_file("scan", 0220, entry, shrinker,    in shrinker_debugfs_add()
   232  struct dentry *entry = shrinker->debugfs_entry;    in shrinker_debugfs_detach() local
   236  *debugfs_id = entry ? shrinker->debugfs_id : -1;    in shrinker_debugfs_detach()
   239  return entry;    in shrinker_debugfs_detach()
|
migrate_device.c
   120  swp_entry_t entry;    in migrate_vma_collect_pmd() local
   139  entry = pte_to_swp_entry(pte);    in migrate_vma_collect_pmd()
   140  if (!is_device_private_entry(entry))    in migrate_vma_collect_pmd()
   143  page = pfn_swap_entry_to_page(entry);    in migrate_vma_collect_pmd()
   248  entry = make_migration_entry_young(entry);    in migrate_vma_collect_pmd()
   250  entry = make_migration_entry_dirty(entry);    in migrate_vma_collect_pmd()
   252  swp_pte = swp_entry_to_pte(entry);    in migrate_vma_collect_pmd()
   596  pte_t entry;    in migrate_vma_insert_page() local
   643  entry = swp_entry_to_pte(swp_entry);    in migrate_vma_insert_page()
   652  entry = pte_mkwrite(pte_mkdirty(entry), vma);    in migrate_vma_insert_page()
   [all …]
|
rmap.c
  1055  if (!pte_dirty(entry) && !pte_write(entry))    in page_vma_mkclean_one()
  1060  entry = pte_wrprotect(entry);    in page_vma_mkclean_one()
  1061  entry = pte_mkclean(entry);    in page_vma_mkclean_one()
  1067  pmd_t entry;    in page_vma_mkclean_one() local
  1075  entry = pmd_wrprotect(entry);    in page_vma_mkclean_one()
  1076  entry = pmd_mkclean(entry);    in page_vma_mkclean_one()
  2160  swap_free(entry);    in try_to_unmap_one()
  2168  swap_free(entry);    in try_to_unmap_one()
  2536  entry = make_migration_entry_young(entry);    in try_to_migrate_one()
  2538  entry = make_migration_entry_dirty(entry);    in try_to_migrate_one()
  [all …]
|
hmm.c
   256  swp_entry_t entry = pte_to_swp_entry(pte);    in hmm_vma_handle_pte() local
   262  if (is_device_private_entry(entry) &&    in hmm_vma_handle_pte()
   266  if (is_writable_device_private_entry(entry))    in hmm_vma_handle_pte()
   268  new_pfn_flags = swp_offset_pfn(entry) | cpu_flags;    in hmm_vma_handle_pte()
   277  if (!non_swap_entry(entry))    in hmm_vma_handle_pte()
   280  if (is_device_private_entry(entry))    in hmm_vma_handle_pte()
   283  if (is_device_exclusive_entry(entry))    in hmm_vma_handle_pte()
   286  if (is_migration_entry(entry)) {    in hmm_vma_handle_pte()
   490  pte_t entry;    in hmm_vma_walk_hugetlb_entry() local
   493  entry = huge_ptep_get(walk->mm, addr, pte);    in hmm_vma_walk_hugetlb_entry()
   [all …]
|
mprotect.c
   358  swp_entry_t entry = pte_to_swp_entry(oldpte);    in change_pte_range() local
   361  if (is_writable_migration_entry(entry)) {    in change_pte_range()
   370  swp_offset(entry));    in change_pte_range()
   372  entry = make_readable_migration_entry(swp_offset(entry));    in change_pte_range()
   373  newpte = swp_entry_to_pte(entry);    in change_pte_range()
   381  entry = make_readable_device_private_entry(    in change_pte_range()
   382  swp_offset(entry));    in change_pte_range()
   383  newpte = swp_entry_to_pte(entry);    in change_pte_range()
   386  } else if (is_pte_marker_entry(entry)) {    in change_pte_range()
   392  if (is_poisoned_swp_entry(entry) ||    in change_pte_range()
   [all …]
|
workingset.c
   213  unsigned long entry = xa_to_value(shadow);    in unpack_shadow() local
   217  workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);    in unpack_shadow()
   218  entry >>= WORKINGSET_SHIFT;    in unpack_shadow()
   219  nid = entry & ((1UL << NODES_SHIFT) - 1);    in unpack_shadow()
   220  entry >>= NODES_SHIFT;    in unpack_shadow()
   221  memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);    in unpack_shadow()
   222  entry >>= MEM_CGROUP_ID_SHIFT;    in unpack_shadow()
   226  *evictionp = entry;    in unpack_shadow()
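unpack_shadow() above peels fields off the low end of the packed word in the reverse order of packing: workingset flag, then node id, then memcg id, with the remaining high bits holding the eviction counter. Below is a self-contained pack/unpack pair with the same structure; the concrete shift widths are assumptions for the sketch (NODES_SHIFT in particular is config-dependent in the kernel).

```c
#include <assert.h>
#include <stdint.h>

#define WORKINGSET_SHIFT     1
#define NODES_SHIFT          6    /* assumed; config-dependent in the kernel */
#define MEM_CGROUP_ID_SHIFT 16

static uint64_t pack_shadow(unsigned memcgid, unsigned nid,
			    uint64_t eviction, unsigned workingset)
{
	uint64_t entry = eviction;

	/* Pack high-to-low; unpack_shadow() reverses this order. */
	entry = (entry << MEM_CGROUP_ID_SHIFT) | memcgid;
	entry = (entry << NODES_SHIFT) | nid;
	entry = (entry << WORKINGSET_SHIFT) | workingset;
	return entry;
}

static void unpack_shadow(uint64_t entry, unsigned *memcgid, unsigned *nid,
			  uint64_t *eviction, unsigned *workingset)
{
	*workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
	entry >>= WORKINGSET_SHIFT;
	*nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	*memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;
	*eviction = entry;
}

int main(void)
{
	unsigned memcgid, nid, ws;
	uint64_t eviction;

	unpack_shadow(pack_shadow(42, 3, 12345, 1),
		      &memcgid, &nid, &eviction, &ws);
	assert(memcgid == 42 && nid == 3 && eviction == 12345 && ws == 1);
	return 0;
}
```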
|
pgtable-generic.c
    70  pte_t entry, int dirty)    in ptep_set_access_flags() argument
    72  int changed = !pte_same(ptep_get(ptep), entry);    in ptep_set_access_flags()
    74  set_pte_at(vma->vm_mm, address, ptep, entry);    in ptep_set_access_flags()
   111  pmd_t entry, int dirty)    in pmdp_set_access_flags() argument
   113  int changed = !pmd_same(*pmdp, entry);    in pmdp_set_access_flags()
   116  set_pmd_at(vma->vm_mm, address, pmdp, entry);    in pmdp_set_access_flags()
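Both generic helpers above share one contract: compare the current table entry with the proposed one, store only if they differ, and return whether anything changed so the caller knows a TLB flush is needed. A minimal model of that compare-then-set shape (the real helpers go through set_pte_at()/set_pmd_at() and leave the flush to the caller):

```c
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t pte_t;	/* model only */

/* Mirrors the ptep_set_access_flags() contract: returns true when the
 * entry actually changed, signalling the caller to flush the TLB. */
static bool set_access_flags(pte_t *ptep, pte_t entry)
{
	bool changed = (*ptep != entry);

	if (changed)
		*ptep = entry;
	return changed;
}
```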
|
hugetlb.c
  5483  entry = pte_mkwrite_novma(pte_mkdirty(entry));    in make_huge_pte()
  5485  entry = pte_wrprotect(entry);    in make_huge_pte()
  5487  entry = pte_mkyoung(entry);    in make_huge_pte()
  5488  entry = arch_make_huge_pte(entry, shift, vma->vm_flags);    in make_huge_pte()
  5490  return entry;    in make_huge_pte()
  5496  pte_t entry;    in set_huge_ptep_writable() local
  5622  entry = huge_pte_clear_uffd_wp(entry);    in copy_hugetlb_page_range()
  5637  entry = pte_swp_mkuffd_wp(entry);    in copy_hugetlb_page_range()
  5641  entry = huge_pte_clear_uffd_wp(entry);    in copy_hugetlb_page_range()
  5716  entry = huge_pte_wrprotect(entry);    in copy_hugetlb_page_range()
  [all …]
|
mincore.c
   146  swp_entry_t entry = pte_to_swp_entry(pte);    in mincore_pte_range() local
   148  if (non_swap_entry(entry)) {    in mincore_pte_range()
   156  *vec = mincore_page(swap_address_space(entry),    in mincore_pte_range()
   157  swap_cache_index(entry));    in mincore_pte_range()
|
madvise.c
   198  swp_entry_t entry;    in swapin_walk_pmd_entry() local
   210  entry = pte_to_swp_entry(pte);    in swapin_walk_pmd_entry()
   211  if (unlikely(non_swap_entry(entry)))    in swapin_walk_pmd_entry()
   248  swp_entry_t entry;    in shmem_swapin_range() local
   252  entry = radix_to_swp_entry(folio);    in shmem_swapin_range()
   254  if (non_swap_entry(entry))    in shmem_swapin_range()
   693  swp_entry_t entry;    in madvise_free_pte_range() local
   695  entry = pte_to_swp_entry(ptent);    in madvise_free_pte_range()
   696  if (!non_swap_entry(entry)) {    in madvise_free_pte_range()
   700  free_swap_and_cache_nr(entry, nr);    in madvise_free_pte_range()
   [all …]
|
hugetlb_vmemmap.c
    66  pte_t entry, *pte;    in vmemmap_split_pmd() local
    69  entry = mk_pte(head + i, pgprot);    in vmemmap_split_pmd()
    71  set_pte_at(&init_mm, addr, pte, entry);    in vmemmap_split_pmd()
   216  pte_t entry;    in vmemmap_remap_pte() local
   231  entry = mk_pte(walk->reuse_page, pgprot);    in vmemmap_remap_pte()
   233  set_pte_at(&init_mm, addr, pte, entry);    in vmemmap_remap_pte()
|
page_io.c
   209  swp_entry_t entry;    in swap_zeromap_folio_set() local
   213  entry = page_swap_entry(folio_page(folio, i));    in swap_zeromap_folio_set()
   214  set_bit(swp_offset(entry), sis->zeromap);    in swap_zeromap_folio_set()
   227  swp_entry_t entry;    in swap_zeromap_folio_clear() local
   231  entry = page_swap_entry(folio_page(folio, i));    in swap_zeromap_folio_clear()
   232  clear_bit(swp_offset(entry), sis->zeromap);    in swap_zeromap_folio_clear()
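swap_zeromap_folio_set()/clear() walk every page of a folio and flip the matching bit in the swap device's zeromap, so all-zero pages can skip real I/O at swap-in. A userspace sketch of the same bitmap walk; the kernel's set_bit()/clear_bit() are atomic, which this plain version deliberately is not.

```c
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static void zeromap_set_bit(unsigned long nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void zeromap_clear_bit(unsigned long nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

/* Mark every swap slot backing an nr_pages folio starting at 'offset'. */
static void zeromap_folio_set(unsigned long *zeromap, unsigned long offset,
			      unsigned long nr_pages)
{
	for (unsigned long i = 0; i < nr_pages; i++)
		zeromap_set_bit(offset + i, zeromap);
}

static void zeromap_folio_clear(unsigned long *zeromap, unsigned long offset,
				unsigned long nr_pages)
{
	for (unsigned long i = 0; i < nr_pages; i++)
		zeromap_clear_bit(offset + i, zeromap);
}
```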
|
userfaultfd.c
  1088  struct swap_info_struct *si, swp_entry_t entry)    in move_swap_pte() argument
  1096  entry.val != src_folio->swap.val))    in move_swap_pte()
  1130  if (READ_ONCE(si->swap_map[swp_offset(entry)]) & SWAP_HAS_CACHE) {    in move_swap_pte()
  1185  swp_entry_t entry;    in move_pages_pte() local
  1385  entry = pte_to_swp_entry(orig_src_pte);    in move_pages_pte()
  1386  if (non_swap_entry(entry)) {    in move_pages_pte()
  1387  if (is_migration_entry(entry)) {    in move_pages_pte()
  1403  si = get_swap_device(entry);    in move_pages_pte()
  1421  folio = filemap_get_folio(swap_address_space(entry),    in move_pages_pte()
  1422  swap_cache_index(entry));    in move_pages_pte()
  [all …]
|
page_table_check.c
   182  static inline bool swap_cached_writable(swp_entry_t entry)    in swap_cached_writable() argument
   184  return is_writable_device_private_entry(entry) ||    in swap_cached_writable()
   185  is_writable_migration_entry(entry);    in swap_cached_writable()
|
/mm/kasan/
tags.c
   103  struct kasan_stack_ring_entry *entry;    in save_stack_info() local
   117  entry = &stack_ring.entries[pos % stack_ring.size];    in save_stack_info()
   120  old_ptr = READ_ONCE(entry->ptr);    in save_stack_info()
   123  if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))    in save_stack_info()
   126  old_stack = entry->track.stack;    in save_stack_info()
   128  entry->size = cache->object_size;    in save_stack_info()
   129  kasan_set_track(&entry->track, stack);    in save_stack_info()
   130  entry->is_free = is_free;    in save_stack_info()
   132  entry->ptr = object;    in save_stack_info()
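save_stack_info() claims its ring slot without a lock: take a position, read the slot's ptr, and try_cmpxchg it to STACK_RING_BUSY_PTR; only the winner fills the slot and then publishes the real object pointer. A C11 sketch of that claim protocol follows; the ring size, entry layout, and retry policy here are simplifications of the kernel code, not its exact behaviour.

```c
#include <stdatomic.h>
#include <stddef.h>

#define RING_SIZE 256
#define BUSY_PTR  ((void *)1)	/* sentinel; never a valid object pointer */

struct ring_entry {
	_Atomic(void *) ptr;
	size_t size;
};

static struct ring_entry ring[RING_SIZE];
static _Atomic size_t ring_pos;

static void ring_save(void *object, size_t size)
{
	struct ring_entry *entry;
	void *old;

	for (;;) {
		size_t pos = atomic_fetch_add(&ring_pos, 1);

		entry = &ring[pos % RING_SIZE];
		old = atomic_load(&entry->ptr);
		/* Skip slots another CPU is writing right now, and retry
		 * on a fresh slot if we lose the race to claim this one. */
		if (old != BUSY_PTR &&
		    atomic_compare_exchange_strong(&entry->ptr, &old, BUSY_PTR))
			break;
	}

	entry->size = size;			/* slot is ours: fill it ... */
	atomic_store(&entry->ptr, object);	/* ... then publish */
}
```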
|
report_tags.c
    34  struct kasan_stack_ring_entry *entry;    in kasan_complete_mode_report_info() local
    59  entry = &stack_ring.entries[i % stack_ring.size];    in kasan_complete_mode_report_info()
    61  if (kasan_reset_tag(entry->ptr) != info->object ||    in kasan_complete_mode_report_info()
    62  get_tag(entry->ptr) != get_tag(info->access_addr) ||    in kasan_complete_mode_report_info()
    63  info->cache->object_size != entry->size)    in kasan_complete_mode_report_info()
    66  if (entry->is_free) {    in kasan_complete_mode_report_info()
    74  memcpy(&info->free_track, &entry->track,    in kasan_complete_mode_report_info()
    89  memcpy(&info->alloc_track, &entry->track,    in kasan_complete_mode_report_info()
|
/mm/damon/
vaddr.c
   348  pte_t entry = huge_ptep_get(mm, addr, pte);    in damon_hugetlb_mkold() local
   354  if (pte_young(entry)) {    in damon_hugetlb_mkold()
   356  entry = pte_mkold(entry);    in damon_hugetlb_mkold()
   357  set_huge_pte_at(mm, addr, pte, entry, psize);    in damon_hugetlb_mkold()
   377  pte_t entry;    in damon_mkold_hugetlb_entry() local
   380  entry = huge_ptep_get(walk->mm, addr, pte);    in damon_mkold_hugetlb_entry()
   381  if (!pte_present(entry))    in damon_mkold_hugetlb_entry()
   513  pte_t entry;    in damon_young_hugetlb_entry() local
   516  entry = huge_ptep_get(walk->mm, addr, pte);    in damon_young_hugetlb_entry()
   517  if (!pte_present(entry))    in damon_young_hugetlb_entry()
   [all …]
|