Searched refs:refs (Results 1 – 8 of 8) sorted by relevance
/mm/
gup.c
  100   folio_put_refs(folio, refs);              in try_get_folio()
  116   refs *= GUP_PIN_COUNTING_BIAS;            in gup_put_folio()
  119   folio_put_refs(folio, refs);              in gup_put_folio()
  155   folio_ref_add(folio, refs);               in try_grab_folio()
  169   folio_ref_add(folio, refs);               in try_grab_folio()
  545   return try_get_folio(page, refs);         in try_grab_folio_fast()
  567   folio_put_refs(folio, refs);              in try_grab_folio_fast()
  2959  int refs;                                 in gup_fast_pmd_leaf() (local)
  2988  *nr += refs;                              in gup_fast_pmd_leaf()
  2999  int refs;                                 in gup_fast_pud_leaf() (local)
  [all …]
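The gup.c hits are the folio pin-accounting path: a FOLL_PIN pin is recorded as GUP_PIN_COUNTING_BIAS (1024) references, so pins are taken with folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS) and dropped by scaling refs the same way before folio_put_refs(). Below is a minimal user-space model of that bias scheme, with simplified stand-ins for struct folio and the kernel atomics; folio_maybe_pinned() here only approximates the kernel's folio_maybe_dma_pinned().

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Each FOLL_PIN pin costs this many references, so pinned folios
     * are distinguishable from folios with ordinary extra gets. */
    #define GUP_PIN_COUNTING_BIAS 1024

    struct folio { atomic_int refcount; };        /* stand-in, not the kernel's */

    static void folio_ref_add(struct folio *f, int refs)
    {
        atomic_fetch_add(&f->refcount, refs);
    }

    static void folio_put_refs(struct folio *f, int refs)
    {
        atomic_fetch_sub(&f->refcount, refs);     /* the kernel frees at zero */
    }

    /* Take @refs pins: scale by the bias, mirroring try_grab_folio(). */
    static void grab_folio_pinned(struct folio *f, int refs)
    {
        folio_ref_add(f, refs * GUP_PIN_COUNTING_BIAS);
    }

    /* Drop @refs pins: same shape as gup_put_folio() above. */
    static void put_folio_pinned(struct folio *f, int refs)
    {
        refs *= GUP_PIN_COUNTING_BIAS;
        folio_put_refs(f, refs);
    }

    /* A folio with at least one bias worth of references may be pinned. */
    static bool folio_maybe_pinned(struct folio *f)
    {
        return atomic_load(&f->refcount) >= GUP_PIN_COUNTING_BIAS;
    }

    int main(void)
    {
        struct folio f = { .refcount = 1 };       /* one base reference */
        grab_folio_pinned(&f, 2);                 /* two pins */
        printf("refcount %d, maybe pinned: %d\n",
               atomic_load(&f.refcount), folio_maybe_pinned(&f));
        put_folio_pinned(&f, 2);                  /* drop both pins */
        printf("refcount %d, maybe pinned: %d\n",
               atomic_load(&f.refcount), folio_maybe_pinned(&f));
        return 0;
    }

The bias trades refcount range for the ability to answer "is this folio pinned?" without a separate pin counter; that is why every get and put in the listing scales refs before touching the shared counter.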
workingset.c
  241   int refs = folio_lru_refs(folio);                        in lru_gen_eviction() (local)
  243   int tier = lru_tier_from_refs(refs, workingset);         in lru_gen_eviction()
  252   token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);  in lru_gen_eviction()
  286   int hist, tier, refs;                                    in lru_gen_refault() (local)
  308   refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + 1;          in lru_gen_refault()
  309   tier = lru_tier_from_refs(refs, workingset);             in lru_gen_refault()
  321   set_mask_bits(&folio->flags, LRU_REFS_MASK, (refs - 1UL) << LRU_REFS_PGOFF);  in lru_gen_refault()
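The workingset.c hits show the MGLRU shadow-entry round trip: lru_gen_eviction() packs the eviction generation and the folio's reference count into a token, and lru_gen_refault() unpacks them on refault to pick a tier. A sketch of just the bit packing, assuming an illustrative LRU_REFS_WIDTH of 2 (the kernel derives the real width from available page-flag bits):

    #include <stdio.h>

    #define LRU_REFS_WIDTH 2          /* assumed width, for illustration */
    #define BIT(n) (1UL << (n))

    /* Pack as in lru_gen_eviction() line 252:
     * token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0) */
    static unsigned long pack_token(unsigned long min_seq, int refs)
    {
        int stored = refs - 1 > 0 ? refs - 1 : 0;
        return (min_seq << LRU_REFS_WIDTH) | (unsigned long)stored;
    }

    /* Unpack as in lru_gen_refault() line 308:
     * refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + 1 */
    static void unpack_token(unsigned long token,
                             unsigned long *min_seq, int *refs)
    {
        *refs = (int)(token & (BIT(LRU_REFS_WIDTH) - 1)) + 1;
        *min_seq = token >> LRU_REFS_WIDTH;
    }

    int main(void)
    {
        unsigned long seq;
        int refs;
        unpack_token(pack_token(42, 3), &seq, &refs);
        printf("seq=%lu refs=%d\n", seq, refs);   /* seq=42 refs=3 */
        return 0;
    }

Storing refs - 1 rather than refs lets a narrow field still represent "accessed once", which is why the decode side adds the 1 back.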
swap.c
  946   void folios_put_refs(struct folio_batch *folios, unsigned int *refs)  in folios_put_refs() (argument)
  954   unsigned int nr_refs = refs ? refs[i] : 1;          in folios_put_refs()
  1016  int refs[PAGEVEC_SIZE];                             in release_pages() (local)
  1026  refs[fbatch.nr] = 1;                                in release_pages()
  1029  refs[fbatch.nr] = encoded_nr_pages(encoded[++i]);   in release_pages()
  1033  folios_put_refs(&fbatch, refs);                     in release_pages()
  1037  folios_put_refs(&fbatch, refs);                     in release_pages()
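folios_put_refs() is the batched release primitive: it drops a caller-supplied number of references from each folio in a folio_batch, with a NULL refs array meaning one reference apiece, and release_pages() builds the refs[] array it passes in. A simplified model of that contract (the real function also handles LRU removal, hugetlb folios, and locking):

    #include <stdatomic.h>
    #include <stdio.h>

    #define PAGEVEC_SIZE 31           /* the kernel's folio batch size */

    struct folio { atomic_int refcount; };                /* simplified */
    struct folio_batch {
        unsigned int nr;
        struct folio *folios[PAGEVEC_SIZE];
    };

    /* Drop refs[i] references from folio i; NULL @refs means 1 each,
     * mirroring the nr_refs line from folios_put_refs() above. */
    static void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
    {
        for (unsigned int i = 0; i < folios->nr; i++) {
            unsigned int nr_refs = refs ? refs[i] : 1;
            if (atomic_fetch_sub(&folios->folios[i]->refcount,
                                 (int)nr_refs) == (int)nr_refs)
                printf("folio %u hit zero, would be freed\n", i);
        }
        folios->nr = 0;               /* the batch is consumed */
    }

    int main(void)
    {
        struct folio a = { .refcount = 3 }, b = { .refcount = 1 };
        struct folio_batch batch = { .nr = 2, .folios = { &a, &b } };
        unsigned int refs[PAGEVEC_SIZE] = { 3, 1 };   /* per-folio counts */
        folios_put_refs(&batch, refs);
        return 0;
    }

Batching the puts lets the caller amortize lock acquisition across up to PAGEVEC_SIZE folios instead of taking it once per reference.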
swap_state.c
  251   unsigned int refs[PAGEVEC_SIZE];                    in free_pages_and_swap_cache() (local)
  258   refs[folios.nr] = 1;                                in free_pages_and_swap_cache()
  261   refs[folios.nr] = encoded_nr_pages(pages[++i]);     in free_pages_and_swap_cache()
  264   folios_put_refs(&folios, refs);                     in free_pages_and_swap_cache()
  267   folios_put_refs(&folios, refs);                     in free_pages_and_swap_cache()
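free_pages_and_swap_cache() fills its refs[] the same way release_pages() does: the input is an array of encoded page pointers, and a tag bit on an entry signals that the next array slot carries a reference count rather than a pointer, which is why both loops advance with pages[++i]. A sketch of that encoding convention with illustrative names (the kernel's helpers are encode_page() and encoded_nr_pages()):

    #include <stdint.h>
    #include <stdio.h>

    /* Pointers are word-aligned, so the low bit is free to mean
     * "the next slot holds a reference count, not a pointer". */
    #define NR_PAGES_NEXT 0x1UL

    typedef uintptr_t encoded_t;

    static encoded_t encode_page(void *page, int count_follows)
    {
        return (uintptr_t)page | (count_follows ? NR_PAGES_NEXT : 0);
    }

    static encoded_t encode_nr(unsigned int nr) { return (encoded_t)nr; }

    static void walk(encoded_t *pages, int nr)
    {
        for (int i = 0; i < nr; i++) {
            void *page = (void *)(pages[i] & ~NR_PAGES_NEXT);
            unsigned int refs = 1;                 /* default: one ref */
            if (pages[i] & NR_PAGES_NEXT)
                refs = (unsigned int)pages[++i];   /* consume count slot */
            printf("page %p: put %u refs\n", page, refs);
        }
    }

    int main(void)
    {
        int a, b;                                  /* stand-ins for pages */
        encoded_t pages[] = {
            encode_page(&a, 0),                    /* single reference */
            encode_page(&b, 1), encode_nr(512),    /* 512 references */
        };
        walk(pages, 3);
        return 0;
    }

Inlining the count into the pointer array keeps the common single-reference case at one array slot per page while still supporting bulk puts.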
hugetlb.c
  380   struct hugetlb_vma_lock, refs);                     in hugetlb_vma_lock_release()
  397   kref_put(&vma_lock->refs, hugetlb_vma_lock_release);  in __hugetlb_vma_unlock_write_put()
  458   kref_init(&vma_lock->refs);                         in hugetlb_vma_lock_alloc()
  1124  kref_init(&resv_map->refs);                         in resv_map_alloc()
  1147  struct resv_map *resv_map = container_of(ref, struct resv_map, refs);  in resv_map_release()
  1272  kref_put(&reservations->refs, resv_map_release);    in clear_vma_resv_huge_pages()
  5350  kref_get(&resv->refs);                              in hugetlb_vm_op_open()
  5401  kref_put(&resv->refs, resv_map_release);            in hugetlb_vm_op_close()
  7404  kref_put(&resv_map->refs, resv_map_release);        in hugetlb_reserve_pages()
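In hugetlb.c, refs is a kref member twice over: both resv_map and hugetlb_vma_lock get kref_init() at allocation, kref_get() when another holder appears, and kref_put() with a release callback that recovers the outer object via container_of(). A self-contained user-space model of that lifetime pattern for resv_map (fields simplified; the kernel's kref wraps refcount_t with saturation semantics, not a plain atomic):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct kref { atomic_int refcount; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void kref_init(struct kref *k) { atomic_store(&k->refcount, 1); }
    static void kref_get(struct kref *k)  { atomic_fetch_add(&k->refcount, 1); }

    /* Run @release when the last reference drops; returns 1 if it did. */
    static int kref_put(struct kref *k, void (*release)(struct kref *))
    {
        if (atomic_fetch_sub(&k->refcount, 1) == 1) {
            release(k);
            return 1;
        }
        return 0;
    }

    struct resv_map { long reserved; struct kref refs; };   /* simplified */

    static void resv_map_release(struct kref *ref)
    {
        struct resv_map *map = container_of(ref, struct resv_map, refs);
        printf("releasing resv_map with %ld reservations\n", map->reserved);
        free(map);
    }

    int main(void)
    {
        struct resv_map *map = malloc(sizeof(*map));
        map->reserved = 8;
        kref_init(&map->refs);                  /* as in resv_map_alloc() */
        kref_get(&map->refs);                   /* as in hugetlb_vm_op_open() */
        kref_put(&map->refs, resv_map_release); /* as in hugetlb_vm_op_close() */
        kref_put(&map->refs, resv_map_release); /* last put frees the map */
        return 0;
    }

Tying the release callback to the kref means every holder can drop its reference in any order; whoever puts last frees, which is exactly what the vm_op_open/vm_op_close pairing in the listing relies on across fork and unmap.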
vmscan.c
  3895  int refs = folio_lru_refs(folio);                        in inc_min_seq() (local)
  3907  if (refs + workingset != BIT(LRU_REFS_WIDTH) + 1) {      in inc_min_seq()
  3908  int tier = lru_tier_from_refs(refs, workingset);         in inc_min_seq()
  4471  int refs = folio_lru_refs(folio);                        in sort_folio() (local)
  4473  int tier = lru_tier_from_refs(refs, workingset);         in sort_folio()
  4495  if (tier > tier_idx || refs + workingset == BIT(LRU_REFS_WIDTH) + 1) {  in sort_folio()
  4500  if (refs + workingset != BIT(LRU_REFS_WIDTH) + 1) {      in sort_folio()
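The vmscan.c hits are MGLRU's tier logic: folio_lru_refs() reads the access count out of the folio flags, lru_tier_from_refs() maps it to a tier, and refs + workingset == BIT(LRU_REFS_WIDTH) + 1 tests whether the counter is saturated, in which case sort_folio() keeps the folio off the eviction path. Per the MGLRU design, a folio accessed n times through file descriptors lands in tier order_base_2(n); a sketch of that documented mapping, reusing the illustrative LRU_REFS_WIDTH from the workingset.c example:

    #include <stdio.h>

    #define LRU_REFS_WIDTH 2          /* assumed field width, as above */
    #define BIT(n) (1UL << (n))

    /* order_base_2(n) == ceil(log2(n)): the tier of a folio
     * accessed n times through file descriptors. */
    static int order_base_2(unsigned int n)
    {
        int order = 0;
        while ((1U << order) < n)
            order++;
        return order;
    }

    int main(void)
    {
        /* Mirrors the BIT(LRU_REFS_WIDTH) + 1 saturation checks in
         * sort_folio(): a saturated counter marks the top tier. */
        unsigned int saturated = BIT(LRU_REFS_WIDTH) + 1;

        for (unsigned int n = 1; n <= saturated; n++)
            printf("accesses=%u -> tier %d%s\n", n, order_base_2(n),
                   n == saturated ? " (saturated, protected)" : "");
        return 0;
    }

The logarithmic mapping keeps the number of tiers small (width + 2 here) while still separating once-accessed folios from heavily reused ones.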
page_owner.c
  212   if (atomic_try_cmpxchg_relaxed(&stack_record->count.refs, &old, 1))  in inc_stack_record_count()
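The single page_owner.c hit is a claim-once pattern: a freshly depoted stack record carries a sentinel refcount, and inc_stack_record_count() uses a relaxed cmpxchg to swing it from the sentinel to 1, so exactly one caller takes ownership of initializing it. The same idea in portable C11 atomics (the sentinel value and names here are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define REFCOUNT_SENTINEL (-1)    /* stands in for the kernel's saturated value */

    static atomic_int refs = REFCOUNT_SENTINEL;

    /* Returns 1 for exactly one caller: the one whose cmpxchg wins.
     * Relaxed ordering suffices; only the transition itself matters. */
    static int claim_once(void)
    {
        int old = REFCOUNT_SENTINEL;
        return atomic_compare_exchange_strong_explicit(&refs, &old, 1,
                                                       memory_order_relaxed,
                                                       memory_order_relaxed);
    }

    int main(void)
    {
        printf("first claim: %d\n", claim_once());   /* 1: we won */
        printf("second claim: %d\n", claim_once());  /* 0: already claimed */
        return 0;
    }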
internal.h
  1401  int __must_check try_grab_folio(struct folio *folio, int refs,
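The internal.h hit is the prototype whose definition shows up in the gup.c results above; __must_check expands on GCC/Clang to the warn_unused_result attribute, so a caller cannot silently drop a failed grab. A stand-alone illustration (the model function and its return convention are simplified, not the kernel's):

    #define __must_check __attribute__((__warn_unused_result__))

    struct folio;                     /* opaque here */

    static int __must_check try_grab_folio_model(struct folio *folio, int refs)
    {
        (void)folio;
        return refs > 0 ? 0 : -1;     /* 0 on success, negative on failure */
    }

    int main(void)
    {
        /* try_grab_folio_model(0, 1);   <- ignoring the result would warn */
        return try_grab_folio_model(0, 1);
    }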
Completed in 61 milliseconds