Lines matching refs: folio (mm/swap.c)

Each entry gives the source line number, the matching fragment, and the enclosing function; "argument" and "local" mark lines where folio is a parameter or a local variable.

73 static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,  in __page_cache_release()  argument
76 if (folio_test_lru(folio)) { in __page_cache_release()
77 folio_lruvec_relock_irqsave(folio, lruvecp, flagsp); in __page_cache_release()
78 lruvec_del_folio(*lruvecp, folio); in __page_cache_release()
79 __folio_clear_lru_flags(folio); in __page_cache_release()
87 static void page_cache_release(struct folio *folio) in page_cache_release() argument
92 __page_cache_release(folio, &lruvec, &flags); in page_cache_release()
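A hedged reconstruction of the two release helpers from the fragments above; page_cache_release()'s locals and the final unlock are not in the listing and are assumptions from context:

static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
                unsigned long *flagsp)
{
        if (folio_test_lru(folio)) {
                /* (re)take the lruvec lock only when this folio needs a different one */
                folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
                lruvec_del_folio(*lruvecp, folio);
                __folio_clear_lru_flags(folio);
        }
}

static void page_cache_release(struct folio *folio)
{
        struct lruvec *lruvec = NULL;
        unsigned long flags;

        __page_cache_release(folio, &lruvec, &flags);
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
}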
97 void __folio_put(struct folio *folio) in __folio_put() argument
99 if (unlikely(folio_is_zone_device(folio))) { in __folio_put()
100 free_zone_device_folio(folio); in __folio_put()
104 if (folio_test_hugetlb(folio)) { in __folio_put()
105 free_huge_folio(folio); in __folio_put()
109 page_cache_release(folio); in __folio_put()
110 folio_unqueue_deferred_split(folio); in __folio_put()
111 mem_cgroup_uncharge(folio); in __folio_put()
112 free_unref_page(&folio->page, folio_order(folio)); in __folio_put()
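__folio_put() as it appears to read around these lines; the early returns between the listed lines are assumed:

void __folio_put(struct folio *folio)
{
        if (unlikely(folio_is_zone_device(folio))) {
                free_zone_device_folio(folio);
                return;
        }

        if (folio_test_hugetlb(folio)) {
                free_huge_folio(folio);
                return;
        }

        /* ordinary folio: pull it off the LRU, then uncharge and free it */
        page_cache_release(folio);
        folio_unqueue_deferred_split(folio);
        mem_cgroup_uncharge(folio);
        free_unref_page(&folio->page, folio_order(folio));
}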
125 struct folio *folio, *next; in put_pages_list() local
128 list_for_each_entry_safe(folio, next, pages, lru) { in put_pages_list()
129 if (!folio_put_testzero(folio)) in put_pages_list()
131 if (folio_test_hugetlb(folio)) { in put_pages_list()
132 free_huge_folio(folio); in put_pages_list()
136 if (folio_batch_add(&fbatch, folio) > 0) in put_pages_list()
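A sketch of put_pages_list() based on the fragments above; the batch flush via free_unref_folios() and the re-initialisation of the list head are reconstructed from context, not shown in the listing:

void put_pages_list(struct list_head *pages)
{
        struct folio_batch fbatch;
        struct folio *folio, *next;

        folio_batch_init(&fbatch);
        list_for_each_entry_safe(folio, next, pages, lru) {
                if (!folio_put_testzero(folio))
                        continue;
                if (folio_test_hugetlb(folio)) {
                        free_huge_folio(folio);
                        continue;
                }
                /* LRU flag must already be clear: the folio arrived via ->lru */
                if (folio_batch_add(&fbatch, folio) > 0)
                        continue;
                /* batch is full: free what has accumulated so far (assumed) */
                free_unref_folios(&fbatch);
        }

        if (fbatch.nr)
                free_unref_folios(&fbatch);
        INIT_LIST_HEAD(pages);
}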
147 typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
149 static void lru_add(struct lruvec *lruvec, struct folio *folio) in lru_add() argument
151 int was_unevictable = folio_test_clear_unevictable(folio); in lru_add()
152 long nr_pages = folio_nr_pages(folio); in lru_add()
154 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in lru_add()
167 if (folio_evictable(folio)) { in lru_add()
171 folio_clear_active(folio); in lru_add()
172 folio_set_unevictable(folio); in lru_add()
180 folio->mlock_count = 0; in lru_add()
185 lruvec_add_folio(lruvec, folio); in lru_add()
186 trace_mm_lru_insertion(folio); in lru_add()
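lru_add() reconstructed from the fragments above; the UNEVICTABLE_PGRESCUED/PGCULLED accounting is not in the listing and is filled in from context:

static void lru_add(struct lruvec *lruvec, struct folio *folio)
{
        int was_unevictable = folio_test_clear_unevictable(folio);
        long nr_pages = folio_nr_pages(folio);

        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

        if (folio_evictable(folio)) {
                if (was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
        } else {
                folio_clear_active(folio);
                folio_set_unevictable(folio);
                /* assumed: start the mlock count at 0 and let mlock re-count it */
                folio->mlock_count = 0;
                if (!was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
        }

        lruvec_add_folio(lruvec, folio);
        trace_mm_lru_insertion(folio);
}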
196 struct folio *folio = fbatch->folios[i]; in folio_batch_move_lru() local
198 folio_lruvec_relock_irqsave(folio, &lruvec, &flags); in folio_batch_move_lru()
199 move_fn(lruvec, folio); in folio_batch_move_lru()
201 folio_set_lru(folio); in folio_batch_move_lru()
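folio_batch_move_lru() as reconstructed from the fragments above; the final unlock and the folios_put() that drops the batch references are assumed from context:

static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
{
        int i;
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
                move_fn(lruvec, folio);

                /* the LRU flag was cleared when the folio entered the batch */
                folio_set_lru(folio);
        }

        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
        folios_put(fbatch);
}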
210 struct folio *folio, move_fn_t move_fn, in __folio_batch_add_and_move() argument
215 if (on_lru && !folio_test_clear_lru(folio)) in __folio_batch_add_and_move()
218 folio_get(folio); in __folio_batch_add_and_move()
225 if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) || in __folio_batch_add_and_move()
235 #define folio_batch_add_and_move(folio, op, on_lru) \ argument
238 folio, \
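A hedged sketch of __folio_batch_add_and_move() and its wrapper macro; the cpu_fbatches lock names and the offsetof() trick used to pick the IRQ-safe lock are reconstructed from memory and should be treated as assumptions:

static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
                struct folio *folio, move_fn_t move_fn,
                bool on_lru, bool disable_irq)
{
        unsigned long flags;

        /* only move folios we managed to isolate from the LRU */
        if (on_lru && !folio_test_clear_lru(folio))
                return;

        folio_get(folio);

        if (disable_irq)
                local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
        else
                local_lock(&cpu_fbatches.lock);

        /*
         * Drain the per-CPU batch when it fills up, for large folios, or
         * when per-CPU LRU batching has been disabled.
         */
        if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
            lru_cache_disabled())
                folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);

        if (disable_irq)
                local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
        else
                local_unlock(&cpu_fbatches.lock);
}

#define folio_batch_add_and_move(folio, op, on_lru)             \
        __folio_batch_add_and_move(                             \
                &cpu_fbatches.op,                               \
                folio,                                          \
                op,                                             \
                on_lru,                                         \
                offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \
        )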
244 static void lru_move_tail(struct lruvec *lruvec, struct folio *folio) in lru_move_tail() argument
246 if (folio_test_unevictable(folio)) in lru_move_tail()
249 lruvec_del_folio(lruvec, folio); in lru_move_tail()
250 folio_clear_active(folio); in lru_move_tail()
251 lruvec_add_folio_tail(lruvec, folio); in lru_move_tail()
252 __count_vm_events(PGROTATED, folio_nr_pages(folio)); in lru_move_tail()
262 void folio_rotate_reclaimable(struct folio *folio) in folio_rotate_reclaimable() argument
264 if (folio_test_locked(folio) || folio_test_dirty(folio) || in folio_rotate_reclaimable()
265 folio_test_unevictable(folio)) in folio_rotate_reclaimable()
268 folio_batch_add_and_move(folio, lru_move_tail, true); in folio_rotate_reclaimable()
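lru_move_tail() and folio_rotate_reclaimable() pieced together from the lines above; only the early returns are filled in:

static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
{
        if (folio_test_unevictable(folio))
                return;

        lruvec_del_folio(lruvec, folio);
        folio_clear_active(folio);
        lruvec_add_folio_tail(lruvec, folio);
        __count_vm_events(PGROTATED, folio_nr_pages(folio));
}

/*
 * Called as writeback completes: a clean, unlocked, evictable folio is
 * rotated to the tail of the inactive list so reclaim finds it quickly.
 */
void folio_rotate_reclaimable(struct folio *folio)
{
        if (folio_test_locked(folio) || folio_test_dirty(folio) ||
            folio_test_unevictable(folio))
                return;

        folio_batch_add_and_move(folio, lru_move_tail, true);
}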
323 void lru_note_cost_refault(struct folio *folio) in lru_note_cost_refault() argument
325 lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio), in lru_note_cost_refault()
326 folio_nr_pages(folio), 0); in lru_note_cost_refault()
329 static void lru_activate(struct lruvec *lruvec, struct folio *folio) in lru_activate() argument
331 long nr_pages = folio_nr_pages(folio); in lru_activate()
333 if (folio_test_active(folio) || folio_test_unevictable(folio)) in lru_activate()
337 lruvec_del_folio(lruvec, folio); in lru_activate()
338 folio_set_active(folio); in lru_activate()
339 lruvec_add_folio(lruvec, folio); in lru_activate()
340 trace_mm_lru_activate(folio); in lru_activate()
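lru_activate() from the fragments above; the PGACTIVATE vm/memcg event accounting at the end is assumed from context:

static void lru_activate(struct lruvec *lruvec, struct folio *folio)
{
        long nr_pages = folio_nr_pages(folio);

        if (folio_test_active(folio) || folio_test_unevictable(folio))
                return;

        lruvec_del_folio(lruvec, folio);
        folio_set_active(folio);
        lruvec_add_folio(lruvec, folio);
        trace_mm_lru_activate(folio);

        __count_vm_events(PGACTIVATE, nr_pages);
        __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
}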
355 void folio_activate(struct folio *folio) in folio_activate() argument
357 if (folio_test_active(folio) || folio_test_unevictable(folio)) in folio_activate()
360 folio_batch_add_and_move(folio, lru_activate, true); in folio_activate()
368 void folio_activate(struct folio *folio) in folio_activate() argument
372 if (!folio_test_clear_lru(folio)) in folio_activate()
375 lruvec = folio_lruvec_lock_irq(folio); in folio_activate()
376 lru_activate(lruvec, folio); in folio_activate()
378 folio_set_lru(folio); in folio_activate()
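The two folio_activate() definitions above read like an SMP/UP split; a hedged reconstruction (the #ifdef and the unlock are assumptions from context):

#ifdef CONFIG_SMP
void folio_activate(struct folio *folio)
{
        if (folio_test_active(folio) || folio_test_unevictable(folio))
                return;

        folio_batch_add_and_move(folio, lru_activate, true);
}
#else
/* UP variant: no per-CPU batch, operate on the lruvec directly */
void folio_activate(struct folio *folio)
{
        struct lruvec *lruvec;

        if (!folio_test_clear_lru(folio))
                return;

        lruvec = folio_lruvec_lock_irq(folio);
        lru_activate(lruvec, folio);
        unlock_page_lruvec_irq(lruvec);
        folio_set_lru(folio);
}
#endif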
382 static void __lru_cache_activate_folio(struct folio *folio) in __lru_cache_activate_folio() argument
401 struct folio *batch_folio = fbatch->folios[i]; in __lru_cache_activate_folio()
403 if (batch_folio == folio) { in __lru_cache_activate_folio()
404 folio_set_active(folio); in __lru_cache_activate_folio()
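__lru_cache_activate_folio() reconstructed from the fragments above; the local lock and the backwards scan over the lru_add batch are filled in from context:

static void __lru_cache_activate_folio(struct folio *folio)
{
        struct folio_batch *fbatch;
        int i;

        local_lock(&cpu_fbatches.lock);
        fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);

        /*
         * Scan backwards on the assumption that the folio being activated
         * was only just added to this CPU's lru_add batch.
         */
        for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
                struct folio *batch_folio = fbatch->folios[i];

                if (batch_folio == folio) {
                        folio_set_active(folio);
                        break;
                }
        }

        local_unlock(&cpu_fbatches.lock);
}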
413 static void folio_inc_refs(struct folio *folio) in folio_inc_refs() argument
415 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_inc_refs()
417 if (folio_test_unevictable(folio)) in folio_inc_refs()
420 if (!folio_test_referenced(folio)) { in folio_inc_refs()
421 folio_set_referenced(folio); in folio_inc_refs()
425 if (!folio_test_workingset(folio)) { in folio_inc_refs()
426 folio_set_workingset(folio); in folio_inc_refs()
438 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_inc_refs()
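The first folio_inc_refs() is the CONFIG_LRU_GEN (MGLRU) variant, the second its stub; a hedged reconstruction of the MGLRU version (the LRU_REFS_MASK/LRU_REFS_PGOFF arithmetic in the cmpxchg loop is an assumption from context):

static void folio_inc_refs(struct folio *folio)
{
        unsigned long new_flags, old_flags = READ_ONCE(folio->flags);

        if (folio_test_unevictable(folio))
                return;

        if (!folio_test_referenced(folio)) {
                folio_set_referenced(folio);
                return;
        }

        if (!folio_test_workingset(folio)) {
                folio_set_workingset(folio);
                return;
        }

        /* bump the reference counter kept in folio->flags, saturating at the max */
        do {
                new_flags = old_flags & LRU_REFS_MASK;
                if (new_flags == LRU_REFS_MASK)
                        break;

                new_flags += BIT(LRU_REFS_PGOFF);
                new_flags |= old_flags & ~LRU_REFS_MASK;
        } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
}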
441 static void folio_inc_refs(struct folio *folio) in folio_inc_refs() argument
459 void folio_mark_accessed(struct folio *folio) in folio_mark_accessed() argument
462 folio_inc_refs(folio); in folio_mark_accessed()
466 if (!folio_test_referenced(folio)) { in folio_mark_accessed()
467 folio_set_referenced(folio); in folio_mark_accessed()
468 } else if (folio_test_unevictable(folio)) { in folio_mark_accessed()
474 } else if (!folio_test_active(folio)) { in folio_mark_accessed()
481 if (folio_test_lru(folio)) in folio_mark_accessed()
482 folio_activate(folio); in folio_mark_accessed()
484 __lru_cache_activate_folio(folio); in folio_mark_accessed()
485 folio_clear_referenced(folio); in folio_mark_accessed()
486 workingset_activation(folio); in folio_mark_accessed()
488 if (folio_test_idle(folio)) in folio_mark_accessed()
489 folio_clear_idle(folio); in folio_mark_accessed()
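folio_mark_accessed() as it appears to read around these lines; the comments summarising each branch are mine:

void folio_mark_accessed(struct folio *folio)
{
        if (lru_gen_enabled()) {
                folio_inc_refs(folio);
                return;
        }

        if (!folio_test_referenced(folio)) {
                folio_set_referenced(folio);
        } else if (folio_test_unevictable(folio)) {
                /* unevictable folios are never rotated; nothing to do */
        } else if (!folio_test_active(folio)) {
                /*
                 * Referenced twice while inactive: promote it.  If it is on
                 * the LRU, queue it through the activate batch; if it is
                 * still sitting in a folio_batch, just set the active flag
                 * and let the drain move it.
                 */
                if (folio_test_lru(folio))
                        folio_activate(folio);
                else
                        __lru_cache_activate_folio(folio);
                folio_clear_referenced(folio);
                workingset_activation(folio);
        }
        if (folio_test_idle(folio))
                folio_clear_idle(folio);
}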
502 void folio_add_lru(struct folio *folio) in folio_add_lru() argument
504 VM_BUG_ON_FOLIO(folio_test_active(folio) && in folio_add_lru()
505 folio_test_unevictable(folio), folio); in folio_add_lru()
506 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in folio_add_lru()
509 if (lru_gen_enabled() && !folio_test_unevictable(folio) && in folio_add_lru()
511 folio_set_active(folio); in folio_add_lru()
513 folio_batch_add_and_move(folio, lru_add, false); in folio_add_lru()
525 void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma) in folio_add_lru_vma() argument
527 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in folio_add_lru_vma()
530 mlock_new_folio(folio); in folio_add_lru_vma()
532 folio_add_lru(folio); in folio_add_lru_vma()
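folio_add_lru() and folio_add_lru_vma() reconstructed from the fragments above; the lru_gen_in_fault()/PF_MEMALLOC half of the MGLRU condition and the VM_LOCKED test are assumptions from context:

void folio_add_lru(struct folio *folio)
{
        VM_BUG_ON_FOLIO(folio_test_active(folio) &&
                        folio_test_unevictable(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

        /* under MGLRU, folios brought in by a fault start out active */
        if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
            lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
                folio_set_active(folio);

        folio_batch_add_and_move(folio, lru_add, false);
}

void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
{
        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

        if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
                mlock_new_folio(folio);
        else
                folio_add_lru(folio);
}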
556 static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio) in lru_deactivate_file() argument
558 bool active = folio_test_active(folio); in lru_deactivate_file()
559 long nr_pages = folio_nr_pages(folio); in lru_deactivate_file()
561 if (folio_test_unevictable(folio)) in lru_deactivate_file()
565 if (folio_mapped(folio)) in lru_deactivate_file()
568 lruvec_del_folio(lruvec, folio); in lru_deactivate_file()
569 folio_clear_active(folio); in lru_deactivate_file()
570 folio_clear_referenced(folio); in lru_deactivate_file()
572 if (folio_test_writeback(folio) || folio_test_dirty(folio)) { in lru_deactivate_file()
579 lruvec_add_folio(lruvec, folio); in lru_deactivate_file()
580 folio_set_reclaim(folio); in lru_deactivate_file()
586 lruvec_add_folio_tail(lruvec, folio); in lru_deactivate_file()
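A sketch of lru_deactivate_file(); the PGDEACTIVATE accounting for previously-active folios at the end is assumed:

static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
{
        bool active = folio_test_active(folio);
        long nr_pages = folio_nr_pages(folio);

        if (folio_test_unevictable(folio))
                return;

        /* some processes are still using the folio */
        if (folio_mapped(folio))
                return;

        lruvec_del_folio(lruvec, folio);
        folio_clear_active(folio);
        folio_clear_referenced(folio);

        if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
                /* keep it on the inactive list but ask writeback to reclaim it */
                lruvec_add_folio(lruvec, folio);
                folio_set_reclaim(folio);
        } else {
                /* already clean: rotate it to the tail so reclaim sees it first */
                lruvec_add_folio_tail(lruvec, folio);
                __count_vm_events(PGROTATED, nr_pages);
        }

        if (active) {
                __count_vm_events(PGDEACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
                                     nr_pages);
        }
}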
597 static void lru_deactivate(struct lruvec *lruvec, struct folio *folio) in lru_deactivate() argument
599 long nr_pages = folio_nr_pages(folio); in lru_deactivate()
601 if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled())) in lru_deactivate()
604 lruvec_del_folio(lruvec, folio); in lru_deactivate()
605 folio_clear_active(folio); in lru_deactivate()
606 folio_clear_referenced(folio); in lru_deactivate()
607 lruvec_add_folio(lruvec, folio); in lru_deactivate()
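lru_deactivate() from the lines above, with the assumed PGDEACTIVATE accounting at the end:

static void lru_deactivate(struct lruvec *lruvec, struct folio *folio)
{
        long nr_pages = folio_nr_pages(folio);

        if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
                return;

        lruvec_del_folio(lruvec, folio);
        folio_clear_active(folio);
        folio_clear_referenced(folio);
        lruvec_add_folio(lruvec, folio);

        __count_vm_events(PGDEACTIVATE, nr_pages);
        __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
}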
613 static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio) in lru_lazyfree() argument
615 long nr_pages = folio_nr_pages(folio); in lru_lazyfree()
617 if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) || in lru_lazyfree()
618 folio_test_swapcache(folio) || folio_test_unevictable(folio)) in lru_lazyfree()
621 lruvec_del_folio(lruvec, folio); in lru_lazyfree()
622 folio_clear_active(folio); in lru_lazyfree()
623 folio_clear_referenced(folio); in lru_lazyfree()
629 folio_clear_swapbacked(folio); in lru_lazyfree()
630 lruvec_add_folio(lruvec, folio); in lru_lazyfree()
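lru_lazyfree() from the lines above; clearing the swapbacked flag is what marks the folio lazyfree, and the PGLAZYFREE accounting is assumed:

static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
{
        long nr_pages = folio_nr_pages(folio);

        if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
            folio_test_swapcache(folio) || folio_test_unevictable(folio))
                return;

        lruvec_del_folio(lruvec, folio);
        folio_clear_active(folio);
        folio_clear_referenced(folio);
        /*
         * Lazyfree folios are clean anonymous folios; clearing the
         * swapbacked flag distinguishes them from normal anonymous folios.
         */
        folio_clear_swapbacked(folio);
        lruvec_add_folio(lruvec, folio);

        __count_vm_events(PGLAZYFREE, nr_pages);
        __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
}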
685 void deactivate_file_folio(struct folio *folio) in deactivate_file_folio() argument
688 if (folio_test_unevictable(folio)) in deactivate_file_folio()
691 folio_batch_add_and_move(folio, lru_deactivate_file, true); in deactivate_file_folio()
702 void folio_deactivate(struct folio *folio) in folio_deactivate() argument
704 if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled())) in folio_deactivate()
707 folio_batch_add_and_move(folio, lru_deactivate, true); in folio_deactivate()
717 void folio_mark_lazyfree(struct folio *folio) in folio_mark_lazyfree() argument
719 if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) || in folio_mark_lazyfree()
720 folio_test_swapcache(folio) || folio_test_unevictable(folio)) in folio_mark_lazyfree()
723 folio_batch_add_and_move(folio, lru_lazyfree, true); in folio_mark_lazyfree()
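The three public entry points above just filter and then queue through the per-CPU batches; only the early returns are filled in:

void deactivate_file_folio(struct folio *folio)
{
        /* deactivating an unevictable folio would not accelerate reclaim */
        if (folio_test_unevictable(folio))
                return;

        folio_batch_add_and_move(folio, lru_deactivate_file, true);
}

void folio_deactivate(struct folio *folio)
{
        if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
                return;

        folio_batch_add_and_move(folio, lru_deactivate, true);
}

void folio_mark_lazyfree(struct folio *folio)
{
        if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
            folio_test_swapcache(folio) || folio_test_unevictable(folio))
                return;

        folio_batch_add_and_move(folio, lru_lazyfree, true);
}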
947 struct folio *folio = folios->folios[i]; in folios_put_refs() local
950 if (is_huge_zero_folio(folio)) in folios_put_refs()
953 if (folio_is_zone_device(folio)) { in folios_put_refs()
958 if (put_devmap_managed_folio_refs(folio, nr_refs)) in folios_put_refs()
960 if (folio_ref_sub_and_test(folio, nr_refs)) in folios_put_refs()
961 free_zone_device_folio(folio); in folios_put_refs()
965 if (!folio_ref_sub_and_test(folio, nr_refs)) in folios_put_refs()
969 if (folio_test_hugetlb(folio)) { in folios_put_refs()
974 free_huge_folio(folio); in folios_put_refs()
977 folio_unqueue_deferred_split(folio); in folios_put_refs()
978 __page_cache_release(folio, &lruvec, &flags); in folios_put_refs()
981 folios->folios[j] = folio; in folios_put_refs()
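A hedged reconstruction of folios_put_refs(); the lruvec unlocks in the special-case branches, the batch compaction, and the final uncharge/free are filled in from context and may differ in detail from the real function:

void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
{
        int i, j;
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;

        for (i = 0, j = 0; i < folios->nr; i++) {
                struct folio *folio = folios->folios[i];
                unsigned int nr_refs = refs ? refs[i] : 1;

                if (is_huge_zero_folio(folio))
                        continue;

                if (folio_is_zone_device(folio)) {
                        if (lruvec) {
                                unlock_page_lruvec_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
                        if (put_devmap_managed_folio_refs(folio, nr_refs))
                                continue;
                        if (folio_ref_sub_and_test(folio, nr_refs))
                                free_zone_device_folio(folio);
                        continue;
                }

                if (!folio_ref_sub_and_test(folio, nr_refs))
                        continue;

                /* hugetlb has its own memcg and its own freeing path */
                if (folio_test_hugetlb(folio)) {
                        if (lruvec) {
                                unlock_page_lruvec_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
                        free_huge_folio(folio);
                        continue;
                }
                folio_unqueue_deferred_split(folio);
                __page_cache_release(folio, &lruvec, &flags);

                /* compact the batch so only folios still to be freed remain */
                if (j != i)
                        folios->folios[j] = folio;
                j++;
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
        if (!j) {
                folio_batch_reinit(folios);
                return;
        }

        folios->nr = j;
        mem_cgroup_uncharge_folios(folios);
        free_unref_folios(folios);
}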
1019 struct folio *folio = page_folio(encoded_page_ptr(encoded[i])); in release_pages() local
1027 if (folio_batch_add(&fbatch, folio) > 0) in release_pages()
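release_pages() appears to just translate its encoded-page argument into folios and feed them to folios_put_refs() in batches; a rough, hedged sketch (the encoded-page flag handling and the refs[] bookkeeping are assumptions):

void release_pages(release_pages_arg arg, int nr)
{
        int i;
        struct encoded_page **encoded = arg.encoded_pages;
        struct folio_batch fbatch;
        unsigned int refs[PAGEVEC_SIZE];

        folio_batch_init(&fbatch);
        for (i = 0; i < nr; i++) {
                /* turn whatever was passed in into a folio */
                struct folio *folio = page_folio(encoded_page_ptr(encoded[i]));

                /* an encoded "nr pages" entry may follow, giving the refcount to drop */
                refs[fbatch.nr] = 1;
                if (unlikely(encoded_page_flags(encoded[i]) &
                             ENCODED_PAGE_BIT_NR_PAGES_NEXT))
                        refs[fbatch.nr] = encoded_nr_pages(encoded[++i]);

                if (folio_batch_add(&fbatch, folio) > 0)
                        continue;
                folios_put_refs(&fbatch, refs);
        }

        if (fbatch.nr)
                folios_put_refs(&fbatch, refs);
}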
1071 struct folio *folio = fbatch->folios[i]; in folio_batch_remove_exceptionals() local
1072 if (!xa_is_value(folio)) in folio_batch_remove_exceptionals()
1073 fbatch->folios[j++] = folio; in folio_batch_remove_exceptionals()
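folio_batch_remove_exceptionals() is almost fully visible above; a sketch completing it:

void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
{
        unsigned int i, j;

        /* compact the batch, dropping xarray value (exceptional) entries */
        for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];
                if (!xa_is_value(folio))
                        fbatch->folios[j++] = folio;
        }
        fbatch->nr = j;
}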