Lines matching refs:folio in mm/mlock.c:
In __mlock_folio():

 61  static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
 64          if (!folio_test_clear_lru(folio))
 67          lruvec = folio_lruvec_relock_irq(folio, lruvec);
 69          if (unlikely(folio_evictable(folio))) {
 75                  if (folio_test_unevictable(folio)) {
 76                          lruvec_del_folio(lruvec, folio);
 77                          folio_clear_unevictable(folio);
 78                          lruvec_add_folio(lruvec, folio);
 81                                          folio_nr_pages(folio));
 86          if (folio_test_unevictable(folio)) {
 87                  if (folio_test_mlocked(folio))
 88                          folio->mlock_count++;
 92          lruvec_del_folio(lruvec, folio);
 93          folio_clear_active(folio);
 94          folio_set_unevictable(folio);
 95          folio->mlock_count = !!folio_test_mlocked(folio);
 96          lruvec_add_folio(lruvec, folio);
 97          __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
 99          folio_set_lru(folio);
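__mlock_folio() claims the folio up front with folio_test_clear_lru() (line 64): only the caller that atomically clears the LRU flag may rearrange the folio's list linkage, and the flag is put back once the move is finished (line 99). __munlock_folio() below uses the same idiom (lines 127 and 160). A minimal userspace model of that test-and-clear ownership pattern in C11 atomics; every name here is an illustrative stand-in, not kernel API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define LRU_BIT 0x1u

    struct folio_model {
            atomic_uint flags;              /* stand-in for folio->flags */
    };

    /* Model of folio_test_clear_lru(): atomically clear the LRU bit and
     * report whether this caller was the one that cleared it.  Whoever
     * sees 'true' owns the LRU linkage until the bit is set again. */
    static bool test_clear_lru(struct folio_model *f)
    {
            unsigned int old = atomic_fetch_and(&f->flags, ~LRU_BIT);
            return old & LRU_BIT;
    }

    static void set_lru(struct folio_model *f)
    {
            atomic_fetch_or(&f->flags, LRU_BIT);
    }

    int main(void)
    {
            struct folio_model f = { .flags = LRU_BIT };

            printf("first caller claims the folio: %d\n", test_clear_lru(&f));  /* 1 */
            printf("second caller must back off:   %d\n", test_clear_lru(&f));  /* 0 */
            set_lru(&f);    /* release, as folio_set_lru() at line 99 does */
            return 0;
    }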
In __mlock_new_folio():

103  static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)
105          VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
107          lruvec = folio_lruvec_relock_irq(folio, lruvec);
110          if (unlikely(folio_evictable(folio)))
113          folio_set_unevictable(folio);
114          folio->mlock_count = !!folio_test_mlocked(folio);
115          __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
117          lruvec_add_folio(lruvec, folio);
118          folio_set_lru(folio);
In __munlock_folio():

122  static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec)
124          int nr_pages = folio_nr_pages(folio);
127          if (!folio_test_clear_lru(folio))
131          lruvec = folio_lruvec_relock_irq(folio, lruvec);
133          if (folio_test_unevictable(folio)) {
135                  if (folio->mlock_count)
136                          folio->mlock_count--;
137                  if (folio->mlock_count)
143          if (folio_test_clear_mlocked(folio)) {
144                  __zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
145                  if (isolated || !folio_test_unevictable(folio))
152          if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) {
153                  lruvec_del_folio(lruvec, folio);
154                  folio_clear_unevictable(folio);
155                  lruvec_add_folio(lruvec, folio);
160                  folio_set_lru(folio);
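Together with __mlock_folio() above, this completes the counted-pin scheme: an already-unevictable folio gains a count on each extra mlock (line 88) and loses one on each munlock (line 136), and only when the count reaches zero, and the folio is otherwise evictable, is it rescued back onto the normal LRU (lines 152-155). A deliberately simplified standalone model of that bookkeeping, assuming an exact count (the kernel also tolerates undercounting, hence the second check at line 137); none of these names are kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    /* mlock_count is only meaningful while the folio sits on the
     * unevictable list, mirroring the kernel's layout. */
    struct folio_model {
            int  mlock_count;
            bool unevictable;
    };

    static void mlock_one(struct folio_model *f)
    {
            if (f->unevictable)
                    f->mlock_count++;               /* cf. line 88 */
            else {
                    f->unevictable = true;          /* cull: cf. lines 92-97 */
                    f->mlock_count = 1;
            }
    }

    static void munlock_one(struct folio_model *f)
    {
            if (f->unevictable && f->mlock_count)
                    f->mlock_count--;               /* cf. lines 135-136 */
            if (f->unevictable && !f->mlock_count)
                    f->unevictable = false;         /* rescue: cf. lines 152-155 */
    }

    int main(void)
    {
            struct folio_model f = { 0 };

            mlock_one(&f);
            mlock_one(&f);                          /* two overlapping mlocks */
            munlock_one(&f);
            printf("after one munlock, unevictable: %d\n", f.unevictable);   /* 1 */
            munlock_one(&f);
            printf("after both munlocks, unevictable: %d\n", f.unevictable); /* 0 */
            return 0;
    }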
In mlock_lru():

169  static inline struct folio *mlock_lru(struct folio *folio)
171          return (struct folio *)((unsigned long)folio + LRU_FOLIO);

In mlock_new():

174  static inline struct folio *mlock_new(struct folio *folio)
176          return (struct folio *)((unsigned long)folio + NEW_FOLIO);
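These two helpers smuggle an operation code into the folio pointer itself: struct folio is word-aligned, so the bottom two bits of a real folio pointer are always zero and can carry the LRU_FOLIO or NEW_FOLIO tag while the pointer sits in the batch. mlock_folio_batch() masks the bits back out (lines 195-196 below), and an untagged pointer, as queued by munlock_folio() (line 302), falls through to the munlock case (line 204). A standalone round trip of the same tagging trick; the 0x1/0x2 tag values match the LRU_FOLIO/NEW_FOLIO definitions in mm/mlock.c, everything else is a stand-in:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LRU_FOLIO 0x1UL         /* tag values as defined in mm/mlock.c */
    #define NEW_FOLIO 0x2UL

    struct folio { long dummy; };   /* stand-in; the real struct is aligned too */

    static struct folio *mlock_lru(struct folio *folio)
    {
            return (struct folio *)((unsigned long)folio + LRU_FOLIO);
    }

    int main(void)
    {
            static struct folio f;
            struct folio *tagged, *untagged;
            unsigned long mlock;

            /* Alignment guarantees the low two bits of &f are free. */
            assert(((uintptr_t)&f & (LRU_FOLIO | NEW_FOLIO)) == 0);

            tagged = mlock_lru(&f);                               /* encode */
            mlock = (unsigned long)tagged & (LRU_FOLIO | NEW_FOLIO);
            untagged = (struct folio *)((unsigned long)tagged - mlock);

            printf("tag %#lx, pointer restored: %d\n", mlock, untagged == &f);
            return 0;
    }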
In mlock_folio_batch():

190          struct folio *folio;
194                  folio = fbatch->folios[i];
195                  mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
196                  folio = (struct folio *)((unsigned long)folio - mlock);
197                  fbatch->folios[i] = folio;
200                          lruvec = __mlock_folio(folio, lruvec);
202                          lruvec = __mlock_new_folio(folio, lruvec);
204                          lruvec = __munlock_folio(folio, lruvec);
In mlock_folio():

243  void mlock_folio(struct folio *folio)
250          if (!folio_test_set_mlocked(folio)) {
251                  int nr_pages = folio_nr_pages(folio);
253                  zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
257          folio_get(folio);
258          if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
259              folio_test_large(folio) || lru_cache_disabled())
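mlock_folio() never touches the LRU directly: it takes a folio reference (line 257), queues the tagged pointer in a per-CPU folio_batch, and flushes through mlock_folio_batch() only when the batch fills up, the folio is large, or LRU batching is disabled (lines 258-259), so one lruvec lock is amortized over many folios. A minimal sketch of that add-until-full-then-flush pattern; the batch size and all names here are illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    #define BATCH_SIZE 15                   /* illustrative capacity */

    struct folio { long dummy; };

    struct batch_model {
            unsigned int nr;
            struct folio *folios[BATCH_SIZE];
    };

    /* Returns false once the batch is full, mirroring the sense of
     * folio_batch_add(), which reports the space remaining. */
    static bool batch_add(struct batch_model *b, struct folio *f)
    {
            b->folios[b->nr++] = f;
            return b->nr < BATCH_SIZE;
    }

    static void batch_flush(struct batch_model *b)
    {
            /* In the kernel this is mlock_folio_batch(): the lruvec lock
             * is taken once for everything queued here. */
            printf("flushing %u folios\n", b->nr);
            b->nr = 0;
    }

    static void mlock_folio_model(struct batch_model *b, struct folio *f,
                                  bool large, bool batching_disabled)
    {
            if (!batch_add(b, f) || large || batching_disabled)
                    batch_flush(b);
    }

    int main(void)
    {
            static struct folio folios[40];
            struct batch_model b = { 0 };

            for (int i = 0; i < 40; i++)
                    mlock_folio_model(&b, &folios[i], false, false);
            batch_flush(&b);        /* drain the tail, as a later flush would */
            return 0;
    }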
In mlock_new_folio():

268  void mlock_new_folio(struct folio *folio)
271          int nr_pages = folio_nr_pages(folio);
275          folio_set_mlocked(folio);
277          zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
280          folio_get(folio);
281          if (!folio_batch_add(fbatch, mlock_new(folio)) ||
282              folio_test_large(folio) || lru_cache_disabled())
In munlock_folio():

291  void munlock_folio(struct folio *folio)
301          folio_get(folio);
302          if (!folio_batch_add(fbatch, folio) ||
303              folio_test_large(folio) || lru_cache_disabled())
In mlock_pte_range():

315          struct folio *folio;
323                  folio = page_folio(pmd_page(*pmd));
325                          mlock_folio(folio);
327                          munlock_folio(folio);
335                  folio = vm_normal_folio(vma, addr, *pte);
336                  if (!folio || folio_is_zone_device(folio))
338                  if (folio_test_large(folio))
341                          mlock_folio(folio);
343                          munlock_folio(folio);
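mlock_pte_range() resolves the folio either once per PMD for a huge mapping (line 323) or per PTE otherwise (line 335), skipping folios it cannot handle here (zone-device, and large folios at PTE level); both paths then end in the same dispatch on the VMA flags, mlocking when VM_LOCKED is set and munlocking when it has been cleared. A condensed model of that final dispatch with stub types; the VM_LOCKED value matches include/linux/mm.h, the rest is illustrative:

    #include <stdio.h>

    #define VM_LOCKED 0x00002000UL  /* as in include/linux/mm.h */

    struct folio { long dummy; };
    struct vma_model { unsigned long vm_flags; };

    static void mlock_folio(struct folio *f)   { (void)f; puts("mlock_folio"); }
    static void munlock_folio(struct folio *f) { (void)f; puts("munlock_folio"); }

    /* Common tail of both the PMD and PTE branches: the walk decides
     * which folio, the VMA flags decide what happens to it. */
    static void dispatch(struct vma_model *vma, struct folio *folio)
    {
            if (vma->vm_flags & VM_LOCKED)
                    mlock_folio(folio);             /* cf. lines 325 and 341 */
            else
                    munlock_folio(folio);           /* cf. lines 327 and 343 */
    }

    int main(void)
    {
            struct folio f;
            struct vma_model locked   = { .vm_flags = VM_LOCKED };
            struct vma_model unlocked = { .vm_flags = 0 };

            dispatch(&locked, &f);
            dispatch(&unlocked, &f);
            return 0;
    }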