// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .dirty_folio    = noop_dirty_folio,
#ifdef CONFIG_MIGRATION
        .migrate_folio  = migrate_folio,
#endif
};

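/*
 * Per swap type arrays of swap cache address_spaces, populated by
 * init_swap_address_space() when a swap device is enabled.  Each
 * address_space covers SWAP_ADDRESS_SPACE_PAGES swap slots.
 */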
struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

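/*
 * The per-VMA readahead state is packed into a single atomic_long
 * (vma->swap_readahead_info): the page-aligned fault address lives in
 * the high bits, while the readahead window size and the recent hit
 * count share the sub-page bits, as encoded by SWAP_RA_VAL() below.
 */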
#define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)                            \
        (((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)                                    \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

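/* Readahead hit counter used when VMA based readahead is not in use. */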
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

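/*
 * Return the workingset shadow entry stored at @entry's slot in the
 * swap cache, or NULL if the slot is empty or holds a folio.
 */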
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        struct page *page;

        page = xa_load(&address_space->i_pages, idx);
        if (xa_is_value(page))
                return page;
        return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
                        gfp_t gfp, void **shadowp)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
        unsigned long i, nr = folio_nr_pages(folio);
        void *old;

        xas_set_update(&xas, workingset_update_node);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

        folio_ref_add(folio, nr);
        folio_set_swapcache(folio);

        do {
                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
                        old = xas_load(&xas);
                        if (xa_is_value(old)) {
                                if (shadowp)
                                        *shadowp = old;
                        }
                        set_page_private(folio_page(folio, i), entry.val + i);
                        xas_store(&xas, folio);
                        xas_next(&xas);
                }
                address_space->nrpages += nr;
                __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
                __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (!xas_error(&xas))
                return 0;

        folio_clear_swapcache(folio);
        folio_ref_sub(folio, nr);
        return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
                        swp_entry_t entry, void *shadow)
{
        struct address_space *address_space = swap_address_space(entry);
        int i;
        long nr = folio_nr_pages(folio);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        xas_set_update(&xas, workingset_update_node);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != folio, entry);
                set_page_private(folio_page(folio, i), 0);
                xas_next(&xas);
        }
        folio_clear_swapcache(folio);
        address_space->nrpages -= nr;
        __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
        __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

        entry = folio_alloc_swap(folio);
        if (!entry.val)
                return false;

        /*
         * XArray node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(folio, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the folio will be dirtied in unmap because its
         * pte should be dirty. A special case is an MADV_FREE page. The
         * page's pte could have the dirty bit cleared while the folio's
         * SwapBacked flag is still set, because clearing the dirty bit
         * and the SwapBacked flag is not protected by any lock. For such
         * a folio, unmap will not set the dirty bit, so folio reclaim
         * will not write the folio out. This can cause data corruption
         * when the folio is swapped in later. Always setting the dirty
         * flag for the folio solves the problem.
         */
        folio_mark_dirty(folio);

        return true;

fail:
        put_swap_folio(folio, entry);
        return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
        swp_entry_t entry = folio_swap_entry(folio);
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(folio, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_folio(folio, entry);
        folio_ref_sub(folio, folio_nr_pages(folio));
}

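/*
 * Clear any workingset shadow entries left in the swap cache of swap
 * type @type in the offset range [@begin, @end].  The range may span
 * several swap cache address_spaces, each covering
 * SWAP_ADDRESS_SPACE_PAGES slots, so they are walked one at a time.
 */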
void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                unsigned long end)
{
        unsigned long curr = begin;
        void *old;

        for (;;) {
                swp_entry_t entry = swp_entry(type, curr);
                struct address_space *address_space = swap_address_space(entry);
                XA_STATE(xas, &address_space->i_pages, curr);

                xas_set_update(&xas, workingset_update_node);

                xa_lock_irq(&address_space->i_pages);
                xas_for_each(&xas, old, end) {
                        if (!xa_is_value(old))
                                continue;
                        xas_store(&xas, NULL);
                }
                xa_unlock_irq(&address_space->i_pages);

                /* search the next swapcache until we meet end */
                curr >>= SWAP_ADDRESS_SPACE_SHIFT;
                curr++;
                curr <<= SWAP_ADDRESS_SPACE_SHIFT;
                if (curr > end)
                        break;
        }
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
void free_swap_cache(struct page *page)
{
        struct folio *folio = page_folio(page);

        if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
            folio_trylock(folio)) {
                folio_free_swap(folio);
                folio_unlock(folio);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
        lru_add_drain();
        for (int i = 0; i < nr; i++)
                free_swap_cache(encoded_page_ptr(pages[i]));
        release_pages(pages, nr);
}

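/*
 * VMA based readahead is used unless it has been disabled via sysfs or
 * a rotational swap device is active; otherwise the cluster based
 * (physical offset) readahead path is taken instead.
 */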
static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Look up a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
                struct vm_area_struct *vma, unsigned long addr)
{
        struct folio *folio;

        folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
        if (folio) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(folio_test_large(folio)))
                        return folio;

                readahead = folio_test_clear_readahead(folio);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        }

        return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or %NULL.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
                pgoff_t index)
{
        swp_entry_t swp;
        struct swap_info_struct *si;
        struct folio *folio = __filemap_get_folio(mapping, index, FGP_ENTRY, 0);

        if (!xa_is_value(folio))
                goto out;
        if (!shmem_mapping(mapping))
                return NULL;

        swp = radix_to_swp_entry(folio);
        /* There might be swapin error entries in shmem mapping. */
        if (non_swap_entry(swp))
                return NULL;
        /* Prevent swapoff from happening to us */
        si = get_swap_device(swp);
        if (!si)
                return NULL;
        index = swp_offset(swp);
        folio = filemap_get_folio(swap_address_space(swp), index);
        put_swap_device(si);
out:
        return folio;
}

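/*
 * Look up or create the swap cache page for @entry.  If it is not already
 * cached, allocate a new folio, claim the slot by setting SWAP_HAS_CACHE
 * via swapcache_prepare(), insert the folio into the swap cache and the
 * LRU, and report *new_page_allocated so the caller knows it still has to
 * read the data in (see read_swap_cache_async()).
 */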
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct swap_info_struct *si;
        struct folio *folio;
        void *shadow = NULL;

        *new_page_allocated = false;

        for (;;) {
                int err;
                /*
                 * First check the swap cache.  Since this is normally
                 * called after swap_cache_get_folio() failed, re-calling
                 * that would confuse statistics.
                 */
                si = get_swap_device(entry);
                if (!si)
                        return NULL;
                folio = filemap_get_folio(swap_address_space(entry),
                                                swp_offset(entry));
                put_swap_device(si);
                if (folio)
                        return folio_file_page(folio, swp_offset(entry));

                /*
                 * Just skip read ahead for unused swap slot.
                 * During swap_off when swap_slot_cache is disabled,
                 * we have to handle the race between putting
                 * swap entry in swap cache and marking swap slot
                 * as SWAP_HAS_CACHE.  That's done in a later part of this
                 * function, or else swap_off will be aborted if we return NULL.
                 */
                if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
                        return NULL;

                /*
                 * Get a new page to read into from swap.  Allocate it now,
                 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
                 * cause any racers to loop around until we add it to cache.
                 */
                folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
                if (!folio)
                        return NULL;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (!err)
                        break;

                folio_put(folio);
                if (err != -EEXIST)
                        return NULL;

                /*
                 * We might race against __delete_from_swap_cache(), and
                 * stumble across a swap_map entry whose SWAP_HAS_CACHE
                 * has not yet been cleared.  Or race against another
                 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
                 * in swap_map, but not yet added its page to swap cache.
                 */
                schedule_timeout_uninterruptible(1);
        }

        /*
         * The swap entry is ours to swap in. Prepare the new page.
         */

        __folio_set_locked(folio);
        __folio_set_swapbacked(folio);

        if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
                goto fail_unlock;

        /* May fail (-ENOMEM) if XArray node allocation failed. */
        if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
                goto fail_unlock;

        mem_cgroup_swapin_uncharge_swap(entry);

        if (shadow)
                workingset_refault(folio, shadow);

        /* Caller will initiate read into locked folio */
        folio_add_lru(folio);
        *new_page_allocated = true;
        return &folio->page;

fail_unlock:
        put_swap_folio(folio, entry);
        folio_unlock(folio);
        folio_put(folio);
        return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                   struct vm_area_struct *vma,
                                   unsigned long addr, bool do_poll,
                                   struct swap_iocb **plug)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll, plug);

        return retpage;
}

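/*
 * Compute the next readahead window: it grows with the number of recent
 * readahead hits, shrinks by at most half per round, and is capped at
 * max_pages.
 */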
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}

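/*
 * Readahead sizing for the cluster based path: consume the accumulated
 * global hit count and remember the previous offset and window so the
 * next fault can be judged against them.
 */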
static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
                                  max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                WRITE_ONCE(prev_offset, offset);
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        struct swap_iocb *splug = NULL;
        bool do_poll = true, page_allocated;
        struct vm_area_struct *vma = vmf->vma;
        unsigned long addr = vmf->address;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                page = __read_swap_cache_async(
                        swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false, &splug);
                        if (offset != entry_offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        swap_read_unplug(splug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        /* The page was likely read above, so no need for plugging here */
        return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
}

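/*
 * Set up the swap cache for swap type @type: one address_space per
 * SWAP_ADDRESS_SPACE_PAGES slots, so that different parts of the device
 * are indexed (and locked) independently.
 */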
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
        swapper_spaces[type] = spaces;

        return 0;
}

void exit_swap_address_space(unsigned int type)
{
        int i;
        struct address_space *spaces = swapper_spaces[type];

        for (i = 0; i < nr_swapper_spaces[type]; i++)
                VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
        kvfree(spaces);
        nr_swapper_spaces[type] = 0;
        swapper_spaces[type] = NULL;
}

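/*
 * Work out the VMA based readahead window around the faulting address
 * and record the PTEs that cover it in @ra_info for swap_vma_readahead()
 * to walk, since the page table may be unmapped by then.
 */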
static void swap_ra_info(struct vm_fault *vmf,
                         struct vma_swap_readahead *ra_info)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        unsigned long faddr, pfn, fpfn, lpfn, rpfn;
        unsigned long start, end;
        pte_t *pte, *orig_pte;
        unsigned int max_win, hits, prev_win, win;
#ifndef CONFIG_64BIT
        pte_t *tpte;
#endif

        max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
                             SWAP_RA_ORDER_CEILING);
        if (max_win == 1) {
                ra_info->win = 1;
                return;
        }

        faddr = vmf->address;
        fpfn = PFN_DOWN(faddr);
        ra_val = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
                                               max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info,
                        SWAP_RA_VAL(faddr, win, 0));

        if (win == 1)
                return;

        /* Copy the PTEs because the page table may be unmapped */
        orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
        if (fpfn == pfn + 1) {
                lpfn = fpfn;
                rpfn = fpfn + win;
        } else if (pfn == fpfn + 1) {
                lpfn = fpfn - win + 1;
                rpfn = fpfn + 1;
        } else {
                unsigned int left = (win - 1) / 2;

                lpfn = fpfn - left;
                rpfn = fpfn + win - left;
        }
        start = max3(lpfn, PFN_DOWN(vma->vm_start),
                     PFN_DOWN(faddr & PMD_MASK));
        end = min3(rpfn, PFN_DOWN(vma->vm_end),
                   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));

        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
        pte -= ra_info->offset;
#ifdef CONFIG_64BIT
        ra_info->ptes = pte;
#else
        tpte = ra_info->ptes;
        for (pfn = start; pfn != end; pfn++)
                *tpte++ = *pte++;
#endif
        pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                       struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct swap_iocb *splug = NULL;
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        pte_t *pte, pentry;
        swp_entry_t entry;
        unsigned int i;
        bool page_allocated;
        struct vma_swap_readahead ra_info = {
                .win = 1,
        };

        swap_ra_info(vmf, &ra_info);
        if (ra_info.win == 1)
                goto skip;

        blk_start_plug(&plug);
        for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
             i++, pte++) {
                pentry = *pte;
                if (!is_swap_pte(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                page = __read_swap_cache_async(entry, gfp_mask, vma,
                                               vmf->address, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false, &splug);
                        if (i != ra_info.offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        swap_read_unplug(splug);
        lru_add_drain();
skip:
        /* The page was likely read above, so no need for plugging here */
        return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
                                     ra_info.win == 1, NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. by physical
 * disk offset) or VMA-based (i.e. by virtual addresses around the
 * faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                              struct vm_fault *vmf)
{
        return swap_use_vma_readahead() ?
               swap_vma_readahead(entry, gfp_mask, vmf) :
               swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n",
                          enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t count)
{
        ssize_t ret;

        ret = kstrtobool(buf, &enable_vma_readahead);
        if (ret)
                return ret;

        return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static const struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}
subsys_initcall(swap_init_sysfs);
#endif