/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64
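
/*
 * Illustrative sketch (not part of this header): how a swapon(2) caller
 * might compose the flags above to request priority 10 plus per-cluster
 * discard. The helper name is hypothetical.
 */
static inline int example_swap_flags(int prio)
{
	int flags = SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_PAGES;

	/* Priority is optional; SWAP_FLAG_PREFER marks it as present. */
	flags |= SWAP_FLAG_PREFER |
		 ((prio << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);
	return flags;
}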

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
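
/*
 * A minimal sketch of the 5/27 split described above, assuming a swap
 * entry stored in an unsigned long with the type in the top bits. Real
 * architectures define their own __swp_entry()/__swp_type()/__swp_offset();
 * this helper is purely illustrative.
 */
static inline unsigned long example_swp_pack(unsigned int type,
					     unsigned long offset)
{
	/* Per the comment above: 5 bits of type, 27 bits of offset. */
	return ((unsigned long)type << 27) | (offset & ((1UL << 27) - 1));
}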

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * PTE markers are used to persist information onto PTEs that would
 * otherwise be none ptes.  As the name "PTE" hints, they should only be
 * applied to the leaves of pgtables.
 */
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
			    SWP_MIGRATION_NUM + SWP_DEVICE_NUM)

/*
 * Unaddressable device memory support.  See include/linux/hmm.h and
 * Documentation/mm/hmm.rst.  In short, we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table
 * entry to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU
 * page table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive
 * to a single process.  For SWP_MIGRATION_WRITE, that information is
 * implicit: (parts of) anonymous pages that are mapped writable are
 * exclusive to a single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM)
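
/*
 * Worked example (assuming one common config, not a guarantee): with
 * CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE all
 * enabled, MAX_SWAPFILES = (1 << 5) - 4 - 3 - 1 - 1 = 23, so swap types
 * 0..22 name real swap devices and types 23..31 carry the special
 * entries defined above.
 */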

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
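
/*
 * A minimal sketch (hypothetical helper, not part of this header) of how
 * the trailing magic described above is checked when a swap area is
 * activated:
 */
static inline bool example_swap_magic_ok(const union swap_header *hdr)
{
	/* New-format areas end the page with "SWAPSPACE2", old with
	 * "SWAP-SPACE". */
	return !memcmp(hdr->magic.magic, "SWAPSPACE2", 10) ||
	       !memcmp(hdr->magic.magic, "SWAP-SPACE", 10);
}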

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim.
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
#ifdef CONFIG_LRU_GEN
	/* per-thread mm walk data */
	struct lru_gen_mm_walk *mm_walk;
#endif
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range
 * of disk blocks.  An rbtree of swap extents maps the entire swapfile
 * (where the term `swapfile' refers to either a blockdevice or an IS_REG
 * file).  Apart from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
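
/*
 * Illustrative sketch of the linear mapping a single extent describes
 * (the helper is hypothetical; the real lookup walks the extent rbtree
 * in mm/swapfile.c):
 */
static inline sector_t example_extent_block(const struct swap_extent *se,
					    pgoff_t page)
{
	/* Pages [start_page, start_page + nr_pages) map 1:1 onto blocks
	 * [start_block, start_block + nr_pages). */
	return se->start_block + (page - se->start_page);
}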

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
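
/*
 * Worked example, assuming 4KiB pages: magic.magic starts at
 * PAGE_SIZE - 10 = 4086, info.badpages starts at
 * 1024 + 3*4 + 16 + 16 + 117*4 = 1536, so the bad-page list can hold
 * (4086 - 1536) / 4 = 637 entries.
 */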

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
					/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */
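
/*
 * A minimal sketch of how a first-level swap_map byte decomposes under
 * the flags above (assumed helper, for illustration only):
 */
static inline int example_swap_map_count(unsigned char ent)
{
	/* The low six bits carry the count; 0x3e is the largest count
	 * that fits without a continuation. */
	int count = ent & ~(SWAP_HAS_CACHE | COUNT_CONTINUED);

	/* COUNT_CONTINUED means the rest of the count lives in a
	 * continuation page, so the byte alone is not the full answer. */
	return (ent & COUNT_CONTINUED) ? -1 : count;
}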

/*
 * We use this to track usage of a cluster. A cluster is a block of swap
 * disk space SWAPFILE_CLUSTER pages long that is naturally aligned on
 * disk. All free clusters are organized into a list; we fetch an entry
 * from the list to get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a
 * cluster is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields and the
				 * swap_info_struct->swap_map elements that
				 * correspond to this swap cluster.
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
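
/*
 * A minimal sketch (hypothetical helper) of the dual use of ->data
 * described above; callers must hold swap_info_struct.lock:
 */
static inline unsigned int example_cluster_usage(struct swap_cluster_info *ci)
{
	/* Free clusters chain through ->data; usage is zero by definition. */
	if (ci->flags & CLUSTER_FLAG_FREE)
		return 0;
	/* Otherwise ->data counts the allocated pages in the cluster. */
	return ci->data;
}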

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries
 * from its own cluster and swap out sequentially. The purpose is to
 * optimize swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
	struct completion comp;		/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags requires
					 * holding both this lock and swap_lock;
					 * when both are needed, take swap_lock
					 * first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					  * entries in swap_avail_heads, one
					  * entry per node.
					  * Must be last, as the length of the
					  * array is nr_node_ids, which is not
					  * a fixed value, so it has to be
					  * allocated dynamically.
					  * And it has to be an array so that
					  * plist_for_each_* can work.
					  */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

static inline swp_entry_t folio_swap_entry(struct folio *folio)
{
	swp_entry_t entry = { .val = page_private(&folio->page) };

	return entry;
}

static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
{
	folio->private = (void *)entry.val;
}
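
/*
 * A minimal usage sketch (assumed helper): the two accessors above form a
 * round trip through the folio's private field.
 */
static inline bool example_swap_entry_roundtrip(struct folio *folio,
						swp_entry_t entry)
{
	folio_set_swap_entry(folio, entry);
	/* Reading it back yields the same opaque value. */
	return folio_swap_entry(folio).val == entry.val;
}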

/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do {				\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {		\
		xas_set_update(xas, workingset_update_node);		\
		xas_set_lru(xas, &shadow_nodes);			\
	}								\
} while (0)

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
void lru_note_cost(struct lruvec *lruvec, bool file,
		   unsigned int nr_io, unsigned int nr_rotated);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
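
/*
 * A minimal usage sketch (assumed caller): lru_cache_disable() bumps
 * lru_disable_count and drains the per-CPU pagevecs, and every successful
 * disable must be paired with lru_cache_enable().
 */
static inline void example_with_lru_cache_disabled(void (*fn)(void))
{
	lru_cache_disable();
	fn();		/* work that must not race with the LRU caches */
	lru_cache_enable();
}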
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);

extern void lru_cache_add_inactive_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

void check_move_unevictable_folios(struct folio_batch *fbatch);
void check_move_unevictable_pages(struct pagevec *pvec);

extern void kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

extern void free_swap_cache(struct page *page);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap more than 50% full? Release swapcache more aggressively. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_page_sector(struct page *page);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}
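
/*
 * A minimal usage sketch (assumed caller): get_swap_device() takes a
 * reference on si->users so the device cannot be swapped off underneath
 * us, and every successful get must be paired with put_swap_device().
 */
static inline int example_read_swap_count(swp_entry_t entry)
{
	struct swap_info_struct *si = get_swap_device(entry);
	int count;

	if (!si)
		return -EINVAL;		/* stale or bogus entry */
	count = swp_swapcount(entry);
	put_swap_device(si);
	return count;
}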

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()		0L
#define total_swap_pages		0L
#define total_swapcache_pages()		0UL
#define vm_swap_full()			0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)

static inline void free_swap_cache(struct page *page)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
	swp_entry_t entry;

	entry.val = 0;
	return entry;
}

static inline bool folio_free_swap(struct folio *folio)
{
	return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#ifdef CONFIG_ZSWAP
extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return;
	__cgroup_throttle_swaprate(page, gfp_mask);
}
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	cgroup_throttle_swaprate(&folio->page, gfp);
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */