// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm_inline.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"
#include "swap.h"

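/*
 * Private state passed to madvise_cold_or_pageout_pte_range() through the
 * page walk: the TLB gather used for flushing, and whether the walk should
 * reclaim the pages (MADV_PAGEOUT) or merely deactivate them (MADV_COLD).
 */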
struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_lock for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
	case MADV_COLLAPSE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

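/*
 * Refcounted, user-supplied names for otherwise anonymous VMAs, set via
 * prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...) and shown as "[anon:<name>]"
 * in /proc/<pid>/maps.
 */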
#ifdef CONFIG_ANON_VMA_NAME
struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	struct anon_vma_name *anon_name;
	size_t count;

	/* Add 1 for NUL terminator at the end of the anon_name->name */
	count = strlen(name) + 1;
	anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
	if (anon_name) {
		kref_init(&anon_name->kref);
		memcpy(anon_name->name, name, count);
	}

	return anon_name;
}

void anon_vma_name_free(struct kref *kref)
{
	struct anon_vma_name *anon_name =
			container_of(kref, struct anon_vma_name, kref);
	kfree(anon_name);
}

struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	mmap_assert_locked(vma->vm_mm);

	return vma->anon_name;
}

/* mmap_lock should be write-locked */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	struct anon_vma_name *orig_name = anon_vma_name(vma);

	if (!anon_name) {
		vma->anon_name = NULL;
		anon_vma_name_put(orig_name);
		return 0;
	}

	if (anon_vma_name_eq(orig_name, anon_name))
		return 0;

	vma->anon_name = anon_vma_name_reuse(anon_name);
	anon_vma_name_put(orig_name);

	return 0;
}
#else /* CONFIG_ANON_VMA_NAME */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	if (anon_name)
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_ANON_VMA_NAME */

/*
 * Update the vm_flags on a region of a vma, splitting or merging it as
 * necessary. Must be called with mmap_lock held for writing. The caller
 * should ensure anon_name stability by raising its refcount even when
 * anon_name belongs to a valid vma, because this function might free
 * that vma.
 */
static int madvise_update_vma(struct vm_area_struct *vma,
			      struct vm_area_struct **prev, unsigned long start,
			      unsigned long end, unsigned long new_flags,
			      struct anon_vma_name *anon_name)
{
	struct mm_struct *mm = vma->vm_mm;
	int error;
	pgoff_t pgoff;
	VMA_ITERATOR(vmi, mm, start);

	if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
		*prev = vma;
		return 0;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(&vmi, mm, *prev, start, end, new_flags,
			  vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, anon_name);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(&vmi, vma, start, 1);
		if (error)
			return error;
	}

	if (end != vma->vm_end) {
		error = split_vma(&vmi, vma, end, 0);
		if (error)
			return error;
	}

success:
	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 */
	vm_flags_reset(vma, new_flags);
	if (!vma->vm_file || vma_is_anon_shmem(vma)) {
		error = replace_anon_vma_name(vma, anon_name);
		if (error)
			return error;
	}

	return 0;
}

#ifdef CONFIG_SWAP
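/*
 * Walk the ptes of an anonymous range and kick off asynchronous swap-in
 * for every swapped-out page, so that MADV_WILLNEED can schedule the I/O
 * without waiting for it to complete.
 */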
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
				 unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	unsigned long index;
	struct swap_iocb *splug = NULL;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;
		pte_t *ptep;

		ptep = pte_offset_map_lock(vma->vm_mm, pmd, index, &ptl);
		pte = *ptep;
		pte_unmap_unlock(ptep, ptl);

		if (!is_swap_pte(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, index, false, &splug);
		if (page)
			put_page(page);
	}
	swap_read_unplug(splug);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry = swapin_walk_pmd_entry,
};

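/*
 * shmem keeps swapped-out pages as value entries in the mapping's xarray,
 * so walk the xarray rather than the page tables and start asynchronous
 * swap-in for each swap entry in the range.
 */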
static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
	pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
	struct page *page;
	struct swap_iocb *splug = NULL;

	rcu_read_lock();
	xas_for_each(&xas, page, end_index) {
		swp_entry_t swap;

		if (!xa_is_value(page))
			continue;
		swap = radix_to_swp_entry(page);
		/* There might be swapin error entries in shmem mapping. */
		if (non_swap_entry(swap))
			continue;
		xas_pause(&xas);
		rcu_read_unlock();

		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
					     NULL, 0, false, &splug);
		if (page)
			put_page(page);

		rcu_read_lock();
	}
	rcu_read_unlock();
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif	/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations. Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					   file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
	get_file(file);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	mmap_read_unlock(mm);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(mm);
	return 0;
}

static inline bool can_do_file_pageout(struct vm_area_struct *vma)
{
	if (!vma->vm_file)
		return false;
	/*
	 * Page out pagecache only for non-anonymous mappings that correspond
	 * to files the calling process could (if it tried) open for writing;
	 * otherwise we'd be including shared non-exclusive mappings, which
	 * opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

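/*
 * Core of MADV_COLD and MADV_PAGEOUT: age a PMD- or PTE-mapped range by
 * clearing the young/referenced bits, then either deactivate the folios
 * (cold) or isolate and reclaim them immediately (pageout). Folios that
 * are mapped more than once are left alone.
 */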
static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct madvise_walk_private *private = walk->private;
	struct mmu_gather *tlb = private->tlb;
	bool pageout = private->pageout;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *orig_pte, *pte, ptent;
	spinlock_t *ptl;
	struct folio *folio = NULL;
	LIST_HEAD(folio_list);
	bool pageout_anon_only_filter;

	if (fatal_signal_pending(current))
		return -EINTR;

	pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) &&
					!can_do_file_pageout(vma);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		folio = pfn_folio(pmd_pfn(orig_pmd));

		/* Do not interfere with other mappings of this folio */
		if (folio_mapcount(folio) != 1)
			goto huge_unlock;

		if (pageout_anon_only_filter && !folio_test_anon(folio))
			goto huge_unlock;

		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			folio_get(folio);
			spin_unlock(ptl);
			folio_lock(folio);
			err = split_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
			if (!err)
				goto regular_folio;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (pageout) {
			if (folio_isolate_lru(folio)) {
				if (folio_test_unevictable(folio))
					folio_putback_lru(folio);
				else
					list_add(&folio->lru, &folio_list);
			}
		} else
			folio_deactivate(folio);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&folio_list);
		return 0;
	}

regular_folio:
	if (pmd_trans_unstable(pmd))
		return 0;
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * Creating a THP page is expensive, so split it only if we
		 * are sure it's worth it. Split it if we are the only owner.
		 */
		if (folio_test_large(folio)) {
			if (folio_mapcount(folio) != 1)
				break;
			if (pageout_anon_only_filter && !folio_test_anon(folio))
				break;
			folio_get(folio);
			if (!folio_trylock(folio)) {
				folio_put(folio);
				break;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_folio(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				break;
			}
			folio_unlock(folio);
			folio_put(folio);
			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		/*
		 * Do not interfere with other mappings of this folio, and
		 * skip non-LRU folios.
		 */
		if (!folio_test_lru(folio) || folio_mapcount(folio) != 1)
			continue;

		if (pageout_anon_only_filter && !folio_test_anon(folio))
			continue;

		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating a folio to accelerate its reclaim.
		 * The VM can't reclaim the folio unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking,
		 * which will miss the recent reference history.
		 */
		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (pageout) {
			if (folio_isolate_lru(folio)) {
				if (folio_test_unevictable(folio))
					folio_putback_lru(folio);
				else
					list_add(&folio->lru, &folio_list);
			}
		} else
			folio_deactivate(folio);
	}

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	if (pageout)
		reclaim_pages(&folio_list);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_or_pageout_pte_range,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
				    struct vm_area_struct *vma,
				    unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

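/* MADV_COLD and MADV_PAGEOUT only make sense on normal, LRU-managed memory. */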
static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
}

static long madvise_cold(struct vm_area_struct *vma,
			 struct vm_area_struct **prev,
			 unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static void madvise_pageout_page_range(struct mmu_gather *tlb,
				       struct vm_area_struct *vma,
				       unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static long madvise_pageout(struct vm_area_struct *vma,
			    struct vm_area_struct **prev,
			    unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	/*
	 * If the VMA belongs to a private file mapping, there can be private
	 * dirty pages which can be paged out even if this process is neither
	 * the owner of nor write-capable on the file. We still allow private
	 * file mappings to page out those dirty anon pages.
	 */
	if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
				       (vma->vm_flags & VM_MAYSHARE)))
		return 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

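/*
 * Core of MADV_FREE: drop swap entries outright and leave present folios
 * clean and unreferenced, marking them lazyfree so that reclaim can discard
 * them instead of swapping them out, unless they are dirtied again first.
 */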
static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct folio *folio;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry to prevent a swap-in, which is more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (!non_swap_entry(entry)) {
				nr_swap--;
				free_swap_and_cache(entry);
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			} else if (is_hwpoison_entry(entry) ||
				   is_swapin_error_entry(entry)) {
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			}
			continue;
		}

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * If pmd isn't transhuge but the folio is large and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (folio_test_large(folio)) {
			if (folio_mapcount(folio) != 1)
				goto out;
			folio_get(folio);
			if (!folio_trylock(folio)) {
				folio_put(folio);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_folio(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			folio_unlock(folio);
			folio_put(folio);
			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
			if (!folio_trylock(folio))
				continue;
			/*
			 * If folio is shared with others, we mustn't clear
			 * the folio's dirty flag.
			 */
			if (folio_mapcount(folio) != 1) {
				folio_unlock(folio);
				continue;
			}

			if (folio_test_swapcache(folio) &&
			    !folio_free_swap(folio)) {
				folio_unlock(folio);
				continue;
			}

			folio_clear_dirty(folio);
			folio_unlock(folio);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at() and tlb_remove_tlb_entry(), so
			 * for portability, remap the pte as old and clean
			 * after clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		folio_mark_lazyfree(folio);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry = madvise_free_pte_range,
};

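/*
 * Apply MADV_FREE to a single anonymous vma: clamp the range to the vma,
 * publish an MMU_NOTIFY_CLEAR range, and run madvise_free_walk_ops over
 * the range under a TLB gather.
 */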
static int madvise_free_single_vma(struct vm_area_struct *vma,
				   unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);

	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range_single call sets things up for shrink_active_list to actually
 * free these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range_single(vma, start, end - start, NULL);
	return 0;
}

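/*
 * Check whether the range may be zapped: PFN mappings are never eligible,
 * locked vmas only with MADV_DONTNEED_LOCKED, and hugetlb vmas need a
 * huge-page-aligned start; the end is rounded down to a huge page
 * boundary below.
 */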
static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long *end,
					    int behavior)
{
	if (!is_vm_hugetlb_page(vma)) {
		unsigned int forbidden = VM_PFNMAP;

		if (behavior != MADV_DONTNEED_LOCKED)
			forbidden |= VM_LOCKED;

		return !(vma->vm_flags & forbidden);
	}

	if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
		return false;
	if (start & ~huge_page_mask(hstate_vma(vma)))
		return false;

	/*
	 * Madvise callers expect the length to be rounded up to PAGE_SIZE
	 * boundaries, and may be unaware that this VMA uses huge pages.
	 * Avoid unexpected data loss by rounding down the number of
	 * huge pages freed.
	 */
	*end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));

	return true;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	struct mm_struct *mm = vma->vm_mm;

	*prev = vma;
	if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
		return -EINVAL;

	if (start == end)
		return 0;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_lock has been dropped, prev is stale */

		mmap_read_lock(mm);
		vma = find_vma(mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one with the
			 * lowest vma->vm_start where start is also <
			 * vma->vm_end. If start < vma->vm_start, it means a
			 * hole materialized in the user address space within
			 * the virtual range passed to MADV_DONTNEED or
			 * MADV_FREE.
			 */
			return -ENOMEM;
		}
		/*
		 * Potential end adjustment for hugetlb vma is OK as
		 * the check below keeps end within vma.
		 */
		if (!madvise_dontneed_free_valid_vma(vma, start, &end,
						     behavior))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old vma was
			 * split while the mmap_lock was released, the
			 * concurrent operation must not give madvise() an
			 * undefined result; there may be an adjacent next vma
			 * that we'll walk next. userfaultfd_remove() will
			 * generate an UFFD_EVENT_REMOVE repetition on the
			 * end..vma->vm_end range, but the manager can handle
			 * a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

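/*
 * MADV_POPULATE_READ/WRITE: prefault page tables in the range, revalidating
 * the vma and retrying whenever faultin_vma_page_range() had to drop the
 * mmap_lock.
 */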
static long madvise_populate(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end,
			     int behavior)
{
	const bool write = behavior == MADV_POPULATE_WRITE;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long tmp_end;
	int locked = 1;
	long pages;

	*prev = vma;

	while (start < end) {
		/*
		 * We might have temporarily dropped the lock. For example,
		 * our VMA might have been split.
		 */
		if (!vma || start >= vma->vm_end) {
			vma = vma_lookup(mm, start);
			if (!vma)
				return -ENOMEM;
		}

		tmp_end = min_t(unsigned long, end, vma->vm_end);
		/* Populate (prefault) page tables readable/writable. */
		pages = faultin_vma_page_range(vma, start, tmp_end, write,
					       &locked);
		if (!locked) {
			mmap_read_lock(mm);
			locked = 1;
			*prev = NULL;
			vma = NULL;
		}
		if (pages < 0) {
			switch (pages) {
			case -EINTR:
				return -EINTR;
			case -EINVAL: /* Incompatible mappings / permissions. */
				return -EINVAL;
			case -EHWPOISON:
				return -EHWPOISON;
			case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
				return -EFAULT;
			default:
				pr_warn_once("%s: unhandled return value: %ld\n",
					     __func__, pages);
				fallthrough;
			case -ENOMEM:
				return -ENOMEM;
			}
		}
		start += pages * PAGE_SIZE;
	}
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
			   struct vm_area_struct **prev,
			   unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;
	struct mm_struct *mm = vma->vm_mm;

	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_rwsem.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_lock was not released by userfaultfd_remove() */
		mmap_read_unlock(mm);
	}
	error = vfs_fallocate(f,
			      FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      offset, end - start);
	fput(f);
	mmap_read_lock(mm);
	return error;
}

/*
 * Apply a madvise behavior to a region of a vma.  madvise_update_vma()
 * will handle splitting a vm area into separate areas, each area with
 * its own behavior.
 */
static int madvise_vma_behavior(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end,
				unsigned long behavior)
{
	int error;
	struct anon_vma_name *anon_name;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_PAGEOUT:
		return madvise_pageout(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
		return madvise_populate(vma, prev, start, end, behavior);
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO)
			return -EINVAL;
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED)
			return -EINVAL;
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL)
			return -EINVAL;
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	case MADV_COLLAPSE:
		return madvise_collapse(vma, prev, start, end);
	}

	anon_name = anon_vma_name(vma);
	anon_vma_name_get(anon_name);
	error = madvise_update_vma(vma, prev, start, end, new_flags,
				   anon_name);
	anon_vma_name_put(anon_name);

out:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
				unsigned long start, unsigned long end)
{
	unsigned long size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += size) {
		unsigned long pfn;
		struct page *page;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page.
		 */
		size = page_size(compound_head(page));

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
				pfn, start);
			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
		} else {
			pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);
			ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED);
			if (ret == -EOPNOTSUPP)
				ret = 0;
		}

		if (ret)
			return ret;
	}

	return 0;
}
#endif

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_FREE:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
	case MADV_COLLAPSE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

static bool process_madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_WILLNEED:
	case MADV_COLLAPSE:
		return true;
	default:
		return false;
	}
}

/*
 * Walk the vmas in range [start,end), and call the visit function on each one.
 * The visit function will get start and end parameters that cover the overlap
 * between the current vma and the original range.  Any unmapped regions in the
 * original range will result in this function returning -ENOMEM while still
 * calling the visit function on all of the existing vmas in the range.
 * Must be called with the mmap_lock held for reading or writing.
 */
static
int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
		      unsigned long end, unsigned long arg,
		      int (*visit)(struct vm_area_struct *vma,
				   struct vm_area_struct **prev, unsigned long start,
				   unsigned long end, unsigned long arg))
{
	struct vm_area_struct *vma;
	struct vm_area_struct *prev;
	unsigned long tmp;
	int unmapped_error = 0;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * This differs from the handling in mlock etc.
	 */
	vma = find_vma_prev(mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		int error;

		/* Still start < end. */
		if (!vma)
			return -ENOMEM;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				break;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = visit(vma, &prev, start, tmp, arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		if (start >= end)
			break;
		if (prev)
			vma = find_vma(mm, prev->vm_end);
		else	/* madvise_remove dropped mmap_lock */
			vma = find_vma(mm, start);
	}

	return unmapped_error;
}

#ifdef CONFIG_ANON_VMA_NAME
static int madvise_vma_anon_name(struct vm_area_struct *vma,
				 struct vm_area_struct **prev,
				 unsigned long start, unsigned long end,
				 unsigned long anon_name)
{
	int error;

	/* Only anonymous mappings can be named */
	if (vma->vm_file && !vma_is_anon_shmem(vma))
		return -EBADF;

	error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
				   (struct anon_vma_name *)anon_name);

	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
			  unsigned long len_in, struct anon_vma_name *anon_name)
{
	unsigned long end;
	unsigned long len;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
				 madvise_vma_anon_name);
}
#endif /* CONFIG_ANON_VMA_NAME */

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_COLLAPSE - synchronously coalesce pages into new THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *  MADV_POPULATE_READ - populate (prefault) page tables readable by
 *		triggering read faults if required
 *  MADV_POPULATE_WRITE - populate (prefault) page tables writable by
 *		triggering write faults if required
 *
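 * A minimal usage sketch (userspace, illustrative names): an allocator that
 * is done with a large block can drop its backing pages while keeping the
 * mapping itself intact:
 *
 *	madvise(block, block_len, MADV_DONTNEED);
 *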
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
{
	unsigned long end;
	int error;
	int write;
	size_t len;
	struct blk_plug plug;

	start = untagged_addr(start);

	if (!madvise_behavior_valid(behavior))
		return -EINVAL;

	if (!PAGE_ALIGNED(start))
		return -EINVAL;
	len = PAGE_ALIGN(len_in);

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
	} else {
		mmap_read_lock(mm);
	}

	blk_start_plug(&plug);
	error = madvise_walk_vmas(mm, start, end, behavior,
				  madvise_vma_behavior);
	blk_finish_plug(&plug);
	if (write)
		mmap_write_unlock(mm);
	else
		mmap_read_unlock(mm);

	return error;
}

SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	return do_madvise(current->mm, start, len_in, behavior);
}

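/*
 * process_madvise(2): apply a madvise hint to the address space of the
 * process identified by @pidfd, over the ranges described by @vec and
 * @vlen. Requires PTRACE_MODE_READ access to the target and CAP_SYS_NICE;
 * only the non-destructive hints accepted by
 * process_madvise_behavior_valid() are allowed.
 */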
SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
		size_t, vlen, int, behavior, unsigned int, flags)
{
	ssize_t ret;
	struct iovec iovstack[UIO_FASTIOV], iovec;
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	struct task_struct *task;
	struct mm_struct *mm;
	size_t total_len;
	unsigned int f_flags;

	if (flags != 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		goto out;

	task = pidfd_get_task(pidfd, &f_flags);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto free_iov;
	}

	if (!process_madvise_behavior_valid(behavior)) {
		ret = -EINVAL;
		goto release_task;
	}

	/* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR_OR_NULL(mm)) {
		ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		goto release_task;
	}

	/*
	 * Require CAP_SYS_NICE for influencing process performance. Note that
	 * only non-destructive hints are currently supported.
	 */
	if (!capable(CAP_SYS_NICE)) {
		ret = -EPERM;
		goto release_mm;
	}

	total_len = iov_iter_count(&iter);

	while (iov_iter_count(&iter)) {
		iovec = iov_iter_iovec(&iter);
		ret = do_madvise(mm, (unsigned long)iovec.iov_base,
				 iovec.iov_len, behavior);
		if (ret < 0)
			break;
		iov_iter_advance(&iter, iovec.iov_len);
	}

	ret = (total_len - iov_iter_count(&iter)) ? : ret;

release_mm:
	mmput(mm);
release_task:
	put_task_struct(task);
free_iov:
	kfree(iov);
out:
	return ret;
}