// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
			error = -EINVAL;
			goto out;
		}
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && (new_flags & VM_SPECIAL)) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

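	/*
	 * Try to merge the reflagged range into an adjacent vma first;
	 * only if that fails do we split this vma at start and/or end,
	 * so the new flags apply to exactly [start, end).
	 */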
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;
out:
	return error;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

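	/*
	 * Take the pte lock only long enough to snapshot each pte, then
	 * drop it before read_swap_cache_async(), which may block while
	 * allocating and reading in the page.
	 */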
	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

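		/*
		 * A value entry in the page cache here stands for a page
		 * that was swapped out; anything else is either a resident
		 * page (just drop our reference) or a hole (skip it).
		 */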
		page = find_get_entry(mapping, index);
		if (!xa_is_value(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
					     NULL, 0, false);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif /* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					   file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

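	/*
	 * Convert the user addresses into page-cache offsets, clamping
	 * end to this vma, before kicking off the readahead.
	 */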
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry to prevent a swap-in, which is more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = _vm_normal_page(vma, addr, ptent, true);
		if (!page)
			continue;

		/*
		 * If pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
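			/*
			 * The pte lock was dropped for split_huge_page();
			 * retake it and step back one pte, so the now
			 * base-size page at addr is processed again.
			 */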
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If page is shared with others, we couldn't clear
			 * PG_dirty of the page.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at() and tlb_remove_tlb_entry(), so
			 * for portability, remap the pte as old and clean
			 * after clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
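	/*
	 * nr_swap went negative once per swap entry freed above; apply
	 * the delta to the MM_SWAPENTS counter in one go.
	 */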
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm = vma->vm_mm,
		.private = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
}

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

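	/* Clamp the range to this vma; bail out if nothing overlaps. */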
	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, mm, range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, range.start, range.end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	madvise_free_page_range(&tlb, vma, range.start, range.end);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb, range.start, range.end);

	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

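	/*
	 * userfaultfd_remove() returns false after dropping mmap_sem to
	 * notify a userfaultfd monitor; in that case the vma must be
	 * looked up again and revalidated from scratch below.
	 */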
	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_sem has been dropped, prev is stale */

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end.  If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_dontneed_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end.  If the old
			 * vma was split while the mmap_sem was
			 * released, the concurrent operation doesn't
			 * give madvise() an undefined result: there
			 * may be an adjacent next vma that we'll walk
			 * next.  userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_sem was not released by userfaultfd_remove() */
		up_read(&current->mm->mmap_sem);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	struct page *page;
	struct zone *zone;
	unsigned int order;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += PAGE_SIZE << order) {
		unsigned long pfn;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page, and order will be 0.
		 */
		order = compound_order(compound_head(page));

		if (PageHWPoison(page)) {
			put_page(page);
			continue;
		}

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
					pfn, start);

			ret = soft_offline_page(page, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}

		pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);

		/*
		 * Drop the page reference taken by get_user_pages_fast().  In
		 * the absence of MF_COUNT_INCREASED the memory_failure()
		 * routine is responsible for pinning the page to prevent it
		 * from being released back to the page allocator.
		 */
		put_page(page);
		ret = memory_failure(pfn, 0);
		if (ret)
			return ret;
	}

	/* Ensure that all poisoned pages are removed from per-cpu lists */
	for_each_populated_zone(zone)
		drain_all_pages(zone);

	return 0;
}
#endif

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the
 *		application will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future.  Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VMPFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
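 *
 * Example (userspace), a minimal sketch: drop the contents of a
 * page-aligned scratch buffer once they are no longer needed, while
 * keeping the mapping itself.  The buffer name and size here are
 * illustrative only, not part of this file:
 *
 *	#include <sys/mman.h>
 *
 *	char *scratch = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	// ... use scratch ...
 *	if (madvise(scratch, 1 << 20, MADV_DONTNEED))
 *		perror("madvise");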
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
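	/* Round len up to a multiple of PAGE_SIZE (~PAGE_MASK == PAGE_SIZE - 1). */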
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - this differs from the handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end). */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}