/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
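/*
 * Illustrative userspace usage (a sketch, not an API declared in this
 * header): an application opts a range into KSM with madvise(2), which is
 * routed to ksm_madvise() below:
 *
 *	madvise(addr, length, MADV_MERGEABLE);    ask ksmd to scan and merge
 *	madvise(addr, length, MADV_UNMERGEABLE);  undo, break sharing again
 */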

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

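/*
 * ksm_fork() is called on the fork() path (from dup_mmap()): if the parent
 * mm was registered with ksmd, register the new child mm too, so its areas
 * continue to be scanned for mergeable pages.
 */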
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

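/*
 * ksm_exit() is called when the address space is torn down (from __mmput()):
 * unhook the mm from ksmd if it had been registered.
 */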
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
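/*
 * Sketch of how the swap-in path is expected to use this (simplified from
 * the do_swap_page() call site; a NULL return means the copy could not be
 * allocated):
 *
 *	page = ksm_might_need_to_copy(page, vma, vmf->address);
 *	if (unlikely(!page)) {
 *		ret = VM_FAULT_OOM;
 *		goto out;
 *	}
 */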

/*
 * rmap_walk_ksm() is the PageKsm leg of rmap_walk(): it visits each vma
 * currently sharing the KSM page via the stable node's list of rmap_items.
 * ksm_migrate_page() lets page migration transfer the stable node from the
 * old page to its replacement.
 */
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);

#else /* !CONFIG_KSM */

/*
 * Stubs, so that core mm code can call these unconditionally without
 * sprinkling #ifdef CONFIG_KSM around the call sites.
 */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline void rmap_walk_ksm(struct page *page,
			struct rmap_walk_control *rwc)
{
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */