/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
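
/*
 * Example (illustrative only, not part of this header): userspace opts a
 * region into KSM scanning with madvise(2).  MADV_MERGEABLE reaches
 * ksm_madvise() above, which flags the vma VM_MERGEABLE and calls
 * __ksm_enter() the first time an mm gains a mergeable vma.  A minimal
 * userspace sketch, assuming a 4 KiB page size and <sys/mman.h> plus
 * <string.h>:
 *
 *	char *buf = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	memset(buf, 'x', 2 * 4096);
 *	madvise(buf, 2 * 4096, MADV_MERGEABLE);
 *
 * Nothing is merged at madvise() time: ksmd scans and merges the two
 * identical pages later, and only while /sys/kernel/mm/ksm/run is 1.
 */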

/*
 * At fork, register the child mm with ksmd if the parent mm was being
 * scanned: MMF_VM_MERGEABLE inherits the registration.
 */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

/*
 * On address-space teardown, unhook the mm from ksmd's scan list if it
 * was ever registered.
 */
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}
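
/*
 * Both helpers hook into the core mm lifecycle.  Condensed from
 * kernel/fork.c (error handling and unrelated setup trimmed), the call
 * sites look roughly like:
 *
 *	dup_mmap():	retval = ksm_fork(mm, oldmm);
 *	__mmput():	ksm_exit(mm);
 *
 * so an mm inherits KSM tracking across fork() and drops it when the
 * last reference to the address space goes away.
 */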

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
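
/*
 * The swap-in fault path calls this from do_swap_page().  Condensed from
 * mm/memory.c, the call looks roughly like:
 *
 *	page = ksm_might_need_to_copy(page, vma, vmf->address);
 *	if (unlikely(!page)) {
 *		ret = VM_FAULT_OOM;
 *		goto out_page;
 *	}
 *
 * The function returns @page itself when no copy is needed, a freshly
 * allocated copy when one is, or NULL if that allocation failed.
 */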

void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
bool reuse_ksm_page(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
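
/*
 * rmap_walk_ksm() is the KSM leg of the generic reverse-map walk.
 * Condensed from mm/rmap.c, rmap_walk() dispatches on the page type:
 *
 *	void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 *	{
 *		if (unlikely(PageKsm(page)))
 *			rmap_walk_ksm(page, rwc);
 *		else if (PageAnon(page))
 *			rmap_walk_anon(page, rwc, false);
 *		else
 *			rmap_walk_file(page, rwc);
 *	}
 */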

#else  /* !CONFIG_KSM */

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline void rmap_walk_ksm(struct page *page,
			struct rmap_walk_control *rwc)
{
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}

static inline bool reuse_ksm_page(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return false;
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */