/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);

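/*
 * Merging can be requested per-VMA via madvise(MADV_MERGEABLE), which
 * ends up in ksm_madvise(), or for all eligible VMAs of a process via
 * prctl(PR_SET_MEMORY_MERGE), which ends up in ksm_enable_merge_any().
 * A minimal userspace sketch (illustrative only):
 *
 *	#include <sys/mman.h>
 *	#include <sys/prctl.h>
 *
 *	madvise(addr, length, MADV_MERGEABLE);	// opt one region in
 *	prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);	// opt the whole process in
 */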
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory. The shared zeropage is only ever mapped
 * write-protected, so the dirty bit is otherwise unused for it and is
 * free to serve as this marker.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

extern unsigned long ksm_zero_pages;

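/*
 * Fix up the KSM zeropage accounting when a PTE that maps the shared
 * zeropage is about to be removed, if KSM was the one that placed it
 * (see is_ksm_zero_pte() above).
 */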
static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		ksm_zero_pages--;
		mm->ksm_zero_pages--;
	}
}

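/*
 * Called at fork() time: if the parent mm was registered with ksmd,
 * register the child mm too, and carry the process-wide
 * MMF_VM_MERGE_ANY setting over to the child.
 */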
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	int ret;

	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
		ret = __ksm_enter(mm);
		if (ret)
			return ret;
	}

	if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
		set_bit(MMF_VM_MERGE_ANY, &mm->flags);

	return 0;
}

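/*
 * Called when an mm is torn down: unregister it from ksmd if it was
 * registered.
 */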
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr);

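/*
 * rmap_walk_ksm() visits all mappings of a KSM folio via the rmap_items
 * attached to its stable node; folio_migrate_ksm() transfers that
 * stable-node linkage to the new folio during page migration.
 */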
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);

#ifdef CONFIG_MEMORY_FAILURE
void collect_procs_ksm(struct page *page, struct list_head *to_kill,
		       int force_early);
#endif

#ifdef CONFIG_PROC_FS
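/*
 * Per-process KSM profit, as exposed via /proc/<pid>/ksm_stat: roughly
 * the bytes saved by merging minus the metadata cost of the rmap_items
 * (see ksm_process_profit() in mm/ksm.c and
 * Documentation/admin-guide/mm/ksm.rst for the exact formula).
 */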
long ksm_process_profit(struct mm_struct *);
#endif /* CONFIG_PROC_FS */

#else /* !CONFIG_KSM */

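/*
 * Stubs for !CONFIG_KSM, so that callers need not wrap every call site
 * in #ifdef CONFIG_KSM.
 */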
static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

#ifdef CONFIG_MEMORY_FAILURE
static inline void collect_procs_ksm(struct page *page,
				     struct list_head *to_kill, int force_early)
{
}
#endif

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
{
	return folio;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */