/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0
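
/*
 * A minimal sketch (not part of this header's API) of wiring up the
 * migratepage callback: an address_space with no private page state can
 * point it straight at the generic migrate_page() helper declared below,
 * which returns MIGRATEPAGE_SUCCESS (0) on success or a negative errno.
 * The example_* names are hypothetical:
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= migrate_page,
 *	};
 */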

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];

/*
 * Allocate a destination page for migrating @page, preferring
 * @preferred_nid and constraining the allocation with @nodemask.
 * Hugetlb and transparent huge pages are given matching compound
 * destination pages; highmem/movable sources may land in highmem.
 */
static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
	unsigned int order = 0;
	struct page *new_page = NULL;

	if (PageHuge(page))
		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);

	if (PageTransHuge(page)) {
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}

	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages_nodemask(gfp_mask, order,
				preferred_nid, nodemask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}
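
/*
 * A hedged sketch of pairing new_page_nodemask() with the new_page_t and
 * free_page_t callbacks that migrate_pages() (declared below) expects.
 * The demo_* names are hypothetical, for illustration only:
 *
 *	static struct page *demo_new_page(struct page *page,
 *					  unsigned long private)
 *	{
 *		return new_page_nodemask(page, (int)private, NULL);
 *	}
 *
 *	static void demo_free_page(struct page *page, unsigned long private)
 *	{
 *		put_page(page);
 *	}
 *
 *	err = migrate_pages(&pagelist, demo_new_page, demo_free_page,
 *			    (unsigned long)nid, MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */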

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode,
		int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif
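
/*
 * A hedged sketch of how a driver for non-LRU movable pages might use the
 * helpers above: once a page is backed by an address_space whose a_ops
 * provide isolate_page/migratepage/putback_page, the driver marks it
 * movable under the page lock so compaction can migrate it. demo_mapping
 * is a hypothetical mapping set up by the driver:
 *
 *	lock_page(page);
 *	__SetPageMovable(page, demo_mapping);
 *	unlock_page(page);
 *
 * and before releasing the page:
 *
 *	lock_page(page);
 *	__ClearPageMovable(page);
 *	unlock_page(page);
 */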

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long is 32 bits but
 * physical addresses can be wider: after shifting the pfn left by
 * MIGRATE_PFN_SHIFT there might not be enough bits left for all the
 * flags below. So far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_DEVICE	(1UL << 4)
#define MIGRATE_PFN_ERROR	(1UL << 5)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
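
/*
 * A small usage sketch of the encoding above. A driver fills a src/dst
 * array entry by encoding the page's pfn and OR-ing in flags:
 *
 *	unsigned long mpfn = migrate_pfn(page_to_pfn(some_page)) |
 *			     MIGRATE_PFN_MIGRATE;
 *
 * and later decodes it back, with migrate_pfn_to_page(mpfn) returning
 * some_page and the flag bits still testable (e.g. mpfn &
 * MIGRATE_PFN_WRITE). Here some_page is a hypothetical struct page.
 */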

/*
 * struct migrate_vma_ops - migrate operation callback
 *
 * @alloc_and_copy: alloc destination memory and copy source memory to it
 * @finalize_and_map: allow caller to map the successfully migrated pages
 *
 *
 * The alloc_and_copy() callback happens once all source pages have been
 * locked, unmapped and checked (to see whether they are pinned). All pages
 * that can be migrated will have an entry in the src array set with the pfn
 * value of the page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flags set (other flags might be set but should be ignored by the callback).
 *
 * The alloc_and_copy() callback can then allocate destination memory and copy
 * source memory to it for all those entries (i.e. the ones with both the
 * MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set). Once these are
 * allocated and copied, the callback must update each corresponding entry in
 * the dst array with the pfn value of the destination page and with the
 * MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set (destination pages must
 * have their struct pages locked, via lock_page()).
 *
 * At this point the alloc_and_copy() callback is done and returns.
 *
 * Note that the callback does not have to migrate all the pages that are
 * marked with the MIGRATE_PFN_MIGRATE flag in the src array, unless this is
 * a migration from device memory to system memory (i.e. the
 * MIGRATE_PFN_DEVICE flag is also set in the src array entry). If the device
 * driver cannot migrate a device page back to system memory, then it must
 * set the corresponding dst array entry to MIGRATE_PFN_ERROR. This will
 * trigger a SIGBUS if the CPU tries to access any of the virtual addresses
 * originally backed by this page. Because a SIGBUS is such a severe result
 * for the userspace process, the device driver should avoid setting
 * MIGRATE_PFN_ERROR unless it is really in an unrecoverable state.
 *
 * For an empty entry in the CPU page table (i.e. pte_none() or pmd_none()
 * is true) we do set the MIGRATE_PFN_MIGRATE flag in the corresponding
 * source array entry, thus allowing the device driver to allocate device
 * memory for those unbacked virtual addresses. For this the device driver
 * simply has to allocate device memory and properly set the destination
 * entry, just like for regular migration. Note that this can still fail,
 * so inside the finalize_and_map() callback the device driver must check
 * whether the migration was successful for those entries, just like for
 * regular migration.
 *
 * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES
 * OR BAD THINGS WILL HAPPEN !
 *
 *
 * The finalize_and_map() callback happens after struct page migration from
 * source to destination (destination struct pages are the struct pages for
 * the memory allocated by the alloc_and_copy() callback). Migration can
 * fail, and thus the finalize_and_map() callback allows the driver to
 * inspect which pages were successfully migrated, and which were not.
 * Successfully migrated pages will have the MIGRATE_PFN_MIGRATE flag set for
 * their src array entry.
 *
 * It is safe to update the device page table from within the
 * finalize_and_map() callback because both the destination and source pages
 * are still locked, and the mmap_sem is held in read mode (hence no one can
 * unmap the range being migrated).
 *
 * Once the callback is done cleaning up things and updating its page table
 * (if it chose to do so, this is not an obligation), it returns. At this
 * point, the HMM core will finish up the final steps, and the migration is
 * complete.
 *
 * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST
 * ARRAY ENTRIES OR BAD THINGS WILL HAPPEN !
 */
struct migrate_vma_ops {
	void (*alloc_and_copy)(struct vm_area_struct *vma,
			       const unsigned long *src,
			       unsigned long *dst,
			       unsigned long start,
			       unsigned long end,
			       void *private);
	void (*finalize_and_map)(struct vm_area_struct *vma,
				 const unsigned long *src,
				 const unsigned long *dst,
				 unsigned long start,
				 unsigned long end,
				 void *private);
};
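
/*
 * A hedged sketch of the calling sequence documented above, assuming a
 * device driver migrating the pages of one VMA range. The demo_* names
 * and variables are hypothetical:
 *
 *	static const struct migrate_vma_ops demo_ops = {
 *		.alloc_and_copy		= demo_alloc_and_copy,
 *		.finalize_and_map	= demo_finalize_and_map,
 *	};
 *
 *	down_read(&mm->mmap_sem);
 *	ret = migrate_vma(&demo_ops, vma, start, end, src, dst, private);
 *	up_read(&mm->mmap_sem);
 *
 * where src[] and dst[] are caller-provided arrays with one entry per page
 * in [start, end), and demo_alloc_and_copy() fills dst[i] with
 * migrate_pfn(page_to_pfn(newpage)) | MIGRATE_PFN_LOCKED for every src[i]
 * that has MIGRATE_PFN_MIGRATE set.
 */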

#if defined(CONFIG_MIGRATE_VMA_HELPER)
int migrate_vma(const struct migrate_vma_ops *ops,
		struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end,
		unsigned long *src,
		unsigned long *dst,
		void *private);
#else
static inline int migrate_vma(const struct migrate_vma_ops *ops,
			      struct vm_area_struct *vma,
			      unsigned long start,
			      unsigned long end,
			      unsigned long *src,
			      unsigned long *dst,
			      void *private)
{
	return -EINVAL;
}
#endif /* CONFIG_MIGRATE_VMA_HELPER */

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */