/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

/*
 * swapcache pages are stored in the swapper_space radix tree. We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits. Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
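
/*
 * Worked example (illustrative, assuming BITS_PER_LONG == 64 and
 * MAX_SWAPFILES_SHIFT == 5): BITS_PER_XA_VALUE is then 63, so
 * SWP_TYPE_SHIFT is 58 and SWP_OFFSET_MASK covers bits 0..57.
 * The encoded value then looks like:
 *
 *	bits 58..62: swap type
 *	bits  0..57: swap offset
 */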

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}

#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
#endif
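
/*
 * Typical caller pattern (a sketch modelled on page-table walkers such
 * as do_swap_page(); not a verbatim kernel excerpt):
 *
 *	if (is_swap_pte(pte)) {
 *		swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(mm, pmd, address);
 *	}
 */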

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
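
/*
 * Round-trip sketch: pte -> swp_entry_t -> pte preserves type and
 * offset, but pte_to_swp_entry() above deliberately strips the
 * soft-dirty bit, so callers that care re-apply it themselves
 * (illustrative, modelled on the fork/mprotect paths):
 *
 *	swp_entry_t entry = pte_to_swp_entry(orig_pte);
 *	pte_t pte = swp_entry_to_pte(entry);
 *
 *	if (pte_swp_soft_dirty(orig_pte))
 *		pte = pte_swp_mksoft_dirty(pte);
 */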

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
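
/*
 * Round-trip sketch: shmem stores swap entries in the page cache as
 * XArray values, so the two conversions must be lossless inverses:
 *
 *	void *cookie = swp_to_radix_entry(entry);
 *	swp_entry_t back = radix_to_swp_entry(cookie);
 *
 *	back.val == entry.val holds for any in-range entry.
 */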

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
			 page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);

	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}

vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
				      unsigned long addr,
				      swp_entry_t entry,
				      unsigned int flags,
				      pmd_t *pmdp);
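
/*
 * Sketch of how a fault path hands off a device-private entry
 * (illustrative, modelled on do_swap_page(); field names assume a
 * struct vm_fault *vmf caller context):
 *
 *	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
 *
 *	if (is_device_private_entry(entry))
 *		ret = device_private_entry_fault(vmf->vma, vmf->address,
 *						 entry, vmf->flags,
 *						 vmf->pmd);
 */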
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
						    unsigned long addr,
						    swp_entry_t entry,
						    unsigned int flags,
						    pmd_t *pmdp)
{
	return VM_FAULT_SIGBUS;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			 page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				   spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
				      struct mm_struct *mm, pte_t *pte);
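
/*
 * Lifecycle sketch (simplified from mm/migrate.c; the page must remain
 * locked for the whole sequence):
 *
 *	swp_entry_t entry = make_migration_entry(page, pte_write(pteval));
 *
 *	set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));
 *	// ... copy the page contents to the new location ...
 *	// A thread faulting on the entry meanwhile blocks in
 *	// migration_entry_wait() until the page is unlocked.
 */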
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					  spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
					     struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
				    struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
				 struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
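
/*
 * Usage sketch (illustrative): a THP-aware page-table walker checks for
 * a PMD migration entry before touching the non-present huge mapping:
 *
 *	if (is_pmd_migration_entry(*pmd))
 *		pmd_migration_entry_wait(mm, pmd);
 */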
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
					   struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
					struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}
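
/*
 * Accounting sketch (illustrative, loosely modelled on
 * mm/memory-failure.c): after a poisoned page's mappings are replaced
 * with hwpoison entries, the global counter is bumped:
 *
 *	swp_entry_t entry = make_hwpoison_entry(page);
 *
 *	set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));
 *	num_poisoned_pages_inc();
 */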

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}

static inline void num_poisoned_pages_dec(void)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
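
/*
 * Usage sketch (illustrative): callers use non_swap_entry() to separate
 * real swap slots from the special encodings (migration, hwpoison,
 * device private), whose types sit at or above MAX_SWAPFILES:
 *
 *	swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *	if (!non_swap_entry(entry))
 *		swap_duplicate(entry);	// a genuine swap slot
 */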
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif

#endif /* _LINUX_SWAPOPS_H */