// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .dirty_folio    = noop_dirty_folio,
#ifdef CONFIG_MIGRATION
        .migrate_folio  = migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)                            \
        (((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))
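
/*
 * The readahead state is packed into a single unsigned long so that it
 * can be read and written with one atomic_long_read()/atomic_long_set()
 * on vma->swap_readahead_info.  As an illustration of the layout
 * (assuming PAGE_SHIFT == 12, so SWAP_RA_WIN_SHIFT == 6):
 *
 *   bits  0..5   readahead hits          (SWAP_RA_HITS_MASK == 0x3f)
 *   bits  6..11  readahead window        (SWAP_RA_WIN_MASK  == 0xfc0)
 *   bits 12..    page-aligned fault address (PAGE_MASK)
 *
 * e.g. SWAP_RA_VAL(0x7f0000403000, 8, 3)
 *      == 0x7f0000403000 | (8 << 6) | 3 == 0x7f0000403203.
 */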

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)                                    \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Free swap = %ldkB\n", K(get_nr_swap_pages()));
        printk("Total swap = %lukB\n", K(total_swap_pages));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        struct page *page;

        page = xa_load(&address_space->i_pages, idx);
        if (xa_is_value(page))
                return page;
        return NULL;
}
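
/*
 * Note on get_shadow_from_swap_cache() above: a shadow is an XArray
 * value entry (xa_is_value()) left behind by reclaim to encode
 * workingset eviction information.  A real folio pointer at the same
 * index means the slot holds an actual swap cache page, so NULL is
 * returned in that case.
 */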

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
                        gfp_t gfp, void **shadowp)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
        unsigned long i, nr = folio_nr_pages(folio);
        void *old;

        xas_set_update(&xas, workingset_update_node);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

        folio_ref_add(folio, nr);
        folio_set_swapcache(folio);
        folio->swap = entry;

        do {
                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
                        if (shadowp) {
                                old = xas_load(&xas);
                                if (xa_is_value(old))
                                        *shadowp = old;
                        }
                        xas_store(&xas, folio);
                        xas_next(&xas);
                }
                address_space->nrpages += nr;
                __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
                __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (!xas_error(&xas))
                return 0;

        folio_clear_swapcache(folio);
        folio_ref_sub(folio, nr);
        return xas_error(&xas);
}
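
/*
 * Sketch of the expected calling pattern for add_to_swap_cache() (see
 * add_to_swap() below for the canonical caller): the folio must be
 * locked and swap-backed but not yet in the swap cache, and @entry must
 * already be reserved (e.g. via folio_alloc_swap()), since this
 * function only inserts the folio into the swap address space.
 */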

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
                        swp_entry_t entry, void *shadow)
{
        struct address_space *address_space = swap_address_space(entry);
        int i;
        long nr = folio_nr_pages(folio);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        xas_set_update(&xas, workingset_update_node);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != folio, entry);
                xas_next(&xas);
        }
        folio->swap.val = 0;
        folio_clear_swapcache(folio);
        address_space->nrpages -= nr;
        __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
        __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

        entry = folio_alloc_swap(folio);
        if (!entry.val)
                return false;

        /*
         * XArray node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(folio, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the folio will be dirtied in unmap because its
         * pte should be dirty. A special case is a MADV_FREE page: its
         * pte could have the dirty bit cleared while the folio's
         * SwapBacked flag is still set, because clearing the dirty bit
         * and the SwapBacked flag is not done under a lock. For such a
         * folio, unmap will not set the dirty bit, so folio reclaim
         * will not write the folio out. This can cause data corruption
         * when the folio is swapped in later. Always setting the dirty
         * flag for the folio solves the problem.
         */
        folio_mark_dirty(folio);

        return true;

fail:
        put_swap_folio(folio, entry);
        return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list;
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
        swp_entry_t entry = folio->swap;
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(folio, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_folio(folio, entry);
        folio_ref_sub(folio, folio_nr_pages(folio));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
                        unsigned long end)
{
        unsigned long curr = begin;
        void *old;

        for (;;) {
                swp_entry_t entry = swp_entry(type, curr);
                struct address_space *address_space = swap_address_space(entry);
                XA_STATE(xas, &address_space->i_pages, curr);

                xas_set_update(&xas, workingset_update_node);

                xa_lock_irq(&address_space->i_pages);
                xas_for_each(&xas, old, end) {
                        if (!xa_is_value(old))
                                continue;
                        xas_store(&xas, NULL);
                }
                xa_unlock_irq(&address_space->i_pages);

                /* search the next swapcache chunk until we pass end */
                curr >>= SWAP_ADDRESS_SPACE_SHIFT;
                curr++;
                curr <<= SWAP_ADDRESS_SPACE_SHIFT;
                if (curr > end)
                        break;
        }
}
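
/*
 * Worked example for the chunk walk in clear_shadow_from_swap_cache()
 * (assuming SWAP_ADDRESS_SPACE_SHIFT == 14, i.e. 16384 slots per
 * address space): with begin == 20000 and end == 40000, the first pass
 * clears shadows at indices 20000..40000 that live in spaces[1] (slots
 * 16384..32767), curr then advances to the chunk boundary 32768, the
 * second pass covers spaces[2], and curr becomes 49152 > end, ending
 * the loop.
 */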

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
void free_swap_cache(struct page *page)
{
        struct folio *folio = page_folio(page);

        if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
            folio_trylock(folio)) {
                folio_free_swap(folio);
                folio_unlock(folio);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them. They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
        lru_add_drain();
        for (int i = 0; i < nr; i++)
                free_swap_cache(encoded_page_ptr(pages[i]));
        release_pages(pages, nr);
}

static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
                struct vm_area_struct *vma, unsigned long addr)
{
        struct folio *folio;

        folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
        if (!IS_ERR(folio)) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(folio_test_large(folio)))
                        return folio;

                readahead = folio_test_clear_readahead(folio);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        } else {
                folio = NULL;
        }

        return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or ERR_PTR(-ENOENT) if not found.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
                pgoff_t index)
{
        swp_entry_t swp;
        struct swap_info_struct *si;
        struct folio *folio = filemap_get_entry(mapping, index);

        if (!folio)
                return ERR_PTR(-ENOENT);
        if (!xa_is_value(folio))
                return folio;
        if (!shmem_mapping(mapping))
                return ERR_PTR(-ENOENT);

        swp = radix_to_swp_entry(folio);
        /* There might be swapin error entries in shmem mapping. */
        if (non_swap_entry(swp))
                return ERR_PTR(-ENOENT);
        /* Prevent swapoff from happening to us */
        si = get_swap_device(swp);
        if (!si)
                return ERR_PTR(-ENOENT);
        index = swp_offset(swp);
        folio = filemap_get_folio(swap_address_space(swp), index);
        put_swap_device(si);
        return folio;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                struct mempolicy *mpol, pgoff_t ilx,
                bool *new_page_allocated)
{
        struct swap_info_struct *si;
        struct folio *folio;
        struct page *page;
        void *shadow = NULL;

        *new_page_allocated = false;
        si = get_swap_device(entry);
        if (!si)
                return NULL;

        for (;;) {
                int err;
                /*
                 * First check the swap cache. Since this is normally
                 * called after swap_cache_get_folio() failed, re-calling
                 * that would confuse statistics.
                 */
                folio = filemap_get_folio(swap_address_space(entry),
                                          swp_offset(entry));
                if (!IS_ERR(folio)) {
                        page = folio_file_page(folio, swp_offset(entry));
                        goto got_page;
                }

                /*
                 * Just skip read ahead for unused swap slot.
                 * During swap_off, when swap_slot_cache is disabled,
                 * we have to handle the race between putting the
                 * swap entry into the swap cache and marking the swap
                 * slot as SWAP_HAS_CACHE. That's handled in a later
                 * part of this code, or else swap_off will be aborted
                 * if we return NULL.
                 */
                if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
                        goto fail_put_swap;

                /*
                 * Get a new page to read into from swap. Allocate it now,
                 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
                 * cause any racers to loop around until we add it to cache.
                 */
                folio = (struct folio *)alloc_pages_mpol(gfp_mask, 0,
                                                mpol, ilx, numa_node_id());
                if (!folio)
                        goto fail_put_swap;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (!err)
                        break;

                folio_put(folio);
                if (err != -EEXIST)
                        goto fail_put_swap;

                /*
                 * We might race against __delete_from_swap_cache(), and
                 * stumble across a swap_map entry whose SWAP_HAS_CACHE
                 * has not yet been cleared. Or race against another
                 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
                 * in swap_map, but not yet added its page to swap cache.
                 */
                schedule_timeout_uninterruptible(1);
        }

        /*
         * The swap entry is ours to swap in. Prepare the new page.
         */

        __folio_set_locked(folio);
        __folio_set_swapbacked(folio);

        if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
                goto fail_unlock;

        /* May fail (-ENOMEM) if XArray node allocation failed. */
        if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
                goto fail_unlock;

        mem_cgroup_swapin_uncharge_swap(entry);

        if (shadow)
                workingset_refault(folio, shadow);

        /* Caller will initiate read into locked folio */
        folio_add_lru(folio);
        *new_page_allocated = true;
        page = &folio->page;
got_page:
        put_swap_device(si);
        return page;

fail_unlock:
        put_swap_folio(folio, entry);
        folio_unlock(folio);
        folio_put(folio);
fail_put_swap:
        put_swap_device(si);
        return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_readpage() holds the
 * swap cache folio lock.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                struct vm_area_struct *vma,
                unsigned long addr, struct swap_iocb **plug)
{
        bool page_allocated;
        struct mempolicy *mpol;
        pgoff_t ilx;
        struct page *page;

        mpol = get_vma_policy(vma, addr, 0, &ilx);
        page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
                                       &page_allocated);
        mpol_cond_put(mpol);

        if (page_allocated)
                swap_readpage(page, false, plug);
        return page;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}
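
/*
 * Worked example for __swapin_nr_pages() above: hits == 5 gives
 * pages = 7, which the power-of-two roundup turns into 8; with
 * page_cluster == 3 that already equals max_pages == 8.  Conversely,
 * hits == 0 with a non-adjacent offset gives pages = 1, which a
 * previous window of 8 lifts back to 8 / 2 == 4 via the "don't shrink
 * readahead too fast" rule.
 */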

static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
                                  max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                WRITE_ONCE(prev_offset, offset);
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time. We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * Note: it is intentional that the same NUMA policy and interleave index
 * are used for every page of the readahead: neighbouring pages on swap
 * are fairly likely to have been swapped out from the same node.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                    struct mempolicy *mpol, pgoff_t ilx)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        struct swap_iocb *splug = NULL;
        bool page_allocated;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;
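        /*
         * Example: with offset == 100 and a window of 8 pages,
         * mask == 7, so the cluster spans start_offset == 96 to
         * end_offset == 103; offset 0 is never read because it holds
         * the swap header.
         */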

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = __read_swap_cache_async(
                                swp_entry(swp_type(entry), offset),
                                gfp_mask, mpol, ilx, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false, &splug);
                        if (offset != entry_offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        swap_read_unplug(splug);
        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        /* The page was likely read above, so no need for plugging here */
        page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
                                       &page_allocated);
        if (unlikely(page_allocated))
                swap_readpage(page, false, NULL);
        return page;
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
        swapper_spaces[type] = spaces;

        return 0;
}
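
/*
 * Sizing example for init_swap_address_space() (assuming
 * SWAP_ADDRESS_SPACE_PAGES == 16384): a 1 GiB swap device with 4 KiB
 * pages has nr_pages == 262144, so 16 address_spaces are allocated,
 * each covering a 64 MiB chunk of the device.  Splitting the swap
 * cache this way spreads i_pages lock contention across the chunks.
 */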

void exit_swap_address_space(unsigned int type)
{
        int i;
        struct address_space *spaces = swapper_spaces[type];

        for (i = 0; i < nr_swapper_spaces[type]; i++)
                VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
        kvfree(spaces);
        nr_swapper_spaces[type] = 0;
        swapper_spaces[type] = NULL;
}

#define SWAP_RA_ORDER_CEILING   5

struct vma_swap_readahead {
        unsigned short win;
        unsigned short offset;
        unsigned short nr_pte;
};

static void swap_ra_info(struct vm_fault *vmf,
                         struct vma_swap_readahead *ra_info)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        unsigned long faddr, pfn, fpfn, lpfn, rpfn;
        unsigned long start, end;
        unsigned int max_win, hits, prev_win, win;

        max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
                             SWAP_RA_ORDER_CEILING);
        if (max_win == 1) {
                ra_info->win = 1;
                return;
        }

        faddr = vmf->address;
        fpfn = PFN_DOWN(faddr);
        ra_val = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
                                               max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info,
                        SWAP_RA_VAL(faddr, win, 0));
        if (win == 1)
                return;

        if (fpfn == pfn + 1) {
                lpfn = fpfn;
                rpfn = fpfn + win;
        } else if (pfn == fpfn + 1) {
                lpfn = fpfn - win + 1;
                rpfn = fpfn + 1;
        } else {
                unsigned int left = (win - 1) / 2;

                lpfn = fpfn - left;
                rpfn = fpfn + win - left;
        }
        start = max3(lpfn, PFN_DOWN(vma->vm_start),
                     PFN_DOWN(faddr & PMD_MASK));
        end = min3(rpfn, PFN_DOWN(vma->vm_end),
                   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));

        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
}
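
/*
 * Placement example for the window logic in swap_ra_info(): with
 * win == 8 and a fault at fpfn == 100, a forward sequential hint
 * (previous fault at pfn == 99) yields [100, 108); a backward hint
 * (pfn == 101) yields [93, 101); otherwise the window is roughly
 * centred: left == 3, giving [97, 105).  The result is then clipped
 * to the VMA and to the PMD containing the fault address.
 */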

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @targ_entry: swap entry of the targeted memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
                                       struct mempolicy *mpol, pgoff_t targ_ilx,
                                       struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct swap_iocb *splug = NULL;
        struct page *page;
        pte_t *pte = NULL, pentry;
        unsigned long addr;
        swp_entry_t entry;
        pgoff_t ilx;
        unsigned int i;
        bool page_allocated;
        struct vma_swap_readahead ra_info = {
                .win = 1,
        };

        swap_ra_info(vmf, &ra_info);
        if (ra_info.win == 1)
                goto skip;

        addr = vmf->address - (ra_info.offset * PAGE_SIZE);
        ilx = targ_ilx - ra_info.offset;

        blk_start_plug(&plug);
        for (i = 0; i < ra_info.nr_pte; i++, ilx++, addr += PAGE_SIZE) {
                if (!pte++) {
                        pte = pte_offset_map(vmf->pmd, addr);
                        if (!pte)
                                break;
                }
                pentry = ptep_get_lockless(pte);
                if (!is_swap_pte(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                pte_unmap(pte);
                pte = NULL;
                page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
                                               &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false, &splug);
                        if (i != ra_info.offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        if (pte)
                pte_unmap(pte);
        blk_finish_plug(&plug);
        swap_read_unplug(splug);
        lru_add_drain();
skip:
        /* The page was likely read above, so no need for plugging here */
        page = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
                                       &page_allocated);
        if (unlikely(page_allocated))
                swap_readpage(page, false, NULL);
        return page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on
 * configuration, it reads ahead using either cluster-based (i.e.,
 * physical disk based) or vma-based (i.e., virtual address based on
 * the fault address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                              struct vm_fault *vmf)
{
        struct mempolicy *mpol;
        pgoff_t ilx;
        struct page *page;

        mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
        page = swap_use_vma_readahead() ?
                swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
                swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
        mpol_cond_put(mpol);
        return page;
}
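
/*
 * The usual caller of swapin_readahead() is the page fault path
 * (do_swap_page()), which passes the faulting entry and the vm_fault so
 * that vma-based readahead can inspect the neighbouring PTEs.
 */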

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n",
                          enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t count)
{
        ssize_t ret;

        ret = kstrtobool(buf, &enable_vma_readahead);
        if (ret)
                return ret;

        return count;
}
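
/*
 * This knob is exposed via the "swap" kobject created in
 * swap_init_sysfs() below, i.e. /sys/kernel/mm/swap/vma_ra_enabled;
 * writing "false" makes swapin_readahead() fall back to cluster-based
 * readahead.
 */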
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static const struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}
subsys_initcall(swap_init_sysfs);
#endif