1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds |
4 | * |
5 | * Swap reorganised 29.12.95, Stephen Tweedie. |
6 | * kswapd added: 7.1.96 sct |
7 | * Removed kswapd_ctl limits, and swap out as many pages as needed |
8 | * to bring the system back to freepages.high: 2.4.97, Rik van Riel. |
9 | * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). |
10 | * Multiqueue VM started 5.8.00, Rik van Riel. |
11 | */ |
12 | |
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
14 | |
15 | #include <linux/mm.h> |
16 | #include <linux/sched/mm.h> |
17 | #include <linux/module.h> |
18 | #include <linux/gfp.h> |
19 | #include <linux/kernel_stat.h> |
20 | #include <linux/swap.h> |
21 | #include <linux/pagemap.h> |
22 | #include <linux/init.h> |
23 | #include <linux/highmem.h> |
24 | #include <linux/vmpressure.h> |
25 | #include <linux/vmstat.h> |
26 | #include <linux/file.h> |
27 | #include <linux/writeback.h> |
28 | #include <linux/blkdev.h> |
29 | #include <linux/buffer_head.h> /* for buffer_heads_over_limit */ |
30 | #include <linux/mm_inline.h> |
31 | #include <linux/backing-dev.h> |
32 | #include <linux/rmap.h> |
33 | #include <linux/topology.h> |
34 | #include <linux/cpu.h> |
35 | #include <linux/cpuset.h> |
36 | #include <linux/compaction.h> |
37 | #include <linux/notifier.h> |
38 | #include <linux/delay.h> |
39 | #include <linux/kthread.h> |
40 | #include <linux/freezer.h> |
41 | #include <linux/memcontrol.h> |
42 | #include <linux/migrate.h> |
43 | #include <linux/delayacct.h> |
44 | #include <linux/sysctl.h> |
45 | #include <linux/memory-tiers.h> |
46 | #include <linux/oom.h> |
47 | #include <linux/pagevec.h> |
48 | #include <linux/prefetch.h> |
49 | #include <linux/printk.h> |
50 | #include <linux/dax.h> |
51 | #include <linux/psi.h> |
52 | #include <linux/pagewalk.h> |
53 | #include <linux/shmem_fs.h> |
54 | #include <linux/ctype.h> |
55 | #include <linux/debugfs.h> |
56 | #include <linux/khugepaged.h> |
57 | #include <linux/rculist_nulls.h> |
58 | #include <linux/random.h> |
59 | |
60 | #include <asm/tlbflush.h> |
61 | #include <asm/div64.h> |
62 | |
63 | #include <linux/swapops.h> |
64 | #include <linux/balloon_compaction.h> |
65 | #include <linux/sched/sysctl.h> |
66 | |
67 | #include "internal.h" |
68 | #include "swap.h" |
69 | |
70 | #define CREATE_TRACE_POINTS |
71 | #include <trace/events/vmscan.h> |
72 | |
73 | struct scan_control { |
74 | /* How many pages shrink_list() should reclaim */ |
75 | unsigned long nr_to_reclaim; |
76 | |
77 | /* |
78 | * Nodemask of nodes allowed by the caller. If NULL, all nodes |
79 | * are scanned. |
80 | */ |
81 | nodemask_t *nodemask; |
82 | |
83 | /* |
84 | * The memory cgroup that hit its limit and as a result is the |
85 | * primary target of this reclaim invocation. |
86 | */ |
87 | struct mem_cgroup *target_mem_cgroup; |
88 | |
89 | /* |
90 | * Scan pressure balancing between anon and file LRUs |
91 | */ |
92 | unsigned long anon_cost; |
93 | unsigned long file_cost; |
94 | |
95 | /* Can active folios be deactivated as part of reclaim? */ |
96 | #define DEACTIVATE_ANON 1 |
97 | #define DEACTIVATE_FILE 2 |
98 | unsigned int may_deactivate:2; |
99 | unsigned int force_deactivate:1; |
100 | unsigned int skipped_deactivate:1; |
101 | |
102 | /* Writepage batching in laptop mode; RECLAIM_WRITE */ |
103 | unsigned int may_writepage:1; |
104 | |
105 | /* Can mapped folios be reclaimed? */ |
106 | unsigned int may_unmap:1; |
107 | |
108 | /* Can folios be swapped as part of reclaim? */ |
109 | unsigned int may_swap:1; |
110 | |
111 | /* Proactive reclaim invoked by userspace through memory.reclaim */ |
112 | unsigned int proactive:1; |
113 | |
114 | /* |
115 | * Cgroup memory below memory.low is protected as long as we |
116 | * don't threaten to OOM. If any cgroup is reclaimed at |
117 | * reduced force or passed over entirely due to its memory.low |
118 | * setting (memcg_low_skipped), and nothing is reclaimed as a |
119 | * result, then go back for one more cycle that reclaims the protected |
120 | * memory (memcg_low_reclaim) to avert OOM. |
121 | */ |
122 | unsigned int memcg_low_reclaim:1; |
123 | unsigned int memcg_low_skipped:1; |
124 | |
125 | unsigned int hibernation_mode:1; |
126 | |
127 | /* One of the zones is ready for compaction */ |
128 | unsigned int compaction_ready:1; |
129 | |
130 | /* There is easily reclaimable cold cache in the current node */ |
131 | unsigned int cache_trim_mode:1; |
132 | |
133 | /* The file folios on the current node are dangerously low */ |
134 | unsigned int file_is_tiny:1; |
135 | |
136 | /* Always discard instead of demoting to lower tier memory */ |
137 | unsigned int no_demotion:1; |
138 | |
139 | /* Allocation order */ |
140 | s8 order; |
141 | |
142 | /* Scan (total_size >> priority) pages at once */ |
143 | s8 priority; |
144 | |
145 | /* The highest zone to isolate folios for reclaim from */ |
146 | s8 reclaim_idx; |
147 | |
148 | /* This context's GFP mask */ |
149 | gfp_t gfp_mask; |
150 | |
151 | /* Incremented by the number of inactive pages that were scanned */ |
152 | unsigned long nr_scanned; |
153 | |
154 | /* Number of pages freed so far during a call to shrink_zones() */ |
155 | unsigned long nr_reclaimed; |
156 | |
157 | struct { |
158 | unsigned int dirty; |
159 | unsigned int unqueued_dirty; |
160 | unsigned int congested; |
161 | unsigned int writeback; |
162 | unsigned int immediate; |
163 | unsigned int file_taken; |
164 | unsigned int taken; |
165 | } nr; |
166 | |
167 | /* for recording the reclaimed slab by now */ |
168 | struct reclaim_state reclaim_state; |
169 | }; |
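
/*
 * A worked example of the priority field above, assuming the usual
 * DEF_PRIORITY of 12: a reclaim pass looks at lru_size >> 12 pages
 * (roughly 0.02% of each list); each pass that does not reclaim enough
 * lowers priority by one, doubling the scan window, until priority 0
 * scans the entire list.
 */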
170 | |
171 | #ifdef ARCH_HAS_PREFETCHW |
172 | #define prefetchw_prev_lru_folio(_folio, _base, _field) \ |
173 | do { \ |
174 | if ((_folio)->lru.prev != _base) { \ |
175 | struct folio *prev; \ |
176 | \ |
177 | prev = lru_to_folio(&(_folio->lru)); \ |
178 | prefetchw(&prev->_field); \ |
179 | } \ |
180 | } while (0) |
181 | #else |
182 | #define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0) |
183 | #endif |
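
/*
 * For reference, the macro above is used while walking an LRU list from
 * its tail towards its head: it prefetches the given @_field (e.g. "flags")
 * of the folio that will be visited next, hiding part of the cache-miss
 * latency of the list walk. Without ARCH_HAS_PREFETCHW it is a no-op.
 */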
184 | |
185 | /* |
186 | * From 0 .. 200. Higher means more swappy. |
187 | */ |
188 | int vm_swappiness = 60; |
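
/*
 * vm_swappiness is the global default and can be tuned at runtime, e.g.
 * "sysctl vm.swappiness=100" to tell reclaim to treat the cost of swapping
 * anon pages and dropping file pages as roughly equal; memory cgroups can
 * carry their own override. The value feeds the anon vs. file scan balance.
 */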
189 | |
190 | #ifdef CONFIG_MEMCG |
191 | |
192 | /* Returns true for reclaim through cgroup limits or cgroup interfaces. */ |
193 | static bool cgroup_reclaim(struct scan_control *sc) |
194 | { |
195 | return sc->target_mem_cgroup; |
196 | } |
197 | |
198 | /* |
199 | * Returns true for reclaim on the root cgroup. This is true for direct |
200 | * allocator reclaim and reclaim through cgroup interfaces on the root cgroup. |
201 | */ |
202 | static bool root_reclaim(struct scan_control *sc) |
203 | { |
	return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
205 | } |
206 | |
207 | /** |
208 | * writeback_throttling_sane - is the usual dirty throttling mechanism available? |
209 | * @sc: scan_control in question |
210 | * |
211 | * The normal page dirty throttling mechanism in balance_dirty_pages() is |
212 | * completely broken with the legacy memcg and direct stalling in |
213 | * shrink_folio_list() is used for throttling instead, which lacks all the |
214 | * niceties such as fairness, adaptive pausing, bandwidth proportional |
215 | * allocation and configurability. |
216 | * |
217 | * This function tests whether the vmscan currently in progress can assume |
218 | * that the normal dirty throttling mechanism is operational. |
219 | */ |
220 | static bool writeback_throttling_sane(struct scan_control *sc) |
221 | { |
222 | if (!cgroup_reclaim(sc)) |
223 | return true; |
224 | #ifdef CONFIG_CGROUP_WRITEBACK |
225 | if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) |
226 | return true; |
227 | #endif |
228 | return false; |
229 | } |
230 | #else |
231 | static bool cgroup_reclaim(struct scan_control *sc) |
232 | { |
233 | return false; |
234 | } |
235 | |
236 | static bool root_reclaim(struct scan_control *sc) |
237 | { |
238 | return true; |
239 | } |
240 | |
241 | static bool writeback_throttling_sane(struct scan_control *sc) |
242 | { |
243 | return true; |
244 | } |
245 | #endif |
246 | |
247 | static void set_task_reclaim_state(struct task_struct *task, |
248 | struct reclaim_state *rs) |
249 | { |
250 | /* Check for an overwrite */ |
251 | WARN_ON_ONCE(rs && task->reclaim_state); |
252 | |
253 | /* Check for the nulling of an already-nulled member */ |
254 | WARN_ON_ONCE(!rs && !task->reclaim_state); |
255 | |
256 | task->reclaim_state = rs; |
257 | } |
258 | |
259 | /* |
260 | * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to |
261 | * scan_control->nr_reclaimed. |
262 | */ |
263 | static void flush_reclaim_state(struct scan_control *sc) |
264 | { |
265 | /* |
266 | * Currently, reclaim_state->reclaimed includes three types of pages |
267 | * freed outside of vmscan: |
268 | * (1) Slab pages. |
269 | * (2) Clean file pages from pruned inodes (on highmem systems). |
270 | * (3) XFS freed buffer pages. |
271 | * |
272 | * For all of these cases, we cannot universally link the pages to a |
273 | * single memcg. For example, a memcg-aware shrinker can free one object |
274 | * charged to the target memcg, causing an entire page to be freed. |
275 | * If we count the entire page as reclaimed from the memcg, we end up |
276 | * overestimating the reclaimed amount (potentially under-reclaiming). |
277 | * |
278 | * Only count such pages for global reclaim to prevent under-reclaiming |
279 | * from the target memcg; preventing unnecessary retries during memcg |
280 | * charging and false positives from proactive reclaim. |
281 | * |
282 | * For uncommon cases where the freed pages were actually mostly |
283 | * charged to the target memcg, we end up underestimating the reclaimed |
284 | * amount. This should be fine. The freed pages will be uncharged |
285 | * anyway, even if they are not counted here properly, and we will be |
286 | * able to make forward progress in charging (which is usually in a |
287 | * retry loop). |
288 | * |
289 | * We can go one step further, and report the uncharged objcg pages in |
290 | * memcg reclaim, to make reporting more accurate and reduce |
291 | * underestimation, but it's probably not worth the complexity for now. |
292 | */ |
293 | if (current->reclaim_state && root_reclaim(sc)) { |
294 | sc->nr_reclaimed += current->reclaim_state->reclaimed; |
295 | current->reclaim_state->reclaimed = 0; |
296 | } |
297 | } |
298 | |
299 | static bool can_demote(int nid, struct scan_control *sc) |
300 | { |
301 | if (!numa_demotion_enabled) |
302 | return false; |
303 | if (sc && sc->no_demotion) |
304 | return false; |
	if (next_demotion_node(nid) == NUMA_NO_NODE)
306 | return false; |
307 | |
308 | return true; |
309 | } |
310 | |
311 | static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg, |
312 | int nid, |
313 | struct scan_control *sc) |
314 | { |
315 | if (memcg == NULL) { |
316 | /* |
317 | * For non-memcg reclaim, is there |
318 | * space in any swap device? |
319 | */ |
320 | if (get_nr_swap_pages() > 0) |
321 | return true; |
322 | } else { |
323 | /* Is the memcg below its swap limit? */ |
324 | if (mem_cgroup_get_nr_swap_pages(memcg) > 0) |
325 | return true; |
326 | } |
327 | |
328 | /* |
329 | * The page can not be swapped. |
330 | * |
331 | * Can it be reclaimed from this node via demotion? |
332 | */ |
333 | return can_demote(nid, sc); |
334 | } |
335 | |
336 | /* |
337 | * This misses isolated folios which are not accounted for to save counters. |
338 | * As the data only determines if reclaim or compaction continues, it is |
339 | * not expected that isolated folios will be a dominating factor. |
340 | */ |
341 | unsigned long zone_reclaimable_pages(struct zone *zone) |
342 | { |
343 | unsigned long nr; |
344 | |
	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
350 | |
351 | return nr; |
352 | } |
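
/*
 * Note that on a swapless node with no lower memory tier to demote to,
 * the helper above counts only the file LRU pages: anon pages cannot be
 * reclaimed in that configuration and are deliberately left out of the
 * estimate.
 */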
353 | |
354 | /** |
355 | * lruvec_lru_size - Returns the number of pages on the given LRU list. |
356 | * @lruvec: lru vector |
357 | * @lru: lru to use |
358 | * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list) |
359 | */ |
360 | static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, |
361 | int zone_idx) |
362 | { |
363 | unsigned long size = 0; |
364 | int zid; |
365 | |
366 | for (zid = 0; zid <= zone_idx; zid++) { |
367 | struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; |
368 | |
369 | if (!managed_zone(zone)) |
370 | continue; |
371 | |
372 | if (!mem_cgroup_disabled()) |
			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
		else
			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
376 | } |
377 | return size; |
378 | } |
379 | |
380 | static unsigned long drop_slab_node(int nid) |
381 | { |
382 | unsigned long freed = 0; |
383 | struct mem_cgroup *memcg = NULL; |
384 | |
385 | memcg = mem_cgroup_iter(NULL, NULL, NULL); |
386 | do { |
		freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
388 | } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); |
389 | |
390 | return freed; |
391 | } |
392 | |
393 | void drop_slab(void) |
394 | { |
395 | int nid; |
396 | int shift = 0; |
397 | unsigned long freed; |
398 | |
399 | do { |
400 | freed = 0; |
401 | for_each_online_node(nid) { |
402 | if (fatal_signal_pending(current)) |
403 | return; |
404 | |
405 | freed += drop_slab_node(nid); |
406 | } |
407 | } while ((freed >> shift++) > 1); |
408 | } |
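
/*
 * The termination condition above demands exponentially more progress on
 * each pass: pass N keeps going only while (freed >> N) > 1. As a rough
 * example, passes freeing 1000, 300, 80, 20 and 10 objects compare
 * 1000>>0, 300>>1, 80>>2, 20>>3 and 10>>4, so the loop stops after the
 * fifth pass, when the shifted value drops to 1 or below.
 */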
409 | |
410 | static int reclaimer_offset(void) |
411 | { |
412 | BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != |
413 | PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD); |
414 | BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != |
415 | PGSCAN_DIRECT - PGSCAN_KSWAPD); |
416 | BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != |
417 | PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD); |
418 | BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != |
419 | PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD); |
420 | |
421 | if (current_is_kswapd()) |
422 | return 0; |
423 | if (current_is_khugepaged()) |
424 | return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD; |
425 | return PGSTEAL_DIRECT - PGSTEAL_KSWAPD; |
426 | } |
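
/*
 * The offset returned above is added to the *_KSWAPD base of a stat group
 * so that one call site charges the right counter for the current context,
 * e.g. something like:
 *
 *	__count_vm_events(PGSCAN_KSWAPD + reclaimer_offset(), nr_scanned);
 *
 * resolves to PGSCAN_KSWAPD, PGSCAN_DIRECT or PGSCAN_KHUGEPAGED depending
 * on who is reclaiming. The BUILD_BUG_ONs guarantee that the three stat
 * groups use the same spacing.
 */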
427 | |
428 | static inline int is_page_cache_freeable(struct folio *folio) |
429 | { |
430 | /* |
431 | * A freeable page cache folio is referenced only by the caller |
432 | * that isolated the folio, the page cache and optional filesystem |
433 | * private data at folio->private. |
434 | */ |
435 | return folio_ref_count(folio) - folio_test_private(folio) == |
436 | 1 + folio_nr_pages(folio); |
437 | } |
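
/*
 * Worked example for the check above: an order-0 folio that sits only in
 * the page cache is freeable when its refcount is 2 (isolating caller +
 * page cache); if it also carries filesystem private data (buffer heads),
 * the expected refcount is 3, which the folio_test_private() term accounts
 * for.
 */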
438 | |
439 | /* |
440 | * We detected a synchronous write error writing a folio out. Probably |
441 | * -ENOSPC. We need to propagate that into the address_space for a subsequent |
442 | * fsync(), msync() or close(). |
443 | * |
444 | * The tricky part is that after writepage we cannot touch the mapping: nothing |
445 | * prevents it from being freed up. But we have a ref on the folio and once |
446 | * that folio is locked, the mapping is pinned. |
447 | * |
448 | * We're allowed to run sleeping folio_lock() here because we know the caller has |
449 | * __GFP_FS. |
450 | */ |
451 | static void handle_write_error(struct address_space *mapping, |
452 | struct folio *folio, int error) |
453 | { |
454 | folio_lock(folio); |
455 | if (folio_mapping(folio) == mapping) |
456 | mapping_set_error(mapping, error); |
457 | folio_unlock(folio); |
458 | } |
459 | |
460 | static bool skip_throttle_noprogress(pg_data_t *pgdat) |
461 | { |
462 | int reclaimable = 0, write_pending = 0; |
463 | int i; |
464 | |
465 | /* |
466 | * If kswapd is disabled, reschedule if necessary but do not |
467 | * throttle as the system is likely near OOM. |
468 | */ |
469 | if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) |
470 | return true; |
471 | |
472 | /* |
473 | * If there are a lot of dirty/writeback folios then do not |
474 | * throttle as throttling will occur when the folios cycle |
475 | * towards the end of the LRU if still under writeback. |
476 | */ |
477 | for (i = 0; i < MAX_NR_ZONES; i++) { |
478 | struct zone *zone = pgdat->node_zones + i; |
479 | |
480 | if (!managed_zone(zone)) |
481 | continue; |
482 | |
483 | reclaimable += zone_reclaimable_pages(zone); |
		write_pending += zone_page_state_snapshot(zone,
						  NR_ZONE_WRITE_PENDING);
486 | } |
487 | if (2 * write_pending <= reclaimable) |
488 | return true; |
489 | |
490 | return false; |
491 | } |
492 | |
493 | void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason) |
494 | { |
495 | wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; |
496 | long timeout, ret; |
497 | DEFINE_WAIT(wait); |
498 | |
499 | /* |
500 | * Do not throttle user workers, kthreads other than kswapd or |
501 | * workqueues. They may be required for reclaim to make |
502 | * forward progress (e.g. journalling workqueues or kthreads). |
503 | */ |
504 | if (!current_is_kswapd() && |
505 | current->flags & (PF_USER_WORKER|PF_KTHREAD)) { |
506 | cond_resched(); |
507 | return; |
508 | } |
509 | |
510 | /* |
511 | * These figures are pulled out of thin air. |
512 | * VMSCAN_THROTTLE_ISOLATED is a transient condition based on too many |
513 | * parallel reclaimers which is a short-lived event so the timeout is |
514 | * short. Failing to make progress or waiting on writeback are |
515 | * potentially long-lived events so use a longer timeout. This is shaky |
516 | * logic as a failure to make progress could be due to anything from |
517 | * writeback to a slow device to excessive referenced folios at the tail |
518 | * of the inactive LRU. |
519 | */ |
520 | switch(reason) { |
521 | case VMSCAN_THROTTLE_WRITEBACK: |
522 | timeout = HZ/10; |
523 | |
		if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {
525 | WRITE_ONCE(pgdat->nr_reclaim_start, |
526 | node_page_state(pgdat, NR_THROTTLED_WRITTEN)); |
527 | } |
528 | |
529 | break; |
530 | case VMSCAN_THROTTLE_CONGESTED: |
531 | fallthrough; |
532 | case VMSCAN_THROTTLE_NOPROGRESS: |
533 | if (skip_throttle_noprogress(pgdat)) { |
534 | cond_resched(); |
535 | return; |
536 | } |
537 | |
538 | timeout = 1; |
539 | |
540 | break; |
541 | case VMSCAN_THROTTLE_ISOLATED: |
542 | timeout = HZ/50; |
543 | break; |
544 | default: |
545 | WARN_ON_ONCE(1); |
546 | timeout = HZ; |
547 | break; |
548 | } |
549 | |
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = schedule_timeout(timeout);
	finish_wait(wqh, &wait);
553 | |
554 | if (reason == VMSCAN_THROTTLE_WRITEBACK) |
		atomic_dec(&pgdat->nr_writeback_throttled);
556 | |
	trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout),
				  jiffies_to_usecs(timeout - ret),
				  reason);
560 | } |
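
/*
 * Callers pick the reason matching what they are waiting for; for instance,
 * a reclaimer that has isolated too many folios is expected to back off with
 * something like:
 *
 *	reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);
 *
 * and is woken early if the corresponding reclaim_wait queue is kicked
 * before the timeout expires.
 */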
561 | |
562 | /* |
563 | * Account for folios written if tasks are throttled waiting on dirty |
564 | * folios to clean. If enough folios have been cleaned since throttling |
565 | * started then wakeup the throttled tasks. |
566 | */ |
567 | void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, |
568 | int nr_throttled) |
569 | { |
570 | unsigned long nr_written; |
571 | |
	node_stat_add_folio(folio, NR_THROTTLED_WRITTEN);
573 | |
574 | /* |
575 | * This is an inaccurate read as the per-cpu deltas may not |
576 | * be synchronised. However, given that the system is |
577 | * writeback throttled, it is not worth taking the penalty |
578 | * of getting an accurate count. At worst, the throttle |
579 | * timeout guarantees forward progress. |
580 | */ |
	nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) -
582 | READ_ONCE(pgdat->nr_reclaim_start); |
583 | |
584 | if (nr_written > SWAP_CLUSTER_MAX * nr_throttled) |
585 | wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); |
586 | } |
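
/*
 * As a concrete illustration of the wakeup condition above: with two tasks
 * throttled (nr_throttled == 2) and SWAP_CLUSTER_MAX at its usual value of
 * 32, the waiters are kicked once more than 64 folios have completed
 * writeback since throttling began.
 */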
587 | |
588 | /* possible outcome of pageout() */ |
589 | typedef enum { |
590 | /* failed to write folio out, folio is locked */ |
591 | PAGE_KEEP, |
592 | /* move folio to the active list, folio is locked */ |
593 | PAGE_ACTIVATE, |
594 | /* folio has been sent to the disk successfully, folio is unlocked */ |
595 | PAGE_SUCCESS, |
596 | /* folio is clean and locked */ |
597 | PAGE_CLEAN, |
598 | } pageout_t; |
599 | |
600 | /* |
601 | * pageout is called by shrink_folio_list() for each dirty folio. |
602 | * Calls ->writepage(). |
603 | */ |
604 | static pageout_t pageout(struct folio *folio, struct address_space *mapping, |
605 | struct swap_iocb **plug) |
606 | { |
607 | /* |
608 | * If the folio is dirty, only perform writeback if that write |
609 | * will be non-blocking. To prevent this allocation from being |
610 | * stalled by pagecache activity. But note that there may be |
611 | * stalls if we need to run get_block(). We could test |
612 | * PagePrivate for that. |
613 | * |
614 | * If this process is currently in __generic_file_write_iter() against |
615 | * this folio's queue, we can perform writeback even if that |
616 | * will block. |
617 | * |
618 | * If the folio is swapcache, write it back even if that would |
619 | * block, for some throttling. This happens by accident, because |
620 | * swap_backing_dev_info is bust: it doesn't reflect the |
621 | * congestion state of the swapdevs. Easy to fix, if needed. |
622 | */ |
623 | if (!is_page_cache_freeable(folio)) |
624 | return PAGE_KEEP; |
625 | if (!mapping) { |
626 | /* |
627 | * Some data journaling orphaned folios can have |
628 | * folio->mapping == NULL while being dirty with clean buffers. |
629 | */ |
630 | if (folio_test_private(folio)) { |
631 | if (try_to_free_buffers(folio)) { |
632 | folio_clear_dirty(folio); |
				pr_info("%s: orphaned folio\n", __func__);
634 | return PAGE_CLEAN; |
635 | } |
636 | } |
637 | return PAGE_KEEP; |
638 | } |
639 | if (mapping->a_ops->writepage == NULL) |
640 | return PAGE_ACTIVATE; |
641 | |
642 | if (folio_clear_dirty_for_io(folio)) { |
643 | int res; |
644 | struct writeback_control wbc = { |
645 | .sync_mode = WB_SYNC_NONE, |
646 | .nr_to_write = SWAP_CLUSTER_MAX, |
647 | .range_start = 0, |
648 | .range_end = LLONG_MAX, |
649 | .for_reclaim = 1, |
650 | .swap_plug = plug, |
651 | }; |
652 | |
653 | folio_set_reclaim(folio); |
654 | res = mapping->a_ops->writepage(&folio->page, &wbc); |
655 | if (res < 0) |
			handle_write_error(mapping, folio, res);
657 | if (res == AOP_WRITEPAGE_ACTIVATE) { |
658 | folio_clear_reclaim(folio); |
659 | return PAGE_ACTIVATE; |
660 | } |
661 | |
662 | if (!folio_test_writeback(folio)) { |
663 | /* synchronous write or broken a_ops? */ |
664 | folio_clear_reclaim(folio); |
665 | } |
666 | trace_mm_vmscan_write_folio(folio); |
		node_stat_add_folio(folio, NR_VMSCAN_WRITE);
668 | return PAGE_SUCCESS; |
669 | } |
670 | |
671 | return PAGE_CLEAN; |
672 | } |
673 | |
674 | /* |
675 | * Same as remove_mapping, but if the folio is removed from the mapping, it |
676 | * gets returned with a refcount of 0. |
677 | */ |
678 | static int __remove_mapping(struct address_space *mapping, struct folio *folio, |
679 | bool reclaimed, struct mem_cgroup *target_memcg) |
680 | { |
681 | int refcount; |
682 | void *shadow = NULL; |
683 | |
684 | BUG_ON(!folio_test_locked(folio)); |
685 | BUG_ON(mapping != folio_mapping(folio)); |
686 | |
687 | if (!folio_test_swapcache(folio)) |
		spin_lock(&mapping->host->i_lock);
689 | xa_lock_irq(&mapping->i_pages); |
690 | /* |
691 | * The non racy check for a busy folio. |
692 | * |
693 | * Must be careful with the order of the tests. When someone has |
694 | * a ref to the folio, it may be possible that they dirty it then |
695 | * drop the reference. So if the dirty flag is tested before the |
696 | * refcount here, then the following race may occur: |
697 | * |
698 | * get_user_pages(&page); |
699 | * [user mapping goes away] |
700 | * write_to(page); |
701 | * !folio_test_dirty(folio) [good] |
702 | * folio_set_dirty(folio); |
703 | * folio_put(folio); |
704 | * !refcount(folio) [good, discard it] |
705 | * |
706 | * [oops, our write_to data is lost] |
707 | * |
708 | * Reversing the order of the tests ensures such a situation cannot |
709 | * escape unnoticed. The smp_rmb is needed to ensure the folio->flags |
710 | * load is not satisfied before that of folio->_refcount. |
711 | * |
712 | * Note that if the dirty flag is always set via folio_mark_dirty, |
713 | * and thus under the i_pages lock, then this ordering is not required. |
714 | */ |
715 | refcount = 1 + folio_nr_pages(folio); |
	if (!folio_ref_freeze(folio, refcount))
717 | goto cannot_free; |
718 | /* note: atomic_cmpxchg in folio_ref_freeze provides the smp_rmb */ |
719 | if (unlikely(folio_test_dirty(folio))) { |
		folio_ref_unfreeze(folio, refcount);
721 | goto cannot_free; |
722 | } |
723 | |
724 | if (folio_test_swapcache(folio)) { |
725 | swp_entry_t swap = folio->swap; |
726 | |
727 | if (reclaimed && !mapping_exiting(mapping)) |
728 | shadow = workingset_eviction(folio, target_memcg); |
		__delete_from_swap_cache(folio, swap, shadow);
		mem_cgroup_swapout(folio, swap);
		xa_unlock_irq(&mapping->i_pages);
		put_swap_folio(folio, swap);
733 | } else { |
734 | void (*free_folio)(struct folio *); |
735 | |
736 | free_folio = mapping->a_ops->free_folio; |
737 | /* |
738 | * Remember a shadow entry for reclaimed file cache in |
739 | * order to detect refaults, thus thrashing, later on. |
740 | * |
741 | * But don't store shadows in an address space that is |
742 | * already exiting. This is not just an optimization, |
743 | * inode reclaim needs to empty out the radix tree or |
744 | * the nodes are lost. Don't plant shadows behind its |
745 | * back. |
746 | * |
747 | * We also don't store shadows for DAX mappings because the |
748 | * only page cache folios found in these are zero pages |
749 | * covering holes, and because we don't want to mix DAX |
750 | * exceptional entries and shadow exceptional entries in the |
751 | * same address_space. |
752 | */ |
753 | if (reclaimed && folio_is_file_lru(folio) && |
754 | !mapping_exiting(mapping) && !dax_mapping(mapping)) |
755 | shadow = workingset_eviction(folio, target_memcg); |
756 | __filemap_remove_folio(folio, shadow); |
757 | xa_unlock_irq(&mapping->i_pages); |
758 | if (mapping_shrinkable(mapping)) |
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
761 | |
762 | if (free_folio) |
763 | free_folio(folio); |
764 | } |
765 | |
766 | return 1; |
767 | |
768 | cannot_free: |
769 | xa_unlock_irq(&mapping->i_pages); |
770 | if (!folio_test_swapcache(folio)) |
		spin_unlock(&mapping->host->i_lock);
772 | return 0; |
773 | } |
774 | |
775 | /** |
776 | * remove_mapping() - Attempt to remove a folio from its mapping. |
777 | * @mapping: The address space. |
778 | * @folio: The folio to remove. |
779 | * |
780 | * If the folio is dirty, under writeback or if someone else has a ref |
781 | * on it, removal will fail. |
782 | * Return: The number of pages removed from the mapping. 0 if the folio |
783 | * could not be removed. |
784 | * Context: The caller should have a single refcount on the folio and |
785 | * hold its lock. |
786 | */ |
787 | long remove_mapping(struct address_space *mapping, struct folio *folio) |
788 | { |
	if (__remove_mapping(mapping, folio, false, NULL)) {
790 | /* |
791 | * Unfreezing the refcount with 1 effectively |
792 | * drops the pagecache ref for us without requiring another |
793 | * atomic operation. |
794 | */ |
		folio_ref_unfreeze(folio, 1);
796 | return folio_nr_pages(folio); |
797 | } |
798 | return 0; |
799 | } |
800 | |
801 | /** |
802 | * folio_putback_lru - Put previously isolated folio onto appropriate LRU list. |
803 | * @folio: Folio to be returned to an LRU list. |
804 | * |
805 | * Add previously isolated @folio to appropriate LRU list. |
806 | * The folio may still be unevictable for other reasons. |
807 | * |
808 | * Context: lru_lock must not be held, interrupts must be enabled. |
809 | */ |
810 | void folio_putback_lru(struct folio *folio) |
811 | { |
812 | folio_add_lru(folio); |
813 | folio_put(folio); /* drop ref from isolate */ |
814 | } |
815 | |
816 | enum folio_references { |
817 | FOLIOREF_RECLAIM, |
818 | FOLIOREF_RECLAIM_CLEAN, |
819 | FOLIOREF_KEEP, |
820 | FOLIOREF_ACTIVATE, |
821 | }; |
822 | |
823 | static enum folio_references folio_check_references(struct folio *folio, |
824 | struct scan_control *sc) |
825 | { |
826 | int referenced_ptes, referenced_folio; |
827 | unsigned long vm_flags; |
828 | |
	referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
					   &vm_flags);
831 | referenced_folio = folio_test_clear_referenced(folio); |
832 | |
833 | /* |
834 | * The supposedly reclaimable folio was found to be in a VM_LOCKED vma. |
835 | * Let the folio, now marked Mlocked, be moved to the unevictable list. |
836 | */ |
837 | if (vm_flags & VM_LOCKED) |
838 | return FOLIOREF_ACTIVATE; |
839 | |
840 | /* rmap lock contention: rotate */ |
841 | if (referenced_ptes == -1) |
842 | return FOLIOREF_KEEP; |
843 | |
844 | if (referenced_ptes) { |
845 | /* |
846 | * All mapped folios start out with page table |
847 | * references from the instantiating fault, so we need |
848 | * to look twice if a mapped file/anon folio is used more |
849 | * than once. |
850 | * |
851 | * Mark it and spare it for another trip around the |
852 | * inactive list. Another page table reference will |
853 | * lead to its activation. |
854 | * |
855 | * Note: the mark is set for activated folios as well |
856 | * so that recently deactivated but used folios are |
857 | * quickly recovered. |
858 | */ |
859 | folio_set_referenced(folio); |
860 | |
861 | if (referenced_folio || referenced_ptes > 1) |
862 | return FOLIOREF_ACTIVATE; |
863 | |
864 | /* |
865 | * Activate file-backed executable folios after first usage. |
866 | */ |
867 | if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) |
868 | return FOLIOREF_ACTIVATE; |
869 | |
870 | return FOLIOREF_KEEP; |
871 | } |
872 | |
873 | /* Reclaim if clean, defer dirty folios to writeback */ |
874 | if (referenced_folio && folio_is_file_lru(folio)) |
875 | return FOLIOREF_RECLAIM_CLEAN; |
876 | |
877 | return FOLIOREF_RECLAIM; |
878 | } |
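
/*
 * Summarising the decisions above: a folio found in a VM_LOCKED VMA is
 * activated so it can move to the unevictable list; rmap lock contention
 * (referenced_ptes == -1) keeps it for another pass; any page table
 * reference marks the folio referenced and keeps it, with activation for
 * multiple references, an already-set referenced flag, or executable file
 * folios; with no page table references, a file folio whose referenced
 * flag was set is deferred to FOLIOREF_RECLAIM_CLEAN (reclaim only if
 * clean), and everything else is plain FOLIOREF_RECLAIM.
 */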
879 | |
880 | /* Check if a folio is dirty or under writeback */ |
881 | static void folio_check_dirty_writeback(struct folio *folio, |
882 | bool *dirty, bool *writeback) |
883 | { |
884 | struct address_space *mapping; |
885 | |
886 | /* |
887 | * Anonymous folios are not handled by flushers and must be written |
888 | * from reclaim context. Do not stall reclaim based on them. |
889 | * MADV_FREE anonymous folios are put into inactive file list too. |
890 | * They could be mistakenly treated as file lru. So further anon |
891 | * test is needed. |
892 | */ |
893 | if (!folio_is_file_lru(folio) || |
894 | (folio_test_anon(folio) && !folio_test_swapbacked(folio))) { |
895 | *dirty = false; |
896 | *writeback = false; |
897 | return; |
898 | } |
899 | |
900 | /* By default assume that the folio flags are accurate */ |
901 | *dirty = folio_test_dirty(folio); |
902 | *writeback = folio_test_writeback(folio); |
903 | |
904 | /* Verify dirty/writeback state if the filesystem supports it */ |
905 | if (!folio_test_private(folio)) |
906 | return; |
907 | |
908 | mapping = folio_mapping(folio); |
909 | if (mapping && mapping->a_ops->is_dirty_writeback) |
910 | mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); |
911 | } |
912 | |
913 | static struct folio *alloc_demote_folio(struct folio *src, |
914 | unsigned long private) |
915 | { |
916 | struct folio *dst; |
917 | nodemask_t *allowed_mask; |
918 | struct migration_target_control *mtc; |
919 | |
920 | mtc = (struct migration_target_control *)private; |
921 | |
922 | allowed_mask = mtc->nmask; |
923 | /* |
924 | * make sure we allocate from the target node first also trying to |
925 | * demote or reclaim pages from the target node via kswapd if we are |
926 | * low on free memory on target node. If we don't do this and if |
927 | * we have free memory on the slower(lower) memtier, we would start |
928 | * allocating pages from slower(lower) memory tiers without even forcing |
929 | * a demotion of cold pages from the target memtier. This can result |
930 | * in the kernel placing hot pages in slower(lower) memory tiers. |
931 | */ |
932 | mtc->nmask = NULL; |
933 | mtc->gfp_mask |= __GFP_THISNODE; |
	dst = alloc_migration_target(src, (unsigned long)mtc);
935 | if (dst) |
936 | return dst; |
937 | |
938 | mtc->gfp_mask &= ~__GFP_THISNODE; |
939 | mtc->nmask = allowed_mask; |
940 | |
	return alloc_migration_target(src, (unsigned long)mtc);
942 | } |
943 | |
944 | /* |
945 | * Take folios on @demote_folios and attempt to demote them to another node. |
946 | * Folios which are not demoted are left on @demote_folios. |
947 | */ |
948 | static unsigned int demote_folio_list(struct list_head *demote_folios, |
949 | struct pglist_data *pgdat) |
950 | { |
	int target_nid = next_demotion_node(pgdat->node_id);
952 | unsigned int nr_succeeded; |
953 | nodemask_t allowed_mask; |
954 | |
955 | struct migration_target_control mtc = { |
956 | /* |
957 | * Allocate from 'node', or fail quickly and quietly. |
958 | * When this happens, 'page' will likely just be discarded |
959 | * instead of migrated. |
960 | */ |
961 | .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN | |
962 | __GFP_NOMEMALLOC | GFP_NOWAIT, |
963 | .nid = target_nid, |
964 | .nmask = &allowed_mask |
965 | }; |
966 | |
	if (list_empty(demote_folios))
968 | return 0; |
969 | |
970 | if (target_nid == NUMA_NO_NODE) |
971 | return 0; |
972 | |
	node_get_allowed_targets(pgdat, &allowed_mask);
974 | |
975 | /* Demotion ignores all cpuset and mempolicy settings */ |
	migrate_pages(demote_folios, alloc_demote_folio, NULL,
		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
		      &nr_succeeded);

	__count_vm_events(PGDEMOTE_KSWAPD + reclaimer_offset(), nr_succeeded);
981 | |
982 | return nr_succeeded; |
983 | } |
984 | |
985 | static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask) |
986 | { |
987 | if (gfp_mask & __GFP_FS) |
988 | return true; |
989 | if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO)) |
990 | return false; |
991 | /* |
992 | * We can "enter_fs" for swap-cache with only __GFP_IO |
993 | * providing this isn't SWP_FS_OPS. |
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
995 | * but that will never affect SWP_FS_OPS, so the data_race |
996 | * is safe. |
997 | */ |
998 | return !data_race(folio_swap_flags(folio) & SWP_FS_OPS); |
999 | } |
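
/*
 * Example of the distinction above: a GFP_NOFS reclaimer (__GFP_IO set,
 * __GFP_FS clear) may still write out swap cache folios, unless the swap
 * area sits on a filesystem that needs SWP_FS_OPS; a GFP_NOIO reclaimer
 * may write out neither.
 */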
1000 | |
1001 | /* |
1002 | * shrink_folio_list() returns the number of reclaimed pages |
1003 | */ |
1004 | static unsigned int shrink_folio_list(struct list_head *folio_list, |
1005 | struct pglist_data *pgdat, struct scan_control *sc, |
1006 | struct reclaim_stat *stat, bool ignore_references) |
1007 | { |
1008 | LIST_HEAD(ret_folios); |
1009 | LIST_HEAD(free_folios); |
1010 | LIST_HEAD(demote_folios); |
1011 | unsigned int nr_reclaimed = 0; |
1012 | unsigned int pgactivate = 0; |
1013 | bool do_demote_pass; |
1014 | struct swap_iocb *plug = NULL; |
1015 | |
1016 | memset(stat, 0, sizeof(*stat)); |
1017 | cond_resched(); |
	do_demote_pass = can_demote(pgdat->node_id, sc);
1019 | |
1020 | retry: |
	while (!list_empty(folio_list)) {
1022 | struct address_space *mapping; |
1023 | struct folio *folio; |
1024 | enum folio_references references = FOLIOREF_RECLAIM; |
1025 | bool dirty, writeback; |
1026 | unsigned int nr_pages; |
1027 | |
1028 | cond_resched(); |
1029 | |
		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);
1032 | |
1033 | if (!folio_trylock(folio)) |
1034 | goto keep; |
1035 | |
1036 | VM_BUG_ON_FOLIO(folio_test_active(folio), folio); |
1037 | |
1038 | nr_pages = folio_nr_pages(folio); |
1039 | |
1040 | /* Account the number of base pages */ |
1041 | sc->nr_scanned += nr_pages; |
1042 | |
1043 | if (unlikely(!folio_evictable(folio))) |
1044 | goto activate_locked; |
1045 | |
1046 | if (!sc->may_unmap && folio_mapped(folio)) |
1047 | goto keep_locked; |
1048 | |
1049 | /* folio_update_gen() tried to promote this page? */ |
1050 | if (lru_gen_enabled() && !ignore_references && |
1051 | folio_mapped(folio) && folio_test_referenced(folio)) |
1052 | goto keep_locked; |
1053 | |
1054 | /* |
1055 | * The number of dirty pages determines if a node is marked |
1056 | * reclaim_congested. kswapd will stall and start writing |
1057 | * folios if the tail of the LRU is all dirty unqueued folios. |
1058 | */ |
		folio_check_dirty_writeback(folio, &dirty, &writeback);
1060 | if (dirty || writeback) |
1061 | stat->nr_dirty += nr_pages; |
1062 | |
1063 | if (dirty && !writeback) |
1064 | stat->nr_unqueued_dirty += nr_pages; |
1065 | |
1066 | /* |
1067 | * Treat this folio as congested if folios are cycling |
1068 | * through the LRU so quickly that the folios marked |
1069 | * for immediate reclaim are making it to the end of |
1070 | * the LRU a second time. |
1071 | */ |
1072 | if (writeback && folio_test_reclaim(folio)) |
1073 | stat->nr_congested += nr_pages; |
1074 | |
1075 | /* |
1076 | * If a folio at the tail of the LRU is under writeback, there |
1077 | * are three cases to consider. |
1078 | * |
1079 | * 1) If reclaim is encountering an excessive number |
1080 | * of folios under writeback and this folio has both |
1081 | * the writeback and reclaim flags set, then it |
1082 | * indicates that folios are being queued for I/O but |
1083 | * are being recycled through the LRU before the I/O |
1084 | * can complete. Waiting on the folio itself risks an |
1085 | * indefinite stall if it is impossible to writeback |
1086 | * the folio due to I/O error or disconnected storage |
1087 | * so instead note that the LRU is being scanned too |
1088 | * quickly and the caller can stall after the folio |
1089 | * list has been processed. |
1090 | * |
1091 | * 2) Global or new memcg reclaim encounters a folio that is |
1092 | * not marked for immediate reclaim, or the caller does not |
1093 | * have __GFP_FS (or __GFP_IO if it's simply going to swap, |
1094 | * not to fs). In this case mark the folio for immediate |
1095 | * reclaim and continue scanning. |
1096 | * |
1097 | * Require may_enter_fs() because we would wait on fs, which |
1098 | * may not have submitted I/O yet. And the loop driver might |
1099 | * enter reclaim, and deadlock if it waits on a folio for |
1100 | * which it is needed to do the write (loop masks off |
1101 | * __GFP_IO|__GFP_FS for this reason); but more thought |
1102 | * would probably show more reasons. |
1103 | * |
1104 | * 3) Legacy memcg encounters a folio that already has the |
1105 | * reclaim flag set. memcg does not have any dirty folio |
1106 | * throttling so we could easily OOM just because too many |
1107 | * folios are in writeback and there is nothing else to |
1108 | * reclaim. Wait for the writeback to complete. |
1109 | * |
1110 | * In cases 1) and 2) we activate the folios to get them out of |
1111 | * the way while we continue scanning for clean folios on the |
1112 | * inactive list and refilling from the active list. The |
1113 | * observation here is that waiting for disk writes is more |
1114 | * expensive than potentially causing reloads down the line. |
1115 | * Since they're marked for immediate reclaim, they won't put |
1116 | * memory pressure on the cache working set any longer than it |
1117 | * takes to write them to disk. |
1118 | */ |
1119 | if (folio_test_writeback(folio)) { |
1120 | /* Case 1 above */ |
1121 | if (current_is_kswapd() && |
1122 | folio_test_reclaim(folio) && |
1123 | test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { |
1124 | stat->nr_immediate += nr_pages; |
1125 | goto activate_locked; |
1126 | |
1127 | /* Case 2 above */ |
1128 | } else if (writeback_throttling_sane(sc) || |
1129 | !folio_test_reclaim(folio) || |
				   !may_enter_fs(folio, sc->gfp_mask)) {
1131 | /* |
1132 | * This is slightly racy - |
1133 | * folio_end_writeback() might have |
1134 | * just cleared the reclaim flag, then |
1135 | * setting the reclaim flag here ends up |
1136 | * interpreted as the readahead flag - but |
1137 | * that does not matter enough to care. |
1138 | * What we do want is for this folio to |
1139 | * have the reclaim flag set next time |
1140 | * memcg reclaim reaches the tests above, |
1141 | * so it will then wait for writeback to |
1142 | * avoid OOM; and it's also appropriate |
1143 | * in global reclaim. |
1144 | */ |
1145 | folio_set_reclaim(folio); |
1146 | stat->nr_writeback += nr_pages; |
1147 | goto activate_locked; |
1148 | |
1149 | /* Case 3 above */ |
1150 | } else { |
1151 | folio_unlock(folio); |
1152 | folio_wait_writeback(folio); |
1153 | /* then go back and try same folio again */ |
				list_add_tail(&folio->lru, folio_list);
1155 | continue; |
1156 | } |
1157 | } |
1158 | |
1159 | if (!ignore_references) |
1160 | references = folio_check_references(folio, sc); |
1161 | |
1162 | switch (references) { |
1163 | case FOLIOREF_ACTIVATE: |
1164 | goto activate_locked; |
1165 | case FOLIOREF_KEEP: |
1166 | stat->nr_ref_keep += nr_pages; |
1167 | goto keep_locked; |
1168 | case FOLIOREF_RECLAIM: |
1169 | case FOLIOREF_RECLAIM_CLEAN: |
1170 | ; /* try to reclaim the folio below */ |
1171 | } |
1172 | |
1173 | /* |
1174 | * Before reclaiming the folio, try to relocate |
1175 | * its contents to another node. |
1176 | */ |
1177 | if (do_demote_pass && |
1178 | (thp_migration_supported() || !folio_test_large(folio))) { |
			list_add(&folio->lru, &demote_folios);
1180 | folio_unlock(folio); |
1181 | continue; |
1182 | } |
1183 | |
1184 | /* |
1185 | * Anonymous process memory has backing store? |
1186 | * Try to allocate it some swap space here. |
1187 | * Lazyfree folio could be freed directly |
1188 | */ |
1189 | if (folio_test_anon(folio) && folio_test_swapbacked(folio)) { |
1190 | if (!folio_test_swapcache(folio)) { |
1191 | if (!(sc->gfp_mask & __GFP_IO)) |
1192 | goto keep_locked; |
1193 | if (folio_maybe_dma_pinned(folio)) |
1194 | goto keep_locked; |
1195 | if (folio_test_large(folio)) { |
1196 | /* cannot split folio, skip it */ |
1197 | if (!can_split_folio(folio, NULL)) |
1198 | goto activate_locked; |
1199 | /* |
1200 | * Split folios without a PMD map right |
1201 | * away. Chances are some or all of the |
1202 | * tail pages can be freed without IO. |
1203 | */ |
1204 | if (!folio_entire_mapcount(folio) && |
					    split_folio_to_list(folio,
								folio_list))
1207 | goto activate_locked; |
1208 | } |
1209 | if (!add_to_swap(folio)) { |
1210 | if (!folio_test_large(folio)) |
1211 | goto activate_locked_split; |
1212 | /* Fallback to swap normal pages */ |
					if (split_folio_to_list(folio,
								folio_list))
1215 | goto activate_locked; |
1216 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
					count_memcg_folio_events(folio, THP_SWPOUT_FALLBACK, 1);
					count_vm_event(THP_SWPOUT_FALLBACK);
1219 | #endif |
1220 | if (!add_to_swap(folio)) |
1221 | goto activate_locked_split; |
1222 | } |
1223 | } |
1224 | } else if (folio_test_swapbacked(folio) && |
1225 | folio_test_large(folio)) { |
1226 | /* Split shmem folio */ |
			if (split_folio_to_list(folio, folio_list))
1228 | goto keep_locked; |
1229 | } |
1230 | |
1231 | /* |
1232 | * If the folio was split above, the tail pages will make |
1233 | * their own pass through this function and be accounted |
1234 | * then. |
1235 | */ |
1236 | if ((nr_pages > 1) && !folio_test_large(folio)) { |
1237 | sc->nr_scanned -= (nr_pages - 1); |
1238 | nr_pages = 1; |
1239 | } |
1240 | |
1241 | /* |
1242 | * The folio is mapped into the page tables of one or more |
1243 | * processes. Try to unmap it here. |
1244 | */ |
1245 | if (folio_mapped(folio)) { |
1246 | enum ttu_flags flags = TTU_BATCH_FLUSH; |
1247 | bool was_swapbacked = folio_test_swapbacked(folio); |
1248 | |
1249 | if (folio_test_pmd_mappable(folio)) |
1250 | flags |= TTU_SPLIT_HUGE_PMD; |
1251 | |
1252 | try_to_unmap(folio, flags); |
1253 | if (folio_mapped(folio)) { |
1254 | stat->nr_unmap_fail += nr_pages; |
1255 | if (!was_swapbacked && |
1256 | folio_test_swapbacked(folio)) |
1257 | stat->nr_lazyfree_fail += nr_pages; |
1258 | goto activate_locked; |
1259 | } |
1260 | } |
1261 | |
1262 | /* |
1263 | * Folio is unmapped now so it cannot be newly pinned anymore. |
1264 | * No point in trying to reclaim folio if it is pinned. |
1265 | * Furthermore we don't want to reclaim underlying fs metadata |
1266 | * if the folio is pinned and thus potentially modified by the |
1267 | * pinning process as that may upset the filesystem. |
1268 | */ |
1269 | if (folio_maybe_dma_pinned(folio)) |
1270 | goto activate_locked; |
1271 | |
1272 | mapping = folio_mapping(folio); |
1273 | if (folio_test_dirty(folio)) { |
1274 | /* |
1275 | * Only kswapd can writeback filesystem folios |
1276 | * to avoid risk of stack overflow. But avoid |
1277 | * injecting inefficient single-folio I/O into |
1278 | * flusher writeback as much as possible: only |
1279 | * write folios when we've encountered many |
1280 | * dirty folios, and when we've already scanned |
1281 | * the rest of the LRU for clean folios and see |
1282 | * the same dirty folios again (with the reclaim |
1283 | * flag set). |
1284 | */ |
1285 | if (folio_is_file_lru(folio) && |
1286 | (!current_is_kswapd() || |
1287 | !folio_test_reclaim(folio) || |
1288 | !test_bit(PGDAT_DIRTY, &pgdat->flags))) { |
1289 | /* |
1290 | * Immediately reclaim when written back. |
1291 | * Similar in principle to folio_deactivate() |
1292 | * except we already have the folio isolated |
1293 | * and know it's dirty |
1294 | */ |
				node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
						    nr_pages);
1297 | folio_set_reclaim(folio); |
1298 | |
1299 | goto activate_locked; |
1300 | } |
1301 | |
1302 | if (references == FOLIOREF_RECLAIM_CLEAN) |
1303 | goto keep_locked; |
			if (!may_enter_fs(folio, sc->gfp_mask))
1305 | goto keep_locked; |
1306 | if (!sc->may_writepage) |
1307 | goto keep_locked; |
1308 | |
1309 | /* |
1310 | * Folio is dirty. Flush the TLB if a writable entry |
1311 | * potentially exists to avoid CPU writes after I/O |
1312 | * starts and then write it out here. |
1313 | */ |
1314 | try_to_unmap_flush_dirty(); |
			switch (pageout(folio, mapping, &plug)) {
1316 | case PAGE_KEEP: |
1317 | goto keep_locked; |
1318 | case PAGE_ACTIVATE: |
1319 | goto activate_locked; |
1320 | case PAGE_SUCCESS: |
1321 | stat->nr_pageout += nr_pages; |
1322 | |
1323 | if (folio_test_writeback(folio)) |
1324 | goto keep; |
1325 | if (folio_test_dirty(folio)) |
1326 | goto keep; |
1327 | |
1328 | /* |
1329 | * A synchronous write - probably a ramdisk. Go |
1330 | * ahead and try to reclaim the folio. |
1331 | */ |
1332 | if (!folio_trylock(folio)) |
1333 | goto keep; |
1334 | if (folio_test_dirty(folio) || |
1335 | folio_test_writeback(folio)) |
1336 | goto keep_locked; |
1337 | mapping = folio_mapping(folio); |
1338 | fallthrough; |
1339 | case PAGE_CLEAN: |
1340 | ; /* try to free the folio below */ |
1341 | } |
1342 | } |
1343 | |
1344 | /* |
1345 | * If the folio has buffers, try to free the buffer |
1346 | * mappings associated with this folio. If we succeed |
1347 | * we try to free the folio as well. |
1348 | * |
1349 | * We do this even if the folio is dirty. |
1350 | * filemap_release_folio() does not perform I/O, but it |
1351 | * is possible for a folio to have the dirty flag set, |
1352 | * but it is actually clean (all its buffers are clean). |
1353 | * This happens if the buffers were written out directly, |
1354 | * with submit_bh(). ext3 will do this, as well as |
1355 | * the blockdev mapping. filemap_release_folio() will |
1356 | * discover that cleanness and will drop the buffers |
1357 | * and mark the folio clean - it can be freed. |
1358 | * |
1359 | * Rarely, folios can have buffers and no ->mapping. |
1360 | * These are the folios which were not successfully |
1361 | * invalidated in truncate_cleanup_folio(). We try to |
1362 | * drop those buffers here and if that worked, and the |
1363 | * folio is no longer mapped into process address space |
1364 | * (refcount == 1) it can be freed. Otherwise, leave |
1365 | * the folio on the LRU so it is swappable. |
1366 | */ |
1367 | if (folio_needs_release(folio)) { |
			if (!filemap_release_folio(folio, sc->gfp_mask))
1369 | goto activate_locked; |
1370 | if (!mapping && folio_ref_count(folio) == 1) { |
1371 | folio_unlock(folio); |
1372 | if (folio_put_testzero(folio)) |
1373 | goto free_it; |
1374 | else { |
1375 | /* |
1376 | * rare race with speculative reference. |
1377 | * the speculative reference will free |
1378 | * this folio shortly, so we may |
1379 | * increment nr_reclaimed here (and |
1380 | * leave it off the LRU). |
1381 | */ |
1382 | nr_reclaimed += nr_pages; |
1383 | continue; |
1384 | } |
1385 | } |
1386 | } |
1387 | |
1388 | if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { |
1389 | /* follow __remove_mapping for reference */ |
			if (!folio_ref_freeze(folio, 1))
1391 | goto keep_locked; |
1392 | /* |
1393 | * The folio has only one reference left, which is |
1394 | * from the isolation. After the caller puts the |
1395 | * folio back on the lru and drops the reference, the |
1396 | * folio will be freed anyway. It doesn't matter |
1397 | * which lru it goes on. So we don't bother checking |
1398 | * the dirty flag here. |
1399 | */ |
			count_vm_events(PGLAZYFREED, nr_pages);
			count_memcg_folio_events(folio, PGLAZYFREED, nr_pages);
		} else if (!mapping || !__remove_mapping(mapping, folio, true,
							 sc->target_mem_cgroup))
1404 | goto keep_locked; |
1405 | |
1406 | folio_unlock(folio); |
1407 | free_it: |
1408 | /* |
1409 | * Folio may get swapped out as a whole, need to account |
1410 | * all pages in it. |
1411 | */ |
1412 | nr_reclaimed += nr_pages; |
1413 | |
1414 | /* |
1415 | * Is there need to periodically free_folio_list? It would |
1416 | * appear not as the counts should be low |
1417 | */ |
1418 | if (unlikely(folio_test_large(folio))) |
1419 | destroy_large_folio(folio); |
1420 | else |
			list_add(&folio->lru, &free_folios);
1422 | continue; |
1423 | |
1424 | activate_locked_split: |
1425 | /* |
1426 | * The tail pages that are failed to add into swap cache |
1427 | * reach here. Fixup nr_scanned and nr_pages. |
1428 | */ |
1429 | if (nr_pages > 1) { |
1430 | sc->nr_scanned -= (nr_pages - 1); |
1431 | nr_pages = 1; |
1432 | } |
1433 | activate_locked: |
1434 | /* Not a candidate for swapping, so reclaim swap space. */ |
1435 | if (folio_test_swapcache(folio) && |
1436 | (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio))) |
1437 | folio_free_swap(folio); |
1438 | VM_BUG_ON_FOLIO(folio_test_active(folio), folio); |
1439 | if (!folio_test_mlocked(folio)) { |
1440 | int type = folio_is_file_lru(folio); |
1441 | folio_set_active(folio); |
1442 | stat->nr_activate[type] += nr_pages; |
			count_memcg_folio_events(folio, PGACTIVATE, nr_pages);
1444 | } |
1445 | keep_locked: |
1446 | folio_unlock(folio); |
1447 | keep: |
		list_add(&folio->lru, &ret_folios);
1449 | VM_BUG_ON_FOLIO(folio_test_lru(folio) || |
1450 | folio_test_unevictable(folio), folio); |
1451 | } |
1452 | /* 'folio_list' is always empty here */ |
1453 | |
1454 | /* Migrate folios selected for demotion */ |
	nr_reclaimed += demote_folio_list(&demote_folios, pgdat);
1456 | /* Folios that could not be demoted are still in @demote_folios */ |
	if (!list_empty(&demote_folios)) {
1458 | /* Folios which weren't demoted go back on @folio_list */ |
		list_splice_init(&demote_folios, folio_list);
1460 | |
1461 | /* |
1462 | * goto retry to reclaim the undemoted folios in folio_list if |
1463 | * desired. |
1464 | * |
1465 | * Reclaiming directly from top tier nodes is not often desired |
1466 | * due to it breaking the LRU ordering: in general memory |
1467 | * should be reclaimed from lower tier nodes and demoted from |
1468 | * top tier nodes. |
1469 | * |
1470 | * However, disabling reclaim from top tier nodes entirely |
1471 | * would cause ooms in edge scenarios where lower tier memory |
1472 | * is unreclaimable for whatever reason, eg memory being |
1473 | * mlocked or too hot to reclaim. We can disable reclaim |
1474 | * from top tier nodes in proactive reclaim though as that is |
1475 | * not real memory pressure. |
1476 | */ |
1477 | if (!sc->proactive) { |
1478 | do_demote_pass = false; |
1479 | goto retry; |
1480 | } |
1481 | } |
1482 | |
1483 | pgactivate = stat->nr_activate[0] + stat->nr_activate[1]; |
1484 | |
	mem_cgroup_uncharge_list(&free_folios);
	try_to_unmap_flush();
	free_unref_page_list(&free_folios);

	list_splice(&ret_folios, folio_list);
	count_vm_events(PGACTIVATE, pgactivate);
1491 | |
1492 | if (plug) |
		swap_write_unplug(plug);
1494 | return nr_reclaimed; |
1495 | } |
1496 | |
1497 | unsigned int reclaim_clean_pages_from_list(struct zone *zone, |
1498 | struct list_head *folio_list) |
1499 | { |
1500 | struct scan_control sc = { |
1501 | .gfp_mask = GFP_KERNEL, |
1502 | .may_unmap = 1, |
1503 | }; |
1504 | struct reclaim_stat stat; |
1505 | unsigned int nr_reclaimed; |
1506 | struct folio *folio, *next; |
1507 | LIST_HEAD(clean_folios); |
1508 | unsigned int noreclaim_flag; |
1509 | |
1510 | list_for_each_entry_safe(folio, next, folio_list, lru) { |
1511 | if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) && |
1512 | !folio_test_dirty(folio) && !__folio_test_movable(folio) && |
1513 | !folio_test_unevictable(folio)) { |
1514 | folio_clear_active(folio); |
			list_move(&folio->lru, &clean_folios);
1516 | } |
1517 | } |
1518 | |
1519 | /* |
1520 | * We should be safe here since we are only dealing with file pages and |
1521 | * we are not kswapd and therefore cannot write dirty file pages. But |
1522 | * call memalloc_noreclaim_save() anyway, just in case these conditions |
1523 | * change in the future. |
1524 | */ |
1525 | noreclaim_flag = memalloc_noreclaim_save(); |
	nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc,
					 &stat, true);
	memalloc_noreclaim_restore(noreclaim_flag);
1529 | |
	list_splice(&clean_folios, folio_list);
1531 | mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, |
1532 | -(long)nr_reclaimed); |
1533 | /* |
1534 | * Since lazyfree pages are isolated from file LRU from the beginning, |
1535 | * they will rotate back to anonymous LRU in the end if it failed to |
1536 | * discard so isolated count will be mismatched. |
1537 | * Compensate the isolated count for both LRU lists. |
1538 | */ |
1539 | mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, |
1540 | stat.nr_lazyfree_fail); |
1541 | mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, |
1542 | -(long)stat.nr_lazyfree_fail); |
1543 | return nr_reclaimed; |
1544 | } |
1545 | |
1546 | /* |
1547 | * Update LRU sizes after isolating pages. The LRU size updates must |
1548 | * be complete before mem_cgroup_update_lru_size due to a sanity check. |
1549 | */ |
1550 | static __always_inline void update_lru_sizes(struct lruvec *lruvec, |
1551 | enum lru_list lru, unsigned long *nr_zone_taken) |
1552 | { |
1553 | int zid; |
1554 | |
1555 | for (zid = 0; zid < MAX_NR_ZONES; zid++) { |
1556 | if (!nr_zone_taken[zid]) |
1557 | continue; |
1558 | |
		update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1560 | } |
1561 | |
1562 | } |
1563 | |
1564 | #ifdef CONFIG_CMA |
/*
 * It is a waste of effort to scan and reclaim CMA pages if they are not
 * available to the current allocation context. Kswapd cannot take part in
 * the skipping because its sc->gfp_mask is GFP_KERNEL, which does not let
 * it distinguish this scenario.
 */
1570 | static bool skip_cma(struct folio *folio, struct scan_control *sc) |
1571 | { |
1572 | return !current_is_kswapd() && |
		gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
1574 | folio_migratetype(folio) == MIGRATE_CMA; |
1575 | } |
1576 | #else |
1577 | static bool skip_cma(struct folio *folio, struct scan_control *sc) |
1578 | { |
1579 | return false; |
1580 | } |
1581 | #endif |
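
/*
 * Illustrative example, not part of the kernel logic: a direct reclaimer
 * working on behalf of a GFP_KERNEL allocation (migratetype MIGRATE_UNMOVABLE)
 * skips folios sitting in CMA pageblocks, since reclaiming them could not
 * satisfy that allocation anyway; a __GFP_MOVABLE allocation (migratetype
 * MIGRATE_MOVABLE) does not skip them, and neither does kswapd.
 */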
1582 | |
1583 | /* |
 * Isolate folios from the lruvec, filling @dst with up to nr_to_scan entries.
1585 | * |
1586 | * lruvec->lru_lock is heavily contended. Some of the functions that |
1587 | * shrink the lists perform better by taking out a batch of pages |
1588 | * and working on them outside the LRU lock. |
1589 | * |
1590 | * For pagecache intensive workloads, this function is the hottest |
1591 | * spot in the kernel (apart from copy_*_user functions). |
1592 | * |
 * lruvec->lru_lock must be held before calling this function.
1594 | * |
1595 | * @nr_to_scan: The number of eligible pages to look through on the list. |
1596 | * @lruvec: The LRU vector to pull pages from. |
1597 | * @dst: The temp list to put pages on to. |
1598 | * @nr_scanned: The number of pages that were scanned. |
1599 | * @sc: The scan_control struct for this reclaim session |
1600 | * @lru: LRU list id for isolating |
1601 | * |
 * Returns the number of pages moved onto @dst.
1603 | */ |
1604 | static unsigned long isolate_lru_folios(unsigned long nr_to_scan, |
1605 | struct lruvec *lruvec, struct list_head *dst, |
1606 | unsigned long *nr_scanned, struct scan_control *sc, |
1607 | enum lru_list lru) |
1608 | { |
1609 | struct list_head *src = &lruvec->lists[lru]; |
1610 | unsigned long nr_taken = 0; |
1611 | unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 }; |
1612 | unsigned long nr_skipped[MAX_NR_ZONES] = { 0, }; |
1613 | unsigned long skipped = 0; |
1614 | unsigned long scan, total_scan, nr_pages; |
1615 | LIST_HEAD(folios_skipped); |
1616 | |
1617 | total_scan = 0; |
1618 | scan = 0; |
	while (scan < nr_to_scan && !list_empty(src)) {
		struct list_head *move_to = src;
		struct folio *folio;

		folio = lru_to_folio(src);
1624 | prefetchw_prev_lru_folio(folio, src, flags); |
1625 | |
1626 | nr_pages = folio_nr_pages(folio); |
1627 | total_scan += nr_pages; |
1628 | |
1629 | if (folio_zonenum(folio) > sc->reclaim_idx || |
1630 | skip_cma(folio, sc)) { |
1631 | nr_skipped[folio_zonenum(folio)] += nr_pages; |
1632 | move_to = &folios_skipped; |
1633 | goto move; |
1634 | } |
1635 | |
1636 | /* |
1637 | * Do not count skipped folios because that makes the function |
1638 | * return with no isolated folios if the LRU mostly contains |
1639 | * ineligible folios. This causes the VM to not reclaim any |
1640 | * folios, triggering a premature OOM. |
1641 | * Account all pages in a folio. |
1642 | */ |
1643 | scan += nr_pages; |
1644 | |
1645 | if (!folio_test_lru(folio)) |
1646 | goto move; |
1647 | if (!sc->may_unmap && folio_mapped(folio)) |
1648 | goto move; |
1649 | |
1650 | /* |
1651 | * Be careful not to clear the lru flag until after we're |
1652 | * sure the folio is not being freed elsewhere -- the |
1653 | * folio release code relies on it. |
1654 | */ |
1655 | if (unlikely(!folio_try_get(folio))) |
1656 | goto move; |
1657 | |
1658 | if (!folio_test_clear_lru(folio)) { |
1659 | /* Another thread is already isolating this folio */ |
1660 | folio_put(folio); |
1661 | goto move; |
1662 | } |
1663 | |
1664 | nr_taken += nr_pages; |
1665 | nr_zone_taken[folio_zonenum(folio)] += nr_pages; |
1666 | move_to = dst; |
1667 | move: |
		list_move(&folio->lru, move_to);
1669 | } |
1670 | |
1671 | /* |
1672 | * Splice any skipped folios to the start of the LRU list. Note that |
1673 | * this disrupts the LRU order when reclaiming for lower zones but |
1674 | * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX |
1675 | * scanning would soon rescan the same folios to skip and waste lots |
1676 | * of cpu cycles. |
1677 | */ |
	if (!list_empty(&folios_skipped)) {
		int zid;

		list_splice(&folios_skipped, src);
1682 | for (zid = 0; zid < MAX_NR_ZONES; zid++) { |
1683 | if (!nr_skipped[zid]) |
1684 | continue; |
1685 | |
1686 | __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]); |
1687 | skipped += nr_skipped[zid]; |
1688 | } |
1689 | } |
1690 | *nr_scanned = total_scan; |
	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
				    total_scan, skipped, nr_taken, lru);
1693 | update_lru_sizes(lruvec, lru, nr_zone_taken); |
1694 | return nr_taken; |
1695 | } |
1696 | |
1697 | /** |
1698 | * folio_isolate_lru() - Try to isolate a folio from its LRU list. |
1699 | * @folio: Folio to isolate from its LRU list. |
1700 | * |
1701 | * Isolate a @folio from an LRU list and adjust the vmstat statistic |
1702 | * corresponding to whatever LRU list the folio was on. |
1703 | * |
1704 | * The folio will have its LRU flag cleared. If it was found on the |
1705 | * active list, it will have the Active flag set. If it was found on the |
1706 | * unevictable list, it will have the Unevictable flag set. These flags |
1707 | * may need to be cleared by the caller before letting the page go. |
1708 | * |
1709 | * Context: |
1710 | * |
1711 | * (1) Must be called with an elevated refcount on the folio. This is a |
1712 | * fundamental difference from isolate_lru_folios() (which is called |
1713 | * without a stable reference). |
1714 | * (2) The lru_lock must not be held. |
1715 | * (3) Interrupts must be enabled. |
1716 | * |
1717 | * Return: true if the folio was removed from an LRU list. |
1718 | * false if the folio was not on an LRU list. |
1719 | */ |
1720 | bool folio_isolate_lru(struct folio *folio) |
1721 | { |
1722 | bool ret = false; |
1723 | |
1724 | VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio); |
1725 | |
1726 | if (folio_test_clear_lru(folio)) { |
1727 | struct lruvec *lruvec; |
1728 | |
1729 | folio_get(folio); |
1730 | lruvec = folio_lruvec_lock_irq(folio); |
1731 | lruvec_del_folio(lruvec, folio); |
1732 | unlock_page_lruvec_irq(lruvec); |
1733 | ret = true; |
1734 | } |
1735 | |
1736 | return ret; |
1737 | } |
1738 | |
1739 | /* |
1740 | * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and |
 * then get rescheduled. When a massive number of tasks are doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
 * the LRU list shrinks and gets scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
1745 | */ |
1746 | static int too_many_isolated(struct pglist_data *pgdat, int file, |
1747 | struct scan_control *sc) |
1748 | { |
1749 | unsigned long inactive, isolated; |
1750 | bool too_many; |
1751 | |
1752 | if (current_is_kswapd()) |
1753 | return 0; |
1754 | |
1755 | if (!writeback_throttling_sane(sc)) |
1756 | return 0; |
1757 | |
1758 | if (file) { |
		inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
		isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
	} else {
		inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
		isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1764 | } |
1765 | |
1766 | /* |
1767 | * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they |
1768 | * won't get blocked by normal direct-reclaimers, forming a circular |
1769 | * deadlock. |
1770 | */ |
	if (gfp_has_io_fs(sc->gfp_mask))
1772 | inactive >>= 3; |
1773 | |
1774 | too_many = isolated > inactive; |
1775 | |
1776 | /* Wake up tasks throttled due to too_many_isolated. */ |
1777 | if (!too_many) |
1778 | wake_throttle_isolated(pgdat); |
1779 | |
1780 | return too_many; |
1781 | } |
1782 | |
1783 | /* |
1784 | * move_folios_to_lru() moves folios from private @list to appropriate LRU list. |
1785 | * On return, @list is reused as a list of folios to be freed by the caller. |
1786 | * |
1787 | * Returns the number of pages moved to the given lruvec. |
1788 | */ |
1789 | static unsigned int move_folios_to_lru(struct lruvec *lruvec, |
1790 | struct list_head *list) |
1791 | { |
1792 | int nr_pages, nr_moved = 0; |
1793 | LIST_HEAD(folios_to_free); |
1794 | |
	while (!list_empty(list)) {
		struct folio *folio = lru_to_folio(list);

		VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
		list_del(&folio->lru);
		if (unlikely(!folio_evictable(folio))) {
			spin_unlock_irq(&lruvec->lru_lock);
			folio_putback_lru(folio);
			spin_lock_irq(&lruvec->lru_lock);
1804 | continue; |
1805 | } |
1806 | |
1807 | /* |
1808 | * The folio_set_lru needs to be kept here for list integrity. |
1809 | * Otherwise: |
1810 | * #0 move_folios_to_lru #1 release_pages |
1811 | * if (!folio_put_testzero()) |
1812 | * if (folio_put_testzero()) |
1813 | * !lru //skip lru_lock |
1814 | * folio_set_lru() |
1815 | * list_add(&folio->lru,) |
1816 | * list_add(&folio->lru,) |
1817 | */ |
1818 | folio_set_lru(folio); |
1819 | |
1820 | if (unlikely(folio_put_testzero(folio))) { |
1821 | __folio_clear_lru_flags(folio); |
1822 | |
1823 | if (unlikely(folio_test_large(folio))) { |
				spin_unlock_irq(&lruvec->lru_lock);
				destroy_large_folio(folio);
				spin_lock_irq(&lruvec->lru_lock);
			} else
				list_add(&folio->lru, &folios_to_free);
1829 | |
1830 | continue; |
1831 | } |
1832 | |
1833 | /* |
1834 | * All pages were isolated from the same lruvec (and isolation |
1835 | * inhibits memcg migration). |
1836 | */ |
1837 | VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); |
1838 | lruvec_add_folio(lruvec, folio); |
1839 | nr_pages = folio_nr_pages(folio); |
1840 | nr_moved += nr_pages; |
1841 | if (folio_test_active(folio)) |
1842 | workingset_age_nonresident(lruvec, nr_pages); |
1843 | } |
1844 | |
1845 | /* |
1846 | * To save our caller's stack, now use input list for pages to free. |
1847 | */ |
	list_splice(&folios_to_free, list);
1849 | |
1850 | return nr_moved; |
1851 | } |
1852 | |
1853 | /* |
1854 | * If a kernel thread (such as nfsd for loop-back mounts) services a backing |
1855 | * device by writing to the page cache it sets PF_LOCAL_THROTTLE. In this case |
1856 | * we should not throttle. Otherwise it is safe to do so. |
1857 | */ |
1858 | static int current_may_throttle(void) |
1859 | { |
1860 | return !(current->flags & PF_LOCAL_THROTTLE); |
1861 | } |
1862 | |
1863 | /* |
1864 | * shrink_inactive_list() is a helper for shrink_node(). It returns the number |
1865 | * of reclaimed pages |
1866 | */ |
1867 | static unsigned long shrink_inactive_list(unsigned long nr_to_scan, |
1868 | struct lruvec *lruvec, struct scan_control *sc, |
1869 | enum lru_list lru) |
1870 | { |
1871 | LIST_HEAD(folio_list); |
1872 | unsigned long nr_scanned; |
1873 | unsigned int nr_reclaimed = 0; |
1874 | unsigned long nr_taken; |
1875 | struct reclaim_stat stat; |
1876 | bool file = is_file_lru(lru); |
1877 | enum vm_event_item item; |
1878 | struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
1879 | bool stalled = false; |
1880 | |
1881 | while (unlikely(too_many_isolated(pgdat, file, sc))) { |
1882 | if (stalled) |
1883 | return 0; |
1884 | |
1885 | /* wait a bit for the reclaimer. */ |
1886 | stalled = true; |
		reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);
1888 | |
1889 | /* We are about to die and free our memory. Return now. */ |
1890 | if (fatal_signal_pending(current)) |
1891 | return SWAP_CLUSTER_MAX; |
1892 | } |
1893 | |
1894 | lru_add_drain(); |
1895 | |
	spin_lock_irq(&lruvec->lru_lock);

	nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
				      &nr_scanned, sc, lru);

	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
	item = PGSCAN_KSWAPD + reclaimer_offset();
	if (!cgroup_reclaim(sc))
		__count_vm_events(item, nr_scanned);
	__count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
	__count_vm_events(PGSCAN_ANON + file, nr_scanned);

	spin_unlock_irq(&lruvec->lru_lock);
1909 | |
1910 | if (nr_taken == 0) |
1911 | return 0; |
1912 | |
	nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false);

	spin_lock_irq(&lruvec->lru_lock);
	move_folios_to_lru(lruvec, &folio_list);

	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
	item = PGSTEAL_KSWAPD + reclaimer_offset();
	if (!cgroup_reclaim(sc))
		__count_vm_events(item, nr_reclaimed);
	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
	spin_unlock_irq(&lruvec->lru_lock);

	lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed);
	mem_cgroup_uncharge_list(&folio_list);
	free_unref_page_list(&folio_list);
1929 | |
1930 | /* |
1931 | * If dirty folios are scanned that are not queued for IO, it |
1932 | * implies that flushers are not doing their job. This can |
1933 | * happen when memory pressure pushes dirty folios to the end of |
1934 | * the LRU before the dirty limits are breached and the dirty |
1935 | * data has expired. It can also happen when the proportion of |
1936 | * dirty folios grows not through writes but through memory |
1937 | * pressure reclaiming all the clean cache. And in some cases, |
1938 | * the flushers simply cannot keep up with the allocation |
1939 | * rate. Nudge the flusher threads in case they are asleep. |
1940 | */ |
1941 | if (stat.nr_unqueued_dirty == nr_taken) { |
		wakeup_flusher_threads(WB_REASON_VMSCAN);
1943 | /* |
1944 | * For cgroupv1 dirty throttling is achieved by waking up |
1945 | * the kernel flusher here and later waiting on folios |
1946 | * which are in writeback to finish (see shrink_folio_list()). |
1947 | * |
1948 | * Flusher may not be able to issue writeback quickly |
1949 | * enough for cgroupv1 writeback throttling to work |
1950 | * on a large system. |
1951 | */ |
1952 | if (!writeback_throttling_sane(sc)) |
			reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
1954 | } |
1955 | |
1956 | sc->nr.dirty += stat.nr_dirty; |
1957 | sc->nr.congested += stat.nr_congested; |
1958 | sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; |
1959 | sc->nr.writeback += stat.nr_writeback; |
1960 | sc->nr.immediate += stat.nr_immediate; |
1961 | sc->nr.taken += nr_taken; |
1962 | if (file) |
1963 | sc->nr.file_taken += nr_taken; |
1964 | |
	trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
			nr_scanned, nr_reclaimed, &stat, sc->priority, file);
1967 | return nr_reclaimed; |
1968 | } |
1969 | |
1970 | /* |
1971 | * shrink_active_list() moves folios from the active LRU to the inactive LRU. |
1972 | * |
1973 | * We move them the other way if the folio is referenced by one or more |
1974 | * processes. |
1975 | * |
1976 | * If the folios are mostly unmapped, the processing is fast and it is |
1977 | * appropriate to hold lru_lock across the whole operation. But if |
1978 | * the folios are mapped, the processing is slow (folio_referenced()), so |
1979 | * we should drop lru_lock around each folio. It's impossible to balance |
1980 | * this, so instead we remove the folios from the LRU while processing them. |
1981 | * It is safe to rely on the active flag against the non-LRU folios in here |
1982 | * because nobody will play with that bit on a non-LRU folio. |
1983 | * |
1984 | * The downside is that we have to touch folio->_refcount against each folio. |
1985 | * But we had to alter folio->flags anyway. |
1986 | */ |
1987 | static void shrink_active_list(unsigned long nr_to_scan, |
1988 | struct lruvec *lruvec, |
1989 | struct scan_control *sc, |
1990 | enum lru_list lru) |
1991 | { |
1992 | unsigned long nr_taken; |
1993 | unsigned long nr_scanned; |
1994 | unsigned long vm_flags; |
1995 | LIST_HEAD(l_hold); /* The folios which were snipped off */ |
1996 | LIST_HEAD(l_active); |
1997 | LIST_HEAD(l_inactive); |
1998 | unsigned nr_deactivate, nr_activate; |
1999 | unsigned nr_rotated = 0; |
2000 | int file = is_file_lru(lru); |
2001 | struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
2002 | |
2003 | lru_add_drain(); |
2004 | |
	spin_lock_irq(&lruvec->lru_lock);

	nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold,
				      &nr_scanned, sc, lru);

	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);

	if (!cgroup_reclaim(sc))
		__count_vm_events(PGREFILL, nr_scanned);
	__count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);

	spin_unlock_irq(&lruvec->lru_lock);
2017 | |
	while (!list_empty(&l_hold)) {
		struct folio *folio;

		cond_resched();
		folio = lru_to_folio(&l_hold);
		list_del(&folio->lru);
2024 | |
2025 | if (unlikely(!folio_evictable(folio))) { |
2026 | folio_putback_lru(folio); |
2027 | continue; |
2028 | } |
2029 | |
2030 | if (unlikely(buffer_heads_over_limit)) { |
2031 | if (folio_needs_release(folio) && |
2032 | folio_trylock(folio)) { |
				filemap_release_folio(folio, 0);
2034 | folio_unlock(folio); |
2035 | } |
2036 | } |
2037 | |
2038 | /* Referenced or rmap lock contention: rotate */ |
		if (folio_referenced(folio, 0, sc->target_mem_cgroup,
				     &vm_flags) != 0) {
2041 | /* |
2042 | * Identify referenced, file-backed active folios and |
			 * give them one more trip around the active list. So
			 * that executable code gets a better chance to stay in
2045 | * memory under moderate memory pressure. Anon folios |
2046 | * are not likely to be evicted by use-once streaming |
2047 | * IO, plus JVM can create lots of anon VM_EXEC folios, |
2048 | * so we ignore them here. |
2049 | */ |
2050 | if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) { |
2051 | nr_rotated += folio_nr_pages(folio); |
				list_add(&folio->lru, &l_active);
2053 | continue; |
2054 | } |
2055 | } |
2056 | |
2057 | folio_clear_active(folio); /* we are de-activating */ |
2058 | folio_set_workingset(folio); |
		list_add(&folio->lru, &l_inactive);
2060 | } |
2061 | |
2062 | /* |
2063 | * Move folios back to the lru list. |
2064 | */ |
	spin_lock_irq(&lruvec->lru_lock);

	nr_activate = move_folios_to_lru(lruvec, &l_active);
	nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
	/* Keep all free folios in l_active list */
	list_splice(&l_inactive, &l_active);

	__count_vm_events(PGDEACTIVATE, nr_deactivate);
	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);

	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
	spin_unlock_irq(&lruvec->lru_lock);

	if (nr_rotated)
		lru_note_cost(lruvec, file, 0, nr_rotated);
	mem_cgroup_uncharge_list(&l_active);
	free_unref_page_list(&l_active);
	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
			nr_deactivate, nr_rotated, sc->priority, file);
2084 | } |
2085 | |
2086 | static unsigned int reclaim_folio_list(struct list_head *folio_list, |
2087 | struct pglist_data *pgdat) |
2088 | { |
2089 | struct reclaim_stat dummy_stat; |
2090 | unsigned int nr_reclaimed; |
2091 | struct folio *folio; |
2092 | struct scan_control sc = { |
2093 | .gfp_mask = GFP_KERNEL, |
2094 | .may_writepage = 1, |
2095 | .may_unmap = 1, |
2096 | .may_swap = 1, |
2097 | .no_demotion = 1, |
2098 | }; |
2099 | |
	nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false);
	while (!list_empty(folio_list)) {
		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);
2104 | folio_putback_lru(folio); |
2105 | } |
2106 | |
2107 | return nr_reclaimed; |
2108 | } |
2109 | |
2110 | unsigned long reclaim_pages(struct list_head *folio_list) |
2111 | { |
2112 | int nid; |
2113 | unsigned int nr_reclaimed = 0; |
2114 | LIST_HEAD(node_folio_list); |
2115 | unsigned int noreclaim_flag; |
2116 | |
	if (list_empty(folio_list))
2118 | return nr_reclaimed; |
2119 | |
2120 | noreclaim_flag = memalloc_noreclaim_save(); |
2121 | |
	nid = folio_nid(lru_to_folio(folio_list));
	do {
		struct folio *folio = lru_to_folio(folio_list);

		if (nid == folio_nid(folio)) {
			folio_clear_active(folio);
			list_move(&folio->lru, &node_folio_list);
			continue;
		}

		nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
		nid = folio_nid(lru_to_folio(folio_list));
	} while (!list_empty(folio_list));

	nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));

	memalloc_noreclaim_restore(noreclaim_flag);
2139 | |
2140 | return nr_reclaimed; |
2141 | } |
2142 | |
2143 | static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, |
2144 | struct lruvec *lruvec, struct scan_control *sc) |
2145 | { |
2146 | if (is_active_lru(lru)) { |
2147 | if (sc->may_deactivate & (1 << is_file_lru(lru))) |
2148 | shrink_active_list(nr_to_scan, lruvec, sc, lru); |
2149 | else |
2150 | sc->skipped_deactivate = 1; |
2151 | return 0; |
2152 | } |
2153 | |
2154 | return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); |
2155 | } |
2156 | |
2157 | /* |
2158 | * The inactive anon list should be small enough that the VM never has |
2159 | * to do too much work. |
2160 | * |
2161 | * The inactive file list should be small enough to leave most memory |
2162 | * to the established workingset on the scan-resistant active list, |
2163 | * but large enough to avoid thrashing the aggregate readahead window. |
2164 | * |
2165 | * Both inactive lists should also be large enough that each inactive |
2166 | * folio has a chance to be referenced again before it is reclaimed. |
2167 | * |
2168 | * If that fails and refaulting is observed, the inactive list grows. |
2169 | * |
2170 | * The inactive_ratio is the target ratio of ACTIVE to INACTIVE folios |
2171 | * on this LRU, maintained by the pageout code. An inactive_ratio |
2172 | * of 3 means 3:1 or 25% of the folios are kept on the inactive list. |
2173 | * |
2174 | * total target max |
2175 | * memory ratio inactive |
2176 | * ------------------------------------- |
2177 | * 10MB 1 5MB |
2178 | * 100MB 1 50MB |
2179 | * 1GB 3 250MB |
2180 | * 10GB 10 0.9GB |
2181 | * 100GB 31 3GB |
2182 | * 1TB 101 10GB |
2183 | * 10TB 320 32GB |
2184 | */ |
2185 | static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) |
2186 | { |
2187 | enum lru_list active_lru = inactive_lru + LRU_ACTIVE; |
2188 | unsigned long inactive, active; |
2189 | unsigned long inactive_ratio; |
2190 | unsigned long gb; |
2191 | |
	inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
	active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
2194 | |
2195 | gb = (inactive + active) >> (30 - PAGE_SHIFT); |
2196 | if (gb) |
2197 | inactive_ratio = int_sqrt(10 * gb); |
2198 | else |
2199 | inactive_ratio = 1; |
2200 | |
2201 | return inactive * inactive_ratio < active; |
2202 | } |
2203 | |
2204 | enum scan_balance { |
2205 | SCAN_EQUAL, |
2206 | SCAN_FRACT, |
2207 | SCAN_ANON, |
2208 | SCAN_FILE, |
2209 | }; |
2210 | |
2211 | static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) |
2212 | { |
2213 | unsigned long file; |
2214 | struct lruvec *target_lruvec; |
2215 | |
2216 | if (lru_gen_enabled()) |
2217 | return; |
2218 | |
	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
2220 | |
2221 | /* |
2222 | * Flush the memory cgroup stats, so that we read accurate per-memcg |
2223 | * lruvec stats for heuristics. |
2224 | */ |
2225 | mem_cgroup_flush_stats(); |
2226 | |
2227 | /* |
2228 | * Determine the scan balance between anon and file LRUs. |
2229 | */ |
	spin_lock_irq(&target_lruvec->lru_lock);
	sc->anon_cost = target_lruvec->anon_cost;
	sc->file_cost = target_lruvec->file_cost;
	spin_unlock_irq(&target_lruvec->lru_lock);
2234 | |
2235 | /* |
2236 | * Target desirable inactive:active list ratios for the anon |
2237 | * and file LRU lists. |
2238 | */ |
2239 | if (!sc->force_deactivate) { |
2240 | unsigned long refaults; |
2241 | |
2242 | /* |
2243 | * When refaults are being observed, it means a new |
2244 | * workingset is being established. Deactivate to get |
2245 | * rid of any stale active pages quickly. |
2246 | */ |
		refaults = lruvec_page_state(target_lruvec,
				WORKINGSET_ACTIVATE_ANON);
		if (refaults != target_lruvec->refaults[WORKINGSET_ANON] ||
		    inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
			sc->may_deactivate |= DEACTIVATE_ANON;
		else
			sc->may_deactivate &= ~DEACTIVATE_ANON;

		refaults = lruvec_page_state(target_lruvec,
				WORKINGSET_ACTIVATE_FILE);
		if (refaults != target_lruvec->refaults[WORKINGSET_FILE] ||
		    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
2259 | sc->may_deactivate |= DEACTIVATE_FILE; |
2260 | else |
2261 | sc->may_deactivate &= ~DEACTIVATE_FILE; |
2262 | } else |
2263 | sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; |
2264 | |
2265 | /* |
2266 | * If we have plenty of inactive file pages that aren't |
2267 | * thrashing, try to reclaim those first before touching |
2268 | * anonymous pages. |
2269 | */ |
	file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
2271 | if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE)) |
2272 | sc->cache_trim_mode = 1; |
2273 | else |
2274 | sc->cache_trim_mode = 0; |
2275 | |
2276 | /* |
2277 | * Prevent the reclaimer from falling into the cache trap: as |
2278 | * cache pages start out inactive, every cache fault will tip |
2279 | * the scan balance towards the file LRU. And as the file LRU |
2280 | * shrinks, so does the window for rotation from references. |
2281 | * This means we have a runaway feedback loop where a tiny |
2282 | * thrashing file LRU becomes infinitely more attractive than |
2283 | * anon pages. Try to detect this based on file LRU size. |
2284 | */ |
2285 | if (!cgroup_reclaim(sc)) { |
2286 | unsigned long total_high_wmark = 0; |
2287 | unsigned long free, anon; |
2288 | int z; |
2289 | |
		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
			   node_page_state(pgdat, NR_INACTIVE_FILE);
2293 | |
2294 | for (z = 0; z < MAX_NR_ZONES; z++) { |
2295 | struct zone *zone = &pgdat->node_zones[z]; |
2296 | |
2297 | if (!managed_zone(zone)) |
2298 | continue; |
2299 | |
2300 | total_high_wmark += high_wmark_pages(zone); |
2301 | } |
2302 | |
2303 | /* |
2304 | * Consider anon: if that's low too, this isn't a |
2305 | * runaway file reclaim problem, but rather just |
2306 | * extreme pressure. Reclaim as per usual then. |
2307 | */ |
		anon = node_page_state(pgdat, NR_INACTIVE_ANON);
2309 | |
2310 | sc->file_is_tiny = |
2311 | file + free <= total_high_wmark && |
2312 | !(sc->may_deactivate & DEACTIVATE_ANON) && |
2313 | anon >> sc->priority; |
2314 | } |
2315 | } |
2316 | |
2317 | /* |
2318 | * Determine how aggressively the anon and file LRU lists should be |
2319 | * scanned. |
2320 | * |
2321 | * nr[0] = anon inactive folios to scan; nr[1] = anon active folios to scan |
2322 | * nr[2] = file inactive folios to scan; nr[3] = file active folios to scan |
2323 | */ |
2324 | static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, |
2325 | unsigned long *nr) |
2326 | { |
2327 | struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
2328 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
2329 | unsigned long anon_cost, file_cost, total_cost; |
2330 | int swappiness = mem_cgroup_swappiness(memcg); |
2331 | u64 fraction[ANON_AND_FILE]; |
2332 | u64 denominator = 0; /* gcc */ |
2333 | enum scan_balance scan_balance; |
2334 | unsigned long ap, fp; |
2335 | enum lru_list lru; |
2336 | |
2337 | /* If we have no swap space, do not bother scanning anon folios. */ |
	if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
2339 | scan_balance = SCAN_FILE; |
2340 | goto out; |
2341 | } |
2342 | |
2343 | /* |
2344 | * Global reclaim will swap to prevent OOM even with no |
2345 | * swappiness, but memcg users want to use this knob to |
2346 | * disable swapping for individual groups completely when |
2347 | * using the memory controller's swap limit feature would be |
2348 | * too expensive. |
2349 | */ |
2350 | if (cgroup_reclaim(sc) && !swappiness) { |
2351 | scan_balance = SCAN_FILE; |
2352 | goto out; |
2353 | } |
2354 | |
2355 | /* |
2356 | * Do not apply any pressure balancing cleverness when the |
2357 | * system is close to OOM, scan both anon and file equally |
2358 | * (unless the swappiness setting disagrees with swapping). |
2359 | */ |
2360 | if (!sc->priority && swappiness) { |
2361 | scan_balance = SCAN_EQUAL; |
2362 | goto out; |
2363 | } |
2364 | |
2365 | /* |
2366 | * If the system is almost out of file pages, force-scan anon. |
2367 | */ |
2368 | if (sc->file_is_tiny) { |
2369 | scan_balance = SCAN_ANON; |
2370 | goto out; |
2371 | } |
2372 | |
2373 | /* |
2374 | * If there is enough inactive page cache, we do not reclaim |
	 * anything from the anonymous working set right now.
2376 | */ |
2377 | if (sc->cache_trim_mode) { |
2378 | scan_balance = SCAN_FILE; |
2379 | goto out; |
2380 | } |
2381 | |
2382 | scan_balance = SCAN_FRACT; |
2383 | /* |
2384 | * Calculate the pressure balance between anon and file pages. |
2385 | * |
2386 | * The amount of pressure we put on each LRU is inversely |
2387 | * proportional to the cost of reclaiming each list, as |
2388 | * determined by the share of pages that are refaulting, times |
2389 | * the relative IO cost of bringing back a swapped out |
2390 | * anonymous page vs reloading a filesystem page (swappiness). |
2391 | * |
2392 | * Although we limit that influence to ensure no list gets |
2393 | * left behind completely: at least a third of the pressure is |
2394 | * applied, before swappiness. |
2395 | * |
2396 | * With swappiness at 100, anon and file have equal IO cost. |
2397 | */ |
2398 | total_cost = sc->anon_cost + sc->file_cost; |
2399 | anon_cost = total_cost + sc->anon_cost; |
2400 | file_cost = total_cost + sc->file_cost; |
2401 | total_cost = anon_cost + file_cost; |
2402 | |
2403 | ap = swappiness * (total_cost + 1); |
2404 | ap /= anon_cost + 1; |
2405 | |
2406 | fp = (200 - swappiness) * (total_cost + 1); |
2407 | fp /= file_cost + 1; |
2408 | |
2409 | fraction[0] = ap; |
2410 | fraction[1] = fp; |
2411 | denominator = ap + fp; |
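
	/*
	 * Worked example (illustrative only): with sc->anon_cost = 100 and
	 * sc->file_cost = 300, the dampened costs become anon_cost = 500 and
	 * file_cost = 700 (total_cost = 1200). At the default swappiness of
	 * 60: ap = 60 * 1201 / 501 = 143 and fp = 140 * 1201 / 701 = 239, so
	 * the anon LRUs receive 143/382 (~37%) of the scan pressure and the
	 * file LRUs 239/382 (~63%).
	 */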
2412 | out: |
2413 | for_each_evictable_lru(lru) { |
2414 | int file = is_file_lru(lru); |
2415 | unsigned long lruvec_size; |
2416 | unsigned long low, min; |
2417 | unsigned long scan; |
2418 | |
		lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
		mem_cgroup_protection(sc->target_mem_cgroup, memcg,
				      &min, &low);
2422 | |
2423 | if (min || low) { |
2424 | /* |
2425 | * Scale a cgroup's reclaim pressure by proportioning |
2426 | * its current usage to its memory.low or memory.min |
2427 | * setting. |
2428 | * |
2429 | * This is important, as otherwise scanning aggression |
2430 | * becomes extremely binary -- from nothing as we |
2431 | * approach the memory protection threshold, to totally |
2432 | * nominal as we exceed it. This results in requiring |
2433 | * setting extremely liberal protection thresholds. It |
2434 | * also means we simply get no protection at all if we |
2435 | * set it too low, which is not ideal. |
2436 | * |
2437 | * If there is any protection in place, we reduce scan |
2438 | * pressure by how much of the total memory used is |
2439 | * within protection thresholds. |
2440 | * |
2441 | * There is one special case: in the first reclaim pass, |
2442 | * we skip over all groups that are within their low |
2443 | * protection. If that fails to reclaim enough pages to |
2444 | * satisfy the reclaim goal, we come back and override |
2445 | * the best-effort low protection. However, we still |
2446 | * ideally want to honor how well-behaved groups are in |
2447 | * that case instead of simply punishing them all |
2448 | * equally. As such, we reclaim them based on how much |
2449 | * memory they are using, reducing the scan pressure |
2450 | * again by how much of the total memory used is under |
2451 | * hard protection. |
2452 | */ |
2453 | unsigned long cgroup_size = mem_cgroup_size(memcg); |
2454 | unsigned long protection; |
2455 | |
2456 | /* memory.low scaling, make sure we retry before OOM */ |
2457 | if (!sc->memcg_low_reclaim && low > min) { |
2458 | protection = low; |
2459 | sc->memcg_low_skipped = 1; |
2460 | } else { |
2461 | protection = min; |
2462 | } |
2463 | |
2464 | /* Avoid TOCTOU with earlier protection check */ |
2465 | cgroup_size = max(cgroup_size, protection); |
2466 | |
2467 | scan = lruvec_size - lruvec_size * protection / |
2468 | (cgroup_size + 1); |
2469 | |
2470 | /* |
2471 | * Minimally target SWAP_CLUSTER_MAX pages to keep |
2472 | * reclaim moving forwards, avoiding decrementing |
2473 | * sc->priority further than desirable. |
2474 | */ |
2475 | scan = max(scan, SWAP_CLUSTER_MAX); |
2476 | } else { |
2477 | scan = lruvec_size; |
2478 | } |
2479 | |
2480 | scan >>= sc->priority; |
2481 | |
2482 | /* |
2483 | * If the cgroup's already been deleted, make sure to |
2484 | * scrape out the remaining cache. |
2485 | */ |
2486 | if (!scan && !mem_cgroup_online(memcg)) |
2487 | scan = min(lruvec_size, SWAP_CLUSTER_MAX); |
2488 | |
2489 | switch (scan_balance) { |
2490 | case SCAN_EQUAL: |
2491 | /* Scan lists relative to size */ |
2492 | break; |
2493 | case SCAN_FRACT: |
2494 | /* |
2495 | * Scan types proportional to swappiness and |
2496 | * their relative recent reclaim efficiency. |
2497 | * Make sure we don't miss the last page on |
2498 | * the offlined memory cgroups because of a |
2499 | * round-off error. |
2500 | */ |
2501 | scan = mem_cgroup_online(memcg) ? |
				div64_u64(scan * fraction[file], denominator) :
2503 | DIV64_U64_ROUND_UP(scan * fraction[file], |
2504 | denominator); |
2505 | break; |
2506 | case SCAN_FILE: |
2507 | case SCAN_ANON: |
2508 | /* Scan one type exclusively */ |
2509 | if ((scan_balance == SCAN_FILE) != file) |
2510 | scan = 0; |
2511 | break; |
2512 | default: |
2513 | /* Look ma, no brain */ |
2514 | BUG(); |
2515 | } |
2516 | |
2517 | nr[lru] = scan; |
2518 | } |
2519 | } |
2520 | |
2521 | /* |
2522 | * Anonymous LRU management is a waste if there is |
2523 | * ultimately no way to reclaim the memory. |
2524 | */ |
2525 | static bool can_age_anon_pages(struct pglist_data *pgdat, |
2526 | struct scan_control *sc) |
2527 | { |
2528 | /* Aging the anon LRU is valuable if swap is present: */ |
2529 | if (total_swap_pages > 0) |
2530 | return true; |
2531 | |
2532 | /* Also valuable if anon pages can be demoted: */ |
	return can_demote(pgdat->node_id, sc);
2534 | } |
2535 | |
2536 | #ifdef CONFIG_LRU_GEN |
2537 | |
2538 | #ifdef CONFIG_LRU_GEN_ENABLED |
2539 | DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS); |
2540 | #define get_cap(cap) static_branch_likely(&lru_gen_caps[cap]) |
2541 | #else |
2542 | DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS); |
2543 | #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap]) |
2544 | #endif |
2545 | |
2546 | static bool should_walk_mmu(void) |
2547 | { |
2548 | return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK); |
2549 | } |
2550 | |
2551 | static bool should_clear_pmd_young(void) |
2552 | { |
2553 | return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG); |
2554 | } |
2555 | |
2556 | /****************************************************************************** |
2557 | * shorthand helpers |
2558 | ******************************************************************************/ |
2559 | |
2560 | #define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset)) |
2561 | |
2562 | #define DEFINE_MAX_SEQ(lruvec) \ |
2563 | unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq) |
2564 | |
2565 | #define DEFINE_MIN_SEQ(lruvec) \ |
2566 | unsigned long min_seq[ANON_AND_FILE] = { \ |
2567 | READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \ |
2568 | READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \ |
2569 | } |
2570 | |
2571 | #define for_each_gen_type_zone(gen, type, zone) \ |
2572 | for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \ |
2573 | for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ |
2574 | for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++) |
2575 | |
2576 | #define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS) |
2577 | #define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS) |
2578 | |
2579 | static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid) |
2580 | { |
2581 | struct pglist_data *pgdat = NODE_DATA(nid); |
2582 | |
2583 | #ifdef CONFIG_MEMCG |
2584 | if (memcg) { |
2585 | struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; |
2586 | |
2587 | /* see the comment in mem_cgroup_lruvec() */ |
2588 | if (!lruvec->pgdat) |
2589 | lruvec->pgdat = pgdat; |
2590 | |
2591 | return lruvec; |
2592 | } |
2593 | #endif |
2594 | VM_WARN_ON_ONCE(!mem_cgroup_disabled()); |
2595 | |
2596 | return &pgdat->__lruvec; |
2597 | } |
2598 | |
2599 | static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc) |
2600 | { |
2601 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
2602 | struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
2603 | |
2604 | if (!sc->may_swap) |
2605 | return 0; |
2606 | |
	if (!can_demote(pgdat->node_id, sc) &&
2608 | mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH) |
2609 | return 0; |
2610 | |
2611 | return mem_cgroup_swappiness(memcg); |
2612 | } |
2613 | |
2614 | static int get_nr_gens(struct lruvec *lruvec, int type) |
2615 | { |
2616 | return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; |
2617 | } |
2618 | |
2619 | static bool __maybe_unused seq_is_valid(struct lruvec *lruvec) |
2620 | { |
2621 | /* see the comment on lru_gen_folio */ |
	return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
	       get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
	       get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
2625 | } |
2626 | |
2627 | /****************************************************************************** |
2628 | * Bloom filters |
2629 | ******************************************************************************/ |
2630 | |
2631 | /* |
2632 | * Bloom filters with m=1<<15, k=2 and the false positive rates of ~1/5 when |
2633 | * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of |
2634 | * bits in a bitmap, k is the number of hash functions and n is the number of |
2635 | * inserted items. |
2636 | * |
2637 | * Page table walkers use one of the two filters to reduce their search space. |
2638 | * To get rid of non-leaf entries that no longer have enough leaf entries, the |
2639 | * aging uses the double-buffering technique to flip to the other filter each |
2640 | * time it produces a new generation. For non-leaf entries that have enough |
2641 | * leaf entries, the aging carries them over to the next generation in |
 * walk_pmd_range(); the eviction also reports them when walking the rmap
2643 | * in lru_gen_look_around(). |
2644 | * |
2645 | * For future optimizations: |
2646 | * 1. It's not necessary to keep both filters all the time. The spare one can be |
2647 | * freed after the RCU grace period and reallocated if needed again. |
2648 | * 2. And when reallocating, it's worth scaling its size according to the number |
2649 | * of inserted entries in the other filter, to reduce the memory overhead on |
2650 | * small systems and false positives on large systems. |
2651 | * 3. Jenkins' hash function is an alternative to Knuth's. |
2652 | */ |
2653 | #define BLOOM_FILTER_SHIFT 15 |
2654 | |
2655 | static inline int filter_gen_from_seq(unsigned long seq) |
2656 | { |
2657 | return seq % NR_BLOOM_FILTERS; |
2658 | } |
2659 | |
2660 | static void get_item_key(void *item, int *key) |
2661 | { |
	u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2);
2663 | |
2664 | BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32)); |
2665 | |
2666 | key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); |
2667 | key[1] = hash >> BLOOM_FILTER_SHIFT; |
2668 | } |
2669 | |
2670 | static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item) |
2671 | { |
2672 | int key[2]; |
2673 | unsigned long *filter; |
2674 | int gen = filter_gen_from_seq(seq); |
2675 | |
2676 | filter = READ_ONCE(lruvec->mm_state.filters[gen]); |
2677 | if (!filter) |
2678 | return true; |
2679 | |
2680 | get_item_key(item, key); |
2681 | |
2682 | return test_bit(key[0], filter) && test_bit(key[1], filter); |
2683 | } |
2684 | |
2685 | static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item) |
2686 | { |
2687 | int key[2]; |
2688 | unsigned long *filter; |
2689 | int gen = filter_gen_from_seq(seq); |
2690 | |
2691 | filter = READ_ONCE(lruvec->mm_state.filters[gen]); |
2692 | if (!filter) |
2693 | return; |
2694 | |
2695 | get_item_key(item, key); |
2696 | |
2697 | if (!test_bit(key[0], filter)) |
		set_bit(key[0], filter);
	if (!test_bit(key[1], filter))
		set_bit(key[1], filter);
2701 | } |
2702 | |
2703 | static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq) |
2704 | { |
2705 | unsigned long *filter; |
2706 | int gen = filter_gen_from_seq(seq); |
2707 | |
2708 | filter = lruvec->mm_state.filters[gen]; |
2709 | if (filter) { |
		bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
2711 | return; |
2712 | } |
2713 | |
2714 | filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT), |
2715 | __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); |
2716 | WRITE_ONCE(lruvec->mm_state.filters[gen], filter); |
2717 | } |
2718 | |
2719 | /****************************************************************************** |
2720 | * mm_struct list |
2721 | ******************************************************************************/ |
2722 | |
2723 | static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg) |
2724 | { |
2725 | static struct lru_gen_mm_list mm_list = { |
2726 | .fifo = LIST_HEAD_INIT(mm_list.fifo), |
2727 | .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock), |
2728 | }; |
2729 | |
2730 | #ifdef CONFIG_MEMCG |
2731 | if (memcg) |
2732 | return &memcg->mm_list; |
2733 | #endif |
2734 | VM_WARN_ON_ONCE(!mem_cgroup_disabled()); |
2735 | |
2736 | return &mm_list; |
2737 | } |
2738 | |
2739 | void lru_gen_add_mm(struct mm_struct *mm) |
2740 | { |
2741 | int nid; |
2742 | struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm); |
2743 | struct lru_gen_mm_list *mm_list = get_mm_list(memcg); |
2744 | |
2745 | VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); |
2746 | #ifdef CONFIG_MEMCG |
2747 | VM_WARN_ON_ONCE(mm->lru_gen.memcg); |
2748 | mm->lru_gen.memcg = memcg; |
2749 | #endif |
	spin_lock(&mm_list->lock);
2751 | |
2752 | for_each_node_state(nid, N_MEMORY) { |
2753 | struct lruvec *lruvec = get_lruvec(memcg, nid); |
2754 | |
2755 | /* the first addition since the last iteration */ |
2756 | if (lruvec->mm_state.tail == &mm_list->fifo) |
2757 | lruvec->mm_state.tail = &mm->lru_gen.list; |
2758 | } |
2759 | |
	list_add_tail(&mm->lru_gen.list, &mm_list->fifo);

	spin_unlock(&mm_list->lock);
2763 | } |
2764 | |
2765 | void lru_gen_del_mm(struct mm_struct *mm) |
2766 | { |
2767 | int nid; |
2768 | struct lru_gen_mm_list *mm_list; |
2769 | struct mem_cgroup *memcg = NULL; |
2770 | |
	if (list_empty(&mm->lru_gen.list))
2772 | return; |
2773 | |
2774 | #ifdef CONFIG_MEMCG |
2775 | memcg = mm->lru_gen.memcg; |
2776 | #endif |
2777 | mm_list = get_mm_list(memcg); |
2778 | |
	spin_lock(&mm_list->lock);
2780 | |
2781 | for_each_node(nid) { |
2782 | struct lruvec *lruvec = get_lruvec(memcg, nid); |
2783 | |
2784 | /* where the current iteration continues after */ |
2785 | if (lruvec->mm_state.head == &mm->lru_gen.list) |
2786 | lruvec->mm_state.head = lruvec->mm_state.head->prev; |
2787 | |
2788 | /* where the last iteration ended before */ |
2789 | if (lruvec->mm_state.tail == &mm->lru_gen.list) |
2790 | lruvec->mm_state.tail = lruvec->mm_state.tail->next; |
2791 | } |
2792 | |
	list_del_init(&mm->lru_gen.list);

	spin_unlock(&mm_list->lock);

#ifdef CONFIG_MEMCG
	mem_cgroup_put(mm->lru_gen.memcg);
2799 | mm->lru_gen.memcg = NULL; |
2800 | #endif |
2801 | } |
2802 | |
2803 | #ifdef CONFIG_MEMCG |
2804 | void lru_gen_migrate_mm(struct mm_struct *mm) |
2805 | { |
2806 | struct mem_cgroup *memcg; |
2807 | struct task_struct *task = rcu_dereference_protected(mm->owner, true); |
2808 | |
2809 | VM_WARN_ON_ONCE(task->mm != mm); |
2810 | lockdep_assert_held(&task->alloc_lock); |
2811 | |
2812 | /* for mm_update_next_owner() */ |
2813 | if (mem_cgroup_disabled()) |
2814 | return; |
2815 | |
2816 | /* migration can happen before addition */ |
2817 | if (!mm->lru_gen.memcg) |
2818 | return; |
2819 | |
2820 | rcu_read_lock(); |
	memcg = mem_cgroup_from_task(task);
2822 | rcu_read_unlock(); |
2823 | if (memcg == mm->lru_gen.memcg) |
2824 | return; |
2825 | |
2826 | VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); |
2827 | |
2828 | lru_gen_del_mm(mm); |
2829 | lru_gen_add_mm(mm); |
2830 | } |
2831 | #endif |
2832 | |
2833 | static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last) |
2834 | { |
2835 | int i; |
2836 | int hist; |
2837 | |
2838 | lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); |
2839 | |
2840 | if (walk) { |
		hist = lru_hist_from_seq(walk->max_seq);
2842 | |
2843 | for (i = 0; i < NR_MM_STATS; i++) { |
2844 | WRITE_ONCE(lruvec->mm_state.stats[hist][i], |
2845 | lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]); |
2846 | walk->mm_stats[i] = 0; |
2847 | } |
2848 | } |
2849 | |
2850 | if (NR_HIST_GENS > 1 && last) { |
		hist = lru_hist_from_seq(lruvec->mm_state.seq + 1);
2852 | |
2853 | for (i = 0; i < NR_MM_STATS; i++) |
2854 | WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0); |
2855 | } |
2856 | } |
2857 | |
2858 | static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk) |
2859 | { |
2860 | int type; |
2861 | unsigned long size = 0; |
	struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
2863 | int key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); |
2864 | |
2865 | if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) |
2866 | return true; |
2867 | |
	clear_bit(key, &mm->lru_gen.bitmap);

	for (type = !walk->can_swap; type < ANON_AND_FILE; type++) {
		size += type ? get_mm_counter(mm, MM_FILEPAGES) :
			       get_mm_counter(mm, MM_ANONPAGES) +
			       get_mm_counter(mm, MM_SHMEMPAGES);
2874 | } |
2875 | |
2876 | if (size < MIN_LRU_BATCH) |
2877 | return true; |
2878 | |
2879 | return !mmget_not_zero(mm); |
2880 | } |
2881 | |
2882 | static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, |
2883 | struct mm_struct **iter) |
2884 | { |
2885 | bool first = false; |
2886 | bool last = false; |
2887 | struct mm_struct *mm = NULL; |
2888 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
2889 | struct lru_gen_mm_list *mm_list = get_mm_list(memcg); |
2890 | struct lru_gen_mm_state *mm_state = &lruvec->mm_state; |
2891 | |
2892 | /* |
2893 | * mm_state->seq is incremented after each iteration of mm_list. There |
2894 | * are three interesting cases for this page table walker: |
2895 | * 1. It tries to start a new iteration with a stale max_seq: there is |
2896 | * nothing left to do. |
2897 | * 2. It started the next iteration: it needs to reset the Bloom filter |
2898 | * so that a fresh set of PTE tables can be recorded. |
2899 | * 3. It ended the current iteration: it needs to reset the mm stats |
2900 | * counters and tell its caller to increment max_seq. |
2901 | */ |
	spin_lock(&mm_list->lock);
2903 | |
2904 | VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq); |
2905 | |
2906 | if (walk->max_seq <= mm_state->seq) |
2907 | goto done; |
2908 | |
2909 | if (!mm_state->head) |
2910 | mm_state->head = &mm_list->fifo; |
2911 | |
2912 | if (mm_state->head == &mm_list->fifo) |
2913 | first = true; |
2914 | |
2915 | do { |
2916 | mm_state->head = mm_state->head->next; |
2917 | if (mm_state->head == &mm_list->fifo) { |
2918 | WRITE_ONCE(mm_state->seq, mm_state->seq + 1); |
2919 | last = true; |
2920 | break; |
2921 | } |
2922 | |
2923 | /* force scan for those added after the last iteration */ |
2924 | if (!mm_state->tail || mm_state->tail == mm_state->head) { |
2925 | mm_state->tail = mm_state->head->next; |
2926 | walk->force_scan = true; |
2927 | } |
2928 | |
2929 | mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); |
2930 | if (should_skip_mm(mm, walk)) |
2931 | mm = NULL; |
2932 | } while (!mm); |
2933 | done: |
2934 | if (*iter || last) |
2935 | reset_mm_stats(lruvec, walk, last); |
2936 | |
	spin_unlock(&mm_list->lock);

	if (mm && first)
		reset_bloom_filter(lruvec, walk->max_seq + 1);
2941 | |
2942 | if (*iter) |
2943 | mmput_async(*iter); |
2944 | |
2945 | *iter = mm; |
2946 | |
2947 | return last; |
2948 | } |
2949 | |
2950 | static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq) |
2951 | { |
2952 | bool success = false; |
2953 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
2954 | struct lru_gen_mm_list *mm_list = get_mm_list(memcg); |
2955 | struct lru_gen_mm_state *mm_state = &lruvec->mm_state; |
2956 | |
	spin_lock(&mm_list->lock);
2958 | |
2959 | VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq); |
2960 | |
2961 | if (max_seq > mm_state->seq) { |
2962 | mm_state->head = NULL; |
2963 | mm_state->tail = NULL; |
2964 | WRITE_ONCE(mm_state->seq, mm_state->seq + 1); |
		reset_mm_stats(lruvec, NULL, true);
2966 | success = true; |
2967 | } |
2968 | |
	spin_unlock(&mm_list->lock);
2970 | |
2971 | return success; |
2972 | } |
2973 | |
2974 | /****************************************************************************** |
2975 | * PID controller |
2976 | ******************************************************************************/ |
2977 | |
2978 | /* |
2979 | * A feedback loop based on Proportional-Integral-Derivative (PID) controller. |
2980 | * |
2981 | * The P term is refaulted/(evicted+protected) from a tier in the generation |
2982 | * currently being evicted; the I term is the exponential moving average of the |
2983 | * P term over the generations previously evicted, using the smoothing factor |
2984 | * 1/2; the D term isn't supported. |
2985 | * |
2986 | * The setpoint (SP) is always the first tier of one type; the process variable |
2987 | * (PV) is either any tier of the other type or any other tier of the same |
2988 | * type. |
2989 | * |
2990 | * The error is the difference between the SP and the PV; the correction is to |
2991 | * turn off protection when SP>PV or turn on protection when SP<PV. |
2992 | * |
2993 | * For future optimizations: |
2994 | * 1. The D term may discount the other two terms over time so that long-lived |
2995 | * generations can resist stale information. |
2996 | */ |
2997 | struct ctrl_pos { |
2998 | unsigned long refaulted; |
2999 | unsigned long total; |
3000 | int gain; |
3001 | }; |
3002 | |
3003 | static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain, |
3004 | struct ctrl_pos *pos) |
3005 | { |
3006 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
	int hist = lru_hist_from_seq(lrugen->min_seq[type]);

	pos->refaulted = lrugen->avg_refaulted[type][tier] +
			 atomic_long_read(&lrugen->refaulted[hist][type][tier]);
	pos->total = lrugen->avg_total[type][tier] +
		     atomic_long_read(&lrugen->evicted[hist][type][tier]);
3013 | if (tier) |
3014 | pos->total += lrugen->protected[hist][type][tier - 1]; |
3015 | pos->gain = gain; |
3016 | } |
3017 | |
3018 | static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover) |
3019 | { |
3020 | int hist, tier; |
3021 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
3022 | bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1; |
3023 | unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1; |
3024 | |
3025 | lockdep_assert_held(&lruvec->lru_lock); |
3026 | |
3027 | if (!carryover && !clear) |
3028 | return; |
3029 | |
3030 | hist = lru_hist_from_seq(seq); |
3031 | |
3032 | for (tier = 0; tier < MAX_NR_TIERS; tier++) { |
3033 | if (carryover) { |
3034 | unsigned long sum; |
3035 | |
3036 | sum = lrugen->avg_refaulted[type][tier] + |
			      atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3038 | WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); |
3039 | |
3040 | sum = lrugen->avg_total[type][tier] + |
			      atomic_long_read(&lrugen->evicted[hist][type][tier]);
3042 | if (tier) |
3043 | sum += lrugen->protected[hist][type][tier - 1]; |
3044 | WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); |
3045 | } |
3046 | |
3047 | if (clear) { |
			atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
			atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
3050 | if (tier) |
3051 | WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0); |
3052 | } |
3053 | } |
3054 | } |
3055 | |
3056 | static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv) |
3057 | { |
3058 | /* |
3059 | * Return true if the PV has a limited number of refaults or a lower |
3060 | * refaulted/total than the SP. |
3061 | */ |
3062 | return pv->refaulted < MIN_LRU_BATCH || |
3063 | pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= |
3064 | (sp->refaulted + 1) * pv->total * pv->gain; |
3065 | } |
3066 | |
3067 | /****************************************************************************** |
3068 | * the aging |
3069 | ******************************************************************************/ |
3070 | |
3071 | /* promote pages accessed through page tables */ |
3072 | static int folio_update_gen(struct folio *folio, int gen) |
3073 | { |
3074 | unsigned long new_flags, old_flags = READ_ONCE(folio->flags); |
3075 | |
3076 | VM_WARN_ON_ONCE(gen >= MAX_NR_GENS); |
3077 | VM_WARN_ON_ONCE(!rcu_read_lock_held()); |
3078 | |
3079 | do { |
3080 | /* lru_gen_del_folio() has isolated this page? */ |
3081 | if (!(old_flags & LRU_GEN_MASK)) { |
3082 | /* for shrink_folio_list() */ |
3083 | new_flags = old_flags | BIT(PG_referenced); |
3084 | continue; |
3085 | } |
3086 | |
3087 | new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); |
3088 | new_flags |= (gen + 1UL) << LRU_GEN_PGOFF; |
3089 | } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); |
3090 | |
3091 | return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; |
3092 | } |
3093 | |
3094 | /* protect pages accessed multiple times through file descriptors */ |
3095 | static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming) |
3096 | { |
3097 | int type = folio_is_file_lru(folio); |
3098 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3100 | unsigned long new_flags, old_flags = READ_ONCE(folio->flags); |
3101 | |
3102 | VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio); |
3103 | |
3104 | do { |
3105 | new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; |
3106 | /* folio_update_gen() has promoted this page? */ |
3107 | if (new_gen >= 0 && new_gen != old_gen) |
3108 | return new_gen; |
3109 | |
3110 | new_gen = (old_gen + 1) % MAX_NR_GENS; |
3111 | |
3112 | new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); |
3113 | new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF; |
3114 | /* for folio_end_writeback() */ |
3115 | if (reclaiming) |
3116 | new_flags |= BIT(PG_reclaim); |
3117 | } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); |
3118 | |
3119 | lru_gen_update_size(lruvec, folio, old_gen, new_gen); |
3120 | |
3121 | return new_gen; |
3122 | } |
3123 | |
3124 | static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio, |
3125 | int old_gen, int new_gen) |
3126 | { |
3127 | int type = folio_is_file_lru(folio); |
3128 | int zone = folio_zonenum(folio); |
3129 | int delta = folio_nr_pages(folio); |
3130 | |
3131 | VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS); |
3132 | VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS); |
3133 | |
3134 | walk->batched++; |
3135 | |
3136 | walk->nr_pages[old_gen][type][zone] -= delta; |
3137 | walk->nr_pages[new_gen][type][zone] += delta; |
3138 | } |
3139 | |
3140 | static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk) |
3141 | { |
3142 | int gen, type, zone; |
3143 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
3144 | |
3145 | walk->batched = 0; |
3146 | |
3147 | for_each_gen_type_zone(gen, type, zone) { |
3148 | enum lru_list lru = type * LRU_INACTIVE_FILE; |
3149 | int delta = walk->nr_pages[gen][type][zone]; |
3150 | |
3151 | if (!delta) |
3152 | continue; |
3153 | |
3154 | walk->nr_pages[gen][type][zone] = 0; |
3155 | WRITE_ONCE(lrugen->nr_pages[gen][type][zone], |
3156 | lrugen->nr_pages[gen][type][zone] + delta); |
3157 | |
3158 | if (lru_gen_is_active(lruvec, gen)) |
3159 | lru += LRU_ACTIVE; |
3160 | __update_lru_size(lruvec, lru, zone, delta); |
3161 | } |
3162 | } |
3163 | |
3164 | static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args) |
3165 | { |
3166 | struct address_space *mapping; |
3167 | struct vm_area_struct *vma = args->vma; |
3168 | struct lru_gen_mm_walk *walk = args->private; |
3169 | |
3170 | if (!vma_is_accessible(vma)) |
3171 | return true; |
3172 | |
3173 | if (is_vm_hugetlb_page(vma)) |
3174 | return true; |
3175 | |
3176 | if (!vma_has_recency(vma)) |
3177 | return true; |
3178 | |
3179 | if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) |
3180 | return true; |
3181 | |
	if (vma == get_gate_vma(vma->vm_mm))
3183 | return true; |
3184 | |
3185 | if (vma_is_anonymous(vma)) |
3186 | return !walk->can_swap; |
3187 | |
3188 | if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) |
3189 | return true; |
3190 | |
3191 | mapping = vma->vm_file->f_mapping; |
3192 | if (mapping_unevictable(mapping)) |
3193 | return true; |
3194 | |
3195 | if (shmem_mapping(mapping)) |
3196 | return !walk->can_swap; |
3197 | |
3198 | /* to exclude special mappings like dax, etc. */ |
3199 | return !mapping->a_ops->read_folio; |
3200 | } |
3201 | |
3202 | /* |
3203 | * Some userspace memory allocators map many single-page VMAs. Instead of |
 * returning to the PGD table for each such VMA, finish an entire PMD
3205 | * table to reduce zigzags and improve cache performance. |
3206 | */ |
3207 | static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args, |
3208 | unsigned long *vm_start, unsigned long *vm_end) |
3209 | { |
3210 | unsigned long start = round_up(*vm_end, size); |
3211 | unsigned long end = (start | ~mask) + 1; |
3212 | VMA_ITERATOR(vmi, args->mm, start); |
3213 | |
3214 | VM_WARN_ON_ONCE(mask & size); |
3215 | VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask)); |
3216 | |
3217 | for_each_vma(vmi, args->vma) { |
3218 | if (end && end <= args->vma->vm_start) |
3219 | return false; |
3220 | |
		if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args))
3222 | continue; |
3223 | |
3224 | *vm_start = max(start, args->vma->vm_start); |
3225 | *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; |
3226 | |
3227 | return true; |
3228 | } |
3229 | |
3230 | return false; |
3231 | } |
3232 | |
3233 | static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr) |
3234 | { |
3235 | unsigned long pfn = pte_pfn(pte); |
3236 | |
3237 | VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); |
3238 | |
	if (!pte_present(pte) || is_zero_pfn(pfn))
3240 | return -1; |
3241 | |
3242 | if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte))) |
3243 | return -1; |
3244 | |
3245 | if (WARN_ON_ONCE(!pfn_valid(pfn))) |
3246 | return -1; |
3247 | |
3248 | return pfn; |
3249 | } |
3250 | |
3251 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) |
3252 | static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr) |
3253 | { |
3254 | unsigned long pfn = pmd_pfn(pmd); |
3255 | |
3256 | VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); |
3257 | |
3258 | if (!pmd_present(pmd) || is_huge_zero_pmd(pmd)) |
3259 | return -1; |
3260 | |
3261 | if (WARN_ON_ONCE(pmd_devmap(pmd))) |
3262 | return -1; |
3263 | |
3264 | if (WARN_ON_ONCE(!pfn_valid(pfn))) |
3265 | return -1; |
3266 | |
3267 | return pfn; |
3268 | } |
3269 | #endif |
3270 | |
3271 | static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg, |
3272 | struct pglist_data *pgdat, bool can_swap) |
3273 | { |
3274 | struct folio *folio; |
3275 | |
3276 | /* try to avoid unnecessary memory loads */ |
3277 | if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) |
3278 | return NULL; |
3279 | |
3280 | folio = pfn_folio(pfn); |
3281 | if (folio_nid(folio) != pgdat->node_id) |
3282 | return NULL; |
3283 | |
3284 | if (folio_memcg_rcu(folio) != memcg) |
3285 | return NULL; |
3286 | |
3287 | /* file VMAs can contain anon pages from COW */ |
3288 | if (!folio_is_file_lru(folio) && !can_swap) |
3289 | return NULL; |
3290 | |
3291 | return folio; |
3292 | } |
3293 | |
3294 | static bool suitable_to_scan(int total, int young) |
3295 | { |
3296 | int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8); |
3297 | |
3298 | /* suitable if the average number of young PTEs per cacheline is >=1 */ |
3299 | return young * n >= total; |
3300 | } |
3301 | |
3302 | static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end, |
3303 | struct mm_walk *args) |
3304 | { |
3305 | int i; |
3306 | pte_t *pte; |
3307 | spinlock_t *ptl; |
3308 | unsigned long addr; |
3309 | int total = 0; |
3310 | int young = 0; |
3311 | struct lru_gen_mm_walk *walk = args->private; |
	struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
	struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
	int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);

	pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl);
	if (!pte)
		return false;
	if (!spin_trylock(ptl)) {
3320 | pte_unmap(pte); |
3321 | return false; |
3322 | } |
3323 | |
3324 | arch_enter_lazy_mmu_mode(); |
3325 | restart: |
	for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
		unsigned long pfn;
		struct folio *folio;
		pte_t ptent = ptep_get(pte + i);
3330 | |
3331 | total++; |
3332 | walk->mm_stats[MM_LEAF_TOTAL]++; |
3333 | |
		pfn = get_pte_pfn(ptent, args->vma, addr);
		if (pfn == -1)
			continue;

		if (!pte_young(ptent)) {
			walk->mm_stats[MM_LEAF_OLD]++;
			continue;
		}

		folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
		if (!folio)
			continue;

		if (!ptep_test_and_clear_young(args->vma, addr, pte + i))
3348 | VM_WARN_ON_ONCE(true); |
3349 | |
3350 | young++; |
3351 | walk->mm_stats[MM_LEAF_YOUNG]++; |
3352 | |
		if (pte_dirty(ptent) && !folio_test_dirty(folio) &&
3354 | !(folio_test_anon(folio) && folio_test_swapbacked(folio) && |
3355 | !folio_test_swapcache(folio))) |
3356 | folio_mark_dirty(folio); |
3357 | |
		old_gen = folio_update_gen(folio, new_gen);
3359 | if (old_gen >= 0 && old_gen != new_gen) |
3360 | update_batch_size(walk, folio, old_gen, new_gen); |
3361 | } |
3362 | |
	if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
3364 | goto restart; |
3365 | |
3366 | arch_leave_lazy_mmu_mode(); |
3367 | pte_unmap_unlock(pte, ptl); |
3368 | |
3369 | return suitable_to_scan(total, young); |
3370 | } |
3371 | |
3372 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) |
3373 | static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma, |
3374 | struct mm_walk *args, unsigned long *bitmap, unsigned long *first) |
3375 | { |
3376 | int i; |
3377 | pmd_t *pmd; |
3378 | spinlock_t *ptl; |
3379 | struct lru_gen_mm_walk *walk = args->private; |
	struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
	struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
	int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
3383 | |
3384 | VM_WARN_ON_ONCE(pud_leaf(*pud)); |
3385 | |
3386 | /* try to batch at most 1+MIN_LRU_BATCH+1 entries */ |
3387 | if (*first == -1) { |
3388 | *first = addr; |
		bitmap_zero(bitmap, MIN_LRU_BATCH);
3390 | return; |
3391 | } |
3392 | |
	i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first);
3394 | if (i && i <= MIN_LRU_BATCH) { |
3395 | __set_bit(i - 1, bitmap); |
3396 | return; |
3397 | } |
3398 | |
	pmd = pmd_offset(pud, *first);

	ptl = pmd_lockptr(args->mm, pmd);
	if (!spin_trylock(ptl))
3403 | goto done; |
3404 | |
3405 | arch_enter_lazy_mmu_mode(); |
3406 | |
3407 | do { |
3408 | unsigned long pfn; |
3409 | struct folio *folio; |
3410 | |
3411 | /* don't round down the first address */ |
3412 | addr = i ? (*first & PMD_MASK) + i * PMD_SIZE : *first; |
3413 | |
		pfn = get_pmd_pfn(pmd[i], vma, addr);
		if (pfn == -1)
			goto next;

		if (!pmd_trans_huge(pmd[i])) {
			if (should_clear_pmd_young())
				pmdp_test_and_clear_young(vma, addr, pmd + i);
			goto next;
		}

		folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
		if (!folio)
			goto next;

		if (!pmdp_test_and_clear_young(vma, addr, pmd + i))
3429 | goto next; |
3430 | |
3431 | walk->mm_stats[MM_LEAF_YOUNG]++; |
3432 | |
		if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) &&
3434 | !(folio_test_anon(folio) && folio_test_swapbacked(folio) && |
3435 | !folio_test_swapcache(folio))) |
3436 | folio_mark_dirty(folio); |
3437 | |
		old_gen = folio_update_gen(folio, new_gen);
3439 | if (old_gen >= 0 && old_gen != new_gen) |
3440 | update_batch_size(walk, folio, old_gen, new_gen); |
3441 | next: |
		i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
3443 | } while (i <= MIN_LRU_BATCH); |
3444 | |
3445 | arch_leave_lazy_mmu_mode(); |
	spin_unlock(ptl);
3447 | done: |
3448 | *first = -1; |
3449 | } |
3450 | #else |
3451 | static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma, |
3452 | struct mm_walk *args, unsigned long *bitmap, unsigned long *first) |
3453 | { |
3454 | } |
3455 | #endif |
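
/*
 * Added commentary (not from the original source): in the build where it is
 * not a stub, walk_pmd_range_locked() above batches its work. The first call
 * only records the starting address in *first; later calls that fall within
 * MIN_LRU_BATCH PMD slots of it just set a bit in @bitmap; and only when the
 * batch is full, or when the caller flushes it by passing addr == -1, is the
 * PMD lock taken (with trylock) once to clear the accessed bit on all of the
 * recorded entries.
 */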
3456 | |
3457 | static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end, |
3458 | struct mm_walk *args) |
3459 | { |
3460 | int i; |
3461 | pmd_t *pmd; |
3462 | unsigned long next; |
3463 | unsigned long addr; |
3464 | struct vm_area_struct *vma; |
3465 | DECLARE_BITMAP(bitmap, MIN_LRU_BATCH); |
3466 | unsigned long first = -1; |
3467 | struct lru_gen_mm_walk *walk = args->private; |
3468 | |
3469 | VM_WARN_ON_ONCE(pud_leaf(*pud)); |
3470 | |
3471 | /* |
3472 | * Finish an entire PMD in two passes: the first only reaches to PTE |
3473 | * tables to avoid taking the PMD lock; the second, if necessary, takes |
3474 | * the PMD lock to clear the accessed bit in PMD entries. |
3475 | */ |
	pmd = pmd_offset(pud, start & PUD_MASK);
restart:
	/* walk_pte_range() may call get_next_vma() */
	vma = args->vma;
	for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) {
		pmd_t val = pmdp_get_lockless(pmd + i);

		next = pmd_addr_end(addr, end);

		if (!pmd_present(val) || is_huge_zero_pmd(val)) {
3486 | walk->mm_stats[MM_LEAF_TOTAL]++; |
3487 | continue; |
3488 | } |
3489 | |
3490 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
		if (pmd_trans_huge(val)) {
			unsigned long pfn = pmd_pfn(val);
			struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);

			walk->mm_stats[MM_LEAF_TOTAL]++;

			if (!pmd_young(val)) {
3498 | walk->mm_stats[MM_LEAF_OLD]++; |
3499 | continue; |
3500 | } |
3501 | |
3502 | /* try to avoid unnecessary memory loads */ |
3503 | if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) |
3504 | continue; |
3505 | |
			walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
3507 | continue; |
3508 | } |
3509 | #endif |
3510 | walk->mm_stats[MM_NONLEAF_TOTAL]++; |
3511 | |
3512 | if (should_clear_pmd_young()) { |
			if (!pmd_young(val))
3514 | continue; |
3515 | |
			walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
3517 | } |
3518 | |
		if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
3520 | continue; |
3521 | |
3522 | walk->mm_stats[MM_NONLEAF_FOUND]++; |
3523 | |
		if (!walk_pte_range(&val, addr, next, args))
3525 | continue; |
3526 | |
3527 | walk->mm_stats[MM_NONLEAF_ADDED]++; |
3528 | |
3529 | /* carry over to the next generation */ |
		update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i);
3531 | } |
3532 | |
	walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first);

	if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end))
3536 | goto restart; |
3537 | } |
3538 | |
3539 | static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end, |
3540 | struct mm_walk *args) |
3541 | { |
3542 | int i; |
3543 | pud_t *pud; |
3544 | unsigned long addr; |
3545 | unsigned long next; |
3546 | struct lru_gen_mm_walk *walk = args->private; |
3547 | |
3548 | VM_WARN_ON_ONCE(p4d_leaf(*p4d)); |
3549 | |
	pud = pud_offset(p4d, start & P4D_MASK);
restart:
	for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
		pud_t val = READ_ONCE(pud[i]);

		next = pud_addr_end(addr, end);

		if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
			continue;

		walk_pmd_range(&val, addr, next, args);
3561 | |
3562 | if (need_resched() || walk->batched >= MAX_LRU_BATCH) { |
3563 | end = (addr | ~PUD_MASK) + 1; |
3564 | goto done; |
3565 | } |
3566 | } |
3567 | |
	if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end))
3569 | goto restart; |
3570 | |
3571 | end = round_up(end, P4D_SIZE); |
3572 | done: |
3573 | if (!end || !args->vma) |
3574 | return 1; |
3575 | |
3576 | walk->next_addr = max(end, args->vma->vm_start); |
3577 | |
3578 | return -EAGAIN; |
3579 | } |
3580 | |
3581 | static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk) |
3582 | { |
3583 | static const struct mm_walk_ops mm_walk_ops = { |
3584 | .test_walk = should_skip_vma, |
3585 | .p4d_entry = walk_pud_range, |
3586 | .walk_lock = PGWALK_RDLOCK, |
3587 | }; |
3588 | |
3589 | int err; |
3590 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
3591 | |
3592 | walk->next_addr = FIRST_USER_ADDRESS; |
3593 | |
3594 | do { |
3595 | DEFINE_MAX_SEQ(lruvec); |
3596 | |
3597 | err = -EBUSY; |
3598 | |
3599 | /* another thread might have called inc_max_seq() */ |
3600 | if (walk->max_seq != max_seq) |
3601 | break; |
3602 | |
3603 | /* folio_update_gen() requires stable folio_memcg() */ |
3604 | if (!mem_cgroup_trylock_pages(memcg)) |
3605 | break; |
3606 | |
3607 | /* the caller might be holding the lock for write */ |
3608 | if (mmap_read_trylock(mm)) { |
			err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk);
3610 | |
3611 | mmap_read_unlock(mm); |
3612 | } |
3613 | |
3614 | mem_cgroup_unlock_pages(); |
3615 | |
3616 | if (walk->batched) { |
			spin_lock_irq(&lruvec->lru_lock);
			reset_batch_size(lruvec, walk);
			spin_unlock_irq(&lruvec->lru_lock);
3620 | } |
3621 | |
3622 | cond_resched(); |
3623 | } while (err == -EAGAIN); |
3624 | } |
3625 | |
3626 | static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc) |
3627 | { |
3628 | struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; |
3629 | |
3630 | if (pgdat && current_is_kswapd()) { |
3631 | VM_WARN_ON_ONCE(walk); |
3632 | |
3633 | walk = &pgdat->mm_walk; |
3634 | } else if (!walk && force_alloc) { |
3635 | VM_WARN_ON_ONCE(current_is_kswapd()); |
3636 | |
		walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
3638 | } |
3639 | |
3640 | current->reclaim_state->mm_walk = walk; |
3641 | |
3642 | return walk; |
3643 | } |
3644 | |
3645 | static void clear_mm_walk(void) |
3646 | { |
3647 | struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; |
3648 | |
3649 | VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); |
3650 | VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); |
3651 | |
3652 | current->reclaim_state->mm_walk = NULL; |
3653 | |
3654 | if (!current_is_kswapd()) |
		kfree(walk);
3656 | } |
3657 | |
3658 | static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) |
3659 | { |
3660 | int zone; |
3661 | int remaining = MAX_LRU_BATCH; |
3662 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3664 | |
3665 | if (type == LRU_GEN_ANON && !can_swap) |
3666 | goto done; |
3667 | |
3668 | /* prevent cold/hot inversion if force_scan is true */ |
3669 | for (zone = 0; zone < MAX_NR_ZONES; zone++) { |
3670 | struct list_head *head = &lrugen->folios[old_gen][type][zone]; |
3671 | |
3672 | while (!list_empty(head)) { |
3673 | struct folio *folio = lru_to_folio(head); |
3674 | |
3675 | VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); |
3676 | VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); |
3677 | VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); |
3678 | VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); |
3679 | |
3680 | new_gen = folio_inc_gen(lruvec, folio, false); |
3681 | list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); |
3682 | |
3683 | if (!--remaining) |
3684 | return false; |
3685 | } |
3686 | } |
3687 | done: |
	reset_ctrl_pos(lruvec, type, true);
3689 | WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); |
3690 | |
3691 | return true; |
3692 | } |
3693 | |
3694 | static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) |
3695 | { |
3696 | int gen, type, zone; |
3697 | bool success = false; |
3698 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
3699 | DEFINE_MIN_SEQ(lruvec); |
3700 | |
3701 | VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); |
3702 | |
3703 | /* find the oldest populated generation */ |
3704 | for (type = !can_swap; type < ANON_AND_FILE; type++) { |
3705 | while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { |
			gen = lru_gen_from_seq(min_seq[type]);
3707 | |
3708 | for (zone = 0; zone < MAX_NR_ZONES; zone++) { |
3709 | if (!list_empty(&lrugen->folios[gen][type][zone])) |
3710 | goto next; |
3711 | } |
3712 | |
3713 | min_seq[type]++; |
3714 | } |
3715 | next: |
3716 | ; |
3717 | } |
3718 | |
3719 | /* see the comment on lru_gen_folio */ |
3720 | if (can_swap) { |
3721 | min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]); |
3722 | min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); |
3723 | } |
3724 | |
3725 | for (type = !can_swap; type < ANON_AND_FILE; type++) { |
3726 | if (min_seq[type] == lrugen->min_seq[type]) |
3727 | continue; |
3728 | |
		reset_ctrl_pos(lruvec, type, true);
3730 | WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); |
3731 | success = true; |
3732 | } |
3733 | |
3734 | return success; |
3735 | } |
3736 | |
3737 | static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan) |
3738 | { |
3739 | int prev, next; |
3740 | int type, zone; |
3741 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
3742 | restart: |
	spin_lock_irq(&lruvec->lru_lock);
3744 | |
3745 | VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); |
3746 | |
3747 | for (type = ANON_AND_FILE - 1; type >= 0; type--) { |
3748 | if (get_nr_gens(lruvec, type) != MAX_NR_GENS) |
3749 | continue; |
3750 | |
3751 | VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap)); |
3752 | |
3753 | if (inc_min_seq(lruvec, type, can_swap)) |
3754 | continue; |
3755 | |
		spin_unlock_irq(&lruvec->lru_lock);
3757 | cond_resched(); |
3758 | goto restart; |
3759 | } |
3760 | |
3761 | /* |
3762 | * Update the active/inactive LRU sizes for compatibility. Both sides of |
3763 | * the current max_seq need to be covered, since max_seq+1 can overlap |
3764 | * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do |
3765 | * overlap, cold/hot inversion happens. |
3766 | */ |
	prev = lru_gen_from_seq(lrugen->max_seq - 1);
	next = lru_gen_from_seq(lrugen->max_seq + 1);
3769 | |
3770 | for (type = 0; type < ANON_AND_FILE; type++) { |
3771 | for (zone = 0; zone < MAX_NR_ZONES; zone++) { |
3772 | enum lru_list lru = type * LRU_INACTIVE_FILE; |
3773 | long delta = lrugen->nr_pages[prev][type][zone] - |
3774 | lrugen->nr_pages[next][type][zone]; |
3775 | |
3776 | if (!delta) |
3777 | continue; |
3778 | |
3779 | __update_lru_size(lruvec, lru, zone, delta); |
3780 | __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); |
3781 | } |
3782 | } |
3783 | |
3784 | for (type = 0; type < ANON_AND_FILE; type++) |
		reset_ctrl_pos(lruvec, type, false);
3786 | |
3787 | WRITE_ONCE(lrugen->timestamps[next], jiffies); |
3788 | /* make sure preceding modifications appear */ |
3789 | smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); |
3790 | |
	spin_unlock_irq(&lruvec->lru_lock);
3792 | } |
3793 | |
3794 | static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, |
3795 | struct scan_control *sc, bool can_swap, bool force_scan) |
3796 | { |
3797 | bool success; |
3798 | struct lru_gen_mm_walk *walk; |
3799 | struct mm_struct *mm = NULL; |
3800 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
3801 | |
3802 | VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq)); |
3803 | |
3804 | /* see the comment in iterate_mm_list() */ |
3805 | if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) { |
3806 | success = false; |
3807 | goto done; |
3808 | } |
3809 | |
3810 | /* |
3811 | * If the hardware doesn't automatically set the accessed bit, fallback |
3812 | * to lru_gen_look_around(), which only clears the accessed bit in a |
3813 | * handful of PTEs. Spreading the work out over a period of time usually |
3814 | * is less efficient, but it avoids bursty page faults. |
3815 | */ |
3816 | if (!should_walk_mmu()) { |
3817 | success = iterate_mm_list_nowalk(lruvec, max_seq); |
3818 | goto done; |
3819 | } |
3820 | |
	walk = set_mm_walk(NULL, true);
3822 | if (!walk) { |
3823 | success = iterate_mm_list_nowalk(lruvec, max_seq); |
3824 | goto done; |
3825 | } |
3826 | |
3827 | walk->lruvec = lruvec; |
3828 | walk->max_seq = max_seq; |
3829 | walk->can_swap = can_swap; |
3830 | walk->force_scan = force_scan; |
3831 | |
3832 | do { |
		success = iterate_mm_list(lruvec, walk, &mm);
3834 | if (mm) |
3835 | walk_mm(lruvec, mm, walk); |
3836 | } while (mm); |
3837 | done: |
3838 | if (success) |
3839 | inc_max_seq(lruvec, can_swap, force_scan); |
3840 | |
3841 | return success; |
3842 | } |
3843 | |
3844 | /****************************************************************************** |
3845 | * working set protection |
3846 | ******************************************************************************/ |
3847 | |
3848 | static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc) |
3849 | { |
3850 | int gen, type, zone; |
3851 | unsigned long total = 0; |
3852 | bool can_swap = get_swappiness(lruvec, sc); |
3853 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
3854 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
3855 | DEFINE_MAX_SEQ(lruvec); |
3856 | DEFINE_MIN_SEQ(lruvec); |
3857 | |
3858 | for (type = !can_swap; type < ANON_AND_FILE; type++) { |
3859 | unsigned long seq; |
3860 | |
3861 | for (seq = min_seq[type]; seq <= max_seq; seq++) { |
3862 | gen = lru_gen_from_seq(seq); |
3863 | |
3864 | for (zone = 0; zone < MAX_NR_ZONES; zone++) |
3865 | total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); |
3866 | } |
3867 | } |
3868 | |
3869 | /* whether the size is big enough to be helpful */ |
3870 | return mem_cgroup_online(memcg) ? (total >> sc->priority) : total; |
3871 | } |
3872 | |
3873 | static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc, |
3874 | unsigned long min_ttl) |
3875 | { |
3876 | int gen; |
3877 | unsigned long birth; |
3878 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
3879 | DEFINE_MIN_SEQ(lruvec); |
3880 | |
3881 | /* see the comment on lru_gen_folio */ |
	gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
3883 | birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); |
3884 | |
3885 | if (time_is_after_jiffies(birth + min_ttl)) |
3886 | return false; |
3887 | |
3888 | if (!lruvec_is_sizable(lruvec, sc)) |
3889 | return false; |
3890 | |
3891 | mem_cgroup_calculate_protection(NULL, memcg); |
3892 | |
3893 | return !mem_cgroup_below_min(NULL, memcg); |
3894 | } |
3895 | |
3896 | /* to protect the working set of the last N jiffies */ |
3897 | static unsigned long lru_gen_min_ttl __read_mostly; |
3898 | |
3899 | static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) |
3900 | { |
3901 | struct mem_cgroup *memcg; |
3902 | unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl); |
3903 | |
3904 | VM_WARN_ON_ONCE(!current_is_kswapd()); |
3905 | |
3906 | /* check the order to exclude compaction-induced reclaim */ |
3907 | if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY) |
3908 | return; |
3909 | |
3910 | memcg = mem_cgroup_iter(NULL, NULL, NULL); |
3911 | do { |
3912 | struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); |
3913 | |
3914 | if (lruvec_is_reclaimable(lruvec, sc, min_ttl)) { |
3915 | mem_cgroup_iter_break(NULL, memcg); |
3916 | return; |
3917 | } |
3918 | |
3919 | cond_resched(); |
3920 | } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); |
3921 | |
3922 | /* |
3923 | * The main goal is to OOM kill if every generation from all memcgs is |
3924 | * younger than min_ttl. However, another possibility is all memcgs are |
3925 | * either too small or below min. |
3926 | */ |
	if (mutex_trylock(&oom_lock)) {
3928 | struct oom_control oc = { |
3929 | .gfp_mask = sc->gfp_mask, |
3930 | }; |
3931 | |
		out_of_memory(&oc);

		mutex_unlock(&oom_lock);
3935 | } |
3936 | } |
3937 | |
3938 | /****************************************************************************** |
3939 | * rmap/PT walk feedback |
3940 | ******************************************************************************/ |
3941 | |
3942 | /* |
3943 | * This function exploits spatial locality when shrink_folio_list() walks the |
3944 | * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If |
3945 | * the scan was done cacheline efficiently, it adds the PMD entry pointing to |
3946 | * the PTE table to the Bloom filter. This forms a feedback loop between the |
3947 | * eviction and the aging. |
3948 | */ |
3949 | void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) |
3950 | { |
3951 | int i; |
3952 | unsigned long start; |
3953 | unsigned long end; |
3954 | struct lru_gen_mm_walk *walk; |
3955 | int young = 0; |
3956 | pte_t *pte = pvmw->pte; |
3957 | unsigned long addr = pvmw->address; |
	struct folio *folio = pfn_folio(pvmw->pfn);
3959 | bool can_swap = !folio_is_file_lru(folio); |
3960 | struct mem_cgroup *memcg = folio_memcg(folio); |
3961 | struct pglist_data *pgdat = folio_pgdat(folio); |
3962 | struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); |
3963 | DEFINE_MAX_SEQ(lruvec); |
	int old_gen, new_gen = lru_gen_from_seq(max_seq);
3965 | |
3966 | lockdep_assert_held(pvmw->ptl); |
3967 | VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio); |
3968 | |
	if (spin_is_contended(pvmw->ptl))
3970 | return; |
3971 | |
3972 | /* avoid taking the LRU lock under the PTL when possible */ |
3973 | walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL; |
3974 | |
3975 | start = max(addr & PMD_MASK, pvmw->vma->vm_start); |
3976 | end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1; |
3977 | |
3978 | if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { |
3979 | if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2) |
3980 | end = start + MIN_LRU_BATCH * PAGE_SIZE; |
3981 | else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2) |
3982 | start = end - MIN_LRU_BATCH * PAGE_SIZE; |
3983 | else { |
3984 | start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2; |
3985 | end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2; |
3986 | } |
3987 | } |
3988 | |
3989 | /* folio_update_gen() requires stable folio_memcg() */ |
3990 | if (!mem_cgroup_trylock_pages(memcg)) |
3991 | return; |
3992 | |
3993 | arch_enter_lazy_mmu_mode(); |
3994 | |
3995 | pte -= (addr - start) / PAGE_SIZE; |
3996 | |
3997 | for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) { |
3998 | unsigned long pfn; |
		pte_t ptent = ptep_get(pte + i);

		pfn = get_pte_pfn(ptent, pvmw->vma, addr);
		if (pfn == -1)
			continue;

		if (!pte_young(ptent))
4006 | continue; |
4007 | |
4008 | folio = get_pfn_folio(pfn, memcg, pgdat, can_swap); |
4009 | if (!folio) |
4010 | continue; |
4011 | |
		if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
4013 | VM_WARN_ON_ONCE(true); |
4014 | |
4015 | young++; |
4016 | |
		if (pte_dirty(ptent) && !folio_test_dirty(folio) &&
4018 | !(folio_test_anon(folio) && folio_test_swapbacked(folio) && |
4019 | !folio_test_swapcache(folio))) |
4020 | folio_mark_dirty(folio); |
4021 | |
4022 | if (walk) { |
			old_gen = folio_update_gen(folio, new_gen);
4024 | if (old_gen >= 0 && old_gen != new_gen) |
4025 | update_batch_size(walk, folio, old_gen, new_gen); |
4026 | |
4027 | continue; |
4028 | } |
4029 | |
4030 | old_gen = folio_lru_gen(folio); |
4031 | if (old_gen < 0) |
4032 | folio_set_referenced(folio); |
4033 | else if (old_gen != new_gen) |
4034 | folio_activate(folio); |
4035 | } |
4036 | |
4037 | arch_leave_lazy_mmu_mode(); |
4038 | mem_cgroup_unlock_pages(); |
4039 | |
4040 | /* feedback from rmap walkers to page table walkers */ |
	if (suitable_to_scan(i, young))
		update_bloom_filter(lruvec, max_seq, pvmw->pmd);
4043 | } |
4044 | |
4045 | /****************************************************************************** |
4046 | * memcg LRU |
4047 | ******************************************************************************/ |
4048 | |
4049 | /* see the comment on MEMCG_NR_GENS */ |
4050 | enum { |
4051 | MEMCG_LRU_NOP, |
4052 | MEMCG_LRU_HEAD, |
4053 | MEMCG_LRU_TAIL, |
4054 | MEMCG_LRU_OLD, |
4055 | MEMCG_LRU_YOUNG, |
4056 | }; |
4057 | |
4058 | #ifdef CONFIG_MEMCG |
4059 | |
4060 | static int lru_gen_memcg_seg(struct lruvec *lruvec) |
4061 | { |
4062 | return READ_ONCE(lruvec->lrugen.seg); |
4063 | } |
4064 | |
4065 | static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op) |
4066 | { |
4067 | int seg; |
4068 | int old, new; |
4069 | unsigned long flags; |
4070 | int bin = get_random_u32_below(MEMCG_NR_BINS); |
4071 | struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
4072 | |
4073 | spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); |
4074 | |
4075 | VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); |
4076 | |
4077 | seg = 0; |
4078 | new = old = lruvec->lrugen.gen; |
4079 | |
4080 | /* see the comment on MEMCG_NR_GENS */ |
4081 | if (op == MEMCG_LRU_HEAD) |
4082 | seg = MEMCG_LRU_HEAD; |
4083 | else if (op == MEMCG_LRU_TAIL) |
4084 | seg = MEMCG_LRU_TAIL; |
4085 | else if (op == MEMCG_LRU_OLD) |
4086 | new = get_memcg_gen(pgdat->memcg_lru.seq); |
4087 | else if (op == MEMCG_LRU_YOUNG) |
4088 | new = get_memcg_gen(pgdat->memcg_lru.seq + 1); |
4089 | else |
4090 | VM_WARN_ON_ONCE(true); |
4091 | |
	hlist_nulls_del_rcu(&lruvec->lrugen.list);

	if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD)
		hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
	else
		hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4098 | |
4099 | pgdat->memcg_lru.nr_memcgs[old]--; |
4100 | pgdat->memcg_lru.nr_memcgs[new]++; |
4101 | |
4102 | lruvec->lrugen.gen = new; |
4103 | WRITE_ONCE(lruvec->lrugen.seg, seg); |
4104 | |
4105 | if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) |
4106 | WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); |
4107 | |
	spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
4109 | } |
4110 | |
4111 | void lru_gen_online_memcg(struct mem_cgroup *memcg) |
4112 | { |
4113 | int gen; |
4114 | int nid; |
4115 | int bin = get_random_u32_below(MEMCG_NR_BINS); |
4116 | |
4117 | for_each_node(nid) { |
4118 | struct pglist_data *pgdat = NODE_DATA(nid); |
4119 | struct lruvec *lruvec = get_lruvec(memcg, nid); |
4120 | |
		spin_lock_irq(&pgdat->memcg_lru.lock);

		VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));

		gen = get_memcg_gen(pgdat->memcg_lru.seq);

		hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);
		pgdat->memcg_lru.nr_memcgs[gen]++;

		lruvec->lrugen.gen = gen;

		spin_unlock_irq(&pgdat->memcg_lru.lock);
4133 | } |
4134 | } |
4135 | |
4136 | void lru_gen_offline_memcg(struct mem_cgroup *memcg) |
4137 | { |
4138 | int nid; |
4139 | |
4140 | for_each_node(nid) { |
4141 | struct lruvec *lruvec = get_lruvec(memcg, nid); |
4142 | |
		lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD);
4144 | } |
4145 | } |
4146 | |
4147 | void lru_gen_release_memcg(struct mem_cgroup *memcg) |
4148 | { |
4149 | int gen; |
4150 | int nid; |
4151 | |
4152 | for_each_node(nid) { |
4153 | struct pglist_data *pgdat = NODE_DATA(nid); |
4154 | struct lruvec *lruvec = get_lruvec(memcg, nid); |
4155 | |
		spin_lock_irq(&pgdat->memcg_lru.lock);

		if (hlist_nulls_unhashed(&lruvec->lrugen.list))
			goto unlock;

		gen = lruvec->lrugen.gen;

		hlist_nulls_del_init_rcu(&lruvec->lrugen.list);
		pgdat->memcg_lru.nr_memcgs[gen]--;

		if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
			WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
unlock:
		spin_unlock_irq(&pgdat->memcg_lru.lock);
4170 | } |
4171 | } |
4172 | |
4173 | void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) |
4174 | { |
4175 | struct lruvec *lruvec = get_lruvec(memcg, nid); |
4176 | |
4177 | /* see the comment on MEMCG_NR_GENS */ |
4178 | if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD) |
		lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
4180 | } |
4181 | |
4182 | #else /* !CONFIG_MEMCG */ |
4183 | |
4184 | static int lru_gen_memcg_seg(struct lruvec *lruvec) |
4185 | { |
4186 | return 0; |
4187 | } |
4188 | |
4189 | #endif |
4190 | |
4191 | /****************************************************************************** |
4192 | * the eviction |
4193 | ******************************************************************************/ |
4194 | |
4195 | static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc, |
4196 | int tier_idx) |
4197 | { |
4198 | bool success; |
4199 | int gen = folio_lru_gen(folio); |
4200 | int type = folio_is_file_lru(folio); |
4201 | int zone = folio_zonenum(folio); |
4202 | int delta = folio_nr_pages(folio); |
4203 | int refs = folio_lru_refs(folio); |
4204 | int tier = lru_tier_from_refs(refs); |
4205 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
4206 | |
4207 | VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio); |
4208 | |
4209 | /* unevictable */ |
4210 | if (!folio_evictable(folio)) { |
		success = lru_gen_del_folio(lruvec, folio, true);
		VM_WARN_ON_ONCE_FOLIO(!success, folio);
		folio_set_unevictable(folio);
		lruvec_add_folio(lruvec, folio);
		__count_vm_events(UNEVICTABLE_PGCULLED, delta);
4216 | return true; |
4217 | } |
4218 | |
4219 | /* dirty lazyfree */ |
4220 | if (type == LRU_GEN_FILE && folio_test_anon(folio) && folio_test_dirty(folio)) { |
		success = lru_gen_del_folio(lruvec, folio, true);
4222 | VM_WARN_ON_ONCE_FOLIO(!success, folio); |
4223 | folio_set_swapbacked(folio); |
4224 | lruvec_add_folio_tail(lruvec, folio); |
4225 | return true; |
4226 | } |
4227 | |
4228 | /* promoted */ |
	if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
		list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
4231 | return true; |
4232 | } |
4233 | |
4234 | /* protected */ |
4235 | if (tier > tier_idx) { |
		int hist = lru_hist_from_seq(lrugen->min_seq[type]);

		gen = folio_inc_gen(lruvec, folio, false);
		list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
4240 | |
4241 | WRITE_ONCE(lrugen->protected[hist][type][tier - 1], |
4242 | lrugen->protected[hist][type][tier - 1] + delta); |
4243 | return true; |
4244 | } |
4245 | |
4246 | /* ineligible */ |
4247 | if (zone > sc->reclaim_idx || skip_cma(folio, sc)) { |
		gen = folio_inc_gen(lruvec, folio, false);
		list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
4250 | return true; |
4251 | } |
4252 | |
4253 | /* waiting for writeback */ |
4254 | if (folio_test_locked(folio) || folio_test_writeback(folio) || |
4255 | (type == LRU_GEN_FILE && folio_test_dirty(folio))) { |
		gen = folio_inc_gen(lruvec, folio, true);
		list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
4258 | return true; |
4259 | } |
4260 | |
4261 | return false; |
4262 | } |
4263 | |
4264 | static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc) |
4265 | { |
4266 | bool success; |
4267 | |
4268 | /* swapping inhibited */ |
4269 | if (!(sc->gfp_mask & __GFP_IO) && |
4270 | (folio_test_dirty(folio) || |
4271 | (folio_test_anon(folio) && !folio_test_swapcache(folio)))) |
4272 | return false; |
4273 | |
4274 | /* raced with release_pages() */ |
4275 | if (!folio_try_get(folio)) |
4276 | return false; |
4277 | |
4278 | /* raced with another isolation */ |
4279 | if (!folio_test_clear_lru(folio)) { |
4280 | folio_put(folio); |
4281 | return false; |
4282 | } |
4283 | |
4284 | /* see the comment on MAX_NR_TIERS */ |
4285 | if (!folio_test_referenced(folio)) |
4286 | set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); |
4287 | |
4288 | /* for shrink_folio_list() */ |
4289 | folio_clear_reclaim(folio); |
4290 | folio_clear_referenced(folio); |
4291 | |
	success = lru_gen_del_folio(lruvec, folio, true);
4293 | VM_WARN_ON_ONCE_FOLIO(!success, folio); |
4294 | |
4295 | return true; |
4296 | } |
4297 | |
4298 | static int scan_folios(struct lruvec *lruvec, struct scan_control *sc, |
4299 | int type, int tier, struct list_head *list) |
4300 | { |
4301 | int i; |
4302 | int gen; |
4303 | enum vm_event_item item; |
4304 | int sorted = 0; |
4305 | int scanned = 0; |
4306 | int isolated = 0; |
4307 | int skipped = 0; |
4308 | int remaining = MAX_LRU_BATCH; |
4309 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
4310 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
4311 | |
4312 | VM_WARN_ON_ONCE(!list_empty(list)); |
4313 | |
4314 | if (get_nr_gens(lruvec, type) == MIN_NR_GENS) |
4315 | return 0; |
4316 | |
	gen = lru_gen_from_seq(lrugen->min_seq[type]);
4318 | |
4319 | for (i = MAX_NR_ZONES; i > 0; i--) { |
4320 | LIST_HEAD(moved); |
4321 | int skipped_zone = 0; |
4322 | int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; |
4323 | struct list_head *head = &lrugen->folios[gen][type][zone]; |
4324 | |
4325 | while (!list_empty(head)) { |
4326 | struct folio *folio = lru_to_folio(head); |
4327 | int delta = folio_nr_pages(folio); |
4328 | |
4329 | VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); |
4330 | VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); |
4331 | VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); |
4332 | VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); |
4333 | |
4334 | scanned += delta; |
4335 | |
			if (sort_folio(lruvec, folio, sc, tier))
				sorted += delta;
			else if (isolate_folio(lruvec, folio, sc)) {
				list_add(&folio->lru, list);
				isolated += delta;
			} else {
				list_move(&folio->lru, &moved);
4343 | skipped_zone += delta; |
4344 | } |
4345 | |
4346 | if (!--remaining || max(isolated, skipped_zone) >= MIN_LRU_BATCH) |
4347 | break; |
4348 | } |
4349 | |
4350 | if (skipped_zone) { |
			list_splice(&moved, head);
4352 | __count_zid_vm_events(PGSCAN_SKIP, zone, skipped_zone); |
4353 | skipped += skipped_zone; |
4354 | } |
4355 | |
4356 | if (!remaining || isolated >= MIN_LRU_BATCH) |
4357 | break; |
4358 | } |
4359 | |
4360 | item = PGSCAN_KSWAPD + reclaimer_offset(); |
4361 | if (!cgroup_reclaim(sc)) { |
		__count_vm_events(item, isolated);
		__count_vm_events(PGREFILL, sorted);
	}
	__count_memcg_events(memcg, item, isolated);
	__count_memcg_events(memcg, PGREFILL, sorted);
	__count_vm_events(PGSCAN_ANON + type, isolated);
	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH,
				    scanned, skipped, isolated,
				    type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
4371 | |
4372 | /* |
4373 | * There might not be eligible folios due to reclaim_idx. Check the |
4374 | * remaining to prevent livelock if it's not making progress. |
4375 | */ |
4376 | return isolated || !remaining ? scanned : 0; |
4377 | } |
4378 | |
4379 | static int get_tier_idx(struct lruvec *lruvec, int type) |
4380 | { |
4381 | int tier; |
4382 | struct ctrl_pos sp, pv; |
4383 | |
4384 | /* |
4385 | * To leave a margin for fluctuations, use a larger gain factor (1:2). |
4386 | * This value is chosen because any other tier would have at least twice |
4387 | * as many refaults as the first tier. |
4388 | */ |
	read_ctrl_pos(lruvec, type, 0, 1, &sp);
	for (tier = 1; tier < MAX_NR_TIERS; tier++) {
		read_ctrl_pos(lruvec, type, tier, 2, &pv);
		if (!positive_ctrl_err(&sp, &pv))
4393 | break; |
4394 | } |
4395 | |
4396 | return tier - 1; |
4397 | } |
4398 | |
4399 | static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx) |
4400 | { |
4401 | int type, tier; |
4402 | struct ctrl_pos sp, pv; |
4403 | int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness }; |
4404 | |
4405 | /* |
4406 | * Compare the first tier of anon with that of file to determine which |
4407 | * type to scan. Also need to compare other tiers of the selected type |
4408 | * with the first tier of the other type to determine the last tier (of |
4409 | * the selected type) to evict. |
4410 | */ |
	read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp);
	read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv);
	type = positive_ctrl_err(&sp, &pv);

	read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp);
	for (tier = 1; tier < MAX_NR_TIERS; tier++) {
		read_ctrl_pos(lruvec, type, tier, gain[type], &pv);
		if (!positive_ctrl_err(&sp, &pv))
4419 | break; |
4420 | } |
4421 | |
4422 | *tier_idx = tier - 1; |
4423 | |
4424 | return type; |
4425 | } |
4426 | |
4427 | static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness, |
4428 | int *type_scanned, struct list_head *list) |
4429 | { |
4430 | int i; |
4431 | int type; |
4432 | int scanned; |
4433 | int tier = -1; |
4434 | DEFINE_MIN_SEQ(lruvec); |
4435 | |
4436 | /* |
4437 | * Try to make the obvious choice first. When anon and file are both |
4438 | * available from the same generation, interpret swappiness 1 as file |
4439 | * first and 200 as anon first. |
4440 | */ |
4441 | if (!swappiness) |
4442 | type = LRU_GEN_FILE; |
4443 | else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE]) |
4444 | type = LRU_GEN_ANON; |
4445 | else if (swappiness == 1) |
4446 | type = LRU_GEN_FILE; |
4447 | else if (swappiness == 200) |
4448 | type = LRU_GEN_ANON; |
4449 | else |
		type = get_type_to_scan(lruvec, swappiness, &tier);
4451 | |
4452 | for (i = !swappiness; i < ANON_AND_FILE; i++) { |
4453 | if (tier < 0) |
4454 | tier = get_tier_idx(lruvec, type); |
4455 | |
4456 | scanned = scan_folios(lruvec, sc, type, tier, list); |
4457 | if (scanned) |
4458 | break; |
4459 | |
4460 | type = !type; |
4461 | tier = -1; |
4462 | } |
4463 | |
4464 | *type_scanned = type; |
4465 | |
4466 | return scanned; |
4467 | } |
4468 | |
4469 | static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness) |
4470 | { |
4471 | int type; |
4472 | int scanned; |
4473 | int reclaimed; |
4474 | LIST_HEAD(list); |
4475 | LIST_HEAD(clean); |
4476 | struct folio *folio; |
4477 | struct folio *next; |
4478 | enum vm_event_item item; |
4479 | struct reclaim_stat stat; |
4480 | struct lru_gen_mm_walk *walk; |
4481 | bool skip_retry = false; |
4482 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
4483 | struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
4484 | |
	spin_lock_irq(&lruvec->lru_lock);

	scanned = isolate_folios(lruvec, sc, swappiness, &type, &list);

	scanned += try_to_inc_min_seq(lruvec, swappiness);

	if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS)
		scanned = 0;

	spin_unlock_irq(&lruvec->lru_lock);

	if (list_empty(&list))
		return scanned;
retry:
	reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false);
4500 | sc->nr_reclaimed += reclaimed; |
	trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
			scanned, reclaimed, &stat, sc->priority,
			type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
4504 | |
4505 | list_for_each_entry_safe_reverse(folio, next, &list, lru) { |
4506 | if (!folio_evictable(folio)) { |
			list_del(&folio->lru);
4508 | folio_putback_lru(folio); |
4509 | continue; |
4510 | } |
4511 | |
4512 | if (folio_test_reclaim(folio) && |
4513 | (folio_test_dirty(folio) || folio_test_writeback(folio))) { |
4514 | /* restore LRU_REFS_FLAGS cleared by isolate_folio() */ |
4515 | if (folio_test_workingset(folio)) |
4516 | folio_set_referenced(folio); |
4517 | continue; |
4518 | } |
4519 | |
4520 | if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) || |
4521 | folio_mapped(folio) || folio_test_locked(folio) || |
4522 | folio_test_dirty(folio) || folio_test_writeback(folio)) { |
4523 | /* don't add rejected folios to the oldest generation */ |
4524 | set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, |
4525 | BIT(PG_active)); |
4526 | continue; |
4527 | } |
4528 | |
4529 | /* retry folios that may have missed folio_rotate_reclaimable() */ |
		list_move(&folio->lru, &clean);
4531 | sc->nr_scanned -= folio_nr_pages(folio); |
4532 | } |
4533 | |
	spin_lock_irq(&lruvec->lru_lock);

	move_folios_to_lru(lruvec, &list);
4537 | |
4538 | walk = current->reclaim_state->mm_walk; |
4539 | if (walk && walk->batched) |
4540 | reset_batch_size(lruvec, walk); |
4541 | |
4542 | item = PGSTEAL_KSWAPD + reclaimer_offset(); |
4543 | if (!cgroup_reclaim(sc)) |
		__count_vm_events(item, reclaimed);
	__count_memcg_events(memcg, item, reclaimed);
	__count_vm_events(PGSTEAL_ANON + type, reclaimed);

	spin_unlock_irq(&lruvec->lru_lock);

	mem_cgroup_uncharge_list(&list);
	free_unref_page_list(&list);

	INIT_LIST_HEAD(&list);
	list_splice_init(&clean, &list);

	if (!list_empty(&list)) {
4557 | skip_retry = true; |
4558 | goto retry; |
4559 | } |
4560 | |
4561 | return scanned; |
4562 | } |
4563 | |
4564 | static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, |
4565 | struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan) |
4566 | { |
4567 | int gen, type, zone; |
4568 | unsigned long old = 0; |
4569 | unsigned long young = 0; |
4570 | unsigned long total = 0; |
4571 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
4572 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
4573 | DEFINE_MIN_SEQ(lruvec); |
4574 | |
4575 | /* whether this lruvec is completely out of cold folios */ |
4576 | if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) { |
4577 | *nr_to_scan = 0; |
4578 | return true; |
4579 | } |
4580 | |
4581 | for (type = !can_swap; type < ANON_AND_FILE; type++) { |
4582 | unsigned long seq; |
4583 | |
4584 | for (seq = min_seq[type]; seq <= max_seq; seq++) { |
4585 | unsigned long size = 0; |
4586 | |
4587 | gen = lru_gen_from_seq(seq); |
4588 | |
4589 | for (zone = 0; zone < MAX_NR_ZONES; zone++) |
4590 | size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); |
4591 | |
4592 | total += size; |
4593 | if (seq == max_seq) |
4594 | young += size; |
4595 | else if (seq + MIN_NR_GENS == max_seq) |
4596 | old += size; |
4597 | } |
4598 | } |
4599 | |
4600 | /* try to scrape all its memory if this memcg was deleted */ |
4601 | *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total; |
4602 | |
4603 | /* |
4604 | * The aging tries to be lazy to reduce the overhead, while the eviction |
4605 | * stalls when the number of generations reaches MIN_NR_GENS. Hence, the |
4606 | * ideal number of generations is MIN_NR_GENS+1. |
4607 | */ |
4608 | if (min_seq[!can_swap] + MIN_NR_GENS < max_seq) |
4609 | return false; |
4610 | |
4611 | /* |
4612 | * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1) |
4613 | * of the total number of pages for each generation. A reasonable range |
4614 | * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The |
4615 | * aging cares about the upper bound of hot pages, while the eviction |
4616 | * cares about the lower bound of cold pages. |
4617 | */ |
4618 | if (young * MIN_NR_GENS > total) |
4619 | return true; |
4620 | if (old * (MIN_NR_GENS + 2) < total) |
4621 | return true; |
4622 | |
4623 | return false; |
4624 | } |
4625 | |
4626 | /* |
4627 | * For future optimizations: |
4628 | * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg |
4629 | * reclaim. |
4630 | */ |
4631 | static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool can_swap) |
4632 | { |
4633 | unsigned long nr_to_scan; |
4634 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
4635 | DEFINE_MAX_SEQ(lruvec); |
4636 | |
	if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
		return 0;

	if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
4641 | return nr_to_scan; |
4642 | |
4643 | /* skip the aging path at the default priority */ |
4644 | if (sc->priority == DEF_PRIORITY) |
4645 | return nr_to_scan; |
4646 | |
4647 | /* skip this lruvec as it's low on cold folios */ |
	return try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false) ? -1 : 0;
4649 | } |
4650 | |
4651 | static unsigned long get_nr_to_reclaim(struct scan_control *sc) |
4652 | { |
4653 | /* don't abort memcg reclaim to ensure fairness */ |
4654 | if (!root_reclaim(sc)) |
4655 | return -1; |
4656 | |
4657 | return max(sc->nr_to_reclaim, compact_gap(sc->order)); |
4658 | } |
4659 | |
4660 | static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) |
4661 | { |
4662 | long nr_to_scan; |
4663 | unsigned long scanned = 0; |
4664 | unsigned long nr_to_reclaim = get_nr_to_reclaim(sc); |
4665 | int swappiness = get_swappiness(lruvec, sc); |
4666 | |
4667 | /* clean file folios are more likely to exist */ |
4668 | if (swappiness && !(sc->gfp_mask & __GFP_IO)) |
4669 | swappiness = 1; |
4670 | |
4671 | while (true) { |
4672 | int delta; |
4673 | |
		nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
4675 | if (nr_to_scan <= 0) |
4676 | break; |
4677 | |
4678 | delta = evict_folios(lruvec, sc, swappiness); |
4679 | if (!delta) |
4680 | break; |
4681 | |
4682 | scanned += delta; |
4683 | if (scanned >= nr_to_scan) |
4684 | break; |
4685 | |
4686 | if (sc->nr_reclaimed >= nr_to_reclaim) |
4687 | break; |
4688 | |
4689 | cond_resched(); |
4690 | } |
4691 | |
4692 | /* whether try_to_inc_max_seq() was successful */ |
4693 | return nr_to_scan < 0; |
4694 | } |
4695 | |
4696 | static int shrink_one(struct lruvec *lruvec, struct scan_control *sc) |
4697 | { |
4698 | bool success; |
4699 | unsigned long scanned = sc->nr_scanned; |
4700 | unsigned long reclaimed = sc->nr_reclaimed; |
4701 | int seg = lru_gen_memcg_seg(lruvec); |
4702 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
4703 | struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
4704 | |
4705 | /* see the comment on MEMCG_NR_GENS */ |
4706 | if (!lruvec_is_sizable(lruvec, sc)) |
4707 | return seg != MEMCG_LRU_TAIL ? MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG; |
4708 | |
4709 | mem_cgroup_calculate_protection(NULL, memcg); |
4710 | |
4711 | if (mem_cgroup_below_min(NULL, memcg)) |
4712 | return MEMCG_LRU_YOUNG; |
4713 | |
4714 | if (mem_cgroup_below_low(NULL, memcg)) { |
4715 | /* see the comment on MEMCG_NR_GENS */ |
4716 | if (seg != MEMCG_LRU_TAIL) |
4717 | return MEMCG_LRU_TAIL; |
4718 | |
		memcg_memory_event(memcg, MEMCG_LOW);
	}

	success = try_to_shrink_lruvec(lruvec, sc);

	shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);

	if (!sc->proactive)
		vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
			   sc->nr_reclaimed - reclaimed);
4729 | |
4730 | flush_reclaim_state(sc); |
4731 | |
4732 | return success ? MEMCG_LRU_YOUNG : 0; |
4733 | } |
4734 | |
4735 | #ifdef CONFIG_MEMCG |
4736 | |
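/*
 * Walk the per-node memcg LRU: start from a random bin for fairness, scan
 * the oldest generation first, and reposition memcgs according to
 * shrink_one()'s return value. A race with lru_gen_rotate_memcg(), detected
 * via the nulls value at the end of the list, causes a restart.
 */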
4737 | static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) |
4738 | { |
4739 | int op; |
4740 | int gen; |
4741 | int bin; |
4742 | int first_bin; |
4743 | struct lruvec *lruvec; |
4744 | struct lru_gen_folio *lrugen; |
4745 | struct mem_cgroup *memcg; |
4746 | const struct hlist_nulls_node *pos; |
4747 | unsigned long nr_to_reclaim = get_nr_to_reclaim(sc); |
4748 | |
4749 | bin = first_bin = get_random_u32_below(MEMCG_NR_BINS); |
4750 | restart: |
4751 | op = 0; |
4752 | memcg = NULL; |
4753 | gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); |
4754 | |
4755 | rcu_read_lock(); |
4756 | |
4757 | hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { |
4758 | if (op) { |
4759 | lru_gen_rotate_memcg(lruvec, op); |
4760 | op = 0; |
4761 | } |
4762 | |
4763 | mem_cgroup_put(memcg); |
4764 | |
4765 | lruvec = container_of(lrugen, struct lruvec, lrugen); |
4766 | memcg = lruvec_memcg(lruvec); |
4767 | |
4768 | if (!mem_cgroup_tryget(memcg)) { |
4769 | lru_gen_release_memcg(memcg); |
4770 | memcg = NULL; |
4771 | continue; |
4772 | } |
4773 | |
4774 | rcu_read_unlock(); |
4775 | |
4776 | op = shrink_one(lruvec, sc); |
4777 | |
4778 | rcu_read_lock(); |
4779 | |
4780 | if (sc->nr_reclaimed >= nr_to_reclaim) |
4781 | break; |
4782 | } |
4783 | |
4784 | rcu_read_unlock(); |
4785 | |
4786 | if (op) |
4787 | lru_gen_rotate_memcg(lruvec, op); |
4788 | |
4789 | mem_cgroup_put(memcg); |
4790 | |
4791 | if (sc->nr_reclaimed >= nr_to_reclaim) |
4792 | return; |
4793 | |
4794 | /* restart if raced with lru_gen_rotate_memcg() */ |
	if (gen != get_nulls_value(pos))
4796 | goto restart; |
4797 | |
4798 | /* try the rest of the bins of the current generation */ |
4799 | bin = get_memcg_bin(bin + 1); |
4800 | if (bin != first_bin) |
4801 | goto restart; |
4802 | } |
4803 | |
4804 | static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) |
4805 | { |
4806 | struct blk_plug plug; |
4807 | |
4808 | VM_WARN_ON_ONCE(root_reclaim(sc)); |
4809 | VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); |
4810 | |
4811 | lru_add_drain(); |
4812 | |
4813 | blk_start_plug(&plug); |
4814 | |
	set_mm_walk(NULL, sc->proactive);
4816 | |
4817 | if (try_to_shrink_lruvec(lruvec, sc)) |
		lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
4819 | |
4820 | clear_mm_walk(); |
4821 | |
4822 | blk_finish_plug(&plug); |
4823 | } |
4824 | |
4825 | #else /* !CONFIG_MEMCG */ |
4826 | |
4827 | static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) |
4828 | { |
4829 | BUILD_BUG(); |
4830 | } |
4831 | |
4832 | static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) |
4833 | { |
4834 | BUILD_BUG(); |
4835 | } |
4836 | |
4837 | #endif |
4838 | |
4839 | static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc) |
4840 | { |
4841 | int priority; |
4842 | unsigned long reclaimable; |
4843 | struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); |
4844 | |
4845 | if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) |
4846 | return; |
4847 | /* |
4848 | * Determine the initial priority based on ((total / MEMCG_NR_GENS) >> |
4849 | * priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, where the |
4850 | * estimated reclaimed_to_scanned_ratio = inactive / total. |
4851 | */ |
	reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE);
	if (get_swappiness(lruvec, sc))
		reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON);
4855 | |
4856 | reclaimable /= MEMCG_NR_GENS; |
4857 | |
4858 | /* round down reclaimable and round up sc->nr_to_reclaim */ |
	priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1);
4860 | |
4861 | sc->priority = clamp(priority, 0, DEF_PRIORITY); |
4862 | } |
4863 | |
4864 | static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) |
4865 | { |
4866 | struct blk_plug plug; |
4867 | unsigned long reclaimed = sc->nr_reclaimed; |
4868 | |
4869 | VM_WARN_ON_ONCE(!root_reclaim(sc)); |
4870 | |
4871 | /* |
4872 | * Unmapped clean folios are already prioritized. Scanning for more of |
4873 | * them is likely futile and can cause high reclaim latency when there |
4874 | * is a large number of memcgs. |
4875 | */ |
4876 | if (!sc->may_writepage || !sc->may_unmap) |
4877 | goto done; |
4878 | |
4879 | lru_add_drain(); |
4880 | |
4881 | blk_start_plug(&plug); |
4882 | |
	set_mm_walk(pgdat, sc->proactive);
4884 | |
4885 | set_initial_priority(pgdat, sc); |
4886 | |
4887 | if (current_is_kswapd()) |
4888 | sc->nr_reclaimed = 0; |
4889 | |
4890 | if (mem_cgroup_disabled()) |
		shrink_one(&pgdat->__lruvec, sc);
4892 | else |
4893 | shrink_many(pgdat, sc); |
4894 | |
4895 | if (current_is_kswapd()) |
4896 | sc->nr_reclaimed += reclaimed; |
4897 | |
4898 | clear_mm_walk(); |
4899 | |
4900 | blk_finish_plug(&plug); |
4901 | done: |
4902 | /* kswapd should never fail */ |
4903 | pgdat->kswapd_failures = 0; |
4904 | } |
4905 | |
4906 | /****************************************************************************** |
4907 | * state change |
4908 | ******************************************************************************/ |
4909 | |
4910 | static bool __maybe_unused state_is_valid(struct lruvec *lruvec) |
4911 | { |
4912 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
4913 | |
4914 | if (lrugen->enabled) { |
4915 | enum lru_list lru; |
4916 | |
4917 | for_each_evictable_lru(lru) { |
			if (!list_empty(&lruvec->lists[lru]))
4919 | return false; |
4920 | } |
4921 | } else { |
4922 | int gen, type, zone; |
4923 | |
4924 | for_each_gen_type_zone(gen, type, zone) { |
4925 | if (!list_empty(&lrugen->folios[gen][type][zone])) |
4926 | return false; |
4927 | } |
4928 | } |
4929 | |
4930 | return true; |
4931 | } |
4932 | |
4933 | static bool fill_evictable(struct lruvec *lruvec) |
4934 | { |
4935 | enum lru_list lru; |
4936 | int remaining = MAX_LRU_BATCH; |
4937 | |
4938 | for_each_evictable_lru(lru) { |
4939 | int type = is_file_lru(lru); |
4940 | bool active = is_active_lru(lru); |
4941 | struct list_head *head = &lruvec->lists[lru]; |
4942 | |
4943 | while (!list_empty(head)) { |
4944 | bool success; |
4945 | struct folio *folio = lru_to_folio(head); |
4946 | |
4947 | VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); |
4948 | VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio); |
4949 | VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); |
4950 | VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); |
4951 | |
4952 | lruvec_del_folio(lruvec, folio); |
			success = lru_gen_add_folio(lruvec, folio, false);
4954 | VM_WARN_ON_ONCE(!success); |
4955 | |
4956 | if (!--remaining) |
4957 | return false; |
4958 | } |
4959 | } |
4960 | |
4961 | return true; |
4962 | } |
4963 | |
4964 | static bool drain_evictable(struct lruvec *lruvec) |
4965 | { |
4966 | int gen, type, zone; |
4967 | int remaining = MAX_LRU_BATCH; |
4968 | |
4969 | for_each_gen_type_zone(gen, type, zone) { |
4970 | struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; |
4971 | |
4972 | while (!list_empty(head)) { |
4973 | bool success; |
4974 | struct folio *folio = lru_to_folio(head); |
4975 | |
4976 | VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); |
4977 | VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); |
4978 | VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); |
4979 | VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); |
4980 | |
4981 | success = lru_gen_del_folio(lruvec, folio, false); |
4982 | VM_WARN_ON_ONCE(!success); |
4983 | lruvec_add_folio(lruvec, folio); |
4984 | |
4985 | if (!--remaining) |
4986 | return false; |
4987 | } |
4988 | } |
4989 | |
4990 | return true; |
4991 | } |
4992 | |
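/*
 * Flip the MGLRU core capability on or off. Folios are moved between the
 * classic active/inactive lists and the generational lists in MAX_LRU_BATCH
 * chunks, dropping lru_lock and rescheduling between chunks. The lock
 * ordering below (cgroup, CPU hotplug, memory hotplug, then state_mutex)
 * keeps the memcg and node iteration stable while the state changes.
 */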
4993 | static void lru_gen_change_state(bool enabled) |
4994 | { |
4995 | static DEFINE_MUTEX(state_mutex); |
4996 | |
4997 | struct mem_cgroup *memcg; |
4998 | |
4999 | cgroup_lock(); |
5000 | cpus_read_lock(); |
5001 | get_online_mems(); |
5002 | mutex_lock(&state_mutex); |
5003 | |
5004 | if (enabled == lru_gen_enabled()) |
5005 | goto unlock; |
5006 | |
5007 | if (enabled) |
5008 | static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); |
5009 | else |
5010 | static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); |
5011 | |
5012 | memcg = mem_cgroup_iter(NULL, NULL, NULL); |
5013 | do { |
5014 | int nid; |
5015 | |
5016 | for_each_node(nid) { |
5017 | struct lruvec *lruvec = get_lruvec(memcg, nid); |
5018 | |
			spin_lock_irq(&lruvec->lru_lock);
5020 | |
5021 | VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); |
5022 | VM_WARN_ON_ONCE(!state_is_valid(lruvec)); |
5023 | |
5024 | lruvec->lrugen.enabled = enabled; |
5025 | |
5026 | while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) { |
				spin_unlock_irq(&lruvec->lru_lock);
				cond_resched();
				spin_lock_irq(&lruvec->lru_lock);
5030 | } |
5031 | |
			spin_unlock_irq(&lruvec->lru_lock);
5033 | } |
5034 | |
5035 | cond_resched(); |
5036 | } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); |
5037 | unlock: |
	mutex_unlock(&state_mutex);
5039 | put_online_mems(); |
5040 | cpus_read_unlock(); |
5041 | cgroup_unlock(); |
5042 | } |
5043 | |
5044 | /****************************************************************************** |
5045 | * sysfs interface |
5046 | ******************************************************************************/ |
5047 | |
5048 | static ssize_t min_ttl_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) |
5049 | { |
	return sysfs_emit(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl)));
5051 | } |
5052 | |
5053 | /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ |
5054 | static ssize_t min_ttl_ms_store(struct kobject *kobj, struct kobj_attribute *attr, |
5055 | const char *buf, size_t len) |
5056 | { |
5057 | unsigned int msecs; |
5058 | |
	if (kstrtouint(buf, 0, &msecs))
5060 | return -EINVAL; |
5061 | |
5062 | WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs)); |
5063 | |
5064 | return len; |
5065 | } |
5066 | |
5067 | static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR_RW(min_ttl_ms); |
5068 | |
5069 | static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) |
5070 | { |
5071 | unsigned int caps = 0; |
5072 | |
5073 | if (get_cap(LRU_GEN_CORE)) |
5074 | caps |= BIT(LRU_GEN_CORE); |
5075 | |
5076 | if (should_walk_mmu()) |
5077 | caps |= BIT(LRU_GEN_MM_WALK); |
5078 | |
5079 | if (should_clear_pmd_young()) |
5080 | caps |= BIT(LRU_GEN_NONLEAF_YOUNG); |
5081 | |
	return sysfs_emit(buf, "0x%04x\n", caps);
5083 | } |
5084 | |
5085 | /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ |
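/*
 * Illustrative usage: writing "y" or "n" to /sys/kernel/mm/lru_gen/enabled
 * toggles all capabilities at once, while a hex mask such as "0x0007"
 * selects individual LRU_GEN_* capability bits.
 */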
5086 | static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, |
5087 | const char *buf, size_t len) |
5088 | { |
5089 | int i; |
5090 | unsigned int caps; |
5091 | |
5092 | if (tolower(*buf) == 'n') |
5093 | caps = 0; |
5094 | else if (tolower(*buf) == 'y') |
5095 | caps = -1; |
	else if (kstrtouint(buf, 0, &caps))
5097 | return -EINVAL; |
5098 | |
5099 | for (i = 0; i < NR_LRU_GEN_CAPS; i++) { |
5100 | bool enabled = caps & BIT(i); |
5101 | |
5102 | if (i == LRU_GEN_CORE) |
5103 | lru_gen_change_state(enabled); |
5104 | else if (enabled) |
5105 | static_branch_enable(&lru_gen_caps[i]); |
5106 | else |
5107 | static_branch_disable(&lru_gen_caps[i]); |
5108 | } |
5109 | |
5110 | return len; |
5111 | } |
5112 | |
5113 | static struct kobj_attribute lru_gen_enabled_attr = __ATTR_RW(enabled); |
5114 | |
5115 | static struct attribute *lru_gen_attrs[] = { |
5116 | &lru_gen_min_ttl_attr.attr, |
5117 | &lru_gen_enabled_attr.attr, |
5118 | NULL |
5119 | }; |
5120 | |
5121 | static const struct attribute_group lru_gen_attr_group = { |
5122 | .name = "lru_gen" , |
5123 | .attrs = lru_gen_attrs, |
5124 | }; |
5125 | |
5126 | /****************************************************************************** |
5127 | * debugfs interface |
5128 | ******************************************************************************/ |
5129 | |
5130 | static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos) |
5131 | { |
5132 | struct mem_cgroup *memcg; |
5133 | loff_t nr_to_skip = *pos; |
5134 | |
5135 | m->private = kvmalloc(PATH_MAX, GFP_KERNEL); |
5136 | if (!m->private) |
		return ERR_PTR(-ENOMEM);
5138 | |
5139 | memcg = mem_cgroup_iter(NULL, NULL, NULL); |
5140 | do { |
5141 | int nid; |
5142 | |
5143 | for_each_node_state(nid, N_MEMORY) { |
5144 | if (!nr_to_skip--) |
5145 | return get_lruvec(memcg, nid); |
5146 | } |
5147 | } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); |
5148 | |
5149 | return NULL; |
5150 | } |
5151 | |
5152 | static void lru_gen_seq_stop(struct seq_file *m, void *v) |
5153 | { |
	if (!IS_ERR_OR_NULL(v))
		mem_cgroup_iter_break(NULL, lruvec_memcg(v));

	kvfree(m->private);
5158 | m->private = NULL; |
5159 | } |
5160 | |
5161 | static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos) |
5162 | { |
	int nid = lruvec_pgdat(v)->node_id;
	struct mem_cgroup *memcg = lruvec_memcg(v);
5165 | |
5166 | ++*pos; |
5167 | |
5168 | nid = next_memory_node(nid); |
5169 | if (nid == MAX_NUMNODES) { |
5170 | memcg = mem_cgroup_iter(NULL, memcg, NULL); |
5171 | if (!memcg) |
5172 | return NULL; |
5173 | |
5174 | nid = first_memory_node; |
5175 | } |
5176 | |
5177 | return get_lruvec(memcg, nid); |
5178 | } |
5179 | |
5180 | static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec, |
5181 | unsigned long max_seq, unsigned long *min_seq, |
5182 | unsigned long seq) |
5183 | { |
5184 | int i; |
5185 | int type, tier; |
5186 | int hist = lru_hist_from_seq(seq); |
5187 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
5188 | |
5189 | for (tier = 0; tier < MAX_NR_TIERS; tier++) { |
		seq_printf(m, " %10d", tier);
		for (type = 0; type < ANON_AND_FILE; type++) {
			const char *s = "   ";
			unsigned long n[3] = {};

			if (seq == max_seq) {
				s = "RT ";
				n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
				n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
			} else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
				s = "rep";
				n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
				n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
5203 | if (tier) |
5204 | n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]); |
5205 | } |
5206 | |
5207 | for (i = 0; i < 3; i++) |
				seq_printf(m, " %10lu%c", n[i], s[i]);
		}
		seq_putc(m, '\n');
5211 | } |
5212 | |
	seq_puts(m, " ");
	for (i = 0; i < NR_MM_STATS; i++) {
		const char *s = "      ";
		unsigned long n = 0;

		if (seq == max_seq && NR_HIST_GENS == 1) {
			s = "LOYNFA";
			n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
		} else if (seq != max_seq && NR_HIST_GENS > 1) {
			s = "loynfa";
			n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
		}

		seq_printf(m, " %10lu%c", n, s[i]);
5227 | } |
	seq_putc(m, '\n');
5229 | } |
5230 | |
5231 | /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ |
5232 | static int lru_gen_seq_show(struct seq_file *m, void *v) |
5233 | { |
5234 | unsigned long seq; |
	bool full = !debugfs_real_fops(m->file)->write;
5236 | struct lruvec *lruvec = v; |
5237 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
5238 | int nid = lruvec_pgdat(lruvec)->node_id; |
5239 | struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
5240 | DEFINE_MAX_SEQ(lruvec); |
5241 | DEFINE_MIN_SEQ(lruvec); |
5242 | |
5243 | if (nid == first_memory_node) { |
		const char *path = memcg ? m->private : "";

#ifdef CONFIG_MEMCG
		if (memcg)
			cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
#endif
		seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path);
	}

	seq_printf(m, " node %5d\n", nid);
5254 | |
5255 | if (!full) |
5256 | seq = min_seq[LRU_GEN_ANON]; |
5257 | else if (max_seq >= MAX_NR_GENS) |
5258 | seq = max_seq - MAX_NR_GENS + 1; |
5259 | else |
5260 | seq = 0; |
5261 | |
5262 | for (; seq <= max_seq; seq++) { |
5263 | int type, zone; |
5264 | int gen = lru_gen_from_seq(seq); |
5265 | unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); |
5266 | |
		seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth));
5268 | |
5269 | for (type = 0; type < ANON_AND_FILE; type++) { |
5270 | unsigned long size = 0; |
5271 | char mark = full && seq < min_seq[type] ? 'x' : ' '; |
5272 | |
5273 | for (zone = 0; zone < MAX_NR_ZONES; zone++) |
5274 | size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); |
5275 | |
			seq_printf(m, " %10lu%c", size, mark);
5277 | } |
5278 | |
		seq_putc(m, '\n');
5280 | |
5281 | if (full) |
5282 | lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq); |
5283 | } |
5284 | |
5285 | return 0; |
5286 | } |
5287 | |
5288 | static const struct seq_operations lru_gen_seq_ops = { |
5289 | .start = lru_gen_seq_start, |
5290 | .stop = lru_gen_seq_stop, |
5291 | .next = lru_gen_seq_next, |
5292 | .show = lru_gen_seq_show, |
5293 | }; |
5294 | |
5295 | static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, |
5296 | bool can_swap, bool force_scan) |
5297 | { |
5298 | DEFINE_MAX_SEQ(lruvec); |
5299 | DEFINE_MIN_SEQ(lruvec); |
5300 | |
5301 | if (seq < max_seq) |
5302 | return 0; |
5303 | |
5304 | if (seq > max_seq) |
5305 | return -EINVAL; |
5306 | |
5307 | if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq) |
5308 | return -ERANGE; |
5309 | |
5310 | try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, force_scan); |
5311 | |
5312 | return 0; |
5313 | } |
5314 | |
5315 | static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, |
5316 | int swappiness, unsigned long nr_to_reclaim) |
5317 | { |
5318 | DEFINE_MAX_SEQ(lruvec); |
5319 | |
5320 | if (seq + MIN_NR_GENS > max_seq) |
5321 | return -EINVAL; |
5322 | |
5323 | sc->nr_reclaimed = 0; |
5324 | |
5325 | while (!signal_pending(current)) { |
5326 | DEFINE_MIN_SEQ(lruvec); |
5327 | |
5328 | if (seq < min_seq[!swappiness]) |
5329 | return 0; |
5330 | |
5331 | if (sc->nr_reclaimed >= nr_to_reclaim) |
5332 | return 0; |
5333 | |
5334 | if (!evict_folios(lruvec, sc, swappiness)) |
5335 | return 0; |
5336 | |
5337 | cond_resched(); |
5338 | } |
5339 | |
5340 | return -EINTR; |
5341 | } |
5342 | |
5343 | static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq, |
5344 | struct scan_control *sc, int swappiness, unsigned long opt) |
5345 | { |
5346 | struct lruvec *lruvec; |
5347 | int err = -EINVAL; |
5348 | struct mem_cgroup *memcg = NULL; |
5349 | |
	if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
5351 | return -EINVAL; |
5352 | |
5353 | if (!mem_cgroup_disabled()) { |
5354 | rcu_read_lock(); |
5355 | |
		memcg = mem_cgroup_from_id(memcg_id);
5357 | if (!mem_cgroup_tryget(memcg)) |
5358 | memcg = NULL; |
5359 | |
5360 | rcu_read_unlock(); |
5361 | |
5362 | if (!memcg) |
5363 | return -EINVAL; |
5364 | } |
5365 | |
5366 | if (memcg_id != mem_cgroup_id(memcg)) |
5367 | goto done; |
5368 | |
5369 | lruvec = get_lruvec(memcg, nid); |
5370 | |
5371 | if (swappiness < 0) |
5372 | swappiness = get_swappiness(lruvec, sc); |
5373 | else if (swappiness > 200) |
5374 | goto done; |
5375 | |
5376 | switch (cmd) { |
5377 | case '+': |
		err = run_aging(lruvec, seq, sc, swappiness, opt);
		break;
	case '-':
		err = run_eviction(lruvec, seq, sc, swappiness, opt);
5382 | break; |
5383 | } |
5384 | done: |
5385 | mem_cgroup_put(memcg); |
5386 | |
5387 | return err; |
5388 | } |
5389 | |
5390 | /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ |
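/*
 * Each command is parsed as (illustrative summary):
 *   <+|-> memcg_id node_id seq [swappiness [opt]]
 * where '+' runs aging up to seq (opt acting as force_scan) and '-' runs
 * eviction (opt acting as nr_to_reclaim). Commands may be separated by
 * ',', ';' or newlines, e.g. "+ 5 0 20" or "- 5 0 18 100 512".
 */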
5391 | static ssize_t lru_gen_seq_write(struct file *file, const char __user *src, |
5392 | size_t len, loff_t *pos) |
5393 | { |
5394 | void *buf; |
5395 | char *cur, *next; |
5396 | unsigned int flags; |
5397 | struct blk_plug plug; |
5398 | int err = -EINVAL; |
5399 | struct scan_control sc = { |
5400 | .may_writepage = true, |
5401 | .may_unmap = true, |
5402 | .may_swap = true, |
5403 | .reclaim_idx = MAX_NR_ZONES - 1, |
5404 | .gfp_mask = GFP_KERNEL, |
5405 | }; |
5406 | |
	buf = kvmalloc(len + 1, GFP_KERNEL);
5408 | if (!buf) |
5409 | return -ENOMEM; |
5410 | |
	if (copy_from_user(buf, src, len)) {
		kvfree(buf);
5413 | return -EFAULT; |
5414 | } |
5415 | |
	set_task_reclaim_state(current, &sc.reclaim_state);
5417 | flags = memalloc_noreclaim_save(); |
5418 | blk_start_plug(&plug); |
	if (!set_mm_walk(NULL, true)) {
5420 | err = -ENOMEM; |
5421 | goto done; |
5422 | } |
5423 | |
5424 | next = buf; |
5425 | next[len] = '\0'; |
5426 | |
	while ((cur = strsep(&next, ",;\n"))) {
5428 | int n; |
5429 | int end; |
5430 | char cmd; |
5431 | unsigned int memcg_id; |
5432 | unsigned int nid; |
5433 | unsigned long seq; |
5434 | unsigned int swappiness = -1; |
5435 | unsigned long opt = -1; |
5436 | |
5437 | cur = skip_spaces(cur); |
5438 | if (!*cur) |
5439 | continue; |
5440 | |
		n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
			   &seq, &end, &swappiness, &end, &opt, &end);
5443 | if (n < 4 || cur[end]) { |
5444 | err = -EINVAL; |
5445 | break; |
5446 | } |
5447 | |
		err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
5449 | if (err) |
5450 | break; |
5451 | } |
5452 | done: |
5453 | clear_mm_walk(); |
5454 | blk_finish_plug(&plug); |
5455 | memalloc_noreclaim_restore(flags); |
5456 | set_task_reclaim_state(current, NULL); |
5457 | |
	kvfree(buf);
5459 | |
5460 | return err ? : len; |
5461 | } |
5462 | |
5463 | static int lru_gen_seq_open(struct inode *inode, struct file *file) |
5464 | { |
5465 | return seq_open(file, &lru_gen_seq_ops); |
5466 | } |
5467 | |
5468 | static const struct file_operations lru_gen_rw_fops = { |
5469 | .open = lru_gen_seq_open, |
5470 | .read = seq_read, |
5471 | .write = lru_gen_seq_write, |
5472 | .llseek = seq_lseek, |
5473 | .release = seq_release, |
5474 | }; |
5475 | |
5476 | static const struct file_operations lru_gen_ro_fops = { |
5477 | .open = lru_gen_seq_open, |
5478 | .read = seq_read, |
5479 | .llseek = seq_lseek, |
5480 | .release = seq_release, |
5481 | }; |
5482 | |
5483 | /****************************************************************************** |
5484 | * initialization |
5485 | ******************************************************************************/ |
5486 | |
5487 | void lru_gen_init_lruvec(struct lruvec *lruvec) |
5488 | { |
5489 | int i; |
5490 | int gen, type, zone; |
5491 | struct lru_gen_folio *lrugen = &lruvec->lrugen; |
5492 | |
5493 | lrugen->max_seq = MIN_NR_GENS + 1; |
5494 | lrugen->enabled = lru_gen_enabled(); |
5495 | |
5496 | for (i = 0; i <= MIN_NR_GENS + 1; i++) |
5497 | lrugen->timestamps[i] = jiffies; |
5498 | |
5499 | for_each_gen_type_zone(gen, type, zone) |
5500 | INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); |
5501 | |
5502 | lruvec->mm_state.seq = MIN_NR_GENS; |
5503 | } |
5504 | |
5505 | #ifdef CONFIG_MEMCG |
5506 | |
5507 | void lru_gen_init_pgdat(struct pglist_data *pgdat) |
5508 | { |
5509 | int i, j; |
5510 | |
5511 | spin_lock_init(&pgdat->memcg_lru.lock); |
5512 | |
5513 | for (i = 0; i < MEMCG_NR_GENS; i++) { |
5514 | for (j = 0; j < MEMCG_NR_BINS; j++) |
5515 | INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); |
5516 | } |
5517 | } |
5518 | |
5519 | void lru_gen_init_memcg(struct mem_cgroup *memcg) |
5520 | { |
	INIT_LIST_HEAD(&memcg->mm_list.fifo);
5522 | spin_lock_init(&memcg->mm_list.lock); |
5523 | } |
5524 | |
5525 | void lru_gen_exit_memcg(struct mem_cgroup *memcg) |
5526 | { |
5527 | int i; |
5528 | int nid; |
5529 | |
5530 | VM_WARN_ON_ONCE(!list_empty(&memcg->mm_list.fifo)); |
5531 | |
5532 | for_each_node(nid) { |
5533 | struct lruvec *lruvec = get_lruvec(memcg, nid); |
5534 | |
5535 | VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, |
5536 | sizeof(lruvec->lrugen.nr_pages))); |
5537 | |
5538 | lruvec->lrugen.list.next = LIST_POISON1; |
5539 | |
5540 | for (i = 0; i < NR_BLOOM_FILTERS; i++) { |
			bitmap_free(lruvec->mm_state.filters[i]);
5542 | lruvec->mm_state.filters[i] = NULL; |
5543 | } |
5544 | } |
5545 | } |
5546 | |
5547 | #endif /* CONFIG_MEMCG */ |
5548 | |
5549 | static int __init init_lru_gen(void) |
5550 | { |
5551 | BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS); |
5552 | BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS); |
5553 | |
	if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
		pr_err("lru_gen: failed to create sysfs group\n");

	debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
	debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
5559 | |
5560 | return 0; |
5561 | }; |
5562 | late_initcall(init_lru_gen); |
5563 | |
5564 | #else /* !CONFIG_LRU_GEN */ |
5565 | |
5566 | static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) |
5567 | { |
5568 | } |
5569 | |
5570 | static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) |
5571 | { |
5572 | } |
5573 | |
5574 | static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) |
5575 | { |
5576 | } |
5577 | |
5578 | #endif /* CONFIG_LRU_GEN */ |
5579 | |
5580 | static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) |
5581 | { |
5582 | unsigned long nr[NR_LRU_LISTS]; |
5583 | unsigned long targets[NR_LRU_LISTS]; |
5584 | unsigned long nr_to_scan; |
5585 | enum lru_list lru; |
5586 | unsigned long nr_reclaimed = 0; |
5587 | unsigned long nr_to_reclaim = sc->nr_to_reclaim; |
5588 | bool proportional_reclaim; |
5589 | struct blk_plug plug; |
5590 | |
5591 | if (lru_gen_enabled() && !root_reclaim(sc)) { |
5592 | lru_gen_shrink_lruvec(lruvec, sc); |
5593 | return; |
5594 | } |
5595 | |
5596 | get_scan_count(lruvec, sc, nr); |
5597 | |
5598 | /* Record the original scan target for proportional adjustments later */ |
5599 | memcpy(targets, nr, sizeof(nr)); |
5600 | |
5601 | /* |
5602 | * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal |
5603 | * event that can occur when there is little memory pressure e.g. |
5604 | * multiple streaming readers/writers. Hence, we do not abort scanning |
5605 | * when the requested number of pages are reclaimed when scanning at |
5606 | * DEF_PRIORITY on the assumption that the fact we are direct |
5607 | * reclaiming implies that kswapd is not keeping up and it is best to |
5608 | * do a batch of work at once. For memcg reclaim one check is made to |
5609 | * abort proportional reclaim if either the file or anon lru has already |
5610 | * dropped to zero at the first pass. |
5611 | */ |
5612 | proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() && |
5613 | sc->priority == DEF_PRIORITY); |
5614 | |
5615 | blk_start_plug(&plug); |
5616 | while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || |
5617 | nr[LRU_INACTIVE_FILE]) { |
5618 | unsigned long nr_anon, nr_file, percentage; |
5619 | unsigned long nr_scanned; |
5620 | |
5621 | for_each_evictable_lru(lru) { |
5622 | if (nr[lru]) { |
5623 | nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); |
5624 | nr[lru] -= nr_to_scan; |
5625 | |
5626 | nr_reclaimed += shrink_list(lru, nr_to_scan, |
5627 | lruvec, sc); |
5628 | } |
5629 | } |
5630 | |
5631 | cond_resched(); |
5632 | |
5633 | if (nr_reclaimed < nr_to_reclaim || proportional_reclaim) |
5634 | continue; |
5635 | |
5636 | /* |
5637 | * For kswapd and memcg, reclaim at least the number of pages |
5638 | * requested. Ensure that the anon and file LRUs are scanned |
		 * proportionally to what was requested by get_scan_count(). We
5640 | * stop reclaiming one LRU and reduce the amount scanning |
5641 | * proportional to the original scan target. |
5642 | */ |
5643 | nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]; |
5644 | nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON]; |
5645 | |
5646 | /* |
5647 | * It's just vindictive to attack the larger once the smaller |
5648 | * has gone to zero. And given the way we stop scanning the |
5649 | * smaller below, this makes sure that we only make one nudge |
5650 | * towards proportionality once we've got nr_to_reclaim. |
5651 | */ |
5652 | if (!nr_file || !nr_anon) |
5653 | break; |
5654 | |
5655 | if (nr_file > nr_anon) { |
5656 | unsigned long scan_target = targets[LRU_INACTIVE_ANON] + |
5657 | targets[LRU_ACTIVE_ANON] + 1; |
5658 | lru = LRU_BASE; |
5659 | percentage = nr_anon * 100 / scan_target; |
5660 | } else { |
5661 | unsigned long scan_target = targets[LRU_INACTIVE_FILE] + |
5662 | targets[LRU_ACTIVE_FILE] + 1; |
5663 | lru = LRU_FILE; |
5664 | percentage = nr_file * 100 / scan_target; |
5665 | } |
5666 | |
5667 | /* Stop scanning the smaller of the LRU */ |
5668 | nr[lru] = 0; |
5669 | nr[lru + LRU_ACTIVE] = 0; |
5670 | |
5671 | /* |
5672 | * Recalculate the other LRU scan count based on its original |
5673 | * scan target and the percentage scanning already complete |
5674 | */ |
5675 | lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE; |
5676 | nr_scanned = targets[lru] - nr[lru]; |
5677 | nr[lru] = targets[lru] * (100 - percentage) / 100; |
5678 | nr[lru] -= min(nr[lru], nr_scanned); |
5679 | |
5680 | lru += LRU_ACTIVE; |
5681 | nr_scanned = targets[lru] - nr[lru]; |
5682 | nr[lru] = targets[lru] * (100 - percentage) / 100; |
5683 | nr[lru] -= min(nr[lru], nr_scanned); |
5684 | } |
5685 | blk_finish_plug(&plug); |
5686 | sc->nr_reclaimed += nr_reclaimed; |
5687 | |
5688 | /* |
5689 | * Even if we did not try to evict anon pages at all, we want to |
5690 | * rebalance the anon lru active/inactive ratio. |
5691 | */ |
	if (can_age_anon_pages(lruvec_pgdat(lruvec), sc) &&
	    inactive_is_low(lruvec, LRU_INACTIVE_ANON))
		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
				   sc, LRU_ACTIVE_ANON);
5696 | } |
5697 | |
5698 | /* Use reclaim/compaction for costly allocs or under memory pressure */ |
5699 | static bool in_reclaim_compaction(struct scan_control *sc) |
5700 | { |
5701 | if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && |
5702 | (sc->order > PAGE_ALLOC_COSTLY_ORDER || |
5703 | sc->priority < DEF_PRIORITY - 2)) |
5704 | return true; |
5705 | |
5706 | return false; |
5707 | } |
5708 | |
5709 | /* |
5710 | * Reclaim/compaction is used for high-order allocation requests. It reclaims |
5711 | * order-0 pages before compacting the zone. should_continue_reclaim() returns |
5712 | * true if more pages should be reclaimed such that when the page allocator |
5713 | * calls try_to_compact_pages() that it will have enough free pages to succeed. |
5714 | * It will give up earlier than that if there is difficulty reclaiming pages. |
5715 | */ |
5716 | static inline bool should_continue_reclaim(struct pglist_data *pgdat, |
5717 | unsigned long nr_reclaimed, |
5718 | struct scan_control *sc) |
5719 | { |
5720 | unsigned long pages_for_compaction; |
5721 | unsigned long inactive_lru_pages; |
5722 | int z; |
5723 | |
5724 | /* If not in reclaim/compaction mode, stop */ |
5725 | if (!in_reclaim_compaction(sc)) |
5726 | return false; |
5727 | |
5728 | /* |
5729 | * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX |
5730 | * number of pages that were scanned. This will return to the caller |
	 * with the risk that reclaim/compaction and the resulting allocation
	 * attempt fail. In the past we have tried harder for __GFP_RETRY_MAYFAIL
5733 | * allocations through requiring that the full LRU list has been scanned |
5734 | * first, by assuming that zero delta of sc->nr_scanned means full LRU |
5735 | * scan, but that approximation was wrong, and there were corner cases |
5736 | * where always a non-zero amount of pages were scanned. |
5737 | */ |
5738 | if (!nr_reclaimed) |
5739 | return false; |
5740 | |
5741 | /* If compaction would go ahead or the allocation would succeed, stop */ |
5742 | for (z = 0; z <= sc->reclaim_idx; z++) { |
5743 | struct zone *zone = &pgdat->node_zones[z]; |
5744 | if (!managed_zone(zone)) |
5745 | continue; |
5746 | |
5747 | /* Allocation can already succeed, nothing to do */ |
		if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
				      sc->reclaim_idx, 0))
5750 | return false; |
5751 | |
		if (compaction_suitable(zone, sc->order, sc->reclaim_idx))
5753 | return false; |
5754 | } |
5755 | |
5756 | /* |
5757 | * If we have not reclaimed enough pages for compaction and the |
5758 | * inactive lists are large enough, continue reclaiming |
5759 | */ |
	pages_for_compaction = compact_gap(sc->order);
	inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
	if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
		inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
5764 | |
5765 | return inactive_lru_pages > pages_for_compaction; |
5766 | } |
5767 | |
5768 | static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) |
5769 | { |
5770 | struct mem_cgroup *target_memcg = sc->target_mem_cgroup; |
5771 | struct mem_cgroup *memcg; |
5772 | |
5773 | memcg = mem_cgroup_iter(target_memcg, NULL, NULL); |
5774 | do { |
5775 | struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); |
5776 | unsigned long reclaimed; |
5777 | unsigned long scanned; |
5778 | |
5779 | /* |
5780 | * This loop can become CPU-bound when target memcgs |
5781 | * aren't eligible for reclaim - either because they |
5782 | * don't have any reclaimable pages, or because their |
5783 | * memory is explicitly protected. Avoid soft lockups. |
5784 | */ |
5785 | cond_resched(); |
5786 | |
		mem_cgroup_calculate_protection(target_memcg, memcg);

		if (mem_cgroup_below_min(target_memcg, memcg)) {
5790 | /* |
5791 | * Hard protection. |
5792 | * If there is no reclaimable memory, OOM. |
5793 | */ |
5794 | continue; |
		} else if (mem_cgroup_below_low(target_memcg, memcg)) {
5796 | /* |
5797 | * Soft protection. |
5798 | * Respect the protection only as long as |
5799 | * there is an unprotected supply |
5800 | * of reclaimable memory from other cgroups. |
5801 | */ |
5802 | if (!sc->memcg_low_reclaim) { |
5803 | sc->memcg_low_skipped = 1; |
5804 | continue; |
5805 | } |
			memcg_memory_event(memcg, MEMCG_LOW);
5807 | } |
5808 | |
5809 | reclaimed = sc->nr_reclaimed; |
5810 | scanned = sc->nr_scanned; |
5811 | |
5812 | shrink_lruvec(lruvec, sc); |
5813 | |
		shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
			    sc->priority);
5816 | |
5817 | /* Record the group's reclaim efficiency */ |
5818 | if (!sc->proactive) |
			vmpressure(sc->gfp_mask, memcg, false,
				   sc->nr_scanned - scanned,
				   sc->nr_reclaimed - reclaimed);
5822 | |
5823 | } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL))); |
5824 | } |
5825 | |
5826 | static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) |
5827 | { |
5828 | unsigned long nr_reclaimed, nr_scanned, nr_node_reclaimed; |
5829 | struct lruvec *target_lruvec; |
5830 | bool reclaimable = false; |
5831 | |
5832 | if (lru_gen_enabled() && root_reclaim(sc)) { |
5833 | lru_gen_shrink_node(pgdat, sc); |
5834 | return; |
5835 | } |
5836 | |
	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
5838 | |
5839 | again: |
5840 | memset(&sc->nr, 0, sizeof(sc->nr)); |
5841 | |
5842 | nr_reclaimed = sc->nr_reclaimed; |
5843 | nr_scanned = sc->nr_scanned; |
5844 | |
5845 | prepare_scan_control(pgdat, sc); |
5846 | |
5847 | shrink_node_memcgs(pgdat, sc); |
5848 | |
5849 | flush_reclaim_state(sc); |
5850 | |
5851 | nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed; |
5852 | |
5853 | /* Record the subtree's reclaim efficiency */ |
5854 | if (!sc->proactive) |
		vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
			   sc->nr_scanned - nr_scanned, nr_node_reclaimed);
5857 | |
5858 | if (nr_node_reclaimed) |
5859 | reclaimable = true; |
5860 | |
5861 | if (current_is_kswapd()) { |
5862 | /* |
5863 | * If reclaim is isolating dirty pages under writeback, |
5864 | * it implies that the long-lived page allocation rate |
5865 | * is exceeding the page laundering rate. Either the |
5866 | * global limits are not being effective at throttling |
5867 | * processes due to the page distribution throughout |
5868 | * zones or there is heavy usage of a slow backing |
5869 | * device. The only option is to throttle from reclaim |
5870 | * context which is not ideal as there is no guarantee |
5871 | * the dirtying process is throttled in the same way |
5872 | * balance_dirty_pages() manages. |
5873 | * |
5874 | * Once a node is flagged PGDAT_WRITEBACK, kswapd will |
5875 | * count the number of pages under pages flagged for |
5876 | * immediate reclaim and stall if any are encountered |
5877 | * in the nr_immediate check below. |
5878 | */ |
5879 | if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) |
			set_bit(PGDAT_WRITEBACK, &pgdat->flags);
5881 | |
		/* Allow kswapd to start writing pages during reclaim. */
		if (sc->nr.unqueued_dirty == sc->nr.file_taken)
			set_bit(PGDAT_DIRTY, &pgdat->flags);
5885 | |
5886 | /* |
5887 | * If kswapd scans pages marked for immediate |
5888 | * reclaim and under writeback (nr_immediate), it |
5889 | * implies that pages are cycling through the LRU |
5890 | * faster than they are written so forcibly stall |
5891 | * until some pages complete writeback. |
5892 | */ |
5893 | if (sc->nr.immediate) |
			reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
5895 | } |
5896 | |
5897 | /* |
5898 | * Tag a node/memcg as congested if all the dirty pages were marked |
5899 | * for writeback and immediate reclaim (counted in nr.congested). |
5900 | * |
5901 | * Legacy memcg will stall in page writeback so avoid forcibly |
5902 | * stalling in reclaim_throttle(). |
5903 | */ |
5904 | if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { |
5905 | if (cgroup_reclaim(sc) && writeback_throttling_sane(sc)) |
			set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags);
5907 | |
5908 | if (current_is_kswapd()) |
			set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags);
5910 | } |
5911 | |
5912 | /* |
	 * Stall direct reclaim for IO completions if the lruvec or
	 * node is congested. Allow kswapd to continue until it
5915 | * starts encountering unqueued dirty pages or cycling through |
5916 | * the LRU too quickly. |
5917 | */ |
5918 | if (!current_is_kswapd() && current_may_throttle() && |
5919 | !sc->hibernation_mode && |
5920 | (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) || |
5921 | test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags))) |
		reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);
5923 | |
	if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc))
5925 | goto again; |
5926 | |
5927 | /* |
5928 | * Kswapd gives up on balancing particular nodes after too |
5929 | * many failures to reclaim anything from them and goes to |
5930 | * sleep. On reclaim progress, reset the failure counter. A |
5931 | * successful direct reclaim run will revive a dormant kswapd. |
5932 | */ |
5933 | if (reclaimable) |
5934 | pgdat->kswapd_failures = 0; |
5935 | } |
5936 | |
5937 | /* |
5938 | * Returns true if compaction should go ahead for a costly-order request, or |
5939 | * the allocation would already succeed without compaction. Return false if we |
5940 | * should reclaim first. |
5941 | */ |
5942 | static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) |
5943 | { |
5944 | unsigned long watermark; |
5945 | |
5946 | /* Allocation can already succeed, nothing to do */ |
	if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
			      sc->reclaim_idx, 0))
5949 | return true; |
5950 | |
5951 | /* Compaction cannot yet proceed. Do reclaim. */ |
	if (!compaction_suitable(zone, sc->order, sc->reclaim_idx))
5953 | return false; |
5954 | |
5955 | /* |
5956 | * Compaction is already possible, but it takes time to run and there |
5957 | * are potentially other callers using the pages just freed. So proceed |
5958 | * with reclaim to make a buffer of free pages available to give |
5959 | * compaction a reasonable chance of completing and allocating the page. |
5960 | * Note that we won't actually reclaim the whole buffer in one attempt |
5961 | * as the target watermark in should_continue_reclaim() is lower. But if |
5962 | * we are already above the high+gap watermark, don't reclaim at all. |
5963 | */ |
	watermark = high_wmark_pages(zone) + compact_gap(sc->order);
5965 | |
	return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
5967 | } |
5968 | |
5969 | static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc) |
5970 | { |
5971 | /* |
	 * If reclaim is making progress greater than 12.5% efficiency then
5973 | * wake all the NOPROGRESS throttled tasks. |
5974 | */ |
5975 | if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) { |
5976 | wait_queue_head_t *wqh; |
5977 | |
5978 | wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; |
		if (waitqueue_active(wqh))
5980 | wake_up(wqh); |
5981 | |
5982 | return; |
5983 | } |
5984 | |
5985 | /* |
5986 | * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will |
5987 | * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages |
5988 | * under writeback and marked for immediate reclaim at the tail of the |
5989 | * LRU. |
5990 | */ |
5991 | if (current_is_kswapd() || cgroup_reclaim(sc)) |
5992 | return; |
5993 | |
	/* Throttle if making no progress at high priorities. */
5995 | if (sc->priority == 1 && !sc->nr_reclaimed) |
		reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS);
5997 | } |
5998 | |
5999 | /* |
6000 | * This is the direct reclaim path, for page-allocating processes. We only |
6001 | * try to reclaim pages from zones which will satisfy the caller's allocation |
6002 | * request. |
6003 | * |
6004 | * If a zone is deemed to be full of pinned pages then just give it a light |
6005 | * scan then give up on it. |
6006 | */ |
6007 | static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) |
6008 | { |
6009 | struct zoneref *z; |
6010 | struct zone *zone; |
6011 | unsigned long nr_soft_reclaimed; |
6012 | unsigned long nr_soft_scanned; |
6013 | gfp_t orig_mask; |
6014 | pg_data_t *last_pgdat = NULL; |
6015 | pg_data_t *first_pgdat = NULL; |
6016 | |
6017 | /* |
6018 | * If the number of buffer_heads in the machine exceeds the maximum |
6019 | * allowed level, force direct reclaim to scan the highmem zone as |
6020 | * highmem pages could be pinning lowmem pages storing buffer_heads |
6021 | */ |
6022 | orig_mask = sc->gfp_mask; |
6023 | if (buffer_heads_over_limit) { |
6024 | sc->gfp_mask |= __GFP_HIGHMEM; |
		sc->reclaim_idx = gfp_zone(sc->gfp_mask);
6026 | } |
6027 | |
6028 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
6029 | sc->reclaim_idx, sc->nodemask) { |
6030 | /* |
		 * Take care that memory controller reclaim has only a small
		 * influence on the global LRU.
6033 | */ |
6034 | if (!cgroup_reclaim(sc)) { |
			if (!cpuset_zone_allowed(zone,
						 GFP_KERNEL | __GFP_HARDWALL))
6037 | continue; |
6038 | |
6039 | /* |
6040 | * If we already have plenty of memory free for |
6041 | * compaction in this zone, don't free any more. |
6042 | * Even though compaction is invoked for any |
6043 | * non-zero order, only frequent costly order |
6044 | * reclamation is disruptive enough to become a |
6045 | * noticeable problem, like transparent huge |
6046 | * page allocations. |
6047 | */ |
6048 | if (IS_ENABLED(CONFIG_COMPACTION) && |
6049 | sc->order > PAGE_ALLOC_COSTLY_ORDER && |
6050 | compaction_ready(zone, sc)) { |
6051 | sc->compaction_ready = true; |
6052 | continue; |
6053 | } |
6054 | |
6055 | /* |
6056 | * Shrink each node in the zonelist once. If the |
6057 | * zonelist is ordered by zone (not the default) then a |
6058 | * node may be shrunk multiple times but in that case |
6059 | * the user prefers lower zones being preserved. |
6060 | */ |
6061 | if (zone->zone_pgdat == last_pgdat) |
6062 | continue; |
6063 | |
6064 | /* |
6065 | * This steals pages from memory cgroups over softlimit |
6066 | * and returns the number of reclaimed pages and |
6067 | * scanned pages. This works for global memory pressure |
6068 | * and balancing, not for a memcg's limit. |
6069 | */ |
6070 | nr_soft_scanned = 0; |
			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
						sc->order, sc->gfp_mask,
						&nr_soft_scanned);
6074 | sc->nr_reclaimed += nr_soft_reclaimed; |
6075 | sc->nr_scanned += nr_soft_scanned; |
			/* need some check to avoid calling shrink_node() more than necessary */
6077 | } |
6078 | |
6079 | if (!first_pgdat) |
6080 | first_pgdat = zone->zone_pgdat; |
6081 | |
6082 | /* See comment about same check for global reclaim above */ |
6083 | if (zone->zone_pgdat == last_pgdat) |
6084 | continue; |
6085 | last_pgdat = zone->zone_pgdat; |
		shrink_node(zone->zone_pgdat, sc);
6087 | } |
6088 | |
6089 | if (first_pgdat) |
		consider_reclaim_throttle(first_pgdat, sc);
6091 | |
6092 | /* |
6093 | * Restore to original mask to avoid the impact on the caller if we |
6094 | * promoted it to __GFP_HIGHMEM. |
6095 | */ |
6096 | sc->gfp_mask = orig_mask; |
6097 | } |
6098 | |
6099 | static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) |
6100 | { |
6101 | struct lruvec *target_lruvec; |
6102 | unsigned long refaults; |
6103 | |
6104 | if (lru_gen_enabled()) |
6105 | return; |
6106 | |
	target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
	target_lruvec->refaults[WORKINGSET_ANON] = refaults;
	refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
6111 | target_lruvec->refaults[WORKINGSET_FILE] = refaults; |
6112 | } |
6113 | |
6114 | /* |
6115 | * This is the main entry point to direct page reclaim. |
6116 | * |
6117 | * If a full scan of the inactive list fails to free enough memory then we |
6118 | * are "out of memory" and something needs to be killed. |
6119 | * |
6120 | * If the caller is !__GFP_FS then the probability of a failure is reasonably |
6121 | * high - the zone may be full of dirty or under-writeback pages, which this |
6122 | * caller can't do much about. We kick the writeback threads and take explicit |
6123 | * naps in the hope that some of these pages can be written. But if the |
6124 | * allocating task holds filesystem locks which prevent writeout this might not |
6125 | * work, and the allocation attempt will fail. |
6126 | * |
6127 | * returns: 0, if no pages reclaimed |
6128 | * else, the number of pages reclaimed |
6129 | */ |
6130 | static unsigned long do_try_to_free_pages(struct zonelist *zonelist, |
6131 | struct scan_control *sc) |
6132 | { |
6133 | int initial_priority = sc->priority; |
6134 | pg_data_t *last_pgdat; |
6135 | struct zoneref *z; |
6136 | struct zone *zone; |
6137 | retry: |
6138 | delayacct_freepages_start(); |
6139 | |
6140 | if (!cgroup_reclaim(sc)) |
6141 | __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); |
6142 | |
6143 | do { |
6144 | if (!sc->proactive) |
			vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
					sc->priority);
6147 | sc->nr_scanned = 0; |
6148 | shrink_zones(zonelist, sc); |
6149 | |
6150 | if (sc->nr_reclaimed >= sc->nr_to_reclaim) |
6151 | break; |
6152 | |
6153 | if (sc->compaction_ready) |
6154 | break; |
6155 | |
6156 | /* |
6157 | * If we're getting trouble reclaiming, start doing |
6158 | * writepage even in laptop mode. |
6159 | */ |
6160 | if (sc->priority < DEF_PRIORITY - 2) |
6161 | sc->may_writepage = 1; |
6162 | } while (--sc->priority >= 0); |
6163 | |
6164 | last_pgdat = NULL; |
6165 | for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, |
6166 | sc->nodemask) { |
6167 | if (zone->zone_pgdat == last_pgdat) |
6168 | continue; |
6169 | last_pgdat = zone->zone_pgdat; |
6170 | |
		snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
6172 | |
6173 | if (cgroup_reclaim(sc)) { |
6174 | struct lruvec *lruvec; |
6175 | |
			lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
						   zone->zone_pgdat);
			clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
6179 | } |
6180 | } |
6181 | |
6182 | delayacct_freepages_end(); |
6183 | |
6184 | if (sc->nr_reclaimed) |
6185 | return sc->nr_reclaimed; |
6186 | |
6187 | /* Aborted reclaim to try compaction? don't OOM, then */ |
6188 | if (sc->compaction_ready) |
6189 | return 1; |
6190 | |
6191 | /* |
6192 | * We make inactive:active ratio decisions based on the node's |
6193 | * composition of memory, but a restrictive reclaim_idx or a |
6194 | * memory.low cgroup setting can exempt large amounts of |
6195 | * memory from reclaim. Neither of which are very common, so |
6196 | * instead of doing costly eligibility calculations of the |
6197 | * entire cgroup subtree up front, we assume the estimates are |
6198 | * good, and retry with forcible deactivation if that fails. |
6199 | */ |
6200 | if (sc->skipped_deactivate) { |
6201 | sc->priority = initial_priority; |
6202 | sc->force_deactivate = 1; |
6203 | sc->skipped_deactivate = 0; |
6204 | goto retry; |
6205 | } |
6206 | |
6207 | /* Untapped cgroup reserves? Don't OOM, retry. */ |
6208 | if (sc->memcg_low_skipped) { |
6209 | sc->priority = initial_priority; |
6210 | sc->force_deactivate = 0; |
6211 | sc->memcg_low_reclaim = 1; |
6212 | sc->memcg_low_skipped = 0; |
6213 | goto retry; |
6214 | } |
6215 | |
6216 | return 0; |
6217 | } |
6218 | |
6219 | static bool allow_direct_reclaim(pg_data_t *pgdat) |
6220 | { |
6221 | struct zone *zone; |
6222 | unsigned long pfmemalloc_reserve = 0; |
6223 | unsigned long free_pages = 0; |
6224 | int i; |
6225 | bool wmark_ok; |
6226 | |
6227 | if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) |
6228 | return true; |
6229 | |
6230 | for (i = 0; i <= ZONE_NORMAL; i++) { |
6231 | zone = &pgdat->node_zones[i]; |
6232 | if (!managed_zone(zone)) |
6233 | continue; |
6234 | |
6235 | if (!zone_reclaimable_pages(zone)) |
6236 | continue; |
6237 | |
6238 | pfmemalloc_reserve += min_wmark_pages(zone); |
		free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES);
6240 | } |
6241 | |
6242 | /* If there are no reserves (unexpected config) then do not throttle */ |
6243 | if (!pfmemalloc_reserve) |
6244 | return true; |
6245 | |
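	/*
	 * Throttle direct reclaim once free pages in the lowmem zones drop
	 * below half of their combined min watermarks.
	 */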
6246 | wmark_ok = free_pages > pfmemalloc_reserve / 2; |
6247 | |
6248 | /* kswapd must be awake if processes are being throttled */ |
6249 | if (!wmark_ok && waitqueue_active(wq_head: &pgdat->kswapd_wait)) { |
6250 | if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) |
6251 | WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); |
6252 | |
6253 | wake_up_interruptible(&pgdat->kswapd_wait); |
6254 | } |
6255 | |
6256 | return wmark_ok; |
6257 | } |
6258 | |
6259 | /* |
6260 | * Throttle direct reclaimers if backing storage is backed by the network |
6261 | * and the PFMEMALLOC reserve for the preferred node is getting dangerously |
6262 | * depleted. kswapd will continue to make progress and wake the processes |
6263 | * when the low watermark is reached. |
6264 | * |
6265 | * Returns true if a fatal signal was delivered during throttling. If this |
6266 | * happens, the page allocator should not consider triggering the OOM killer. |
6267 | */ |
6268 | static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, |
6269 | nodemask_t *nodemask) |
6270 | { |
6271 | struct zoneref *z; |
6272 | struct zone *zone; |
6273 | pg_data_t *pgdat = NULL; |
6274 | |
6275 | /* |
6276 | * Kernel threads should not be throttled as they may be indirectly |
6277 | * responsible for cleaning pages necessary for reclaim to make forward |
6278 | * progress. kjournald for example may enter direct reclaim while |
	 * committing a transaction where throttling it could force other
6280 | * processes to block on log_wait_commit(). |
6281 | */ |
6282 | if (current->flags & PF_KTHREAD) |
6283 | goto out; |
6284 | |
6285 | /* |
6286 | * If a fatal signal is pending, this process should not throttle. |
6287 | * It should return quickly so it can exit and free its memory |
6288 | */ |
6289 | if (fatal_signal_pending(current)) |
6290 | goto out; |
6291 | |
6292 | /* |
6293 | * Check if the pfmemalloc reserves are ok by finding the first node |
6294 | * with a usable ZONE_NORMAL or lower zone. The expectation is that |
6295 | * GFP_KERNEL will be required for allocating network buffers when |
6296 | * swapping over the network so ZONE_HIGHMEM is unusable. |
6297 | * |
6298 | * Throttling is based on the first usable node and throttled processes |
6299 | * wait on a queue until kswapd makes progress and wakes them. There |
6300 | * is an affinity then between processes waking up and where reclaim |
6301 | * progress has been made assuming the process wakes on the same node. |
6302 | * More importantly, processes running on remote nodes will not compete |
6303 | * for remote pfmemalloc reserves and processes on different nodes |
6304 | * should make reasonable progress. |
6305 | */ |
6306 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
6307 | gfp_zone(gfp_mask), nodemask) { |
6308 | if (zone_idx(zone) > ZONE_NORMAL) |
6309 | continue; |
6310 | |
6311 | /* Throttle based on the first usable node */ |
6312 | pgdat = zone->zone_pgdat; |
6313 | if (allow_direct_reclaim(pgdat)) |
6314 | goto out; |
6315 | break; |
6316 | } |
6317 | |
6318 | /* If no zone was usable by the allocation flags then do not throttle */ |
6319 | if (!pgdat) |
6320 | goto out; |
6321 | |
6322 | /* Account for the throttling */ |
	count_vm_event(PGSCAN_DIRECT_THROTTLE);
6324 | |
6325 | /* |
6326 | * If the caller cannot enter the filesystem, it's possible that it |
6327 | * is due to the caller holding an FS lock or performing a journal |
6328 | * transaction in the case of a filesystem like ext[3|4]. In this case, |
6329 | * it is not safe to block on pfmemalloc_wait as kswapd could be |
6330 | * blocked waiting on the same lock. Instead, throttle for up to a |
6331 | * second before continuing. |
6332 | */ |
6333 | if (!(gfp_mask & __GFP_FS)) |
6334 | wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, |
6335 | allow_direct_reclaim(pgdat), HZ); |
6336 | else |
6337 | /* Throttle until kswapd wakes the process */ |
6338 | wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, |
6339 | allow_direct_reclaim(pgdat)); |
6340 | |
6341 | if (fatal_signal_pending(current)) |
6342 | return true; |
6343 | |
6344 | out: |
6345 | return false; |
6346 | } |
6347 | |
6348 | unsigned long try_to_free_pages(struct zonelist *zonelist, int order, |
6349 | gfp_t gfp_mask, nodemask_t *nodemask) |
6350 | { |
6351 | unsigned long nr_reclaimed; |
6352 | struct scan_control sc = { |
6353 | .nr_to_reclaim = SWAP_CLUSTER_MAX, |
		.gfp_mask = current_gfp_context(gfp_mask),
		.reclaim_idx = gfp_zone(gfp_mask),
6356 | .order = order, |
6357 | .nodemask = nodemask, |
6358 | .priority = DEF_PRIORITY, |
6359 | .may_writepage = !laptop_mode, |
6360 | .may_unmap = 1, |
6361 | .may_swap = 1, |
6362 | }; |
6363 | |
6364 | /* |
6365 | * scan_control uses s8 fields for order, priority, and reclaim_idx. |
6366 | * Confirm they are large enough for max values. |
6367 | */ |
6368 | BUILD_BUG_ON(MAX_ORDER >= S8_MAX); |
6369 | BUILD_BUG_ON(DEF_PRIORITY > S8_MAX); |
6370 | BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX); |
6371 | |
6372 | /* |
6373 | * Do not enter reclaim if fatal signal was delivered while throttled. |
6374 | * 1 is returned so that the page allocator does not OOM kill at this |
6375 | * point. |
6376 | */ |
	if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
6378 | return 1; |
6379 | |
	set_task_reclaim_state(current, &sc.reclaim_state);
	trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6384 | |
6385 | trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); |
6386 | set_task_reclaim_state(current, NULL); |
6387 | |
6388 | return nr_reclaimed; |
6389 | } |
6390 | |
6391 | #ifdef CONFIG_MEMCG |
6392 | |
6393 | /* Only used by soft limit reclaim. Do not reuse for anything else. */ |
6394 | unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, |
6395 | gfp_t gfp_mask, bool noswap, |
6396 | pg_data_t *pgdat, |
6397 | unsigned long *nr_scanned) |
6398 | { |
6399 | struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); |
6400 | struct scan_control sc = { |
6401 | .nr_to_reclaim = SWAP_CLUSTER_MAX, |
6402 | .target_mem_cgroup = memcg, |
6403 | .may_writepage = !laptop_mode, |
6404 | .may_unmap = 1, |
6405 | .reclaim_idx = MAX_NR_ZONES - 1, |
6406 | .may_swap = !noswap, |
6407 | }; |
6408 | |
6409 | WARN_ON_ONCE(!current->reclaim_state); |
6410 | |
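	/*
	 * Keep only the caller's reclaim-relevant GFP bits and take the zone
	 * and movability bits from GFP_HIGHUSER_MOVABLE so that every zone on
	 * the node is eligible for this memcg-targeted reclaim.
	 */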
6411 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | |
6412 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); |
6413 | |
	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
						      sc.gfp_mask);
6416 | |
6417 | /* |
6418 | * NOTE: Although we can get the priority field, using it |
6419 | * here is not a good idea, since it limits the pages we can scan. |
	 * If we don't reclaim here, the shrink_node from balance_pgdat
	 * will pick up pages from other mem cgroups as well. We hack
6422 | * the priority and make it zero. |
6423 | */ |
	shrink_lruvec(lruvec, &sc);

	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
6427 | |
6428 | *nr_scanned = sc.nr_scanned; |
6429 | |
6430 | return sc.nr_reclaimed; |
6431 | } |
6432 | |
6433 | unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, |
6434 | unsigned long nr_pages, |
6435 | gfp_t gfp_mask, |
6436 | unsigned int reclaim_options) |
6437 | { |
6438 | unsigned long nr_reclaimed; |
6439 | unsigned int noreclaim_flag; |
6440 | struct scan_control sc = { |
6441 | .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), |
6442 | .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | |
6443 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), |
6444 | .reclaim_idx = MAX_NR_ZONES - 1, |
6445 | .target_mem_cgroup = memcg, |
6446 | .priority = DEF_PRIORITY, |
6447 | .may_writepage = !laptop_mode, |
6448 | .may_unmap = 1, |
6449 | .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP), |
6450 | .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE), |
6451 | }; |
6452 | /* |
6453 | * Traverse the ZONELIST_FALLBACK zonelist of the current node to put |
6454 | * equal pressure on all the nodes. This is based on the assumption that |
6455 | * the reclaim does not bail out early. |
6456 | */ |
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
6458 | |
	set_task_reclaim_state(current, &sc.reclaim_state);
	trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
6461 | noreclaim_flag = memalloc_noreclaim_save(); |
6462 | |
	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	memalloc_noreclaim_restore(noreclaim_flag);
6466 | trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); |
6467 | set_task_reclaim_state(current, NULL); |
6468 | |
6469 | return nr_reclaimed; |
6470 | } |
6471 | #endif |
6472 | |
6473 | static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc) |
6474 | { |
6475 | struct mem_cgroup *memcg; |
6476 | struct lruvec *lruvec; |
6477 | |
6478 | if (lru_gen_enabled()) { |
6479 | lru_gen_age_node(pgdat, sc); |
6480 | return; |
6481 | } |
6482 | |
6483 | if (!can_age_anon_pages(pgdat, sc)) |
6484 | return; |
6485 | |
6486 | lruvec = mem_cgroup_lruvec(NULL, pgdat); |
	if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
6488 | return; |
6489 | |
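	/*
	 * Walk every memcg on this node and deactivate a batch of active anon
	 * pages so the inactive anon list stays populated for reclaim.
	 */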
6490 | memcg = mem_cgroup_iter(NULL, NULL, NULL); |
6491 | do { |
6492 | lruvec = mem_cgroup_lruvec(memcg, pgdat); |
6493 | shrink_active_list(SWAP_CLUSTER_MAX, lruvec, |
				   sc, LRU_ACTIVE_ANON);
6495 | memcg = mem_cgroup_iter(NULL, memcg, NULL); |
6496 | } while (memcg); |
6497 | } |
6498 | |
6499 | static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) |
6500 | { |
6501 | int i; |
6502 | struct zone *zone; |
6503 | |
6504 | /* |
6505 | * Check for watermark boosts top-down as the higher zones |
6506 | * are more likely to be boosted. Both watermarks and boosts |
6507 | * should not be checked at the same time as reclaim would |
6508 | * start prematurely when there is no boosting and a lower |
6509 | * zone is balanced. |
6510 | */ |
6511 | for (i = highest_zoneidx; i >= 0; i--) { |
6512 | zone = pgdat->node_zones + i; |
6513 | if (!managed_zone(zone)) |
6514 | continue; |
6515 | |
6516 | if (zone->watermark_boost) |
6517 | return true; |
6518 | } |
6519 | |
6520 | return false; |
6521 | } |
6522 | |
6523 | /* |
6524 | * Returns true if there is an eligible zone balanced for the request order |
6525 | * and highest_zoneidx |
6526 | */ |
6527 | static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) |
6528 | { |
6529 | int i; |
6530 | unsigned long mark = -1; |
6531 | struct zone *zone; |
6532 | |
6533 | /* |
6534 | * Check watermarks bottom-up as lower zones are more likely to |
6535 | * meet watermarks. |
6536 | */ |
6537 | for (i = 0; i <= highest_zoneidx; i++) { |
6538 | zone = pgdat->node_zones + i; |
6539 | |
6540 | if (!managed_zone(zone)) |
6541 | continue; |
6542 | |
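		/*
		 * With NUMA balancing memory tiering enabled, use the promo
		 * watermark so that extra free pages are kept for promoting
		 * pages from slower memory tiers.
		 */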
6543 | if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) |
6544 | mark = wmark_pages(zone, WMARK_PROMO); |
6545 | else |
6546 | mark = high_wmark_pages(zone); |
		if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx))
6548 | return true; |
6549 | } |
6550 | |
6551 | /* |
6552 | * If a node has no managed zone within highest_zoneidx, it does not |
6553 | * need balancing by definition. This can happen if a zone-restricted |
6554 | * allocation tries to wake a remote kswapd. |
6555 | */ |
6556 | if (mark == -1) |
6557 | return true; |
6558 | |
6559 | return false; |
6560 | } |
6561 | |
6562 | /* Clear pgdat state for congested, dirty or under writeback. */ |
6563 | static void clear_pgdat_congested(pg_data_t *pgdat) |
6564 | { |
6565 | struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); |
6566 | |
	clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags);
	clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
	clear_bit(PGDAT_DIRTY, &pgdat->flags);
	clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
6571 | } |
6572 | |
6573 | /* |
6574 | * Prepare kswapd for sleeping. This verifies that there are no processes |
6575 | * waiting in throttle_direct_reclaim() and that watermarks have been met. |
6576 | * |
6577 | * Returns true if kswapd is ready to sleep |
6578 | */ |
6579 | static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, |
6580 | int highest_zoneidx) |
6581 | { |
6582 | /* |
6583 | * The throttled processes are normally woken up in balance_pgdat() as |
6584 | * soon as allow_direct_reclaim() is true. But there is a potential |
6585 | * race between when kswapd checks the watermarks and a process gets |
6586 | * throttled. There is also a potential race if processes get |
6587 | * throttled, kswapd wakes, a large process exits thereby balancing the |
6588 | * zones, which causes kswapd to exit balance_pgdat() before reaching |
6589 | * the wake up checks. If kswapd is going to sleep, no process should |
6590 | * be sleeping on pfmemalloc_wait, so wake them now if necessary. If |
6591 | * the wake up is premature, processes will wake kswapd and get |
6592 | * throttled again. The difference from wake ups in balance_pgdat() is |
6593 | * that here we are under prepare_to_wait(). |
6594 | */ |
	if (waitqueue_active(&pgdat->pfmemalloc_wait))
6596 | wake_up_all(&pgdat->pfmemalloc_wait); |
6597 | |
6598 | /* Hopeless node, leave it to direct reclaim */ |
6599 | if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) |
6600 | return true; |
6601 | |
6602 | if (pgdat_balanced(pgdat, order, highest_zoneidx)) { |
6603 | clear_pgdat_congested(pgdat); |
6604 | return true; |
6605 | } |
6606 | |
6607 | return false; |
6608 | } |
6609 | |
6610 | /* |
6611 | * kswapd shrinks a node of pages that are at or below the highest usable |
6612 | * zone that is currently unbalanced. |
6613 | * |
6614 | * Returns true if kswapd scanned at least the requested number of pages to |
6615 | * reclaim or if the lack of progress was due to pages under writeback. |
6616 | * This is used to determine if the scanning priority needs to be raised. |
6617 | */ |
6618 | static bool kswapd_shrink_node(pg_data_t *pgdat, |
6619 | struct scan_control *sc) |
6620 | { |
6621 | struct zone *zone; |
6622 | int z; |
6623 | |
6624 | /* Reclaim a number of pages proportional to the number of zones */ |
6625 | sc->nr_to_reclaim = 0; |
6626 | for (z = 0; z <= sc->reclaim_idx; z++) { |
6627 | zone = pgdat->node_zones + z; |
6628 | if (!managed_zone(zone)) |
6629 | continue; |
6630 | |
6631 | sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); |
6632 | } |
6633 | |
6634 | /* |
6635 | * Historically care was taken to put equal pressure on all zones but |
6636 | * now pressure is applied based on node LRU order. |
6637 | */ |
6638 | shrink_node(pgdat, sc); |
6639 | |
6640 | /* |
6641 | * Fragmentation may mean that the system cannot be rebalanced for |
6642 | * high-order allocations. If twice the allocation size has been |
6643 | * reclaimed then recheck watermarks only at order-0 to prevent |
	 * excessive reclaim. Assume that a process that requested a high-order
	 * allocation can direct reclaim/compact.
6646 | */ |
	if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
6648 | sc->order = 0; |
6649 | |
6650 | return sc->nr_scanned >= sc->nr_to_reclaim; |
6651 | } |
6652 | |
6653 | /* Page allocator PCP high watermark is lowered if reclaim is active. */ |
6654 | static inline void |
6655 | update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active) |
6656 | { |
6657 | int i; |
6658 | struct zone *zone; |
6659 | |
6660 | for (i = 0; i <= highest_zoneidx; i++) { |
6661 | zone = pgdat->node_zones + i; |
6662 | |
6663 | if (!managed_zone(zone)) |
6664 | continue; |
6665 | |
6666 | if (active) |
			set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
		else
			clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
6670 | } |
6671 | } |
6672 | |
6673 | static inline void |
6674 | set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) |
6675 | { |
	update_reclaim_active(pgdat, highest_zoneidx, true);
6677 | } |
6678 | |
6679 | static inline void |
6680 | clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) |
6681 | { |
	update_reclaim_active(pgdat, highest_zoneidx, false);
6683 | } |
6684 | |
6685 | /* |
6686 | * For kswapd, balance_pgdat() will reclaim pages across a node from zones |
6687 | * that are eligible for use by the caller until at least one zone is |
6688 | * balanced. |
6689 | * |
6690 | * Returns the order kswapd finished reclaiming at. |
6691 | * |
6692 | * kswapd scans the zones in the highmem->normal->dma direction. It skips |
6693 | * zones which have free_pages > high_wmark_pages(zone), but once a zone is |
6694 | * found to have free_pages <= high_wmark_pages(zone), any page in that zone |
6695 | * or lower is eligible for reclaim until at least one usable zone is |
6696 | * balanced. |
6697 | */ |
6698 | static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) |
6699 | { |
6700 | int i; |
6701 | unsigned long nr_soft_reclaimed; |
6702 | unsigned long nr_soft_scanned; |
6703 | unsigned long pflags; |
6704 | unsigned long nr_boost_reclaim; |
6705 | unsigned long zone_boosts[MAX_NR_ZONES] = { 0, }; |
6706 | bool boosted; |
6707 | struct zone *zone; |
6708 | struct scan_control sc = { |
6709 | .gfp_mask = GFP_KERNEL, |
6710 | .order = order, |
6711 | .may_unmap = 1, |
6712 | }; |
6713 | |
	set_task_reclaim_state(current, &sc.reclaim_state);
	psi_memstall_enter(&pflags);
	__fs_reclaim_acquire(_THIS_IP_);

	count_vm_event(PAGEOUTRUN);
6719 | |
6720 | /* |
6721 | * Account for the reclaim boost. Note that the zone boost is left in |
6722 | * place so that parallel allocations that are near the watermark will |
6723 | * stall or direct reclaim until kswapd is finished. |
6724 | */ |
6725 | nr_boost_reclaim = 0; |
6726 | for (i = 0; i <= highest_zoneidx; i++) { |
6727 | zone = pgdat->node_zones + i; |
6728 | if (!managed_zone(zone)) |
6729 | continue; |
6730 | |
6731 | nr_boost_reclaim += zone->watermark_boost; |
6732 | zone_boosts[i] = zone->watermark_boost; |
6733 | } |
6734 | boosted = nr_boost_reclaim; |
6735 | |
6736 | restart: |
6737 | set_reclaim_active(pgdat, highest_zoneidx); |
6738 | sc.priority = DEF_PRIORITY; |
6739 | do { |
6740 | unsigned long nr_reclaimed = sc.nr_reclaimed; |
6741 | bool raise_priority = true; |
6742 | bool balanced; |
6743 | bool ret; |
6744 | |
6745 | sc.reclaim_idx = highest_zoneidx; |
6746 | |
6747 | /* |
6748 | * If the number of buffer_heads exceeds the maximum allowed |
6749 | * then consider reclaiming from all zones. This has a dual |
6750 | * purpose -- on 64-bit systems it is expected that |
6751 | * buffer_heads are stripped during active rotation. On 32-bit |
6752 | * systems, highmem pages can pin lowmem memory and shrinking |
6753 | * buffers can relieve lowmem pressure. Reclaim may still not |
6754 | * go ahead if all eligible zones for the original allocation |
6755 | * request are balanced to avoid excessive reclaim from kswapd. |
6756 | */ |
6757 | if (buffer_heads_over_limit) { |
6758 | for (i = MAX_NR_ZONES - 1; i >= 0; i--) { |
6759 | zone = pgdat->node_zones + i; |
6760 | if (!managed_zone(zone)) |
6761 | continue; |
6762 | |
6763 | sc.reclaim_idx = i; |
6764 | break; |
6765 | } |
6766 | } |
6767 | |
6768 | /* |
6769 | * If the pgdat is imbalanced then ignore boosting and preserve |
6770 | * the watermarks for a later time and restart. Note that the |
6771 | * zone watermarks will be still reset at the end of balancing |
6772 | * on the grounds that the normal reclaim should be enough to |
6773 | * re-evaluate if boosting is required when kswapd next wakes. |
6774 | */ |
		balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
6776 | if (!balanced && nr_boost_reclaim) { |
6777 | nr_boost_reclaim = 0; |
6778 | goto restart; |
6779 | } |
6780 | |
6781 | /* |
6782 | * If boosting is not active then only reclaim if there are no |
6783 | * eligible zones. Note that sc.reclaim_idx is not used as |
6784 | * buffer_heads_over_limit may have adjusted it. |
6785 | */ |
6786 | if (!nr_boost_reclaim && balanced) |
6787 | goto out; |
6788 | |
6789 | /* Limit the priority of boosting to avoid reclaim writeback */ |
6790 | if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) |
6791 | raise_priority = false; |
6792 | |
6793 | /* |
6794 | * Do not writeback or swap pages for boosted reclaim. The |
6795 | * intent is to relieve pressure not issue sub-optimal IO |
6796 | * from reclaim context. If no pages are reclaimed, the |
6797 | * reclaim will be aborted. |
6798 | */ |
6799 | sc.may_writepage = !laptop_mode && !nr_boost_reclaim; |
6800 | sc.may_swap = !nr_boost_reclaim; |
6801 | |
6802 | /* |
6803 | * Do some background aging, to give pages a chance to be |
6804 | * referenced before reclaiming. All pages are rotated |
6805 | * regardless of classzone as this is about consistent aging. |
6806 | */ |
		kswapd_age_node(pgdat, &sc);
6808 | |
6809 | /* |
6810 | * If we're getting trouble reclaiming, start doing writepage |
6811 | * even in laptop mode. |
6812 | */ |
6813 | if (sc.priority < DEF_PRIORITY - 2) |
6814 | sc.may_writepage = 1; |
6815 | |
6816 | /* Call soft limit reclaim before calling shrink_node. */ |
6817 | sc.nr_scanned = 0; |
6818 | nr_soft_scanned = 0; |
		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
						sc.gfp_mask, &nr_soft_scanned);
6821 | sc.nr_reclaimed += nr_soft_reclaimed; |
6822 | |
6823 | /* |
6824 | * There should be no need to raise the scanning priority if |
		 * enough pages are already being scanned that the high
6826 | * watermark would be met at 100% efficiency. |
6827 | */ |
		if (kswapd_shrink_node(pgdat, &sc))
6829 | raise_priority = false; |
6830 | |
6831 | /* |
6832 | * If the low watermark is met there is no need for processes |
		 * to be throttled on pfmemalloc_wait as they should now be
		 * able to safely make forward progress. Wake them.
6835 | */ |
		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
6837 | allow_direct_reclaim(pgdat)) |
6838 | wake_up_all(&pgdat->pfmemalloc_wait); |
6839 | |
6840 | /* Check if kswapd should be suspending */ |
6841 | __fs_reclaim_release(_THIS_IP_); |
6842 | ret = try_to_freeze(); |
6843 | __fs_reclaim_acquire(_THIS_IP_); |
6844 | if (ret || kthread_should_stop()) |
6845 | break; |
6846 | |
6847 | /* |
6848 | * Raise priority if scanning rate is too low or there was no |
6849 | * progress in reclaiming pages |
6850 | */ |
6851 | nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; |
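		/* Credit the progress made this pass against the boost target. */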
6852 | nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); |
6853 | |
6854 | /* |
6855 | * If reclaim made no progress for a boost, stop reclaim as |
6856 | * IO cannot be queued and it could be an infinite loop in |
6857 | * extreme circumstances. |
6858 | */ |
6859 | if (nr_boost_reclaim && !nr_reclaimed) |
6860 | break; |
6861 | |
6862 | if (raise_priority || !nr_reclaimed) |
6863 | sc.priority--; |
6864 | } while (sc.priority >= 1); |
6865 | |
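	/*
	 * If the full pass reclaimed nothing, record a failure; after
	 * MAX_RECLAIM_RETRIES such passes the node is treated as hopeless
	 * and left to direct reclaim.
	 */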
6866 | if (!sc.nr_reclaimed) |
6867 | pgdat->kswapd_failures++; |
6868 | |
6869 | out: |
6870 | clear_reclaim_active(pgdat, highest_zoneidx); |
6871 | |
6872 | /* If reclaim was boosted, account for the reclaim done in this pass */ |
6873 | if (boosted) { |
6874 | unsigned long flags; |
6875 | |
6876 | for (i = 0; i <= highest_zoneidx; i++) { |
6877 | if (!zone_boosts[i]) |
6878 | continue; |
6879 | |
6880 | /* Increments are under the zone lock */ |
6881 | zone = pgdat->node_zones + i; |
6882 | spin_lock_irqsave(&zone->lock, flags); |
6883 | zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); |
			spin_unlock_irqrestore(&zone->lock, flags);
6885 | } |
6886 | |
6887 | /* |
		 * As there is now likely space, wake up kcompactd to defragment
6889 | * pageblocks. |
6890 | */ |
6891 | wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx); |
6892 | } |
6893 | |
6894 | snapshot_refaults(NULL, pgdat); |
6895 | __fs_reclaim_release(_THIS_IP_); |
	psi_memstall_leave(&pflags);
6897 | set_task_reclaim_state(current, NULL); |
6898 | |
6899 | /* |
6900 | * Return the order kswapd stopped reclaiming at as |
6901 | * prepare_kswapd_sleep() takes it into account. If another caller |
6902 | * entered the allocator slow path while kswapd was awake, order will |
6903 | * remain at the higher level. |
6904 | */ |
6905 | return sc.order; |
6906 | } |
6907 | |
6908 | /* |
6909 | * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to |
6910 | * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is |
 * not a valid index then either kswapd runs for the first time or kswapd couldn't
6912 | * sleep after previous reclaim attempt (node is still unbalanced). In that |
6913 | * case return the zone index of the previous kswapd reclaim cycle. |
6914 | */ |
6915 | static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat, |
6916 | enum zone_type prev_highest_zoneidx) |
6917 | { |
6918 | enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); |
6919 | |
6920 | return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx; |
6921 | } |
6922 | |
6923 | static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, |
6924 | unsigned int highest_zoneidx) |
6925 | { |
6926 | long remaining = 0; |
6927 | DEFINE_WAIT(wait); |
6928 | |
6929 | if (freezing(current) || kthread_should_stop()) |
6930 | return; |
6931 | |
	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
6933 | |
6934 | /* |
6935 | * Try to sleep for a short interval. Note that kcompactd will only be |
6936 | * woken if it is possible to sleep for a short interval. This is |
6937 | * deliberate on the assumption that if reclaim cannot keep an |
	 * eligible zone balanced then it is also unlikely that compaction will
6939 | * succeed. |
6940 | */ |
	if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
6942 | /* |
6943 | * Compaction records what page blocks it recently failed to |
6944 | * isolate pages from and skips them in the future scanning. |
6945 | * When kswapd is going to sleep, it is reasonable to assume |
		 * that isolation and compaction may now succeed, so reset the cache.
6947 | */ |
6948 | reset_isolation_suitable(pgdat); |
6949 | |
6950 | /* |
6951 | * We have freed the memory, now we should compact it to make |
6952 | * allocation of the requested order possible. |
6953 | */ |
		wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);
6955 | |
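		/* Doze briefly; a non-zero return means we were woken early. */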
6956 | remaining = schedule_timeout(HZ/10); |
6957 | |
6958 | /* |
6959 | * If woken prematurely then reset kswapd_highest_zoneidx and |
6960 | * order. The values will either be from a wakeup request or |
6961 | * the previous request that slept prematurely. |
6962 | */ |
6963 | if (remaining) { |
6964 | WRITE_ONCE(pgdat->kswapd_highest_zoneidx, |
6965 | kswapd_highest_zoneidx(pgdat, |
6966 | highest_zoneidx)); |
6967 | |
6968 | if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) |
6969 | WRITE_ONCE(pgdat->kswapd_order, reclaim_order); |
6970 | } |
6971 | |
		finish_wait(&pgdat->kswapd_wait, &wait);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
6974 | } |
6975 | |
6976 | /* |
6977 | * After a short sleep, check if it was a premature sleep. If not, then |
6978 | * go fully to sleep until explicitly woken up. |
6979 | */ |
6980 | if (!remaining && |
	    prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
6983 | |
6984 | /* |
6985 | * vmstat counters are not perfectly accurate and the estimated |
6986 | * value for counters such as NR_FREE_PAGES can deviate from the |
6987 | * true value by nr_online_cpus * threshold. To avoid the zone |
6988 | * watermarks being breached while under pressure, we reduce the |
6989 | * per-cpu vmstat threshold while kswapd is awake and restore |
6990 | * them before going back to sleep. |
6991 | */ |
		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
6993 | |
6994 | if (!kthread_should_stop()) |
6995 | schedule(); |
6996 | |
		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
	} else {
		if (remaining)
			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
		else
			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
	}
	finish_wait(&pgdat->kswapd_wait, &wait);
7005 | } |
7006 | |
7007 | /* |
7008 | * The background pageout daemon, started as a kernel thread |
7009 | * from the init process. |
7010 | * |
7011 | * This basically trickles out pages so that we have _some_ |
7012 | * free memory available even if there is no other activity |
7013 | * that frees anything up. This is needed for things like routing |
7014 | * etc, where we otherwise might have all activity going on in |
7015 | * asynchronous contexts that cannot page things out. |
7016 | * |
7017 | * If there are applications that are active memory-allocators |
7018 | * (most normal use), this basically shouldn't matter. |
7019 | */ |
7020 | static int kswapd(void *p) |
7021 | { |
7022 | unsigned int alloc_order, reclaim_order; |
7023 | unsigned int highest_zoneidx = MAX_NR_ZONES - 1; |
7024 | pg_data_t *pgdat = (pg_data_t *)p; |
7025 | struct task_struct *tsk = current; |
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);
7030 | |
7031 | /* |
7032 | * Tell the memory management that we're a "memory allocator", |
7033 | * and that if we need more memory we should get access to it |
7034 | * regardless (see "__alloc_pages()"). "kswapd" should |
7035 | * never get caught in the normal page freeing logic. |
7036 | * |
7037 | * (Kswapd normally doesn't need memory anyway, but sometimes |
7038 | * you need a small amount of memory in order to be able to |
7039 | * page out something else, and this flag essentially protects |
7040 | * us from recursively trying to free more memory as we're |
7041 | * trying to free the first piece of memory in the first place). |
7042 | */ |
7043 | tsk->flags |= PF_MEMALLOC | PF_KSWAPD; |
7044 | set_freezable(); |
7045 | |
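	/* Start with no pending reclaim request recorded for this node. */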
7046 | WRITE_ONCE(pgdat->kswapd_order, 0); |
7047 | WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); |
	atomic_set(&pgdat->nr_writeback_throttled, 0);
7049 | for ( ; ; ) { |
7050 | bool ret; |
7051 | |
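		/* Pick up any reclaim request recorded while kswapd was busy. */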
7052 | alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); |
7053 | highest_zoneidx = kswapd_highest_zoneidx(pgdat, |
							   highest_zoneidx);
7055 | |
7056 | kswapd_try_sleep: |
7057 | kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, |
7058 | highest_zoneidx); |
7059 | |
7060 | /* Read the new order and highest_zoneidx */ |
7061 | alloc_order = READ_ONCE(pgdat->kswapd_order); |
7062 | highest_zoneidx = kswapd_highest_zoneidx(pgdat, |
							   highest_zoneidx);
7064 | WRITE_ONCE(pgdat->kswapd_order, 0); |
7065 | WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); |
7066 | |
7067 | ret = try_to_freeze(); |
7068 | if (kthread_should_stop()) |
7069 | break; |
7070 | |
7071 | /* |
7072 | * We can speed up thawing tasks if we don't call balance_pgdat |
7073 | * after returning from the refrigerator |
7074 | */ |
7075 | if (ret) |
7076 | continue; |
7077 | |
7078 | /* |
7079 | * Reclaim begins at the requested order but if a high-order |
7080 | * reclaim fails then kswapd falls back to reclaiming for |
7081 | * order-0. If that happens, kswapd will consider sleeping |
7082 | * for the order it finished reclaiming at (reclaim_order) |
7083 | * but kcompactd is woken to compact for the original |
7084 | * request (alloc_order). |
7085 | */ |
		trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
						alloc_order);
		reclaim_order = balance_pgdat(pgdat, alloc_order,
7089 | highest_zoneidx); |
7090 | if (reclaim_order < alloc_order) |
7091 | goto kswapd_try_sleep; |
7092 | } |
7093 | |
7094 | tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); |
7095 | |
7096 | return 0; |
7097 | } |
7098 | |
7099 | /* |
7100 | * A zone is low on free memory or too fragmented for high-order memory. If |
7101 | * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's |
7102 | * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim |
7103 | * has failed or is not needed, still wake up kcompactd if only compaction is |
7104 | * needed. |
7105 | */ |
7106 | void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, |
7107 | enum zone_type highest_zoneidx) |
7108 | { |
7109 | pg_data_t *pgdat; |
7110 | enum zone_type curr_idx; |
7111 | |
7112 | if (!managed_zone(zone)) |
7113 | return; |
7114 | |
	if (!cpuset_zone_allowed(zone, gfp_flags))
7116 | return; |
7117 | |
7118 | pgdat = zone->zone_pgdat; |
7119 | curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); |
7120 | |
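	/* Record the most demanding of the pending requests for kswapd. */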
7121 | if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx) |
7122 | WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); |
7123 | |
7124 | if (READ_ONCE(pgdat->kswapd_order) < order) |
7125 | WRITE_ONCE(pgdat->kswapd_order, order); |
7126 | |
	if (!waitqueue_active(&pgdat->kswapd_wait))
7128 | return; |
7129 | |
7130 | /* Hopeless node, leave it to direct reclaim if possible */ |
7131 | if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || |
7132 | (pgdat_balanced(pgdat, order, highest_zoneidx) && |
7133 | !pgdat_watermark_boosted(pgdat, highest_zoneidx))) { |
7134 | /* |
7135 | * There may be plenty of free memory available, but it's too |
7136 | * fragmented for high-order allocations. Wake up kcompactd |
7137 | * and rely on compaction_suitable() to determine if it's |
7138 | * needed. If it fails, it will defer subsequent attempts to |
7139 | * ratelimit its work. |
7140 | */ |
7141 | if (!(gfp_flags & __GFP_DIRECT_RECLAIM)) |
7142 | wakeup_kcompactd(pgdat, order, highest_zoneidx); |
7143 | return; |
7144 | } |
7145 | |
	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
7147 | gfp_flags); |
7148 | wake_up_interruptible(&pgdat->kswapd_wait); |
7149 | } |
7150 | |
7151 | #ifdef CONFIG_HIBERNATION |
7152 | /* |
7153 | * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of |
7154 | * freed pages. |
7155 | * |
7156 | * Rather than trying to age LRUs the aim is to preserve the overall |
7157 | * LRU order by reclaiming preferentially |
7158 | * inactive > active > active referenced > active mapped |
7159 | */ |
7160 | unsigned long shrink_all_memory(unsigned long nr_to_reclaim) |
7161 | { |
7162 | struct scan_control sc = { |
7163 | .nr_to_reclaim = nr_to_reclaim, |
7164 | .gfp_mask = GFP_HIGHUSER_MOVABLE, |
7165 | .reclaim_idx = MAX_NR_ZONES - 1, |
7166 | .priority = DEF_PRIORITY, |
7167 | .may_writepage = 1, |
7168 | .may_unmap = 1, |
7169 | .may_swap = 1, |
7170 | .hibernation_mode = 1, |
7171 | }; |
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
7173 | unsigned long nr_reclaimed; |
7174 | unsigned int noreclaim_flag; |
7175 | |
	fs_reclaim_acquire(sc.gfp_mask);
	noreclaim_flag = memalloc_noreclaim_save();
	set_task_reclaim_state(current, &sc.reclaim_state);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	set_task_reclaim_state(current, NULL);
	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(sc.gfp_mask);
7185 | |
7186 | return nr_reclaimed; |
7187 | } |
7188 | #endif /* CONFIG_HIBERNATION */ |
7189 | |
7190 | /* |
7191 | * This kswapd start function will be called by init and node-hot-add. |
7192 | */ |
7193 | void __meminit kswapd_run(int nid) |
7194 | { |
7195 | pg_data_t *pgdat = NODE_DATA(nid); |
7196 | |
7197 | pgdat_kswapd_lock(pgdat); |
7198 | if (!pgdat->kswapd) { |
		pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
		if (IS_ERR(pgdat->kswapd)) {
			/* failure at boot is fatal */
			pr_err("Failed to start kswapd on node %d, ret=%ld\n",
			       nid, PTR_ERR(pgdat->kswapd));
7204 | BUG_ON(system_state < SYSTEM_RUNNING); |
7205 | pgdat->kswapd = NULL; |
7206 | } |
7207 | } |
7208 | pgdat_kswapd_unlock(pgdat); |
7209 | } |
7210 | |
7211 | /* |
7212 | * Called by memory hotplug when all memory in a node is offlined. Caller must |
7213 | * be holding mem_hotplug_begin/done(). |
7214 | */ |
7215 | void __meminit kswapd_stop(int nid) |
7216 | { |
7217 | pg_data_t *pgdat = NODE_DATA(nid); |
7218 | struct task_struct *kswapd; |
7219 | |
7220 | pgdat_kswapd_lock(pgdat); |
7221 | kswapd = pgdat->kswapd; |
7222 | if (kswapd) { |
		kthread_stop(kswapd);
7224 | pgdat->kswapd = NULL; |
7225 | } |
7226 | pgdat_kswapd_unlock(pgdat); |
7227 | } |
7228 | |
7229 | static int __init kswapd_init(void) |
7230 | { |
7231 | int nid; |
7232 | |
7233 | swap_setup(); |
7234 | for_each_node_state(nid, N_MEMORY) |
7235 | kswapd_run(nid); |
7236 | return 0; |
7237 | } |
7238 | |
7239 | module_init(kswapd_init) |
7240 | |
7241 | #ifdef CONFIG_NUMA |
7242 | /* |
7243 | * Node reclaim mode |
7244 | * |
7245 | * If non-zero call node_reclaim when the number of free pages falls below |
7246 | * the watermarks. |
7247 | */ |
7248 | int node_reclaim_mode __read_mostly; |
7249 | |
7250 | /* |
7251 | * Priority for NODE_RECLAIM. This determines the fraction of pages |
 * of a node considered for each node_reclaim invocation. A value of 4
 * scans 1/16th of the node.
7254 | */ |
7255 | #define NODE_RECLAIM_PRIORITY 4 |
7256 | |
7257 | /* |
7258 | * Percentage of pages in a zone that must be unmapped for node_reclaim to |
7259 | * occur. |
7260 | */ |
7261 | int sysctl_min_unmapped_ratio = 1; |
7262 | |
7263 | /* |
7264 | * If the number of slab pages in a zone grows beyond this percentage then |
7265 | * slab reclaim needs to occur. |
7266 | */ |
7267 | int sysctl_min_slab_ratio = 5; |
7268 | |
7269 | static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) |
7270 | { |
	unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
	unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
		node_page_state(pgdat, NR_ACTIVE_FILE);
7274 | |
7275 | /* |
7276 | * It's possible for there to be more file mapped pages than |
7277 | * accounted for by the pages on the file LRU lists because |
7278 | * tmpfs pages accounted for as ANON can also be FILE_MAPPED |
7279 | */ |
7280 | return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; |
7281 | } |
7282 | |
7283 | /* Work out how many page cache pages we can reclaim in this reclaim_mode */ |
7284 | static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat) |
7285 | { |
7286 | unsigned long nr_pagecache_reclaimable; |
7287 | unsigned long delta = 0; |
7288 | |
7289 | /* |
7290 | * If RECLAIM_UNMAP is set, then all file pages are considered |
7291 | * potentially reclaimable. Otherwise, we have to worry about |
7292 | * pages like swapcache and node_unmapped_file_pages() provides |
7293 | * a better estimate |
7294 | */ |
7295 | if (node_reclaim_mode & RECLAIM_UNMAP) |
		nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
7297 | else |
7298 | nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat); |
7299 | |
7300 | /* If we can't clean pages, remove dirty pages from consideration */ |
7301 | if (!(node_reclaim_mode & RECLAIM_WRITE)) |
		delta += node_page_state(pgdat, NR_FILE_DIRTY);
7303 | |
7304 | /* Watch for any possible underflows due to delta */ |
7305 | if (unlikely(delta > nr_pagecache_reclaimable)) |
7306 | delta = nr_pagecache_reclaimable; |
7307 | |
7308 | return nr_pagecache_reclaimable - delta; |
7309 | } |
7310 | |
7311 | /* |
7312 | * Try to free up some pages from this node through reclaim. |
7313 | */ |
7314 | static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) |
7315 | { |
7316 | /* Minimum pages needed in order to stay on node */ |
7317 | const unsigned long nr_pages = 1 << order; |
7318 | struct task_struct *p = current; |
7319 | unsigned int noreclaim_flag; |
7320 | struct scan_control sc = { |
7321 | .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), |
		.gfp_mask = current_gfp_context(gfp_mask),
7323 | .order = order, |
7324 | .priority = NODE_RECLAIM_PRIORITY, |
7325 | .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE), |
7326 | .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP), |
7327 | .may_swap = 1, |
		.reclaim_idx = gfp_zone(gfp_mask),
7329 | }; |
7330 | unsigned long pflags; |
7331 | |
	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
					   sc.gfp_mask);
7334 | |
7335 | cond_resched(); |
	psi_memstall_enter(&pflags);
	delayacct_freepages_start();
	fs_reclaim_acquire(sc.gfp_mask);
7339 | /* |
7340 | * We need to be able to allocate from the reserves for RECLAIM_UNMAP |
7341 | */ |
7342 | noreclaim_flag = memalloc_noreclaim_save(); |
	set_task_reclaim_state(p, &sc.reclaim_state);
7344 | |
7345 | if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages || |
	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) {
7347 | /* |
7348 | * Free memory by calling shrink node with increasing |
7349 | * priorities until we have enough memory freed. |
7350 | */ |
7351 | do { |
			shrink_node(pgdat, &sc);
7353 | } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); |
7354 | } |
7355 | |
	set_task_reclaim_state(p, NULL);
	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(sc.gfp_mask);
	psi_memstall_leave(&pflags);
7360 | delayacct_freepages_end(); |
7361 | |
	trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
7363 | |
7364 | return sc.nr_reclaimed >= nr_pages; |
7365 | } |
7366 | |
7367 | int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) |
7368 | { |
7369 | int ret; |
7370 | |
7371 | /* |
7372 | * Node reclaim reclaims unmapped file backed pages and |
7373 | * slab pages if we are over the defined limits. |
7374 | * |
7375 | * A small portion of unmapped file backed pages is needed for |
7376 | * file I/O otherwise pages read by file I/O will be immediately |
7377 | * thrown out if the node is overallocated. So we do not reclaim |
7378 | * if less than a specified percentage of the node is used by |
7379 | * unmapped file backed pages. |
7380 | */ |
7381 | if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && |
	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
7383 | pgdat->min_slab_pages) |
7384 | return NODE_RECLAIM_FULL; |
7385 | |
7386 | /* |
7387 | * Do not scan if the allocation should not be delayed. |
7388 | */ |
	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
7390 | return NODE_RECLAIM_NOSCAN; |
7391 | |
7392 | /* |
7393 | * Only run node reclaim on the local node or on nodes that do not |
7394 | * have associated processors. This will favor the local processor |
7395 | * over remote processors and spread off node memory allocations |
7396 | * as wide as possible. |
7397 | */ |
	if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
7399 | return NODE_RECLAIM_NOSCAN; |
7400 | |
	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
7402 | return NODE_RECLAIM_NOSCAN; |
7403 | |
7404 | ret = __node_reclaim(pgdat, gfp_mask, order); |
	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
7409 | |
7410 | return ret; |
7411 | } |
7412 | #endif |
7413 | |
7414 | /** |
7415 | * check_move_unevictable_folios - Move evictable folios to appropriate zone |
7416 | * lru list |
7417 | * @fbatch: Batch of lru folios to check. |
7418 | * |
7419 | * Checks folios for evictability, if an evictable folio is in the unevictable |
7420 | * lru list, moves it to the appropriate evictable lru list. This function |
7421 | * should be only used for lru folios. |
7422 | */ |
7423 | void check_move_unevictable_folios(struct folio_batch *fbatch) |
7424 | { |
7425 | struct lruvec *lruvec = NULL; |
7426 | int pgscanned = 0; |
7427 | int pgrescued = 0; |
7428 | int i; |
7429 | |
7430 | for (i = 0; i < fbatch->nr; i++) { |
7431 | struct folio *folio = fbatch->folios[i]; |
7432 | int nr_pages = folio_nr_pages(folio); |
7433 | |
7434 | pgscanned += nr_pages; |
7435 | |
7436 | /* block memcg migration while the folio moves between lrus */ |
7437 | if (!folio_test_clear_lru(folio)) |
7438 | continue; |
7439 | |
		lruvec = folio_lruvec_relock_irq(folio, lruvec);
7441 | if (folio_evictable(folio) && folio_test_unevictable(folio)) { |
7442 | lruvec_del_folio(lruvec, folio); |
7443 | folio_clear_unevictable(folio); |
7444 | lruvec_add_folio(lruvec, folio); |
7445 | pgrescued += nr_pages; |
7446 | } |
7447 | folio_set_lru(folio); |
7448 | } |
7449 | |
7450 | if (lruvec) { |
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		unlock_page_lruvec_irq(lruvec);
	} else if (pgscanned) {
		count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
7456 | } |
7457 | } |
7458 | EXPORT_SYMBOL_GPL(check_move_unevictable_folios); |
7459 | |