1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | /* memcontrol.h - Memory Controller |
3 | * |
4 | * Copyright IBM Corporation, 2007 |
5 | * Author Balbir Singh <balbir@linux.vnet.ibm.com> |
6 | * |
7 | * Copyright 2007 OpenVZ SWsoft Inc |
8 | * Author: Pavel Emelianov <xemul@openvz.org> |
9 | */ |
10 | |
11 | #ifndef _LINUX_MEMCONTROL_H |
12 | #define _LINUX_MEMCONTROL_H |
13 | #include <linux/cgroup.h> |
14 | #include <linux/vm_event_item.h> |
15 | #include <linux/hardirq.h> |
16 | #include <linux/jump_label.h> |
17 | #include <linux/page_counter.h> |
18 | #include <linux/vmpressure.h> |
19 | #include <linux/eventfd.h> |
20 | #include <linux/mm.h> |
21 | #include <linux/vmstat.h> |
22 | #include <linux/writeback.h> |
23 | #include <linux/page-flags.h> |
24 | #include <linux/shrinker.h> |
25 | |
26 | struct mem_cgroup; |
27 | struct obj_cgroup; |
28 | struct page; |
29 | struct mm_struct; |
30 | struct kmem_cache; |
31 | |
32 | /* Cgroup-specific page state, on top of universal node page state */ |
33 | enum memcg_stat_item { |
34 | MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS, |
35 | MEMCG_SOCK, |
36 | MEMCG_PERCPU_B, |
37 | MEMCG_VMALLOC, |
38 | MEMCG_KMEM, |
39 | MEMCG_ZSWAP_B, |
40 | MEMCG_ZSWAPPED, |
41 | MEMCG_NR_STAT, |
42 | }; |
43 | |
44 | enum memcg_memory_event { |
45 | MEMCG_LOW, |
46 | MEMCG_HIGH, |
47 | MEMCG_MAX, |
48 | MEMCG_OOM, |
49 | MEMCG_OOM_KILL, |
50 | MEMCG_OOM_GROUP_KILL, |
51 | MEMCG_SWAP_HIGH, |
52 | MEMCG_SWAP_MAX, |
53 | MEMCG_SWAP_FAIL, |
54 | MEMCG_NR_MEMORY_EVENTS, |
55 | }; |
56 | |
57 | struct mem_cgroup_reclaim_cookie { |
58 | pg_data_t *pgdat; |
59 | unsigned int generation; |
60 | }; |
61 | |
62 | #ifdef CONFIG_MEMCG |
63 | |
64 | #define MEM_CGROUP_ID_SHIFT 16 |
65 | |
66 | struct mem_cgroup_id { |
67 | int id; |
68 | refcount_t ref; |
69 | }; |
70 | |
71 | /* |
 * The per-memcg event counter is incremented on every pagein/pageout. With
 * THP, it is incremented by the number of pages. The counter is used to
 * trigger periodic per-memcg events; this is simpler and cheaper than
 * using jiffies etc. for that purpose.
76 | */ |
77 | enum mem_cgroup_events_target { |
78 | MEM_CGROUP_TARGET_THRESH, |
79 | MEM_CGROUP_TARGET_SOFTLIMIT, |
80 | MEM_CGROUP_NTARGETS, |
81 | }; |
82 | |
83 | struct memcg_vmstats_percpu; |
84 | struct memcg_vmstats; |
85 | |
86 | struct mem_cgroup_reclaim_iter { |
87 | struct mem_cgroup *position; |
88 | /* scan generation, increased every round-trip */ |
89 | unsigned int generation; |
90 | }; |
91 | |
92 | struct lruvec_stats_percpu { |
93 | /* Local (CPU and cgroup) state */ |
94 | long state[NR_VM_NODE_STAT_ITEMS]; |
95 | |
96 | /* Delta calculation for lockless upward propagation */ |
97 | long state_prev[NR_VM_NODE_STAT_ITEMS]; |
98 | }; |
99 | |
100 | struct lruvec_stats { |
101 | /* Aggregated (CPU and subtree) state */ |
102 | long state[NR_VM_NODE_STAT_ITEMS]; |
103 | |
104 | /* Non-hierarchical (CPU aggregated) state */ |
105 | long state_local[NR_VM_NODE_STAT_ITEMS]; |
106 | |
107 | /* Pending child counts during tree propagation */ |
108 | long state_pending[NR_VM_NODE_STAT_ITEMS]; |
109 | }; |
110 | |
111 | /* |
112 | * per-node information in memory controller. |
113 | */ |
114 | struct mem_cgroup_per_node { |
115 | struct lruvec lruvec; |
116 | |
117 | struct lruvec_stats_percpu __percpu *lruvec_stats_percpu; |
118 | struct lruvec_stats lruvec_stats; |
119 | |
120 | unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; |
121 | |
122 | struct mem_cgroup_reclaim_iter iter; |
123 | |
124 | struct shrinker_info __rcu *shrinker_info; |
125 | |
126 | struct rb_node tree_node; /* RB tree node */ |
127 | unsigned long usage_in_excess;/* Set to the value by which */ |
128 | /* the soft limit is exceeded*/ |
129 | bool on_tree; |
130 | struct mem_cgroup *memcg; /* Back pointer, we cannot */ |
131 | /* use container_of */ |
132 | }; |
133 | |
134 | struct mem_cgroup_threshold { |
135 | struct eventfd_ctx *eventfd; |
136 | unsigned long threshold; |
137 | }; |
138 | |
139 | /* For threshold */ |
140 | struct mem_cgroup_threshold_ary { |
	/* Index of the threshold at or just below the current usage. */
142 | int current_threshold; |
143 | /* Size of entries[] */ |
144 | unsigned int size; |
145 | /* Array of thresholds */ |
146 | struct mem_cgroup_threshold entries[] __counted_by(size); |
147 | }; |
148 | |
149 | struct mem_cgroup_thresholds { |
150 | /* Primary thresholds array */ |
151 | struct mem_cgroup_threshold_ary *primary; |
152 | /* |
153 | * Spare threshold array. |
154 | * This is needed to make mem_cgroup_unregister_event() "never fail". |
155 | * It must be able to store at least primary->size - 1 entries. |
156 | */ |
157 | struct mem_cgroup_threshold_ary *spare; |
158 | }; |
159 | |
160 | /* |
161 | * Remember four most recent foreign writebacks with dirty pages in this |
162 | * cgroup. Inode sharing is expected to be uncommon and, even if we miss |
163 | * one in a given round, we're likely to catch it later if it keeps |
164 | * foreign-dirtying, so a fairly low count should be enough. |
165 | * |
166 | * See mem_cgroup_track_foreign_dirty_slowpath() for details. |
167 | */ |
168 | #define MEMCG_CGWB_FRN_CNT 4 |
169 | |
170 | struct memcg_cgwb_frn { |
171 | u64 bdi_id; /* bdi->id of the foreign inode */ |
172 | int memcg_id; /* memcg->css.id of foreign inode */ |
173 | u64 at; /* jiffies_64 at the time of dirtying */ |
174 | struct wb_completion done; /* tracks in-flight foreign writebacks */ |
175 | }; |
176 | |
177 | /* |
178 | * Bucket for arbitrarily byte-sized objects charged to a memory |
179 | * cgroup. The bucket can be reparented in one piece when the cgroup |
180 | * is destroyed, without having to round up the individual references |
181 | * of all live memory objects in the wild. |
182 | */ |
183 | struct obj_cgroup { |
184 | struct percpu_ref refcnt; |
185 | struct mem_cgroup *memcg; |
186 | atomic_t nr_charged_bytes; |
187 | union { |
188 | struct list_head list; /* protected by objcg_lock */ |
189 | struct rcu_head rcu; |
190 | }; |
191 | }; |
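
/*
 * Illustrative sketch (an assumed user, not code from this file): an object
 * holding an objcg reference does not pin the memcg itself. To look at the
 * memcg it must do so under RCU, because the objcg can be reparented at any
 * time (see obj_cgroup_memcg() below):
 *
 *	obj_cgroup_get(objcg);		(the object now owns a reference)
 *	...
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	... memcg may be the original cgroup or any of its ancestors ...
 *	rcu_read_unlock();
 *	...
 *	obj_cgroup_put(objcg);		(on object destruction)
 */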
192 | |
193 | /* |
194 | * The memory controller data structure. The memory controller controls both |
195 | * page cache and RSS per cgroup. We would eventually like to provide |
196 | * statistics based on the statistics developed by Rik Van Riel for clock-pro, |
197 | * to help the administrator determine what knobs to tune. |
198 | */ |
199 | struct mem_cgroup { |
200 | struct cgroup_subsys_state css; |
201 | |
202 | /* Private memcg ID. Used to ID objects that outlive the cgroup */ |
203 | struct mem_cgroup_id id; |
204 | |
205 | /* Accounted resources */ |
206 | struct page_counter memory; /* Both v1 & v2 */ |
207 | |
208 | union { |
209 | struct page_counter swap; /* v2 only */ |
210 | struct page_counter memsw; /* v1 only */ |
211 | }; |
212 | |
213 | /* Legacy consumer-oriented counters */ |
214 | struct page_counter kmem; /* v1 only */ |
215 | struct page_counter tcpmem; /* v1 only */ |
216 | |
217 | /* Range enforcement for interrupt charges */ |
218 | struct work_struct high_work; |
219 | |
220 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) |
221 | unsigned long zswap_max; |
222 | #endif |
223 | |
224 | unsigned long soft_limit; |
225 | |
226 | /* vmpressure notifications */ |
227 | struct vmpressure vmpressure; |
228 | |
229 | /* |
	 * Should the OOM killer kill all tasks in this cgroup if it kills one?
231 | */ |
232 | bool oom_group; |
233 | |
234 | /* protected by memcg_oom_lock */ |
235 | bool oom_lock; |
236 | int under_oom; |
237 | |
238 | int swappiness; |
239 | /* OOM-Killer disable */ |
240 | int oom_kill_disable; |
241 | |
242 | /* memory.events and memory.events.local */ |
243 | struct cgroup_file events_file; |
244 | struct cgroup_file events_local_file; |
245 | |
246 | /* handle for "memory.swap.events" */ |
247 | struct cgroup_file swap_events_file; |
248 | |
249 | /* protect arrays of thresholds */ |
250 | struct mutex thresholds_lock; |
251 | |
252 | /* thresholds for memory usage. RCU-protected */ |
253 | struct mem_cgroup_thresholds thresholds; |
254 | |
255 | /* thresholds for mem+swap usage. RCU-protected */ |
256 | struct mem_cgroup_thresholds memsw_thresholds; |
257 | |
258 | /* For oom notifier event fd */ |
259 | struct list_head oom_notify; |
260 | |
261 | /* |
	 * Should we move charges of a task when it is moved into this
	 * mem_cgroup? And what type of charges should we move?
264 | */ |
265 | unsigned long move_charge_at_immigrate; |
266 | /* taken only while moving_account > 0 */ |
267 | spinlock_t move_lock; |
268 | unsigned long move_lock_flags; |
269 | |
270 | CACHELINE_PADDING(_pad1_); |
271 | |
272 | /* memory.stat */ |
273 | struct memcg_vmstats *vmstats; |
274 | |
275 | /* memory.events */ |
276 | atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; |
277 | atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS]; |
278 | |
279 | /* |
	 * Hint of reclaim pressure for socket memory management. Note
281 | * that this indicator should NOT be used in legacy cgroup mode |
282 | * where socket memory is accounted/charged separately. |
283 | */ |
284 | unsigned long socket_pressure; |
285 | |
286 | /* Legacy tcp memory accounting */ |
287 | bool tcpmem_active; |
288 | int tcpmem_pressure; |
289 | |
290 | #ifdef CONFIG_MEMCG_KMEM |
291 | int kmemcg_id; |
292 | /* |
	 * memcg->objcg is wiped out as part of the objcg reparenting
	 * process. memcg->orig_objcg preserves a pointer (and a reference)
	 * to the original objcg for the rest of the memcg's lifetime.
296 | */ |
297 | struct obj_cgroup __rcu *objcg; |
298 | struct obj_cgroup *orig_objcg; |
299 | /* list of inherited objcgs, protected by objcg_lock */ |
300 | struct list_head objcg_list; |
301 | #endif |
302 | |
303 | CACHELINE_PADDING(_pad2_); |
304 | |
305 | /* |
	 * Set to > 0 while pages under this cgroup are being moved to
	 * another cgroup.
307 | */ |
308 | atomic_t moving_account; |
309 | struct task_struct *move_lock_task; |
310 | |
311 | struct memcg_vmstats_percpu __percpu *vmstats_percpu; |
312 | |
313 | #ifdef CONFIG_CGROUP_WRITEBACK |
314 | struct list_head cgwb_list; |
315 | struct wb_domain cgwb_domain; |
316 | struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT]; |
317 | #endif |
318 | |
319 | /* List of events which userspace want to receive */ |
320 | struct list_head event_list; |
321 | spinlock_t event_list_lock; |
322 | |
323 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
324 | struct deferred_split deferred_split_queue; |
325 | #endif |
326 | |
327 | #ifdef CONFIG_LRU_GEN |
328 | /* per-memcg mm_struct list */ |
329 | struct lru_gen_mm_list mm_list; |
330 | #endif |
331 | |
332 | struct mem_cgroup_per_node *nodeinfo[]; |
333 | }; |
334 | |
335 | /* |
 * Size of the first charge trial.
 * TODO: it may be necessary to use larger batches on big machines, or to
 * size the batch dynamically based on the workload.
339 | */ |
340 | #define MEMCG_CHARGE_BATCH 64U |
341 | |
342 | extern struct mem_cgroup *root_mem_cgroup; |
343 | |
344 | enum page_memcg_data_flags { |
345 | /* page->memcg_data is a pointer to an objcgs vector */ |
346 | MEMCG_DATA_OBJCGS = (1UL << 0), |
347 | /* page has been accounted as a non-slab kernel page */ |
348 | MEMCG_DATA_KMEM = (1UL << 1), |
349 | /* the next bit after the last actual flag */ |
350 | __NR_MEMCG_DATA_FLAGS = (1UL << 2), |
351 | }; |
352 | |
353 | #define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1) |
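
/*
 * Illustrative sketch: folio->memcg_data packs a pointer together with the
 * flags above. Assuming @memcg_data was read from a folio, the pointer and
 * the flags can be separated like this (this is what the helpers below do
 * internally):
 *
 *	unsigned long flags = memcg_data & MEMCG_DATA_FLAGS_MASK;
 *	void *ptr = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
 *
 * Depending on the flags, @ptr is a struct mem_cgroup * (no flag set), a
 * struct obj_cgroup * (MEMCG_DATA_KMEM) or an object cgroups vector
 * (MEMCG_DATA_OBJCGS).
 */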
354 | |
355 | static inline bool folio_memcg_kmem(struct folio *folio); |
356 | |
357 | /* |
 * After initialization, objcg->memcg always points at a valid memcg,
 * but it can be atomically swapped to the parent memcg.
360 | * |
361 | * The caller must ensure that the returned memcg won't be released: |
362 | * e.g. acquire the rcu_read_lock or css_set_lock. |
363 | */ |
364 | static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) |
365 | { |
366 | return READ_ONCE(objcg->memcg); |
367 | } |
368 | |
369 | /* |
370 | * __folio_memcg - Get the memory cgroup associated with a non-kmem folio |
371 | * @folio: Pointer to the folio. |
372 | * |
373 | * Returns a pointer to the memory cgroup associated with the folio, |
374 | * or NULL. This function assumes that the folio is known to have a |
375 | * proper memory cgroup pointer. It's not safe to call this function |
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * kmem folios.
378 | */ |
379 | static inline struct mem_cgroup *__folio_memcg(struct folio *folio) |
380 | { |
381 | unsigned long memcg_data = folio->memcg_data; |
382 | |
383 | VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); |
384 | VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio); |
385 | VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio); |
386 | |
387 | return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); |
388 | } |
389 | |
390 | /* |
391 | * __folio_objcg - get the object cgroup associated with a kmem folio. |
392 | * @folio: Pointer to the folio. |
393 | * |
394 | * Returns a pointer to the object cgroup associated with the folio, |
395 | * or NULL. This function assumes that the folio is known to have a |
396 | * proper object cgroup pointer. It's not safe to call this function |
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * LRU folios.
399 | */ |
400 | static inline struct obj_cgroup *__folio_objcg(struct folio *folio) |
401 | { |
402 | unsigned long memcg_data = folio->memcg_data; |
403 | |
404 | VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); |
405 | VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio); |
406 | VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio); |
407 | |
408 | return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); |
409 | } |
410 | |
411 | /* |
412 | * folio_memcg - Get the memory cgroup associated with a folio. |
413 | * @folio: Pointer to the folio. |
414 | * |
415 | * Returns a pointer to the memory cgroup associated with the folio, |
416 | * or NULL. This function assumes that the folio is known to have a |
417 | * proper memory cgroup pointer. It's not safe to call this function |
 * against some types of folios, e.g. slab folios or ex-slab folios.
419 | * |
420 | * For a non-kmem folio any of the following ensures folio and memcg binding |
421 | * stability: |
422 | * |
423 | * - the folio lock |
424 | * - LRU isolation |
425 | * - folio_memcg_lock() |
426 | * - exclusive reference |
427 | * - mem_cgroup_trylock_pages() |
428 | * |
 * For a kmem folio a caller should hold the RCU read lock to protect the
 * memcg associated with the folio from being released.
431 | */ |
432 | static inline struct mem_cgroup *folio_memcg(struct folio *folio) |
433 | { |
434 | if (folio_memcg_kmem(folio)) |
		return obj_cgroup_memcg(__folio_objcg(folio));
436 | return __folio_memcg(folio); |
437 | } |
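
/*
 * Illustrative sketch (an assumed caller, simplified): reading the memcg of a
 * locked, hence stable, non-kmem folio. Any of the conditions listed above
 * would do instead of the folio lock:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);
 *	... the folio->memcg binding is stable here ...
 *	folio_unlock(folio);
 */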
438 | |
439 | static inline struct mem_cgroup *page_memcg(struct page *page) |
440 | { |
441 | return folio_memcg(page_folio(page)); |
442 | } |
443 | |
444 | /** |
445 | * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio. |
446 | * @folio: Pointer to the folio. |
447 | * |
448 | * This function assumes that the folio is known to have a |
449 | * proper memory cgroup pointer. It's not safe to call this function |
 * against some types of folios, e.g. slab folios or ex-slab folios.
451 | * |
452 | * Return: A pointer to the memory cgroup associated with the folio, |
453 | * or NULL. |
454 | */ |
455 | static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) |
456 | { |
457 | unsigned long memcg_data = READ_ONCE(folio->memcg_data); |
458 | |
459 | VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); |
460 | WARN_ON_ONCE(!rcu_read_lock_held()); |
461 | |
462 | if (memcg_data & MEMCG_DATA_KMEM) { |
463 | struct obj_cgroup *objcg; |
464 | |
465 | objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); |
466 | return obj_cgroup_memcg(objcg); |
467 | } |
468 | |
469 | return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); |
470 | } |
471 | |
472 | /* |
473 | * folio_memcg_check - Get the memory cgroup associated with a folio. |
474 | * @folio: Pointer to the folio. |
475 | * |
476 | * Returns a pointer to the memory cgroup associated with the folio, |
 * or NULL. Unlike folio_memcg(), this function can take any folio as an
 * argument. It has to be used in cases when it's not known whether a folio
 * has an associated memory cgroup pointer, an object cgroups vector or
 * a single object cgroup.
481 | * |
482 | * For a non-kmem folio any of the following ensures folio and memcg binding |
483 | * stability: |
484 | * |
485 | * - the folio lock |
486 | * - LRU isolation |
 * - folio_memcg_lock()
488 | * - exclusive reference |
489 | * - mem_cgroup_trylock_pages() |
490 | * |
 * For a kmem folio a caller should hold the RCU read lock to protect the
 * memcg associated with the folio from being released.
493 | */ |
494 | static inline struct mem_cgroup *folio_memcg_check(struct folio *folio) |
495 | { |
496 | /* |
497 | * Because folio->memcg_data might be changed asynchronously |
498 | * for slabs, READ_ONCE() should be used here. |
499 | */ |
500 | unsigned long memcg_data = READ_ONCE(folio->memcg_data); |
501 | |
502 | if (memcg_data & MEMCG_DATA_OBJCGS) |
503 | return NULL; |
504 | |
505 | if (memcg_data & MEMCG_DATA_KMEM) { |
506 | struct obj_cgroup *objcg; |
507 | |
508 | objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); |
509 | return obj_cgroup_memcg(objcg); |
510 | } |
511 | |
512 | return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); |
513 | } |
514 | |
515 | static inline struct mem_cgroup *page_memcg_check(struct page *page) |
516 | { |
517 | if (PageTail(page)) |
518 | return NULL; |
	return folio_memcg_check((struct folio *)page);
520 | } |
521 | |
522 | static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg) |
523 | { |
524 | struct mem_cgroup *memcg; |
525 | |
526 | rcu_read_lock(); |
527 | retry: |
528 | memcg = obj_cgroup_memcg(objcg); |
529 | if (unlikely(!css_tryget(&memcg->css))) |
530 | goto retry; |
531 | rcu_read_unlock(); |
532 | |
533 | return memcg; |
534 | } |
535 | |
536 | #ifdef CONFIG_MEMCG_KMEM |
537 | /* |
538 | * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set. |
539 | * @folio: Pointer to the folio. |
540 | * |
541 | * Checks if the folio has MemcgKmem flag set. The caller must ensure |
542 | * that the folio has an associated memory cgroup. It's not safe to call |
543 | * this function against some types of folios, e.g. slab folios. |
544 | */ |
545 | static inline bool folio_memcg_kmem(struct folio *folio) |
546 | { |
547 | VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page); |
548 | VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio); |
549 | return folio->memcg_data & MEMCG_DATA_KMEM; |
550 | } |
551 | |
552 | |
553 | #else |
554 | static inline bool folio_memcg_kmem(struct folio *folio) |
555 | { |
556 | return false; |
557 | } |
558 | |
559 | #endif |
560 | |
561 | static inline bool PageMemcgKmem(struct page *page) |
562 | { |
563 | return folio_memcg_kmem(page_folio(page)); |
564 | } |
565 | |
566 | static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) |
567 | { |
568 | return (memcg == root_mem_cgroup); |
569 | } |
570 | |
571 | static inline bool mem_cgroup_disabled(void) |
572 | { |
573 | return !cgroup_subsys_enabled(memory_cgrp_subsys); |
574 | } |
575 | |
576 | static inline void mem_cgroup_protection(struct mem_cgroup *root, |
577 | struct mem_cgroup *memcg, |
578 | unsigned long *min, |
579 | unsigned long *low) |
580 | { |
581 | *min = *low = 0; |
582 | |
583 | if (mem_cgroup_disabled()) |
584 | return; |
585 | |
586 | /* |
587 | * There is no reclaim protection applied to a targeted reclaim. |
588 | * We are special casing this specific case here because |
589 | * mem_cgroup_calculate_protection is not robust enough to keep |
590 | * the protection invariant for calculated effective values for |
591 | * parallel reclaimers with different reclaim target. This is |
592 | * especially a problem for tail memcgs (as they have pages on LRU) |
593 | * which would want to have effective values 0 for targeted reclaim |
594 | * but a different value for external reclaim. |
595 | * |
596 | * Example |
597 | * Let's have global and A's reclaim in parallel: |
598 | * | |
599 | * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G) |
600 | * |\ |
601 | * | C (low = 1G, usage = 2.5G) |
602 | * B (low = 1G, usage = 0.5G) |
603 | * |
604 | * For the global reclaim |
605 | * A.elow = A.low |
606 | * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow |
607 | * C.elow = min(C.usage, C.low) |
608 | * |
609 | * With the effective values resetting we have A reclaim |
610 | * A.elow = 0 |
611 | * B.elow = B.low |
612 | * C.elow = C.low |
613 | * |
614 | * If the global reclaim races with A's reclaim then |
	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
616 | * is possible and reclaiming B would be violating the protection. |
617 | * |
618 | */ |
619 | if (root == memcg) |
620 | return; |
621 | |
622 | *min = READ_ONCE(memcg->memory.emin); |
623 | *low = READ_ONCE(memcg->memory.elow); |
624 | } |
625 | |
626 | void mem_cgroup_calculate_protection(struct mem_cgroup *root, |
627 | struct mem_cgroup *memcg); |
628 | |
629 | static inline bool mem_cgroup_unprotected(struct mem_cgroup *target, |
630 | struct mem_cgroup *memcg) |
631 | { |
632 | /* |
633 | * The root memcg doesn't account charges, and doesn't support |
634 | * protection. The target memcg's protection is ignored, see |
635 | * mem_cgroup_calculate_protection() and mem_cgroup_protection() |
636 | */ |
637 | return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) || |
638 | memcg == target; |
639 | } |
640 | |
641 | static inline bool mem_cgroup_below_low(struct mem_cgroup *target, |
642 | struct mem_cgroup *memcg) |
643 | { |
644 | if (mem_cgroup_unprotected(target, memcg)) |
645 | return false; |
646 | |
647 | return READ_ONCE(memcg->memory.elow) >= |
		page_counter_read(&memcg->memory);
649 | } |
650 | |
651 | static inline bool mem_cgroup_below_min(struct mem_cgroup *target, |
652 | struct mem_cgroup *memcg) |
653 | { |
654 | if (mem_cgroup_unprotected(target, memcg)) |
655 | return false; |
656 | |
657 | return READ_ONCE(memcg->memory.emin) >= |
		page_counter_read(&memcg->memory);
659 | } |
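
/*
 * Illustrative sketch (simplified; the real logic lives in mm/vmscan.c): the
 * way reclaim typically consumes the protection helpers above:
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *	if (mem_cgroup_below_min(target_memcg, memcg))
 *		... hard protection, skip this memcg ...
 *	else if (mem_cgroup_below_low(target_memcg, memcg))
 *		... soft protection, skip unless reclaim is desperate ...
 */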
660 | |
661 | void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg); |
662 | |
663 | int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp); |
664 | |
665 | /** |
666 | * mem_cgroup_charge - Charge a newly allocated folio to a cgroup. |
667 | * @folio: Folio to charge. |
668 | * @mm: mm context of the allocating task. |
669 | * @gfp: Reclaim mode. |
670 | * |
671 | * Try to charge @folio to the memcg that @mm belongs to, reclaiming |
672 | * pages according to @gfp if necessary. If @mm is NULL, try to |
673 | * charge to the active memcg. |
674 | * |
675 | * Do not use this for folios allocated for swapin. |
676 | * |
677 | * Return: 0 on success. Otherwise, an error code is returned. |
678 | */ |
679 | static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, |
680 | gfp_t gfp) |
681 | { |
682 | if (mem_cgroup_disabled()) |
683 | return 0; |
684 | return __mem_cgroup_charge(folio, mm, gfp); |
685 | } |
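
/*
 * Illustrative sketch (not a real call site, error handling reduced to the
 * bare minimum): charging a newly allocated folio to the current task's
 * cgroup:
 *
 *	folio = folio_alloc(GFP_KERNEL, 0);
 *	if (folio && mem_cgroup_charge(folio, current->mm, GFP_KERNEL)) {
 *		folio_put(folio);	(charge failed, drop the folio)
 *		folio = NULL;
 *	}
 *
 * The matching uncharge normally happens from the folio freeing path via
 * mem_cgroup_uncharge().
 */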
686 | |
687 | int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, |
688 | long nr_pages); |
689 | |
690 | int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, |
691 | gfp_t gfp, swp_entry_t entry); |
692 | void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry); |
693 | |
694 | void __mem_cgroup_uncharge(struct folio *folio); |
695 | |
696 | /** |
697 | * mem_cgroup_uncharge - Uncharge a folio. |
698 | * @folio: Folio to uncharge. |
699 | * |
700 | * Uncharge a folio previously charged with mem_cgroup_charge(). |
701 | */ |
702 | static inline void mem_cgroup_uncharge(struct folio *folio) |
703 | { |
704 | if (mem_cgroup_disabled()) |
705 | return; |
706 | __mem_cgroup_uncharge(folio); |
707 | } |
708 | |
709 | void __mem_cgroup_uncharge_list(struct list_head *page_list); |
710 | static inline void mem_cgroup_uncharge_list(struct list_head *page_list) |
711 | { |
712 | if (mem_cgroup_disabled()) |
713 | return; |
714 | __mem_cgroup_uncharge_list(page_list); |
715 | } |
716 | |
717 | void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages); |
718 | |
719 | void mem_cgroup_replace_folio(struct folio *old, struct folio *new); |
720 | |
721 | void mem_cgroup_migrate(struct folio *old, struct folio *new); |
722 | |
723 | /** |
724 | * mem_cgroup_lruvec - get the lru list vector for a memcg & node |
725 | * @memcg: memcg of the wanted lruvec |
726 | * @pgdat: pglist_data |
727 | * |
728 | * Returns the lru list vector holding pages for a given @memcg & |
729 | * @pgdat combination. This can be the node lruvec, if the memory |
730 | * controller is disabled. |
731 | */ |
732 | static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, |
733 | struct pglist_data *pgdat) |
734 | { |
735 | struct mem_cgroup_per_node *mz; |
736 | struct lruvec *lruvec; |
737 | |
738 | if (mem_cgroup_disabled()) { |
739 | lruvec = &pgdat->__lruvec; |
740 | goto out; |
741 | } |
742 | |
743 | if (!memcg) |
744 | memcg = root_mem_cgroup; |
745 | |
746 | mz = memcg->nodeinfo[pgdat->node_id]; |
747 | lruvec = &mz->lruvec; |
748 | out: |
749 | /* |
750 | * Since a node can be onlined after the mem_cgroup was created, |
751 | * we have to be prepared to initialize lruvec->pgdat here; |
752 | * and if offlined then reonlined, we need to reinitialize it. |
753 | */ |
754 | if (unlikely(lruvec->pgdat != pgdat)) |
755 | lruvec->pgdat = pgdat; |
756 | return lruvec; |
757 | } |
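
/*
 * Illustrative sketch: looking at a memcg's LRU state on one node. @nid is an
 * assumed node id; lruvec_page_state() is declared further down in this file:
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 *	unsigned long file = lruvec_page_state(lruvec, NR_FILE_PAGES);
 */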
758 | |
759 | /** |
760 | * folio_lruvec - return lruvec for isolating/putting an LRU folio |
761 | * @folio: Pointer to the folio. |
762 | * |
763 | * This function relies on folio->mem_cgroup being stable. |
764 | */ |
765 | static inline struct lruvec *folio_lruvec(struct folio *folio) |
766 | { |
767 | struct mem_cgroup *memcg = folio_memcg(folio); |
768 | |
769 | VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio); |
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
771 | } |
772 | |
773 | struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); |
774 | |
775 | struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); |
776 | |
777 | struct mem_cgroup *get_mem_cgroup_from_current(void); |
778 | |
779 | struct lruvec *folio_lruvec_lock(struct folio *folio); |
780 | struct lruvec *folio_lruvec_lock_irq(struct folio *folio); |
781 | struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, |
782 | unsigned long *flags); |
783 | |
784 | #ifdef CONFIG_DEBUG_VM |
785 | void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio); |
786 | #else |
787 | static inline |
788 | void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) |
789 | { |
790 | } |
791 | #endif |
792 | |
793 | static inline |
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
795 | return css ? container_of(css, struct mem_cgroup, css) : NULL; |
796 | } |
797 | |
798 | static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg) |
799 | { |
	return percpu_ref_tryget(&objcg->refcnt);
801 | } |
802 | |
803 | static inline void obj_cgroup_get(struct obj_cgroup *objcg) |
804 | { |
	percpu_ref_get(&objcg->refcnt);
806 | } |
807 | |
808 | static inline void obj_cgroup_get_many(struct obj_cgroup *objcg, |
809 | unsigned long nr) |
810 | { |
	percpu_ref_get_many(&objcg->refcnt, nr);
812 | } |
813 | |
814 | static inline void obj_cgroup_put(struct obj_cgroup *objcg) |
815 | { |
	percpu_ref_put(&objcg->refcnt);
817 | } |
818 | |
819 | static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg) |
820 | { |
	return !memcg || css_tryget(&memcg->css);
822 | } |
823 | |
824 | static inline void mem_cgroup_put(struct mem_cgroup *memcg) |
825 | { |
826 | if (memcg) |
		css_put(&memcg->css);
828 | } |
829 | |
830 | #define mem_cgroup_from_counter(counter, member) \ |
831 | container_of(counter, struct mem_cgroup, member) |
832 | |
833 | struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, |
834 | struct mem_cgroup *, |
835 | struct mem_cgroup_reclaim_cookie *); |
836 | void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); |
837 | void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, |
838 | int (*)(struct task_struct *, void *), void *arg); |
839 | |
840 | static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) |
841 | { |
842 | if (mem_cgroup_disabled()) |
843 | return 0; |
844 | |
845 | return memcg->id.id; |
846 | } |
847 | struct mem_cgroup *mem_cgroup_from_id(unsigned short id); |
848 | |
849 | #ifdef CONFIG_SHRINKER_DEBUG |
850 | static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg) |
851 | { |
	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
853 | } |
854 | |
855 | struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino); |
856 | #endif |
857 | |
858 | static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) |
859 | { |
	return mem_cgroup_from_css(seq_css(m));
861 | } |
862 | |
863 | static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) |
864 | { |
865 | struct mem_cgroup_per_node *mz; |
866 | |
867 | if (mem_cgroup_disabled()) |
868 | return NULL; |
869 | |
870 | mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); |
871 | return mz->memcg; |
872 | } |
873 | |
874 | /** |
875 | * parent_mem_cgroup - find the accounting parent of a memcg |
876 | * @memcg: memcg whose parent to find |
877 | * |
878 | * Returns the parent memcg, or NULL if this is the root. |
879 | */ |
880 | static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) |
881 | { |
	return mem_cgroup_from_css(memcg->css.parent);
883 | } |
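
/*
 * Illustrative sketch: walking from a memcg up to the root of the hierarchy,
 * e.g. to apply something to every ancestor:
 *
 *	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 *		... do something with memcg ...
 *		if (mem_cgroup_is_root(memcg))
 *			break;
 *	}
 *
 * parent_mem_cgroup() returns NULL above the root memcg, which also
 * terminates the walk; memcg_memory_event() below uses a similar pattern.
 */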
884 | |
885 | static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, |
886 | struct mem_cgroup *root) |
887 | { |
888 | if (root == memcg) |
889 | return true; |
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
891 | } |
892 | |
893 | static inline bool mm_match_cgroup(struct mm_struct *mm, |
894 | struct mem_cgroup *memcg) |
895 | { |
896 | struct mem_cgroup *task_memcg; |
897 | bool match = false; |
898 | |
899 | rcu_read_lock(); |
900 | task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
901 | if (task_memcg) |
		match = mem_cgroup_is_descendant(task_memcg, memcg);
903 | rcu_read_unlock(); |
904 | return match; |
905 | } |
906 | |
907 | struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio); |
908 | ino_t page_cgroup_ino(struct page *page); |
909 | |
910 | static inline bool mem_cgroup_online(struct mem_cgroup *memcg) |
911 | { |
912 | if (mem_cgroup_disabled()) |
913 | return true; |
914 | return !!(memcg->css.flags & CSS_ONLINE); |
915 | } |
916 | |
917 | void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, |
918 | int zid, int nr_pages); |
919 | |
920 | static inline |
921 | unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, |
922 | enum lru_list lru, int zone_idx) |
923 | { |
924 | struct mem_cgroup_per_node *mz; |
925 | |
926 | mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); |
927 | return READ_ONCE(mz->lru_zone_size[zone_idx][lru]); |
928 | } |
929 | |
930 | void mem_cgroup_handle_over_high(gfp_t gfp_mask); |
931 | |
932 | unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); |
933 | |
934 | unsigned long mem_cgroup_size(struct mem_cgroup *memcg); |
935 | |
936 | void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, |
937 | struct task_struct *p); |
938 | |
939 | void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg); |
940 | |
941 | static inline void mem_cgroup_enter_user_fault(void) |
942 | { |
943 | WARN_ON(current->in_user_fault); |
944 | current->in_user_fault = 1; |
945 | } |
946 | |
947 | static inline void mem_cgroup_exit_user_fault(void) |
948 | { |
949 | WARN_ON(!current->in_user_fault); |
950 | current->in_user_fault = 0; |
951 | } |
952 | |
953 | static inline bool task_in_memcg_oom(struct task_struct *p) |
954 | { |
955 | return p->memcg_in_oom; |
956 | } |
957 | |
958 | bool mem_cgroup_oom_synchronize(bool wait); |
959 | struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, |
960 | struct mem_cgroup *oom_domain); |
961 | void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); |
962 | |
963 | void folio_memcg_lock(struct folio *folio); |
964 | void folio_memcg_unlock(struct folio *folio); |
965 | |
966 | void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val); |
967 | |
/* try to stabilize folio_memcg() for all the pages in a memcg */
969 | static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) |
970 | { |
971 | rcu_read_lock(); |
972 | |
	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
974 | return true; |
975 | |
976 | rcu_read_unlock(); |
977 | return false; |
978 | } |
979 | |
980 | static inline void mem_cgroup_unlock_pages(void) |
981 | { |
982 | rcu_read_unlock(); |
983 | } |
984 | |
985 | /* idx can be of type enum memcg_stat_item or node_stat_item */ |
986 | static inline void mod_memcg_state(struct mem_cgroup *memcg, |
987 | int idx, int val) |
988 | { |
989 | unsigned long flags; |
990 | |
991 | local_irq_save(flags); |
992 | __mod_memcg_state(memcg, idx, val); |
993 | local_irq_restore(flags); |
994 | } |
995 | |
996 | static inline void mod_memcg_page_state(struct page *page, |
997 | int idx, int val) |
998 | { |
999 | struct mem_cgroup *memcg; |
1000 | |
1001 | if (mem_cgroup_disabled()) |
1002 | return; |
1003 | |
1004 | rcu_read_lock(); |
1005 | memcg = page_memcg(page); |
1006 | if (memcg) |
1007 | mod_memcg_state(memcg, idx, val); |
1008 | rcu_read_unlock(); |
1009 | } |
1010 | |
1011 | unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx); |
1012 | |
1013 | static inline unsigned long lruvec_page_state(struct lruvec *lruvec, |
1014 | enum node_stat_item idx) |
1015 | { |
1016 | struct mem_cgroup_per_node *pn; |
1017 | long x; |
1018 | |
1019 | if (mem_cgroup_disabled()) |
		return node_page_state(lruvec_pgdat(lruvec), idx);
1021 | |
1022 | pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); |
1023 | x = READ_ONCE(pn->lruvec_stats.state[idx]); |
1024 | #ifdef CONFIG_SMP |
1025 | if (x < 0) |
1026 | x = 0; |
1027 | #endif |
1028 | return x; |
1029 | } |
1030 | |
1031 | static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, |
1032 | enum node_stat_item idx) |
1033 | { |
1034 | struct mem_cgroup_per_node *pn; |
1035 | long x = 0; |
1036 | |
1037 | if (mem_cgroup_disabled()) |
		return node_page_state(lruvec_pgdat(lruvec), idx);
1039 | |
1040 | pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); |
1041 | x = READ_ONCE(pn->lruvec_stats.state_local[idx]); |
1042 | #ifdef CONFIG_SMP |
1043 | if (x < 0) |
1044 | x = 0; |
1045 | #endif |
1046 | return x; |
1047 | } |
1048 | |
1049 | void mem_cgroup_flush_stats(void); |
1050 | void mem_cgroup_flush_stats_ratelimited(void); |
1051 | |
1052 | void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, |
1053 | int val); |
1054 | void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val); |
1055 | |
1056 | static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, |
1057 | int val) |
1058 | { |
1059 | unsigned long flags; |
1060 | |
1061 | local_irq_save(flags); |
1062 | __mod_lruvec_kmem_state(p, idx, val); |
1063 | local_irq_restore(flags); |
1064 | } |
1065 | |
1066 | static inline void mod_memcg_lruvec_state(struct lruvec *lruvec, |
1067 | enum node_stat_item idx, int val) |
1068 | { |
1069 | unsigned long flags; |
1070 | |
1071 | local_irq_save(flags); |
1072 | __mod_memcg_lruvec_state(lruvec, idx, val); |
1073 | local_irq_restore(flags); |
1074 | } |
1075 | |
1076 | void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, |
1077 | unsigned long count); |
1078 | |
1079 | static inline void count_memcg_events(struct mem_cgroup *memcg, |
1080 | enum vm_event_item idx, |
1081 | unsigned long count) |
1082 | { |
1083 | unsigned long flags; |
1084 | |
1085 | local_irq_save(flags); |
1086 | __count_memcg_events(memcg, idx, count); |
1087 | local_irq_restore(flags); |
1088 | } |
1089 | |
1090 | static inline void count_memcg_folio_events(struct folio *folio, |
1091 | enum vm_event_item idx, unsigned long nr) |
1092 | { |
1093 | struct mem_cgroup *memcg = folio_memcg(folio); |
1094 | |
1095 | if (memcg) |
		count_memcg_events(memcg, idx, nr);
1097 | } |
1098 | |
1099 | static inline void count_memcg_event_mm(struct mm_struct *mm, |
1100 | enum vm_event_item idx) |
1101 | { |
1102 | struct mem_cgroup *memcg; |
1103 | |
1104 | if (mem_cgroup_disabled()) |
1105 | return; |
1106 | |
1107 | rcu_read_lock(); |
1108 | memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
1109 | if (likely(memcg)) |
		count_memcg_events(memcg, idx, 1);
1111 | rcu_read_unlock(); |
1112 | } |
1113 | |
1114 | static inline void memcg_memory_event(struct mem_cgroup *memcg, |
1115 | enum memcg_memory_event event) |
1116 | { |
1117 | bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX || |
1118 | event == MEMCG_SWAP_FAIL; |
1119 | |
	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);
1123 | |
1124 | do { |
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);
1130 | |
1131 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) |
1132 | break; |
1133 | if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) |
1134 | break; |
1135 | } while ((memcg = parent_mem_cgroup(memcg)) && |
1136 | !mem_cgroup_is_root(memcg)); |
1137 | } |
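
/*
 * Illustrative sketch: recording that a cgroup hit its hard limit, as a
 * charge/reclaim path might do (simplified):
 *
 *	memcg_memory_event(memcg, MEMCG_MAX);
 *
 * The event is counted in @memcg and, subject to the cgroup2/local-events
 * rules above, in all of its ancestors, and "memory.events" watchers are
 * notified.
 */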
1138 | |
1139 | static inline void memcg_memory_event_mm(struct mm_struct *mm, |
1140 | enum memcg_memory_event event) |
1141 | { |
1142 | struct mem_cgroup *memcg; |
1143 | |
1144 | if (mem_cgroup_disabled()) |
1145 | return; |
1146 | |
1147 | rcu_read_lock(); |
1148 | memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
1149 | if (likely(memcg)) |
1150 | memcg_memory_event(memcg, event); |
1151 | rcu_read_unlock(); |
1152 | } |
1153 | |
1154 | void split_page_memcg(struct page *head, unsigned int nr); |
1155 | |
1156 | unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, |
1157 | gfp_t gfp_mask, |
1158 | unsigned long *total_scanned); |
1159 | |
1160 | #else /* CONFIG_MEMCG */ |
1161 | |
1162 | #define MEM_CGROUP_ID_SHIFT 0 |
1163 | |
1164 | static inline struct mem_cgroup *folio_memcg(struct folio *folio) |
1165 | { |
1166 | return NULL; |
1167 | } |
1168 | |
1169 | static inline struct mem_cgroup *page_memcg(struct page *page) |
1170 | { |
1171 | return NULL; |
1172 | } |
1173 | |
1174 | static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) |
1175 | { |
1176 | WARN_ON_ONCE(!rcu_read_lock_held()); |
1177 | return NULL; |
1178 | } |
1179 | |
1180 | static inline struct mem_cgroup *folio_memcg_check(struct folio *folio) |
1181 | { |
1182 | return NULL; |
1183 | } |
1184 | |
1185 | static inline struct mem_cgroup *page_memcg_check(struct page *page) |
1186 | { |
1187 | return NULL; |
1188 | } |
1189 | |
1190 | static inline bool folio_memcg_kmem(struct folio *folio) |
1191 | { |
1192 | return false; |
1193 | } |
1194 | |
1195 | static inline bool PageMemcgKmem(struct page *page) |
1196 | { |
1197 | return false; |
1198 | } |
1199 | |
1200 | static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) |
1201 | { |
1202 | return true; |
1203 | } |
1204 | |
1205 | static inline bool mem_cgroup_disabled(void) |
1206 | { |
1207 | return true; |
1208 | } |
1209 | |
1210 | static inline void memcg_memory_event(struct mem_cgroup *memcg, |
1211 | enum memcg_memory_event event) |
1212 | { |
1213 | } |
1214 | |
1215 | static inline void memcg_memory_event_mm(struct mm_struct *mm, |
1216 | enum memcg_memory_event event) |
1217 | { |
1218 | } |
1219 | |
1220 | static inline void mem_cgroup_protection(struct mem_cgroup *root, |
1221 | struct mem_cgroup *memcg, |
1222 | unsigned long *min, |
1223 | unsigned long *low) |
1224 | { |
1225 | *min = *low = 0; |
1226 | } |
1227 | |
1228 | static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, |
1229 | struct mem_cgroup *memcg) |
1230 | { |
1231 | } |
1232 | |
1233 | static inline bool mem_cgroup_unprotected(struct mem_cgroup *target, |
1234 | struct mem_cgroup *memcg) |
1235 | { |
1236 | return true; |
1237 | } |
1238 | static inline bool mem_cgroup_below_low(struct mem_cgroup *target, |
1239 | struct mem_cgroup *memcg) |
1240 | { |
1241 | return false; |
1242 | } |
1243 | |
1244 | static inline bool mem_cgroup_below_min(struct mem_cgroup *target, |
1245 | struct mem_cgroup *memcg) |
1246 | { |
1247 | return false; |
1248 | } |
1249 | |
1250 | static inline void mem_cgroup_commit_charge(struct folio *folio, |
1251 | struct mem_cgroup *memcg) |
1252 | { |
1253 | } |
1254 | |
1255 | static inline int mem_cgroup_charge(struct folio *folio, |
1256 | struct mm_struct *mm, gfp_t gfp) |
1257 | { |
1258 | return 0; |
1259 | } |
1260 | |
1261 | static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, |
1262 | gfp_t gfp, long nr_pages) |
1263 | { |
1264 | return 0; |
1265 | } |
1266 | |
1267 | static inline int mem_cgroup_swapin_charge_folio(struct folio *folio, |
1268 | struct mm_struct *mm, gfp_t gfp, swp_entry_t entry) |
1269 | { |
1270 | return 0; |
1271 | } |
1272 | |
1273 | static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) |
1274 | { |
1275 | } |
1276 | |
1277 | static inline void mem_cgroup_uncharge(struct folio *folio) |
1278 | { |
1279 | } |
1280 | |
1281 | static inline void mem_cgroup_uncharge_list(struct list_head *page_list) |
1282 | { |
1283 | } |
1284 | |
1285 | static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, |
1286 | unsigned int nr_pages) |
1287 | { |
1288 | } |
1289 | |
1290 | static inline void mem_cgroup_replace_folio(struct folio *old, |
1291 | struct folio *new) |
1292 | { |
1293 | } |
1294 | |
1295 | static inline void mem_cgroup_migrate(struct folio *old, struct folio *new) |
1296 | { |
1297 | } |
1298 | |
1299 | static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, |
1300 | struct pglist_data *pgdat) |
1301 | { |
1302 | return &pgdat->__lruvec; |
1303 | } |
1304 | |
1305 | static inline struct lruvec *folio_lruvec(struct folio *folio) |
1306 | { |
1307 | struct pglist_data *pgdat = folio_pgdat(folio); |
1308 | return &pgdat->__lruvec; |
1309 | } |
1310 | |
1311 | static inline |
1312 | void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) |
1313 | { |
1314 | } |
1315 | |
1316 | static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) |
1317 | { |
1318 | return NULL; |
1319 | } |
1320 | |
1321 | static inline bool mm_match_cgroup(struct mm_struct *mm, |
1322 | struct mem_cgroup *memcg) |
1323 | { |
1324 | return true; |
1325 | } |
1326 | |
1327 | static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) |
1328 | { |
1329 | return NULL; |
1330 | } |
1331 | |
1332 | static inline struct mem_cgroup *get_mem_cgroup_from_current(void) |
1333 | { |
1334 | return NULL; |
1335 | } |
1336 | |
1337 | static inline |
1338 | struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css) |
1339 | { |
1340 | return NULL; |
1341 | } |
1342 | |
1343 | static inline void obj_cgroup_put(struct obj_cgroup *objcg) |
1344 | { |
1345 | } |
1346 | |
1347 | static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg) |
1348 | { |
1349 | return true; |
1350 | } |
1351 | |
1352 | static inline void mem_cgroup_put(struct mem_cgroup *memcg) |
1353 | { |
1354 | } |
1355 | |
1356 | static inline struct lruvec *folio_lruvec_lock(struct folio *folio) |
1357 | { |
1358 | struct pglist_data *pgdat = folio_pgdat(folio); |
1359 | |
1360 | spin_lock(&pgdat->__lruvec.lru_lock); |
1361 | return &pgdat->__lruvec; |
1362 | } |
1363 | |
1364 | static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio) |
1365 | { |
1366 | struct pglist_data *pgdat = folio_pgdat(folio); |
1367 | |
1368 | spin_lock_irq(&pgdat->__lruvec.lru_lock); |
1369 | return &pgdat->__lruvec; |
1370 | } |
1371 | |
1372 | static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, |
1373 | unsigned long *flagsp) |
1374 | { |
1375 | struct pglist_data *pgdat = folio_pgdat(folio); |
1376 | |
1377 | spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp); |
1378 | return &pgdat->__lruvec; |
1379 | } |
1380 | |
1381 | static inline struct mem_cgroup * |
1382 | mem_cgroup_iter(struct mem_cgroup *root, |
1383 | struct mem_cgroup *prev, |
1384 | struct mem_cgroup_reclaim_cookie *reclaim) |
1385 | { |
1386 | return NULL; |
1387 | } |
1388 | |
1389 | static inline void mem_cgroup_iter_break(struct mem_cgroup *root, |
1390 | struct mem_cgroup *prev) |
1391 | { |
1392 | } |
1393 | |
1394 | static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, |
1395 | int (*fn)(struct task_struct *, void *), void *arg) |
1396 | { |
1397 | } |
1398 | |
1399 | static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) |
1400 | { |
1401 | return 0; |
1402 | } |
1403 | |
1404 | static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) |
1405 | { |
1406 | WARN_ON_ONCE(id); |
1407 | /* XXX: This should always return root_mem_cgroup */ |
1408 | return NULL; |
1409 | } |
1410 | |
1411 | #ifdef CONFIG_SHRINKER_DEBUG |
1412 | static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg) |
1413 | { |
1414 | return 0; |
1415 | } |
1416 | |
1417 | static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino) |
1418 | { |
1419 | return NULL; |
1420 | } |
1421 | #endif |
1422 | |
1423 | static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) |
1424 | { |
1425 | return NULL; |
1426 | } |
1427 | |
1428 | static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) |
1429 | { |
1430 | return NULL; |
1431 | } |
1432 | |
1433 | static inline bool mem_cgroup_online(struct mem_cgroup *memcg) |
1434 | { |
1435 | return true; |
1436 | } |
1437 | |
1438 | static inline |
1439 | unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, |
1440 | enum lru_list lru, int zone_idx) |
1441 | { |
1442 | return 0; |
1443 | } |
1444 | |
1445 | static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) |
1446 | { |
1447 | return 0; |
1448 | } |
1449 | |
1450 | static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg) |
1451 | { |
1452 | return 0; |
1453 | } |
1454 | |
1455 | static inline void |
1456 | mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) |
1457 | { |
1458 | } |
1459 | |
1460 | static inline void |
1461 | mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) |
1462 | { |
1463 | } |
1464 | |
1465 | static inline void folio_memcg_lock(struct folio *folio) |
1466 | { |
1467 | } |
1468 | |
1469 | static inline void folio_memcg_unlock(struct folio *folio) |
1470 | { |
1471 | } |
1472 | |
1473 | static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) |
1474 | { |
1475 | /* to match folio_memcg_rcu() */ |
1476 | rcu_read_lock(); |
1477 | return true; |
1478 | } |
1479 | |
1480 | static inline void mem_cgroup_unlock_pages(void) |
1481 | { |
1482 | rcu_read_unlock(); |
1483 | } |
1484 | |
1485 | static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask) |
1486 | { |
1487 | } |
1488 | |
1489 | static inline void mem_cgroup_enter_user_fault(void) |
1490 | { |
1491 | } |
1492 | |
1493 | static inline void mem_cgroup_exit_user_fault(void) |
1494 | { |
1495 | } |
1496 | |
1497 | static inline bool task_in_memcg_oom(struct task_struct *p) |
1498 | { |
1499 | return false; |
1500 | } |
1501 | |
1502 | static inline bool mem_cgroup_oom_synchronize(bool wait) |
1503 | { |
1504 | return false; |
1505 | } |
1506 | |
1507 | static inline struct mem_cgroup *mem_cgroup_get_oom_group( |
1508 | struct task_struct *victim, struct mem_cgroup *oom_domain) |
1509 | { |
1510 | return NULL; |
1511 | } |
1512 | |
1513 | static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) |
1514 | { |
1515 | } |
1516 | |
1517 | static inline void __mod_memcg_state(struct mem_cgroup *memcg, |
1518 | int idx, |
1519 | int nr) |
1520 | { |
1521 | } |
1522 | |
1523 | static inline void mod_memcg_state(struct mem_cgroup *memcg, |
1524 | int idx, |
1525 | int nr) |
1526 | { |
1527 | } |
1528 | |
1529 | static inline void mod_memcg_page_state(struct page *page, |
1530 | int idx, int val) |
1531 | { |
1532 | } |
1533 | |
1534 | static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) |
1535 | { |
1536 | return 0; |
1537 | } |
1538 | |
1539 | static inline unsigned long lruvec_page_state(struct lruvec *lruvec, |
1540 | enum node_stat_item idx) |
1541 | { |
1542 | return node_page_state(lruvec_pgdat(lruvec), idx); |
1543 | } |
1544 | |
1545 | static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, |
1546 | enum node_stat_item idx) |
1547 | { |
1548 | return node_page_state(lruvec_pgdat(lruvec), idx); |
1549 | } |
1550 | |
1551 | static inline void mem_cgroup_flush_stats(void) |
1552 | { |
1553 | } |
1554 | |
1555 | static inline void mem_cgroup_flush_stats_ratelimited(void) |
1556 | { |
1557 | } |
1558 | |
1559 | static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec, |
1560 | enum node_stat_item idx, int val) |
1561 | { |
1562 | } |
1563 | |
1564 | static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, |
1565 | int val) |
1566 | { |
1567 | struct page *page = virt_to_head_page(p); |
1568 | |
1569 | __mod_node_page_state(page_pgdat(page), idx, val); |
1570 | } |
1571 | |
1572 | static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, |
1573 | int val) |
1574 | { |
1575 | struct page *page = virt_to_head_page(p); |
1576 | |
1577 | mod_node_page_state(page_pgdat(page), idx, val); |
1578 | } |
1579 | |
1580 | static inline void count_memcg_events(struct mem_cgroup *memcg, |
1581 | enum vm_event_item idx, |
1582 | unsigned long count) |
1583 | { |
1584 | } |
1585 | |
1586 | static inline void __count_memcg_events(struct mem_cgroup *memcg, |
1587 | enum vm_event_item idx, |
1588 | unsigned long count) |
1589 | { |
1590 | } |
1591 | |
1592 | static inline void count_memcg_folio_events(struct folio *folio, |
1593 | enum vm_event_item idx, unsigned long nr) |
1594 | { |
1595 | } |
1596 | |
1597 | static inline |
1598 | void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) |
1599 | { |
1600 | } |
1601 | |
1602 | static inline void split_page_memcg(struct page *head, unsigned int nr) |
1603 | { |
1604 | } |
1605 | |
1606 | static inline |
1607 | unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, |
1608 | gfp_t gfp_mask, |
1609 | unsigned long *total_scanned) |
1610 | { |
1611 | return 0; |
1612 | } |
1613 | #endif /* CONFIG_MEMCG */ |
1614 | |
1615 | static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) |
1616 | { |
	__mod_lruvec_kmem_state(p, idx, 1);
1618 | } |
1619 | |
1620 | static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx) |
1621 | { |
	__mod_lruvec_kmem_state(p, idx, -1);
1623 | } |
1624 | |
1625 | static inline struct lruvec *parent_lruvec(struct lruvec *lruvec) |
1626 | { |
1627 | struct mem_cgroup *memcg; |
1628 | |
1629 | memcg = lruvec_memcg(lruvec); |
1630 | if (!memcg) |
1631 | return NULL; |
1632 | memcg = parent_mem_cgroup(memcg); |
1633 | if (!memcg) |
1634 | return NULL; |
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1636 | } |
1637 | |
1638 | static inline void unlock_page_lruvec(struct lruvec *lruvec) |
1639 | { |
	spin_unlock(&lruvec->lru_lock);
1641 | } |
1642 | |
1643 | static inline void unlock_page_lruvec_irq(struct lruvec *lruvec) |
1644 | { |
	spin_unlock_irq(&lruvec->lru_lock);
1646 | } |
1647 | |
1648 | static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec, |
1649 | unsigned long flags) |
1650 | { |
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
1652 | } |
1653 | |
1654 | /* Test requires a stable page->memcg binding, see page_memcg() */ |
1655 | static inline bool folio_matches_lruvec(struct folio *folio, |
1656 | struct lruvec *lruvec) |
1657 | { |
1658 | return lruvec_pgdat(lruvec) == folio_pgdat(folio) && |
1659 | lruvec_memcg(lruvec) == folio_memcg(folio); |
1660 | } |
1661 | |
/* Don't lock again if the folio's lruvec is already locked */
1663 | static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio, |
1664 | struct lruvec *locked_lruvec) |
1665 | { |
1666 | if (locked_lruvec) { |
		if (folio_matches_lruvec(folio, locked_lruvec))
1668 | return locked_lruvec; |
1669 | |
		unlock_page_lruvec_irq(locked_lruvec);
1671 | } |
1672 | |
1673 | return folio_lruvec_lock_irq(folio); |
1674 | } |
1675 | |
/* Don't lock again if the folio's lruvec is already locked */
1677 | static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio, |
1678 | struct lruvec *locked_lruvec, unsigned long *flags) |
1679 | { |
1680 | if (locked_lruvec) { |
		if (folio_matches_lruvec(folio, locked_lruvec))
1682 | return locked_lruvec; |
1683 | |
		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
1685 | } |
1686 | |
1687 | return folio_lruvec_lock_irqsave(folio, flags); |
1688 | } |
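
/*
 * Illustrative sketch (an assumed batch loop, simplified): re-locking only
 * when a folio belongs to a different lruvec than the previous one, the
 * pattern used by folio batch release/putback paths:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	list_for_each_entry(folio, list, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		... operate on the folio under lruvec->lru_lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */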
1689 | |
1690 | #ifdef CONFIG_CGROUP_WRITEBACK |
1691 | |
1692 | struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); |
1693 | void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, |
1694 | unsigned long *pheadroom, unsigned long *pdirty, |
1695 | unsigned long *pwriteback); |
1696 | |
1697 | void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio, |
1698 | struct bdi_writeback *wb); |
1699 | |
1700 | static inline void mem_cgroup_track_foreign_dirty(struct folio *folio, |
1701 | struct bdi_writeback *wb) |
1702 | { |
1703 | struct mem_cgroup *memcg; |
1704 | |
1705 | if (mem_cgroup_disabled()) |
1706 | return; |
1707 | |
1708 | memcg = folio_memcg(folio); |
1709 | if (unlikely(memcg && &memcg->css != wb->memcg_css)) |
1710 | mem_cgroup_track_foreign_dirty_slowpath(folio, wb); |
1711 | } |
1712 | |
1713 | void mem_cgroup_flush_foreign(struct bdi_writeback *wb); |
1714 | |
1715 | #else /* CONFIG_CGROUP_WRITEBACK */ |
1716 | |
1717 | static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) |
1718 | { |
1719 | return NULL; |
1720 | } |
1721 | |
1722 | static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, |
1723 | unsigned long *pfilepages, |
1724 | unsigned long *pheadroom, |
1725 | unsigned long *pdirty, |
1726 | unsigned long *pwriteback) |
1727 | { |
1728 | } |
1729 | |
1730 | static inline void mem_cgroup_track_foreign_dirty(struct folio *folio, |
1731 | struct bdi_writeback *wb) |
1732 | { |
1733 | } |
1734 | |
1735 | static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb) |
1736 | { |
1737 | } |
1738 | |
1739 | #endif /* CONFIG_CGROUP_WRITEBACK */ |
1740 | |
1741 | struct sock; |
1742 | bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, |
1743 | gfp_t gfp_mask); |
1744 | void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); |
1745 | #ifdef CONFIG_MEMCG |
1746 | extern struct static_key_false memcg_sockets_enabled_key; |
1747 | #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key) |
1748 | void mem_cgroup_sk_alloc(struct sock *sk); |
1749 | void mem_cgroup_sk_free(struct sock *sk); |
1750 | static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) |
1751 | { |
1752 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) |
1753 | return !!memcg->tcpmem_pressure; |
1754 | do { |
1755 | if (time_before(jiffies, READ_ONCE(memcg->socket_pressure))) |
1756 | return true; |
1757 | } while ((memcg = parent_mem_cgroup(memcg))); |
1758 | return false; |
1759 | } |
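
/*
 * Illustrative sketch: how networking code typically consults this hint
 * (simplified; sk->sk_memcg is only set when memcg socket accounting is
 * active):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		... shrink socket buffers instead of growing them ...
 */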
1760 | |
1761 | int alloc_shrinker_info(struct mem_cgroup *memcg); |
1762 | void free_shrinker_info(struct mem_cgroup *memcg); |
1763 | void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id); |
1764 | void reparent_shrinker_deferred(struct mem_cgroup *memcg); |
1765 | #else |
1766 | #define mem_cgroup_sockets_enabled 0 |
1767 | static inline void mem_cgroup_sk_alloc(struct sock *sk) { }; |
1768 | static inline void mem_cgroup_sk_free(struct sock *sk) { }; |
1769 | static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) |
1770 | { |
1771 | return false; |
1772 | } |
1773 | |
1774 | static inline void set_shrinker_bit(struct mem_cgroup *memcg, |
1775 | int nid, int shrinker_id) |
1776 | { |
1777 | } |
1778 | #endif |
1779 | |
1780 | #ifdef CONFIG_MEMCG_KMEM |
1781 | bool mem_cgroup_kmem_disabled(void); |
1782 | int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); |
1783 | void __memcg_kmem_uncharge_page(struct page *page, int order); |
1784 | |
1785 | /* |
1786 | * The returned objcg pointer is safe to use without additional |
1787 | * protection within a scope. The scope is defined either by |
1788 | * the current task (similar to the "current" global variable) |
1789 | * or by set_active_memcg() pair. |
1790 | * Please, use obj_cgroup_get() to get a reference if the pointer |
1791 | * needs to be used outside of the local scope. |
1792 | */ |
1793 | struct obj_cgroup *current_obj_cgroup(void); |
1794 | struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio); |
1795 | |
1796 | static inline struct obj_cgroup *get_obj_cgroup_from_current(void) |
1797 | { |
1798 | struct obj_cgroup *objcg = current_obj_cgroup(); |
1799 | |
1800 | if (objcg) |
1801 | obj_cgroup_get(objcg); |
1802 | |
1803 | return objcg; |
1804 | } |
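
/*
 * Illustrative sketch (an assumed allocator, simplified): charging @size
 * bytes of a kernel object to the current task's cgroup and releasing the
 * charge on free. The objcg reference is typically stashed alongside the
 * object so the uncharge can find it:
 *
 *	objcg = get_obj_cgroup_from_current();
 *	if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *		obj_cgroup_put(objcg);
 *		objcg = NULL;		(charge failed)
 *	}
 *	...
 *	if (objcg) {
 *		obj_cgroup_uncharge(objcg, size);
 *		obj_cgroup_put(objcg);
 *	}
 */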
1805 | |
1806 | int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); |
1807 | void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); |
1808 | |
1809 | extern struct static_key_false memcg_bpf_enabled_key; |
1810 | static inline bool memcg_bpf_enabled(void) |
1811 | { |
1812 | return static_branch_likely(&memcg_bpf_enabled_key); |
1813 | } |
1814 | |
1815 | extern struct static_key_false memcg_kmem_online_key; |
1816 | |
1817 | static inline bool memcg_kmem_online(void) |
1818 | { |
1819 | return static_branch_likely(&memcg_kmem_online_key); |
1820 | } |
1821 | |
1822 | static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, |
1823 | int order) |
1824 | { |
1825 | if (memcg_kmem_online()) |
1826 | return __memcg_kmem_charge_page(page, gfp, order); |
1827 | return 0; |
1828 | } |
1829 | |
1830 | static inline void memcg_kmem_uncharge_page(struct page *page, int order) |
1831 | { |
1832 | if (memcg_kmem_online()) |
1833 | __memcg_kmem_uncharge_page(page, order); |
1834 | } |
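
/*
 * Illustrative note: callers rarely invoke these directly; the page allocator
 * does it when __GFP_ACCOUNT is set. For example,
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
 *
 * charges the allocation to the current cgroup, and freeing the pages undoes
 * the charge via __memcg_kmem_uncharge_page().
 */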
1835 | |
1836 | /* |
 * A helper for accessing memcg's kmemcg_id, used for getting
1838 | * corresponding LRU lists. |
1839 | */ |
1840 | static inline int memcg_kmem_id(struct mem_cgroup *memcg) |
1841 | { |
1842 | return memcg ? memcg->kmemcg_id : -1; |
1843 | } |
1844 | |
1845 | struct mem_cgroup *mem_cgroup_from_obj(void *p); |
1846 | struct mem_cgroup *mem_cgroup_from_slab_obj(void *p); |
1847 | |
1848 | static inline void count_objcg_event(struct obj_cgroup *objcg, |
1849 | enum vm_event_item idx) |
1850 | { |
1851 | struct mem_cgroup *memcg; |
1852 | |
1853 | if (!memcg_kmem_online()) |
1854 | return; |
1855 | |
1856 | rcu_read_lock(); |
1857 | memcg = obj_cgroup_memcg(objcg); |
	count_memcg_events(memcg, idx, 1);
1859 | rcu_read_unlock(); |
1860 | } |
1861 | |
1862 | #else |
1863 | static inline bool mem_cgroup_kmem_disabled(void) |
1864 | { |
1865 | return true; |
1866 | } |
1867 | |
1868 | static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, |
1869 | int order) |
1870 | { |
1871 | return 0; |
1872 | } |
1873 | |
1874 | static inline void memcg_kmem_uncharge_page(struct page *page, int order) |
1875 | { |
1876 | } |
1877 | |
1878 | static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, |
1879 | int order) |
1880 | { |
1881 | return 0; |
1882 | } |
1883 | |
1884 | static inline void __memcg_kmem_uncharge_page(struct page *page, int order) |
1885 | { |
1886 | } |
1887 | |
1888 | static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio) |
1889 | { |
1890 | return NULL; |
1891 | } |
1892 | |
1893 | static inline bool memcg_bpf_enabled(void) |
1894 | { |
1895 | return false; |
1896 | } |
1897 | |
1898 | static inline bool memcg_kmem_online(void) |
1899 | { |
1900 | return false; |
1901 | } |
1902 | |
1903 | static inline int memcg_kmem_id(struct mem_cgroup *memcg) |
1904 | { |
1905 | return -1; |
1906 | } |
1907 | |
1908 | static inline struct mem_cgroup *mem_cgroup_from_obj(void *p) |
1909 | { |
1910 | return NULL; |
1911 | } |
1912 | |
1913 | static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p) |
1914 | { |
1915 | return NULL; |
1916 | } |
1917 | |
1918 | static inline void count_objcg_event(struct obj_cgroup *objcg, |
1919 | enum vm_event_item idx) |
1920 | { |
1921 | } |
1922 | |
1923 | #endif /* CONFIG_MEMCG_KMEM */ |
1924 | |
1925 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) |
1926 | bool obj_cgroup_may_zswap(struct obj_cgroup *objcg); |
1927 | void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size); |
1928 | void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size); |
1929 | #else |
1930 | static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg) |
1931 | { |
1932 | return true; |
1933 | } |
1934 | static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, |
1935 | size_t size) |
1936 | { |
1937 | } |
1938 | static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, |
1939 | size_t size) |
1940 | { |
1941 | } |
1942 | #endif |
1943 | |
1944 | #endif /* _LINUX_MEMCONTROL_H */ |
1945 | |