/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT 1
#define DISABLE_NUMA_STAT 0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
		int write, void __user *buffer, size_t *length, loff_t *ppos);
#endif

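/*
 * Per-pass reclaim counters. Presumably filled in by the page reclaim path
 * (e.g. shrink_page_list()) so that callers and tracepoints can see how many
 * pages were found dirty, under writeback, kept due to references, failed to
 * unmap, and so on during a single reclaim pass.
 */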
struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_activate;
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
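
/*
 * Illustrative usage: a single event is typically bumped with
 * count_vm_event(PGFAULT) from a preemptible context, while
 * __count_vm_event()/__count_vm_events() are the cheaper variants for
 * callers that already run with preemption or interrupts disabled.
 */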

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x) count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
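
/*
 * Illustrative usage: the page allocator is expected to account per-zone
 * allocation events with something like
 *
 *	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 *
 * which relies on the PGALLOC_* items being laid out in zone order so that
 * the zone index can simply be added to the ZONE_NORMAL-based item.
 */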

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
					enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
					enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
					enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
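
/*
 * Illustrative usage: watermark checks that must not be fooled by stale
 * per-cpu deltas, e.g. zone_watermark_ok_safe(), are expected to fall back
 * to zone_page_state_snapshot(zone, NR_FREE_PAGES) instead of the cheaper
 * zone_page_state() once free memory gets close to the watermarks.
 */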

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
				     enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

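/*
 * Illustrative usage: kswapd is expected to tighten the per-cpu thresholds
 * with set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold) while
 * it is awake, and restore them via calculate_normal_threshold before going
 * back to sleep, so that counter drift cannot hide a breached watermark
 * under memory pressure.
 */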
int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
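
/*
 * Illustrative usage: the buddy allocator is expected to call something like
 *
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 *
 * when a page of the given order leaves the free lists, so that
 * NR_FREE_PAGES and, for CMA pageblocks, NR_FREE_CMA_PAGES stay in step.
 */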

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */