// SPDX-License-Identifier: GPL-2.0
/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * Polling support by Suren Baghdasaryan <surenb@google.com>
 * Copyright (c) 2018 Google, Inc.
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 * Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
 *
 * What it means for a task to be productive is defined differently
 * for each resource. For IO, productive means a running task. For
 * memory, productive means a running task that isn't a reclaimer. For
 * CPU, productive means an oncpu task.
 *
 * Naturally, the FULL state doesn't exist for the CPU resource at the
 * system level, but it does exist at the cgroup level. At the cgroup
 * level, FULL means all non-idle tasks in the cgroup are delayed on
 * the CPU resource which is being used by others outside of the
 * cgroup or throttled by the cgroup cpu.max configuration.
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
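 *
 * [ For example, if the SOME state was active for 500ms out of a
 *   2s tracking period, %SOME for that period is 500ms / 2s = 25%. ]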
 *
 * Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME then becomes the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_productive_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)		=  0.4%
 *	   FULL = (256 - min(256, 256)) / 256	=  0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)			= 25%
 *	   FULL = (4 - min(3, 4)) / 4		= 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
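 *
 * [ Worked out, with nr_cpus = 1 there is at most one possible
 *   execution thread, so for a non-idle CPU:
 *
 *	threads = 1
 *	   SOME = min(nr_delayed_tasks / 1, 1)
 *	   FULL = (1 - min(nr_productive_tasks, 1)) / 1
 *
 *   i.e. SOME is 100% as soon as one task is delayed, and FULL is
 *   100% when additionally none of the non-idle tasks is productive,
 *   matching the single-CPU definitions above. ]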
 *
 * Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	   %SOME = tSOME / period
 *	   %FULL = tFULL / period
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
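 *
 * [ For example, over a 2s period with two CPUs, where CPU0 was
 *   non-idle the whole time with tSOME[0] = 1s, and CPU1 was
 *   entirely idle (tNONIDLE[1] = 0):
 *
 *	tSOME = (1s * 2s + 0 * 0) / 2s = 1s  ->  %SOME = 50%
 *
 *   the idle CPU carries no weight and does not dilute the signal. ]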
 */

static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);
static DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif
static int __init setup_psi(char *str)
{
	return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */
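
/*
 * The EXP_* constants are e^(-period/window) in the FIXED_1
 * (1 << 11 == 2048) fixed-point format used by calc_load(), e.g.:
 *
 *	2048 * exp(-2s/10s)  ~= 2048 * 0.8187 ~= 1677
 *	2048 * exp(-2s/60s)  ~= 2048 * 0.9672 ~= 1981
 *	2048 * exp(-2s/300s) ~= 2048 * 0.9934 ~= 2034
 */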

/* PSI trigger definitions */
#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
#define UPDATES_PER_WINDOW 10	/* 10 updates per window */

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
struct psi_group psi_system = {
	.pcpu = &system_group_pcpu,
};

static void psi_avgs_work(struct work_struct *work);

static void poll_timer_fn(struct timer_list *t);

static void group_init(struct psi_group *group)
{
	int cpu;

	group->enabled = true;
	for_each_possible_cpu(cpu)
		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
	group->avg_last_update = sched_clock();
	group->avg_next_update = group->avg_last_update + psi_period;
	mutex_init(&group->avgs_lock);

	/* Init avg trigger-related members */
	INIT_LIST_HEAD(&group->avg_triggers);
	memset(group->avg_nr_triggers, 0, sizeof(group->avg_nr_triggers));
	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);

	/* Init rtpoll trigger-related members */
	atomic_set(&group->rtpoll_scheduled, 0);
	mutex_init(&group->rtpoll_trigger_lock);
	INIT_LIST_HEAD(&group->rtpoll_triggers);
	group->rtpoll_min_period = U32_MAX;
	group->rtpoll_next_update = ULLONG_MAX;
	init_waitqueue_head(&group->rtpoll_wait);
	timer_setup(&group->rtpoll_timer, poll_timer_fn, 0);
	rcu_assign_pointer(group->rtpoll_task, NULL);
}

void __init psi_init(void)
{
	if (!psi_enable) {
		static_branch_enable(&psi_disabled);
		static_branch_disable(&psi_cgroups_enabled);
		return;
	}

	if (!cgroup_psi_enabled())
		static_branch_disable(&psi_cgroups_enabled);

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}

static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)
{
	switch (state) {
	case PSI_IO_SOME:
		return unlikely(tasks[NR_IOWAIT]);
	case PSI_IO_FULL:
		return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
	case PSI_MEM_SOME:
		return unlikely(tasks[NR_MEMSTALL]);
	case PSI_MEM_FULL:
		return unlikely(tasks[NR_MEMSTALL] &&
			tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
	case PSI_CPU_SOME:
		return unlikely(tasks[NR_RUNNING] > oncpu);
	case PSI_CPU_FULL:
		return unlikely(tasks[NR_RUNNING] && !oncpu);
	case PSI_NONIDLE:
		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
			tasks[NR_RUNNING];
	default:
		return false;
	}
}
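
/*
 * An illustrative (not exhaustive) example of the tests above: with
 * one runnable task that is currently in reclaim, i.e.
 * tasks[NR_MEMSTALL] == 1, tasks[NR_RUNNING] == 1 and
 * tasks[NR_MEMSTALL_RUNNING] == 1, this sets PSI_MEM_SOME,
 * PSI_MEM_FULL (every running task is a reclaimer, so nothing
 * productive can run) and PSI_NONIDLE.
 */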

static void get_recent_times(struct psi_group *group, int cpu,
			     enum psi_aggregators aggregator, u32 *times,
			     u32 *pchanged_states)
{
	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
	int current_cpu = raw_smp_processor_id();
	unsigned int tasks[NR_PSI_TASK_COUNTS];
	u64 now, state_start;
	enum psi_states s;
	unsigned int seq;
	u32 state_mask;

	*pchanged_states = 0;

	/* Snapshot a coherent view of the CPU state */
	do {
		seq = read_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);
		memcpy(times, groupc->times, sizeof(groupc->times));
		state_mask = groupc->state_mask;
		state_start = groupc->state_start;
		if (cpu == current_cpu)
			memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
	} while (read_seqcount_retry(&groupc->seq, seq));

	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (state_mask & (1 << s))
			times[s] += now - state_start;

		delta = times[s] - groupc->times_prev[aggregator][s];
		groupc->times_prev[aggregator][s] = times[s];

		times[s] = delta;
		if (delta)
			*pchanged_states |= (1 << s);
	}

	/*
	 * When collect_percpu_times() is called from the avgs_work, we
	 * don't want to re-arm avgs_work when all CPUs are IDLE. But the
	 * current CPU running this avgs_work is never IDLE, because
	 * avgs_work can't be shut off. So for the current CPU, we need
	 * to re-arm avgs_work only when
	 * (NR_RUNNING > 1 || NR_IOWAIT > 0 || NR_MEMSTALL > 0); for
	 * other CPUs we can just check the PSI_NONIDLE delta.
	 */
	if (current_work() == &group->avgs_work.work) {
		bool reschedule;

		if (cpu == current_cpu)
			reschedule = tasks[NR_RUNNING] +
				     tasks[NR_IOWAIT] +
				     tasks[NR_MEMSTALL] > 1;
		else
			reschedule = *pchanged_states & (1 << PSI_NONIDLE);

		if (reschedule)
			*pchanged_states |= PSI_STATE_RESCHEDULE;
	}
}

static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;

	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
	}

	/* Sample the most recent active period */
	pct = div_u64(time * 100, period);
	pct *= FIXED_1;
	avg[0] = calc_load(avg[0], EXP_10s, pct);
	avg[1] = calc_load(avg[1], EXP_60s, pct);
	avg[2] = calc_load(avg[2], EXP_300s, pct);
}
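
/*
 * A sketch of the math above: calc_load() computes an exponentially
 * decaying average in FIXED_1 fixed-point, roughly
 *
 *	avg = (avg * EXP + pct * (FIXED_1 - EXP)) / FIXED_1
 *
 * so with EXP_10s, one 2s sample at pct replaces about 18% of the
 * old 10s average (1 - 1677/2048), and calc_load_n() applies the
 * same decay n times for the missed, all-idle periods.
 */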

static void collect_percpu_times(struct psi_group *group,
				 enum psi_aggregators aggregator,
				 u32 *pchanged_states)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long nonidle_total = 0;
	u32 changed_states = 0;
	int cpu;
	int s;

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wallclock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;
		u32 cpu_changed_states;

		get_recent_times(group, cpu, aggregator, times,
				 &cpu_changed_states);
		changed_states |= cpu_changed_states;

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	/* total= */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[aggregator][s] +=
				div_u64(deltas[s], max(nonidle_total, 1UL));

	if (pchanged_states)
		*pchanged_states = changed_states;
}

/* Trigger tracking window manipulations */
static void window_reset(struct psi_window *win, u64 now, u64 value,
			 u64 prev_growth)
{
	win->start_time = now;
	win->start_value = value;
	win->prev_growth = prev_growth;
}

/*
 * PSI growth tracking window update and growth calculation routine.
 *
 * This approximates a sliding tracking window by interpolating
 * partially elapsed windows using historical growth data from the
 * previous intervals. This minimizes memory requirements (by not storing
 * all the intermediate values in the previous window) and simplifies
 * the calculations. It works well because PSI signal changes only in
 * positive direction and over relatively small window sizes the growth
 * is close to linear.
 */
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
	u64 elapsed;
	u64 growth;

	elapsed = now - win->start_time;
	growth = value - win->start_value;
	/*
	 * After each tracking window passes win->start_value and
	 * win->start_time get reset and win->prev_growth stores
	 * the average per-window growth of the previous window.
	 * win->prev_growth is then used to interpolate additional
	 * growth from the previous window assuming it was linear.
	 */
	if (elapsed > win->size)
		window_reset(win, now, value, growth);
	else {
		u32 remaining;

		remaining = win->size - elapsed;
		growth += div64_u64(win->prev_growth * remaining, win->size);
	}

	return growth;
}
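
/*
 * For example, with a 1s window that is 250ms into its current
 * interval, 40ms of growth observed so far, and 40ms of growth in
 * the previous window:
 *
 *	growth = 40ms + 40ms * 750ms/1000ms = 70ms
 *
 * approximating the growth of a sliding 1s window ending now.
 */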

static void update_triggers(struct psi_group *group, u64 now,
			    enum psi_aggregators aggregator)
{
	struct psi_trigger *t;
	u64 *total = group->total[aggregator];
	struct list_head *triggers;
	u64 *aggregator_total;

	if (aggregator == PSI_AVGS) {
		triggers = &group->avg_triggers;
		aggregator_total = group->avg_total;
	} else {
		triggers = &group->rtpoll_triggers;
		aggregator_total = group->rtpoll_total;
	}

	/*
	 * On subsequent updates, calculate growth deltas and let
	 * watchers know when their specified thresholds are exceeded.
	 */
	list_for_each_entry(t, triggers, node) {
		u64 growth;
		bool new_stall;

		new_stall = aggregator_total[t->state] != total[t->state];

		/* Check for stall activity or a previous threshold breach */
		if (!new_stall && !t->pending_event)
			continue;
		/*
		 * Check for new stall activity, as well as deferred
		 * events that occurred in the last window after the
		 * trigger had already fired (we want to ratelimit
		 * events without dropping any).
		 */
		if (new_stall) {
			/* Calculate growth since last update */
			growth = window_update(&t->win, now, total[t->state]);
			if (!t->pending_event) {
				if (growth < t->threshold)
					continue;

				t->pending_event = true;
			}
		}
		/* Limit event signaling to once per window */
		if (now < t->last_event_time + t->win.size)
			continue;

		/* Generate an event */
		if (cmpxchg(&t->event, 0, 1) == 0) {
			if (t->of)
				kernfs_notify(t->of->kn);
			else
				wake_up_interruptible(&t->event_wait);
		}
		t->last_event_time = now;
		/* Reset threshold breach flag once event got generated */
		t->pending_event = false;
	}
}

static u64 update_averages(struct psi_group *group, u64 now)
{
	unsigned long missed_periods = 0;
	u64 expires, period;
	u64 avg_next_update;
	int s;

	/* avgX= */
	expires = group->avg_next_update;
	if (now - expires >= psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	avg_next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->avg_last_update + (missed_periods * psi_period));
	group->avg_last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->avg_total[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}

	return avg_next_update;
}

static void psi_avgs_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct psi_group *group;
	u32 changed_states;
	u64 now;

	dwork = to_delayed_work(work);
	group = container_of(dwork, struct psi_group, avgs_work);

	mutex_lock(&group->avgs_lock);

	now = sched_clock();

	collect_percpu_times(group, PSI_AVGS, &changed_states);
	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */
	if (now >= group->avg_next_update) {
		update_triggers(group, now, PSI_AVGS);
		group->avg_next_update = update_averages(group, now);
	}

	if (changed_states & PSI_STATE_RESCHEDULE) {
		schedule_delayed_work(dwork, nsecs_to_jiffies(
				group->avg_next_update - now) + 1);
	}

	mutex_unlock(&group->avgs_lock);
}

static void init_rtpoll_triggers(struct psi_group *group, u64 now)
{
	struct psi_trigger *t;

	list_for_each_entry(t, &group->rtpoll_triggers, node)
		window_reset(&t->win, now,
				group->total[PSI_POLL][t->state], 0);
	memcpy(group->rtpoll_total, group->total[PSI_POLL],
		   sizeof(group->rtpoll_total));
	group->rtpoll_next_update = now + group->rtpoll_min_period;
}

/* Schedule rtpolling if it's not already scheduled or forced. */
static void psi_schedule_rtpoll_work(struct psi_group *group, unsigned long delay,
				     bool force)
{
	struct task_struct *task;

	/*
	 * atomic_xchg should be called even when !force to provide a
	 * full memory barrier (see the comment inside psi_rtpoll_work).
	 */
	if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force)
		return;

	rcu_read_lock();

	task = rcu_dereference(group->rtpoll_task);
	/*
	 * kworker might be NULL in case psi_trigger_destroy races with
	 * psi_task_change (hotpath) which can't use locks
	 */
	if (likely(task))
		mod_timer(&group->rtpoll_timer, jiffies + delay);
	else
		atomic_set(&group->rtpoll_scheduled, 0);

	rcu_read_unlock();
}

static void psi_rtpoll_work(struct psi_group *group)
{
	bool force_reschedule = false;
	u32 changed_states;
	u64 now;

	mutex_lock(&group->rtpoll_trigger_lock);

	now = sched_clock();

	if (now > group->rtpoll_until) {
		/*
		 * We are either about to start or might stop rtpolling if no
		 * state change was recorded. Resetting rtpoll_scheduled leaves
		 * a small window for psi_group_change to sneak in and schedule
		 * an immediate rtpoll_work before we get to rescheduling. One
		 * potential extra wakeup at the end of the rtpolling window
		 * should be negligible and rtpoll_next_update still keeps
		 * updates correctly on schedule.
		 */
		atomic_set(&group->rtpoll_scheduled, 0);
		/*
		 * A task change can race with the rtpoll worker that is supposed to
		 * report on it. To avoid missing events, ensure ordering between
		 * rtpoll_scheduled and the task state accesses, such that if the
		 * rtpoll worker misses the state update, the task change is
		 * guaranteed to reschedule the rtpoll worker:
		 *
		 * rtpoll worker:
		 *   atomic_set(rtpoll_scheduled, 0)
		 *   smp_mb()
		 *   LOAD states
		 *
		 * task change:
		 *   STORE states
		 *   if atomic_xchg(rtpoll_scheduled, 1) == 0:
		 *     schedule rtpoll worker
		 *
		 * The atomic_xchg() implies a full barrier.
		 */
		smp_mb();
	} else {
		/* The rtpolling window is not over, keep rescheduling */
		force_reschedule = true;
	}

	collect_percpu_times(group, PSI_POLL, &changed_states);

	if (changed_states & group->rtpoll_states) {
		/* Initialize trigger windows when entering rtpolling mode */
		if (now > group->rtpoll_until)
			init_rtpoll_triggers(group, now);

		/*
		 * Keep the monitor active for at least the duration of the
		 * minimum tracking window as long as monitor states are
		 * changing.
		 */
		group->rtpoll_until = now +
			group->rtpoll_min_period * UPDATES_PER_WINDOW;
	}

	if (now > group->rtpoll_until) {
		group->rtpoll_next_update = ULLONG_MAX;
		goto out;
	}

	if (now >= group->rtpoll_next_update) {
		if (changed_states & group->rtpoll_states) {
			update_triggers(group, now, PSI_POLL);
			memcpy(group->rtpoll_total, group->total[PSI_POLL],
				   sizeof(group->rtpoll_total));
		}
		group->rtpoll_next_update = now + group->rtpoll_min_period;
	}

	psi_schedule_rtpoll_work(group,
		nsecs_to_jiffies(group->rtpoll_next_update - now) + 1,
		force_reschedule);

out:
	mutex_unlock(&group->rtpoll_trigger_lock);
}

static int psi_rtpoll_worker(void *data)
{
	struct psi_group *group = (struct psi_group *)data;

	sched_set_fifo_low(current);

	while (true) {
		wait_event_interruptible(group->rtpoll_wait,
				atomic_cmpxchg(&group->rtpoll_wakeup, 1, 0) ||
				kthread_should_stop());
		if (kthread_should_stop())
			break;

		psi_rtpoll_work(group);
	}
	return 0;
}

static void poll_timer_fn(struct timer_list *t)
{
	struct psi_group *group = from_timer(group, t, rtpoll_timer);

	atomic_set(&group->rtpoll_wakeup, 1);
	wake_up_interruptible(&group->rtpoll_wait);
}

static void record_times(struct psi_group_cpu *groupc, u64 now)
{
	u32 delta;

	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
		groupc->times[PSI_CPU_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_CPU_FULL))
			groupc->times[PSI_CPU_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}

static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set, u64 now,
			     bool wake_clock)
{
	struct psi_group_cpu *groupc;
	unsigned int t, m;
	enum psi_states s;
	u32 state_mask;

	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * First we update the task counts according to the state
	 * change requested through the @clear and @set bits.
	 *
	 * Then, if cgroup PSI stats accounting is enabled, we
	 * assess the aggregate resource states this CPU's tasks
	 * have been in since the last change, and account any
	 * SOME and FULL time these may have resulted in.
	 */
	write_seqcount_begin(&groupc->seq);

	/*
	 * Start with TSK_ONCPU, which doesn't have a corresponding
	 * task count - it's just a boolean flag directly encoded in
	 * the state mask. Clear, set, or carry the current state if
	 * no changes are requested.
	 */
	if (unlikely(clear & TSK_ONCPU)) {
		state_mask = 0;
		clear &= ~TSK_ONCPU;
	} else if (unlikely(set & TSK_ONCPU)) {
		state_mask = PSI_ONCPU;
		set &= ~TSK_ONCPU;
	} else {
		state_mask = groupc->state_mask & PSI_ONCPU;
	}

	/*
	 * The rest of the state mask is calculated based on the task
	 * counts. Update those first, then construct the mask.
	 */
	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t]) {
			groupc->tasks[t]--;
		} else if (!psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					groupc->tasks[3], clear, set);
			psi_bug = 1;
		}
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	if (!group->enabled) {
		/*
		 * On the first group change after disabling PSI, conclude
		 * the current state and flush its time. This is unlikely
		 * to matter to the user, but aggregation (get_recent_times)
		 * may have already incorporated the live state into times_prev;
		 * avoid a delta sample underflow when PSI is later re-enabled.
		 */
		if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE)))
			record_times(groupc, now);

		groupc->state_mask = state_mask;

		write_seqcount_end(&groupc->seq);
		return;
	}

	for (s = 0; s < NR_PSI_STATES; s++) {
		if (test_state(groupc->tasks, s, state_mask & PSI_ONCPU))
			state_mask |= (1 << s);
	}

	/*
	 * Since we care about lost potential, a memstall is FULL
	 * when there are no other working tasks, but also when
	 * the CPU is actively reclaiming and nothing productive
	 * could run even if it were runnable. So when the current
	 * task in a cgroup is in_memstall, the corresponding groupc
	 * on that cpu is in PSI_MEM_FULL state.
	 */
	if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
		state_mask |= (1 << PSI_MEM_FULL);

	record_times(groupc, now);

	groupc->state_mask = state_mask;

	write_seqcount_end(&groupc->seq);

	if (state_mask & group->rtpoll_states)
		psi_schedule_rtpoll_work(group, 1, false);

	if (wake_clock && !delayed_work_pending(&group->avgs_work))
		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}

static inline struct psi_group *task_psi_group(struct task_struct *task)
{
#ifdef CONFIG_CGROUPS
	if (static_branch_likely(&psi_cgroups_enabled))
		return cgroup_psi(task_dfl_cgroup(task));
#endif
	return &psi_system;
}

static void psi_flags_change(struct task_struct *task, int clear, int set)
{
	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, task_cpu(task),
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	u64 now;

	if (!task->pid)
		return;

	psi_flags_change(task, clear, set);

	now = cpu_clock(cpu);

	group = task_psi_group(task);
	do {
		psi_group_change(group, cpu, clear, set, now, true);
	} while ((group = group->parent));
}

void psi_task_switch(struct task_struct *prev, struct task_struct *next,
		     bool sleep)
{
	struct psi_group *group, *common = NULL;
	int cpu = task_cpu(prev);
	u64 now = cpu_clock(cpu);

	if (next->pid) {
		psi_flags_change(next, 0, TSK_ONCPU);
		/*
		 * Set TSK_ONCPU on @next's cgroups. If @next shares any
		 * ancestors with @prev, those will already have @prev's
		 * TSK_ONCPU bit set, and we can stop the iteration there.
		 */
		group = task_psi_group(next);
		do {
			if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
			    PSI_ONCPU) {
				common = group;
				break;
			}

			psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
		} while ((group = group->parent));
	}

	if (prev->pid) {
		int clear = TSK_ONCPU, set = 0;
		bool wake_clock = true;

		/*
		 * When we're going to sleep, psi_dequeue() lets us
		 * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
		 * TSK_IOWAIT here, where we can combine it with
		 * TSK_ONCPU and save walking common ancestors twice.
		 */
		if (sleep) {
			clear |= TSK_RUNNING;
			if (prev->in_memstall)
				clear |= TSK_MEMSTALL_RUNNING;
			if (prev->in_iowait)
				set |= TSK_IOWAIT;

			/*
			 * Periodic aggregation shuts off if there is a period of no
			 * task changes, so we wake it back up if necessary. However,
			 * don't do this if the task change is the aggregation worker
			 * itself going to sleep, or we'll ping-pong forever.
			 */
			if (unlikely((prev->flags & PF_WQ_WORKER) &&
				     wq_worker_last_func(prev) == psi_avgs_work))
				wake_clock = false;
		}

		psi_flags_change(prev, clear, set);

		group = task_psi_group(prev);
		do {
			if (group == common)
				break;
			psi_group_change(group, cpu, clear, set, now, wake_clock);
		} while ((group = group->parent));

		/*
		 * TSK_ONCPU is handled up to the common ancestor. If there are
		 * any other differences between the two tasks (e.g. prev goes
		 * to sleep, or only one task is memstall), finish propagating
		 * those differences all the way up to the root.
		 */
		if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
			clear &= ~TSK_ONCPU;
			for (; group; group = group->parent)
				psi_group_change(group, cpu, clear, set, now, wake_clock);
		}
	}
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
void psi_account_irqtime(struct task_struct *task, u32 delta)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	struct psi_group_cpu *groupc;
	u64 now;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!task->pid)
		return;

	now = cpu_clock(cpu);

	group = task_psi_group(task);
	do {
		if (!group->enabled)
			continue;

		groupc = per_cpu_ptr(group->pcpu, cpu);

		write_seqcount_begin(&groupc->seq);

		record_times(groupc, now);
		groupc->times[PSI_IRQ_FULL] += delta;

		write_seqcount_end(&groupc->seq);

		if (group->rtpoll_states & (1 << PSI_IRQ_FULL))
			psi_schedule_rtpoll_work(group, 1, false);
	} while ((group = group->parent));
}
#endif

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	*flags = current->in_memstall;
	if (*flags)
		return;
	/*
	 * in_memstall setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 1;
	psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);

	rq_unlock_irq(rq, &rf);
}
EXPORT_SYMBOL_GPL(psi_memstall_enter);

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	if (*flags)
		return;
	/*
	 * in_memstall clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 0;
	psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);

	rq_unlock_irq(rq, &rf);
}
EXPORT_SYMBOL_GPL(psi_memstall_leave);

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
	if (!static_branch_likely(&psi_cgroups_enabled))
		return 0;

	cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
	if (!cgroup->psi)
		return -ENOMEM;

	cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
	if (!cgroup->psi->pcpu) {
		kfree(cgroup->psi);
		return -ENOMEM;
	}
	group_init(cgroup->psi);
	cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup));
	return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
	if (!static_branch_likely(&psi_cgroups_enabled))
		return;

	cancel_delayed_work_sync(&cgroup->psi->avgs_work);
	free_percpu(cgroup->psi->pcpu);
	/* All triggers must be removed by now */
	WARN_ONCE(cgroup->psi->rtpoll_states, "psi: trigger leak\n");
	kfree(cgroup->psi);
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	unsigned int task_flags;
	struct rq_flags rf;
	struct rq *rq;

	if (!static_branch_likely(&psi_cgroups_enabled)) {
		/*
		 * Lame to do this here, but the scheduler cannot be locked
		 * from the outside, so we move cgroups from inside sched/.
		 */
		rcu_assign_pointer(task->cgroups, to);
		return;
	}

	rq = task_rq_lock(task, &rf);

	/*
	 * We may race with schedule() dropping the rq lock between
	 * deactivating prev and switching to next. Because the psi
	 * updates from the deactivation are deferred to the switch
	 * callback to save cgroup tree updates, the task's scheduling
	 * state here is not coherent with its psi state:
	 *
	 * schedule()                     cgroup_move_task()
	 *   rq_lock()
	 *   deactivate_task()
	 *     p->on_rq = 0
	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
	 *   pick_next_task()
	 *     rq_unlock()
	 *                                  rq_lock()
	 *                                  psi_task_change() // old cgroup
	 *                                  task->cgroups = to
	 *                                  psi_task_change() // new cgroup
	 *                                  rq_unlock()
	 *     rq_lock()
	 *     psi_sched_switch() // does deferred updates in new cgroup
	 *
	 * Don't rely on the scheduling state. Use psi_flags instead.
	 */
	task_flags = task->psi_flags;

	if (task_flags)
		psi_task_change(task, task_flags, 0);

	/* See comment above */
	rcu_assign_pointer(task->cgroups, to);

	if (task_flags)
		psi_task_change(task, 0, task_flags);

	task_rq_unlock(rq, task, &rf);
}

void psi_cgroup_restart(struct psi_group *group)
{
	int cpu;

	/*
	 * After we disable psi_group->enabled, we don't actually
	 * stop percpu tasks accounting in each psi_group_cpu,
	 * instead we only stop the test_state() loop, record_times()
	 * and the averaging worker, see psi_group_change() for details.
	 *
	 * When disabling cgroup PSI, this function has nothing to sync
	 * since cgroup pressure files are hidden and percpu psi_group_cpu
	 * would see !psi_group->enabled and only do task accounting.
	 *
	 * When re-enabling cgroup PSI, this function uses psi_group_change()
	 * to get the correct state mask from the test_state() loop on
	 * tasks[], and restarts groupc->state_start from now. We use
	 * .clear = .set = 0 here since no task status really changed.
	 */
	if (!group->enabled)
		return;

	for_each_possible_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
		struct rq_flags rf;
		u64 now;

		rq_lock_irq(rq, &rf);
		now = cpu_clock(cpu);
		psi_group_change(group, cpu, 0, 0, now, true);
		rq_unlock_irq(rq, &rf);
	}
}
#endif /* CONFIG_CGROUPS */

int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	bool only_full = false;
	int full;
	u64 now;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	/* Update averages before reporting them */
	mutex_lock(&group->avgs_lock);
	now = sched_clock();
	collect_percpu_times(group, PSI_AVGS, NULL);
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);
	mutex_unlock(&group->avgs_lock);

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	only_full = res == PSI_IRQ;
#endif

	for (full = 0; full < 2 - only_full; full++) {
		unsigned long avg[3] = { 0, };
		u64 total = 0;
		int w;

		/* CPU FULL is undefined at the system level */
		if (!(group == &psi_system && res == PSI_CPU && full)) {
			for (w = 0; w < 3; w++)
				avg[w] = group->avg[res * 2 + full][w];
			total = div_u64(group->total[PSI_AVGS][res * 2 + full],
					NSEC_PER_USEC);
		}

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full || only_full ? "full" : "some",
			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
			   total);
	}

	return 0;
}
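
/*
 * Per the seq_printf() format above, a pressure file read produces
 * two lines (only one, "full", for the IRQ resource), e.g. for memory:
 *
 *	some avg10=0.32 avg60=0.11 avg300=0.02 total=123456
 *	full avg10=0.00 avg60=0.05 avg300=0.01 total=45678
 *
 * with the averages in percent and total in microseconds (the values
 * shown here are made up for illustration).
 */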
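/*
 * Userspace usage sketch (mirroring Documentation/accounting/psi.rst):
 * register a trigger by writing "<some|full> <threshold_us> <window_us>"
 * to a pressure file and wait for POLLPRI events. The program below is
 * illustrative, not part of the kernel:
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	// Ask for an event when memory SOME stall time exceeds
 *	// 150ms within any 1s window.
 *	int main(void)
 *	{
 *		const char trig[] = "some 150000 1000000";
 *		struct pollfd fds;
 *
 *		fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *		if (fds.fd < 0)
 *			return 1;
 *		if (write(fds.fd, trig, strlen(trig) + 1) < 0)
 *			return 1;
 *		fds.events = POLLPRI;
 *
 *		while (poll(&fds, 1, -1) >= 0) {
 *			if (fds.revents & POLLERR)
 *				break;	// monitored object went away
 *			if (fds.revents & POLLPRI)
 *				;	// threshold breach event
 *		}
 *		return 0;
 *	}
 */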
struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
				       enum psi_res res, struct file *file,
				       struct kernfs_open_file *of)
{
	struct psi_trigger *t;
	enum psi_states state;
	u32 threshold_us;
	bool privileged;
	u32 window_us;

	if (static_branch_likely(&psi_disabled))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * Checking the privilege here on file->f_cred implies that a privileged user
	 * could open the file and delegate the write to an unprivileged one.
	 */
	privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE);

	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
		state = PSI_IO_SOME + res * 2;
	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
		state = PSI_IO_FULL + res * 2;
	else
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	if (res == PSI_IRQ && --state != PSI_IRQ_FULL)
		return ERR_PTR(-EINVAL);
#endif

	if (state >= PSI_NONIDLE)
		return ERR_PTR(-EINVAL);

	if (window_us == 0 || window_us > WINDOW_MAX_US)
		return ERR_PTR(-EINVAL);

	/*
	 * Unprivileged users can only use 2s windows so that averages aggregation
	 * work is used, and no RT threads need to be spawned.
	 */
	if (!privileged && window_us % 2000000)
		return ERR_PTR(-EINVAL);

	/* Check threshold */
	if (threshold_us == 0 || threshold_us > window_us)
		return ERR_PTR(-EINVAL);

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->group = group;
	t->state = state;
	t->threshold = threshold_us * NSEC_PER_USEC;
	t->win.size = window_us * NSEC_PER_USEC;
	window_reset(&t->win, sched_clock(),
			group->total[PSI_POLL][t->state], 0);

	t->event = 0;
	t->last_event_time = 0;
	t->of = of;
	if (!of)
		init_waitqueue_head(&t->event_wait);
	t->pending_event = false;
	t->aggregator = privileged ? PSI_POLL : PSI_AVGS;

	if (privileged) {
		mutex_lock(&group->rtpoll_trigger_lock);

		if (!rcu_access_pointer(group->rtpoll_task)) {
			struct task_struct *task;

			task = kthread_create(psi_rtpoll_worker, group, "psimon");
			if (IS_ERR(task)) {
				kfree(t);
				mutex_unlock(&group->rtpoll_trigger_lock);
				return ERR_CAST(task);
			}
			atomic_set(&group->rtpoll_wakeup, 0);
			wake_up_process(task);
			rcu_assign_pointer(group->rtpoll_task, task);
		}

		list_add(&t->node, &group->rtpoll_triggers);
		group->rtpoll_min_period = min(group->rtpoll_min_period,
			div_u64(t->win.size, UPDATES_PER_WINDOW));
		group->rtpoll_nr_triggers[t->state]++;
		group->rtpoll_states |= (1 << t->state);

		mutex_unlock(&group->rtpoll_trigger_lock);
	} else {
		mutex_lock(&group->avgs_lock);

		list_add(&t->node, &group->avg_triggers);
		group->avg_nr_triggers[t->state]++;

		mutex_unlock(&group->avgs_lock);
	}
	return t;
}

void psi_trigger_destroy(struct psi_trigger *t)
{
	struct psi_group *group;
	struct task_struct *task_to_destroy = NULL;

	/*
	 * We do not check psi_disabled since it might have been disabled after
	 * the trigger got created.
	 */
	if (!t)
		return;

	group = t->group;
	/*
	 * Wakeup waiters to stop polling and clear the queue to prevent it from
	 * being accessed later. Can happen if cgroup is deleted from under a
	 * polling process.
	 */
	if (t->of)
		kernfs_notify(t->of->kn);
	else
		wake_up_interruptible(&t->event_wait);

	if (t->aggregator == PSI_AVGS) {
		mutex_lock(&group->avgs_lock);
		if (!list_empty(&t->node)) {
			list_del(&t->node);
			group->avg_nr_triggers[t->state]--;
		}
		mutex_unlock(&group->avgs_lock);
	} else {
		mutex_lock(&group->rtpoll_trigger_lock);
		if (!list_empty(&t->node)) {
			struct psi_trigger *tmp;
			u64 period = ULLONG_MAX;

			list_del(&t->node);
			group->rtpoll_nr_triggers[t->state]--;
			if (!group->rtpoll_nr_triggers[t->state])
				group->rtpoll_states &= ~(1 << t->state);
			/*
			 * Reset min update period for the remaining triggers
			 * iff the destroying trigger had the min window size.
			 */
			if (group->rtpoll_min_period == div_u64(t->win.size, UPDATES_PER_WINDOW)) {
				list_for_each_entry(tmp, &group->rtpoll_triggers, node)
					period = min(period, div_u64(tmp->win.size,
							UPDATES_PER_WINDOW));
				group->rtpoll_min_period = period;
			}
			/* Destroy rtpoll_task when the last trigger is destroyed */
			if (group->rtpoll_states == 0) {
				group->rtpoll_until = 0;
				task_to_destroy = rcu_dereference_protected(
						group->rtpoll_task,
						lockdep_is_held(&group->rtpoll_trigger_lock));
				rcu_assign_pointer(group->rtpoll_task, NULL);
				del_timer(&group->rtpoll_timer);
			}
		}
		mutex_unlock(&group->rtpoll_trigger_lock);
	}

	/*
	 * Wait for psi_schedule_rtpoll_work RCU to complete its read-side
	 * critical section before destroying the trigger and optionally the
	 * rtpoll_task.
	 */
	synchronize_rcu();
	/*
	 * Stop kthread 'psimon' after releasing rtpoll_trigger_lock to prevent
	 * a deadlock while waiting for psi_rtpoll_work to acquire
	 * rtpoll_trigger_lock
	 */
	if (task_to_destroy) {
		/*
		 * After the RCU grace period has expired, the worker
		 * can no longer be found through group->rtpoll_task.
		 */
		kthread_stop(task_to_destroy);
		atomic_set(&group->rtpoll_scheduled, 0);
	}
	kfree(t);
}

__poll_t psi_trigger_poll(void **trigger_ptr,
			  struct file *file, poll_table *wait)
{
	__poll_t ret = DEFAULT_POLLMASK;
	struct psi_trigger *t;

	if (static_branch_likely(&psi_disabled))
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	t = smp_load_acquire(trigger_ptr);
	if (!t)
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	if (t->of)
		kernfs_generic_poll(t->of, wait);
	else
		poll_wait(file, &t->event_wait, wait);

	if (cmpxchg(&t->event, 1, 0) == 1)
		ret |= EPOLLPRI;

	return ret;
}

#ifdef CONFIG_PROC_FS
static int psi_io_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_cpu_show, NULL);
}

static ssize_t psi_write(struct file *file, const char __user *user_buf,
			 size_t nbytes, enum psi_res res)
{
	char buf[32];
	size_t buf_size;
	struct seq_file *seq;
	struct psi_trigger *new;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	if (!nbytes)
		return -EINVAL;

	buf_size = min(nbytes, sizeof(buf));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size - 1] = '\0';

	seq = file->private_data;

	/* Take seq->lock to protect seq->private from concurrent writes */
	mutex_lock(&seq->lock);

	/* Allow only one trigger per file descriptor */
	if (seq->private) {
		mutex_unlock(&seq->lock);
		return -EBUSY;
	}

	new = psi_trigger_create(&psi_system, buf, res, file, NULL);
	if (IS_ERR(new)) {
		mutex_unlock(&seq->lock);
		return PTR_ERR(new);
	}

	smp_store_release(&seq->private, new);
	mutex_unlock(&seq->lock);

	return nbytes;
}

static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
			    size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_IO);
}

static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
				size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_MEM);
}

static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
			     size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_CPU);
}

static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
{
	struct seq_file *seq = file->private_data;

	return psi_trigger_poll(&seq->private, file, wait);
}

static int psi_fop_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	psi_trigger_destroy(seq->private);
	return single_release(inode, file);
}

static const struct proc_ops psi_io_proc_ops = {
	.proc_open	= psi_io_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_io_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_memory_proc_ops = {
	.proc_open	= psi_memory_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_memory_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_cpu_proc_ops = {
	.proc_open	= psi_cpu_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_cpu_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
static int psi_irq_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IRQ);
}

static int psi_irq_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_irq_show, NULL);
}

static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
			     size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_IRQ);
}

static const struct proc_ops psi_irq_proc_ops = {
	.proc_open	= psi_irq_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_irq_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};
#endif

static int __init psi_proc_init(void)
{
	if (psi_enable) {
		proc_mkdir("pressure", NULL);
		proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
		proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
		proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
		proc_create("pressure/irq", 0666, NULL, &psi_irq_proc_ops);
#endif
	}
	return 0;
}
module_init(psi_proc_init);

#endif /* CONFIG_PROC_FS */