1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * |
3 | * IO cost model based controller. |
4 | * |
5 | * Copyright (C) 2019 Tejun Heo <tj@kernel.org> |
6 | * Copyright (C) 2019 Andy Newell <newella@fb.com> |
7 | * Copyright (C) 2019 Facebook |
8 | * |
9 | * One challenge of controlling IO resources is the lack of trivially |
10 | * observable cost metric. This is distinguished from CPU and memory where |
11 | * wallclock time and the number of bytes can serve as accurate enough |
12 | * approximations. |
13 | * |
14 | * Bandwidth and iops are the most commonly used metrics for IO devices but |
15 | * depending on the type and specifics of the device, different IO patterns |
16 | * easily lead to multiple orders of magnitude variations rendering them |
17 | * useless for the purpose of IO capacity distribution. While on-device |
 * time, with a lot of crutches, could serve as a useful approximation for
19 | * non-queued rotational devices, this is no longer viable with modern |
20 | * devices, even the rotational ones. |
21 | * |
22 | * While there is no cost metric we can trivially observe, it isn't a |
23 | * complete mystery. For example, on a rotational device, seek cost |
24 | * dominates while a contiguous transfer contributes a smaller amount |
25 | * proportional to the size. If we can characterize at least the relative |
26 | * costs of these different types of IOs, it should be possible to |
27 | * implement a reasonable work-conserving proportional IO resource |
28 | * distribution. |
29 | * |
30 | * 1. IO Cost Model |
31 | * |
32 | * IO cost model estimates the cost of an IO given its basic parameters and |
33 | * history (e.g. the end sector of the last IO). The cost is measured in |
34 | * device time. If a given IO is estimated to cost 10ms, the device should |
35 | * be able to process ~100 of those IOs in a second. |
36 | * |
37 | * Currently, there's only one builtin cost model - linear. Each IO is |
38 | * classified as sequential or random and given a base cost accordingly. |
39 | * On top of that, a size cost proportional to the length of the IO is |
40 | * added. While simple, this model captures the operational |
 * characteristics of a wide variety of devices well enough.  Default
42 | * parameters for several different classes of devices are provided and the |
43 | * parameters can be configured from userspace via |
44 | * /sys/fs/cgroup/io.cost.model. |
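 *
 * As an illustration of the model (using descriptive names rather than
 * the exact variables used below), the estimated cost of a single IO is
 * roughly
 *
 *	cost = (is_seq ? seqio_base_cost : randio_base_cost)
 *	       + nr_4k_pages * page_cost;
 *
 * where the base and per-page coefficients are derived from the
 * configured bps/seqiops/randiops parameters (see calc_lcoefs() below).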
45 | * |
46 | * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate |
47 | * device-specific coefficients. |
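 *
 * For example, something along the following lines (illustrative values
 * taken from the builtin SSD defaults below; $MAJ:$MIN is the target
 * device's device number) switches the device to a user-defined linear
 * model:
 *
 *	echo "$MAJ:$MIN ctrl=user model=linear rbps=488636629 rseqiops=8932 \
 *	      rrandiops=8518 wbps=427891549 wseqiops=28755 wrandiops=21940" \
 *	      > /sys/fs/cgroup/io.cost.model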
48 | * |
49 | * 2. Control Strategy |
50 | * |
51 | * The device virtual time (vtime) is used as the primary control metric. |
52 | * The control strategy is composed of the following three parts. |
53 | * |
54 | * 2-1. Vtime Distribution |
55 | * |
56 | * When a cgroup becomes active in terms of IOs, its hierarchical share is |
57 | * calculated. Please consider the following hierarchy where the numbers |
58 | * inside parentheses denote the configured weights. |
59 | * |
60 | * root |
61 | * / \ |
62 | * A (w:100) B (w:300) |
63 | * / \ |
64 | * A0 (w:100) A1 (w:100) |
65 | * |
66 | * If B is idle and only A0 and A1 are actively issuing IOs, as the two are |
67 | * of equal weight, each gets 50% share. If then B starts issuing IOs, B |
 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69 | * 12.5% each. The distribution mechanism only cares about these flattened |
70 | * shares. They're called hweights (hierarchical weights) and always add |
 * up to 1 (WEIGHT_ONE).
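 *
 * In terms of the configured weights, A0's share above works out as the
 * product of the weight ratios along its ancestry:
 *
 *	hweight(A0) = w(A) / (w(A) + w(B)) * w(A0) / (w(A0) + w(A1))
 *		    = 100 / 400 * 100 / 200 = 12.5%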
72 | * |
73 | * A given cgroup's vtime runs slower in inverse proportion to its hweight. |
74 | * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5) |
75 | * against the device vtime - an IO which takes 10ms on the underlying |
76 | * device is considered to take 80ms on A0. |
77 | * |
78 | * This constitutes the basis of IO capacity distribution. Each cgroup's |
79 | * vtime is running at a rate determined by its hweight. A cgroup tracks |
80 | * the vtime consumed by past IOs and can issue a new IO if doing so |
81 | * wouldn't outrun the current device vtime. Otherwise, the IO is |
82 | * suspended until the vtime has progressed enough to cover it. |
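 *
 * In pseudo-code (not the exact variables used below), the issue-time
 * check is roughly:
 *
 *	if (iocg_vtime + cost <= device_vnow) {
 *		iocg_vtime += cost;		// issue immediately
 *	} else {
 *		wait until device_vnow catches up (or incur debt);
 *	}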
83 | * |
84 | * 2-2. Vrate Adjustment |
85 | * |
86 | * It's unrealistic to expect the cost model to be perfect. There are too |
87 | * many devices and even on the same device the overall performance |
88 | * fluctuates depending on numerous factors such as IO mixture and device |
89 | * internal garbage collection. The controller needs to adapt dynamically. |
90 | * |
91 | * This is achieved by adjusting the overall IO rate according to how busy |
92 | * the device is. If the device becomes overloaded, we're sending down too |
93 | * many IOs and should generally slow down. If there are waiting issuers |
94 | * but the device isn't saturated, we're issuing too few and should |
95 | * generally speed up. |
96 | * |
97 | * To slow down, we lower the vrate - the rate at which the device vtime |
98 | * passes compared to the wall clock. For example, if the vtime is running |
99 | * at the vrate of 75%, all cgroups added up would only be able to issue |
100 | * 750ms worth of IOs per second, and vice-versa for speeding up. |
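 *
 * Concretely, within a period the device vtime advances as
 *
 *	vnow = period_at_vtime + (wallclock_now - period_at) * vrate
 *
 * which is how ioc_now() below computes the current device vtime.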
101 | * |
 * Device busyness is determined using two criteria - rq wait and
103 | * completion latencies. |
104 | * |
105 | * When a device gets saturated, the on-device and then the request queues |
106 | * fill up and a bio which is ready to be issued has to wait for a request |
107 | * to become available. When this delay becomes noticeable, it's a clear |
108 | * indication that the device is saturated and we lower the vrate. This |
109 | * saturation signal is fairly conservative as it only triggers when both |
110 | * hardware and software queues are filled up, and is used as the default |
111 | * busy signal. |
112 | * |
113 | * As devices can have deep queues and be unfair in how the queued commands |
114 | * are executed, solely depending on rq wait may not result in satisfactory |
115 | * control quality. For a better control quality, completion latency QoS |
116 | * parameters can be configured so that the device is considered saturated |
117 | * if N'th percentile completion latency rises above the set point. |
118 | * |
119 | * The completion latency requirements are a function of both the |
120 | * underlying device characteristics and the desired IO latency quality of |
121 | * service. There is an inherent trade-off - the tighter the latency QoS, |
122 | * the higher the bandwidth lossage. Latency QoS is disabled by default |
123 | * and can be set through /sys/fs/cgroup/io.cost.qos. |
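 *
 * For example, a configuration along the following lines (illustrative
 * values; $MAJ:$MIN is the target device's device number) considers the
 * device saturated whenever the 95th percentile read completion latency
 * exceeds 10ms or the write latency exceeds 20ms:
 *
 *	echo "$MAJ:$MIN enable=1 ctrl=user rpct=95.00 rlat=10000 \
 *	      wpct=95.00 wlat=20000 min=50.00 max=150.00" \
 *	      > /sys/fs/cgroup/io.cost.qos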
124 | * |
125 | * 2-3. Work Conservation |
126 | * |
127 | * Imagine two cgroups A and B with equal weights. A is issuing a small IO |
128 | * periodically while B is sending out enough parallel IOs to saturate the |
129 | * device on its own. Let's say A's usage amounts to 100ms worth of IO |
130 | * cost per second, i.e., 10% of the device capacity. The naive |
131 | * distribution of half and half would lead to 60% utilization of the |
132 | * device, a significant reduction in the total amount of work done |
133 | * compared to free-for-all competition. This is too high a cost to pay |
134 | * for IO control. |
135 | * |
136 | * To conserve the total amount of work done, we keep track of how much |
137 | * each active cgroup is actually using and yield part of its weight if |
138 | * there are other cgroups which can make use of it. In the above case, |
139 | * A's weight will be lowered so that it hovers above the actual usage and |
140 | * B would be able to use the rest. |
141 | * |
142 | * As we don't want to penalize a cgroup for donating its weight, the |
143 | * surplus weight adjustment factors in a margin and has an immediate |
144 | * snapback mechanism in case the cgroup needs more IO vtime for itself. |
145 | * |
146 | * Note that adjusting down surplus weights has the same effects as |
147 | * accelerating vtime for other cgroups and work conservation can also be |
 * implemented by adjusting vrate dynamically.  However, determining who
 * can donate and how much each should take back requires hweight
 * propagation anyway, which makes it easier to implement and understand
 * as a separate mechanism.
152 | * |
153 | * 3. Monitoring |
154 | * |
155 | * Instead of debugfs or other clumsy monitoring mechanisms, this |
156 | * controller uses a drgn based monitoring script - |
157 | * tools/cgroup/iocost_monitor.py. For details on drgn, please see |
158 | * https://github.com/osandov/drgn. The output looks like the following. |
159 | * |
160 | * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12% |
161 | * active weight hweight% inflt% dbt delay usages% |
162 | * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033 |
163 | * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077 |
164 | * |
165 | * - per : Timer period |
166 | * - cur_per : Internal wall and device vtime clock |
167 | * - vrate : Device virtual time rate against wall clock |
168 | * - weight : Surplus-adjusted and configured weights |
169 | * - hweight : Surplus-adjusted and configured hierarchical weights |
170 | * - inflt : The percentage of in-flight IO cost at the end of last period |
 * - delay	: Deferred issuer delay induction level and duration
172 | * - usages : Usage history |
173 | */ |
174 | |
175 | #include <linux/kernel.h> |
176 | #include <linux/module.h> |
177 | #include <linux/timer.h> |
178 | #include <linux/time64.h> |
179 | #include <linux/parser.h> |
180 | #include <linux/sched/signal.h> |
181 | #include <asm/local.h> |
182 | #include <asm/local64.h> |
183 | #include "blk-rq-qos.h" |
184 | #include "blk-stat.h" |
185 | #include "blk-wbt.h" |
186 | #include "blk-cgroup.h" |
187 | |
188 | #ifdef CONFIG_TRACEPOINTS |
189 | |
190 | /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */ |
191 | #define TRACE_IOCG_PATH_LEN 1024 |
192 | static DEFINE_SPINLOCK(trace_iocg_path_lock); |
193 | static char trace_iocg_path[TRACE_IOCG_PATH_LEN]; |
194 | |
195 | #define TRACE_IOCG_PATH(type, iocg, ...) \ |
196 | do { \ |
197 | unsigned long flags; \ |
198 | if (trace_iocost_##type##_enabled()) { \ |
199 | spin_lock_irqsave(&trace_iocg_path_lock, flags); \ |
200 | cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \ |
201 | trace_iocg_path, TRACE_IOCG_PATH_LEN); \ |
202 | trace_iocost_##type(iocg, trace_iocg_path, \ |
203 | ##__VA_ARGS__); \ |
204 | spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \ |
205 | } \ |
206 | } while (0) |
207 | |
#else	/* CONFIG_TRACEPOINTS */
#define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
#endif	/* CONFIG_TRACEPOINTS */
211 | |
212 | enum { |
213 | MILLION = 1000000, |
214 | |
215 | /* timer period is calculated from latency requirements, bound it */ |
216 | MIN_PERIOD = USEC_PER_MSEC, |
217 | MAX_PERIOD = USEC_PER_SEC, |
218 | |
219 | /* |
220 | * iocg->vtime is targeted at 50% behind the device vtime, which |
221 | * serves as its IO credit buffer. Surplus weight adjustment is |
222 | * immediately canceled if the vtime margin runs below 10%. |
223 | */ |
224 | MARGIN_MIN_PCT = 10, |
225 | MARGIN_LOW_PCT = 20, |
226 | MARGIN_TARGET_PCT = 50, |
227 | |
228 | INUSE_ADJ_STEP_PCT = 25, |
229 | |
230 | /* Have some play in timer operations */ |
231 | TIMER_SLACK_PCT = 1, |
232 | |
233 | /* 1/64k is granular enough and can easily be handled w/ u32 */ |
234 | WEIGHT_ONE = 1 << 16, |
235 | }; |
236 | |
237 | enum { |
238 | /* |
239 | * As vtime is used to calculate the cost of each IO, it needs to |
240 | * be fairly high precision. For example, it should be able to |
241 | * represent the cost of a single page worth of discard with |
	 * sufficient accuracy. At the same time, it should be able to
243 | * represent reasonably long enough durations to be useful and |
244 | * convenient during operation. |
245 | * |
246 | * 1s worth of vtime is 2^37. This gives us both sub-nanosecond |
247 | * granularity and days of wrap-around time even at extreme vrates. |
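	 *
	 * For example, 2^64 / 2^37 is about 2^27 seconds (~4.3 years) of
	 * wrap-around time at the nominal vrate, and still over two weeks
	 * at the maximum 10000% vrate.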
248 | */ |
249 | VTIME_PER_SEC_SHIFT = 37, |
250 | VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT, |
251 | VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC, |
252 | VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC, |
253 | |
254 | /* bound vrate adjustments within two orders of magnitude */ |
255 | VRATE_MIN_PPM = 10000, /* 1% */ |
256 | VRATE_MAX_PPM = 100000000, /* 10000% */ |
257 | |
258 | VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION, |
259 | VRATE_CLAMP_ADJ_PCT = 4, |
260 | |
261 | /* switch iff the conditions are met for longer than this */ |
262 | AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC, |
263 | }; |
264 | |
265 | enum { |
266 | /* if IOs end up waiting for requests, issue less */ |
267 | RQ_WAIT_BUSY_PCT = 5, |
268 | |
	/* unbusy hysteresis */
270 | UNBUSY_THR_PCT = 75, |
271 | |
272 | /* |
273 | * The effect of delay is indirect and non-linear and a huge amount of |
274 | * future debt can accumulate abruptly while unthrottled. Linearly scale |
275 | * up delay as debt is going up and then let it decay exponentially. |
276 | * This gives us quick ramp ups while delay is accumulating and long |
	 * tails which can help reduce the frequency of debt explosions on
278 | * unthrottle. The parameters are experimentally determined. |
279 | * |
280 | * The delay mechanism provides adequate protection and behavior in many |
	 * cases. However, this is far from ideal and falls short on both
	 * fronts. The debtors are often throttled too harshly, costing a
	 * significant amount of fairness and possibly total work, while the
284 | * protection against their impacts on the system can be choppy and |
285 | * unreliable. |
286 | * |
287 | * The shortcoming primarily stems from the fact that, unlike for page |
	 * cache, the kernel doesn't have a well-defined back-pressure propagation
289 | * mechanism and policies for anonymous memory. Fully addressing this |
290 | * issue will likely require substantial improvements in the area. |
291 | */ |
292 | MIN_DELAY_THR_PCT = 500, |
293 | MAX_DELAY_THR_PCT = 25000, |
294 | MIN_DELAY = 250, |
295 | MAX_DELAY = 250 * USEC_PER_MSEC, |
296 | |
297 | /* halve debts if avg usage over 100ms is under 50% */ |
298 | DFGV_USAGE_PCT = 50, |
299 | DFGV_PERIOD = 100 * USEC_PER_MSEC, |
300 | |
301 | /* don't let cmds which take a very long time pin lagging for too long */ |
302 | MAX_LAGGING_PERIODS = 10, |
303 | |
304 | /* |
	 * Count IO size in 4k pages. The 12bit shift helps keep the
	 * size-proportional components of the cost calculation within a
	 * similar number of digits as the per-IO cost components.
308 | */ |
309 | IOC_PAGE_SHIFT = 12, |
310 | IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT, |
311 | IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT, |
312 | |
313 | /* if apart further than 16M, consider randio for linear model */ |
314 | LCOEF_RANDIO_PAGES = 4096, |
315 | }; |
316 | |
317 | enum ioc_running { |
318 | IOC_IDLE, |
319 | IOC_RUNNING, |
320 | IOC_STOP, |
321 | }; |
322 | |
323 | /* io.cost.qos controls including per-dev enable of the whole controller */ |
324 | enum { |
325 | QOS_ENABLE, |
326 | QOS_CTRL, |
327 | NR_QOS_CTRL_PARAMS, |
328 | }; |
329 | |
330 | /* io.cost.qos params */ |
331 | enum { |
332 | QOS_RPPM, |
333 | QOS_RLAT, |
334 | QOS_WPPM, |
335 | QOS_WLAT, |
336 | QOS_MIN, |
337 | QOS_MAX, |
338 | NR_QOS_PARAMS, |
339 | }; |
340 | |
341 | /* io.cost.model controls */ |
342 | enum { |
343 | COST_CTRL, |
344 | COST_MODEL, |
345 | NR_COST_CTRL_PARAMS, |
346 | }; |
347 | |
348 | /* builtin linear cost model coefficients */ |
349 | enum { |
350 | I_LCOEF_RBPS, |
351 | I_LCOEF_RSEQIOPS, |
352 | I_LCOEF_RRANDIOPS, |
353 | I_LCOEF_WBPS, |
354 | I_LCOEF_WSEQIOPS, |
355 | I_LCOEF_WRANDIOPS, |
356 | NR_I_LCOEFS, |
357 | }; |
358 | |
359 | enum { |
360 | LCOEF_RPAGE, |
361 | LCOEF_RSEQIO, |
362 | LCOEF_RRANDIO, |
363 | LCOEF_WPAGE, |
364 | LCOEF_WSEQIO, |
365 | LCOEF_WRANDIO, |
366 | NR_LCOEFS, |
367 | }; |
368 | |
369 | enum { |
370 | AUTOP_INVALID, |
371 | AUTOP_HDD, |
372 | AUTOP_SSD_QD1, |
373 | AUTOP_SSD_DFL, |
374 | AUTOP_SSD_FAST, |
375 | }; |
376 | |
377 | struct ioc_params { |
378 | u32 qos[NR_QOS_PARAMS]; |
379 | u64 i_lcoefs[NR_I_LCOEFS]; |
380 | u64 lcoefs[NR_LCOEFS]; |
381 | u32 too_fast_vrate_pct; |
382 | u32 too_slow_vrate_pct; |
383 | }; |
384 | |
385 | struct ioc_margins { |
386 | s64 min; |
387 | s64 low; |
388 | s64 target; |
389 | }; |
390 | |
391 | struct ioc_missed { |
392 | local_t nr_met; |
393 | local_t nr_missed; |
394 | u32 last_met; |
395 | u32 last_missed; |
396 | }; |
397 | |
398 | struct ioc_pcpu_stat { |
399 | struct ioc_missed missed[2]; |
400 | |
401 | local64_t rq_wait_ns; |
402 | u64 last_rq_wait_ns; |
403 | }; |
404 | |
405 | /* per device */ |
406 | struct ioc { |
407 | struct rq_qos rqos; |
408 | |
409 | bool enabled; |
410 | |
411 | struct ioc_params params; |
412 | struct ioc_margins margins; |
413 | u32 period_us; |
414 | u32 timer_slack_ns; |
415 | u64 vrate_min; |
416 | u64 vrate_max; |
417 | |
418 | spinlock_t lock; |
419 | struct timer_list timer; |
420 | struct list_head active_iocgs; /* active cgroups */ |
421 | struct ioc_pcpu_stat __percpu *pcpu_stat; |
422 | |
423 | enum ioc_running running; |
424 | atomic64_t vtime_rate; |
425 | u64 vtime_base_rate; |
426 | s64 vtime_err; |
427 | |
428 | seqcount_spinlock_t period_seqcount; |
429 | u64 period_at; /* wallclock starttime */ |
430 | u64 period_at_vtime; /* vtime starttime */ |
431 | |
432 | atomic64_t cur_period; /* inc'd each period */ |
433 | int busy_level; /* saturation history */ |
434 | |
435 | bool weights_updated; |
436 | atomic_t hweight_gen; /* for lazy hweights */ |
437 | |
438 | /* debt forgivness */ |
439 | u64 dfgv_period_at; |
440 | u64 dfgv_period_rem; |
441 | u64 dfgv_usage_us_sum; |
442 | |
443 | u64 autop_too_fast_at; |
444 | u64 autop_too_slow_at; |
445 | int autop_idx; |
446 | bool user_qos_params:1; |
447 | bool user_cost_model:1; |
448 | }; |
449 | |
450 | struct iocg_pcpu_stat { |
451 | local64_t abs_vusage; |
452 | }; |
453 | |
454 | struct iocg_stat { |
455 | u64 usage_us; |
456 | u64 wait_us; |
457 | u64 indebt_us; |
458 | u64 indelay_us; |
459 | }; |
460 | |
461 | /* per device-cgroup pair */ |
462 | struct ioc_gq { |
463 | struct blkg_policy_data pd; |
464 | struct ioc *ioc; |
465 | |
466 | /* |
	 * An iocg can get its weight from two sources - an explicit
	 * per-device-cgroup configuration or the default weight of the
	 * cgroup. `cfg_weight` is the explicit per-device-cgroup
	 * configuration. `weight` is the effective weight considering both
	 * sources.
472 | * |
473 | * When an idle cgroup becomes active its `active` goes from 0 to |
474 | * `weight`. `inuse` is the surplus adjusted active weight. |
475 | * `active` and `inuse` are used to calculate `hweight_active` and |
476 | * `hweight_inuse`. |
477 | * |
478 | * `last_inuse` remembers `inuse` while an iocg is idle to persist |
479 | * surplus adjustments. |
480 | * |
	 * `inuse` may be adjusted dynamically during a period. `saved_*` are used
482 | * to determine and track adjustments. |
483 | */ |
484 | u32 cfg_weight; |
485 | u32 weight; |
486 | u32 active; |
487 | u32 inuse; |
488 | |
489 | u32 last_inuse; |
490 | s64 saved_margin; |
491 | |
492 | sector_t cursor; /* to detect randio */ |
493 | |
494 | /* |
495 | * `vtime` is this iocg's vtime cursor which progresses as IOs are |
496 | * issued. If lagging behind device vtime, the delta represents |
497 | * the currently available IO budget. If running ahead, the |
498 | * overage. |
499 | * |
	 * `done_vtime` is the same but progressed on completion rather
501 | * than issue. The delta behind `vtime` represents the cost of |
502 | * currently in-flight IOs. |
503 | */ |
504 | atomic64_t vtime; |
505 | atomic64_t done_vtime; |
506 | u64 abs_vdebt; |
507 | |
508 | /* current delay in effect and when it started */ |
509 | u64 delay; |
510 | u64 delay_at; |
511 | |
512 | /* |
513 | * The period this iocg was last active in. Used for deactivation |
514 | * and invalidating `vtime`. |
515 | */ |
516 | atomic64_t active_period; |
517 | struct list_head active_list; |
518 | |
519 | /* see __propagate_weights() and current_hweight() for details */ |
520 | u64 child_active_sum; |
521 | u64 child_inuse_sum; |
522 | u64 child_adjusted_sum; |
523 | int hweight_gen; |
524 | u32 hweight_active; |
525 | u32 hweight_inuse; |
526 | u32 hweight_donating; |
527 | u32 hweight_after_donation; |
528 | |
529 | struct list_head walk_list; |
530 | struct list_head surplus_list; |
531 | |
532 | struct wait_queue_head waitq; |
533 | struct hrtimer waitq_timer; |
534 | |
535 | /* timestamp at the latest activation */ |
536 | u64 activated_at; |
537 | |
538 | /* statistics */ |
539 | struct iocg_pcpu_stat __percpu *pcpu_stat; |
540 | struct iocg_stat stat; |
541 | struct iocg_stat last_stat; |
542 | u64 last_stat_abs_vusage; |
543 | u64 usage_delta_us; |
544 | u64 wait_since; |
545 | u64 indebt_since; |
546 | u64 indelay_since; |
547 | |
548 | /* this iocg's depth in the hierarchy and ancestors including self */ |
549 | int level; |
550 | struct ioc_gq *ancestors[]; |
551 | }; |
552 | |
553 | /* per cgroup */ |
554 | struct ioc_cgrp { |
555 | struct blkcg_policy_data cpd; |
556 | unsigned int dfl_weight; |
557 | }; |
558 | |
559 | struct ioc_now { |
560 | u64 now_ns; |
561 | u64 now; |
562 | u64 vnow; |
563 | }; |
564 | |
565 | struct iocg_wait { |
566 | struct wait_queue_entry wait; |
567 | struct bio *bio; |
568 | u64 abs_cost; |
569 | bool committed; |
570 | }; |
571 | |
572 | struct iocg_wake_ctx { |
573 | struct ioc_gq *iocg; |
574 | u32 hw_inuse; |
575 | s64 vbudget; |
576 | }; |
577 | |
578 | static const struct ioc_params autop[] = { |
579 | [AUTOP_HDD] = { |
580 | .qos = { |
581 | [QOS_RLAT] = 250000, /* 250ms */ |
582 | [QOS_WLAT] = 250000, |
583 | [QOS_MIN] = VRATE_MIN_PPM, |
584 | [QOS_MAX] = VRATE_MAX_PPM, |
585 | }, |
586 | .i_lcoefs = { |
587 | [I_LCOEF_RBPS] = 174019176, |
588 | [I_LCOEF_RSEQIOPS] = 41708, |
589 | [I_LCOEF_RRANDIOPS] = 370, |
590 | [I_LCOEF_WBPS] = 178075866, |
591 | [I_LCOEF_WSEQIOPS] = 42705, |
592 | [I_LCOEF_WRANDIOPS] = 378, |
593 | }, |
594 | }, |
595 | [AUTOP_SSD_QD1] = { |
596 | .qos = { |
597 | [QOS_RLAT] = 25000, /* 25ms */ |
598 | [QOS_WLAT] = 25000, |
599 | [QOS_MIN] = VRATE_MIN_PPM, |
600 | [QOS_MAX] = VRATE_MAX_PPM, |
601 | }, |
602 | .i_lcoefs = { |
603 | [I_LCOEF_RBPS] = 245855193, |
604 | [I_LCOEF_RSEQIOPS] = 61575, |
605 | [I_LCOEF_RRANDIOPS] = 6946, |
606 | [I_LCOEF_WBPS] = 141365009, |
607 | [I_LCOEF_WSEQIOPS] = 33716, |
608 | [I_LCOEF_WRANDIOPS] = 26796, |
609 | }, |
610 | }, |
611 | [AUTOP_SSD_DFL] = { |
612 | .qos = { |
613 | [QOS_RLAT] = 25000, /* 25ms */ |
614 | [QOS_WLAT] = 25000, |
615 | [QOS_MIN] = VRATE_MIN_PPM, |
616 | [QOS_MAX] = VRATE_MAX_PPM, |
617 | }, |
618 | .i_lcoefs = { |
619 | [I_LCOEF_RBPS] = 488636629, |
620 | [I_LCOEF_RSEQIOPS] = 8932, |
621 | [I_LCOEF_RRANDIOPS] = 8518, |
622 | [I_LCOEF_WBPS] = 427891549, |
623 | [I_LCOEF_WSEQIOPS] = 28755, |
624 | [I_LCOEF_WRANDIOPS] = 21940, |
625 | }, |
626 | .too_fast_vrate_pct = 500, |
627 | }, |
628 | [AUTOP_SSD_FAST] = { |
629 | .qos = { |
630 | [QOS_RLAT] = 5000, /* 5ms */ |
631 | [QOS_WLAT] = 5000, |
632 | [QOS_MIN] = VRATE_MIN_PPM, |
633 | [QOS_MAX] = VRATE_MAX_PPM, |
634 | }, |
635 | .i_lcoefs = { |
636 | [I_LCOEF_RBPS] = 3102524156LLU, |
637 | [I_LCOEF_RSEQIOPS] = 724816, |
638 | [I_LCOEF_RRANDIOPS] = 778122, |
639 | [I_LCOEF_WBPS] = 1742780862LLU, |
640 | [I_LCOEF_WSEQIOPS] = 425702, |
641 | [I_LCOEF_WRANDIOPS] = 443193, |
642 | }, |
643 | .too_slow_vrate_pct = 10, |
644 | }, |
645 | }; |
646 | |
647 | /* |
648 | * vrate adjust percentages indexed by ioc->busy_level. We adjust up on |
649 | * vtime credit shortage and down on device saturation. |
650 | */ |
651 | static u32 vrate_adj_pct[] = |
652 | { 0, 0, 0, 0, |
653 | 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, |
654 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
655 | 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 }; |
656 | |
657 | static struct blkcg_policy blkcg_policy_iocost; |
658 | |
659 | /* accessors and helpers */ |
660 | static struct ioc *rqos_to_ioc(struct rq_qos *rqos) |
661 | { |
662 | return container_of(rqos, struct ioc, rqos); |
663 | } |
664 | |
665 | static struct ioc *q_to_ioc(struct request_queue *q) |
666 | { |
	return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
668 | } |
669 | |
670 | static const char __maybe_unused *ioc_name(struct ioc *ioc) |
671 | { |
672 | struct gendisk *disk = ioc->rqos.disk; |
673 | |
674 | if (!disk) |
		return "<unknown>";
676 | return disk->disk_name; |
677 | } |
678 | |
679 | static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd) |
680 | { |
681 | return pd ? container_of(pd, struct ioc_gq, pd) : NULL; |
682 | } |
683 | |
684 | static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg) |
685 | { |
	return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
687 | } |
688 | |
689 | static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg) |
690 | { |
	return pd_to_blkg(&iocg->pd);
692 | } |
693 | |
694 | static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg) |
695 | { |
696 | return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost), |
697 | struct ioc_cgrp, cpd); |
698 | } |
699 | |
700 | /* |
701 | * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical |
702 | * weight, the more expensive each IO. Must round up. |
703 | */ |
704 | static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse) |
705 | { |
706 | return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse); |
707 | } |
708 | |
709 | /* |
710 | * The inverse of abs_cost_to_cost(). Must round up. |
711 | */ |
712 | static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse) |
713 | { |
714 | return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE); |
715 | } |
716 | |
717 | static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, |
718 | u64 abs_cost, u64 cost) |
719 | { |
720 | struct iocg_pcpu_stat *gcs; |
721 | |
722 | bio->bi_iocost_cost = cost; |
	atomic64_add(cost, &iocg->vtime);
724 | |
725 | gcs = get_cpu_ptr(iocg->pcpu_stat); |
726 | local64_add(abs_cost, &gcs->abs_vusage); |
727 | put_cpu_ptr(gcs); |
728 | } |
729 | |
730 | static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags) |
731 | { |
732 | if (lock_ioc) { |
733 | spin_lock_irqsave(&iocg->ioc->lock, *flags); |
		spin_lock(&iocg->waitq.lock);
735 | } else { |
736 | spin_lock_irqsave(&iocg->waitq.lock, *flags); |
737 | } |
738 | } |
739 | |
740 | static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags) |
741 | { |
742 | if (unlock_ioc) { |
		spin_unlock(&iocg->waitq.lock);
		spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
	} else {
		spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
747 | } |
748 | } |
749 | |
750 | #define CREATE_TRACE_POINTS |
751 | #include <trace/events/iocost.h> |
752 | |
753 | static void ioc_refresh_margins(struct ioc *ioc) |
754 | { |
755 | struct ioc_margins *margins = &ioc->margins; |
756 | u32 period_us = ioc->period_us; |
757 | u64 vrate = ioc->vtime_base_rate; |
758 | |
759 | margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate; |
760 | margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate; |
761 | margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate; |
762 | } |
763 | |
/* latency QoS params changed, update period_us and all the dependent params */
765 | static void ioc_refresh_period_us(struct ioc *ioc) |
766 | { |
767 | u32 ppm, lat, multi, period_us; |
768 | |
769 | lockdep_assert_held(&ioc->lock); |
770 | |
771 | /* pick the higher latency target */ |
772 | if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) { |
773 | ppm = ioc->params.qos[QOS_RPPM]; |
774 | lat = ioc->params.qos[QOS_RLAT]; |
775 | } else { |
776 | ppm = ioc->params.qos[QOS_WPPM]; |
777 | lat = ioc->params.qos[QOS_WLAT]; |
778 | } |
779 | |
780 | /* |
781 | * We want the period to be long enough to contain a healthy number |
782 | * of IOs while short enough for granular control. Define it as a |
783 | * multiple of the latency target. Ideally, the multiplier should |
784 | * be scaled according to the percentile so that it would nominally |
785 | * contain a certain number of requests. Let's be simpler and |
786 | * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50). |
787 | */ |
788 | if (ppm) |
789 | multi = max_t(u32, (MILLION - ppm) / 50000, 2); |
790 | else |
791 | multi = 2; |
792 | period_us = multi * lat; |
793 | period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD); |
794 | |
795 | /* calculate dependent params */ |
796 | ioc->period_us = period_us; |
	ioc->timer_slack_ns = div64_u64((u64)period_us * NSEC_PER_USEC *
					TIMER_SLACK_PCT, 100);
800 | ioc_refresh_margins(ioc); |
801 | } |
802 | |
803 | /* |
804 | * ioc->rqos.disk isn't initialized when this function is called from |
805 | * the init path. |
806 | */ |
807 | static int ioc_autop_idx(struct ioc *ioc, struct gendisk *disk) |
808 | { |
809 | int idx = ioc->autop_idx; |
810 | const struct ioc_params *p = &autop[idx]; |
811 | u32 vrate_pct; |
812 | u64 now_ns; |
813 | |
814 | /* rotational? */ |
815 | if (!blk_queue_nonrot(disk->queue)) |
816 | return AUTOP_HDD; |
817 | |
818 | /* handle SATA SSDs w/ broken NCQ */ |
	if (blk_queue_depth(disk->queue) == 1)
820 | return AUTOP_SSD_QD1; |
821 | |
822 | /* use one of the normal ssd sets */ |
823 | if (idx < AUTOP_SSD_DFL) |
824 | return AUTOP_SSD_DFL; |
825 | |
826 | /* if user is overriding anything, maintain what was there */ |
827 | if (ioc->user_qos_params || ioc->user_cost_model) |
828 | return idx; |
829 | |
830 | /* step up/down based on the vrate */ |
	vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
832 | now_ns = blk_time_get_ns(); |
833 | |
834 | if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) { |
835 | if (!ioc->autop_too_fast_at) |
836 | ioc->autop_too_fast_at = now_ns; |
837 | if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC) |
838 | return idx + 1; |
839 | } else { |
840 | ioc->autop_too_fast_at = 0; |
841 | } |
842 | |
843 | if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) { |
844 | if (!ioc->autop_too_slow_at) |
845 | ioc->autop_too_slow_at = now_ns; |
846 | if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC) |
847 | return idx - 1; |
848 | } else { |
849 | ioc->autop_too_slow_at = 0; |
850 | } |
851 | |
852 | return idx; |
853 | } |
854 | |
855 | /* |
 * Take the following as input
857 | * |
858 | * @bps maximum sequential throughput |
859 | * @seqiops maximum sequential 4k iops |
860 | * @randiops maximum random 4k iops |
861 | * |
862 | * and calculate the linear model cost coefficients. |
863 | * |
864 | * *@page per-page cost 1s / (@bps / 4096) |
865 | * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0) |
 *  *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
867 | */ |
868 | static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops, |
869 | u64 *page, u64 *seqio, u64 *randio) |
870 | { |
871 | u64 v; |
872 | |
873 | *page = *seqio = *randio = 0; |
874 | |
875 | if (bps) { |
876 | u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE); |
877 | |
878 | if (bps_pages) |
879 | *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages); |
880 | else |
881 | *page = 1; |
882 | } |
883 | |
884 | if (seqiops) { |
885 | v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops); |
886 | if (v > *page) |
887 | *seqio = v - *page; |
888 | } |
889 | |
890 | if (randiops) { |
891 | v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops); |
892 | if (v > *page) |
893 | *randio = v - *page; |
894 | } |
895 | } |
896 | |
897 | static void ioc_refresh_lcoefs(struct ioc *ioc) |
898 | { |
899 | u64 *u = ioc->params.i_lcoefs; |
900 | u64 *c = ioc->params.lcoefs; |
901 | |
	calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		    &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
	calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
		    &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
906 | } |
907 | |
908 | /* |
909 | * struct gendisk is required as an argument because ioc->rqos.disk |
910 | * is not properly initialized when called from the init path. |
911 | */ |
912 | static bool ioc_refresh_params_disk(struct ioc *ioc, bool force, |
913 | struct gendisk *disk) |
914 | { |
915 | const struct ioc_params *p; |
916 | int idx; |
917 | |
918 | lockdep_assert_held(&ioc->lock); |
919 | |
920 | idx = ioc_autop_idx(ioc, disk); |
921 | p = &autop[idx]; |
922 | |
923 | if (idx == ioc->autop_idx && !force) |
924 | return false; |
925 | |
926 | if (idx != ioc->autop_idx) { |
		atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
928 | ioc->vtime_base_rate = VTIME_PER_USEC; |
929 | } |
930 | |
931 | ioc->autop_idx = idx; |
932 | ioc->autop_too_fast_at = 0; |
933 | ioc->autop_too_slow_at = 0; |
934 | |
935 | if (!ioc->user_qos_params) |
936 | memcpy(ioc->params.qos, p->qos, sizeof(p->qos)); |
937 | if (!ioc->user_cost_model) |
938 | memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs)); |
939 | |
940 | ioc_refresh_period_us(ioc); |
941 | ioc_refresh_lcoefs(ioc); |
942 | |
943 | ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] * |
944 | VTIME_PER_USEC, MILLION); |
945 | ioc->vrate_max = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MAX] * |
946 | VTIME_PER_USEC, MILLION); |
947 | |
948 | return true; |
949 | } |
950 | |
951 | static bool ioc_refresh_params(struct ioc *ioc, bool force) |
952 | { |
	return ioc_refresh_params_disk(ioc, force, ioc->rqos.disk);
954 | } |
955 | |
956 | /* |
957 | * When an iocg accumulates too much vtime or gets deactivated, we throw away |
958 | * some vtime, which lowers the overall device utilization. As the exact amount |
959 | * which is being thrown away is known, we can compensate by accelerating the |
960 | * vrate accordingly so that the extra vtime generated in the current period |
961 | * matches what got lost. |
962 | */ |
963 | static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now) |
964 | { |
965 | s64 pleft = ioc->period_at + ioc->period_us - now->now; |
966 | s64 vperiod = ioc->period_us * ioc->vtime_base_rate; |
967 | s64 vcomp, vcomp_min, vcomp_max; |
968 | |
969 | lockdep_assert_held(&ioc->lock); |
970 | |
971 | /* we need some time left in this period */ |
972 | if (pleft <= 0) |
973 | goto done; |
974 | |
975 | /* |
976 | * Calculate how much vrate should be adjusted to offset the error. |
977 | * Limit the amount of adjustment and deduct the adjusted amount from |
978 | * the error. |
979 | */ |
	vcomp = -div64_s64(ioc->vtime_err, pleft);
981 | vcomp_min = -(ioc->vtime_base_rate >> 1); |
982 | vcomp_max = ioc->vtime_base_rate; |
983 | vcomp = clamp(vcomp, vcomp_min, vcomp_max); |
984 | |
985 | ioc->vtime_err += vcomp * pleft; |
986 | |
	atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
988 | done: |
989 | /* bound how much error can accumulate */ |
990 | ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod); |
991 | } |
992 | |
993 | static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct, |
994 | int nr_lagging, int nr_shortages, |
995 | int prev_busy_level, u32 *missed_ppm) |
996 | { |
997 | u64 vrate = ioc->vtime_base_rate; |
998 | u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max; |
999 | |
1000 | if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) { |
1001 | if (ioc->busy_level != prev_busy_level || nr_lagging) |
			trace_iocost_ioc_vrate_adj(ioc, vrate,
1003 | missed_ppm, rq_wait_pct, |
1004 | nr_lagging, nr_shortages); |
1005 | |
1006 | return; |
1007 | } |
1008 | |
1009 | /* |
1010 | * If vrate is out of bounds, apply clamp gradually as the |
1011 | * bounds can change abruptly. Otherwise, apply busy_level |
1012 | * based adjustment. |
1013 | */ |
1014 | if (vrate < vrate_min) { |
		vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
		vrate = min(vrate, vrate_min);
	} else if (vrate > vrate_max) {
		vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
1019 | vrate = max(vrate, vrate_max); |
1020 | } else { |
1021 | int idx = min_t(int, abs(ioc->busy_level), |
1022 | ARRAY_SIZE(vrate_adj_pct) - 1); |
1023 | u32 adj_pct = vrate_adj_pct[idx]; |
1024 | |
1025 | if (ioc->busy_level > 0) |
1026 | adj_pct = 100 - adj_pct; |
1027 | else |
1028 | adj_pct = 100 + adj_pct; |
1029 | |
1030 | vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100), |
1031 | vrate_min, vrate_max); |
1032 | } |
1033 | |
	trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1035 | nr_lagging, nr_shortages); |
1036 | |
1037 | ioc->vtime_base_rate = vrate; |
1038 | ioc_refresh_margins(ioc); |
1039 | } |
1040 | |
1041 | /* take a snapshot of the current [v]time and vrate */ |
1042 | static void ioc_now(struct ioc *ioc, struct ioc_now *now) |
1043 | { |
1044 | unsigned seq; |
1045 | u64 vrate; |
1046 | |
1047 | now->now_ns = blk_time_get_ns(); |
	now->now = ktime_to_us(now->now_ns);
	vrate = atomic64_read(&ioc->vtime_rate);
1050 | |
1051 | /* |
1052 | * The current vtime is |
1053 | * |
1054 | * vtime at period start + (wallclock time since the start) * vrate |
1055 | * |
1056 | * As a consistent snapshot of `period_at_vtime` and `period_at` is |
1057 | * needed, they're seqcount protected. |
1058 | */ |
1059 | do { |
1060 | seq = read_seqcount_begin(&ioc->period_seqcount); |
1061 | now->vnow = ioc->period_at_vtime + |
1062 | (now->now - ioc->period_at) * vrate; |
1063 | } while (read_seqcount_retry(&ioc->period_seqcount, seq)); |
1064 | } |
1065 | |
1066 | static void ioc_start_period(struct ioc *ioc, struct ioc_now *now) |
1067 | { |
1068 | WARN_ON_ONCE(ioc->running != IOC_RUNNING); |
1069 | |
1070 | write_seqcount_begin(&ioc->period_seqcount); |
1071 | ioc->period_at = now->now; |
1072 | ioc->period_at_vtime = now->vnow; |
1073 | write_seqcount_end(&ioc->period_seqcount); |
1074 | |
	ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
	add_timer(&ioc->timer);
1077 | } |
1078 | |
1079 | /* |
1080 | * Update @iocg's `active` and `inuse` to @active and @inuse, update level |
1081 | * weight sums and propagate upwards accordingly. If @save, the current margin |
1082 | * is saved to be used as reference for later inuse in-period adjustments. |
1083 | */ |
1084 | static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse, |
1085 | bool save, struct ioc_now *now) |
1086 | { |
1087 | struct ioc *ioc = iocg->ioc; |
1088 | int lvl; |
1089 | |
1090 | lockdep_assert_held(&ioc->lock); |
1091 | |
1092 | /* |
1093 | * For an active leaf node, its inuse shouldn't be zero or exceed |
1094 | * @active. An active internal node's inuse is solely determined by the |
1095 | * inuse to active ratio of its children regardless of @inuse. |
1096 | */ |
	if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1098 | inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum, |
1099 | iocg->child_active_sum); |
1100 | } else { |
1101 | inuse = clamp_t(u32, inuse, 1, active); |
1102 | } |
1103 | |
1104 | iocg->last_inuse = iocg->inuse; |
1105 | if (save) |
		iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1107 | |
1108 | if (active == iocg->active && inuse == iocg->inuse) |
1109 | return; |
1110 | |
1111 | for (lvl = iocg->level - 1; lvl >= 0; lvl--) { |
1112 | struct ioc_gq *parent = iocg->ancestors[lvl]; |
1113 | struct ioc_gq *child = iocg->ancestors[lvl + 1]; |
1114 | u32 parent_active = 0, parent_inuse = 0; |
1115 | |
1116 | /* update the level sums */ |
1117 | parent->child_active_sum += (s32)(active - child->active); |
1118 | parent->child_inuse_sum += (s32)(inuse - child->inuse); |
1119 | /* apply the updates */ |
1120 | child->active = active; |
1121 | child->inuse = inuse; |
1122 | |
1123 | /* |
1124 | * The delta between inuse and active sums indicates that |
1125 | * much of weight is being given away. Parent's inuse |
1126 | * and active should reflect the ratio. |
1127 | */ |
1128 | if (parent->child_active_sum) { |
1129 | parent_active = parent->weight; |
1130 | parent_inuse = DIV64_U64_ROUND_UP( |
1131 | parent_active * parent->child_inuse_sum, |
1132 | parent->child_active_sum); |
1133 | } |
1134 | |
1135 | /* do we need to keep walking up? */ |
1136 | if (parent_active == parent->active && |
1137 | parent_inuse == parent->inuse) |
1138 | break; |
1139 | |
1140 | active = parent_active; |
1141 | inuse = parent_inuse; |
1142 | } |
1143 | |
1144 | ioc->weights_updated = true; |
1145 | } |
1146 | |
1147 | static void commit_weights(struct ioc *ioc) |
1148 | { |
1149 | lockdep_assert_held(&ioc->lock); |
1150 | |
1151 | if (ioc->weights_updated) { |
1152 | /* paired with rmb in current_hweight(), see there */ |
1153 | smp_wmb(); |
		atomic_inc(&ioc->hweight_gen);
1155 | ioc->weights_updated = false; |
1156 | } |
1157 | } |
1158 | |
1159 | static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse, |
1160 | bool save, struct ioc_now *now) |
1161 | { |
1162 | __propagate_weights(iocg, active, inuse, save, now); |
	commit_weights(iocg->ioc);
1164 | } |
1165 | |
1166 | static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep) |
1167 | { |
1168 | struct ioc *ioc = iocg->ioc; |
1169 | int lvl; |
1170 | u32 hwa, hwi; |
1171 | int ioc_gen; |
1172 | |
1173 | /* hot path - if uptodate, use cached */ |
	ioc_gen = atomic_read(&ioc->hweight_gen);
1175 | if (ioc_gen == iocg->hweight_gen) |
1176 | goto out; |
1177 | |
1178 | /* |
1179 | * Paired with wmb in commit_weights(). If we saw the updated |
1180 | * hweight_gen, all the weight updates from __propagate_weights() are |
1181 | * visible too. |
1182 | * |
1183 | * We can race with weight updates during calculation and get it |
1184 | * wrong. However, hweight_gen would have changed and a future |
1185 | * reader will recalculate and we're guaranteed to discard the |
1186 | * wrong result soon. |
1187 | */ |
1188 | smp_rmb(); |
1189 | |
1190 | hwa = hwi = WEIGHT_ONE; |
1191 | for (lvl = 0; lvl <= iocg->level - 1; lvl++) { |
1192 | struct ioc_gq *parent = iocg->ancestors[lvl]; |
1193 | struct ioc_gq *child = iocg->ancestors[lvl + 1]; |
1194 | u64 active_sum = READ_ONCE(parent->child_active_sum); |
1195 | u64 inuse_sum = READ_ONCE(parent->child_inuse_sum); |
1196 | u32 active = READ_ONCE(child->active); |
1197 | u32 inuse = READ_ONCE(child->inuse); |
1198 | |
1199 | /* we can race with deactivations and either may read as zero */ |
1200 | if (!active_sum || !inuse_sum) |
1201 | continue; |
1202 | |
1203 | active_sum = max_t(u64, active, active_sum); |
		hwa = div64_u64((u64)hwa * active, active_sum);

		inuse_sum = max_t(u64, inuse, inuse_sum);
		hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1208 | } |
1209 | |
1210 | iocg->hweight_active = max_t(u32, hwa, 1); |
1211 | iocg->hweight_inuse = max_t(u32, hwi, 1); |
1212 | iocg->hweight_gen = ioc_gen; |
1213 | out: |
1214 | if (hw_activep) |
1215 | *hw_activep = iocg->hweight_active; |
1216 | if (hw_inusep) |
1217 | *hw_inusep = iocg->hweight_inuse; |
1218 | } |
1219 | |
1220 | /* |
1221 | * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the |
1222 | * other weights stay unchanged. |
1223 | */ |
1224 | static u32 current_hweight_max(struct ioc_gq *iocg) |
1225 | { |
1226 | u32 hwm = WEIGHT_ONE; |
1227 | u32 inuse = iocg->active; |
1228 | u64 child_inuse_sum; |
1229 | int lvl; |
1230 | |
1231 | lockdep_assert_held(&iocg->ioc->lock); |
1232 | |
1233 | for (lvl = iocg->level - 1; lvl >= 0; lvl--) { |
1234 | struct ioc_gq *parent = iocg->ancestors[lvl]; |
1235 | struct ioc_gq *child = iocg->ancestors[lvl + 1]; |
1236 | |
1237 | child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse; |
		hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1239 | inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum, |
1240 | parent->child_active_sum); |
1241 | } |
1242 | |
1243 | return max_t(u32, hwm, 1); |
1244 | } |
1245 | |
1246 | static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now) |
1247 | { |
1248 | struct ioc *ioc = iocg->ioc; |
1249 | struct blkcg_gq *blkg = iocg_to_blkg(iocg); |
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1251 | u32 weight; |
1252 | |
1253 | lockdep_assert_held(&ioc->lock); |
1254 | |
1255 | weight = iocg->cfg_weight ?: iocc->dfl_weight; |
1256 | if (weight != iocg->weight && iocg->active) |
		propagate_weights(iocg, weight, iocg->inuse, true, now);
1258 | iocg->weight = weight; |
1259 | } |
1260 | |
1261 | static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now) |
1262 | { |
1263 | struct ioc *ioc = iocg->ioc; |
1264 | u64 __maybe_unused last_period, cur_period; |
1265 | u64 vtime, vtarget; |
1266 | int i; |
1267 | |
1268 | /* |
	 * If we seem to be already active, just update the stamp to tell the
	 * timer that we're still active. We don't mind occasional races.
1271 | */ |
	if (!list_empty(&iocg->active_list)) {
		ioc_now(ioc, now);
		cur_period = atomic64_read(&ioc->cur_period);
		if (atomic64_read(&iocg->active_period) != cur_period)
			atomic64_set(&iocg->active_period, cur_period);
1277 | return true; |
1278 | } |
1279 | |
1280 | /* racy check on internal node IOs, treat as root level IOs */ |
1281 | if (iocg->child_active_sum) |
1282 | return false; |
1283 | |
	spin_lock_irq(&ioc->lock);
1285 | |
1286 | ioc_now(ioc, now); |
1287 | |
1288 | /* update period */ |
	cur_period = atomic64_read(&ioc->cur_period);
	last_period = atomic64_read(&iocg->active_period);
	atomic64_set(&iocg->active_period, cur_period);

	/* already activated or breaking leaf-only constraint? */
	if (!list_empty(&iocg->active_list))
		goto succeed_unlock;
	for (i = iocg->level - 1; i > 0; i--)
		if (!list_empty(&iocg->ancestors[i]->active_list))
1298 | goto fail_unlock; |
1299 | |
1300 | if (iocg->child_active_sum) |
1301 | goto fail_unlock; |
1302 | |
1303 | /* |
1304 | * Always start with the target budget. On deactivation, we throw away |
1305 | * anything above it. |
1306 | */ |
1307 | vtarget = now->vnow - ioc->margins.target; |
	vtime = atomic64_read(&iocg->vtime);

	atomic64_add(vtarget - vtime, &iocg->vtime);
	atomic64_add(vtarget - vtime, &iocg->done_vtime);
1312 | vtime = vtarget; |
1313 | |
1314 | /* |
1315 | * Activate, propagate weight and start period timer if not |
1316 | * running. Reset hweight_gen to avoid accidental match from |
1317 | * wrapping. |
1318 | */ |
	iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
	list_add(&iocg->active_list, &ioc->active_iocgs);

	propagate_weights(iocg, iocg->weight,
			  iocg->last_inuse ?: iocg->weight, true, now);
1324 | |
1325 | TRACE_IOCG_PATH(iocg_activate, iocg, now, |
1326 | last_period, cur_period, vtime); |
1327 | |
1328 | iocg->activated_at = now->now; |
1329 | |
1330 | if (ioc->running == IOC_IDLE) { |
1331 | ioc->running = IOC_RUNNING; |
1332 | ioc->dfgv_period_at = now->now; |
1333 | ioc->dfgv_period_rem = 0; |
1334 | ioc_start_period(ioc, now); |
1335 | } |
1336 | |
1337 | succeed_unlock: |
	spin_unlock_irq(&ioc->lock);
1339 | return true; |
1340 | |
1341 | fail_unlock: |
	spin_unlock_irq(&ioc->lock);
1343 | return false; |
1344 | } |
1345 | |
1346 | static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) |
1347 | { |
1348 | struct ioc *ioc = iocg->ioc; |
1349 | struct blkcg_gq *blkg = iocg_to_blkg(iocg); |
1350 | u64 tdelta, delay, new_delay, shift; |
1351 | s64 vover, vover_pct; |
1352 | u32 hwa; |
1353 | |
1354 | lockdep_assert_held(&iocg->waitq.lock); |
1355 | |
1356 | /* |
1357 | * If the delay is set by another CPU, we may be in the past. No need to |
1358 | * change anything if so. This avoids decay calculation underflow. |
1359 | */ |
1360 | if (time_before64(now->now, iocg->delay_at)) |
1361 | return false; |
1362 | |
1363 | /* calculate the current delay in effect - 1/2 every second */ |
1364 | tdelta = now->now - iocg->delay_at; |
	shift = div64_u64(tdelta, USEC_PER_SEC);
1366 | if (iocg->delay && shift < BITS_PER_LONG) |
1367 | delay = iocg->delay >> shift; |
1368 | else |
1369 | delay = 0; |
1370 | |
1371 | /* calculate the new delay from the debt amount */ |
	current_hweight(iocg, &hwa, NULL);
	vover = atomic64_read(&iocg->vtime) +
		abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
	vover_pct = div64_s64(100 * vover,
			      ioc->period_us * ioc->vtime_base_rate);
1377 | |
1378 | if (vover_pct <= MIN_DELAY_THR_PCT) |
1379 | new_delay = 0; |
1380 | else if (vover_pct >= MAX_DELAY_THR_PCT) |
1381 | new_delay = MAX_DELAY; |
1382 | else |
1383 | new_delay = MIN_DELAY + |
			div_u64((MAX_DELAY - MIN_DELAY) *
				(vover_pct - MIN_DELAY_THR_PCT),
				MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1387 | |
1388 | /* pick the higher one and apply */ |
1389 | if (new_delay > delay) { |
1390 | iocg->delay = new_delay; |
1391 | iocg->delay_at = now->now; |
1392 | delay = new_delay; |
1393 | } |
1394 | |
1395 | if (delay >= MIN_DELAY) { |
1396 | if (!iocg->indelay_since) |
1397 | iocg->indelay_since = now->now; |
		blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1399 | return true; |
1400 | } else { |
1401 | if (iocg->indelay_since) { |
1402 | iocg->stat.indelay_us += now->now - iocg->indelay_since; |
1403 | iocg->indelay_since = 0; |
1404 | } |
1405 | iocg->delay = 0; |
1406 | blkcg_clear_delay(blkg); |
1407 | return false; |
1408 | } |
1409 | } |
1410 | |
1411 | static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost, |
1412 | struct ioc_now *now) |
1413 | { |
1414 | struct iocg_pcpu_stat *gcs; |
1415 | |
1416 | lockdep_assert_held(&iocg->ioc->lock); |
1417 | lockdep_assert_held(&iocg->waitq.lock); |
1418 | WARN_ON_ONCE(list_empty(&iocg->active_list)); |
1419 | |
1420 | /* |
1421 | * Once in debt, debt handling owns inuse. @iocg stays at the minimum |
	 * inuse donating all of its share to others until its debt is paid off.
1423 | */ |
1424 | if (!iocg->abs_vdebt && abs_cost) { |
1425 | iocg->indebt_since = now->now; |
		propagate_weights(iocg, iocg->active, 0, false, now);
1427 | } |
1428 | |
1429 | iocg->abs_vdebt += abs_cost; |
1430 | |
1431 | gcs = get_cpu_ptr(iocg->pcpu_stat); |
1432 | local64_add(abs_cost, &gcs->abs_vusage); |
1433 | put_cpu_ptr(gcs); |
1434 | } |
1435 | |
1436 | static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay, |
1437 | struct ioc_now *now) |
1438 | { |
1439 | lockdep_assert_held(&iocg->ioc->lock); |
1440 | lockdep_assert_held(&iocg->waitq.lock); |
1441 | |
1442 | /* |
1443 | * make sure that nobody messed with @iocg. Check iocg->pd.online |
1444 | * to avoid warn when removing blkcg or disk. |
1445 | */ |
1446 | WARN_ON_ONCE(list_empty(&iocg->active_list) && iocg->pd.online); |
1447 | WARN_ON_ONCE(iocg->inuse > 1); |
1448 | |
1449 | iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt); |
1450 | |
1451 | /* if debt is paid in full, restore inuse */ |
1452 | if (!iocg->abs_vdebt) { |
1453 | iocg->stat.indebt_us += now->now - iocg->indebt_since; |
1454 | iocg->indebt_since = 0; |
1455 | |
		propagate_weights(iocg, iocg->active, iocg->last_inuse,
				  false, now);
1458 | } |
1459 | } |
1460 | |
1461 | static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode, |
1462 | int flags, void *key) |
1463 | { |
1464 | struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait); |
1465 | struct iocg_wake_ctx *ctx = key; |
	u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1467 | |
1468 | ctx->vbudget -= cost; |
1469 | |
1470 | if (ctx->vbudget < 0) |
1471 | return -1; |
1472 | |
	iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1474 | wait->committed = true; |
1475 | |
1476 | /* |
1477 | * autoremove_wake_function() removes the wait entry only when it |
1478 | * actually changed the task state. We want the wait always removed. |
1479 | * Remove explicitly and use default_wake_function(). Note that the |
1480 | * order of operations is important as finish_wait() tests whether |
1481 | * @wq_entry is removed without grabbing the lock. |
1482 | */ |
1483 | default_wake_function(wq_entry, mode, flags, key); |
	list_del_init_careful(&wq_entry->entry);
1485 | return 0; |
1486 | } |
1487 | |
1488 | /* |
1489 | * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters |
1490 | * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in |
1491 | * addition to iocg->waitq.lock. |
1492 | */ |
1493 | static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt, |
1494 | struct ioc_now *now) |
1495 | { |
1496 | struct ioc *ioc = iocg->ioc; |
1497 | struct iocg_wake_ctx ctx = { .iocg = iocg }; |
1498 | u64 vshortage, expires, oexpires; |
1499 | s64 vbudget; |
1500 | u32 hwa; |
1501 | |
1502 | lockdep_assert_held(&iocg->waitq.lock); |
1503 | |
	current_hweight(iocg, &hwa, NULL);
	vbudget = now->vnow - atomic64_read(&iocg->vtime);
1506 | |
1507 | /* pay off debt */ |
1508 | if (pay_debt && iocg->abs_vdebt && vbudget > 0) { |
		u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
		u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
		u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1512 | |
1513 | lockdep_assert_held(&ioc->lock); |
1514 | |
		atomic64_add(vpay, &iocg->vtime);
		atomic64_add(vpay, &iocg->done_vtime);
1517 | iocg_pay_debt(iocg, abs_vpay, now); |
1518 | vbudget -= vpay; |
1519 | } |
1520 | |
1521 | if (iocg->abs_vdebt || iocg->delay) |
1522 | iocg_kick_delay(iocg, now); |
1523 | |
1524 | /* |
1525 | * Debt can still be outstanding if we haven't paid all yet or the |
1526 | * caller raced and called without @pay_debt. Shouldn't wake up waiters |
1527 | * under debt. Make sure @vbudget reflects the outstanding amount and is |
1528 | * not positive. |
1529 | */ |
1530 | if (iocg->abs_vdebt) { |
		s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1532 | vbudget = min_t(s64, 0, vbudget - vdebt); |
1533 | } |
1534 | |
1535 | /* |
1536 | * Wake up the ones which are due and see how much vtime we'll need for |
1537 | * the next one. As paying off debt restores hw_inuse, it must be read |
1538 | * after the above debt payment. |
1539 | */ |
1540 | ctx.vbudget = vbudget; |
	current_hweight(iocg, NULL, &ctx.hw_inuse);
1542 | |
	__wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1544 | |
	if (!waitqueue_active(&iocg->waitq)) {
1546 | if (iocg->wait_since) { |
1547 | iocg->stat.wait_us += now->now - iocg->wait_since; |
1548 | iocg->wait_since = 0; |
1549 | } |
1550 | return; |
1551 | } |
1552 | |
1553 | if (!iocg->wait_since) |
1554 | iocg->wait_since = now->now; |
1555 | |
1556 | if (WARN_ON_ONCE(ctx.vbudget >= 0)) |
1557 | return; |
1558 | |
1559 | /* determine next wakeup, add a timer margin to guarantee chunking */ |
1560 | vshortage = -ctx.vbudget; |
1561 | expires = now->now_ns + |
1562 | DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) * |
1563 | NSEC_PER_USEC; |
1564 | expires += ioc->timer_slack_ns; |
1565 | |
1566 | /* if already active and close enough, don't bother */ |
	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
	if (hrtimer_is_queued(&iocg->waitq_timer) &&
1569 | abs(oexpires - expires) <= ioc->timer_slack_ns) |
1570 | return; |
1571 | |
	hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
			       ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1574 | } |
1575 | |
1576 | static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer) |
1577 | { |
1578 | struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer); |
1579 | bool pay_debt = READ_ONCE(iocg->abs_vdebt); |
1580 | struct ioc_now now; |
1581 | unsigned long flags; |
1582 | |
	ioc_now(iocg->ioc, &now);

	iocg_lock(iocg, pay_debt, &flags);
	iocg_kick_waitq(iocg, pay_debt, &now);
	iocg_unlock(iocg, pay_debt, &flags);
1588 | |
1589 | return HRTIMER_NORESTART; |
1590 | } |
1591 | |
1592 | static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p) |
1593 | { |
1594 | u32 nr_met[2] = { }; |
1595 | u32 nr_missed[2] = { }; |
1596 | u64 rq_wait_ns = 0; |
1597 | int cpu, rw; |
1598 | |
1599 | for_each_online_cpu(cpu) { |
1600 | struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu); |
1601 | u64 this_rq_wait_ns; |
1602 | |
1603 | for (rw = READ; rw <= WRITE; rw++) { |
1604 | u32 this_met = local_read(&stat->missed[rw].nr_met); |
1605 | u32 this_missed = local_read(&stat->missed[rw].nr_missed); |
1606 | |
1607 | nr_met[rw] += this_met - stat->missed[rw].last_met; |
1608 | nr_missed[rw] += this_missed - stat->missed[rw].last_missed; |
1609 | stat->missed[rw].last_met = this_met; |
1610 | stat->missed[rw].last_missed = this_missed; |
1611 | } |
1612 | |
1613 | this_rq_wait_ns = local64_read(&stat->rq_wait_ns); |
1614 | rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns; |
1615 | stat->last_rq_wait_ns = this_rq_wait_ns; |
1616 | } |
1617 | |
1618 | for (rw = READ; rw <= WRITE; rw++) { |
1619 | if (nr_met[rw] + nr_missed[rw]) |
1620 | missed_ppm_ar[rw] = |
1621 | DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION, |
1622 | nr_met[rw] + nr_missed[rw]); |
1623 | else |
1624 | missed_ppm_ar[rw] = 0; |
1625 | } |
1626 | |
	*rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
				   ioc->period_us * NSEC_PER_USEC);
1629 | } |
1630 | |
1631 | /* was iocg idle this period? */ |
1632 | static bool iocg_is_idle(struct ioc_gq *iocg) |
1633 | { |
1634 | struct ioc *ioc = iocg->ioc; |
1635 | |
1636 | /* did something get issued this period? */ |
	if (atomic64_read(&iocg->active_period) ==
	    atomic64_read(&ioc->cur_period))
		return false;

	/* is something in flight? */
	if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1643 | return false; |
1644 | |
1645 | return true; |
1646 | } |
1647 | |
1648 | /* |
1649 | * Call this function on the target leaf @iocg's to build pre-order traversal |
1650 | * list of all the ancestors in @inner_walk. The inner nodes are linked through |
1651 | * ->walk_list and the caller is responsible for dissolving the list after use. |
1652 | */ |
1653 | static void iocg_build_inner_walk(struct ioc_gq *iocg, |
1654 | struct list_head *inner_walk) |
1655 | { |
1656 | int lvl; |
1657 | |
1658 | WARN_ON_ONCE(!list_empty(&iocg->walk_list)); |
1659 | |
1660 | /* find the first ancestor which hasn't been visited yet */ |
1661 | for (lvl = iocg->level - 1; lvl >= 0; lvl--) { |
		if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1663 | break; |
1664 | } |
1665 | |
1666 | /* walk down and visit the inner nodes to get pre-order traversal */ |
1667 | while (++lvl <= iocg->level - 1) { |
1668 | struct ioc_gq *inner = iocg->ancestors[lvl]; |
1669 | |
1670 | /* record traversal order */ |
		list_add_tail(&inner->walk_list, inner_walk);
1672 | } |
1673 | } |
1674 | |
1675 | /* propagate the deltas to the parent */ |
1676 | static void iocg_flush_stat_upward(struct ioc_gq *iocg) |
1677 | { |
1678 | if (iocg->level > 0) { |
1679 | struct iocg_stat *parent_stat = |
1680 | &iocg->ancestors[iocg->level - 1]->stat; |
1681 | |
1682 | parent_stat->usage_us += |
1683 | iocg->stat.usage_us - iocg->last_stat.usage_us; |
1684 | parent_stat->wait_us += |
1685 | iocg->stat.wait_us - iocg->last_stat.wait_us; |
1686 | parent_stat->indebt_us += |
1687 | iocg->stat.indebt_us - iocg->last_stat.indebt_us; |
1688 | parent_stat->indelay_us += |
1689 | iocg->stat.indelay_us - iocg->last_stat.indelay_us; |
1690 | } |
1691 | |
1692 | iocg->last_stat = iocg->stat; |
1693 | } |
1694 | |
1695 | /* collect per-cpu counters and propagate the deltas to the parent */ |
1696 | static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now) |
1697 | { |
1698 | struct ioc *ioc = iocg->ioc; |
1699 | u64 abs_vusage = 0; |
1700 | u64 vusage_delta; |
1701 | int cpu; |
1702 | |
1703 | lockdep_assert_held(&iocg->ioc->lock); |
1704 | |
1705 | /* collect per-cpu counters */ |
1706 | for_each_possible_cpu(cpu) { |
1707 | abs_vusage += local64_read( |
1708 | per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu)); |
1709 | } |
1710 | vusage_delta = abs_vusage - iocg->last_stat_abs_vusage; |
1711 | iocg->last_stat_abs_vusage = abs_vusage; |
1712 | |
	iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1714 | iocg->stat.usage_us += iocg->usage_delta_us; |
1715 | |
1716 | iocg_flush_stat_upward(iocg); |
1717 | } |
1718 | |
1719 | /* get stat counters ready for reading on all active iocgs */ |
1720 | static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now) |
1721 | { |
1722 | LIST_HEAD(inner_walk); |
1723 | struct ioc_gq *iocg, *tiocg; |
1724 | |
1725 | /* flush leaves and build inner node walk list */ |
1726 | list_for_each_entry(iocg, target_iocgs, active_list) { |
1727 | iocg_flush_stat_leaf(iocg, now); |
		iocg_build_inner_walk(iocg, &inner_walk);
1729 | } |
1730 | |
1731 | /* keep flushing upwards by walking the inner list backwards */ |
1732 | list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) { |
1733 | iocg_flush_stat_upward(iocg); |
		list_del_init(&iocg->walk_list);
1735 | } |
1736 | } |
1737 | |
1738 | /* |
1739 | * Determine what @iocg's hweight_inuse should be after donating unused |
1740 | * capacity. @hwm is the upper bound and used to signal no donation. This |
1741 | * function also throws away @iocg's excess budget. |
1742 | */ |
1743 | static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm, |
1744 | u32 usage, struct ioc_now *now) |
1745 | { |
1746 | struct ioc *ioc = iocg->ioc; |
	u64 vtime = atomic64_read(&iocg->vtime);
1748 | s64 excess, delta, target, new_hwi; |
1749 | |
1750 | /* debt handling owns inuse for debtors */ |
1751 | if (iocg->abs_vdebt) |
1752 | return 1; |
1753 | |
1754 | /* see whether minimum margin requirement is met */ |
	if (waitqueue_active(&iocg->waitq) ||
1756 | time_after64(vtime, now->vnow - ioc->margins.min)) |
1757 | return hwm; |
1758 | |
1759 | /* throw away excess above target */ |
1760 | excess = now->vnow - vtime - ioc->margins.target; |
1761 | if (excess > 0) { |
		atomic64_add(excess, &iocg->vtime);
		atomic64_add(excess, &iocg->done_vtime);
		vtime += excess;
		ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1766 | } |
1767 | |
1768 | /* |
1769 | * Let's say the distance between iocg's and device's vtimes as a |
1770 | * fraction of period duration is delta. Assuming that the iocg will |
1771 | * consume the usage determined above, we want to determine new_hwi so |
1772 | * that delta equals MARGIN_TARGET at the end of the next period. |
1773 | * |
1774 | * We need to execute usage worth of IOs while spending the sum of the |
1775 | * new budget (1 - MARGIN_TARGET) and the leftover from the last period |
1776 | * (delta): |
1777 | * |
1778 | * usage = (1 - MARGIN_TARGET + delta) * new_hwi |
1779 | * |
1780 | * Therefore, the new_hwi is: |
1781 | * |
1782 | * new_hwi = usage / (1 - MARGIN_TARGET + delta) |
1783 | */ |
	delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
			  now->vnow - ioc->period_at_vtime);
	target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
	new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1788 | |
1789 | return clamp_t(s64, new_hwi, 1, hwm); |
1790 | } |
1791 | |
1792 | /* |
1793 | * For work-conservation, an iocg which isn't using all of its share should |
1794 | * donate the leftover to other iocgs. There are two ways to achieve this - 1. |
1795 | * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight. |
1796 | * |
1797 | * #1 is mathematically simpler but has the drawback of requiring synchronous |
1798 | * global hweight_inuse updates when idle iocg's get activated or inuse weights |
1799 | * change due to donation snapbacks as it has the possibility of grossly |
1800 | * overshooting what's allowed by the model and vrate. |
1801 | * |
1802 | * #2 is inherently safe with local operations. The donating iocg can easily |
1803 | * snap back to higher weights when needed without worrying about impacts on |
1804 | * other nodes as the impacts will be inherently correct. This also makes idle |
1805 | * iocg activations safe. The only effect activations have is decreasing |
1806 | * hweight_inuse of others, the right solution to which is for those iocgs to |
1807 | * snap back to higher weights. |
1808 | * |
1809 | * So, we go with #2. The challenge is calculating how each donating iocg's |
1810 | * inuse should be adjusted to achieve the target donation amounts. This is done |
1811 | * using Andy's method described in the following pdf. |
1812 | * |
1813 | * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo |
1814 | * |
1815 | * Given the weights and target after-donation hweight_inuse values, Andy's |
1816 | * method determines how the proportional distribution should look like at each |
1817 | * sibling level to maintain the relative relationship between all non-donating |
1818 | * pairs. To roughly summarize, it divides the tree into donating and |
1819 | * non-donating parts, calculates global donation rate which is used to |
1820 | * determine the target hweight_inuse for each node, and then derives per-level |
1821 | * proportions. |
1822 | * |
1823 | * The following pdf shows that global distribution calculated this way can be |
1824 | * achieved by scaling inuse weights of donating leaves and propagating the |
1825 | * adjustments upwards proportionally. |
1826 | * |
1827 | * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE |
1828 | * |
1829 | * Combining the above two, we can determine how each leaf iocg's inuse should |
1830 | * be adjusted to achieve the target donation. |
1831 | * |
1832 | * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN |
1833 | * |
1834 | * The inline comments use symbols from the last pdf. |
1835 | * |
1836 | * b is the sum of the absolute budgets in the subtree. 1 for the root node. |
1837 | * f is the sum of the absolute budgets of non-donating nodes in the subtree. |
1838 | * t is the sum of the absolute budgets of donating nodes in the subtree. |
1839 | * w is the weight of the node. w = w_f + w_t |
1840 | * w_f is the non-donating portion of w. w_f = w * f / b |
 * w_t is the donating portion of w. w_t = w * t / b
1842 | * s is the sum of all sibling weights. s = Sum(w) for siblings |
1843 | * s_f and s_t are the non-donating and donating portions of s. |
1844 | * |
1845 | * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g. |
1846 | * w_pt is the donating portion of the parent's weight and w'_pt the same value |
1847 | * after adjustments. Subscript r denotes the root node's values. |
1848 | */ |
1849 | static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now) |
1850 | { |
1851 | LIST_HEAD(over_hwa); |
1852 | LIST_HEAD(inner_walk); |
1853 | struct ioc_gq *iocg, *tiocg, *root_iocg; |
1854 | u32 after_sum, over_sum, over_target, gamma; |
1855 | |
1856 | /* |
1857 | * It's pretty unlikely but possible for the total sum of |
1858 | * hweight_after_donation's to be higher than WEIGHT_ONE, which will |
1859 | * confuse the following calculations. If such condition is detected, |
1860 | * scale down everyone over its full share equally to keep the sum below |
1861 | * WEIGHT_ONE. |
1862 | */ |
1863 | after_sum = 0; |
1864 | over_sum = 0; |
1865 | list_for_each_entry(iocg, surpluses, surplus_list) { |
1866 | u32 hwa; |
1867 | |
		current_hweight(iocg, &hwa, NULL);
1869 | after_sum += iocg->hweight_after_donation; |
1870 | |
1871 | if (iocg->hweight_after_donation > hwa) { |
1872 | over_sum += iocg->hweight_after_donation; |
			list_add(&iocg->walk_list, &over_hwa);
1874 | } |
1875 | } |
1876 | |
1877 | if (after_sum >= WEIGHT_ONE) { |
1878 | /* |
1879 | * The delta should be deducted from the over_sum, calculate |
1880 | * target over_sum value. |
1881 | */ |
1882 | u32 over_delta = after_sum - (WEIGHT_ONE - 1); |
1883 | WARN_ON_ONCE(over_sum <= over_delta); |
1884 | over_target = over_sum - over_delta; |
1885 | } else { |
1886 | over_target = 0; |
1887 | } |
1888 | |
1889 | list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) { |
1890 | if (over_target) |
1891 | iocg->hweight_after_donation = |
				div_u64((u64)iocg->hweight_after_donation *
					over_target, over_sum);
		list_del_init(&iocg->walk_list);
1895 | } |
1896 | |
1897 | /* |
1898 | * Build pre-order inner node walk list and prepare for donation |
1899 | * adjustment calculations. |
1900 | */ |
1901 | list_for_each_entry(iocg, surpluses, surplus_list) { |
		iocg_build_inner_walk(iocg, &inner_walk);
1903 | } |
1904 | |
1905 | root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list); |
1906 | WARN_ON_ONCE(root_iocg->level > 0); |
1907 | |
1908 | list_for_each_entry(iocg, &inner_walk, walk_list) { |
1909 | iocg->child_adjusted_sum = 0; |
1910 | iocg->hweight_donating = 0; |
1911 | iocg->hweight_after_donation = 0; |
1912 | } |
1913 | |
1914 | /* |
1915 | * Propagate the donating budget (b_t) and after donation budget (b'_t) |
1916 | * up the hierarchy. |
1917 | */ |
1918 | list_for_each_entry(iocg, surpluses, surplus_list) { |
1919 | struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; |
1920 | |
1921 | parent->hweight_donating += iocg->hweight_donating; |
1922 | parent->hweight_after_donation += iocg->hweight_after_donation; |
1923 | } |
1924 | |
1925 | list_for_each_entry_reverse(iocg, &inner_walk, walk_list) { |
1926 | if (iocg->level > 0) { |
1927 | struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; |
1928 | |
1929 | parent->hweight_donating += iocg->hweight_donating; |
1930 | parent->hweight_after_donation += iocg->hweight_after_donation; |
1931 | } |
1932 | } |
1933 | |
1934 | /* |
1935 | * Calculate inner hwa's (b) and make sure the donation values are |
1936 | * within the accepted ranges as we're doing low res calculations with |
1937 | * roundups. |
1938 | */ |
1939 | list_for_each_entry(iocg, &inner_walk, walk_list) { |
1940 | if (iocg->level) { |
1941 | struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; |
1942 | |
1943 | iocg->hweight_active = DIV64_U64_ROUND_UP( |
1944 | (u64)parent->hweight_active * iocg->active, |
1945 | parent->child_active_sum); |
1946 | |
1947 | } |
1948 | |
1949 | iocg->hweight_donating = min(iocg->hweight_donating, |
1950 | iocg->hweight_active); |
1951 | iocg->hweight_after_donation = min(iocg->hweight_after_donation, |
1952 | iocg->hweight_donating - 1); |
1953 | if (WARN_ON_ONCE(iocg->hweight_active <= 1 || |
1954 | iocg->hweight_donating <= 1 || |
1955 | iocg->hweight_after_donation == 0)) { |
			pr_warn("iocg: invalid donation weights in ");
			pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
			pr_cont(": active=%u donating=%u after=%u\n",
1959 | iocg->hweight_active, iocg->hweight_donating, |
1960 | iocg->hweight_after_donation); |
1961 | } |
1962 | } |
1963 | |
1964 | /* |
1965 | * Calculate the global donation rate (gamma) - the rate to adjust |
1966 | * non-donating budgets by. |
1967 | * |
1968 | * No need to use 64bit multiplication here as the first operand is |
1969 | * guaranteed to be smaller than WEIGHT_ONE (1<<16). |
1970 | * |
1971 | * We know that there are beneficiary nodes and the sum of the donating |
1972 | * hweights can't be whole; however, due to the round-ups during hweight |
1973 | * calculations, root_iocg->hweight_donating might still end up equal to |
1974 | * or greater than whole. Limit the range when calculating the divider. |
1975 | * |
1976 | * gamma = (1 - t_r') / (1 - t_r) |
1977 | */ |
1978 | gamma = DIV_ROUND_UP( |
1979 | (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE, |
1980 | WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1)); |
1981 | |
1982 | /* |
1983 | * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner |
1984 | * nodes. |
1985 | */ |
1986 | list_for_each_entry(iocg, &inner_walk, walk_list) { |
1987 | struct ioc_gq *parent; |
1988 | u32 inuse, wpt, wptp; |
1989 | u64 st, sf; |
1990 | |
1991 | if (iocg->level == 0) { |
1992 | /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */ |
1993 | iocg->child_adjusted_sum = DIV64_U64_ROUND_UP( |
1994 | iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating), |
1995 | WEIGHT_ONE - iocg->hweight_after_donation); |
1996 | continue; |
1997 | } |
1998 | |
1999 | parent = iocg->ancestors[iocg->level - 1]; |
2000 | |
2001 | /* b' = gamma * b_f + b_t' */ |
2002 | iocg->hweight_inuse = DIV64_U64_ROUND_UP( |
2003 | (u64)gamma * (iocg->hweight_active - iocg->hweight_donating), |
2004 | WEIGHT_ONE) + iocg->hweight_after_donation; |
2005 | |
2006 | /* w' = s' * b' / b'_p */ |
2007 | inuse = DIV64_U64_ROUND_UP( |
2008 | (u64)parent->child_adjusted_sum * iocg->hweight_inuse, |
2009 | parent->hweight_inuse); |
2010 | |
2011 | /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */ |
2012 | st = DIV64_U64_ROUND_UP( |
2013 | iocg->child_active_sum * iocg->hweight_donating, |
2014 | iocg->hweight_active); |
2015 | sf = iocg->child_active_sum - st; |
2016 | wpt = DIV64_U64_ROUND_UP( |
2017 | (u64)iocg->active * iocg->hweight_donating, |
2018 | iocg->hweight_active); |
2019 | wptp = DIV64_U64_ROUND_UP( |
2020 | (u64)inuse * iocg->hweight_after_donation, |
2021 | iocg->hweight_inuse); |
2022 | |
2023 | iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt); |
2024 | } |
2025 | |
2026 | /* |
2027 | * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and |
2028 | * we can finally determine leaf adjustments. |
2029 | */ |
2030 | list_for_each_entry(iocg, surpluses, surplus_list) { |
2031 | struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; |
2032 | u32 inuse; |
2033 | |
2034 | /* |
2035 | * In-debt iocgs participated in the donation calculation with |
2036 | * the minimum target hweight_inuse. Configuring inuse |
2037 | * accordingly would work fine but debt handling expects |
2038 | * @iocg->inuse stay at the minimum and we don't wanna |
2039 | * interfere. |
2040 | */ |
2041 | if (iocg->abs_vdebt) { |
2042 | WARN_ON_ONCE(iocg->inuse > 1); |
2043 | continue; |
2044 | } |
2045 | |
2046 | /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */ |
2047 | inuse = DIV64_U64_ROUND_UP( |
2048 | parent->child_adjusted_sum * iocg->hweight_after_donation, |
2049 | parent->hweight_inuse); |
2050 | |
2051 | TRACE_IOCG_PATH(inuse_transfer, iocg, now, |
2052 | iocg->inuse, inuse, |
2053 | iocg->hweight_inuse, |
2054 | iocg->hweight_after_donation); |
2055 | |
		__propagate_weights(iocg, iocg->active, inuse, true, now);
2057 | } |
2058 | |
2059 | /* walk list should be dissolved after use */ |
2060 | list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list) |
		list_del_init(&iocg->walk_list);
2062 | } |
2063 | |
2064 | /* |
2065 | * A low weight iocg can amass a large amount of debt, for example, when |
2066 | * anonymous memory gets reclaimed aggressively. If the system has a lot of |
2067 | * memory paired with a slow IO device, the debt can span multiple seconds or |
2068 | * more. If there are no other subsequent IO issuers, the in-debt iocg may end |
2069 | * up blocked paying its debt while the IO device is idle. |
2070 | * |
2071 | * The following protects against such cases. If the device has been |
2072 | * sufficiently idle for a while, the debts are halved and delays are |
2073 | * recalculated. |
2074 | */ |
2075 | static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, |
2076 | struct ioc_now *now) |
2077 | { |
2078 | struct ioc_gq *iocg; |
2079 | u64 dur, usage_pct, nr_cycles; |
2080 | |
2081 | /* if no debtor, reset the cycle */ |
2082 | if (!nr_debtors) { |
2083 | ioc->dfgv_period_at = now->now; |
2084 | ioc->dfgv_period_rem = 0; |
2085 | ioc->dfgv_usage_us_sum = 0; |
2086 | return; |
2087 | } |
2088 | |
2089 | /* |
2090 | * Debtors can pass through a lot of writes choking the device and we |
2091 | * don't want to be forgiving debts while the device is struggling from |
2092 | * write bursts. If we're missing latency targets, consider the device |
2093 | * fully utilized. |
2094 | */ |
2095 | if (ioc->busy_level > 0) |
2096 | usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us); |
2097 | |
2098 | ioc->dfgv_usage_us_sum += usage_us_sum; |
2099 | if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD)) |
2100 | return; |
2101 | |
2102 | /* |
2103 | * At least DFGV_PERIOD has passed since the last period. Calculate the |
2104 | * average usage and reset the period counters. |
2105 | */ |
2106 | dur = now->now - ioc->dfgv_period_at; |
	usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2108 | |
2109 | ioc->dfgv_period_at = now->now; |
2110 | ioc->dfgv_usage_us_sum = 0; |
2111 | |
2112 | /* if was too busy, reset everything */ |
2113 | if (usage_pct > DFGV_USAGE_PCT) { |
2114 | ioc->dfgv_period_rem = 0; |
2115 | return; |
2116 | } |
2117 | |
2118 | /* |
2119 | * Usage is lower than threshold. Let's forgive some debts. Debt |
2120 | * forgiveness runs off of the usual ioc timer but its period usually |
2121 | * doesn't match ioc's. Compensate the difference by performing the |
2122 | * reduction as many times as would fit in the duration since the last |
2123 | * run and carrying over the left-over duration in @ioc->dfgv_period_rem |
2124 | * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive |
2125 | * reductions is doubled. |
2126 | */ |
2127 | nr_cycles = dur + ioc->dfgv_period_rem; |
2128 | ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD); |
2129 | |
2130 | list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { |
2131 | u64 __maybe_unused old_debt, __maybe_unused old_delay; |
2132 | |
2133 | if (!iocg->abs_vdebt && !iocg->delay) |
2134 | continue; |
2135 | |
		spin_lock(&iocg->waitq.lock);
2137 | |
2138 | old_debt = iocg->abs_vdebt; |
2139 | old_delay = iocg->delay; |
2140 | |
2141 | if (iocg->abs_vdebt) |
2142 | iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1; |
2143 | if (iocg->delay) |
2144 | iocg->delay = iocg->delay >> nr_cycles ?: 1; |
2145 | |
		iocg_kick_waitq(iocg, true, now);
2147 | |
2148 | TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct, |
2149 | old_debt, iocg->abs_vdebt, |
2150 | old_delay, iocg->delay); |
2151 | |
		spin_unlock(&iocg->waitq.lock);
2153 | } |
2154 | } |
2155 | |
2156 | /* |
 * Check the active iocgs' state to avoid oversleeping and deactivate
2158 | * idle iocgs. |
2159 | * |
2160 | * Since waiters determine the sleep durations based on the vrate |
2161 | * they saw at the time of sleep, if vrate has increased, some |
2162 | * waiters could be sleeping for too long. Wake up tardy waiters |
2163 | * which should have woken up in the last period and expire idle |
2164 | * iocgs. |
2165 | */ |
2166 | static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now) |
2167 | { |
2168 | int nr_debtors = 0; |
2169 | struct ioc_gq *iocg, *tiocg; |
2170 | |
2171 | list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { |
		if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2173 | !iocg->delay && !iocg_is_idle(iocg)) |
2174 | continue; |
2175 | |
		spin_lock(&iocg->waitq.lock);
2177 | |
2178 | /* flush wait and indebt stat deltas */ |
2179 | if (iocg->wait_since) { |
2180 | iocg->stat.wait_us += now->now - iocg->wait_since; |
2181 | iocg->wait_since = now->now; |
2182 | } |
2183 | if (iocg->indebt_since) { |
2184 | iocg->stat.indebt_us += |
2185 | now->now - iocg->indebt_since; |
2186 | iocg->indebt_since = now->now; |
2187 | } |
2188 | if (iocg->indelay_since) { |
2189 | iocg->stat.indelay_us += |
2190 | now->now - iocg->indelay_since; |
2191 | iocg->indelay_since = now->now; |
2192 | } |
2193 | |
		if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
		    iocg->delay) {
			/* might be oversleeping vtime / hweight changes, kick */
			iocg_kick_waitq(iocg, true, now);
2198 | if (iocg->abs_vdebt || iocg->delay) |
2199 | nr_debtors++; |
2200 | } else if (iocg_is_idle(iocg)) { |
2201 | /* no waiter and idle, deactivate */ |
			u64 vtime = atomic64_read(&iocg->vtime);
2203 | s64 excess; |
2204 | |
2205 | /* |
2206 | * @iocg has been inactive for a full duration and will |
2207 | * have a high budget. Account anything above target as |
2208 | * error and throw away. On reactivation, it'll start |
2209 | * with the target budget. |
2210 | */ |
2211 | excess = now->vnow - vtime - ioc->margins.target; |
2212 | if (excess > 0) { |
2213 | u32 old_hwi; |
2214 | |
				current_hweight(iocg, NULL, &old_hwi);
				ioc->vtime_err -= div64_u64(excess * old_hwi,
							    WEIGHT_ONE);
2218 | } |
2219 | |
2220 | TRACE_IOCG_PATH(iocg_idle, iocg, now, |
2221 | atomic64_read(&iocg->active_period), |
2222 | atomic64_read(&ioc->cur_period), vtime); |
			__propagate_weights(iocg, 0, 0, false, now);
			list_del_init(&iocg->active_list);
2225 | } |
2226 | |
		spin_unlock(&iocg->waitq.lock);
2228 | } |
2229 | |
2230 | commit_weights(ioc); |
2231 | return nr_debtors; |
2232 | } |
2233 | |
2234 | static void ioc_timer_fn(struct timer_list *timer) |
2235 | { |
2236 | struct ioc *ioc = container_of(timer, struct ioc, timer); |
2237 | struct ioc_gq *iocg, *tiocg; |
2238 | struct ioc_now now; |
2239 | LIST_HEAD(surpluses); |
2240 | int nr_debtors, nr_shortages = 0, nr_lagging = 0; |
2241 | u64 usage_us_sum = 0; |
2242 | u32 ppm_rthr; |
2243 | u32 ppm_wthr; |
2244 | u32 missed_ppm[2], rq_wait_pct; |
2245 | u64 period_vtime; |
2246 | int prev_busy_level; |
2247 | |
2248 | /* how were the latencies during the period? */ |
	ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);

	/* take care of active iocgs */
	spin_lock_irq(&ioc->lock);

	ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
	ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
	ioc_now(ioc, &now);
2257 | |
2258 | period_vtime = now.vnow - ioc->period_at_vtime; |
2259 | if (WARN_ON_ONCE(!period_vtime)) { |
		spin_unlock_irq(&ioc->lock);
2261 | return; |
2262 | } |
2263 | |
	nr_debtors = ioc_check_iocgs(ioc, &now);

	/*
	 * Wait and indebt stat are flushed above and the donation calculation
	 * below needs updated usage stat. Let's bring stat up-to-date.
	 */
	iocg_flush_stat(&ioc->active_iocgs, &now);
2271 | |
2272 | /* calc usage and see whether some weights need to be moved around */ |
2273 | list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { |
2274 | u64 vdone, vtime, usage_us; |
2275 | u32 hw_active, hw_inuse; |
2276 | |
2277 | /* |
2278 | * Collect unused and wind vtime closer to vnow to prevent |
2279 | * iocgs from accumulating a large amount of budget. |
2280 | */ |
		vdone = atomic64_read(&iocg->done_vtime);
		vtime = atomic64_read(&iocg->vtime);
		current_hweight(iocg, &hw_active, &hw_inuse);
2284 | |
2285 | /* |
2286 | * Latency QoS detection doesn't account for IOs which are |
2287 | * in-flight for longer than a period. Detect them by |
2288 | * comparing vdone against period start. If lagging behind |
2289 | * IOs from past periods, don't increase vrate. |
2290 | */ |
2291 | if ((ppm_rthr != MILLION || ppm_wthr != MILLION) && |
		    !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2293 | time_after64(vtime, vdone) && |
2294 | time_after64(vtime, now.vnow - |
2295 | MAX_LAGGING_PERIODS * period_vtime) && |
2296 | time_before64(vdone, now.vnow - period_vtime)) |
2297 | nr_lagging++; |
2298 | |
2299 | /* |
2300 | * Determine absolute usage factoring in in-flight IOs to avoid |
2301 | * high-latency completions appearing as idle. |
2302 | */ |
2303 | usage_us = iocg->usage_delta_us; |
2304 | usage_us_sum += usage_us; |
2305 | |
2306 | /* see whether there's surplus vtime */ |
2307 | WARN_ON_ONCE(!list_empty(&iocg->surplus_list)); |
2308 | if (hw_inuse < hw_active || |
		    (!waitqueue_active(&iocg->waitq) &&
2310 | time_before64(vtime, now.vnow - ioc->margins.low))) { |
2311 | u32 hwa, old_hwi, hwm, new_hwi, usage; |
2312 | u64 usage_dur; |
2313 | |
2314 | if (vdone != vtime) { |
2315 | u64 inflight_us = DIV64_U64_ROUND_UP( |
2316 | cost_to_abs_cost(vtime - vdone, hw_inuse), |
2317 | ioc->vtime_base_rate); |
2318 | |
2319 | usage_us = max(usage_us, inflight_us); |
2320 | } |
2321 | |
2322 | /* convert to hweight based usage ratio */ |
2323 | if (time_after64(iocg->activated_at, ioc->period_at)) |
2324 | usage_dur = max_t(u64, now.now - iocg->activated_at, 1); |
2325 | else |
2326 | usage_dur = max_t(u64, now.now - ioc->period_at, 1); |
2327 | |
2328 | usage = clamp_t(u32, |
2329 | DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE, |
2330 | usage_dur), |
2331 | 1, WEIGHT_ONE); |
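			/*
			 * e.g. (illustrative) 5,000us of device time consumed
			 * over a 50,000us window maps to a usage of about
			 * WEIGHT_ONE / 10, i.e. ~10% of the device.
			 */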
2332 | |
2333 | /* |
2334 | * Already donating or accumulated enough to start. |
2335 | * Determine the donation amount. |
2336 | */ |
			current_hweight(iocg, &hwa, &old_hwi);
			hwm = current_hweight_max(iocg);
			new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
							 usage, &now);
2341 | /* |
2342 | * Donation calculation assumes hweight_after_donation |
2343 | * to be positive, a condition that a donor w/ hwa < 2 |
2344 | * can't meet. Don't bother with donation if hwa is |
2345 | * below 2. It's not gonna make a meaningful difference |
2346 | * anyway. |
2347 | */ |
2348 | if (new_hwi < hwm && hwa >= 2) { |
2349 | iocg->hweight_donating = hwa; |
2350 | iocg->hweight_after_donation = new_hwi; |
				list_add(&iocg->surplus_list, &surpluses);
2352 | } else if (!iocg->abs_vdebt) { |
2353 | /* |
2354 | * @iocg doesn't have enough to donate. Reset |
2355 | * its inuse to active. |
2356 | * |
2357 | * Don't reset debtors as their inuse's are |
2358 | * owned by debt handling. This shouldn't affect |
				 * donation calculation in any meaningful way
2360 | * as @iocg doesn't have a meaningful amount of |
2361 | * share anyway. |
2362 | */ |
2363 | TRACE_IOCG_PATH(inuse_shortage, iocg, &now, |
2364 | iocg->inuse, iocg->active, |
2365 | iocg->hweight_inuse, new_hwi); |
2366 | |
				__propagate_weights(iocg, iocg->active,
						    iocg->active, true, &now);
2369 | nr_shortages++; |
2370 | } |
2371 | } else { |
2372 | /* genuinely short on vtime */ |
2373 | nr_shortages++; |
2374 | } |
2375 | } |
2376 | |
	if (!list_empty(&surpluses) && nr_shortages)
		transfer_surpluses(&surpluses, &now);
2379 | |
2380 | commit_weights(ioc); |
2381 | |
2382 | /* surplus list should be dissolved after use */ |
2383 | list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list) |
		list_del_init(&iocg->surplus_list);
2385 | |
2386 | /* |
2387 | * If q is getting clogged or we're missing too much, we're issuing |
2388 | * too much IO and should lower vtime rate. If we're not missing |
2389 | * and experiencing shortages but not surpluses, we're too stingy |
2390 | * and should increase vtime rate. |
2391 | */ |
2392 | prev_busy_level = ioc->busy_level; |
2393 | if (rq_wait_pct > RQ_WAIT_BUSY_PCT || |
2394 | missed_ppm[READ] > ppm_rthr || |
2395 | missed_ppm[WRITE] > ppm_wthr) { |
2396 | /* clearly missing QoS targets, slow down vrate */ |
2397 | ioc->busy_level = max(ioc->busy_level, 0); |
2398 | ioc->busy_level++; |
2399 | } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 && |
2400 | missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 && |
2401 | missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) { |
2402 | /* QoS targets are being met with >25% margin */ |
2403 | if (nr_shortages) { |
2404 | /* |
2405 | * We're throttling while the device has spare |
2406 | * capacity. If vrate was being slowed down, stop. |
2407 | */ |
2408 | ioc->busy_level = min(ioc->busy_level, 0); |
2409 | |
2410 | /* |
2411 | * If there are IOs spanning multiple periods, wait |
2412 | * them out before pushing the device harder. |
2413 | */ |
2414 | if (!nr_lagging) |
2415 | ioc->busy_level--; |
2416 | } else { |
2417 | /* |
2418 | * Nobody is being throttled and the users aren't |
2419 | * issuing enough IOs to saturate the device. We |
2420 | * simply don't know how close the device is to |
2421 | * saturation. Coast. |
2422 | */ |
2423 | ioc->busy_level = 0; |
2424 | } |
2425 | } else { |
		/* inside the hysteresis margin, we're good */
2427 | ioc->busy_level = 0; |
2428 | } |
2429 | |
2430 | ioc->busy_level = clamp(ioc->busy_level, -1000, 1000); |
2431 | |
2432 | ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages, |
2433 | prev_busy_level, missed_ppm); |
2434 | |
	ioc_refresh_params(ioc, false);

	ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2438 | |
2439 | /* |
2440 | * This period is done. Move onto the next one. If nothing's |
2441 | * going on with the device, stop the timer. |
2442 | */ |
	atomic64_inc(&ioc->cur_period);
2444 | |
2445 | if (ioc->running != IOC_STOP) { |
		if (!list_empty(&ioc->active_iocgs)) {
			ioc_start_period(ioc, &now);
2448 | } else { |
2449 | ioc->busy_level = 0; |
2450 | ioc->vtime_err = 0; |
2451 | ioc->running = IOC_IDLE; |
2452 | } |
2453 | |
		ioc_refresh_vrate(ioc, &now);
	}

	spin_unlock_irq(&ioc->lock);
2458 | } |
2459 | |
2460 | static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, |
2461 | u64 abs_cost, struct ioc_now *now) |
2462 | { |
2463 | struct ioc *ioc = iocg->ioc; |
2464 | struct ioc_margins *margins = &ioc->margins; |
2465 | u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi; |
2466 | u32 hwi, adj_step; |
2467 | s64 margin; |
2468 | u64 cost, new_inuse; |
2469 | unsigned long flags; |
2470 | |
	current_hweight(iocg, NULL, &hwi);
	old_hwi = hwi;
	cost = abs_cost_to_cost(abs_cost, hwi);
2474 | margin = now->vnow - vtime - cost; |
2475 | |
2476 | /* debt handling owns inuse for debtors */ |
2477 | if (iocg->abs_vdebt) |
2478 | return cost; |
2479 | |
2480 | /* |
2481 | * We only increase inuse during period and do so if the margin has |
2482 | * deteriorated since the previous adjustment. |
2483 | */ |
2484 | if (margin >= iocg->saved_margin || margin >= margins->low || |
2485 | iocg->inuse == iocg->active) |
2486 | return cost; |
2487 | |
2488 | spin_lock_irqsave(&ioc->lock, flags); |
2489 | |
2490 | /* we own inuse only when @iocg is in the normal active state */ |
	if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
		spin_unlock_irqrestore(&ioc->lock, flags);
2493 | return cost; |
2494 | } |
2495 | |
2496 | /* |
2497 | * Bump up inuse till @abs_cost fits in the existing budget. |
2498 | * adj_step must be determined after acquiring ioc->lock - we might |
2499 | * have raced and lost to another thread for activation and could |
2500 | * be reading 0 iocg->active before ioc->lock which will lead to |
2501 | * infinite loop. |
2502 | */ |
2503 | new_inuse = iocg->inuse; |
2504 | adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100); |
2505 | do { |
2506 | new_inuse = new_inuse + adj_step; |
		propagate_weights(iocg, iocg->active, new_inuse, true, now);
		current_hweight(iocg, NULL, &hwi);
		cost = abs_cost_to_cost(abs_cost, hwi);
2510 | } while (time_after64(vtime + cost, now->vnow) && |
2511 | iocg->inuse != iocg->active); |
2512 | |
	spin_unlock_irqrestore(&ioc->lock, flags);
2514 | |
2515 | TRACE_IOCG_PATH(inuse_adjust, iocg, now, |
2516 | old_inuse, iocg->inuse, old_hwi, hwi); |
2517 | |
2518 | return cost; |
2519 | } |
2520 | |
2521 | static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg, |
2522 | bool is_merge, u64 *costp) |
2523 | { |
2524 | struct ioc *ioc = iocg->ioc; |
2525 | u64 coef_seqio, coef_randio, coef_page; |
2526 | u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1); |
2527 | u64 seek_pages = 0; |
2528 | u64 cost = 0; |
2529 | |
2530 | /* Can't calculate cost for empty bio */ |
2531 | if (!bio->bi_iter.bi_size) |
2532 | goto out; |
2533 | |
2534 | switch (bio_op(bio)) { |
2535 | case REQ_OP_READ: |
2536 | coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO]; |
2537 | coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO]; |
2538 | coef_page = ioc->params.lcoefs[LCOEF_RPAGE]; |
2539 | break; |
2540 | case REQ_OP_WRITE: |
2541 | coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO]; |
2542 | coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO]; |
2543 | coef_page = ioc->params.lcoefs[LCOEF_WPAGE]; |
2544 | break; |
2545 | default: |
2546 | goto out; |
2547 | } |
2548 | |
2549 | if (iocg->cursor) { |
2550 | seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor); |
2551 | seek_pages >>= IOC_SECT_TO_PAGE_SHIFT; |
2552 | } |
2553 | |
2554 | if (!is_merge) { |
2555 | if (seek_pages > LCOEF_RANDIO_PAGES) { |
2556 | cost += coef_randio; |
2557 | } else { |
2558 | cost += coef_seqio; |
2559 | } |
2560 | } |
2561 | cost += pages * coef_page; |
2562 | out: |
2563 | *costp = cost; |
2564 | } |
2565 | |
2566 | static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge) |
2567 | { |
2568 | u64 cost; |
2569 | |
	calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2571 | return cost; |
2572 | } |
2573 | |
2574 | static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc, |
2575 | u64 *costp) |
2576 | { |
2577 | unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT; |
2578 | |
	switch (req_op(rq)) {
2580 | case REQ_OP_READ: |
2581 | *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE]; |
2582 | break; |
2583 | case REQ_OP_WRITE: |
2584 | *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE]; |
2585 | break; |
2586 | default: |
2587 | *costp = 0; |
2588 | } |
2589 | } |
2590 | |
2591 | static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc) |
2592 | { |
2593 | u64 cost; |
2594 | |
	calc_size_vtime_cost_builtin(rq, ioc, &cost);
2596 | return cost; |
2597 | } |
2598 | |
2599 | static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) |
2600 | { |
2601 | struct blkcg_gq *blkg = bio->bi_blkg; |
2602 | struct ioc *ioc = rqos_to_ioc(rqos); |
2603 | struct ioc_gq *iocg = blkg_to_iocg(blkg); |
2604 | struct ioc_now now; |
2605 | struct iocg_wait wait; |
2606 | u64 abs_cost, cost, vtime; |
2607 | bool use_debt, ioc_locked; |
2608 | unsigned long flags; |
2609 | |
2610 | /* bypass IOs if disabled, still initializing, or for root cgroup */ |
2611 | if (!ioc->enabled || !iocg || !iocg->level) |
2612 | return; |
2613 | |
2614 | /* calculate the absolute vtime cost */ |
	abs_cost = calc_vtime_cost(bio, iocg, false);
	if (!abs_cost)
		return;

	if (!iocg_activate(iocg, &now))
		return;

	iocg->cursor = bio_end_sector(bio);
	vtime = atomic64_read(&iocg->vtime);
	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2625 | |
2626 | /* |
2627 | * If no one's waiting and within budget, issue right away. The |
2628 | * tests are racy but the races aren't systemic - we only miss once |
2629 | * in a while which is fine. |
2630 | */ |
	if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2632 | time_before_eq64(vtime + cost, now.vnow)) { |
2633 | iocg_commit_bio(iocg, bio, abs_cost, cost); |
2634 | return; |
2635 | } |
2636 | |
2637 | /* |
2638 | * We're over budget. This can be handled in two ways. IOs which may |
2639 | * cause priority inversions are punted to @ioc->aux_iocg and charged as |
2640 | * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling |
2641 | * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine |
2642 | * whether debt handling is needed and acquire locks accordingly. |
2643 | */ |
2644 | use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current); |
2645 | ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt); |
2646 | retry_lock: |
	iocg_lock(iocg, ioc_locked, &flags);
2648 | |
2649 | /* |
2650 | * @iocg must stay activated for debt and waitq handling. Deactivation |
2651 | * is synchronized against both ioc->lock and waitq.lock and we won't |
	 * get deactivated as long as we're waiting or have debt, so we're good
2653 | * if we're activated here. In the unlikely cases that we aren't, just |
2654 | * issue the IO. |
2655 | */ |
2656 | if (unlikely(list_empty(&iocg->active_list))) { |
		iocg_unlock(iocg, ioc_locked, &flags);
		iocg_commit_bio(iocg, bio, abs_cost, cost);
2659 | return; |
2660 | } |
2661 | |
2662 | /* |
2663 | * We're over budget. If @bio has to be issued regardless, remember |
2664 | * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay |
2665 | * off the debt before waking more IOs. |
2666 | * |
2667 | * This way, the debt is continuously paid off each period with the |
2668 | * actual budget available to the cgroup. If we just wound vtime, we |
2669 | * would incorrectly use the current hw_inuse for the entire amount |
2670 | * which, for example, can lead to the cgroup staying blocked for a |
2671 | * long time even with substantially raised hw_inuse. |
2672 | * |
2673 | * An iocg with vdebt should stay online so that the timer can keep |
2674 | * deducting its vdebt and [de]activate use_delay mechanism |
2675 | * accordingly. We don't want to race against the timer trying to |
2676 | * clear them and leave @iocg inactive w/ dangling use_delay heavily |
2677 | * penalizing the cgroup and its descendants. |
2678 | */ |
2679 | if (use_debt) { |
		iocg_incur_debt(iocg, abs_cost, &now);
		if (iocg_kick_delay(iocg, &now))
			blkcg_schedule_throttle(rqos->disk,
					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
		iocg_unlock(iocg, ioc_locked, &flags);
2685 | return; |
2686 | } |
2687 | |
2688 | /* guarantee that iocgs w/ waiters have maximum inuse */ |
2689 | if (!iocg->abs_vdebt && iocg->inuse != iocg->active) { |
2690 | if (!ioc_locked) { |
			iocg_unlock(iocg, false, &flags);
2692 | ioc_locked = true; |
2693 | goto retry_lock; |
2694 | } |
		propagate_weights(iocg, iocg->active, iocg->active, true,
				  &now);
2697 | } |
2698 | |
2699 | /* |
2700 | * Append self to the waitq and schedule the wakeup timer if we're |
2701 | * the first waiter. The timer duration is calculated based on the |
2702 | * current vrate. vtime and hweight changes can make it too short |
2703 | * or too long. Each wait entry records the absolute cost it's |
2704 | * waiting for to allow re-evaluation using a custom wait entry. |
2705 | * |
2706 | * If too short, the timer simply reschedules itself. If too long, |
2707 | * the period timer will notice and trigger wakeups. |
2708 | * |
2709 | * All waiters are on iocg->waitq and the wait states are |
2710 | * synchronized using waitq.lock. |
2711 | */ |
	init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2713 | wait.wait.private = current; |
2714 | wait.bio = bio; |
2715 | wait.abs_cost = abs_cost; |
2716 | wait.committed = false; /* will be set true by waker */ |
2717 | |
	__add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
	iocg_kick_waitq(iocg, ioc_locked, &now);

	iocg_unlock(iocg, ioc_locked, &flags);
2722 | |
2723 | while (true) { |
2724 | set_current_state(TASK_UNINTERRUPTIBLE); |
2725 | if (wait.committed) |
2726 | break; |
2727 | io_schedule(); |
2728 | } |
2729 | |
2730 | /* waker already committed us, proceed */ |
	finish_wait(&iocg->waitq, &wait.wait);
2732 | } |
2733 | |
2734 | static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, |
2735 | struct bio *bio) |
2736 | { |
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2738 | struct ioc *ioc = rqos_to_ioc(rqos); |
2739 | sector_t bio_end = bio_end_sector(bio); |
2740 | struct ioc_now now; |
2741 | u64 vtime, abs_cost, cost; |
2742 | unsigned long flags; |
2743 | |
2744 | /* bypass if disabled, still initializing, or for root cgroup */ |
2745 | if (!ioc->enabled || !iocg || !iocg->level) |
2746 | return; |
2747 | |
	abs_cost = calc_vtime_cost(bio, iocg, true);
	if (!abs_cost)
		return;

	ioc_now(ioc, &now);

	vtime = atomic64_read(&iocg->vtime);
	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2756 | |
2757 | /* update cursor if backmerging into the request at the cursor */ |
2758 | if (blk_rq_pos(rq) < bio_end && |
2759 | blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor) |
2760 | iocg->cursor = bio_end; |
2761 | |
2762 | /* |
2763 | * Charge if there's enough vtime budget and the existing request has |
2764 | * cost assigned. |
2765 | */ |
2766 | if (rq->bio && rq->bio->bi_iocost_cost && |
2767 | time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) { |
2768 | iocg_commit_bio(iocg, bio, abs_cost, cost); |
2769 | return; |
2770 | } |
2771 | |
2772 | /* |
2773 | * Otherwise, account it as debt if @iocg is online, which it should |
2774 | * be for the vast majority of cases. See debt handling in |
2775 | * ioc_rqos_throttle() for details. |
2776 | */ |
2777 | spin_lock_irqsave(&ioc->lock, flags); |
	spin_lock(&iocg->waitq.lock);
2779 | |
2780 | if (likely(!list_empty(&iocg->active_list))) { |
		iocg_incur_debt(iocg, abs_cost, &now);
		if (iocg_kick_delay(iocg, &now))
			blkcg_schedule_throttle(rqos->disk,
					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2785 | } else { |
2786 | iocg_commit_bio(iocg, bio, abs_cost, cost); |
2787 | } |
2788 | |
	spin_unlock(&iocg->waitq.lock);
	spin_unlock_irqrestore(&ioc->lock, flags);
2791 | } |
2792 | |
2793 | static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio) |
2794 | { |
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);

	if (iocg && bio->bi_iocost_cost)
		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2799 | } |
2800 | |
2801 | static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq) |
2802 | { |
2803 | struct ioc *ioc = rqos_to_ioc(rqos); |
2804 | struct ioc_pcpu_stat *ccs; |
2805 | u64 on_q_ns, rq_wait_ns, size_nsec; |
2806 | int pidx, rw; |
2807 | |
2808 | if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns) |
2809 | return; |
2810 | |
	switch (req_op(rq)) {
2812 | case REQ_OP_READ: |
2813 | pidx = QOS_RLAT; |
2814 | rw = READ; |
2815 | break; |
2816 | case REQ_OP_WRITE: |
2817 | pidx = QOS_WLAT; |
2818 | rw = WRITE; |
2819 | break; |
2820 | default: |
2821 | return; |
2822 | } |
2823 | |
2824 | on_q_ns = blk_time_get_ns() - rq->alloc_time_ns; |
2825 | rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns; |
	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
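	/*
	 * size_nsec is the on-device time the size cost model attributes to
	 * this request. For example (illustrative numbers), a request that
	 * spent on_q_ns = 3ms in flight with 0.5ms attributed to its size
	 * counts as "met" as long as the configured latency target is at
	 * least 2.5ms (2500us).
	 */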
2827 | |
2828 | ccs = get_cpu_ptr(ioc->pcpu_stat); |
2829 | |
2830 | if (on_q_ns <= size_nsec || |
2831 | on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC) |
		local_inc(&ccs->missed[rw].nr_met);
	else
		local_inc(&ccs->missed[rw].nr_missed);
2835 | |
2836 | local64_add(rq_wait_ns, &ccs->rq_wait_ns); |
2837 | |
2838 | put_cpu_ptr(ccs); |
2839 | } |
2840 | |
2841 | static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos) |
2842 | { |
2843 | struct ioc *ioc = rqos_to_ioc(rqos); |
2844 | |
	spin_lock_irq(&ioc->lock);
	ioc_refresh_params(ioc, false);
	spin_unlock_irq(&ioc->lock);
2848 | } |
2849 | |
2850 | static void ioc_rqos_exit(struct rq_qos *rqos) |
2851 | { |
2852 | struct ioc *ioc = rqos_to_ioc(rqos); |
2853 | |
	blkcg_deactivate_policy(rqos->disk, &blkcg_policy_iocost);

	spin_lock_irq(&ioc->lock);
	ioc->running = IOC_STOP;
	spin_unlock_irq(&ioc->lock);

	timer_shutdown_sync(&ioc->timer);
	free_percpu(ioc->pcpu_stat);
	kfree(ioc);
2863 | } |
2864 | |
2865 | static const struct rq_qos_ops ioc_rqos_ops = { |
2866 | .throttle = ioc_rqos_throttle, |
2867 | .merge = ioc_rqos_merge, |
2868 | .done_bio = ioc_rqos_done_bio, |
2869 | .done = ioc_rqos_done, |
2870 | .queue_depth_changed = ioc_rqos_queue_depth_changed, |
2871 | .exit = ioc_rqos_exit, |
2872 | }; |
2873 | |
2874 | static int blk_iocost_init(struct gendisk *disk) |
2875 | { |
2876 | struct ioc *ioc; |
2877 | int i, cpu, ret; |
2878 | |
	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2880 | if (!ioc) |
2881 | return -ENOMEM; |
2882 | |
2883 | ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat); |
2884 | if (!ioc->pcpu_stat) { |
		kfree(ioc);
2886 | return -ENOMEM; |
2887 | } |
2888 | |
2889 | for_each_possible_cpu(cpu) { |
2890 | struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu); |
2891 | |
2892 | for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) { |
2893 | local_set(&ccs->missed[i].nr_met, 0); |
2894 | local_set(&ccs->missed[i].nr_missed, 0); |
2895 | } |
2896 | local64_set(&ccs->rq_wait_ns, 0); |
2897 | } |
2898 | |
2899 | spin_lock_init(&ioc->lock); |
2900 | timer_setup(&ioc->timer, ioc_timer_fn, 0); |
	INIT_LIST_HEAD(&ioc->active_iocgs);

	ioc->running = IOC_IDLE;
	ioc->vtime_base_rate = VTIME_PER_USEC;
	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
	ioc->period_at = ktime_to_us(blk_time_get());
	atomic64_set(&ioc->cur_period, 0);
	atomic_set(&ioc->hweight_gen, 0);

	spin_lock_irq(&ioc->lock);
	ioc->autop_idx = AUTOP_INVALID;
	ioc_refresh_params_disk(ioc, true, disk);
	spin_unlock_irq(&ioc->lock);
2915 | |
2916 | /* |
2917 | * rqos must be added before activation to allow ioc_pd_init() to |
2918 | * lookup the ioc from q. This means that the rqos methods may get |
2919 | * called before policy activation completion, can't assume that the |
2920 | * target bio has an iocg associated and need to test for NULL iocg. |
2921 | */ |
	ret = rq_qos_add(&ioc->rqos, disk, RQ_QOS_COST, &ioc_rqos_ops);
2923 | if (ret) |
2924 | goto err_free_ioc; |
2925 | |
	ret = blkcg_activate_policy(disk, &blkcg_policy_iocost);
2927 | if (ret) |
2928 | goto err_del_qos; |
2929 | return 0; |
2930 | |
2931 | err_del_qos: |
	rq_qos_del(&ioc->rqos);
err_free_ioc:
	free_percpu(ioc->pcpu_stat);
	kfree(ioc);
2936 | return ret; |
2937 | } |
2938 | |
2939 | static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp) |
2940 | { |
2941 | struct ioc_cgrp *iocc; |
2942 | |
	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2944 | if (!iocc) |
2945 | return NULL; |
2946 | |
2947 | iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE; |
2948 | return &iocc->cpd; |
2949 | } |
2950 | |
2951 | static void ioc_cpd_free(struct blkcg_policy_data *cpd) |
2952 | { |
2953 | kfree(container_of(cpd, struct ioc_cgrp, cpd)); |
2954 | } |
2955 | |
2956 | static struct blkg_policy_data *ioc_pd_alloc(struct gendisk *disk, |
2957 | struct blkcg *blkcg, gfp_t gfp) |
2958 | { |
2959 | int levels = blkcg->css.cgroup->level + 1; |
2960 | struct ioc_gq *iocg; |
2961 | |
	iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp,
			    disk->node_id);
2964 | if (!iocg) |
2965 | return NULL; |
2966 | |
2967 | iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp); |
2968 | if (!iocg->pcpu_stat) { |
		kfree(iocg);
2970 | return NULL; |
2971 | } |
2972 | |
2973 | return &iocg->pd; |
2974 | } |
2975 | |
2976 | static void ioc_pd_init(struct blkg_policy_data *pd) |
2977 | { |
2978 | struct ioc_gq *iocg = pd_to_iocg(pd); |
	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
	struct ioc *ioc = q_to_ioc(blkg->q);
2981 | struct ioc_now now; |
2982 | struct blkcg_gq *tblkg; |
2983 | unsigned long flags; |
2984 | |
	ioc_now(ioc, &now);

	iocg->ioc = ioc;
	atomic64_set(&iocg->vtime, now.vnow);
	atomic64_set(&iocg->done_vtime, now.vnow);
	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
	INIT_LIST_HEAD(&iocg->active_list);
	INIT_LIST_HEAD(&iocg->walk_list);
	INIT_LIST_HEAD(&iocg->surplus_list);
2994 | iocg->hweight_active = WEIGHT_ONE; |
2995 | iocg->hweight_inuse = WEIGHT_ONE; |
2996 | |
2997 | init_waitqueue_head(&iocg->waitq); |
	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2999 | iocg->waitq_timer.function = iocg_waitq_timer_fn; |
3000 | |
3001 | iocg->level = blkg->blkcg->css.cgroup->level; |
3002 | |
3003 | for (tblkg = blkg; tblkg; tblkg = tblkg->parent) { |
		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);

		iocg->ancestors[tiocg->level] = tiocg;
3006 | } |
3007 | |
3008 | spin_lock_irqsave(&ioc->lock, flags); |
	weight_updated(iocg, &now);
	spin_unlock_irqrestore(&ioc->lock, flags);
3011 | } |
3012 | |
3013 | static void ioc_pd_free(struct blkg_policy_data *pd) |
3014 | { |
3015 | struct ioc_gq *iocg = pd_to_iocg(pd); |
3016 | struct ioc *ioc = iocg->ioc; |
3017 | unsigned long flags; |
3018 | |
3019 | if (ioc) { |
3020 | spin_lock_irqsave(&ioc->lock, flags); |
3021 | |
		if (!list_empty(&iocg->active_list)) {
			struct ioc_now now;

			ioc_now(ioc, &now);
			propagate_weights(iocg, 0, 0, false, &now);
			list_del_init(&iocg->active_list);
3028 | } |
3029 | |
3030 | WARN_ON_ONCE(!list_empty(&iocg->walk_list)); |
3031 | WARN_ON_ONCE(!list_empty(&iocg->surplus_list)); |
3032 | |
		spin_unlock_irqrestore(&ioc->lock, flags);

		hrtimer_cancel(&iocg->waitq_timer);
	}
	free_percpu(iocg->pcpu_stat);
	kfree(iocg);
3039 | } |
3040 | |
3041 | static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s) |
3042 | { |
3043 | struct ioc_gq *iocg = pd_to_iocg(pd); |
3044 | struct ioc *ioc = iocg->ioc; |
3045 | |
3046 | if (!ioc->enabled) |
3047 | return; |
3048 | |
3049 | if (iocg->level == 0) { |
3050 | unsigned vp10k = DIV64_U64_ROUND_CLOSEST( |
3051 | ioc->vtime_base_rate * 10000, |
3052 | VTIME_PER_USEC); |
		seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
3054 | } |
3055 | |
	seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3057 | |
3058 | if (blkcg_debug_stats) |
		seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3060 | iocg->last_stat.wait_us, |
3061 | iocg->last_stat.indebt_us, |
3062 | iocg->last_stat.indelay_us); |
3063 | } |
3064 | |
3065 | static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd, |
3066 | int off) |
3067 | { |
	const char *dname = blkg_dev_name(pd->blkg);
3069 | struct ioc_gq *iocg = pd_to_iocg(pd); |
3070 | |
3071 | if (dname && iocg->cfg_weight) |
		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
	return 0;
}

3077 | static int ioc_weight_show(struct seq_file *sf, void *v) |
3078 | { |
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);

	seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3085 | return 0; |
3086 | } |
3087 | |
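/*
 * io.weight accepts either "default $WEIGHT" / "$WEIGHT" to update the
 * cgroup-wide default, or "$MAJ:$MIN $WEIGHT" for a per-device override
 * ("$MAJ:$MIN default" clears it). Updates take effect immediately via
 * weight_updated().
 */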
3088 | static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf, |
3089 | size_t nbytes, loff_t off) |
3090 | { |
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
3092 | struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg); |
3093 | struct blkg_conf_ctx ctx; |
3094 | struct ioc_now now; |
3095 | struct ioc_gq *iocg; |
3096 | u32 v; |
3097 | int ret; |
3098 | |
3099 | if (!strchr(buf, ':')) { |
3100 | struct blkcg_gq *blkg; |
3101 | |
		if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3103 | return -EINVAL; |
3104 | |
3105 | if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX) |
3106 | return -EINVAL; |
3107 | |
		spin_lock_irq(&blkcg->lock);
3109 | iocc->dfl_weight = v * WEIGHT_ONE; |
3110 | hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { |
3111 | struct ioc_gq *iocg = blkg_to_iocg(blkg); |
3112 | |
3113 | if (iocg) { |
				spin_lock(&iocg->ioc->lock);
				ioc_now(iocg->ioc, &now);
				weight_updated(iocg, &now);
				spin_unlock(&iocg->ioc->lock);
			}
		}
		spin_unlock_irq(&blkcg->lock);
3121 | |
3122 | return nbytes; |
3123 | } |
3124 | |
	blkg_conf_init(&ctx, buf);

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, &ctx);
	if (ret)
		goto err;

	iocg = blkg_to_iocg(ctx.blkg);

	if (!strncmp(ctx.body, "default", 7)) {
		v = 0;
	} else {
		if (!sscanf(ctx.body, "%u", &v))
3137 | goto einval; |
3138 | if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX) |
3139 | goto einval; |
3140 | } |
3141 | |
	spin_lock(&iocg->ioc->lock);
	iocg->cfg_weight = v * WEIGHT_ONE;
	ioc_now(iocg->ioc, &now);
	weight_updated(iocg, &now);
	spin_unlock(&iocg->ioc->lock);

	blkg_conf_exit(&ctx);
3149 | return nbytes; |
3150 | |
3151 | einval: |
3152 | ret = -EINVAL; |
3153 | err: |
	blkg_conf_exit(&ctx);
3155 | return ret; |
3156 | } |
3157 | |
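/*
 * io.cost.qos interface. Shows, per device, whether the controller is
 * enabled, whether the QoS params are user-set or auto, the read/write
 * latency targets and percentiles, and the vrate min/max bounds.
 */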
3158 | static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd, |
3159 | int off) |
3160 | { |
	const char *dname = blkg_dev_name(pd->blkg);
3162 | struct ioc *ioc = pd_to_iocg(pd)->ioc; |
3163 | |
3164 | if (!dname) |
3165 | return 0; |
3166 | |
	spin_lock_irq(&ioc->lock);
	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3170 | ioc->params.qos[QOS_RPPM] / 10000, |
3171 | ioc->params.qos[QOS_RPPM] % 10000 / 100, |
3172 | ioc->params.qos[QOS_RLAT], |
3173 | ioc->params.qos[QOS_WPPM] / 10000, |
3174 | ioc->params.qos[QOS_WPPM] % 10000 / 100, |
3175 | ioc->params.qos[QOS_WLAT], |
3176 | ioc->params.qos[QOS_MIN] / 10000, |
3177 | ioc->params.qos[QOS_MIN] % 10000 / 100, |
3178 | ioc->params.qos[QOS_MAX] / 10000, |
3179 | ioc->params.qos[QOS_MAX] % 10000 / 100); |
	spin_unlock_irq(&ioc->lock);
3181 | return 0; |
3182 | } |
3183 | |
3184 | static int ioc_qos_show(struct seq_file *sf, void *v) |
3185 | { |
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3190 | return 0; |
3191 | } |
3192 | |
3193 | static const match_table_t qos_ctrl_tokens = { |
3194 | { QOS_ENABLE, "enable=%u" }, |
3195 | { QOS_CTRL, "ctrl=%s" }, |
3196 | { NR_QOS_CTRL_PARAMS, NULL }, |
3197 | }; |
3198 | |
3199 | static const match_table_t qos_tokens = { |
3200 | { QOS_RPPM, "rpct=%s" }, |
3201 | { QOS_RLAT, "rlat=%u" }, |
3202 | { QOS_WPPM, "wpct=%s" }, |
3203 | { QOS_WLAT, "wlat=%u" }, |
3204 | { QOS_MIN, "min=%s" }, |
3205 | { QOS_MAX, "max=%s" }, |
3206 | { NR_QOS_PARAMS, NULL }, |
3207 | }; |
3208 | |
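/*
 * Parse and apply io.cost.qos updates. The first write to a device
 * initializes iocost on it. The queue is frozen and quiesced while the
 * parameters are swapped, and wbt is disabled while iocost is enabled.
 */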
3209 | static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input, |
3210 | size_t nbytes, loff_t off) |
3211 | { |
3212 | struct blkg_conf_ctx ctx; |
3213 | struct gendisk *disk; |
3214 | struct ioc *ioc; |
3215 | u32 qos[NR_QOS_PARAMS]; |
3216 | bool enable, user; |
3217 | char *body, *p; |
3218 | int ret; |
3219 | |
	blkg_conf_init(&ctx, input);

	ret = blkg_conf_open_bdev(&ctx);
3223 | if (ret) |
3224 | goto err; |
3225 | |
3226 | body = ctx.body; |
3227 | disk = ctx.bdev->bd_disk; |
	if (!queue_is_mq(disk->queue)) {
3229 | ret = -EOPNOTSUPP; |
3230 | goto err; |
3231 | } |
3232 | |
	ioc = q_to_ioc(disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(disk);
		if (ret)
			goto err;
		ioc = q_to_ioc(disk->queue);
	}

	blk_mq_freeze_queue(disk->queue);
	blk_mq_quiesce_queue(disk->queue);

	spin_lock_irq(&ioc->lock);
3245 | memcpy(qos, ioc->params.qos, sizeof(qos)); |
3246 | enable = ioc->enabled; |
3247 | user = ioc->user_qos_params; |
3248 | |
	while ((p = strsep(&body, " \t\n"))) {
3250 | substring_t args[MAX_OPT_ARGS]; |
3251 | char buf[32]; |
3252 | int tok; |
3253 | s64 v; |
3254 | |
3255 | if (!*p) |
3256 | continue; |
3257 | |
		switch (match_token(p, qos_ctrl_tokens, args)) {
		case QOS_ENABLE:
			if (match_u64(&args[0], &v))
				goto einval;
			enable = v;
			continue;
		case QOS_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
3269 | user = true; |
3270 | else |
3271 | goto einval; |
3272 | continue; |
3273 | } |
3274 | |
		tok = match_token(p, qos_tokens, args);
		switch (tok) {
		case QOS_RPPM:
		case QOS_WPPM:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
3283 | goto einval; |
3284 | if (v < 0 || v > 10000) |
3285 | goto einval; |
3286 | qos[tok] = v * 100; |
3287 | break; |
3288 | case QOS_RLAT: |
3289 | case QOS_WLAT: |
			if (match_u64(&args[0], &v))
3291 | goto einval; |
3292 | qos[tok] = v; |
3293 | break; |
3294 | case QOS_MIN: |
3295 | case QOS_MAX: |
3296 | if (match_strlcpy(buf, &args[0], sizeof(buf)) >= |
3297 | sizeof(buf)) |
3298 | goto einval; |
			if (cgroup_parse_float(buf, 2, &v))
3300 | goto einval; |
3301 | if (v < 0) |
3302 | goto einval; |
3303 | qos[tok] = clamp_t(s64, v * 100, |
3304 | VRATE_MIN_PPM, VRATE_MAX_PPM); |
3305 | break; |
3306 | default: |
3307 | goto einval; |
3308 | } |
3309 | user = true; |
3310 | } |
3311 | |
3312 | if (qos[QOS_MIN] > qos[QOS_MAX]) |
3313 | goto einval; |
3314 | |
3315 | if (enable && !ioc->enabled) { |
		blk_stat_enable_accounting(disk->queue);
		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
		ioc->enabled = true;
	} else if (!enable && ioc->enabled) {
		blk_stat_disable_accounting(disk->queue);
		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3322 | ioc->enabled = false; |
3323 | } |
3324 | |
3325 | if (user) { |
3326 | memcpy(ioc->params.qos, qos, sizeof(qos)); |
3327 | ioc->user_qos_params = true; |
3328 | } else { |
3329 | ioc->user_qos_params = false; |
3330 | } |
3331 | |
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);
3334 | |
3335 | if (enable) |
3336 | wbt_disable_default(disk); |
3337 | else |
3338 | wbt_enable_default(disk); |
3339 | |
	blk_mq_unquiesce_queue(disk->queue);
	blk_mq_unfreeze_queue(disk->queue);

	blkg_conf_exit(&ctx);
3344 | return nbytes; |
3345 | einval: |
	spin_unlock_irq(&ioc->lock);

	blk_mq_unquiesce_queue(disk->queue);
	blk_mq_unfreeze_queue(disk->queue);

	ret = -EINVAL;
err:
	blkg_conf_exit(&ctx);
3354 | return ret; |
3355 | } |
3356 | |
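/*
 * io.cost.model interface. Shows, per device, whether the cost model
 * coefficients are user-set or auto and the linear model coefficients
 * (bps and sequential/random iops for reads and writes).
 */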
3357 | static u64 ioc_cost_model_prfill(struct seq_file *sf, |
3358 | struct blkg_policy_data *pd, int off) |
3359 | { |
	const char *dname = blkg_dev_name(pd->blkg);
3361 | struct ioc *ioc = pd_to_iocg(pd)->ioc; |
3362 | u64 *u = ioc->params.i_lcoefs; |
3363 | |
3364 | if (!dname) |
3365 | return 0; |
3366 | |
	spin_lock_irq(&ioc->lock);
	seq_printf(sf, "%s ctrl=%s model=linear "
		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
		   dname, ioc->user_cost_model ? "user" : "auto",
		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
	spin_unlock_irq(&ioc->lock);
3375 | return 0; |
3376 | } |
3377 | |
3378 | static int ioc_cost_model_show(struct seq_file *sf, void *v) |
3379 | { |
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3384 | return 0; |
3385 | } |
3386 | |
3387 | static const match_table_t cost_ctrl_tokens = { |
3388 | { COST_CTRL, "ctrl=%s" }, |
3389 | { COST_MODEL, "model=%s" }, |
3390 | { NR_COST_CTRL_PARAMS, NULL }, |
3391 | }; |
3392 | |
3393 | static const match_table_t i_lcoef_tokens = { |
3394 | { I_LCOEF_RBPS, "rbps=%u" }, |
3395 | { I_LCOEF_RSEQIOPS, "rseqiops=%u" }, |
3396 | { I_LCOEF_RRANDIOPS, "rrandiops=%u" }, |
3397 | { I_LCOEF_WBPS, "wbps=%u" }, |
3398 | { I_LCOEF_WSEQIOPS, "wseqiops=%u" }, |
3399 | { I_LCOEF_WRANDIOPS, "wrandiops=%u" }, |
3400 | { NR_I_LCOEFS, NULL }, |
3401 | }; |
3402 | |
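/*
 * Parse and apply io.cost.model updates. "ctrl=user|auto" selects between
 * user-supplied and automatic coefficients, "model=linear" is the only
 * accepted model, and writing any coefficient switches to user control.
 */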
3403 | static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input, |
3404 | size_t nbytes, loff_t off) |
3405 | { |
3406 | struct blkg_conf_ctx ctx; |
3407 | struct request_queue *q; |
3408 | struct ioc *ioc; |
3409 | u64 u[NR_I_LCOEFS]; |
3410 | bool user; |
3411 | char *body, *p; |
3412 | int ret; |
3413 | |
	blkg_conf_init(&ctx, input);

	ret = blkg_conf_open_bdev(&ctx);
	if (ret)
		goto err;

	body = ctx.body;
	q = bdev_get_queue(ctx.bdev);
3422 | if (!queue_is_mq(q)) { |
3423 | ret = -EOPNOTSUPP; |
3424 | goto err; |
3425 | } |
3426 | |
3427 | ioc = q_to_ioc(q); |
3428 | if (!ioc) { |
		ret = blk_iocost_init(ctx.bdev->bd_disk);
3430 | if (ret) |
3431 | goto err; |
3432 | ioc = q_to_ioc(q); |
3433 | } |
3434 | |
3435 | blk_mq_freeze_queue(q); |
3436 | blk_mq_quiesce_queue(q); |
3437 | |
	spin_lock_irq(&ioc->lock);
	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
	user = ioc->user_cost_model;

	while ((p = strsep(&body, " \t\n"))) {
3443 | substring_t args[MAX_OPT_ARGS]; |
3444 | char buf[32]; |
3445 | int tok; |
3446 | u64 v; |
3447 | |
3448 | if (!*p) |
3449 | continue; |
3450 | |
		switch (match_token(p, cost_ctrl_tokens, args)) {
		case COST_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
3457 | user = true; |
3458 | else |
3459 | goto einval; |
3460 | continue; |
3461 | case COST_MODEL: |
3462 | match_strlcpy(buf, &args[0], sizeof(buf)); |
			if (strcmp(buf, "linear"))
3464 | goto einval; |
3465 | continue; |
3466 | } |
3467 | |
		tok = match_token(p, i_lcoef_tokens, args);
		if (tok == NR_I_LCOEFS)
			goto einval;
		if (match_u64(&args[0], &v))
3472 | goto einval; |
3473 | u[tok] = v; |
3474 | user = true; |
3475 | } |
3476 | |
3477 | if (user) { |
3478 | memcpy(ioc->params.i_lcoefs, u, sizeof(u)); |
3479 | ioc->user_cost_model = true; |
3480 | } else { |
3481 | ioc->user_cost_model = false; |
3482 | } |
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	blkg_conf_exit(&ctx);
3490 | return nbytes; |
3491 | |
3492 | einval: |
	spin_unlock_irq(&ioc->lock);
3494 | |
3495 | blk_mq_unquiesce_queue(q); |
3496 | blk_mq_unfreeze_queue(q); |
3497 | |
3498 | ret = -EINVAL; |
3499 | err: |
	blkg_conf_exit(&ctx);
3501 | return ret; |
3502 | } |
3503 | |
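/* cgroup interface files: io.weight on !root cgroups, io.cost.qos and io.cost.model on root only */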
3504 | static struct cftype ioc_files[] = { |
3505 | { |
3506 | .name = "weight" , |
3507 | .flags = CFTYPE_NOT_ON_ROOT, |
3508 | .seq_show = ioc_weight_show, |
3509 | .write = ioc_weight_write, |
3510 | }, |
3511 | { |
3512 | .name = "cost.qos" , |
3513 | .flags = CFTYPE_ONLY_ON_ROOT, |
3514 | .seq_show = ioc_qos_show, |
3515 | .write = ioc_qos_write, |
3516 | }, |
3517 | { |
3518 | .name = "cost.model" , |
3519 | .flags = CFTYPE_ONLY_ON_ROOT, |
3520 | .seq_show = ioc_cost_model_show, |
3521 | .write = ioc_cost_model_write, |
3522 | }, |
3523 | {} |
3524 | }; |
3525 | |
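/* blkcg policy hooks wiring the cpd/pd alloc, init, free and stat callbacks into blk-cgroup */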
3526 | static struct blkcg_policy blkcg_policy_iocost = { |
3527 | .dfl_cftypes = ioc_files, |
3528 | .cpd_alloc_fn = ioc_cpd_alloc, |
3529 | .cpd_free_fn = ioc_cpd_free, |
3530 | .pd_alloc_fn = ioc_pd_alloc, |
3531 | .pd_init_fn = ioc_pd_init, |
3532 | .pd_free_fn = ioc_pd_free, |
3533 | .pd_stat_fn = ioc_pd_stat, |
3534 | }; |
3535 | |
3536 | static int __init ioc_init(void) |
3537 | { |
	return blkcg_policy_register(&blkcg_policy_iocost);
3539 | } |
3540 | |
3541 | static void __exit ioc_exit(void) |
3542 | { |
	blkcg_policy_unregister(&blkcg_policy_iocost);
3544 | } |
3545 | |
3546 | module_init(ioc_init); |
3547 | module_exit(ioc_exit); |
3548 | |