/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,
	BLKG_RWSTAT_DISCARD,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
	refcount_t			cgwb_refcnt;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive ones. It is used to carry the stats of dead children and, for
 * blkg_rwstat, to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q). This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods. A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
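
/*
 * Example (editorial sketch, not part of this interface): a policy would
 * typically embed blkg_policy_data in its own per-blkg structure and
 * convert back with container_of(). The "example_pd" names below are
 * hypothetical.
 *
 *	struct example_pd {
 *		struct blkg_policy_data pd;	// embedded at the beginning
 *		u64 budget;			// policy-private state
 *	};
 *
 *	static struct example_pd *pd_to_example(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct example_pd, pd) : NULL;
 *	}
 */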

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods. A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};
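
/*
 * Example (editorial sketch): per-blkcg data follows the same embedding
 * pattern as blkg_policy_data above; "example_cpd" is hypothetical.
 *
 *	struct example_cpd {
 *		struct blkcg_policy_data cpd;	// embedded at the beginning
 *		unsigned int dfl_weight;	// policy-private state
 *	};
 *
 *	static struct example_cpd *cpd_to_example(struct blkcg_policy_data *cpd)
 *	{
 *		return cpd ? container_of(cpd, struct example_cpd, cpd) : NULL;
 *	}
 */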

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};
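
/*
 * Example (editorial sketch): a minimal policy only needs the pd alloc/free
 * hooks filled in; .plid is assigned by blkcg_policy_register() (declared
 * below). All "example_*" symbols are hypothetical and build on the
 * example_pd sketch above.
 *
 *	static struct blkg_policy_data *example_pd_alloc(gfp_t gfp, int node)
 *	{
 *		struct example_pd *epd = kzalloc_node(sizeof(*epd), gfp, node);
 *
 *		return epd ? &epd->pd : NULL;
 *	}
 *
 *	static void example_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(pd_to_example(pd));
 *	}
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	// at init time: blkcg_policy_register(&blkcg_policy_example);
 */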

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
				      struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
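
/*
 * Example (editorial sketch): a cftype write handler brackets configuration
 * parsing with blkg_conf_prep()/blkg_conf_finish(), which pin the blkg while
 * ctx.body is parsed. "blkcg_policy_example" is the hypothetical policy from
 * the sketch above.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_example, input, &ctx);
 *	if (ret)
 *		return ret;
 *	// ctx.blkg is valid here; parse ctx.body and update its pd
 *	blkg_conf_finish(&ctx);
 */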

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget
 * logic to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}
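
/*
 * Example (editorial sketch): because blkcg_css() may return a dying css,
 * a caller that wants to hold on to it needs tryget logic, e.g.:
 *
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	css = blkcg_css();
 *	if (!css_tryget_online(css))
 *		css = NULL;	// css is dying, fall back as appropriate
 *	rcu_read_unlock();
 *	...
 *	if (css)
 *		css_put(css);
 */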

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use. The
 * first part of the function returns a blkcg where a reference is owned by
 * the bio. This means it does not need to be rcu protected as it cannot go
 * away with the bio owning a reference to it. However, the latter
 * potentially gets it from task_css(). This can race against task migration
 * and the cgroup dying. It is also semantically different as it must be
 * called rcu protected and is susceptible to failure when trying to get a
 * reference to it. Therefore, it is not ok to assume that *_get() will
 * always succeed on the blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * Return %true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as
 * if it were attached to the root blkg, and then backcharge to the actual
 * owning blkg. The idea is we do bio_blkcg() to look up the actual context
 * for the bio and attach the appropriate blkg to the bio. Then we call this
 * helper and if it is true run with the root blkg for that queue and then do
 * any backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg. Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update the lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations. It looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state. If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair. This function should be called
 * under an RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}
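
/*
 * Example (editorial sketch): lookups are only stable inside the RCU
 * read-side critical section; pin the result with blkg_tryget() (defined
 * below) if it is used after rcu_read_unlock():
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && !blkg_tryget(blkg))
 *		blkg = NULL;
 *	rcu_read_unlock();
 */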

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level. See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data. Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wbs related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);
}

/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wbs related to a blkcg.
 * When this count goes to zero, all active wbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}
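
/*
 * Example (editorial sketch): writeback code pairs these so that blkg
 * destruction is deferred until the last wb attached to the blkcg is gone:
 *
 *	blkcg_cgwb_get(blkcg);	// when a wb is created
 *	...
 *	blkcg_cgwb_put(blkcg);	// on wb release; the final put calls
 *				// blkcg_destroy_blkgs()
 */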

#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* wb isn't being accounted, so trigger destruction right away */
	blkcg_destroy_blkgs(blkcg);
}

#endif

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg. We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @blkg: blkg to get
 *
 * This needs to be called RCU protected. As the failure mode here is to
 * walk up the blkg tree, this ensures that the blkg->parent pointers are
 * always valid. This returns the blkg that it ended up taking a reference
 * on or %NULL if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
{
	struct blkcg_gq *ret_blkg = NULL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}

	return ret_blkg;
}
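
/*
 * Example (editorial sketch): pin the closest live blkg under RCU and
 * release it later with blkg_put() (defined below):
 *
 *	rcu_read_lock();
 *	blkg = blkg_tryget_closest(blkg_lookup(blkcg, q));
 *	rcu_read_unlock();
 *	if (blkg) {
 *		// the reference remains valid after rcu_read_unlock()
 *		blkg_put(blkg);
 *	}
 */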

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
 * read locked. If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs. The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
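
/*
 * Example (editorial sketch): summing a per-blkg value over a subtree with
 * the pre-order walk; "val_of" is a hypothetical accessor.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *	u64 sum = 0;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, blkg)
 *		sum += val_of(d_blkg);
 *	rcu_read_unlock();
 */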

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead. Synchronization rules are the same. @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}
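
/*
 * Example (editorial sketch): typical blkg_stat lifecycle inside a policy's
 * pd hooks; "st" and "sf" stand in for policy-private state and a seq_file.
 *
 *	if (blkg_stat_init(&st, GFP_KERNEL))		// pd_alloc_fn()
 *		return NULL;
 *	blkg_stat_add(&st, 1);				// hot path
 *	seq_printf(sf, "%llu\n",
 *		   (unsigned long long)blkg_stat_read(&st));
 *	blkg_stat_exit(&st);				// pd_free_fn()
 */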

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat. The counters are chosen according to @op. The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_discard(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
	else if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}
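
/*
 * Example (editorial sketch, mirroring blkcg_bio_issue_check() below):
 * accounting a bio against a blkg's rwstats, keyed by the bio's op and
 * flags:
 *
 *	blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf, bio->bi_iter.bi_size);
 *	blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
 */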

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction. This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
	       atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}
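
/*
 * Example (editorial sketch): a pd_offline_fn() can use the aux transfer to
 * keep a dying child's counts visible in its parent's recursive stats. The
 * "rwstat" field and example_* helpers are hypothetical (see the sketches
 * above).
 *
 *	static void example_pd_offline(struct blkg_policy_data *pd)
 *	{
 *		struct blkcg_gq *parent = pd->blkg->parent;
 *
 *		if (parent)
 *			blkg_rwstat_add_aux(
 *				&pd_to_example(parent->pd[pd->plid])->rwstat,
 *				&pd_to_example(pd)->rwstat);
 *	}
 */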

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q,
				  struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();

	if (!bio->bi_blkg) {
		char b[BDEVNAME_SIZE];

		WARN_ONCE(1,
			  "no blkg associated for bio on block-device: %s\n",
			  bio_devname(bio, b));
		bio_associate_blkg(bio);
	}

	blkg = bio->bi_blkg;

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		/*
		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
		 * is a split bio and we would have already accounted for the
		 * size of the bio.
		 */
		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	blkcg_bio_issue_init(bio);

	rcu_read_unlock();
	return !throtl;
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay. If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble. We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}
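
/*
 * Example (editorial sketch): use_delay is a counted state, so every
 * blkcg_use_delay() should eventually be balanced by blkcg_unuse_delay()
 * (or wiped by blkcg_clear_delay() below) to keep the cgroup's
 * congestion_count accurate:
 *
 *	blkcg_use_delay(blkg);		// first user marks the cgroup congested
 *	...
 *	blkcg_unuse_delay(blkg);	// last user clears the congestion
 */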

static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (!old)
		return;
	/* We only want 1 person clearing the congestion count for this blkg. */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}

void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q,
					   bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key)
{ return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{ return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */