// SPDX-License-Identifier: GPL-2.0-only

#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/blk-cgroup.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>
#include "internal.h"

struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

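/* Dump per-wb counters, dirty thresholds and inode list lengths for debugfs "stats". */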
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
			    &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

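/* sysfs store handler: parse a decimal kB value and convert it to ra_pages. */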
static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

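/*
 * Generate the sysfs ->show() handler for a bdi attribute and declare it
 * read-write; the matching name##_store() must already be defined.
 */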
#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return sysfs_emit(buf, "%lld\n", (long long)expr);		\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio / BDI_RATIO_SCALE)

static ssize_t min_ratio_fine_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio_no_scale(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio_fine, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio / BDI_RATIO_SCALE)

static ssize_t max_ratio_fine_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio_no_scale(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio_fine, bdi->max_ratio)

static ssize_t min_bytes_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", bdi_get_min_bytes(bdi));
}

static ssize_t min_bytes_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	u64 bytes;
	ssize_t ret;

	ret = kstrtoull(buf, 10, &bytes);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_bytes(bdi, bytes);
	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(min_bytes);

static ssize_t max_bytes_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", bdi_get_max_bytes(bdi));
}

static ssize_t max_bytes_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	u64 bytes;
	ssize_t ret;

	ret = kstrtoull(buf, 10, &bytes);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_bytes(bdi, bytes);
	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(max_bytes);

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev,
		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
	return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static ssize_t strict_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int strict_limit;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &strict_limit);
	if (ret < 0)
		return ret;

	ret = bdi_set_strict_limit(bdi, strict_limit);
	if (!ret)
		ret = count;

	return ret;
}

static ssize_t strict_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n",
			!!(bdi->capabilities & BDI_CAP_STRICTLIMIT));
}
static DEVICE_ATTR_RW(strict_limit);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_min_ratio_fine.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_max_ratio_fine.attr,
	&dev_attr_min_bytes.attr,
	&dev_attr_max_bytes.attr,
	&dev_attr_stable_pages_required.attr,
	&dev_attr_strict_limit.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static const struct class bdi_class = {
	.name		= "bdi",
	.dev_groups	= bdi_dev_groups,
};

static __init int bdi_class_init(void)
{
	int ret;

	ret = class_register(&bdi_class);
	if (ret)
		return ret;

	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_irq(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_irq(&wb->work_lock);
}

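/* Delayed-work callback that re-estimates the wb's write bandwidth. */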
static void wb_update_bandwidth_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, bw_dwork);

	wb_update_bandwidth(wb);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

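/*
 * Initialize a bdi_writeback: zero it, set up the dirty inode lists, seed
 * the bandwidth estimates with INIT_BW and allocate the percpu counters.
 */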
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	atomic_set(&wb->writeback_inodes, 0);
	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);
	wb->dirty_sleep = jiffies;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		return err;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
	return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_irq(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_irq(&wb->work_lock);
		return;
	}
	spin_unlock_irq(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work.  !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
	flush_delayed_work(&wb->bw_dwork);
}

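/* Free the percpu state allocated by wb_init(). */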
static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
 * memcg->cgwb_list.  bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static LIST_HEAD(offline_cgwbs);
static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);

static void cgwb_free_rcu(struct rcu_head *rcu_head)
{
	struct bdi_writeback *wb = container_of(rcu_head,
			struct bdi_writeback, rcu);

	percpu_ref_exit(&wb->refcnt);
	kfree(wb);
}

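/*
 * Final teardown of a cgroup wb, run from cgwb_release_wq once its percpu
 * refcount has been killed and dropped to zero.
 */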
static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	/* triggers blkg destruction if no online users left */
	blkcg_unpin_online(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);

	spin_lock_irq(&cgwb_lock);
	list_del(&wb->offline_node);
	spin_unlock_irq(&cgwb_lock);

	wb_exit(wb);
	bdi_put(bdi);
	WARN_ON_ONCE(!list_empty(&wb->b_attached));
	call_rcu(&wb->rcu, cgwb_free_rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}

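/*
 * Unlink @wb from its bdi's radix tree and its cgroup lists, queue it on
 * offline_cgwbs and start killing its refcount.  Caller must hold cgwb_lock.
 */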
static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	list_add(&wb->offline_node, &offline_cgwbs);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}

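/*
 * Allocate and install a new wb for @memcg_css on @bdi.  Returns 0 both on
 * success and when the creation race is lost to a concurrent creator, in
 * which case the caller is expected to retry the lookup; negative errno
 * otherwise.
 */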
static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	memcg_cgwb_list = &memcg->cgwb_list;
	blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css);

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_LIST_HEAD(&wb->b_attached);
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);
	bdi_get(bdi);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			blkcg_pin_online(blkcg_css);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	bdi_put(bdi);
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!memcg_css->parent)
		return &bdi->wb;

	rcu_read_lock();
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb) {
		struct cgroup_subsys_state *blkcg_css;

		/* see whether the blkcg association has changed */
		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
			wb = NULL;
		css_put(blkcg_css);
	}
	rcu_read_unlock();

	return wb;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  See wb_get_lookup() for more details.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_alloc(gfp);

	do {
		wb = wb_get_lookup(bdi, memcg_css);
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

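/*
 * Kill all cgroup wbs on @bdi and wait for each of them to finish shutting
 * down.  Called from bdi_unregister() after the root wb has been shut down.
 */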
static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}

/*
 * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
 *
 * Try to release dying cgwbs by switching attached inodes to the nearest
 * living ancestor's writeback.  Processed wbs are placed at the end
 * of the list to guarantee forward progress.
 */
static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb;
	LIST_HEAD(processed);

	spin_lock_irq(&cgwb_lock);

	while (!list_empty(&offline_cgwbs)) {
		wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
				      offline_node);
		list_move(&wb->offline_node, &processed);

		/*
		 * If wb is dirty, cleaning up the writeback by switching
		 * attached inodes will result in an effective removal of any
		 * bandwidth restrictions, which isn't the goal.  Instead,
		 * it can be postponed until the next time, when all io
		 * will be likely completed.  If in the meantime some inodes
		 * will get re-dirtied, they should be eventually switched to
		 * a new cgwb.
		 */
		if (wb_has_dirty_io(wb))
			continue;

		if (!wb_tryget(wb))
			continue;

		spin_unlock_irq(&cgwb_lock);
		while (cleanup_offline_cgwb(wb))
			cond_resched();
		spin_lock_irq(&cgwb_lock);

		wb_put(wb);
	}

	if (!list_empty(&processed))
		list_splice_tail(&processed, &offline_cgwbs);

	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);

	queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @css: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @css.
 */
void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
	struct bdi_writeback *wb, *next;
	struct list_head *list = blkcg_get_cgwb_list(css);

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, list, blkcg_node)
		cgwb_kill(wb);
	list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq.  Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

int bdi_init(struct backing_dev_info *bdi)
{
	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100 * BDI_RATIO_SCALE;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	return cgwb_bdi_init(bdi);
}

struct backing_dev_info *bdi_alloc(int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
	bdi->ra_pages = VM_READAHEAD_PAGES;
	bdi->io_pages = VM_READAHEAD_PAGES;
	timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0);
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc);

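/*
 * Find the rb_tree slot for @id in bdi_tree, or the insertion point if no
 * bdi with that id exists.  Must be called with bdi_lock held.
 */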
static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
	struct rb_node **p = &bdi_tree.rb_node;
	struct rb_node *parent = NULL;
	struct backing_dev_info *bdi;

	lockdep_assert_held(&bdi_lock);

	while (*p) {
		parent = *p;
		bdi = rb_entry(parent, struct backing_dev_info, rb_node);

		if (bdi->id > id)
			p = &(*p)->rb_left;
		else if (bdi->id < id)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
	struct backing_dev_info *bdi = NULL;
	struct rb_node **p;

	spin_lock_bh(&bdi_lock);
	p = bdi_lookup_rb_node(id, NULL);
	if (*p) {
		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
		bdi_get(bdi);
	}
	spin_unlock_bh(&bdi_lock);

	return bdi;
}

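/*
 * Register @bdi: create its device, hook it into debugfs and the cgwb
 * machinery, assign a new id and link it into bdi_tree and bdi_list.
 */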
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;
	struct rb_node *parent, **p;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
	dev = device_create(&bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);

	bdi->id = ++bdi_id_cursor;

	p = bdi_lookup_rb_node(bdi->id, &parent);
	rb_link_node(&bdi->rb_node, parent, p);
	rb_insert_color(&bdi->rb_node, &bdi_tree);

	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);

	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);

void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
{
	WARN_ON_ONCE(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	rb_erase(&bdi->rb_node, &bdi_tree);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	del_timer_sync(&bdi->laptop_mode_wb_timer);

	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	/*
	 * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
	 * update the global bdi_min_ratio.
	 */
	if (bdi->min_ratio)
		bdi_set_min_ratio(bdi, 0);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);

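/* kref release callback: free the bdi once the last reference is dropped. */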
static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
		container_of(ref, struct backing_dev_info, refcnt);

	WARN_ON_ONCE(test_bit(WB_registered, &bdi->wb.state));
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_disk->bdi;
#endif
	return sb->s_bdi;
}
EXPORT_SYMBOL(inode_to_bdi);

const char *bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return bdi->dev_name;
}
EXPORT_SYMBOL_GPL(bdi_dev_name);