1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * bcache setup/teardown code, and some metadata io - read a superblock and |
4 | * figure out what to do with it. |
5 | * |
6 | * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> |
7 | * Copyright 2012 Google, Inc. |
8 | */ |
9 | |
10 | #include "bcache.h" |
11 | #include "btree.h" |
12 | #include "debug.h" |
13 | #include "extents.h" |
14 | #include "request.h" |
15 | #include "writeback.h" |
16 | #include "features.h" |
17 | |
18 | #include <linux/blkdev.h> |
19 | #include <linux/pagemap.h> |
20 | #include <linux/debugfs.h> |
21 | #include <linux/idr.h> |
22 | #include <linux/kthread.h> |
23 | #include <linux/workqueue.h> |
24 | #include <linux/module.h> |
25 | #include <linux/random.h> |
26 | #include <linux/reboot.h> |
27 | #include <linux/sysfs.h> |
28 | |
29 | unsigned int bch_cutoff_writeback; |
30 | unsigned int bch_cutoff_writeback_sync; |
31 | |
32 | static const char bcache_magic[] = { |
33 | 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca, |
34 | 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81 |
35 | }; |
36 | |
37 | static const char invalid_uuid[] = { |
38 | 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78, |
39 | 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99 |
40 | }; |
41 | |
42 | static struct kobject *bcache_kobj; |
43 | struct mutex bch_register_lock; |
44 | bool bcache_is_reboot; |
45 | LIST_HEAD(bch_cache_sets); |
46 | static LIST_HEAD(uncached_devices); |
47 | |
48 | static int bcache_major; |
49 | static DEFINE_IDA(bcache_device_idx); |
50 | static wait_queue_head_t unregister_wait; |
51 | struct workqueue_struct *bcache_wq; |
52 | struct workqueue_struct *bch_flush_wq; |
53 | struct workqueue_struct *bch_journal_wq; |
54 | |
55 | |
56 | #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) |
57 | /* limitation of partitions number on single bcache device */ |
58 | #define BCACHE_MINORS 128 |
59 | /* limitation of bcache devices number on single system */ |
60 | #define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS) |
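/*
* Example: with MINORBITS == 20 in the core kernel and BCACHE_MINORS == 128
* minors reserved per device for partitions, BCACHE_DEVICE_IDX_MAX works out
* to (1 << 20) / 128 = 8192 bcache devices per system.
*/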
61 | |
62 | /* Superblock */ |
63 | |
64 | static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s) |
65 | { |
66 | unsigned int bucket_size = le16_to_cpu(s->bucket_size); |
67 | |
68 | if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) { |
69 | if (bch_has_feature_large_bucket(sb)) { |
70 | unsigned int max, order; |
71 | |
72 | max = sizeof(unsigned int) * BITS_PER_BYTE - 1; |
73 | order = le16_to_cpu(s->bucket_size); |
74 | /* |
75 | * bcache tool will make sure the overflow won't |
76 | * happen, an error message here is enough. |
77 | */ |
78 | if (order > max) |
79 | pr_err("Bucket size (1 << %u) overflows\n" , |
80 | order); |
81 | bucket_size = 1 << order; |
82 | } else if (bch_has_feature_obso_large_bucket(sb)) { |
83 | bucket_size += |
84 | le16_to_cpu(s->obso_bucket_size_hi) << 16; |
85 | } |
86 | } |
87 | |
88 | return bucket_size; |
89 | } |
90 | |
91 | static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev, |
92 | struct cache_sb_disk *s) |
93 | { |
94 | const char *err; |
95 | unsigned int i; |
96 | |
sb->first_bucket = le16_to_cpu(s->first_bucket);
98 | sb->nbuckets = le64_to_cpu(s->nbuckets); |
99 | sb->bucket_size = get_bucket_size(sb, s); |
100 | |
101 | sb->nr_in_set = le16_to_cpu(s->nr_in_set); |
102 | sb->nr_this_dev = le16_to_cpu(s->nr_this_dev); |
103 | |
err = "Too many journal buckets";
if (sb->keys > SB_JOURNAL_BUCKETS)
goto err;

err = "Too many buckets";
if (sb->nbuckets > LONG_MAX)
goto err;

err = "Not enough buckets";
if (sb->nbuckets < 1 << 7)
goto err;

err = "Bad block size (not power of 2)";
if (!is_power_of_2(sb->block_size))
goto err;

err = "Bad block size (larger than page size)";
if (sb->block_size > PAGE_SECTORS)
goto err;

err = "Bad bucket size (not power of 2)";
if (!is_power_of_2(sb->bucket_size))
goto err;

err = "Bad bucket size (smaller than page size)";
if (sb->bucket_size < PAGE_SECTORS)
goto err;

err = "Invalid superblock: device too small";
if (get_capacity(bdev->bd_disk) <
sb->bucket_size * sb->nbuckets)
goto err;

err = "Bad UUID";
if (bch_is_zero(sb->set_uuid, 16))
goto err;

err = "Bad cache device number in set";
if (!sb->nr_in_set ||
sb->nr_in_set <= sb->nr_this_dev ||
sb->nr_in_set > MAX_CACHES_PER_SET)
goto err;

err = "Journal buckets not sequential";
for (i = 0; i < sb->keys; i++)
if (sb->d[i] != sb->first_bucket + i)
goto err;

err = "Too many journal buckets";
if (sb->first_bucket + sb->keys > sb->nbuckets)
goto err;

err = "Invalid superblock: first bucket comes before end of super";
if (sb->first_bucket * sb->bucket_size < 16)
goto err;
159 | |
160 | err = NULL; |
161 | err: |
162 | return err; |
163 | } |
164 | |
165 | |
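/*
* read_super() reads the superblock page through the block device's page
* cache, converts the little-endian on-disk fields into the in-memory
* struct cache_sb and runs per-version validation. On success the page
* holding the on-disk superblock stays referenced and is returned via
* *res so it can later be rewritten in place by __write_super().
*/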
166 | static const char *read_super(struct cache_sb *sb, struct block_device *bdev, |
167 | struct cache_sb_disk **res) |
168 | { |
169 | const char *err; |
170 | struct cache_sb_disk *s; |
171 | struct page *page; |
172 | unsigned int i; |
173 | |
page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
if (IS_ERR(page))
return "IO error";
178 | s = page_address(page) + offset_in_page(SB_OFFSET); |
179 | |
180 | sb->offset = le64_to_cpu(s->offset); |
181 | sb->version = le64_to_cpu(s->version); |
182 | |
183 | memcpy(sb->magic, s->magic, 16); |
184 | memcpy(sb->uuid, s->uuid, 16); |
185 | memcpy(sb->set_uuid, s->set_uuid, 16); |
186 | memcpy(sb->label, s->label, SB_LABEL_SIZE); |
187 | |
188 | sb->flags = le64_to_cpu(s->flags); |
189 | sb->seq = le64_to_cpu(s->seq); |
190 | sb->last_mount = le32_to_cpu(s->last_mount); |
191 | sb->keys = le16_to_cpu(s->keys); |
192 | |
193 | for (i = 0; i < SB_JOURNAL_BUCKETS; i++) |
194 | sb->d[i] = le64_to_cpu(s->d[i]); |
195 | |
pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n",
sb->version, sb->flags, sb->seq, sb->keys);

err = "Not a bcache superblock (bad offset)";
if (sb->offset != SB_SECTOR)
goto err;

err = "Not a bcache superblock (bad magic)";
if (memcmp(sb->magic, bcache_magic, 16))
goto err;

err = "Bad checksum";
if (s->csum != csum_set(s))
goto err;

err = "Bad UUID";
if (bch_is_zero(sb->uuid, 16))
goto err;

sb->block_size = le16_to_cpu(s->block_size);

err = "Superblock block size smaller than device block size";
if (sb->block_size << 9 < bdev_logical_block_size(bdev))
goto err;
220 | |
221 | switch (sb->version) { |
222 | case BCACHE_SB_VERSION_BDEV: |
223 | sb->data_offset = BDEV_DATA_START_DEFAULT; |
224 | break; |
225 | case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: |
226 | case BCACHE_SB_VERSION_BDEV_WITH_FEATURES: |
227 | sb->data_offset = le64_to_cpu(s->data_offset); |
228 | |
err = "Bad data offset";
230 | if (sb->data_offset < BDEV_DATA_START_DEFAULT) |
231 | goto err; |
232 | |
233 | break; |
234 | case BCACHE_SB_VERSION_CDEV: |
235 | case BCACHE_SB_VERSION_CDEV_WITH_UUID: |
236 | err = read_super_common(sb, bdev, s); |
237 | if (err) |
238 | goto err; |
239 | break; |
240 | case BCACHE_SB_VERSION_CDEV_WITH_FEATURES: |
241 | /* |
242 | * Feature bits are needed in read_super_common(), |
243 | * convert them firstly. |
244 | */ |
245 | sb->feature_compat = le64_to_cpu(s->feature_compat); |
246 | sb->feature_incompat = le64_to_cpu(s->feature_incompat); |
247 | sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat); |
248 | |
249 | /* Check incompatible features */ |
err = "Unsupported compatible feature found";
if (bch_has_unknown_compat_features(sb))
goto err;

err = "Unsupported read-only compatible feature found";
if (bch_has_unknown_ro_compat_features(sb))
goto err;

err = "Unsupported incompatible feature found";
259 | if (bch_has_unknown_incompat_features(sb)) |
260 | goto err; |
261 | |
262 | err = read_super_common(sb, bdev, s); |
263 | if (err) |
264 | goto err; |
265 | break; |
266 | default: |
err = "Unsupported superblock version";
268 | goto err; |
269 | } |
270 | |
271 | sb->last_mount = (u32)ktime_get_real_seconds(); |
272 | *res = s; |
273 | return NULL; |
274 | err: |
275 | put_page(page); |
276 | return err; |
277 | } |
278 | |
279 | static void write_bdev_super_endio(struct bio *bio) |
280 | { |
281 | struct cached_dev *dc = bio->bi_private; |
282 | |
283 | if (bio->bi_status) |
284 | bch_count_backing_io_errors(dc, bio); |
285 | |
closure_put(&dc->sb_write);
287 | } |
288 | |
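/*
* __write_super() is the reverse of read_super(): it encodes the in-memory
* struct cache_sb back into the on-disk struct cache_sb_disk (converting to
* little-endian), recomputes the checksum last, and submits a single
* REQ_META write to SB_SECTOR on the caller's bio.
*/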
289 | static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out, |
290 | struct bio *bio) |
291 | { |
292 | unsigned int i; |
293 | |
294 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META; |
295 | bio->bi_iter.bi_sector = SB_SECTOR; |
296 | __bio_add_page(bio, virt_to_page(out), SB_SIZE, |
297 | offset_in_page(out)); |
298 | |
299 | out->offset = cpu_to_le64(sb->offset); |
300 | |
301 | memcpy(out->uuid, sb->uuid, 16); |
302 | memcpy(out->set_uuid, sb->set_uuid, 16); |
303 | memcpy(out->label, sb->label, SB_LABEL_SIZE); |
304 | |
305 | out->flags = cpu_to_le64(sb->flags); |
306 | out->seq = cpu_to_le64(sb->seq); |
307 | |
308 | out->last_mount = cpu_to_le32(sb->last_mount); |
309 | out->first_bucket = cpu_to_le16(sb->first_bucket); |
310 | out->keys = cpu_to_le16(sb->keys); |
311 | |
312 | for (i = 0; i < sb->keys; i++) |
313 | out->d[i] = cpu_to_le64(sb->d[i]); |
314 | |
315 | if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) { |
316 | out->feature_compat = cpu_to_le64(sb->feature_compat); |
317 | out->feature_incompat = cpu_to_le64(sb->feature_incompat); |
318 | out->feature_ro_compat = cpu_to_le64(sb->feature_ro_compat); |
319 | } |
320 | |
321 | out->version = cpu_to_le64(sb->version); |
322 | out->csum = csum_set(out); |
323 | |
pr_debug("ver %llu, flags %llu, seq %llu\n",
325 | sb->version, sb->flags, sb->seq); |
326 | |
327 | submit_bio(bio); |
328 | } |
329 | |
330 | static void bch_write_bdev_super_unlock(struct closure *cl) |
331 | { |
332 | struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); |
333 | |
up(&dc->sb_write_mutex);
335 | } |
336 | |
337 | void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) |
338 | { |
339 | struct closure *cl = &dc->sb_write; |
340 | struct bio *bio = &dc->sb_bio; |
341 | |
down(&dc->sb_write_mutex);
closure_init(cl, parent);

bio_init(bio, dc->bdev, dc->sb_bv, 1, 0);
bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc;

closure_get(cl);
/* I/O request sent to backing device */
__write_super(&dc->sb, dc->sb_disk, bio);
352 | |
353 | closure_return_with_destructor(cl, bch_write_bdev_super_unlock); |
354 | } |
355 | |
356 | static void write_super_endio(struct bio *bio) |
357 | { |
358 | struct cache *ca = bio->bi_private; |
359 | |
360 | /* is_read = 0 */ |
bch_count_io_errors(ca, bio->bi_status, 0,
"writing superblock");
closure_put(&ca->set->sb_write);
364 | } |
365 | |
366 | static void bcache_write_super_unlock(struct closure *cl) |
367 | { |
368 | struct cache_set *c = container_of(cl, struct cache_set, sb_write); |
369 | |
up(&c->sb_write_mutex);
371 | } |
372 | |
373 | void bcache_write_super(struct cache_set *c) |
374 | { |
375 | struct closure *cl = &c->sb_write; |
376 | struct cache *ca = c->cache; |
377 | struct bio *bio = &ca->sb_bio; |
378 | unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID; |
379 | |
down(&c->sb_write_mutex);
closure_init(cl, &c->cl);

ca->sb.seq++;

if (ca->sb.version < version)
ca->sb.version = version;

bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;

closure_get(cl);
__write_super(&ca->sb, ca->sb_disk, bio);
394 | |
395 | closure_return_with_destructor(cl, bcache_write_super_unlock); |
396 | } |
397 | |
398 | /* UUID io */ |
399 | |
400 | static void uuid_endio(struct bio *bio) |
401 | { |
402 | struct closure *cl = bio->bi_private; |
403 | struct cache_set *c = container_of(cl, struct cache_set, uuid_write); |
404 | |
cache_set_err_on(bio->bi_status, c, "accessing uuids");
406 | bch_bbio_free(bio, c); |
407 | closure_put(cl); |
408 | } |
409 | |
410 | static void uuid_io_unlock(struct closure *cl) |
411 | { |
412 | struct cache_set *c = container_of(cl, struct cache_set, uuid_write); |
413 | |
up(&c->uuid_write_mutex);
415 | } |
416 | |
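/*
* uuid_io() reads or writes the uuid_entry array (c->uuids) at the location
* described by bkey k. A write is submitted to every pointer in the key
* (each replica), while a read only uses the first pointer - hence the
* early break in the loop below for the non-write case.
*/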
417 | static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k, |
418 | struct closure *parent) |
419 | { |
420 | struct closure *cl = &c->uuid_write; |
421 | struct uuid_entry *u; |
422 | unsigned int i; |
423 | char buf[80]; |
424 | |
425 | BUG_ON(!parent); |
down(&c->uuid_write_mutex);
427 | closure_init(cl, parent); |
428 | |
429 | for (i = 0; i < KEY_PTRS(k); i++) { |
430 | struct bio *bio = bch_bbio_alloc(c); |
431 | |
432 | bio->bi_opf = opf | REQ_SYNC | REQ_META; |
433 | bio->bi_iter.bi_size = KEY_SIZE(k) << 9; |
434 | |
435 | bio->bi_end_io = uuid_endio; |
436 | bio->bi_private = cl; |
bch_bio_map(bio, c->uuids);

bch_submit_bbio(bio, c, k, i);
440 | |
441 | if ((opf & REQ_OP_MASK) != REQ_OP_WRITE) |
442 | break; |
443 | } |
444 | |
bch_extent_to_text(buf, sizeof(buf), k);
pr_debug("%s UUIDs at %s\n", (opf & REQ_OP_MASK) == REQ_OP_WRITE ?
"wrote" : "read", buf);

for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
if (!bch_is_zero(u->uuid, 16))
pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n",
452 | u - c->uuids, u->uuid, u->label, |
453 | u->first_reg, u->last_reg, u->invalidated); |
454 | |
455 | closure_return_with_destructor(cl, uuid_io_unlock); |
456 | } |
457 | |
458 | static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) |
459 | { |
460 | struct bkey *k = &j->uuid_bucket; |
461 | |
462 | if (__bch_btree_ptr_invalid(c, k)) |
return "bad uuid pointer";

bkey_copy(&c->uuid_bucket, k);
uuid_io(c, REQ_OP_READ, k, cl);
467 | |
468 | if (j->version < BCACHE_JSET_VERSION_UUIDv1) { |
469 | struct uuid_entry_v0 *u0 = (void *) c->uuids; |
470 | struct uuid_entry *u1 = (void *) c->uuids; |
471 | int i; |
472 | |
473 | closure_sync(cl); |
474 | |
475 | /* |
476 | * Since the new uuid entry is bigger than the old, we have to |
477 | * convert starting at the highest memory address and work down |
478 | * in order to do it in place |
479 | */ |
480 | |
481 | for (i = c->nr_uuids - 1; |
482 | i >= 0; |
483 | --i) { |
484 | memcpy(u1[i].uuid, u0[i].uuid, 16); |
485 | memcpy(u1[i].label, u0[i].label, 32); |
486 | |
487 | u1[i].first_reg = u0[i].first_reg; |
488 | u1[i].last_reg = u0[i].last_reg; |
489 | u1[i].invalidated = u0[i].invalidated; |
490 | |
491 | u1[i].flags = 0; |
492 | u1[i].sectors = 0; |
493 | } |
494 | } |
495 | |
496 | return NULL; |
497 | } |
498 | |
499 | static int __uuid_write(struct cache_set *c) |
500 | { |
501 | BKEY_PADDED(key) k; |
502 | struct closure cl; |
503 | struct cache *ca = c->cache; |
504 | unsigned int size; |
505 | |
closure_init_stack(&cl);
lockdep_assert_held(&bch_register_lock);

if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
return 1;

size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
SET_KEY_SIZE(&k.key, size);
uuid_io(c, REQ_OP_WRITE, &k.key, &cl);
closure_sync(&cl);

/* Only one bucket used for uuid write */
atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);

bkey_copy(&c->uuid_bucket, &k.key);
bkey_put(c, &k.key);
522 | return 0; |
523 | } |
524 | |
525 | int bch_uuid_write(struct cache_set *c) |
526 | { |
527 | int ret = __uuid_write(c); |
528 | |
529 | if (!ret) |
530 | bch_journal_meta(c, NULL); |
531 | |
532 | return ret; |
533 | } |
534 | |
535 | static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) |
536 | { |
537 | struct uuid_entry *u; |
538 | |
539 | for (u = c->uuids; |
540 | u < c->uuids + c->nr_uuids; u++) |
if (!memcmp(u->uuid, uuid, 16))
542 | return u; |
543 | |
544 | return NULL; |
545 | } |
546 | |
547 | static struct uuid_entry *uuid_find_empty(struct cache_set *c) |
548 | { |
static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";

return uuid_find(c, zero_uuid);
552 | } |
553 | |
554 | /* |
555 | * Bucket priorities/gens: |
556 | * |
557 | * For each bucket, we store on disk its |
558 | * 8 bit gen |
559 | * 16 bit priority |
560 | * |
561 | * See alloc.c for an explanation of the gen. The priority is used to implement |
562 | * lru (and in the future other) cache replacement policies; for most purposes |
563 | * it's just an opaque integer. |
564 | * |
565 | * The gens and the priorities don't have a whole lot to do with each other, and |
566 | * it's actually the gens that must be written out at specific times - it's no |
567 | * big deal if the priorities don't get written, if we lose them we just reuse |
568 | * buckets in suboptimal order. |
569 | * |
* On disk they're stored in a packed array, in as many buckets as are required
* to fit them all. The buckets we use to store them form a list; the journal
572 | * header points to the first bucket, the first bucket points to the second |
573 | * bucket, et cetera. |
574 | * |
575 | * This code is used by the allocation code; periodically (whenever it runs out |
576 | * of buckets to allocate from) the allocation code will invalidate some |
577 | * buckets, but it can't use those buckets until their new gens are safely on |
578 | * disk. |
579 | */ |
580 | |
581 | static void prio_endio(struct bio *bio) |
582 | { |
583 | struct cache *ca = bio->bi_private; |
584 | |
cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
bch_bbio_free(bio, ca->set);
closure_put(&ca->prio);
588 | } |
589 | |
590 | static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf) |
591 | { |
592 | struct closure *cl = &ca->prio; |
struct bio *bio = bch_bbio_alloc(ca->set);

closure_init_stack(cl);

bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb);

bio->bi_end_io = prio_endio;
bio->bi_private = ca;
bio->bi_opf = opf | REQ_SYNC | REQ_META;
bch_bio_map(bio, ca->disk_buckets);

closure_bio_submit(ca->set, bio, &ca->prio);
607 | closure_sync(cl); |
608 | } |
609 | |
610 | int bch_prio_write(struct cache *ca, bool wait) |
611 | { |
612 | int i; |
613 | struct bucket *b; |
614 | struct closure cl; |
615 | |
pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n",
617 | fifo_used(&ca->free[RESERVE_PRIO]), |
618 | fifo_used(&ca->free[RESERVE_NONE]), |
619 | fifo_used(&ca->free_inc)); |
620 | |
621 | /* |
622 | * Pre-check if there are enough free buckets. In the non-blocking |
623 | * scenario it's better to fail early rather than starting to allocate |
624 | * buckets and do a cleanup later in case of failure. |
625 | */ |
626 | if (!wait) { |
627 | size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) + |
628 | fifo_used(&ca->free[RESERVE_NONE]); |
629 | if (prio_buckets(ca) > avail) |
630 | return -ENOMEM; |
631 | } |
632 | |
closure_init_stack(&cl);

lockdep_assert_held(&ca->set->bucket_lock);

ca->disk_buckets->seq++;

atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
&ca->meta_sectors_written);
641 | |
642 | for (i = prio_buckets(ca) - 1; i >= 0; --i) { |
643 | long bucket; |
644 | struct prio_set *p = ca->disk_buckets; |
645 | struct bucket_disk *d = p->data; |
646 | struct bucket_disk *end = d + prios_per_bucket(ca); |
647 | |
648 | for (b = ca->buckets + i * prios_per_bucket(ca); |
649 | b < ca->buckets + ca->sb.nbuckets && d < end; |
650 | b++, d++) { |
651 | d->prio = cpu_to_le16(b->prio); |
652 | d->gen = b->gen; |
653 | } |
654 | |
655 | p->next_bucket = ca->prio_buckets[i + 1]; |
p->magic = pset_magic(&ca->sb);
p->csum = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8);

bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
BUG_ON(bucket == -1);

mutex_unlock(&ca->set->bucket_lock);
prio_io(ca, bucket, REQ_OP_WRITE);
664 | mutex_lock(&ca->set->bucket_lock); |
665 | |
666 | ca->prio_buckets[i] = bucket; |
667 | atomic_dec_bug(&ca->buckets[bucket].pin); |
668 | } |
669 | |
mutex_unlock(&ca->set->bucket_lock);

bch_journal_meta(ca->set, &cl);
closure_sync(&cl);

mutex_lock(&ca->set->bucket_lock);
676 | |
677 | /* |
678 | * Don't want the old priorities to get garbage collected until after we |
679 | * finish writing the new ones, and they're journalled |
680 | */ |
681 | for (i = 0; i < prio_buckets(ca); i++) { |
682 | if (ca->prio_last_buckets[i]) |
__bch_bucket_free(ca,
&ca->buckets[ca->prio_last_buckets[i]]);
685 | |
686 | ca->prio_last_buckets[i] = ca->prio_buckets[i]; |
687 | } |
688 | return 0; |
689 | } |
690 | |
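/*
* prio_read() walks the on-disk bucket list written by bch_prio_write(),
* starting from the bucket recorded in the journal, and restores prio and
* gen for every bucket in the cache. Each prio bucket is checksum- and
* magic-checked before its entries are used.
*/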
691 | static int prio_read(struct cache *ca, uint64_t bucket) |
692 | { |
693 | struct prio_set *p = ca->disk_buckets; |
694 | struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; |
695 | struct bucket *b; |
696 | unsigned int bucket_nr = 0; |
697 | int ret = -EIO; |
698 | |
699 | for (b = ca->buckets; |
700 | b < ca->buckets + ca->sb.nbuckets; |
701 | b++, d++) { |
702 | if (d == end) { |
703 | ca->prio_buckets[bucket_nr] = bucket; |
704 | ca->prio_last_buckets[bucket_nr] = bucket; |
705 | bucket_nr++; |
706 | |
prio_io(ca, bucket, REQ_OP_READ);

if (p->csum !=
bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
pr_warn("bad csum reading priorities\n");
goto out;
}

if (p->magic != pset_magic(&ca->sb)) {
pr_warn("bad magic reading priorities\n");
717 | goto out; |
718 | } |
719 | |
720 | bucket = p->next_bucket; |
721 | d = p->data; |
722 | } |
723 | |
724 | b->prio = le16_to_cpu(d->prio); |
725 | b->gen = b->last_gc = d->gen; |
726 | } |
727 | |
728 | ret = 0; |
729 | out: |
730 | return ret; |
731 | } |
732 | |
733 | /* Bcache device */ |
734 | |
735 | static int open_dev(struct gendisk *disk, blk_mode_t mode) |
736 | { |
737 | struct bcache_device *d = disk->private_data; |
738 | |
739 | if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) |
740 | return -ENXIO; |
741 | |
closure_get(&d->cl);
743 | return 0; |
744 | } |
745 | |
746 | static void release_dev(struct gendisk *b) |
747 | { |
748 | struct bcache_device *d = b->private_data; |
749 | |
closure_put(&d->cl);
751 | } |
752 | |
753 | static int ioctl_dev(struct block_device *b, blk_mode_t mode, |
754 | unsigned int cmd, unsigned long arg) |
755 | { |
756 | struct bcache_device *d = b->bd_disk->private_data; |
757 | |
758 | return d->ioctl(d, mode, cmd, arg); |
759 | } |
760 | |
761 | static const struct block_device_operations bcache_cached_ops = { |
762 | .submit_bio = cached_dev_submit_bio, |
763 | .open = open_dev, |
764 | .release = release_dev, |
765 | .ioctl = ioctl_dev, |
766 | .owner = THIS_MODULE, |
767 | }; |
768 | |
769 | static const struct block_device_operations bcache_flash_ops = { |
770 | .submit_bio = flash_dev_submit_bio, |
771 | .open = open_dev, |
772 | .release = release_dev, |
773 | .ioctl = ioctl_dev, |
774 | .owner = THIS_MODULE, |
775 | }; |
776 | |
777 | void bcache_device_stop(struct bcache_device *d) |
778 | { |
if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
/*
* closure_fn set to
* - cached device: cached_dev_flush()
* - flash dev: flash_dev_flush()
*/
closure_queue(&d->cl);
786 | } |
787 | |
788 | static void bcache_device_unlink(struct bcache_device *d) |
789 | { |
790 | lockdep_assert_held(&bch_register_lock); |
791 | |
if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
struct cache *ca = d->c->cache;

sysfs_remove_link(&d->c->kobj, d->name);
sysfs_remove_link(&d->kobj, "cache");

bd_unlink_disk_holder(ca->bdev, d->disk);
799 | } |
800 | } |
801 | |
802 | static void bcache_device_link(struct bcache_device *d, struct cache_set *c, |
803 | const char *name) |
804 | { |
805 | struct cache *ca = c->cache; |
806 | int ret; |
807 | |
bd_link_disk_holder(ca->bdev, d->disk);

snprintf(d->name, BCACHEDEVNAME_SIZE,
"%s%u", name, d->id);

ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
if (ret < 0)
pr_err("Couldn't create device -> cache set symlink\n");

ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
if (ret < 0)
pr_err("Couldn't create cache set -> device symlink\n");

clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
822 | } |
823 | |
824 | static void bcache_device_detach(struct bcache_device *d) |
825 | { |
826 | lockdep_assert_held(&bch_register_lock); |
827 | |
atomic_dec(&d->c->attached_dev_nr);

if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
struct uuid_entry *u = d->c->uuids + d->id;

SET_UUID_FLASH_ONLY(u, 0);
memcpy(u->uuid, invalid_uuid, 16);
u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
bch_uuid_write(d->c);
}

bcache_device_unlink(d);

d->c->devices[d->id] = NULL;
closure_put(&d->c->caching);
843 | d->c = NULL; |
844 | } |
845 | |
846 | static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, |
847 | unsigned int id) |
848 | { |
849 | d->id = id; |
850 | d->c = c; |
851 | c->devices[id] = d; |
852 | |
853 | if (id >= c->devices_max_used) |
854 | c->devices_max_used = id + 1; |
855 | |
closure_get(&c->caching);
857 | } |
858 | |
859 | static inline int first_minor_to_idx(int first_minor) |
860 | { |
861 | return (first_minor/BCACHE_MINORS); |
862 | } |
863 | |
864 | static inline int idx_to_first_minor(int idx) |
865 | { |
866 | return (idx * BCACHE_MINORS); |
867 | } |
868 | |
869 | static void bcache_device_free(struct bcache_device *d) |
870 | { |
871 | struct gendisk *disk = d->disk; |
872 | |
873 | lockdep_assert_held(&bch_register_lock); |
874 | |
875 | if (disk) |
pr_info("%s stopped\n", disk->disk_name);
else
pr_err("bcache device (NULL gendisk) stopped\n");

if (d->c)
bcache_device_detach(d);

if (disk) {
ida_simple_remove(&bcache_device_idx,
first_minor_to_idx(disk->first_minor));
put_disk(disk);
}

bioset_exit(&d->bio_split);
kvfree(d->full_dirty_stripes);
kvfree(d->stripe_sectors_dirty);

closure_debug_destroy(&d->cl);
894 | } |
895 | |
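/*
* Common initialisation shared by cached devices and flash-only volumes:
* allocate the dirty-stripe bookkeeping arrays, reserve a device index
* (and with it a range of minor numbers) from bcache_device_idx, set up
* the bio split pool and the gendisk, and apply the queue limits.
* cached_bdev is NULL for flash-only volumes.
*/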
896 | static int bcache_device_init(struct bcache_device *d, unsigned int block_size, |
897 | sector_t sectors, struct block_device *cached_bdev, |
898 | const struct block_device_operations *ops) |
899 | { |
900 | struct request_queue *q; |
901 | const size_t max_stripes = min_t(size_t, INT_MAX, |
902 | SIZE_MAX / sizeof(atomic_t)); |
903 | uint64_t n; |
904 | int idx; |
905 | |
906 | if (!d->stripe_size) |
907 | d->stripe_size = 1 << 31; |
908 | |
909 | n = DIV_ROUND_UP_ULL(sectors, d->stripe_size); |
910 | if (!n || n > max_stripes) { |
pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n",
n);
return -ENOMEM;
}
d->nr_stripes = n;

n = d->nr_stripes * sizeof(atomic_t);
d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
if (!d->stripe_sectors_dirty)
return -ENOMEM;

n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
if (!d->full_dirty_stripes)
goto out_free_stripe_sectors_dirty;

idx = ida_simple_get(&bcache_device_idx, 0,
BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
if (idx < 0)
goto out_free_full_dirty_stripes;

if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
goto out_ida_remove;

d->disk = blk_alloc_disk(NUMA_NO_NODE);
if (!d->disk)
goto out_bioset_exit;

set_capacity(d->disk, sectors);
snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
942 | |
943 | d->disk->major = bcache_major; |
944 | d->disk->first_minor = idx_to_first_minor(idx); |
945 | d->disk->minors = BCACHE_MINORS; |
946 | d->disk->fops = ops; |
947 | d->disk->private_data = d; |
948 | |
949 | q = d->disk->queue; |
950 | q->limits.max_hw_sectors = UINT_MAX; |
951 | q->limits.max_sectors = UINT_MAX; |
952 | q->limits.max_segment_size = UINT_MAX; |
953 | q->limits.max_segments = BIO_MAX_VECS; |
954 | blk_queue_max_discard_sectors(q, UINT_MAX); |
955 | q->limits.discard_granularity = 512; |
956 | q->limits.io_min = block_size; |
957 | q->limits.logical_block_size = block_size; |
958 | q->limits.physical_block_size = block_size; |
959 | |
960 | if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) { |
961 | /* |
962 | * This should only happen with BCACHE_SB_VERSION_BDEV. |
963 | * Block/page size is checked for BCACHE_SB_VERSION_CDEV. |
964 | */ |
pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
d->disk->disk_name, q->limits.logical_block_size,
PAGE_SIZE, bdev_logical_block_size(cached_bdev));

/* This also adjusts physical block size/min io size if needed */
blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
}

blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);

blk_queue_write_cache(q, true, true);
976 | |
977 | return 0; |
978 | |
979 | out_bioset_exit: |
980 | bioset_exit(&d->bio_split); |
981 | out_ida_remove: |
982 | ida_simple_remove(&bcache_device_idx, idx); |
983 | out_free_full_dirty_stripes: |
kvfree(d->full_dirty_stripes);
out_free_stripe_sectors_dirty:
kvfree(d->stripe_sectors_dirty);
987 | return -ENOMEM; |
988 | |
989 | } |
990 | |
991 | /* Cached device */ |
992 | |
993 | static void calc_cached_dev_sectors(struct cache_set *c) |
994 | { |
995 | uint64_t sectors = 0; |
996 | struct cached_dev *dc; |
997 | |
998 | list_for_each_entry(dc, &c->cached_devs, list) |
sectors += bdev_nr_sectors(dc->bdev);
1000 | |
1001 | c->cached_dev_sectors = sectors; |
1002 | } |
1003 | |
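/*
* Watchdog for the backing device: cached_dev_status_update() runs as a
* kthread and polls the backing device's request queue roughly once per
* second. If the queue stays dying for BACKING_DEV_OFFLINE_TIMEOUT
* consecutive seconds, I/O on the bcache device is disabled and the
* device is stopped.
*/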
1004 | #define BACKING_DEV_OFFLINE_TIMEOUT 5 |
1005 | static int cached_dev_status_update(void *arg) |
1006 | { |
1007 | struct cached_dev *dc = arg; |
1008 | struct request_queue *q; |
1009 | |
1010 | /* |
* If this kthread is being stopped from outside, quit directly here.
1012 | * dc->io_disable might be set via sysfs interface, so check it |
1013 | * here too. |
1014 | */ |
1015 | while (!kthread_should_stop() && !dc->io_disable) { |
q = bdev_get_queue(dc->bdev);
1017 | if (blk_queue_dying(q)) |
1018 | dc->offline_seconds++; |
1019 | else |
1020 | dc->offline_seconds = 0; |
1021 | |
1022 | if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) { |
pr_err("%pg: device offline for %d seconds\n",
dc->bdev,
BACKING_DEV_OFFLINE_TIMEOUT);
pr_err("%s: disable I/O request due to backing device offline\n",
dc->disk.name);
dc->io_disable = true;
/* let others know earlier that io_disable is true */
smp_mb();
bcache_device_stop(&dc->disk);
1032 | break; |
1033 | } |
1034 | schedule_timeout_interruptible(HZ); |
1035 | } |
1036 | |
1037 | wait_for_kthread_stop(); |
1038 | return 0; |
1039 | } |
1040 | |
1041 | |
1042 | int bch_cached_dev_run(struct cached_dev *dc) |
1043 | { |
1044 | int ret = 0; |
1045 | struct bcache_device *d = &dc->disk; |
char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL);
char *env[] = {
"DRIVER=bcache",
kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""),
NULL,
};

if (dc->io_disable) {
pr_err("I/O disabled on cached dev %pg\n", dc->bdev);
ret = -EIO;
goto out;
}

if (atomic_xchg(&dc->running, 1)) {
pr_info("cached dev %pg is running already\n", dc->bdev);
ret = -EBUSY;
goto out;
}

if (!d->c &&
BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
struct closure cl;

closure_init_stack(&cl);

SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
bch_write_bdev_super(dc, &cl);
closure_sync(&cl);
}

ret = add_disk(d->disk);
if (ret)
goto out;
bd_link_disk_holder(dc->bdev, dc->disk.disk);
/*
* won't show up in the uevent file, use udevadm monitor -e instead
* only class / kset properties are persistent
*/
kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);

if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
sysfs_create_link(&disk_to_dev(d->disk)->kobj,
&d->kobj, "bcache")) {
pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n");
ret = -ENOMEM;
goto out;
}

dc->status_update_thread = kthread_run(cached_dev_status_update,
dc, "bcache_status_update");
if (IS_ERR(dc->status_update_thread)) {
pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n");
}

out:
kfree(env[1]);
kfree(env[2]);
kfree(buf);
1105 | return ret; |
1106 | } |
1107 | |
1108 | /* |
1109 | * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed |
1110 | * work dc->writeback_rate_update is running. Wait until the routine |
1111 | * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to |
1112 | * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out |
1113 | * seconds, give up waiting here and continue to cancel it too. |
1114 | */ |
1115 | static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) |
1116 | { |
1117 | int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ; |
1118 | |
1119 | do { |
1120 | if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING, |
1121 | &dc->disk.flags)) |
1122 | break; |
1123 | time_out--; |
schedule_timeout_interruptible(1);
} while (time_out > 0);

if (time_out == 0)
pr_warn("give up waiting for dc->writeback_write_update to quit\n");

cancel_delayed_work_sync(&dc->writeback_rate_update);
1131 | } |
1132 | |
1133 | static void cached_dev_detach_finish(struct work_struct *w) |
1134 | { |
1135 | struct cached_dev *dc = container_of(w, struct cached_dev, detach); |
1136 | struct cache_set *c = dc->disk.c; |
1137 | |
1138 | BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); |
1139 | BUG_ON(refcount_read(&dc->count)); |
1140 | |
1141 | |
if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
cancel_writeback_rate_update_dwork(dc);

if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
kthread_stop(dc->writeback_thread);
dc->writeback_thread = NULL;
}

mutex_lock(&bch_register_lock);

bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
calc_cached_dev_sectors(c);

clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

mutex_unlock(&bch_register_lock);

pr_info("Caching disabled for %pg\n", dc->bdev);

/* Drop ref we took in cached_dev_detach() */
closure_put(&dc->disk.cl);
1165 | } |
1166 | |
1167 | void bch_cached_dev_detach(struct cached_dev *dc) |
1168 | { |
1169 | lockdep_assert_held(&bch_register_lock); |
1170 | |
1171 | if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) |
1172 | return; |
1173 | |
if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
1175 | return; |
1176 | |
1177 | /* |
1178 | * Block the device from being closed and freed until we're finished |
1179 | * detaching |
1180 | */ |
closure_get(&dc->disk.cl);
1182 | |
1183 | bch_writeback_queue(dc); |
1184 | |
1185 | cached_dev_put(dc); |
1186 | } |
1187 | |
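/*
* Attach a backing device to a cache set: verify the set uuid and state,
* find (or allocate) a uuid_entry slot for the backing device, persist the
* uuid array and the backing superblock as needed, start the writeback
* machinery and finally run the bcache device.
*/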
1188 | int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, |
1189 | uint8_t *set_uuid) |
1190 | { |
1191 | uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds()); |
1192 | struct uuid_entry *u; |
1193 | struct cached_dev *exist_dc, *t; |
1194 | int ret = 0; |
1195 | |
if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
(!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
return -ENOENT;

if (dc->disk.c) {
pr_err("Can't attach %pg: already attached\n", dc->bdev);
return -EINVAL;
}

if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
pr_err("Can't attach %pg: shutting down\n", dc->bdev);
return -EINVAL;
}

if (dc->sb.block_size < c->cache->sb.block_size) {
/* Will die */
pr_err("Couldn't attach %pg: block size less than set's block size\n",
dc->bdev);
return -EINVAL;
}

/* Check whether already attached */
list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
pr_err("Tried to attach %pg but duplicate UUID already attached\n",
dc->bdev);

return -EINVAL;
}
}

u = uuid_find(c, dc->sb.uuid);

if (u &&
(BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
memcpy(u->uuid, invalid_uuid, 16);
u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
u = NULL;
}

if (!u) {
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
pr_err("Couldn't find uuid for %pg in set\n", dc->bdev);
return -ENOENT;
}

u = uuid_find_empty(c);
if (!u) {
pr_err("Not caching %pg, no room for UUID\n", dc->bdev);
return -EINVAL;
}
}
1249 | |
1250 | /* |
1251 | * Deadlocks since we're called via sysfs... |
1252 | * sysfs_remove_file(&dc->kobj, &sysfs_attach); |
1253 | */ |
1254 | |
if (bch_is_zero(u->uuid, 16)) {
struct closure cl;

closure_init_stack(&cl);

memcpy(u->uuid, dc->sb.uuid, 16);
memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
u->first_reg = u->last_reg = rtime;
bch_uuid_write(c);

memcpy(dc->sb.set_uuid, c->set_uuid, 16);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

bch_write_bdev_super(dc, &cl);
closure_sync(&cl);
} else {
u->last_reg = rtime;
bch_uuid_write(c);
}

bcache_device_attach(&dc->disk, c, u - c->uuids);
list_move(&dc->list, &c->cached_devs);
1277 | calc_cached_dev_sectors(c); |
1278 | |
1279 | /* |
1280 | * dc->c must be set before dc->count != 0 - paired with the mb in |
1281 | * cached_dev_get() |
1282 | */ |
1283 | smp_wmb(); |
refcount_set(&dc->count, 1);

/* Block writeback thread, but spawn it */
down_write(&dc->writeback_lock);
if (bch_cached_dev_writeback_start(dc)) {
up_write(&dc->writeback_lock);
pr_err("Couldn't start writeback facilities for %s\n",
dc->disk.disk->disk_name);
return -ENOMEM;
}

if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
atomic_set(&dc->has_dirty, 1);
bch_writeback_queue(dc);
1298 | } |
1299 | |
bch_sectors_dirty_init(&dc->disk);

ret = bch_cached_dev_run(dc);
if (ret && (ret != -EBUSY)) {
up_write(&dc->writeback_lock);
1305 | /* |
1306 | * bch_register_lock is held, bcache_device_stop() is not |
1307 | * able to be directly called. The kthread and kworker |
1308 | * created previously in bch_cached_dev_writeback_start() |
1309 | * have to be stopped manually here. |
1310 | */ |
kthread_stop(dc->writeback_thread);
cancel_writeback_rate_update_dwork(dc);
pr_err("Couldn't run cached device %pg\n", dc->bdev);
1314 | return ret; |
1315 | } |
1316 | |
bcache_device_link(&dc->disk, c, "bdev");
atomic_inc(&c->attached_dev_nr);

if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
pr_err("Please update to the latest bcache-tools to create the cache device\n");
set_disk_ro(dc->disk.disk, 1);
}

/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
1328 | |
pr_info("Caching %pg as %s on set %pU\n",
1330 | dc->bdev, |
1331 | dc->disk.disk->disk_name, |
1332 | dc->disk.c->set_uuid); |
1333 | return 0; |
1334 | } |
1335 | |
1336 | /* when dc->disk.kobj released */ |
1337 | void bch_cached_dev_release(struct kobject *kobj) |
1338 | { |
1339 | struct cached_dev *dc = container_of(kobj, struct cached_dev, |
1340 | disk.kobj); |
kfree(dc);
1342 | module_put(THIS_MODULE); |
1343 | } |
1344 | |
1345 | static void cached_dev_free(struct closure *cl) |
1346 | { |
1347 | struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); |
1348 | |
if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
cancel_writeback_rate_update_dwork(dc);

if (!IS_ERR_OR_NULL(dc->writeback_thread))
kthread_stop(dc->writeback_thread);
if (!IS_ERR_OR_NULL(dc->status_update_thread))
kthread_stop(dc->status_update_thread);

mutex_lock(&bch_register_lock);

if (atomic_read(&dc->running)) {
bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
del_gendisk(dc->disk.disk);
}
bcache_device_free(&dc->disk);
list_del(&dc->list);

mutex_unlock(&bch_register_lock);

if (dc->sb_disk)
put_page(virt_to_page(dc->sb_disk));

if (dc->bdev_handle)
bdev_release(dc->bdev_handle);

wake_up(&unregister_wait);

kobject_put(&dc->disk.kobj);
1377 | } |
1378 | |
1379 | static void cached_dev_flush(struct closure *cl) |
1380 | { |
1381 | struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); |
1382 | struct bcache_device *d = &dc->disk; |
1383 | |
1384 | mutex_lock(&bch_register_lock); |
1385 | bcache_device_unlink(d); |
mutex_unlock(&bch_register_lock);

bch_cache_accounting_destroy(&dc->accounting);
kobject_del(&d->kobj);
1390 | |
1391 | continue_at(cl, cached_dev_free, system_wq); |
1392 | } |
1393 | |
1394 | static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) |
1395 | { |
1396 | int ret; |
1397 | struct io *io; |
struct request_queue *q = bdev_get_queue(dc->bdev);

__module_get(THIS_MODULE);
INIT_LIST_HEAD(&dc->list);
closure_init(&dc->disk.cl, NULL);
set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
INIT_WORK(&dc->detach, cached_dev_detach_finish);
sema_init(&dc->sb_write_mutex, 1);
INIT_LIST_HEAD(&dc->io_lru);
spin_lock_init(&dc->io_lock);
bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

dc->sequential_cutoff = 4 << 20;

for (io = dc->io; io < dc->io + RECENT_IO; io++) {
list_add(&io->lru, &dc->io_lru);
hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
}

dc->disk.stripe_size = q->limits.io_opt >> 9;

if (dc->disk.stripe_size)
dc->partial_stripes_expensive =
q->limits.raid_partial_stripes_expensive;

ret = bcache_device_init(&dc->disk, block_size,
bdev_nr_sectors(dc->bdev) - dc->sb.data_offset,
dc->bdev, &bcache_cached_ops);
if (ret)
return ret;

blk_queue_io_opt(dc->disk.disk->queue,
max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q)));

atomic_set(&dc->io_errors, 0);
1434 | dc->io_disable = false; |
1435 | dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT; |
1436 | /* default to auto */ |
1437 | dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO; |
1438 | |
1439 | bch_cached_dev_request_init(dc); |
1440 | bch_cached_dev_writeback_init(dc); |
1441 | return 0; |
1442 | } |
1443 | |
1444 | /* Cached device - bcache superblock */ |
1445 | |
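/*
* register_bdev() effectively takes ownership of the caller's sb_disk and
* bdev_handle references, copies the superblock into dc->sb, initialises
* the cached device, tries to attach it to any already registered cache
* set, and runs it right away when the backing state is NONE or STALE
* (i.e. there is no dirty data tying it to a specific cache set).
*/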
1446 | static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk, |
1447 | struct bdev_handle *bdev_handle, |
1448 | struct cached_dev *dc) |
1449 | { |
const char *err = "cannot allocate memory";
1451 | struct cache_set *c; |
1452 | int ret = -ENOMEM; |
1453 | |
1454 | memcpy(&dc->sb, sb, sizeof(struct cache_sb)); |
1455 | dc->bdev_handle = bdev_handle; |
1456 | dc->bdev = bdev_handle->bdev; |
1457 | dc->sb_disk = sb_disk; |
1458 | |
if (cached_dev_init(dc, sb->block_size << 9))
goto err;

err = "error creating kobject";
if (kobject_add(&dc->disk.kobj, bdev_kobj(dc->bdev), "bcache"))
goto err;
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;

pr_info("registered backing device %pg\n", dc->bdev);

list_add(&dc->list, &uncached_devices);
/* attach to a matched cache set if it exists */
list_for_each_entry(c, &bch_cache_sets, list)
bch_cached_dev_attach(dc, c, NULL);

if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) {
err = "failed to run cached device";
ret = bch_cached_dev_run(dc);
if (ret)
goto err;
}

return 0;
err:
pr_notice("error %pg: %s\n", dc->bdev, err);
bcache_device_stop(&dc->disk);
1487 | return ret; |
1488 | } |
1489 | |
1490 | /* Flash only volumes */ |
1491 | |
1492 | /* When d->kobj released */ |
1493 | void bch_flash_dev_release(struct kobject *kobj) |
1494 | { |
1495 | struct bcache_device *d = container_of(kobj, struct bcache_device, |
1496 | kobj); |
kfree(d);
1498 | } |
1499 | |
1500 | static void flash_dev_free(struct closure *cl) |
1501 | { |
1502 | struct bcache_device *d = container_of(cl, struct bcache_device, cl); |
1503 | |
1504 | mutex_lock(&bch_register_lock); |
atomic_long_sub(bcache_dev_sectors_dirty(d),
&d->c->flash_dev_dirty_sectors);
del_gendisk(d->disk);
bcache_device_free(d);
mutex_unlock(&bch_register_lock);
kobject_put(&d->kobj);
1511 | } |
1512 | |
1513 | static void flash_dev_flush(struct closure *cl) |
1514 | { |
1515 | struct bcache_device *d = container_of(cl, struct bcache_device, cl); |
1516 | |
1517 | mutex_lock(&bch_register_lock); |
1518 | bcache_device_unlink(d); |
mutex_unlock(&bch_register_lock);
kobject_del(&d->kobj);
1521 | continue_at(cl, flash_dev_free, system_wq); |
1522 | } |
1523 | |
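/*
* A flash-only volume is a bcache device backed purely by the cache set:
* it has no backing device, its size comes from the uuid_entry's sectors
* field, and its data is stored entirely in the cache set.
*/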
1524 | static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) |
1525 | { |
1526 | int err = -ENOMEM; |
struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
GFP_KERNEL);
if (!d)
goto err_ret;

closure_init(&d->cl, NULL);
set_closure_fn(&d->cl, flash_dev_flush, system_wq);

kobject_init(&d->kobj, &bch_flash_dev_ktype);

if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
NULL, &bcache_flash_ops))
goto err;

bcache_device_attach(d, c, u - c->uuids);
bch_sectors_dirty_init(d);
bch_flash_dev_request_init(d);
err = add_disk(d->disk);
if (err)
goto err;

err = kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache");
if (err)
goto err;

bcache_device_link(d, c, "volume");

if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
pr_err("Please update to the latest bcache-tools to create the cache device\n");
set_disk_ro(d->disk, 1);
}

return 0;
err:
kobject_put(&d->kobj);
1563 | err_ret: |
1564 | return err; |
1565 | } |
1566 | |
1567 | static int flash_devs_run(struct cache_set *c) |
1568 | { |
1569 | int ret = 0; |
1570 | struct uuid_entry *u; |
1571 | |
1572 | for (u = c->uuids; |
1573 | u < c->uuids + c->nr_uuids && !ret; |
1574 | u++) |
if (UUID_FLASH_ONLY(u))
1576 | ret = flash_dev_run(c, u); |
1577 | |
1578 | return ret; |
1579 | } |
1580 | |
1581 | int bch_flash_dev_create(struct cache_set *c, uint64_t size) |
1582 | { |
1583 | struct uuid_entry *u; |
1584 | |
1585 | if (test_bit(CACHE_SET_STOPPING, &c->flags)) |
1586 | return -EINTR; |
1587 | |
1588 | if (!test_bit(CACHE_SET_RUNNING, &c->flags)) |
1589 | return -EPERM; |
1590 | |
1591 | u = uuid_find_empty(c); |
1592 | if (!u) { |
pr_err("Can't create volume, no room for UUID\n");
return -EINVAL;
}

get_random_bytes(u->uuid, 16);
memset(u->label, 0, 32);
u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());

SET_UUID_FLASH_ONLY(u, 1);
1602 | u->sectors = size >> 9; |
1603 | |
1604 | bch_uuid_write(c); |
1605 | |
1606 | return flash_dev_run(c, u); |
1607 | } |
1608 | |
1609 | bool bch_cached_dev_error(struct cached_dev *dc) |
1610 | { |
1611 | if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) |
1612 | return false; |
1613 | |
1614 | dc->io_disable = true; |
1615 | /* make others know io_disable is true earlier */ |
1616 | smp_mb(); |
1617 | |
pr_err("stop %s: too many IO errors on backing device %pg\n",
dc->disk.disk->disk_name, dc->bdev);

bcache_device_stop(&dc->disk);
1622 | return true; |
1623 | } |
1624 | |
1625 | /* Cache set */ |
1626 | |
1627 | __printf(2, 3) |
1628 | bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) |
1629 | { |
1630 | struct va_format vaf; |
1631 | va_list args; |
1632 | |
1633 | if (c->on_error != ON_ERROR_PANIC && |
1634 | test_bit(CACHE_SET_STOPPING, &c->flags)) |
1635 | return false; |
1636 | |
if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
1638 | pr_info("CACHE_SET_IO_DISABLE already set\n" ); |
1639 | |
1640 | /* |
1641 | * XXX: we can be called from atomic context |
1642 | * acquire_console_sem(); |
1643 | */ |
1644 | |
1645 | va_start(args, fmt); |
1646 | |
1647 | vaf.fmt = fmt; |
1648 | vaf.va = &args; |
1649 | |
pr_err("error on %pU: %pV, disabling caching\n",
1651 | c->set_uuid, &vaf); |
1652 | |
1653 | va_end(args); |
1654 | |
1655 | if (c->on_error == ON_ERROR_PANIC) |
panic("panic forced after error\n");
1657 | |
1658 | bch_cache_set_unregister(c); |
1659 | return true; |
1660 | } |
1661 | |
1662 | /* When c->kobj released */ |
1663 | void bch_cache_set_release(struct kobject *kobj) |
1664 | { |
1665 | struct cache_set *c = container_of(kobj, struct cache_set, kobj); |
1666 | |
kfree(c);
1668 | module_put(THIS_MODULE); |
1669 | } |
1670 | |
1671 | static void cache_set_free(struct closure *cl) |
1672 | { |
1673 | struct cache_set *c = container_of(cl, struct cache_set, cl); |
1674 | struct cache *ca; |
1675 | |
debugfs_remove(c->debug);

bch_open_buckets_free(c);
bch_btree_cache_free(c);
bch_journal_free(c);

mutex_lock(&bch_register_lock);
bch_bset_sort_state_free(&c->sort);
free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));

ca = c->cache;
if (ca) {
ca->set = NULL;
c->cache = NULL;
kobject_put(&ca->kobj);
}


if (c->moving_gc_wq)
destroy_workqueue(c->moving_gc_wq);
bioset_exit(&c->bio_split);
mempool_exit(&c->fill_iter);
mempool_exit(&c->bio_meta);
mempool_exit(&c->search);
kfree(c->devices);

list_del(&c->list);
mutex_unlock(&bch_register_lock);

pr_info("Cache set %pU unregistered\n", c->set_uuid);
wake_up(&unregister_wait);

closure_debug_destroy(&c->cl);
kobject_put(&c->kobj);
1710 | } |
1711 | |
1712 | static void cache_set_flush(struct closure *cl) |
1713 | { |
1714 | struct cache_set *c = container_of(cl, struct cache_set, caching); |
1715 | struct cache *ca = c->cache; |
1716 | struct btree *b; |
1717 | |
1718 | bch_cache_accounting_destroy(acc: &c->accounting); |
1719 | |
1720 | kobject_put(kobj: &c->internal); |
1721 | kobject_del(kobj: &c->kobj); |
1722 | |
1723 | if (!IS_ERR_OR_NULL(ptr: c->gc_thread)) |
1724 | kthread_stop(k: c->gc_thread); |
1725 | |
1726 | if (!IS_ERR(ptr: c->root)) |
1727 | list_add(new: &c->root->list, head: &c->btree_cache); |
1728 | |
1729 | /* |
1730 | * Avoid flushing cached nodes if cache set is retiring |
1731 | * due to too many I/O errors detected. |
1732 | */ |
1733 | if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags)) |
1734 | list_for_each_entry(b, &c->btree_cache, list) { |
1735 | mutex_lock(&b->write_lock); |
1736 | if (btree_node_dirty(b)) |
1737 | __bch_btree_node_write(b, NULL); |
			mutex_unlock(&b->write_lock);
1739 | } |
1740 | |
1741 | if (ca->alloc_thread) |
		kthread_stop(ca->alloc_thread);
1743 | |
1744 | if (c->journal.cur) { |
		cancel_delayed_work_sync(&c->journal.work);
1746 | /* flush last journal entry if needed */ |
1747 | c->journal.work.work.func(&c->journal.work.work); |
1748 | } |
1749 | |
1750 | closure_return(cl); |
1751 | } |
1752 | |
1753 | /* |
1754 | * This function is only called when CACHE_SET_IO_DISABLE is set, which means |
1755 | * cache set is unregistering due to too many I/O errors. In this condition, |
1756 | * the bcache device might be stopped, it depends on stop_when_cache_set_failed |
1757 | * value and whether the broken cache has dirty data: |
1758 | * |
1759 | * dc->stop_when_cache_set_failed dc->has_dirty stop bcache device |
1760 | * BCH_CACHED_STOP_AUTO 0 NO |
1761 | * BCH_CACHED_STOP_AUTO 1 YES |
1762 | * BCH_CACHED_DEV_STOP_ALWAYS 0 YES |
1763 | * BCH_CACHED_DEV_STOP_ALWAYS 1 YES |
1764 | * |
1765 | * The expected behavior is, if stop_when_cache_set_failed is configured to |
1766 | * "auto" via sysfs interface, the bcache device will not be stopped if the |
1767 | * backing device is clean on the broken cache device. |
1768 | */ |
1769 | static void conditional_stop_bcache_device(struct cache_set *c, |
1770 | struct bcache_device *d, |
1771 | struct cached_dev *dc) |
1772 | { |
1773 | if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) { |
1774 | pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n" , |
1775 | d->disk->disk_name, c->set_uuid); |
1776 | bcache_device_stop(d); |
	} else if (atomic_read(&dc->has_dirty)) {
		/*
		 * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
		 * and dc->has_dirty == 1
		 */
1782 | pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n" , |
1783 | d->disk->disk_name); |
1784 | /* |
1785 | * There might be a small time gap that cache set is |
1786 | * released but bcache device is not. Inside this time |
1787 | * gap, regular I/O requests will directly go into |
1788 | * backing device as no cache set attached to. This |
1789 | * behavior may also introduce potential inconsistence |
1790 | * data in writeback mode while cache is dirty. |
1791 | * Therefore before calling bcache_device_stop() due |
1792 | * to a broken cache device, dc->io_disable should be |
1793 | * explicitly set to true. |
1794 | */ |
1795 | dc->io_disable = true; |
1796 | /* make others know io_disable is true earlier */ |
1797 | smp_mb(); |
1798 | bcache_device_stop(d); |
1799 | } else { |
1800 | /* |
1801 | * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO |
1802 | * and dc->has_dirty == 0 |
1803 | */ |
1804 | pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n" , |
1805 | d->disk->disk_name); |
1806 | } |
1807 | } |
1808 | |
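/*
 * First stage of stopping a cache set, run from the c->caching closure:
 * detach or stop every bcache device attached to this set, then continue
 * at cache_set_flush().
 */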
1809 | static void __cache_set_unregister(struct closure *cl) |
1810 | { |
1811 | struct cache_set *c = container_of(cl, struct cache_set, caching); |
1812 | struct cached_dev *dc; |
1813 | struct bcache_device *d; |
1814 | size_t i; |
1815 | |
1816 | mutex_lock(&bch_register_lock); |
1817 | |
1818 | for (i = 0; i < c->devices_max_used; i++) { |
1819 | d = c->devices[i]; |
1820 | if (!d) |
1821 | continue; |
1822 | |
		if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
1824 | test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { |
1825 | dc = container_of(d, struct cached_dev, disk); |
1826 | bch_cached_dev_detach(dc); |
1827 | if (test_bit(CACHE_SET_IO_DISABLE, &c->flags)) |
1828 | conditional_stop_bcache_device(c, d, dc); |
1829 | } else { |
1830 | bcache_device_stop(d); |
1831 | } |
1832 | } |
1833 | |
	mutex_unlock(&bch_register_lock);
1835 | |
1836 | continue_at(cl, cache_set_flush, system_wq); |
1837 | } |
1838 | |
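/*
 * Kick off cache set shutdown; CACHE_SET_STOPPING makes this idempotent.
 * bch_cache_set_unregister() additionally sets CACHE_SET_UNREGISTERING so
 * attached backing devices get detached in __cache_set_unregister().
 */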
1839 | void bch_cache_set_stop(struct cache_set *c) |
1840 | { |
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		/* closure_fn set to __cache_set_unregister() */
		closure_queue(&c->caching);
1844 | } |
1845 | |
1846 | void bch_cache_set_unregister(struct cache_set *c) |
1847 | { |
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
1849 | bch_cache_set_stop(c); |
1850 | } |
1851 | |
1852 | #define alloc_meta_bucket_pages(gfp, sb) \ |
1853 | ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb)))) |
1854 | |
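/*
 * Allocate and initialize a cache_set from the cache's in-memory superblock.
 * No threads are started here (run_cache_set() does that); on failure the
 * partially constructed set is unregistered and NULL is returned.
 */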
1855 | struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) |
1856 | { |
1857 | int iter_size; |
1858 | struct cache *ca = container_of(sb, struct cache, sb); |
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
1860 | |
1861 | if (!c) |
1862 | return NULL; |
1863 | |
1864 | __module_get(THIS_MODULE); |
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);
1879 | |
1880 | memcpy(c->set_uuid, sb->set_uuid, 16); |
1881 | |
1882 | c->cache = ca; |
1883 | c->cache->set = c; |
1884 | c->bucket_bits = ilog2(sb->bucket_size); |
1885 | c->block_bits = ilog2(sb->block_size); |
1886 | c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry); |
1887 | c->devices_max_used = 0; |
	atomic_set(&c->attached_dev_nr, 0);
1889 | c->btree_pages = meta_bucket_pages(sb); |
1890 | if (c->btree_pages > BTREE_MAX_PAGES) |
1891 | c->btree_pages = max_t(int, c->btree_pages / 4, |
1892 | BTREE_MAX_PAGES); |
1893 | |
	sema_init(&c->sb_write_mutex, 1);
	mutex_init(&c->bucket_lock);
	init_waitqueue_head(&c->btree_cache_wait);
	spin_lock_init(&c->btree_cannibalize_lock);
	init_waitqueue_head(&c->bucket_wait);
	init_waitqueue_head(&c->gc_wait);
	sema_init(&c->uuid_write_mutex, 1);
1901 | |
1902 | spin_lock_init(&c->btree_gc_time.lock); |
1903 | spin_lock_init(&c->btree_split_time.lock); |
1904 | spin_lock_init(&c->btree_read_time.lock); |
1905 | |
1906 | bch_moving_init_cache_set(c); |
1907 | |
	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);
1914 | |
1915 | iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) * |
1916 | sizeof(struct btree_iter_set); |
1917 | |
	c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
1919 | if (!c->devices) |
1920 | goto err; |
1921 | |
	if (mempool_init_slab_pool(&c->search, 32, bch_search_cache))
1923 | goto err; |
1924 | |
	if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
				      sizeof(struct bbio) +
				      sizeof(struct bio_vec) * meta_bucket_pages(sb)))
		goto err;

	if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
		goto err;

	if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
			BIOSET_NEED_RESCUER))
1935 | goto err; |
1936 | |
1937 | c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb); |
1938 | if (!c->uuids) |
1939 | goto err; |
1940 | |
	c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0);
1942 | if (!c->moving_gc_wq) |
1943 | goto err; |
1944 | |
1945 | if (bch_journal_alloc(c)) |
1946 | goto err; |
1947 | |
1948 | if (bch_btree_cache_alloc(c)) |
1949 | goto err; |
1950 | |
1951 | if (bch_open_buckets_alloc(c)) |
1952 | goto err; |
1953 | |
	if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
1955 | goto err; |
1956 | |
1957 | c->congested_read_threshold_us = 2000; |
1958 | c->congested_write_threshold_us = 20000; |
1959 | c->error_limit = DEFAULT_IO_ERROR_LIMIT; |
1960 | c->idle_max_writeback_rate_enabled = 1; |
1961 | WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); |
1962 | |
1963 | return c; |
1964 | err: |
1965 | bch_cache_set_unregister(c); |
1966 | return NULL; |
1967 | } |
1968 | |
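/*
 * Bring an allocated cache set online. For a cache that was in use before
 * (CACHE_SYNC set) this reads the journal, priorities, uuids and btree root,
 * checks the btree and replays the journal; for a brand-new cache it
 * invalidates existing data and writes a fresh uuid bucket and btree root.
 * Finally the gc thread is started, waiting backing devices are attached
 * and flash-only volumes are run.
 */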
1969 | static int run_cache_set(struct cache_set *c) |
1970 | { |
1971 | const char *err = "cannot allocate memory" ; |
1972 | struct cached_dev *dc, *t; |
1973 | struct cache *ca = c->cache; |
1974 | struct closure cl; |
1975 | LIST_HEAD(journal); |
1976 | struct journal_replay *l; |
1977 | |
	closure_init_stack(&cl);
1979 | |
1980 | c->nbuckets = ca->sb.nbuckets; |
1981 | set_gc_sectors(c); |
1982 | |
	if (CACHE_SYNC(&c->cache->sb)) {
1984 | struct bkey *k; |
1985 | struct jset *j; |
1986 | |
1987 | err = "cannot allocate memory for journal" ; |
		if (bch_journal_read(c, &journal))
1989 | goto err; |
1990 | |
1991 | pr_debug("btree_journal_read() done\n" ); |
1992 | |
1993 | err = "no journal entries found" ; |
		if (list_empty(&journal))
1995 | goto err; |
1996 | |
1997 | j = &list_entry(journal.prev, struct journal_replay, list)->j; |
1998 | |
1999 | err = "IO error reading priorities" ; |
		if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
2001 | goto err; |
2002 | |
2003 | /* |
2004 | * If prio_read() fails it'll call cache_set_error and we'll |
2005 | * tear everything down right away, but if we perhaps checked |
2006 | * sooner we could avoid journal replay. |
2007 | */ |
2008 | |
2009 | k = &j->btree_root; |
2010 | |
2011 | err = "bad btree root" ; |
2012 | if (__bch_btree_ptr_invalid(c, k)) |
2013 | goto err; |
2014 | |
2015 | err = "error reading btree root" ; |
		c->root = bch_btree_node_get(c, NULL, k,
					     j->btree_level,
					     true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &cl);
2026 | if (err) |
2027 | goto err; |
2028 | |
2029 | err = "error in recovery" ; |
2030 | if (bch_btree_check(c)) |
2031 | goto err; |
2032 | |
		bch_journal_mark(c, &journal);
2034 | bch_initial_gc_finish(c); |
2035 | pr_debug("btree_check() done\n" ); |
2036 | |
2037 | /* |
2038 | * bcache_journal_next() can't happen sooner, or |
2039 | * btree_gc_finish() will give spurious errors about last_gc > |
2040 | * gc_gen - this is a hack but oh well. |
2041 | */ |
		bch_journal_next(&c->journal);
2043 | |
2044 | err = "error starting allocator thread" ; |
2045 | if (bch_cache_allocator_start(ca)) |
2046 | goto err; |
2047 | |
2048 | /* |
2049 | * First place it's safe to allocate: btree_check() and |
2050 | * btree_gc_finish() have to run before we have buckets to |
2051 | * allocate, and bch_bucket_alloc_set() might cause a journal |
2052 | * entry to be written so bcache_journal_next() has to be called |
2053 | * first. |
2054 | * |
2055 | * If the uuids were in the old format we have to rewrite them |
2056 | * before the next journal entry is written: |
2057 | */ |
2058 | if (j->version < BCACHE_JSET_VERSION_UUID) |
2059 | __uuid_write(c); |
2060 | |
2061 | err = "bcache: replay journal failed" ; |
		if (bch_journal_replay(c, &journal))
2063 | goto err; |
2064 | } else { |
2065 | unsigned int j; |
2066 | |
2067 | pr_notice("invalidating existing data\n" ); |
2068 | ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, |
2069 | 2, SB_JOURNAL_BUCKETS); |
2070 | |
2071 | for (j = 0; j < ca->sb.keys; j++) |
2072 | ca->sb.d[j] = ca->sb.first_bucket + j; |
2073 | |
2074 | bch_initial_gc_finish(c); |
2075 | |
2076 | err = "error starting allocator thread" ; |
2077 | if (bch_cache_allocator_start(ca)) |
2078 | goto err; |
2079 | |
2080 | mutex_lock(&c->bucket_lock); |
		bch_prio_write(ca, true);
		mutex_unlock(&c->bucket_lock);
2083 | |
2084 | err = "cannot allocate new UUID bucket" ; |
2085 | if (__uuid_write(c)) |
2086 | goto err; |
2087 | |
2088 | err = "cannot allocate new btree root" ; |
		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
		if (IS_ERR(c->root))
2091 | goto err; |
2092 | |
2093 | mutex_lock(&c->root->write_lock); |
		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &cl);
		mutex_unlock(&c->root->write_lock);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);
2100 | |
2101 | /* |
2102 | * We don't want to write the first journal entry until |
2103 | * everything is set up - fortunately journal entries won't be |
2104 | * written until the SET_CACHE_SYNC() here: |
2105 | */ |
		SET_CACHE_SYNC(&c->cache->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &cl);
2110 | } |
2111 | |
2112 | err = "error starting gc thread" ; |
2113 | if (bch_gc_thread_start(c)) |
2114 | goto err; |
2115 | |
	closure_sync(&cl);
2117 | c->cache->sb.last_mount = (u32)ktime_get_real_seconds(); |
2118 | bcache_write_super(c); |
2119 | |
	if (bch_has_feature_obso_large_bucket(&c->cache->sb))
2121 | pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n" ); |
2122 | |
2123 | list_for_each_entry_safe(dc, t, &uncached_devices, list) |
2124 | bch_cached_dev_attach(dc, c, NULL); |
2125 | |
2126 | flash_devs_run(c); |
2127 | |
	bch_journal_space_reserve(&c->journal);
	set_bit(CACHE_SET_RUNNING, &c->flags);
2130 | return 0; |
2131 | err: |
	while (!list_empty(&journal)) {
		l = list_first_entry(&journal, struct journal_replay, list);
		list_del(&l->list);
		kfree(l);
2136 | } |
2137 | |
	closure_sync(&cl);
2139 | |
	bch_cache_set_error(c, "%s", err);
2141 | |
2142 | return -EIO; |
2143 | } |
2144 | |
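/*
 * Find an existing cache set with a matching set_uuid or allocate a new one,
 * create the sysfs links between cache and set, and run the set. Returns
 * NULL on success or an error string.
 */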
2145 | static const char *register_cache_set(struct cache *ca) |
2146 | { |
2147 | char buf[12]; |
2148 | const char *err = "cannot allocate memory" ; |
2149 | struct cache_set *c; |
2150 | |
2151 | list_for_each_entry(c, &bch_cache_sets, list) |
		if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
2153 | if (c->cache) |
2154 | return "duplicate cache set member" ; |
2155 | |
2156 | goto found; |
2157 | } |
2158 | |
	c = bch_cache_set_alloc(&ca->sb);
2160 | if (!c) |
2161 | return err; |
2162 | |
2163 | err = "error creating kobject" ; |
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
2169 | goto err; |
2170 | |
2171 | bch_debug_init_cache_set(c); |
2172 | |
	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	kobject_get(&ca->kobj);
2181 | ca->set = c; |
2182 | ca->set->cache = ca; |
2183 | |
2184 | err = "failed to run cache set" ; |
2185 | if (run_cache_set(c) < 0) |
2186 | goto err; |
2187 | |
2188 | return NULL; |
2189 | err: |
2190 | bch_cache_set_unregister(c); |
2191 | return err; |
2192 | } |
2193 | |
2194 | /* Cache device */ |
2195 | |
2196 | /* When ca->kobj released */ |
2197 | void bch_cache_release(struct kobject *kobj) |
2198 | { |
2199 | struct cache *ca = container_of(kobj, struct cache, kobj); |
2200 | unsigned int i; |
2201 | |
2202 | if (ca->set) { |
2203 | BUG_ON(ca->set->cache != ca); |
2204 | ca->set->cache = NULL; |
2205 | } |
2206 | |
	free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);
2210 | |
2211 | free_heap(&ca->heap); |
2212 | free_fifo(&ca->free_inc); |
2213 | |
2214 | for (i = 0; i < RESERVE_NR; i++) |
2215 | free_fifo(&ca->free[i]); |
2216 | |
2217 | if (ca->sb_disk) |
2218 | put_page(virt_to_page(ca->sb_disk)); |
2219 | |
	if (ca->bdev_handle)
		bdev_release(ca->bdev_handle);

	kfree(ca);
2224 | module_put(THIS_MODULE); |
2225 | } |
2226 | |
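/*
 * Allocate the in-memory data structures for a cache device: the reserve
 * fifos, free_inc fifo, heap, bucket array and prio bucket arrays. Returns
 * 0 on success or a negative errno, printing a notice on failure.
 */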
2227 | static int cache_alloc(struct cache *ca) |
2228 | { |
2229 | size_t free; |
2230 | size_t btree_buckets; |
2231 | struct bucket *b; |
2232 | int ret = -ENOMEM; |
2233 | const char *err = NULL; |
2234 | |
2235 | __module_get(THIS_MODULE); |
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
2239 | |
2240 | /* |
2241 | * when ca->sb.njournal_buckets is not zero, journal exists, |
2242 | * and in bch_journal_replay(), tree node may split, |
2243 | * so bucket of RESERVE_BTREE type is needed, |
2244 | * the worst situation is all journal buckets are valid journal, |
2245 | * and all the keys need to replay, |
2246 | * so the number of RESERVE_BTREE type buckets should be as much |
2247 | * as journal buckets |
2248 | */ |
2249 | btree_buckets = ca->sb.njournal_buckets ?: 8; |
2250 | free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; |
2251 | if (!free) { |
2252 | ret = -EPERM; |
2253 | err = "ca->sb.nbuckets is too small" ; |
2254 | goto err_free; |
2255 | } |
2256 | |
2257 | if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, |
2258 | GFP_KERNEL)) { |
2259 | err = "ca->free[RESERVE_BTREE] alloc failed" ; |
2260 | goto err_btree_alloc; |
2261 | } |
2262 | |
2263 | if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), |
2264 | GFP_KERNEL)) { |
2265 | err = "ca->free[RESERVE_PRIO] alloc failed" ; |
2266 | goto err_prio_alloc; |
2267 | } |
2268 | |
2269 | if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) { |
2270 | err = "ca->free[RESERVE_MOVINGGC] alloc failed" ; |
2271 | goto err_movinggc_alloc; |
2272 | } |
2273 | |
2274 | if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) { |
2275 | err = "ca->free[RESERVE_NONE] alloc failed" ; |
2276 | goto err_none_alloc; |
2277 | } |
2278 | |
2279 | if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) { |
2280 | err = "ca->free_inc alloc failed" ; |
2281 | goto err_free_inc_alloc; |
2282 | } |
2283 | |
2284 | if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) { |
2285 | err = "ca->heap alloc failed" ; |
2286 | goto err_heap_alloc; |
2287 | } |
2288 | |
2289 | ca->buckets = vzalloc(array_size(sizeof(struct bucket), |
2290 | ca->sb.nbuckets)); |
2291 | if (!ca->buckets) { |
2292 | err = "ca->buckets alloc failed" ; |
2293 | goto err_buckets_alloc; |
2294 | } |
2295 | |
2296 | ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t), |
2297 | prio_buckets(ca), 2), |
2298 | GFP_KERNEL); |
2299 | if (!ca->prio_buckets) { |
2300 | err = "ca->prio_buckets alloc failed" ; |
2301 | goto err_prio_buckets_alloc; |
2302 | } |
2303 | |
2304 | ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb); |
2305 | if (!ca->disk_buckets) { |
2306 | err = "ca->disk_buckets alloc failed" ; |
2307 | goto err_disk_buckets_alloc; |
2308 | } |
2309 | |
2310 | ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); |
2311 | |
2312 | for_each_bucket(b, ca) |
		atomic_set(&b->pin, 0);
2314 | return 0; |
2315 | |
2316 | err_disk_buckets_alloc: |
	kfree(ca->prio_buckets);
err_prio_buckets_alloc:
	vfree(ca->buckets);
2320 | err_buckets_alloc: |
2321 | free_heap(&ca->heap); |
2322 | err_heap_alloc: |
2323 | free_fifo(&ca->free_inc); |
2324 | err_free_inc_alloc: |
2325 | free_fifo(&ca->free[RESERVE_NONE]); |
2326 | err_none_alloc: |
2327 | free_fifo(&ca->free[RESERVE_MOVINGGC]); |
2328 | err_movinggc_alloc: |
2329 | free_fifo(&ca->free[RESERVE_PRIO]); |
2330 | err_prio_alloc: |
2331 | free_fifo(&ca->free[RESERVE_BTREE]); |
2332 | err_btree_alloc: |
2333 | err_free: |
2334 | module_put(THIS_MODULE); |
2335 | if (err) |
2336 | pr_notice("error %pg: %s\n" , ca->bdev, err); |
2337 | return ret; |
2338 | } |
2339 | |
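/*
 * Take over the opened bdev handle and on-disk superblock page, allocate the
 * in-memory cache structures and add the cache to its cache set. On early
 * failure the bdev is released explicitly because ca->kobj is not live yet.
 */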
2340 | static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk, |
2341 | struct bdev_handle *bdev_handle, |
2342 | struct cache *ca) |
2343 | { |
2344 | const char *err = NULL; /* must be set for any error case */ |
2345 | int ret = 0; |
2346 | |
2347 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); |
2348 | ca->bdev_handle = bdev_handle; |
2349 | ca->bdev = bdev_handle->bdev; |
2350 | ca->sb_disk = sb_disk; |
2351 | |
	if (bdev_max_discard_sectors(bdev_handle->bdev))
		ca->discard = CACHE_DISCARD(&ca->sb);
2354 | |
2355 | ret = cache_alloc(ca); |
2356 | if (ret != 0) { |
2357 | if (ret == -ENOMEM) |
2358 | err = "cache_alloc(): -ENOMEM" ; |
2359 | else if (ret == -EPERM) |
2360 | err = "cache_alloc(): cache device is too small" ; |
2361 | else |
2362 | err = "cache_alloc(): unknown error" ; |
2363 | pr_notice("error %pg: %s\n" , bdev_handle->bdev, err); |
2364 | /* |
2365 | * If we failed here, it means ca->kobj is not initialized yet, |
2366 | * kobject_put() won't be called and there is no chance to |
2367 | * call bdev_release() to bdev in bch_cache_release(). So |
2368 | * we explicitly call bdev_release() here. |
2369 | */ |
2370 | bdev_release(handle: bdev_handle); |
2371 | return ret; |
2372 | } |
2373 | |
	if (kobject_add(&ca->kobj, bdev_kobj(bdev_handle->bdev), "bcache")) {
2375 | pr_notice("error %pg: error calling kobject_add\n" , |
2376 | bdev_handle->bdev); |
2377 | ret = -ENOMEM; |
2378 | goto out; |
2379 | } |
2380 | |
2381 | mutex_lock(&bch_register_lock); |
2382 | err = register_cache_set(ca); |
	mutex_unlock(&bch_register_lock);
2384 | |
2385 | if (err) { |
2386 | ret = -ENODEV; |
2387 | goto out; |
2388 | } |
2389 | |
2390 | pr_info("registered cache device %pg\n" , ca->bdev_handle->bdev); |
2391 | |
2392 | out: |
	kobject_put(&ca->kobj);
2394 | return ret; |
2395 | } |
2396 | |
2397 | /* Global interfaces/init */ |
2398 | |
2399 | static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, |
2400 | const char *buffer, size_t size); |
2401 | static ssize_t bch_pending_bdevs_cleanup(struct kobject *k, |
2402 | struct kobj_attribute *attr, |
2403 | const char *buffer, size_t size); |
2404 | |
2405 | kobj_attribute_write(register, register_bcache); |
2406 | kobj_attribute_write(register_quiet, register_bcache); |
2407 | kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup); |
2408 | |
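/*
 * The bch_is_open*() helpers are called under bch_register_lock to check
 * whether a device number is already registered as a backing or cache
 * device, so register_bcache() can report -EBUSY more precisely.
 */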
2409 | static bool bch_is_open_backing(dev_t dev) |
2410 | { |
2411 | struct cache_set *c, *tc; |
2412 | struct cached_dev *dc, *t; |
2413 | |
2414 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) |
2415 | list_for_each_entry_safe(dc, t, &c->cached_devs, list) |
2416 | if (dc->bdev->bd_dev == dev) |
2417 | return true; |
2418 | list_for_each_entry_safe(dc, t, &uncached_devices, list) |
2419 | if (dc->bdev->bd_dev == dev) |
2420 | return true; |
2421 | return false; |
2422 | } |
2423 | |
2424 | static bool bch_is_open_cache(dev_t dev) |
2425 | { |
2426 | struct cache_set *c, *tc; |
2427 | |
2428 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { |
2429 | struct cache *ca = c->cache; |
2430 | |
2431 | if (ca->bdev->bd_dev == dev) |
2432 | return true; |
2433 | } |
2434 | |
2435 | return false; |
2436 | } |
2437 | |
2438 | static bool bch_is_open(dev_t dev) |
2439 | { |
2440 | return bch_is_open_cache(dev) || bch_is_open_backing(dev); |
2441 | } |
2442 | |
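/*
 * Everything the delayed registration worker needs to finish registering a
 * device outside of the sysfs write context.
 */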
2443 | struct async_reg_args { |
2444 | struct delayed_work reg_work; |
2445 | char *path; |
2446 | struct cache_sb *sb; |
2447 | struct cache_sb_disk *sb_disk; |
2448 | struct bdev_handle *bdev_handle; |
2449 | void *holder; |
2450 | }; |
2451 | |
2452 | static void register_bdev_worker(struct work_struct *work) |
2453 | { |
2454 | int fail = false; |
2455 | struct async_reg_args *args = |
2456 | container_of(work, struct async_reg_args, reg_work.work); |
2457 | |
2458 | mutex_lock(&bch_register_lock); |
	if (register_bdev(args->sb, args->sb_disk, args->bdev_handle,
			  args->holder) < 0)
		fail = true;
	mutex_unlock(&bch_register_lock);
2463 | |
2464 | if (fail) |
2465 | pr_info("error %s: fail to register backing device\n" , |
2466 | args->path); |
	kfree(args->sb);
	kfree(args->path);
	kfree(args);
2470 | module_put(THIS_MODULE); |
2471 | } |
2472 | |
2473 | static void register_cache_worker(struct work_struct *work) |
2474 | { |
2475 | int fail = false; |
2476 | struct async_reg_args *args = |
2477 | container_of(work, struct async_reg_args, reg_work.work); |
2478 | |
	/* bdev_release() will be called in bch_cache_release() */
	if (register_cache(args->sb, args->sb_disk, args->bdev_handle,
			   args->holder))
2482 | fail = true; |
2483 | |
2484 | if (fail) |
2485 | pr_info("error %s: fail to register cache device\n" , |
2486 | args->path); |
	kfree(args->sb);
	kfree(args->path);
	kfree(args);
2490 | module_put(THIS_MODULE); |
2491 | } |
2492 | |
2493 | static void register_device_async(struct async_reg_args *args) |
2494 | { |
	if (SB_IS_BDEV(args->sb))
2496 | INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker); |
2497 | else |
2498 | INIT_DELAYED_WORK(&args->reg_work, register_cache_worker); |
2499 | |
2500 | /* 10 jiffies is enough for a delay */ |
	queue_delayed_work(system_wq, &args->reg_work, 10);
2502 | } |
2503 | |
2504 | static void *alloc_holder_object(struct cache_sb *sb) |
2505 | { |
2506 | if (SB_IS_BDEV(sb)) |
		return kzalloc(sizeof(struct cached_dev), GFP_KERNEL);
	return kzalloc(sizeof(struct cache), GFP_KERNEL);
2509 | } |
2510 | |
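/*
 * sysfs store handler for /sys/fs/bcache/register and register_quiet: read
 * the superblock from the given path, reopen the device exclusively with the
 * proper holder object, then register it as a backing or cache device either
 * synchronously or via a worker when async registration is enabled.
 */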
2511 | static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, |
2512 | const char *buffer, size_t size) |
2513 | { |
2514 | const char *err; |
2515 | char *path = NULL; |
2516 | struct cache_sb *sb; |
2517 | struct cache_sb_disk *sb_disk; |
2518 | struct bdev_handle *bdev_handle, *bdev_handle2; |
2519 | void *holder = NULL; |
2520 | ssize_t ret; |
2521 | bool async_registration = false; |
2522 | bool quiet = false; |
2523 | |
2524 | #ifdef CONFIG_BCACHE_ASYNC_REGISTRATION |
2525 | async_registration = true; |
2526 | #endif |
2527 | |
2528 | ret = -EBUSY; |
2529 | err = "failed to reference bcache module" ; |
2530 | if (!try_module_get(THIS_MODULE)) |
2531 | goto out; |
2532 | |
2533 | /* For latest state of bcache_is_reboot */ |
2534 | smp_mb(); |
2535 | err = "bcache is in reboot" ; |
2536 | if (bcache_is_reboot) |
2537 | goto out_module_put; |
2538 | |
2539 | ret = -ENOMEM; |
2540 | err = "cannot allocate memory" ; |
	path = kstrndup(buffer, size, GFP_KERNEL);
2542 | if (!path) |
2543 | goto out_module_put; |
2544 | |
	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
2546 | if (!sb) |
2547 | goto out_free_path; |
2548 | |
2549 | ret = -EINVAL; |
2550 | err = "failed to open device" ; |
	bdev_handle = bdev_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
	if (IS_ERR(bdev_handle))
2553 | goto out_free_sb; |
2554 | |
2555 | err = "failed to set blocksize" ; |
	if (set_blocksize(bdev_handle->bdev, 4096))
2557 | goto out_blkdev_put; |
2558 | |
	err = read_super(sb, bdev_handle->bdev, &sb_disk);
2560 | if (err) |
2561 | goto out_blkdev_put; |
2562 | |
2563 | holder = alloc_holder_object(sb); |
2564 | if (!holder) { |
2565 | ret = -ENOMEM; |
2566 | err = "cannot allocate memory" ; |
2567 | goto out_put_sb_page; |
2568 | } |
2569 | |
2570 | /* Now reopen in exclusive mode with proper holder */ |
	bdev_handle2 = bdev_open_by_dev(bdev_handle->bdev->bd_dev,
			BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
	bdev_release(bdev_handle);
	bdev_handle = bdev_handle2;
	if (IS_ERR(bdev_handle)) {
		ret = PTR_ERR(bdev_handle);
		bdev_handle = NULL;
2578 | if (ret == -EBUSY) { |
2579 | dev_t dev; |
2580 | |
2581 | mutex_lock(&bch_register_lock); |
			if (lookup_bdev(strim(path), &dev) == 0 &&
2583 | bch_is_open(dev)) |
2584 | err = "device already registered" ; |
2585 | else |
2586 | err = "device busy" ; |
			mutex_unlock(&bch_register_lock);
2588 | if (attr == &ksysfs_register_quiet) { |
2589 | quiet = true; |
2590 | ret = size; |
2591 | } |
2592 | } |
2593 | goto out_free_holder; |
2594 | } |
2595 | |
2596 | err = "failed to register device" ; |
2597 | |
2598 | if (async_registration) { |
2599 | /* register in asynchronous way */ |
2600 | struct async_reg_args *args = |
			kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);
2602 | |
2603 | if (!args) { |
2604 | ret = -ENOMEM; |
2605 | err = "cannot allocate memory" ; |
2606 | goto out_free_holder; |
2607 | } |
2608 | |
2609 | args->path = path; |
2610 | args->sb = sb; |
2611 | args->sb_disk = sb_disk; |
2612 | args->bdev_handle = bdev_handle; |
2613 | args->holder = holder; |
2614 | register_device_async(args); |
2615 | /* No wait and returns to user space */ |
2616 | goto async_done; |
2617 | } |
2618 | |
2619 | if (SB_IS_BDEV(sb)) { |
2620 | mutex_lock(&bch_register_lock); |
		ret = register_bdev(sb, sb_disk, bdev_handle, holder);
		mutex_unlock(&bch_register_lock);
		/* bdev_release() will be called in cached_dev_free() */
		if (ret < 0)
			goto out_free_sb;
	} else {
		/* bdev_release() will be called in bch_cache_release() */
		ret = register_cache(sb, sb_disk, bdev_handle, holder);
2629 | if (ret) |
2630 | goto out_free_sb; |
2631 | } |
2632 | |
	kfree(sb);
	kfree(path);
2635 | module_put(THIS_MODULE); |
2636 | async_done: |
2637 | return size; |
2638 | |
2639 | out_free_holder: |
	kfree(holder);
2641 | out_put_sb_page: |
2642 | put_page(virt_to_page(sb_disk)); |
2643 | out_blkdev_put: |
	if (bdev_handle)
		bdev_release(bdev_handle);
out_free_sb:
	kfree(sb);
out_free_path:
	kfree(path);
2650 | path = NULL; |
2651 | out_module_put: |
2652 | module_put(THIS_MODULE); |
2653 | out: |
2654 | if (!quiet) |
2655 | pr_info("error %s: %s\n" , path?path:"" , err); |
2656 | return ret; |
2657 | } |
2658 | |
2659 | |
2660 | struct pdev { |
2661 | struct list_head list; |
2662 | struct cached_dev *dc; |
2663 | }; |
2664 | |
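/*
 * sysfs store handler for /sys/fs/bcache/pendings_cleanup: stop backing
 * devices that are still waiting for their cache set to be registered.
 */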
2665 | static ssize_t bch_pending_bdevs_cleanup(struct kobject *k, |
2666 | struct kobj_attribute *attr, |
2667 | const char *buffer, |
2668 | size_t size) |
2669 | { |
2670 | LIST_HEAD(pending_devs); |
2671 | ssize_t ret = size; |
2672 | struct cached_dev *dc, *tdc; |
2673 | struct pdev *pdev, *tpdev; |
2674 | struct cache_set *c, *tc; |
2675 | |
2676 | mutex_lock(&bch_register_lock); |
2677 | list_for_each_entry_safe(dc, tdc, &uncached_devices, list) { |
		pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
2679 | if (!pdev) |
2680 | break; |
2681 | pdev->dc = dc; |
		list_add(&pdev->list, &pending_devs);
2683 | } |
2684 | |
2685 | list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) { |
2686 | char *pdev_set_uuid = pdev->dc->sb.set_uuid; |
2687 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { |
2688 | char *set_uuid = c->set_uuid; |
2689 | |
			if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
				list_del(&pdev->list);
				kfree(pdev);
2693 | break; |
2694 | } |
2695 | } |
2696 | } |
	mutex_unlock(&bch_register_lock);
2698 | |
2699 | list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) { |
2700 | pr_info("delete pdev %p\n" , pdev); |
		list_del(&pdev->list);
		bcache_device_stop(&pdev->dc->disk);
		kfree(pdev);
2704 | } |
2705 | |
2706 | return ret; |
2707 | } |
2708 | |
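/*
 * Reboot notifier: on shutdown, reject new registrations, stop every cache
 * set and backing device, and wait up to about ten seconds for them to
 * finish unregistering.
 */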
2709 | static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) |
2710 | { |
2711 | if (bcache_is_reboot) |
2712 | return NOTIFY_DONE; |
2713 | |
2714 | if (code == SYS_DOWN || |
2715 | code == SYS_HALT || |
2716 | code == SYS_POWER_OFF) { |
2717 | DEFINE_WAIT(wait); |
2718 | unsigned long start = jiffies; |
2719 | bool stopped = false; |
2720 | |
2721 | struct cache_set *c, *tc; |
2722 | struct cached_dev *dc, *tdc; |
2723 | |
2724 | mutex_lock(&bch_register_lock); |
2725 | |
2726 | if (bcache_is_reboot) |
2727 | goto out; |
2728 | |
		/* New registrations are rejected from now on */
		bcache_is_reboot = true;
		/*
		 * Make sure a registering caller on another CPU core sees
		 * bcache_is_reboot == true before it proceeds.
		 */
2735 | smp_mb(); |
2736 | |
		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		mutex_unlock(&bch_register_lock);
2742 | |
2743 | pr_info("Stopping all devices:\n" ); |
2744 | |
2745 | /* |
2746 | * The reason bch_register_lock is not held to call |
2747 | * bch_cache_set_stop() and bcache_device_stop() is to |
2748 | * avoid potential deadlock during reboot, because cache |
2749 | * set or bcache device stopping process will acquire |
2750 | * bch_register_lock too. |
2751 | * |
2752 | * We are safe here because bcache_is_reboot sets to |
2753 | * true already, register_bcache() will reject new |
2754 | * registration now. bcache_is_reboot also makes sure |
2755 | * bcache_reboot() won't be re-entered on by other thread, |
2756 | * so there is no race in following list iteration by |
2757 | * list_for_each_entry_safe(). |
2758 | */ |
2759 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) |
2760 | bch_cache_set_stop(c); |
2761 | |
		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);
2764 | |
2765 | |
2766 | /* |
2767 | * Give an early chance for other kthreads and |
2768 | * kworkers to stop themselves |
2769 | */ |
2770 | schedule(); |
2771 | |
2772 | /* What's a condition variable? */ |
2773 | while (1) { |
2774 | long timeout = start + 10 * HZ - jiffies; |
2775 | |
2776 | mutex_lock(&bch_register_lock); |
			stopped = list_empty(&bch_cache_sets) &&
				  list_empty(&uncached_devices);
2779 | |
2780 | if (timeout < 0 || stopped) |
2781 | break; |
2782 | |
			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
2787 | schedule_timeout(timeout); |
2788 | } |
2789 | |
		finish_wait(&unregister_wait, &wait);
2791 | |
2792 | if (stopped) |
2793 | pr_info("All devices stopped\n" ); |
2794 | else |
2795 | pr_notice("Timeout waiting for devices to be closed\n" ); |
2796 | out: |
		mutex_unlock(&bch_register_lock);
2798 | } |
2799 | |
2800 | return NOTIFY_DONE; |
2801 | } |
2802 | |
2803 | static struct notifier_block reboot = { |
2804 | .notifier_call = bcache_reboot, |
2805 | .priority = INT_MAX, /* before any real devices */ |
2806 | }; |
2807 | |
2808 | static void bcache_exit(void) |
2809 | { |
2810 | bch_debug_exit(); |
2811 | bch_request_exit(); |
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bch_journal_wq)
		destroy_workqueue(bch_journal_wq);
	if (bch_flush_wq)
		destroy_workqueue(bch_flush_wq);
	bch_btree_exit();

	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
2826 | } |
2827 | |
2828 | /* Check and fixup module parameters */ |
2829 | static void check_module_parameters(void) |
2830 | { |
2831 | if (bch_cutoff_writeback_sync == 0) |
2832 | bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC; |
2833 | else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) { |
2834 | pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n" , |
2835 | bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX); |
2836 | bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX; |
2837 | } |
2838 | |
2839 | if (bch_cutoff_writeback == 0) |
2840 | bch_cutoff_writeback = CUTOFF_WRITEBACK; |
2841 | else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) { |
2842 | pr_warn("set bch_cutoff_writeback (%u) to max value %u\n" , |
2843 | bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX); |
2844 | bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX; |
2845 | } |
2846 | |
2847 | if (bch_cutoff_writeback > bch_cutoff_writeback_sync) { |
2848 | pr_warn("set bch_cutoff_writeback (%u) to %u\n" , |
2849 | bch_cutoff_writeback, bch_cutoff_writeback_sync); |
2850 | bch_cutoff_writeback = bch_cutoff_writeback_sync; |
2851 | } |
2852 | } |
2853 | |
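/* Module init: set up locks, block major, workqueues, sysfs and debugfs. */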
2854 | static int __init bcache_init(void) |
2855 | { |
2856 | static const struct attribute *files[] = { |
2857 | &ksysfs_register.attr, |
2858 | &ksysfs_register_quiet.attr, |
2859 | &ksysfs_pendings_cleanup.attr, |
2860 | NULL |
2861 | }; |
2862 | |
2863 | check_module_parameters(); |
2864 | |
2865 | mutex_init(&bch_register_lock); |
2866 | init_waitqueue_head(&unregister_wait); |
2867 | register_reboot_notifier(&reboot); |
2868 | |
2869 | bcache_major = register_blkdev(0, "bcache" ); |
2870 | if (bcache_major < 0) { |
2871 | unregister_reboot_notifier(&reboot); |
		mutex_destroy(&bch_register_lock);
2873 | return bcache_major; |
2874 | } |
2875 | |
2876 | if (bch_btree_init()) |
2877 | goto err; |
2878 | |
	bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
2880 | if (!bcache_wq) |
2881 | goto err; |
2882 | |
2883 | /* |
2884 | * Let's not make this `WQ_MEM_RECLAIM` for the following reasons: |
2885 | * |
2886 | * 1. It used `system_wq` before which also does no memory reclaim. |
2887 | * 2. With `WQ_MEM_RECLAIM` desktop stalls, increased boot times, and |
2888 | * reduced throughput can be observed. |
2889 | * |
	 * We still want to use our own queue to not congest the `system_wq`.
2891 | */ |
	bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
	if (!bch_flush_wq)
		goto err;

	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
	if (!bch_journal_wq)
		goto err;

	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
2901 | if (!bcache_kobj) |
2902 | goto err; |
2903 | |
2904 | if (bch_request_init() || |
	    sysfs_create_files(bcache_kobj, files))
2906 | goto err; |
2907 | |
2908 | bch_debug_init(); |
2909 | |
2910 | bcache_is_reboot = false; |
2911 | |
2912 | return 0; |
2913 | err: |
2914 | bcache_exit(); |
2915 | return -ENOMEM; |
2916 | } |
2917 | |
2918 | /* |
2919 | * Module hooks |
2920 | */ |
2921 | module_exit(bcache_exit); |
2922 | module_init(bcache_init); |
2923 | |
2924 | module_param(bch_cutoff_writeback, uint, 0); |
2925 | MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback" ); |
2926 | |
2927 | module_param(bch_cutoff_writeback_sync, uint, 0); |
2928 | MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback" ); |
2929 | |
2930 | MODULE_DESCRIPTION("Bcache: a Linux block layer cache" ); |
2931 | MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>" ); |
2932 | MODULE_LICENSE("GPL" ); |
2933 | |