// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated
 * and now resides at another location in the cache. We do guarantee, though,
 * that if the value is read from the cache, it belongs to the mapped lba.
 * To guarantee that reads are ordered with respect to preceding writes, a
 * flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
						bio_iter, advanced_bio);
}

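/*
 * Resolve the L2P entries for a multi-sector read. Sectors that hit the
 * write buffer (or map to an empty address) are completed in place and
 * marked in @read_bitmap; the remaining sectors are collected in
 * rqd->ppa_list for a device read.
 */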
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 struct bio *bio, sector_t blba,
				 unsigned long *read_bitmap)
{
	void *meta_list = rqd->meta_list;
	struct ppa_addr ppas[NVM_MAX_VLBA];
	int nr_secs = rqd->nr_ppas;
	bool advanced_bio = false;
	int i, j = 0;

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta->lba = addr_empty;

			if (unlikely(!advanced_bio)) {
				bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
				advanced_bio = true;
			}

			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i,
							advanced_bio)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta->lba = cpu_to_le64(lba);
			advanced_bio = true;
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->is_seq = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

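/*
 * Sanity check a sequential read against the LBAs stored in the
 * sector OOB metadata returned by the device.
 */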
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
				sector_t blba)
{
	void *meta_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	if (!pblk_is_oob_meta_supported(pblk))
		return;

	for (i = 0; i < nr_lbas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		u64 lba = le64_to_cpu(meta->lba);

		if (lba == ADDR_EMPTY)
			continue;

		if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[i], "seq", i);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							lba, (u64)blba + i);
			WARN_ON(1);
		}
	}
}

/*
 * There can be holes (ADDR_EMPTY entries) in the lba list.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
				 u64 *lba_list, int nr_lbas)
{
	void *meta_lba_list = rqd->meta_list;
	int i, j;

	if (!pblk_is_oob_meta_supported(pblk))
		return;

	for (i = 0, j = 0; i < nr_lbas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk,
							   meta_lba_list, j);
		u64 lba = lba_list[i];
		u64 meta_lba;

		if (lba == ADDR_EMPTY)
			continue;

		meta_lba = le64_to_cpu(meta->lba);

		if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[j], "rnd", j);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							meta_lba, lba);
			WARN_ON(1);
		}

		j++;
	}

	WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

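/* Complete the user-facing bio once the read has been served. */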
static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
	bio_endio(bio);
}

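/*
 * Common read completion path: account the I/O, verify the returned OOB
 * metadata, drop the internal bio and (optionally) the line references,
 * and free the request.
 */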
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *int_bio = rqd->bio;
	unsigned long start_time = r_ctx->start_time;

	generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);

	pblk_read_check_seq(pblk, rqd, r_ctx->lba);

	if (int_bio)
		bio_put(int_bio);

	if (put_line)
		pblk_rq_to_line_put(pblk, rqd);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

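/* End-of-I/O callback for reads served entirely from the device. */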
static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = (struct bio *)r_ctx->private;

	pblk_end_user_read(bio);
	__pblk_end_io_read(pblk, rqd, true);
}

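/*
 * Completion path for partial reads: copy the sectors read from the
 * device into the holes of the original bio, restore the per-sector
 * metadata saved at setup time, and complete the original bio.
 */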
static void pblk_end_partial_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_pr_ctx *pr_ctx = r_ctx->private;
	struct pblk_sec_meta *meta;
	struct bio *new_bio = rqd->bio;
	struct bio *bio = pr_ctx->orig_bio;
	struct bio_vec src_bv, dst_bv;
	void *meta_list = rqd->meta_list;
	int bio_init_idx = pr_ctx->bio_init_idx;
	unsigned long *read_bitmap = pr_ctx->bitmap;
	int nr_secs = pr_ctx->orig_nr_secs;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	void *src_p, *dst_p;
	int hole, i;

	if (unlikely(nr_holes == 1)) {
		struct ppa_addr ppa;

		ppa = rqd->ppa_addr;
		rqd->ppa_list = pr_ctx->ppa_ptr;
		rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
		rqd->ppa_list[0] = ppa;
	}

	for (i = 0; i < nr_secs; i++) {
		meta = pblk_get_meta(pblk, meta_list, i);
		pr_ctx->lba_list_media[i] = le64_to_cpu(meta->lba);
		meta->lba = cpu_to_le64(pr_ctx->lba_list_mem[i]);
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		struct pblk_line *line;

		line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
		kref_put(&line->ref, pblk_line_put);

		meta = pblk_get_meta(pblk, meta_list, hole);
		meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);

		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, &pblk->page_bio_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);
	kfree(pr_ctx);

	/* restore original request */
	rqd->bio = NULL;
	rqd->nr_ppas = nr_secs;

	bio_endio(bio);
	__pblk_end_io_read(pblk, rqd, false);
}

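/*
 * Prepare a partial read: allocate an internal bio covering only the
 * sectors that must come from the device and stash the state needed to
 * reassemble the original bio on completion.
 */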
static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
				   unsigned int bio_init_idx,
				   unsigned long *read_bitmap,
				   int nr_holes)
{
	void *meta_list = rqd->meta_list;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_pr_ctx *pr_ctx;
	struct bio *new_bio, *bio = r_ctx->private;
	int nr_secs = rqd->nr_ppas;
	int i;

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto fail_bio_put;

	if (nr_holes != new_bio->bi_vcnt) {
		WARN_ONCE(1, "pblk: malformed bio\n");
		goto fail_free_pages;
	}

	pr_ctx = kzalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
	if (!pr_ctx)
		goto fail_free_pages;

	for (i = 0; i < nr_secs; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);

		pr_ctx->lba_list_mem[i] = le64_to_cpu(meta->lba);
	}

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;

	pr_ctx->orig_bio = bio;
	bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
	pr_ctx->bio_init_idx = bio_init_idx;
	pr_ctx->orig_nr_secs = nr_secs;
	r_ctx->private = pr_ctx;

	if (unlikely(nr_holes == 1)) {
		pr_ctx->ppa_ptr = rqd->ppa_list;
		pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}
	return 0;

fail_free_pages:
	pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
fail_bio_put:
	bio_put(new_bio);

	return -ENOMEM;
}

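/*
 * Serve a read that was only partially satisfied from the write buffer:
 * the remaining holes are read from the device asynchronously.
 */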
static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				 unsigned int bio_init_idx,
				 unsigned long *read_bitmap, int nr_secs)
{
	int nr_holes;
	int ret;

	nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);

	if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
				    nr_holes))
		return NVM_IO_ERR;

	rqd->end_io = pblk_end_partial_read;

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pblk_err(pblk, "partial read IO submission failed\n");
		goto err;
	}

	return NVM_IO_OK;

err:
	pblk_err(pblk, "failed to perform partial read\n");

	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_ERR;
}

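/* Single-sector counterpart of pblk_read_ppalist_rq(). */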
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
			 sector_t lba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta->lba = addr_empty;
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}

		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta->lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}
}

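/*
 * Entry point for user reads. Depending on how many sectors hit the
 * write buffer, the request is completed directly, submitted to the
 * device as a whole, or handled as a partial read.
 */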
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct request_queue *q = dev->q;
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	unsigned int bio_init_idx;
	DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
	int ret = NVM_IO_ERR;

	generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
			      &pblk->disk->part0);

	bitmap_zero(read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->nr_ppas = nr_secs;
	rqd->bio = NULL; /* cloned bio if needed */
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->start_time = jiffies;
	r_ctx->lba = blba;
	r_ctx->private = bio; /* original bio */

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	if (pblk_alloc_rqd_meta(pblk, rqd))
		goto fail_rqd_free;

	if (nr_secs > 1)
		pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
	else
		pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);

	if (bitmap_full(read_bitmap, nr_secs)) {
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
		return NVM_IO_DONE;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
		if (!int_bio) {
			pblk_err(pblk, "could not clone read bio\n");
			goto fail_end_io;
		}

		rqd->bio = int_bio;

		if (pblk_submit_io(pblk, rqd)) {
			pblk_err(pblk, "read IO submission failed\n");
			ret = NVM_IO_ERR;
			goto fail_end_io;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
				    nr_secs);
	if (ret)
		goto fail_meta_free;

	return NVM_IO_OK;

fail_meta_free:
	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_rqd_free:
	pblk_free_rqd(pblk, rqd, PBLK_READ);
	return ret;
fail_end_io:
	__pblk_end_io_read(pblk, rqd, false);
	return ret;
}

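/*
 * Collect the sectors of a GC request that are still mapped to the line
 * being collected; sectors whose L2P entry no longer matches the GC
 * physical address are cleared from the lists and skipped.
 */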
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

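/* Single-sector counterpart of read_ppalist_rq_gc(). */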
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

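/*
 * Synchronous read path used by the garbage collector to fetch still-valid
 * data from the line being collected.
 */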
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int data_len;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	if (gc_rq->nr_secs > 1) {
		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	data_len = (gc_rq->secs_to_gc) * geo->csecs;
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "could not allocate GC bio (%lu)\n",
								PTR_ERR(bio));
		ret = PTR_ERR(bio);
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;
	rqd.bio = bio;

	if (pblk_submit_io_sync(pblk, &rqd)) {
		ret = -EIO;
		pblk_err(pblk, "GC read request failed\n");
		goto err_free_bio;
	}

	pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_PBLK_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;

err_free_bio:
	bio_put(bio);
err_free_dma:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}