1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /******************************************************************************* |
3 | * Filename: target_core_iblock.c |
4 | * |
5 | * This file contains the Storage Engine <-> Linux BlockIO transport |
6 | * specific functions. |
7 | * |
8 | * (c) Copyright 2003-2013 Datera, Inc. |
9 | * |
10 | * Nicholas A. Bellinger <nab@kernel.org> |
11 | * |
12 | ******************************************************************************/ |
13 | |
14 | #include <linux/string.h> |
15 | #include <linux/parser.h> |
16 | #include <linux/timer.h> |
17 | #include <linux/fs.h> |
18 | #include <linux/blkdev.h> |
19 | #include <linux/blk-integrity.h> |
20 | #include <linux/slab.h> |
21 | #include <linux/spinlock.h> |
22 | #include <linux/bio.h> |
23 | #include <linux/file.h> |
24 | #include <linux/module.h> |
25 | #include <linux/scatterlist.h> |
26 | #include <linux/pr.h> |
27 | #include <scsi/scsi_proto.h> |
28 | #include <scsi/scsi_common.h> |
29 | #include <asm/unaligned.h> |
30 | |
31 | #include <target/target_core_base.h> |
32 | #include <target/target_core_backend.h> |
33 | |
34 | #include "target_core_iblock.h" |
35 | #include "target_core_pr.h" |
36 | |
37 | #define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */ |
38 | #define IBLOCK_BIO_POOL_SIZE 128 |
39 | |
40 | static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev) |
41 | { |
42 | return container_of(dev, struct iblock_dev, dev); |
43 | } |
44 | |
45 | |
46 | static int iblock_attach_hba(struct se_hba *hba, u32 host_id) |
47 | { |
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
51 | return 0; |
52 | } |
53 | |
54 | static void iblock_detach_hba(struct se_hba *hba) |
55 | { |
56 | } |
57 | |
58 | static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name) |
59 | { |
60 | struct iblock_dev *ib_dev = NULL; |
61 | |
	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
				   GFP_KERNEL);
	if (!ib_dev->ibd_plug)
		goto free_dev;

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;

free_dev:
	kfree(ib_dev);
79 | return NULL; |
80 | } |
81 | |
82 | static bool iblock_configure_unmap(struct se_device *dev) |
83 | { |
84 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
85 | |
	return target_configure_unmap_from_queue(&dev->dev_attrib,
						 ib_dev->ibd_bd);
88 | } |
89 | |
90 | static int iblock_configure_device(struct se_device *dev) |
91 | { |
92 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
93 | struct request_queue *q; |
94 | struct bdev_handle *bdev_handle; |
95 | struct block_device *bd; |
96 | struct blk_integrity *bi; |
97 | blk_mode_t mode = BLK_OPEN_READ; |
98 | unsigned int max_write_zeroes_sectors; |
99 | int ret; |
100 | |
101 | if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) { |
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	if (!ib_dev->ibd_readonly)
		mode |= BLK_OPEN_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bdev_handle = bdev_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
					NULL);
	if (IS_ERR(bdev_handle)) {
		ret = PTR_ERR(bdev_handle);
		goto out_free_bioset;
	}
	ib_dev->ibd_bdev_handle = bdev_handle;
	ib_dev->ibd_bd = bd = bdev_handle->bdev;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
132 | dev->dev_attrib.hw_max_sectors = mult_frac(queue_max_hw_sectors(q), |
133 | SECTOR_SIZE, |
134 | dev->dev_attrib.hw_block_size); |
135 | dev->dev_attrib.hw_queue_depth = q->nr_requests; |
136 | |
137 | /* |
138 | * Enable write same emulation for IBLOCK and use 0xFFFF as |
139 | * the smaller WRITE_SAME(10) only has a two-byte block count. |
140 | */ |
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
142 | if (max_write_zeroes_sectors) |
143 | dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors; |
144 | else |
145 | dev->dev_attrib.max_write_same_len = 0xFFFF; |
146 | |
	if (bdev_nonrot(bd))
148 | dev->dev_attrib.is_nonrot = 1; |
149 | |
	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = &ib_dev->ibd_bio_set;

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->profile->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
175 | &bs->bio_integrity_pool); |
176 | } |
177 | dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type; |
178 | } |
179 | |
180 | return 0; |
181 | |
182 | out_blkdev_put: |
	bdev_release(ib_dev->ibd_bdev_handle);
184 | out_free_bioset: |
185 | bioset_exit(&ib_dev->ibd_bio_set); |
186 | out: |
187 | return ret; |
188 | } |
189 | |
190 | static void iblock_dev_call_rcu(struct rcu_head *p) |
191 | { |
192 | struct se_device *dev = container_of(p, struct se_device, rcu_head); |
193 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
194 | |
	kfree(ib_dev->ibd_plug);
	kfree(ib_dev);
197 | } |
198 | |
199 | static void iblock_free_device(struct se_device *dev) |
200 | { |
	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
202 | } |
203 | |
204 | static void iblock_destroy_device(struct se_device *dev) |
205 | { |
206 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
207 | |
208 | if (ib_dev->ibd_bdev_handle) |
		bdev_release(ib_dev->ibd_bdev_handle);
210 | bioset_exit(&ib_dev->ibd_bio_set); |
211 | } |
212 | |
213 | static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev) |
214 | { |
	struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
216 | struct iblock_dev_plug *ib_dev_plug; |
217 | |
218 | /* |
	 * Each se_device has a per-CPU work context this can be run from.
	 * We shouldn't have multiple threads on the same CPU calling this
	 * at the same time.
222 | */ |
223 | ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()]; |
	if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
225 | return NULL; |
226 | |
227 | blk_start_plug(&ib_dev_plug->blk_plug); |
228 | return &ib_dev_plug->se_plug; |
229 | } |
230 | |
231 | static void iblock_unplug_device(struct se_dev_plug *se_plug) |
232 | { |
233 | struct iblock_dev_plug *ib_dev_plug = container_of(se_plug, |
234 | struct iblock_dev_plug, se_plug); |
235 | |
236 | blk_finish_plug(&ib_dev_plug->blk_plug); |
	clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
238 | } |
239 | |
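/*
 * Report the number of addressable logical blocks at the exported
 * block_size. READ CAPACITY returns the LBA of the last block, hence
 * the "- 1" below. When the exported block_size differs from the
 * backing device's logical block size, the count is rescaled; e.g. a
 * 4096-byte backing device exported with a 512-byte block_size
 * reports 8x as many blocks (the "<<= 3" case).
 */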
240 | static sector_t iblock_get_blocks(struct se_device *dev) |
241 | { |
242 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
	u32 block_size = bdev_logical_block_size(ib_dev->ibd_bd);
	unsigned long long blocks_long =
		div_u64(bdev_nr_bytes(ib_dev->ibd_bd), block_size) - 1;
246 | |
247 | if (block_size == dev->dev_attrib.block_size) |
248 | return blocks_long; |
249 | |
250 | switch (block_size) { |
251 | case 4096: |
252 | switch (dev->dev_attrib.block_size) { |
253 | case 2048: |
254 | blocks_long <<= 1; |
255 | break; |
256 | case 1024: |
257 | blocks_long <<= 2; |
258 | break; |
259 | case 512: |
260 | blocks_long <<= 3; |
261 | break; |
262 | default: |
263 | break; |
264 | } |
265 | break; |
266 | case 2048: |
267 | switch (dev->dev_attrib.block_size) { |
268 | case 4096: |
269 | blocks_long >>= 1; |
270 | break; |
271 | case 1024: |
272 | blocks_long <<= 1; |
273 | break; |
274 | case 512: |
275 | blocks_long <<= 2; |
276 | break; |
277 | default: |
278 | break; |
279 | } |
280 | break; |
281 | case 1024: |
282 | switch (dev->dev_attrib.block_size) { |
283 | case 4096: |
284 | blocks_long >>= 2; |
285 | break; |
286 | case 2048: |
287 | blocks_long >>= 1; |
288 | break; |
289 | case 512: |
290 | blocks_long <<= 1; |
291 | break; |
292 | default: |
293 | break; |
294 | } |
295 | break; |
296 | case 512: |
297 | switch (dev->dev_attrib.block_size) { |
298 | case 4096: |
299 | blocks_long >>= 3; |
300 | break; |
301 | case 2048: |
302 | blocks_long >>= 2; |
303 | break; |
304 | case 1024: |
305 | blocks_long >>= 1; |
306 | break; |
307 | default: |
308 | break; |
309 | } |
310 | break; |
311 | default: |
312 | break; |
313 | } |
314 | |
315 | return blocks_long; |
316 | } |
317 | |
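/*
 * A command may be split across multiple bios; ibr->pending counts the
 * bios still in flight (plus, on the read/write path, one reference
 * held while the bios are being submitted). Only the final decrement
 * reports status back to the target core and frees the iblock_req.
 */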
318 | static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status) |
319 | { |
320 | struct iblock_req *ibr = cmd->priv; |
321 | u8 status; |
322 | |
	if (!refcount_dec_and_test(&ibr->pending))
		return;

	if (blk_status == BLK_STS_RESV_CONFLICT)
		status = SAM_STAT_RESERVATION_CONFLICT;
	else if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
335 | } |
336 | |
337 | static void iblock_bio_done(struct bio *bio) |
338 | { |
339 | struct se_cmd *cmd = bio->bi_private; |
340 | struct iblock_req *ibr = cmd->priv; |
341 | blk_status_t blk_status = bio->bi_status; |
342 | |
343 | if (bio->bi_status) { |
		pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
349 | smp_mb__after_atomic(); |
350 | } |
351 | |
352 | bio_put(bio); |
353 | |
354 | iblock_complete_cmd(cmd, blk_status); |
355 | } |
356 | |
357 | static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, |
358 | blk_opf_t opf) |
359 | { |
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
361 | struct bio *bio; |
362 | |
363 | /* |
	 * Only allocate as many vector entries as the bio code allows us to;
365 | * we'll loop later on until we have handled the whole request. |
366 | */ |
	bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
			       GFP_NOIO, &ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
371 | return NULL; |
372 | } |
373 | |
374 | bio->bi_private = cmd; |
375 | bio->bi_end_io = &iblock_bio_done; |
376 | bio->bi_iter.bi_sector = lba; |
377 | |
378 | return bio; |
379 | } |
380 | |
381 | static void iblock_submit_bios(struct bio_list *list) |
382 | { |
383 | struct blk_plug plug; |
384 | struct bio *bio; |
385 | /* |
386 | * The block layer handles nested plugs, so just plug/unplug to handle |
387 | * fabric drivers that didn't support batching and multi bio cmds. |
388 | */ |
389 | blk_start_plug(&plug); |
	while ((bio = bio_list_pop(list)))
391 | submit_bio(bio); |
392 | blk_finish_plug(&plug); |
393 | } |
394 | |
395 | static void iblock_end_io_flush(struct bio *bio) |
396 | { |
397 | struct se_cmd *cmd = bio->bi_private; |
398 | |
399 | if (bio->bi_status) |
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
401 | |
402 | if (cmd) { |
403 | if (bio->bi_status) |
404 | target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); |
405 | else |
406 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
407 | } |
408 | |
409 | bio_put(bio); |
410 | } |
411 | |
412 | /* |
 * Implement SYNCHRONIZE CACHE. Note that we can't handle lba ranges and must
414 | * always flush the whole cache. |
415 | */ |
416 | static sense_reason_t |
417 | iblock_execute_sync_cache(struct se_cmd *cmd) |
418 | { |
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
420 | int immed = (cmd->t_task_cdb[1] & 0x2); |
421 | struct bio *bio; |
422 | |
423 | /* |
424 | * If the Immediate bit is set, queue up the GOOD response |
425 | * for this SYNCHRONIZE_CACHE op. |
426 | */ |
427 | if (immed) |
428 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
429 | |
	bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
			GFP_KERNEL);
432 | bio->bi_end_io = iblock_end_io_flush; |
433 | if (!immed) |
434 | bio->bi_private = cmd; |
435 | submit_bio(bio); |
436 | return 0; |
437 | } |
438 | |
439 | static sense_reason_t |
440 | iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) |
441 | { |
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
443 | struct se_device *dev = cmd->se_dev; |
444 | int ret; |
445 | |
	ret = blkdev_issue_discard(bdev,
				   target_to_linux_sector(dev, lba),
				   target_to_linux_sector(dev, nolb),
				   GFP_KERNEL);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
452 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
453 | } |
454 | |
455 | return 0; |
456 | } |
457 | |
458 | static sense_reason_t |
459 | iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd) |
460 | { |
461 | struct se_device *dev = cmd->se_dev; |
462 | struct scatterlist *sg = &cmd->t_data_sg[0]; |
463 | unsigned char *buf, *not_zero; |
464 | int ret; |
465 | |
	buf = kmap(sg_page(sg)) + sg->offset;
467 | if (!buf) |
468 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
469 | /* |
470 | * Fall back to block_execute_write_same() slow-path if |
471 | * incoming WRITE_SAME payload does not contain zeros. |
472 | */ |
	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
	kunmap(sg_page(sg));
475 | |
476 | if (not_zero) |
477 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
478 | |
	ret = blkdev_issue_zeroout(bdev,
				target_to_linux_sector(dev, cmd->t_task_lba),
				target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd)),
				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
484 | if (ret) |
485 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
486 | |
487 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
488 | return 0; |
489 | } |
490 | |
491 | static sense_reason_t |
492 | iblock_execute_write_same(struct se_cmd *cmd) |
493 | { |
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
495 | struct iblock_req *ibr; |
496 | struct scatterlist *sg; |
497 | struct bio *bio; |
498 | struct bio_list list; |
499 | struct se_device *dev = cmd->se_dev; |
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	sector_t sectors = target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd));
503 | |
504 | if (cmd->prot_op) { |
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
507 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
508 | } |
509 | |
510 | if (!cmd->t_data_nents) |
511 | return TCM_INVALID_CDB_FIELD; |
512 | |
513 | sg = &cmd->t_data_sg[0]; |
514 | |
515 | if (cmd->t_data_nents > 1 || |
516 | sg->length != cmd->se_dev->dev_attrib.block_size) { |
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
520 | return TCM_INVALID_CDB_FIELD; |
521 | } |
522 | |
523 | if (bdev_write_zeroes_sectors(bdev)) { |
524 | if (!iblock_execute_zero_out(bdev, cmd)) |
525 | return 0; |
526 | } |
527 | |
	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
529 | if (!ibr) |
530 | goto fail; |
531 | cmd->priv = ibr; |
532 | |
	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
534 | if (!bio) |
535 | goto fail_free_ibr; |
536 | |
	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 1);
541 | |
542 | while (sectors) { |
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
552 | } |
553 | |
554 | /* Always in 512 byte units for Linux/Block */ |
555 | block_lba += sg->length >> SECTOR_SHIFT; |
556 | sectors -= sg->length >> SECTOR_SHIFT; |
557 | } |
558 | |
	iblock_submit_bios(&list);
560 | return 0; |
561 | |
562 | fail_put_bios: |
	while ((bio = bio_list_pop(&list)))
564 | bio_put(bio); |
565 | fail_free_ibr: |
	kfree(ibr);
567 | fail: |
568 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
569 | } |
570 | |
571 | enum { |
572 | Opt_udev_path, Opt_readonly, Opt_force, Opt_err |
573 | }; |
574 | |
575 | static match_table_t tokens = { |
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
579 | {Opt_err, NULL} |
580 | }; |
581 | |
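/*
 * Parse a comma-separated parameter string written to the device's
 * configfs control file. For example (configfs paths and device names
 * here are illustrative):
 *
 *   echo "udev_path=/dev/sdb,readonly=0" > \
 *       /sys/kernel/config/target/core/iblock_0/mydev/control
 */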
582 | static ssize_t iblock_set_configfs_dev_params(struct se_device *dev, |
583 | const char *page, ssize_t count) |
584 | { |
585 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
586 | char *orig, *ptr, *arg_p, *opts; |
587 | substring_t args[MAX_OPT_ARGS]; |
588 | int ret = 0, token; |
589 | unsigned long tmp_readonly; |
590 | |
	opts = kstrdup(page, GFP_KERNEL);
592 | if (!opts) |
593 | return -ENOMEM; |
594 | |
595 | orig = opts; |
596 | |
	while ((ptr = strsep(&opts, ",\n")) != NULL) {
598 | if (!*ptr) |
599 | continue; |
600 | |
		token = match_token(ptr, tokens, args);
602 | switch (token) { |
603 | case Opt_udev_path: |
604 | if (ib_dev->ibd_bd) { |
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
607 | ret = -EEXIST; |
608 | goto out; |
609 | } |
610 | if (match_strlcpy(ib_dev->ibd_udev_path, &args[0], |
611 | SE_UDEV_PATH_LEN) == 0) { |
612 | ret = -EINVAL; |
613 | break; |
614 | } |
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
616 | ib_dev->ibd_udev_path); |
617 | ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; |
618 | break; |
619 | case Opt_readonly: |
620 | arg_p = match_strdup(&args[0]); |
621 | if (!arg_p) { |
622 | ret = -ENOMEM; |
623 | break; |
624 | } |
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
630 | goto out; |
631 | } |
632 | ib_dev->ibd_readonly = tmp_readonly; |
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
634 | break; |
635 | case Opt_force: |
636 | break; |
637 | default: |
638 | break; |
639 | } |
640 | } |
641 | |
642 | out: |
	kfree(orig);
644 | return (!ret) ? count : ret; |
645 | } |
646 | |
647 | static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b) |
648 | { |
649 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
650 | struct block_device *bd = ib_dev->ibd_bd; |
651 | ssize_t bl = 0; |
652 | |
653 | if (bd) |
		bl += sprintf(b + bl, "iBlock device: %pg", bd);
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
			"CLAIMED: IBLOCK");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}
667 | } |
668 | |
669 | return bl; |
670 | } |
671 | |
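/*
 * Attach a bio_integrity_payload carrying the T10 PI metadata from
 * cmd->t_prot_sg to @bio. The seed is the first protection interval
 * covered by this bio, expressed in integrity-interval units rather
 * than 512-byte sectors.
 */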
672 | static int |
673 | iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio, |
674 | struct sg_mapping_iter *miter) |
675 | { |
676 | struct se_device *dev = cmd->se_dev; |
677 | struct blk_integrity *bi; |
678 | struct bio_integrity_payload *bip; |
679 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
680 | int rc; |
681 | size_t resid, len; |
682 | |
	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
686 | return -ENODEV; |
687 | } |
688 | |
	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
693 | } |
694 | |
695 | /* virtual start sector must be in integrity interval units */ |
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
697 | (bi->interval_exp - SECTOR_SHIFT)); |
698 | |
	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
700 | (unsigned long long)bip->bip_iter.bi_sector); |
701 | |
702 | resid = bio_integrity_bytes(bi, bio_sectors(bio)); |
703 | while (resid > 0 && sg_miter_next(miter)) { |
704 | |
705 | len = min_t(size_t, miter->length, resid); |
706 | rc = bio_integrity_add_page(bio, miter->page, len, |
707 | offset_in_page(miter->addr)); |
708 | if (rc != len) { |
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
710 | sg_miter_stop(miter); |
711 | return -ENOMEM; |
712 | } |
713 | |
		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
715 | miter->page, len, offset_in_page(miter->addr)); |
716 | |
717 | resid -= len; |
718 | if (len < miter->length) |
719 | miter->consumed -= miter->length - len; |
720 | } |
721 | sg_miter_stop(miter); |
722 | |
723 | return 0; |
724 | } |
725 | |
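/*
 * Build and submit one or more bios covering the command's scatterlist.
 * A new bio is started whenever bio_add_page() cannot accept a full S/G
 * entry, and accumulated bios are flushed to the block layer in batches
 * of IBLOCK_MAX_BIO_PER_TASK to bound per-command memory usage.
 */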
726 | static sense_reason_t |
727 | iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, |
728 | enum dma_data_direction data_direction) |
729 | { |
730 | struct se_device *dev = cmd->se_dev; |
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
732 | struct iblock_req *ibr; |
733 | struct bio *bio; |
734 | struct bio_list list; |
735 | struct scatterlist *sg; |
736 | u32 sg_num = sgl_nents; |
737 | blk_opf_t opf; |
738 | unsigned bio_cnt; |
739 | int i, rc; |
740 | struct sg_mapping_iter prot_miter; |
741 | unsigned int miter_dir; |
742 | |
743 | if (data_direction == DMA_TO_DEVICE) { |
744 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
745 | |
746 | /* |
747 | * Set bits to indicate WRITE_ODIRECT so we are not throttled |
748 | * by WBT. |
749 | */ |
750 | opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; |
751 | /* |
752 | * Force writethrough using REQ_FUA if a volatile write cache |
753 | * is not enabled, or if initiator set the Force Unit Access bit. |
754 | */ |
755 | miter_dir = SG_MITER_TO_SG; |
		if (bdev_fua(ib_dev->ibd_bd)) {
			if (cmd->se_cmd_flags & SCF_FUA)
				opf |= REQ_FUA;
			else if (!bdev_write_cache(ib_dev->ibd_bd))
760 | opf |= REQ_FUA; |
761 | } |
762 | } else { |
763 | opf = REQ_OP_READ; |
764 | miter_dir = SG_MITER_FROM_SG; |
765 | } |
766 | |
	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
768 | if (!ibr) |
769 | goto fail; |
770 | cmd->priv = ibr; |
771 | |
772 | if (!sgl_nents) { |
		refcount_set(&ibr->pending, 1);
774 | iblock_complete_cmd(cmd, BLK_STS_OK); |
775 | return 0; |
776 | } |
777 | |
	bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
779 | if (!bio) |
780 | goto fail_free_ibr; |
781 | |
	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 2);
786 | bio_cnt = 1; |
787 | |
788 | if (cmd->prot_type && dev->dev_attrib.pi_prot_type) |
		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
			       miter_dir);
791 | |
792 | for_each_sg(sgl, sg, sgl_nents, i) { |
793 | /* |
		 * XXX: if the length the device accepts is shorter than the
		 * length of the S/G list entry this will cause an
		 * endless loop. Better hope no driver uses huge pages.
797 | */ |
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
802 | if (rc) |
803 | goto fail_put_bios; |
804 | } |
805 | |
806 | if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) { |
				iblock_submit_bios(&list);
808 | bio_cnt = 0; |
809 | } |
810 | |
			bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
812 | if (!bio) |
813 | goto fail_put_bios; |
814 | |
			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
817 | bio_cnt++; |
818 | } |
819 | |
820 | /* Always in 512 byte units for Linux/Block */ |
821 | block_lba += sg->length >> SECTOR_SHIFT; |
822 | sg_num--; |
823 | } |
824 | |
825 | if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
827 | if (rc) |
828 | goto fail_put_bios; |
829 | } |
830 | |
	iblock_submit_bios(&list);
832 | iblock_complete_cmd(cmd, BLK_STS_OK); |
833 | return 0; |
834 | |
835 | fail_put_bios: |
	while ((bio = bio_list_pop(&list)))
837 | bio_put(bio); |
838 | fail_free_ibr: |
	kfree(ibr);
840 | fail: |
841 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
842 | } |
843 | |
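/*
 * PERSISTENT RESERVE OUT passthrough: map the SCSI service action onto
 * the backing device's block layer pr_ops, so the reservation state is
 * held by the underlying device rather than by the target core.
 */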
844 | static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key, |
845 | u64 sa_key, u8 type, bool aptpl) |
846 | { |
847 | struct se_device *dev = cmd->se_dev; |
848 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
849 | struct block_device *bdev = ib_dev->ibd_bd; |
850 | const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; |
851 | int ret; |
852 | |
853 | if (!ops) { |
		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
855 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
856 | } |
857 | |
858 | switch (sa) { |
859 | case PRO_REGISTER: |
860 | case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: |
861 | if (!ops->pr_register) { |
			pr_err("block device does not support pr_register.\n");
863 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
864 | } |
865 | |
866 | /* The block layer pr ops always enables aptpl */ |
867 | if (!aptpl) |
			pr_info("APTPL not set by initiator, but will be used.\n");
869 | |
870 | ret = ops->pr_register(bdev, key, sa_key, |
871 | sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY); |
872 | break; |
873 | case PRO_RESERVE: |
874 | if (!ops->pr_reserve) { |
			pr_err("block_device does not support pr_reserve.\n");
876 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
877 | } |
878 | |
879 | ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0); |
880 | break; |
881 | case PRO_CLEAR: |
882 | if (!ops->pr_clear) { |
			pr_err("block_device does not support pr_clear.\n");
884 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
885 | } |
886 | |
887 | ret = ops->pr_clear(bdev, key); |
888 | break; |
889 | case PRO_PREEMPT: |
890 | case PRO_PREEMPT_AND_ABORT: |
		if (!ops->pr_preempt) {
			pr_err("block_device does not support pr_preempt.\n");
893 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
894 | } |
895 | |
896 | ret = ops->pr_preempt(bdev, key, sa_key, |
897 | scsi_pr_type_to_block(type), |
898 | sa == PRO_PREEMPT_AND_ABORT); |
899 | break; |
900 | case PRO_RELEASE: |
		if (!ops->pr_release) {
			pr_err("block_device does not support pr_release.\n");
903 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
904 | } |
905 | |
906 | ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type)); |
907 | break; |
908 | default: |
		pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa);
910 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
911 | } |
912 | |
913 | if (!ret) |
914 | return TCM_NO_SENSE; |
915 | else if (ret == PR_STS_RESERVATION_CONFLICT) |
916 | return TCM_RESERVATION_CONFLICT; |
917 | else |
918 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
919 | } |
920 | |
921 | static void iblock_pr_report_caps(unsigned char *param_data) |
922 | { |
923 | u16 len = 8; |
924 | |
	put_unaligned_be16(len, &param_data[0]);
926 | /* |
927 | * When using the pr_ops passthrough method we only support exporting |
928 | * the device through one target port because from the backend module |
929 | * level we can't see the target port config. As a result we only |
930 | * support registration directly from the I_T nexus the cmd is sent |
931 | * through and do not set ATP_C here. |
932 | * |
933 | * The block layer pr_ops do not support passing in initiators so |
934 | * we don't set SIP_C here. |
935 | */ |
936 | /* PTPL_C: Persistence across Target Power Loss bit */ |
937 | param_data[2] |= 0x01; |
938 | /* |
939 | * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so |
940 | * set the TMV: Task Mask Valid bit. |
941 | */ |
942 | param_data[3] |= 0x80; |
943 | /* |
944 | * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166 |
945 | */ |
946 | param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */ |
947 | /* |
948 | * PTPL_A: Persistence across Target Power Loss Active bit. The block |
949 | * layer pr ops always enables this so report it active. |
950 | */ |
951 | param_data[3] |= 0x01; |
952 | /* |
953 | * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37. |
954 | */ |
955 | param_data[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ |
956 | param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */ |
957 | param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */ |
958 | param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */ |
959 | param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ |
960 | param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ |
961 | } |
962 | |
963 | static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd, |
964 | unsigned char *param_data) |
965 | { |
966 | struct se_device *dev = cmd->se_dev; |
967 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
968 | struct block_device *bdev = ib_dev->ibd_bd; |
969 | const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; |
970 | int i, len, paths, data_offset; |
971 | struct pr_keys *keys; |
972 | sense_reason_t ret; |
973 | |
974 | if (!ops) { |
		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
976 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
977 | } |
978 | |
979 | if (!ops->pr_read_keys) { |
		pr_err("Block device does not support read_keys.\n");
981 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
982 | } |
983 | |
984 | /* |
985 | * We don't know what's under us, but dm-multipath will register every |
 * path with the same key, so start off with enough space for 16 paths,
 * which is not a lot of memory and should normally be enough.
988 | */ |
989 | paths = 16; |
990 | retry: |
991 | len = 8 * paths; |
	keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL);
993 | if (!keys) |
994 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
995 | |
996 | keys->num_keys = paths; |
997 | if (!ops->pr_read_keys(bdev, keys)) { |
998 | if (keys->num_keys > paths) { |
			kfree(keys);
1000 | paths *= 2; |
1001 | goto retry; |
1002 | } |
1003 | } else { |
1004 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1005 | goto free_keys; |
1006 | } |
1007 | |
1008 | ret = TCM_NO_SENSE; |
1009 | |
	put_unaligned_be32(keys->generation, &param_data[0]);
	if (!keys->num_keys) {
		put_unaligned_be32(0, &param_data[4]);
		goto free_keys;
	}

	put_unaligned_be32(8 * keys->num_keys, &param_data[4]);
1017 | |
1018 | data_offset = 8; |
1019 | for (i = 0; i < keys->num_keys; i++) { |
1020 | if (data_offset + 8 > cmd->data_length) |
1021 | break; |
1022 | |
		put_unaligned_be64(keys->keys[i], &param_data[data_offset]);
1024 | data_offset += 8; |
1025 | } |
1026 | |
1027 | free_keys: |
	kfree(keys);
1029 | return ret; |
1030 | } |
1031 | |
1032 | static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd, |
1033 | unsigned char *param_data) |
1034 | { |
1035 | struct se_device *dev = cmd->se_dev; |
1036 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
1037 | struct block_device *bdev = ib_dev->ibd_bd; |
1038 | const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; |
1039 | struct pr_held_reservation rsv = { }; |
1040 | |
1041 | if (!ops) { |
		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
1043 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
1044 | } |
1045 | |
1046 | if (!ops->pr_read_reservation) { |
		pr_err("Block device does not support pr_read_reservation.\n");
1048 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
1049 | } |
1050 | |
1051 | if (ops->pr_read_reservation(bdev, &rsv)) |
1052 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1053 | |
	put_unaligned_be32(rsv.generation, &param_data[0]);
	if (!block_pr_type_to_scsi(rsv.type)) {
		put_unaligned_be32(0, &param_data[4]);
		return TCM_NO_SENSE;
	}

	put_unaligned_be32(16, &param_data[4]);

	if (cmd->data_length < 16)
		return TCM_NO_SENSE;
	put_unaligned_be64(rsv.key, &param_data[8]);

	if (cmd->data_length < 22)
		return TCM_NO_SENSE;
	param_data[21] = block_pr_type_to_scsi(rsv.type);
1069 | |
1070 | return TCM_NO_SENSE; |
1071 | } |
1072 | |
1073 | static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa, |
1074 | unsigned char *param_data) |
1075 | { |
1076 | sense_reason_t ret = TCM_NO_SENSE; |
1077 | |
1078 | switch (sa) { |
1079 | case PRI_REPORT_CAPABILITIES: |
1080 | iblock_pr_report_caps(param_data); |
1081 | break; |
1082 | case PRI_READ_KEYS: |
1083 | ret = iblock_pr_read_keys(cmd, param_data); |
1084 | break; |
1085 | case PRI_READ_RESERVATION: |
1086 | ret = iblock_pr_read_reservation(cmd, param_data); |
1087 | break; |
1088 | default: |
		pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa);
1090 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
1091 | } |
1092 | |
1093 | return ret; |
1094 | } |
1095 | |
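/*
 * LOWEST ALIGNED LOGICAL BLOCK ADDRESS for READ CAPACITY(16). The block
 * layer reports the alignment offset in bytes (or -1 if the device is
 * misaligned), so convert it to logical blocks.
 */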
1096 | static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev) |
1097 | { |
1098 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
1099 | struct block_device *bd = ib_dev->ibd_bd; |
1100 | int ret; |
1101 | |
	ret = bdev_alignment_offset(bd);
1103 | if (ret == -1) |
1104 | return 0; |
1105 | |
1106 | /* convert offset-bytes to offset-lbas */ |
	return ret / bdev_logical_block_size(bd);
1108 | } |
1109 | |
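/*
 * LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT for READ CAPACITY(16),
 * e.g. 512-byte logical blocks on 4096-byte physical blocks yields
 * ilog2(8) = 3.
 */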
1110 | static unsigned int iblock_get_lbppbe(struct se_device *dev) |
1111 | { |
1112 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
1113 | struct block_device *bd = ib_dev->ibd_bd; |
1114 | unsigned int logs_per_phys = |
		bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
1116 | |
1117 | return ilog2(logs_per_phys); |
1118 | } |
1119 | |
1120 | static unsigned int iblock_get_io_min(struct se_device *dev) |
1121 | { |
1122 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
1123 | struct block_device *bd = ib_dev->ibd_bd; |
1124 | |
	return bdev_io_min(bd);
1126 | } |
1127 | |
1128 | static unsigned int iblock_get_io_opt(struct se_device *dev) |
1129 | { |
1130 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
1131 | struct block_device *bd = ib_dev->ibd_bd; |
1132 | |
	return bdev_io_opt(bd);
1134 | } |
1135 | |
1136 | static struct exec_cmd_ops iblock_exec_cmd_ops = { |
1137 | .execute_rw = iblock_execute_rw, |
1138 | .execute_sync_cache = iblock_execute_sync_cache, |
1139 | .execute_write_same = iblock_execute_write_same, |
1140 | .execute_unmap = iblock_execute_unmap, |
1141 | .execute_pr_out = iblock_execute_pr_out, |
1142 | .execute_pr_in = iblock_execute_pr_in, |
1143 | }; |
1144 | |
1145 | static sense_reason_t |
1146 | iblock_parse_cdb(struct se_cmd *cmd) |
1147 | { |
	return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops);
1149 | } |
1150 | |
1151 | static bool iblock_get_write_cache(struct se_device *dev) |
1152 | { |
	return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
1154 | } |
1155 | |
1156 | static const struct target_backend_ops iblock_ops = { |
	.name = "iblock",
	.inquiry_prod = "IBLOCK",
1159 | .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR, |
1160 | .inquiry_rev = IBLOCK_VERSION, |
1161 | .owner = THIS_MODULE, |
1162 | .attach_hba = iblock_attach_hba, |
1163 | .detach_hba = iblock_detach_hba, |
1164 | .alloc_device = iblock_alloc_device, |
1165 | .configure_device = iblock_configure_device, |
1166 | .destroy_device = iblock_destroy_device, |
1167 | .free_device = iblock_free_device, |
1168 | .configure_unmap = iblock_configure_unmap, |
1169 | .plug_device = iblock_plug_device, |
1170 | .unplug_device = iblock_unplug_device, |
1171 | .parse_cdb = iblock_parse_cdb, |
1172 | .set_configfs_dev_params = iblock_set_configfs_dev_params, |
1173 | .show_configfs_dev_params = iblock_show_configfs_dev_params, |
1174 | .get_device_type = sbc_get_device_type, |
1175 | .get_blocks = iblock_get_blocks, |
1176 | .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas, |
1177 | .get_lbppbe = iblock_get_lbppbe, |
1178 | .get_io_min = iblock_get_io_min, |
1179 | .get_io_opt = iblock_get_io_opt, |
1180 | .get_write_cache = iblock_get_write_cache, |
1181 | .tb_dev_attrib_attrs = sbc_attrib_attrs, |
1182 | }; |
1183 | |
1184 | static int __init iblock_module_init(void) |
1185 | { |
1186 | return transport_backend_register(&iblock_ops); |
1187 | } |
1188 | |
1189 | static void __exit iblock_module_exit(void) |
1190 | { |
1191 | target_backend_unregister(&iblock_ops); |
1192 | } |
1193 | |
MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
1197 | |
1198 | module_init(iblock_module_init); |
1199 | module_exit(iblock_module_exit); |
1200 | |