// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_tpg->tpg_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}
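
/*
 * Callers of core_get_se_deve_from_rtpi() own a &deve->pr_kref reference on
 * success and must drop it once the PR operation is done. A minimal sketch of
 * the expected caller pattern (the surrounding PR work is elided):
 *
 *	struct se_dev_entry *deve;
 *
 *	deve = core_get_se_deve_from_rtpi(nacl, rtpi);
 *	if (deve) {
 *		// ... use deve for the REGISTER_AND_MOVE / SPEC_I_PT work ...
 *		kref_put(&deve->pr_kref, target_pr_kref_release);
 *	}
 */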

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);
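
/*
 * target_nacl_find_deve() only walks an RCU-protected hlist; the returned
 * entry is stable only while the caller holds rcu_read_lock() (or
 * nacl->lun_entry_mutex on the update side). A minimal reader sketch:
 *
 *	rcu_read_lock();
 *	deve = target_nacl_find_deve(nacl, mapped_lun);
 *	if (deve) {
 *		// ... read deve fields, or take a reference ...
 *	}
 *	rcu_read_unlock();
 */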

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

/*
 * Establish UA condition on SCSI device - all LUNs
 */
void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{
	struct se_dev_entry *se_deve;
	struct se_lun *lun;

	spin_lock(&dev->se_port_lock);
	list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {

		spin_lock(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
			core_scsi3_ua_allocate(se_deve, asc, ascq);
		spin_unlock(&lun->lun_deve_lock);
	}
	spin_unlock(&dev->se_port_lock);
}
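
/*
 * Example (illustrative only): a backend that detects a device-wide change
 * can raise a Unit Attention toward every mapped initiator in one call, e.g.
 * CAPACITY DATA HAS CHANGED (ASC 0x2a, ASCQ 0x09):
 *
 *	target_dev_ua_allocate(dev, 0x2a, 0x09);
 */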

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = orig->se_lun;

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		new->se_lun = lun;
		new->se_lun_acl = lun_acl;
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	new->se_lun = lun;
	new->se_lun_acl = lun_acl;
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			if (lun != deve->se_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
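
/*
 * Worked example (assuming PAGE_SIZE == 4096): with a 512-byte block size the
 * alignment is 4096 / 512 = 8 sectors, so max_sectors = 1027 rounds down to
 * 1024; with a 4096-byte block size the alignment clamps to 1 and max_sectors
 * is returned unchanged.
 */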

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/* core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues) {
		dev->transport->free_device(dev);
		return NULL;
	}

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		struct se_device_queue *q;

		q = &dev->queues[i];
		INIT_LIST_HEAD(&q->state_list);
		spin_lock_init(&q->lock);

		init_llist_head(&q->sq.cmd_list);
		INIT_WORK(&q->sq.work, target_queued_submit_work);
	}

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
	mutex_init(&dev->lun_reset_mutex);

	dev->t10_wwn.t10_dev = dev;
	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	dev->t10_wwn.company_id = 0x001405;

	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
	dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device supports discard and if yes
 * configure the UNMAP parameters.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev)
{
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_max_discard_sectors(bdev))
		return false;

	attrib->max_unmap_lba_count =
		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
	attrib->unmap_granularity_alignment =
		bdev_discard_alignment(bdev) / block_size;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
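
/*
 * Worked example: bdev_max_discard_sectors() reports 512-byte sectors, while
 * max_unmap_lba_count is in logical blocks. For a 4096-byte logical block,
 * ilog2(4096) - 9 == 3, so e.g. 8192 discard sectors >> 3 == 1024 LBAs.
 */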

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);
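
/*
 * Example: with a 4096-byte block size advertised to the initiator, logical
 * block 10 maps to 512-byte sector 10 << 3 == 80; with the default 512-byte
 * block size the value passes through unchanged.
 */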

struct devices_idr_iter {
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	__must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	struct config_item *item;
	int ret;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);
	config_item_put(item);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	return ret;
}
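
/*
 * A minimal sketch of a target_for_each_device() caller; the callback name
 * and counter are illustrative only:
 *
 *	static int count_one_dev(struct se_device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;		// 0 == keep iterating
 *	}
 *
 *	unsigned int count = 0;
 *	target_for_each_device(count_one_dev, &count);
 */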

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;

	if (dev->transport->configure_unmap &&
	    dev->transport->configure_unmap(dev)) {
		pr_debug("Discard support available, but disabled by default.\n");
	}

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}


void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
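
/*
 * A passthrough backend's parse_cdb hook typically reduces to a one-line
 * wrapper around this helper; a hedged sketch (my_backend_execute_cmd is a
 * hypothetical backend handler, not part of this file):
 *
 *	static sense_reason_t my_backend_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return passthrough_parse_cdb(cmd, my_backend_execute_cmd);
 *	}
 */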