1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /******************************************************************************* |
3 | * This file contains tcm implementation using v4 configfs fabric infrastructure |
4 | * for QLogic target mode HBAs |
5 | * |
6 | * (c) Copyright 2010-2013 Datera, Inc. |
7 | * |
8 | * Author: Nicholas A. Bellinger <nab@daterainc.com> |
9 | * |
10 | * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from |
11 | * the TCM_FC / Open-FCoE.org fabric module. |
12 | * |
13 | * Copyright (c) 2010 Cisco Systems, Inc |
14 | * |
15 | ****************************************************************************/ |
16 | |
17 | |
18 | #include <linux/module.h> |
19 | #include <linux/utsname.h> |
20 | #include <linux/vmalloc.h> |
21 | #include <linux/list.h> |
22 | #include <linux/slab.h> |
23 | #include <linux/types.h> |
24 | #include <linux/string.h> |
25 | #include <linux/configfs.h> |
26 | #include <linux/ctype.h> |
27 | #include <asm/unaligned.h> |
28 | #include <scsi/scsi_host.h> |
29 | #include <target/target_core_base.h> |
30 | #include <target/target_core_fabric.h> |
31 | |
32 | #include "qla_def.h" |
33 | #include "qla_target.h" |
34 | #include "tcm_qla2xxx.h" |
35 | |
36 | static struct workqueue_struct *tcm_qla2xxx_free_wq; |
37 | |
38 | /* |
39 | * Parse WWN. |
40 | * If strict, we require lower-case hex and colon separators to be sure |
41 | * the name is the same as what would be generated by ft_format_wwn() |
42 | * so the name and wwn are mapped one-to-one. |
43 | */ |
static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
{
	const char *cp;
	char c;
	u32 nibble;
	u32 byte = 0;	/* number of "xx:" groups completed (strict mode) */
	u32 pos = 0;	/* character position within the current group */
	u32 err;	/* failure reason code, reported via pr_debug below */

	*wwn = 0;
	for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
		c = *cp;
		/* Ignore a single trailing newline (e.g. from "echo" into configfs) */
		if (c == '\n' && cp[1] == '\0')
			continue;
		/* In strict mode every third character must be a ':' separator */
		if (strict && pos++ == 2 && byte++ < 7) {
			pos = 0;
			if (c == ':')
				continue;
			err = 1;
			goto fail;
		}
		if (c == '\0') {
			err = 2;
			/* Strict names must contain all 8 bytes before NUL */
			if (strict && byte != 8)
				goto fail;
			/* Success: return number of characters consumed */
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c) && (islower(c) || !strict))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		/* Shift the accumulated value and append this nibble */
		*wwn = (*wwn << 4) | nibble;
	}
	err = 4;
fail:
	pr_debug("err %u len %zu pos %u byte %u\n" ,
		err, cp - name, pos, byte);
	return -1;
}
86 | |
87 | static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn) |
88 | { |
89 | u8 b[8]; |
90 | |
91 | put_unaligned_be64(val: wwn, p: b); |
92 | return snprintf(buf, size: len, |
93 | fmt: "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x" , |
94 | b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); |
95 | } |
96 | |
97 | /* |
98 | * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn |
99 | */ |
100 | static int (const char *ns, u64 *nm) |
101 | { |
102 | unsigned int i, j; |
103 | u8 wwn[8]; |
104 | |
105 | memset(wwn, 0, sizeof(wwn)); |
106 | |
107 | /* Validate and store the new name */ |
108 | for (i = 0, j = 0; i < 16; i++) { |
109 | int value; |
110 | |
111 | value = hex_to_bin(ch: *ns++); |
112 | if (value >= 0) |
113 | j = (j << 4) | value; |
114 | else |
115 | return -EINVAL; |
116 | |
117 | if (i % 2) { |
118 | wwn[i/2] = j & 0xff; |
119 | j = 0; |
120 | } |
121 | } |
122 | |
123 | *nm = wwn_to_u64(wwn); |
124 | return 0; |
125 | } |
126 | |
127 | /* |
128 | * This parsing logic follows drivers/scsi/scsi_transport_fc.c: |
129 | * store_fc_host_vport_create() |
130 | */ |
131 | static int tcm_qla2xxx_npiv_parse_wwn( |
132 | const char *name, |
133 | size_t count, |
134 | u64 *wwpn, |
135 | u64 *wwnn) |
136 | { |
137 | unsigned int cnt = count; |
138 | int rc; |
139 | |
140 | *wwpn = 0; |
141 | *wwnn = 0; |
142 | |
143 | /* count may include a LF at end of string */ |
144 | if (name[cnt-1] == '\n' || name[cnt-1] == 0) |
145 | cnt--; |
146 | |
147 | /* validate we have enough characters for WWPN */ |
148 | if ((cnt != (16+1+16)) || (name[16] != ':')) |
149 | return -EINVAL; |
150 | |
151 | rc = tcm_qla2xxx_npiv_extract_wwn(ns: &name[0], nm: wwpn); |
152 | if (rc != 0) |
153 | return rc; |
154 | |
155 | rc = tcm_qla2xxx_npiv_extract_wwn(ns: &name[17], nm: wwnn); |
156 | if (rc != 0) |
157 | return rc; |
158 | |
159 | return 0; |
160 | } |
161 | |
162 | static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) |
163 | { |
164 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
165 | struct tcm_qla2xxx_tpg, se_tpg); |
166 | struct tcm_qla2xxx_lport *lport = tpg->lport; |
167 | |
168 | return lport->lport_naa_name; |
169 | } |
170 | |
171 | static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg) |
172 | { |
173 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
174 | struct tcm_qla2xxx_tpg, se_tpg); |
175 | return tpg->lport_tpgt; |
176 | } |
177 | |
178 | static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) |
179 | { |
180 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
181 | struct tcm_qla2xxx_tpg, se_tpg); |
182 | |
183 | return tpg->tpg_attrib.generate_node_acls; |
184 | } |
185 | |
186 | static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) |
187 | { |
188 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
189 | struct tcm_qla2xxx_tpg, se_tpg); |
190 | |
191 | return tpg->tpg_attrib.cache_dynamic_acls; |
192 | } |
193 | |
194 | static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) |
195 | { |
196 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
197 | struct tcm_qla2xxx_tpg, se_tpg); |
198 | |
199 | return tpg->tpg_attrib.demo_mode_write_protect; |
200 | } |
201 | |
202 | static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) |
203 | { |
204 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
205 | struct tcm_qla2xxx_tpg, se_tpg); |
206 | |
207 | return tpg->tpg_attrib.prod_mode_write_protect; |
208 | } |
209 | |
210 | static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg) |
211 | { |
212 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
213 | struct tcm_qla2xxx_tpg, se_tpg); |
214 | |
215 | return tpg->tpg_attrib.demo_mode_login_only; |
216 | } |
217 | |
218 | static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg) |
219 | { |
220 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
221 | struct tcm_qla2xxx_tpg, se_tpg); |
222 | |
223 | return tpg->tpg_attrib.fabric_prot_type; |
224 | } |
225 | |
226 | static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg) |
227 | { |
228 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
229 | struct tcm_qla2xxx_tpg, se_tpg); |
230 | |
231 | return tpg->lport_tpgt; |
232 | } |
233 | |
234 | static void tcm_qla2xxx_complete_mcmd(struct work_struct *work) |
235 | { |
236 | struct qla_tgt_mgmt_cmd *mcmd = container_of(work, |
237 | struct qla_tgt_mgmt_cmd, free_work); |
238 | |
239 | transport_generic_free_cmd(&mcmd->se_cmd, 0); |
240 | } |
241 | |
242 | /* |
243 | * Called from qla_target_template->free_mcmd(), and will call |
244 | * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops |
245 | * release callback. qla_hw_data->hardware_lock is expected to be held |
246 | */ |
247 | static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) |
248 | { |
249 | if (!mcmd) |
250 | return; |
251 | INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd); |
252 | queue_work(wq: tcm_qla2xxx_free_wq, work: &mcmd->free_work); |
253 | } |
254 | |
255 | static void tcm_qla2xxx_complete_free(struct work_struct *work) |
256 | { |
257 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); |
258 | unsigned long flags; |
259 | |
260 | cmd->cmd_in_wq = 0; |
261 | |
262 | WARN_ON(cmd->trc_flags & TRC_CMD_FREE); |
263 | |
264 | /* To do: protect all tgt_counters manipulations with proper locking. */ |
265 | cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++; |
266 | cmd->trc_flags |= TRC_CMD_FREE; |
267 | cmd->cmd_sent_to_fw = 0; |
268 | |
269 | spin_lock_irqsave(&cmd->sess->sess_cmd_lock, flags); |
270 | list_del_init(entry: &cmd->sess_cmd_list); |
271 | spin_unlock_irqrestore(lock: &cmd->sess->sess_cmd_lock, flags); |
272 | |
273 | transport_generic_free_cmd(&cmd->se_cmd, 0); |
274 | } |
275 | |
276 | static struct qla_tgt_cmd *tcm_qla2xxx_get_cmd(struct fc_port *sess) |
277 | { |
278 | struct se_session *se_sess = sess->se_sess; |
279 | struct qla_tgt_cmd *cmd; |
280 | int tag, cpu; |
281 | |
282 | tag = sbitmap_queue_get(sbq: &se_sess->sess_tag_pool, cpu: &cpu); |
283 | if (tag < 0) |
284 | return NULL; |
285 | |
286 | cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; |
287 | memset(cmd, 0, sizeof(struct qla_tgt_cmd)); |
288 | cmd->se_cmd.map_tag = tag; |
289 | cmd->se_cmd.map_cpu = cpu; |
290 | |
291 | return cmd; |
292 | } |
293 | |
294 | static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd) |
295 | { |
296 | target_free_tag(sess: cmd->sess->se_sess, cmd: &cmd->se_cmd); |
297 | } |
298 | |
299 | /* |
300 | * Called from qla_target_template->free_cmd(), and will call |
301 | * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops |
302 | * release callback. qla_hw_data->hardware_lock is expected to be held |
303 | */ |
304 | static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) |
305 | { |
306 | cmd->qpair->tgt_counters.core_qla_free_cmd++; |
307 | cmd->cmd_in_wq = 1; |
308 | |
309 | WARN_ON(cmd->trc_flags & TRC_CMD_DONE); |
310 | cmd->trc_flags |= TRC_CMD_DONE; |
311 | |
312 | INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); |
313 | queue_work(wq: tcm_qla2xxx_free_wq, work: &cmd->work); |
314 | } |
315 | |
316 | /* |
317 | * Called from struct target_core_fabric_ops->check_stop_free() context |
318 | */ |
319 | static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) |
320 | { |
321 | struct qla_tgt_cmd *cmd; |
322 | |
323 | if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) { |
324 | cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); |
325 | cmd->trc_flags |= TRC_CMD_CHK_STOP; |
326 | } |
327 | |
328 | return target_put_sess_cmd(se_cmd); |
329 | } |
330 | |
331 | /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying |
332 | * fabric descriptor @se_cmd command to release |
333 | */ |
334 | static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) |
335 | { |
336 | struct qla_tgt_cmd *cmd; |
337 | |
338 | if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { |
339 | struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, |
340 | struct qla_tgt_mgmt_cmd, se_cmd); |
341 | qlt_free_mcmd(mcmd); |
342 | return; |
343 | } |
344 | cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); |
345 | |
346 | if (WARN_ON(cmd->cmd_sent_to_fw)) |
347 | return; |
348 | |
349 | qlt_free_cmd(cmd); |
350 | } |
351 | |
352 | static void tcm_qla2xxx_release_session(struct kref *kref) |
353 | { |
354 | struct fc_port *sess = container_of(kref, |
355 | struct fc_port, sess_kref); |
356 | |
357 | qlt_unreg_sess(sess); |
358 | } |
359 | |
360 | static void tcm_qla2xxx_put_sess(struct fc_port *sess) |
361 | { |
362 | if (!sess) |
363 | return; |
364 | |
365 | kref_put(kref: &sess->sess_kref, release: tcm_qla2xxx_release_session); |
366 | } |
367 | |
368 | static void tcm_qla2xxx_close_session(struct se_session *se_sess) |
369 | { |
370 | struct fc_port *sess = se_sess->fabric_sess_ptr; |
371 | |
372 | BUG_ON(!sess); |
373 | |
374 | target_stop_session(se_sess); |
375 | |
376 | sess->explicit_logout = 1; |
377 | tcm_qla2xxx_put_sess(sess); |
378 | } |
379 | |
/*
 * target_core_fabric_ops->write_pending() callback: copy the SGL and
 * protection-SGL description from the se_cmd into the qla_tgt_cmd and
 * ask the LLD to start receiving FCP WRITE data.
 */
static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	if (cmd->aborted) {
		/* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
		 * can get ahead of this cmd. tcm_qla2xxx_aborted_task
		 * already kick start the free.
		 */
		pr_debug("write_pending aborted cmd[%p] refcount %d "
			"transport_state %x, t_state %x, se_cmd_flags %x\n" ,
			cmd, kref_read(&cmd->se_cmd.cmd_kref),
			cmd->se_cmd.transport_state,
			cmd->se_cmd.t_state,
			cmd->se_cmd.se_cmd_flags);
		transport_generic_request_failure(&cmd->se_cmd,
			TCM_CHECK_CONDITION_ABORT_CMD);
		return 0;
	}
	cmd->trc_flags |= TRC_XFR_RDY;
	cmd->bufflen = se_cmd->data_length;
	/* LLD DMA direction is the reverse of the initiator's view */
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);

	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;

	/* T10-PI protection scatterlist, if any */
	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
	cmd->prot_sg = se_cmd->t_prot_sg;
	cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
	se_cmd->pi_err = 0;

	/*
	 * qla_target.c:qlt_rdy_to_xfer() will call dma_map_sg() to setup
	 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
	 */
	return qlt_rdy_to_xfer(cmd);
}
418 | |
419 | static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd) |
420 | { |
421 | if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { |
422 | struct qla_tgt_cmd *cmd = container_of(se_cmd, |
423 | struct qla_tgt_cmd, se_cmd); |
424 | return cmd->state; |
425 | } |
426 | |
427 | return 0; |
428 | } |
429 | |
430 | /* |
431 | * Called from process context in qla_target.c:qlt_do_work() code |
432 | */ |
433 | static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, |
434 | unsigned char *cdb, uint32_t data_length, int fcp_task_attr, |
435 | int data_dir, int bidi) |
436 | { |
437 | struct se_cmd *se_cmd = &cmd->se_cmd; |
438 | struct se_session *se_sess; |
439 | struct fc_port *sess; |
440 | #ifdef CONFIG_TCM_QLA2XXX_DEBUG |
441 | struct se_portal_group *se_tpg; |
442 | struct tcm_qla2xxx_tpg *tpg; |
443 | #endif |
444 | int rc, target_flags = TARGET_SCF_ACK_KREF; |
445 | unsigned long flags; |
446 | |
447 | if (bidi) |
448 | target_flags |= TARGET_SCF_BIDI_OP; |
449 | |
450 | if (se_cmd->cpuid != WORK_CPU_UNBOUND) |
451 | target_flags |= TARGET_SCF_USE_CPUID; |
452 | |
453 | sess = cmd->sess; |
454 | if (!sess) { |
455 | pr_err("Unable to locate struct fc_port from qla_tgt_cmd\n" ); |
456 | return -EINVAL; |
457 | } |
458 | |
459 | se_sess = sess->se_sess; |
460 | if (!se_sess) { |
461 | pr_err("Unable to locate active struct se_session\n" ); |
462 | return -EINVAL; |
463 | } |
464 | |
465 | #ifdef CONFIG_TCM_QLA2XXX_DEBUG |
466 | se_tpg = se_sess->se_tpg; |
467 | tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); |
468 | if (unlikely(tpg->tpg_attrib.jam_host)) { |
469 | /* return, and dont run target_submit_cmd,discarding command */ |
470 | return 0; |
471 | } |
472 | #endif |
473 | cmd->qpair->tgt_counters.qla_core_sbt_cmd++; |
474 | |
475 | spin_lock_irqsave(&sess->sess_cmd_lock, flags); |
476 | list_add_tail(new: &cmd->sess_cmd_list, head: &sess->sess_cmd_list); |
477 | spin_unlock_irqrestore(lock: &sess->sess_cmd_lock, flags); |
478 | |
479 | rc = target_init_cmd(se_cmd, se_sess, sense: &cmd->sense_buffer[0], |
480 | unpacked_lun: cmd->unpacked_lun, data_length, task_attr: fcp_task_attr, |
481 | data_dir, flags: target_flags); |
482 | if (rc) |
483 | return rc; |
484 | |
485 | if (target_submit_prep(se_cmd, cdb, NULL, sgl_count: 0, NULL, sgl_bidi_count: 0, NULL, sgl_prot_count: 0, |
486 | GFP_KERNEL)) |
487 | return 0; |
488 | |
489 | target_submit(se_cmd); |
490 | return 0; |
491 | } |
492 | |
493 | static void tcm_qla2xxx_handle_data_work(struct work_struct *work) |
494 | { |
495 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); |
496 | |
497 | /* |
498 | * Ensure that the complete FCP WRITE payload has been received. |
499 | * Otherwise return an exception via CHECK_CONDITION status. |
500 | */ |
501 | cmd->cmd_in_wq = 0; |
502 | cmd->cmd_sent_to_fw = 0; |
503 | if (cmd->aborted) { |
504 | transport_generic_request_failure(&cmd->se_cmd, |
505 | TCM_CHECK_CONDITION_ABORT_CMD); |
506 | return; |
507 | } |
508 | |
509 | cmd->qpair->tgt_counters.qla_core_ret_ctio++; |
510 | if (!cmd->write_data_transferred) { |
511 | switch (cmd->dif_err_code) { |
512 | case DIF_ERR_GRD: |
513 | cmd->se_cmd.pi_err = |
514 | TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; |
515 | break; |
516 | case DIF_ERR_REF: |
517 | cmd->se_cmd.pi_err = |
518 | TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; |
519 | break; |
520 | case DIF_ERR_APP: |
521 | cmd->se_cmd.pi_err = |
522 | TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; |
523 | break; |
524 | case DIF_ERR_NONE: |
525 | default: |
526 | break; |
527 | } |
528 | |
529 | if (cmd->se_cmd.pi_err) |
530 | transport_generic_request_failure(&cmd->se_cmd, |
531 | cmd->se_cmd.pi_err); |
532 | else |
533 | transport_generic_request_failure(&cmd->se_cmd, |
534 | TCM_CHECK_CONDITION_ABORT_CMD); |
535 | |
536 | return; |
537 | } |
538 | |
539 | return target_execute_cmd(cmd: &cmd->se_cmd); |
540 | } |
541 | |
542 | /* |
543 | * Called from qla_target.c:qlt_do_ctio_completion() |
544 | */ |
545 | static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) |
546 | { |
547 | cmd->trc_flags |= TRC_DATA_IN; |
548 | cmd->cmd_in_wq = 1; |
549 | INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); |
550 | queue_work(wq: tcm_qla2xxx_free_wq, work: &cmd->work); |
551 | } |
552 | |
/*
 * qla_tgt_func_tmpl callback stub: no per-tag DIF filtering is done by
 * this fabric module, so always report "check everything" (0).
 */
static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
{
	return 0;
}
557 | |
558 | static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd, |
559 | uint16_t *pfw_prot_opts) |
560 | { |
561 | struct se_cmd *se_cmd = &cmd->se_cmd; |
562 | |
563 | if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) |
564 | *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK; |
565 | |
566 | if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)) |
567 | *pfw_prot_opts |= PO_DIS_APP_TAG_VALD; |
568 | |
569 | return 0; |
570 | } |
571 | |
572 | /* |
573 | * Called from qla_target.c:qlt_issue_task_mgmt() |
574 | */ |
575 | static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun, |
576 | uint16_t tmr_func, uint32_t tag) |
577 | { |
578 | struct fc_port *sess = mcmd->sess; |
579 | struct se_cmd *se_cmd = &mcmd->se_cmd; |
580 | int transl_tmr_func = 0; |
581 | |
582 | switch (tmr_func) { |
583 | case QLA_TGT_ABTS: |
584 | pr_debug("%ld: ABTS received\n" , sess->vha->host_no); |
585 | transl_tmr_func = TMR_ABORT_TASK; |
586 | break; |
587 | case QLA_TGT_2G_ABORT_TASK: |
588 | pr_debug("%ld: 2G Abort Task received\n" , sess->vha->host_no); |
589 | transl_tmr_func = TMR_ABORT_TASK; |
590 | break; |
591 | case QLA_TGT_CLEAR_ACA: |
592 | pr_debug("%ld: CLEAR_ACA received\n" , sess->vha->host_no); |
593 | transl_tmr_func = TMR_CLEAR_ACA; |
594 | break; |
595 | case QLA_TGT_TARGET_RESET: |
596 | pr_debug("%ld: TARGET_RESET received\n" , sess->vha->host_no); |
597 | transl_tmr_func = TMR_TARGET_WARM_RESET; |
598 | break; |
599 | case QLA_TGT_LUN_RESET: |
600 | pr_debug("%ld: LUN_RESET received\n" , sess->vha->host_no); |
601 | transl_tmr_func = TMR_LUN_RESET; |
602 | break; |
603 | case QLA_TGT_CLEAR_TS: |
604 | pr_debug("%ld: CLEAR_TS received\n" , sess->vha->host_no); |
605 | transl_tmr_func = TMR_CLEAR_TASK_SET; |
606 | break; |
607 | case QLA_TGT_ABORT_TS: |
608 | pr_debug("%ld: ABORT_TS received\n" , sess->vha->host_no); |
609 | transl_tmr_func = TMR_ABORT_TASK_SET; |
610 | break; |
611 | default: |
612 | pr_debug("%ld: Unknown task mgmt fn 0x%x\n" , |
613 | sess->vha->host_no, tmr_func); |
614 | return -ENOSYS; |
615 | } |
616 | |
617 | return target_submit_tmr(se_cmd, se_sess: sess->se_sess, NULL, unpacked_lun: lun, fabric_tmr_ptr: mcmd, |
618 | tm_type: transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); |
619 | } |
620 | |
621 | static struct qla_tgt_cmd *tcm_qla2xxx_find_cmd_by_tag(struct fc_port *sess, |
622 | uint64_t tag) |
623 | { |
624 | struct qla_tgt_cmd *cmd; |
625 | unsigned long flags; |
626 | |
627 | if (!sess->se_sess) |
628 | return NULL; |
629 | |
630 | spin_lock_irqsave(&sess->sess_cmd_lock, flags); |
631 | list_for_each_entry(cmd, &sess->sess_cmd_list, sess_cmd_list) { |
632 | if (cmd->se_cmd.tag == tag) |
633 | goto done; |
634 | } |
635 | cmd = NULL; |
636 | done: |
637 | spin_unlock_irqrestore(lock: &sess->sess_cmd_lock, flags); |
638 | |
639 | return cmd; |
640 | } |
641 | |
/*
 * target_core_fabric_ops->queue_data_in() callback: hand the DATA-IN
 * payload plus final status to the LLD in a single response.
 */
static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	if (cmd->aborted) {
		/* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
		 * can get ahead of this cmd. tcm_qla2xxx_aborted_task
		 * already kick start the free.
		 */
		pr_debug("queue_data_in aborted cmd[%p] refcount %d "
			"transport_state %x, t_state %x, se_cmd_flags %x\n" ,
			cmd, kref_read(&cmd->se_cmd.cmd_kref),
			cmd->se_cmd.transport_state,
			cmd->se_cmd.t_state,
			cmd->se_cmd.se_cmd_flags);
		return 0;
	}

	cmd->trc_flags |= TRC_XMIT_DATA;
	cmd->bufflen = se_cmd->data_length;
	/* LLD DMA direction is the reverse of the initiator's view */
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);

	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;
	cmd->offset = 0;

	/* T10-PI protection scatterlist, if any */
	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
	cmd->prot_sg = se_cmd->t_prot_sg;
	cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
	se_cmd->pi_err = 0;

	/*
	 * Now queue completed DATA_IN the qla2xxx LLD and response ring
	 */
	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
				se_cmd->scsi_status);
}
680 | |
/*
 * target_core_fabric_ops->queue_status() callback: send a status-only
 * response (no data) for the command to the LLD.
 */
static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);
	int xmit_type = QLA_TGT_XMIT_STATUS;

	if (cmd->aborted) {
		/*
		 * Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
		 * can get ahead of this cmd. tcm_qla2xxx_aborted_task
		 * already kick start the free.
		 */
		pr_debug(
		    "queue_data_in aborted cmd[%p] refcount %d transport_state %x, t_state %x, se_cmd_flags %x\n" ,
		    cmd, kref_read(&cmd->se_cmd.cmd_kref),
		    cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
		    cmd->se_cmd.se_cmd_flags);
		return 0;
	}
	cmd->bufflen = se_cmd->data_length;
	/* Status-only: no scatterlist accompanies this response */
	cmd->sg = NULL;
	cmd->sg_cnt = 0;
	cmd->offset = 0;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
	cmd->trc_flags |= TRC_XMIT_STATUS;

	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		/*
		 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
		 * for qla_tgt_xmit_response LLD code
		 */
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
			se_cmd->residual_count = 0;
		}
		/* Report the entire expected transfer as residual */
		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
		se_cmd->residual_count += se_cmd->data_length;

		cmd->bufflen = 0;
	}
	/*
	 * Now queue status response to qla2xxx LLD code and response ring
	 */
	return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
}
726 | |
/*
 * target_core_fabric_ops->queue_tm_rsp() callback: map the TCM TMR
 * response code onto the FC task-management response and send it.
 */
static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
				struct qla_tgt_mgmt_cmd, se_cmd);

	pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n" ,
			mcmd, se_tmr->function, se_tmr->response);
	/*
	 * Do translation between TCM TM response codes and
	 * QLA2xxx FC TM response codes.
	 */
	switch (se_tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		mcmd->fc_tm_rsp = FC_TM_SUCCESS;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
		mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
		break;
	case TMR_FUNCTION_REJECTED:
		mcmd->fc_tm_rsp = FC_TM_REJECT;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
	default:
		/* Everything else collapses to a generic FC failure */
		mcmd->fc_tm_rsp = FC_TM_FAILED;
		break;
	}
	/*
	 * Queue the TM response to QLA2xxx LLD to build a
	 * CTIO response packet.
	 */
	qlt_xmit_tm_rsp(mcmd);
}
760 | |
761 | static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) |
762 | { |
763 | struct qla_tgt_cmd *cmd; |
764 | unsigned long flags; |
765 | |
766 | if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) |
767 | return; |
768 | |
769 | cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); |
770 | |
771 | spin_lock_irqsave(&cmd->sess->sess_cmd_lock, flags); |
772 | list_del_init(entry: &cmd->sess_cmd_list); |
773 | spin_unlock_irqrestore(lock: &cmd->sess->sess_cmd_lock, flags); |
774 | |
775 | qlt_abort_cmd(cmd); |
776 | } |
777 | |
778 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, |
779 | struct tcm_qla2xxx_nacl *, struct fc_port *); |
780 | /* |
781 | * Expected to be called with struct qla_hw_data->tgt.sess_lock held |
782 | */ |
783 | static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess) |
784 | { |
785 | struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; |
786 | struct se_portal_group *se_tpg = se_nacl->se_tpg; |
787 | struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; |
788 | struct tcm_qla2xxx_lport *lport = container_of(se_wwn, |
789 | struct tcm_qla2xxx_lport, lport_wwn); |
790 | struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, |
791 | struct tcm_qla2xxx_nacl, se_node_acl); |
792 | void *node; |
793 | |
794 | pr_debug("fc_rport domain: port_id 0x%06x\n" , nacl->nport_id); |
795 | |
796 | node = btree_remove32(head: &lport->lport_fcport_map, key: nacl->nport_id); |
797 | if (WARN_ON(node && (node != se_nacl))) { |
798 | /* |
799 | * The nacl no longer matches what we think it should be. |
800 | * Most likely a new dynamic acl has been added while |
801 | * someone dropped the hardware lock. It clearly is a |
802 | * bug elsewhere, but this bit can't make things worse. |
803 | */ |
804 | btree_insert32(head: &lport->lport_fcport_map, key: nacl->nport_id, |
805 | val: node, GFP_ATOMIC); |
806 | } |
807 | |
808 | pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n" , |
809 | se_nacl, nacl->nport_wwnn, nacl->nport_id); |
810 | /* |
811 | * Now clear the se_nacl and session pointers from our HW lport lookup |
812 | * table mapping for this initiator's fabric S_ID and LOOP_ID entries. |
813 | * |
814 | * This is done ahead of callbacks into tcm_qla2xxx_free_session() -> |
815 | * target_wait_for_sess_cmds() before the session waits for outstanding |
816 | * I/O to complete, to avoid a race between session shutdown execution |
817 | * and incoming ATIOs or TMRs picking up a stale se_node_act reference. |
818 | */ |
819 | tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); |
820 | } |
821 | |
822 | static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess) |
823 | { |
824 | target_stop_session(se_sess: sess->se_sess); |
825 | } |
826 | |
827 | static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl, |
828 | const char *name) |
829 | { |
830 | struct tcm_qla2xxx_nacl *nacl = |
831 | container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); |
832 | u64 wwnn; |
833 | |
834 | if (tcm_qla2xxx_parse_wwn(name, wwn: &wwnn, strict: 1) < 0) |
835 | return -EINVAL; |
836 | |
837 | nacl->nport_wwnn = wwnn; |
838 | tcm_qla2xxx_format_wwn(buf: &nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwn: wwnn); |
839 | |
840 | return 0; |
841 | } |
842 | |
843 | /* Start items for tcm_qla2xxx_tpg_attrib_cit */ |
844 | |
/*
 * DEF_QLA_TPG_ATTRIB(name): generate the configfs show/store handler
 * pair for the boolean tpg_attrib field @name, plus the CONFIGFS_ATTR()
 * registration.  The store handler accepts only 0 or 1.
 */
#define DEF_QLA_TPG_ATTRIB(name)					\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_##name##_show(			\
		struct config_item *item, char *page)			\
{									\
	struct se_portal_group *se_tpg = attrib_to_tpg(item);		\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
									\
	return sprintf(page, "%d\n", tpg->tpg_attrib.name);	\
}									\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_##name##_store(			\
		struct config_item *item, const char *page, size_t count) \
{									\
	struct se_portal_group *se_tpg = attrib_to_tpg(item);		\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
	struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;		\
	unsigned long val;						\
	int ret;							\
									\
	ret = kstrtoul(page, 0, &val);				\
	if (ret < 0) {							\
		pr_err("kstrtoul() failed with"				\
				" ret: %d\n", ret);			\
		return -EINVAL;						\
	}								\
									\
	if ((val != 0) && (val != 1)) {					\
		pr_err("Illegal boolean value %lu\n", val);		\
		return -EINVAL;						\
	}								\
									\
	a->name = val;							\
									\
	return count;							\
}									\
CONFIGFS_ATTR(tcm_qla2xxx_tpg_attrib_, name)
884 | |
/* Instantiate show/store handlers for each boolean TPG attribute. */
DEF_QLA_TPG_ATTRIB(generate_node_acls);
DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
#ifdef CONFIG_TCM_QLA2XXX_DEBUG
DEF_QLA_TPG_ATTRIB(jam_host);
#endif

/* NULL-terminated attribute table for the tpg_attrib configfs group. */
static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
	&tcm_qla2xxx_tpg_attrib_attr_generate_node_acls,
	&tcm_qla2xxx_tpg_attrib_attr_cache_dynamic_acls,
	&tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect,
	&tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect,
	&tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only,
#ifdef CONFIG_TCM_QLA2XXX_DEBUG
	&tcm_qla2xxx_tpg_attrib_attr_jam_host,
#endif
	NULL,
};
905 | |
906 | /* End items for tcm_qla2xxx_tpg_attrib_cit */ |
907 | |
908 | static int tcm_qla2xxx_enable_tpg(struct se_portal_group *se_tpg, |
909 | bool enable) |
910 | { |
911 | struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; |
912 | struct tcm_qla2xxx_lport *lport = container_of(se_wwn, |
913 | struct tcm_qla2xxx_lport, lport_wwn); |
914 | struct scsi_qla_host *vha = lport->qla_vha; |
915 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
916 | struct tcm_qla2xxx_tpg, se_tpg); |
917 | |
918 | if (enable) { |
919 | if (atomic_read(v: &tpg->lport_tpg_enabled)) |
920 | return -EEXIST; |
921 | |
922 | atomic_set(v: &tpg->lport_tpg_enabled, i: 1); |
923 | qlt_enable_vha(vha); |
924 | } else { |
925 | if (!atomic_read(v: &tpg->lport_tpg_enabled)) |
926 | return 0; |
927 | |
928 | atomic_set(v: &tpg->lport_tpg_enabled, i: 0); |
929 | qlt_stop_phase1(vha->vha_tgt.qla_tgt); |
930 | qlt_stop_phase2(vha->vha_tgt.qla_tgt); |
931 | } |
932 | |
933 | return 0; |
934 | } |
935 | |
936 | static ssize_t tcm_qla2xxx_tpg_dynamic_sessions_show(struct config_item *item, |
937 | char *page) |
938 | { |
939 | return target_show_dynamic_sessions(to_tpg(item), page); |
940 | } |
941 | |
942 | static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_store(struct config_item *item, |
943 | const char *page, size_t count) |
944 | { |
945 | struct se_portal_group *se_tpg = to_tpg(item); |
946 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
947 | struct tcm_qla2xxx_tpg, se_tpg); |
948 | unsigned long val; |
949 | int ret = kstrtoul(s: page, base: 0, res: &val); |
950 | |
951 | if (ret) { |
952 | pr_err("kstrtoul() returned %d for fabric_prot_type\n" , ret); |
953 | return ret; |
954 | } |
955 | if (val != 0 && val != 1 && val != 3) { |
956 | pr_err("Invalid qla2xxx fabric_prot_type: %lu\n" , val); |
957 | return -EINVAL; |
958 | } |
959 | tpg->tpg_attrib.fabric_prot_type = val; |
960 | |
961 | return count; |
962 | } |
963 | |
964 | static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item, |
965 | char *page) |
966 | { |
967 | struct se_portal_group *se_tpg = to_tpg(item); |
968 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
969 | struct tcm_qla2xxx_tpg, se_tpg); |
970 | |
971 | return sprintf(buf: page, fmt: "%d\n" , tpg->tpg_attrib.fabric_prot_type); |
972 | } |
973 | |
/* Base TPG attributes exposed in configfs under .../qla2xxx/<wwn>/tpgt_N/ */
CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions);
CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type);

static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
	&tcm_qla2xxx_tpg_attr_dynamic_sessions,
	&tcm_qla2xxx_tpg_attr_fabric_prot_type,
	NULL,
};
982 | |
983 | static struct se_portal_group *tcm_qla2xxx_make_tpg(struct se_wwn *wwn, |
984 | const char *name) |
985 | { |
986 | struct tcm_qla2xxx_lport *lport = container_of(wwn, |
987 | struct tcm_qla2xxx_lport, lport_wwn); |
988 | struct tcm_qla2xxx_tpg *tpg; |
989 | unsigned long tpgt; |
990 | int ret; |
991 | |
992 | if (strstr(name, "tpgt_" ) != name) |
993 | return ERR_PTR(error: -EINVAL); |
994 | if (kstrtoul(s: name + 5, base: 10, res: &tpgt) || tpgt > USHRT_MAX) |
995 | return ERR_PTR(error: -EINVAL); |
996 | |
997 | if ((tpgt != 1)) { |
998 | pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n" ); |
999 | return ERR_PTR(error: -ENOSYS); |
1000 | } |
1001 | |
1002 | tpg = kzalloc(size: sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); |
1003 | if (!tpg) { |
1004 | pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n" ); |
1005 | return ERR_PTR(error: -ENOMEM); |
1006 | } |
1007 | tpg->lport = lport; |
1008 | tpg->lport_tpgt = tpgt; |
1009 | /* |
1010 | * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic |
1011 | * NodeACLs |
1012 | */ |
1013 | tpg->tpg_attrib.generate_node_acls = 1; |
1014 | tpg->tpg_attrib.demo_mode_write_protect = 1; |
1015 | tpg->tpg_attrib.cache_dynamic_acls = 1; |
1016 | tpg->tpg_attrib.demo_mode_login_only = 1; |
1017 | tpg->tpg_attrib.jam_host = 0; |
1018 | |
1019 | ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); |
1020 | if (ret < 0) { |
1021 | kfree(objp: tpg); |
1022 | return NULL; |
1023 | } |
1024 | |
1025 | lport->tpg_1 = tpg; |
1026 | |
1027 | return &tpg->se_tpg; |
1028 | } |
1029 | |
/*
 * configfs rmdir hook for the non-NPIV TPG: stop target mode (phase 1)
 * if it is still running, then deregister and free the TPG.
 */
static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	struct scsi_qla_host *vha = lport->qla_vha;
	/*
	 * Call into qla2x_target.c LLD logic to shutdown the active
	 * FC Nexuses and disable target mode operation for this qla_hw_data
	 */
	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);

	core_tpg_deregister(se_tpg);
	/*
	 * Clear local TPG=1 pointer for non NPIV mode.
	 */
	lport->tpg_1 = NULL;
	kfree(objp: tpg);
}
1050 | |
1051 | static int tcm_qla2xxx_npiv_enable_tpg(struct se_portal_group *se_tpg, |
1052 | bool enable) |
1053 | { |
1054 | struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; |
1055 | struct tcm_qla2xxx_lport *lport = container_of(se_wwn, |
1056 | struct tcm_qla2xxx_lport, lport_wwn); |
1057 | struct scsi_qla_host *vha = lport->qla_vha; |
1058 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
1059 | struct tcm_qla2xxx_tpg, se_tpg); |
1060 | |
1061 | if (enable) { |
1062 | if (atomic_read(v: &tpg->lport_tpg_enabled)) |
1063 | return -EEXIST; |
1064 | |
1065 | atomic_set(v: &tpg->lport_tpg_enabled, i: 1); |
1066 | qlt_enable_vha(vha); |
1067 | } else { |
1068 | if (!atomic_read(v: &tpg->lport_tpg_enabled)) |
1069 | return 0; |
1070 | |
1071 | atomic_set(v: &tpg->lport_tpg_enabled, i: 0); |
1072 | qlt_stop_phase1(vha->vha_tgt.qla_tgt); |
1073 | qlt_stop_phase2(vha->vha_tgt.qla_tgt); |
1074 | } |
1075 | |
1076 | return 0; |
1077 | } |
1078 | |
1079 | static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn, |
1080 | const char *name) |
1081 | { |
1082 | struct tcm_qla2xxx_lport *lport = container_of(wwn, |
1083 | struct tcm_qla2xxx_lport, lport_wwn); |
1084 | struct tcm_qla2xxx_tpg *tpg; |
1085 | unsigned long tpgt; |
1086 | int ret; |
1087 | |
1088 | if (strstr(name, "tpgt_" ) != name) |
1089 | return ERR_PTR(error: -EINVAL); |
1090 | if (kstrtoul(s: name + 5, base: 10, res: &tpgt) || tpgt > USHRT_MAX) |
1091 | return ERR_PTR(error: -EINVAL); |
1092 | |
1093 | tpg = kzalloc(size: sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); |
1094 | if (!tpg) { |
1095 | pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n" ); |
1096 | return ERR_PTR(error: -ENOMEM); |
1097 | } |
1098 | tpg->lport = lport; |
1099 | tpg->lport_tpgt = tpgt; |
1100 | |
1101 | /* |
1102 | * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic |
1103 | * NodeACLs |
1104 | */ |
1105 | tpg->tpg_attrib.generate_node_acls = 1; |
1106 | tpg->tpg_attrib.demo_mode_write_protect = 1; |
1107 | tpg->tpg_attrib.cache_dynamic_acls = 1; |
1108 | tpg->tpg_attrib.demo_mode_login_only = 1; |
1109 | |
1110 | ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); |
1111 | if (ret < 0) { |
1112 | kfree(objp: tpg); |
1113 | return NULL; |
1114 | } |
1115 | lport->tpg_1 = tpg; |
1116 | return &tpg->se_tpg; |
1117 | } |
1118 | |
1119 | /* |
1120 | * Expected to be called with struct qla_hw_data->tgt.sess_lock held |
1121 | */ |
1122 | static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(scsi_qla_host_t *vha, |
1123 | const be_id_t s_id) |
1124 | { |
1125 | struct tcm_qla2xxx_lport *lport; |
1126 | struct se_node_acl *se_nacl; |
1127 | struct tcm_qla2xxx_nacl *nacl; |
1128 | u32 key; |
1129 | |
1130 | lport = vha->vha_tgt.target_lport_ptr; |
1131 | if (!lport) { |
1132 | pr_err("Unable to locate struct tcm_qla2xxx_lport\n" ); |
1133 | dump_stack(); |
1134 | return NULL; |
1135 | } |
1136 | |
1137 | key = sid_to_key(s_id); |
1138 | pr_debug("find_sess_by_s_id: 0x%06x\n" , key); |
1139 | |
1140 | se_nacl = btree_lookup32(head: &lport->lport_fcport_map, key); |
1141 | if (!se_nacl) { |
1142 | pr_debug("Unable to locate s_id: 0x%06x\n" , key); |
1143 | return NULL; |
1144 | } |
1145 | pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n" , |
1146 | se_nacl, se_nacl->initiatorname); |
1147 | |
1148 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); |
1149 | if (!nacl->fc_port) { |
1150 | pr_err("Unable to locate struct fc_port\n" ); |
1151 | return NULL; |
1152 | } |
1153 | |
1154 | return nacl->fc_port; |
1155 | } |
1156 | |
1157 | /* |
1158 | * Expected to be called with struct qla_hw_data->tgt.sess_lock held |
1159 | */ |
/*
 * Insert, replace or (when new_se_nacl == NULL) remove the S_ID -> se_nacl
 * mapping in lport->lport_fcport_map, and keep nacl->fc_port and
 * fc_port->se_sess consistent with it.
 */
static void tcm_qla2xxx_set_sess_by_s_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct fc_port *fc_port,
	be_id_t s_id)
{
	u32 key;
	void *slot;
	int rc;

	key = sid_to_key(s_id);
	pr_debug("set_sess_by_s_id: %06x\n" , key);

	slot = btree_lookup32(head: &lport->lport_fcport_map, key);
	if (!slot) {
		/* No existing mapping for this S_ID yet. */
		if (new_se_nacl) {
			pr_debug("Setting up new fc_port entry to new_se_nacl\n" );
			nacl->nport_id = key;
			rc = btree_insert32(head: &lport->lport_fcport_map, key,
					val: new_se_nacl, GFP_ATOMIC);
			if (rc)
				printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n" ,
				    (int)key);
		} else {
			pr_debug("Wiping nonexisting fc_port entry\n" );
		}

		/* Link session and fc_port even when no mapping was added. */
		fc_port->se_sess = se_sess;
		nacl->fc_port = fc_port;
		return;
	}

	/* A mapping exists; decide between clear and replace. */
	if (nacl->fc_port) {
		if (new_se_nacl == NULL) {
			pr_debug("Clearing existing nacl->fc_port and fc_port entry\n" );
			btree_remove32(head: &lport->lport_fcport_map, key);
			nacl->fc_port = NULL;
			return;
		}
		pr_debug("Replacing existing nacl->fc_port and fc_port entry\n" );
		btree_update32(head: &lport->lport_fcport_map, key, val: new_se_nacl);
		fc_port->se_sess = se_sess;
		nacl->fc_port = fc_port;
		return;
	}

	/* Mapping exists but nacl has no fc_port attached. */
	if (new_se_nacl == NULL) {
		pr_debug("Clearing existing fc_port entry\n" );
		btree_remove32(head: &lport->lport_fcport_map, key);
		return;
	}

	pr_debug("Replacing existing fc_port entry w/o active nacl->fc_port\n" );
	btree_update32(head: &lport->lport_fcport_map, key, val: new_se_nacl);
	fc_port->se_sess = se_sess;
	nacl->fc_port = fc_port;

	pr_debug("Setup nacl->fc_port %p by s_id for se_nacl: %p, initiatorname: %s\n" ,
	    nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname);
}
1222 | |
1223 | /* |
1224 | * Expected to be called with struct qla_hw_data->tgt.sess_lock held |
1225 | */ |
1226 | static struct fc_port *tcm_qla2xxx_find_sess_by_loop_id( |
1227 | scsi_qla_host_t *vha, |
1228 | const uint16_t loop_id) |
1229 | { |
1230 | struct tcm_qla2xxx_lport *lport; |
1231 | struct se_node_acl *se_nacl; |
1232 | struct tcm_qla2xxx_nacl *nacl; |
1233 | struct tcm_qla2xxx_fc_loopid *fc_loopid; |
1234 | |
1235 | lport = vha->vha_tgt.target_lport_ptr; |
1236 | if (!lport) { |
1237 | pr_err("Unable to locate struct tcm_qla2xxx_lport\n" ); |
1238 | dump_stack(); |
1239 | return NULL; |
1240 | } |
1241 | |
1242 | pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n" , loop_id); |
1243 | |
1244 | fc_loopid = lport->lport_loopid_map + loop_id; |
1245 | se_nacl = fc_loopid->se_nacl; |
1246 | if (!se_nacl) { |
1247 | pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n" , |
1248 | loop_id); |
1249 | return NULL; |
1250 | } |
1251 | |
1252 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); |
1253 | |
1254 | if (!nacl->fc_port) { |
1255 | pr_err("Unable to locate struct fc_port\n" ); |
1256 | return NULL; |
1257 | } |
1258 | |
1259 | return nacl->fc_port; |
1260 | } |
1261 | |
1262 | /* |
1263 | * Expected to be called with struct qla_hw_data->tgt.sess_lock held |
1264 | */ |
/*
 * Insert, replace or (when new_se_nacl == NULL) remove the loop_id ->
 * se_nacl mapping in lport->lport_loopid_map, keeping nacl->fc_port and
 * fc_port->se_sess consistent — the array counterpart of
 * tcm_qla2xxx_set_sess_by_s_id().
 */
static void tcm_qla2xxx_set_sess_by_loop_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct fc_port *fc_port,
	uint16_t loop_id)
{
	struct se_node_acl *saved_nacl;
	struct tcm_qla2xxx_fc_loopid *fc_loopid;

	pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n" , loop_id);

	fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
			lport->lport_loopid_map)[loop_id];

	saved_nacl = fc_loopid->se_nacl;
	if (!saved_nacl) {
		/* No existing mapping for this loop_id yet. */
		pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n" );
		fc_loopid->se_nacl = new_se_nacl;
		if (fc_port->se_sess != se_sess)
			fc_port->se_sess = se_sess;
		if (nacl->fc_port != fc_port)
			nacl->fc_port = fc_port;
		return;
	}

	/* A mapping exists; decide between clear and replace. */
	if (nacl->fc_port) {
		if (new_se_nacl == NULL) {
			pr_debug("Clearing nacl->fc_port and fc_loopid->se_nacl\n" );
			fc_loopid->se_nacl = NULL;
			nacl->fc_port = NULL;
			return;
		}

		pr_debug("Replacing existing nacl->fc_port and fc_loopid->se_nacl\n" );
		fc_loopid->se_nacl = new_se_nacl;
		if (fc_port->se_sess != se_sess)
			fc_port->se_sess = se_sess;
		if (nacl->fc_port != fc_port)
			nacl->fc_port = fc_port;
		return;
	}

	/* Mapping exists but nacl has no fc_port attached. */
	if (new_se_nacl == NULL) {
		pr_debug("Clearing fc_loopid->se_nacl\n" );
		fc_loopid->se_nacl = NULL;
		return;
	}

	pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->fc_port\n" );
	fc_loopid->se_nacl = new_se_nacl;
	if (fc_port->se_sess != se_sess)
		fc_port->se_sess = se_sess;
	if (nacl->fc_port != fc_port)
		nacl->fc_port = fc_port;

	pr_debug("Setup nacl->fc_port %p by loop_id for se_nacl: %p, initiatorname: %s\n" ,
	    nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname);
}
1325 | |
1326 | /* |
1327 | * Should always be called with qla_hw_data->tgt.sess_lock held. |
1328 | */ |
1329 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, |
1330 | struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess) |
1331 | { |
1332 | struct se_session *se_sess = sess->se_sess; |
1333 | |
1334 | tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, |
1335 | fc_port: sess, s_id: port_id_to_be_id(port_id: sess->d_id)); |
1336 | tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, |
1337 | fc_port: sess, loop_id: sess->loop_id); |
1338 | } |
1339 | |
/*
 * LLD ->free_session() hook: wait for all outstanding commands on the
 * se_session to complete, then remove it from target core.
 */
static void tcm_qla2xxx_free_session(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(pdev: ha->pdev);
	struct se_session *se_sess;
	struct tcm_qla2xxx_lport *lport;

	se_sess = sess->se_sess;
	if (!se_sess) {
		pr_err("struct fc_port->se_sess is NULL\n" );
		dump_stack();
		return;
	}

	/* lport is fetched only as a sanity check; it is not used below. */
	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n" );
		dump_stack();
		return;
	}
	target_wait_for_sess_cmds(se_sess);

	target_remove_session(se_sess);
}
1365 | |
/*
 * target_setup_session() callback: wire the freshly created se_session
 * into the lport's S_ID and loop_id lookup tables for the new fc_port
 * (passed via the opaque pointer p).  Always returns 0.
 */
static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	struct qla_hw_data *ha = lport->qla_vha->hw;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
				struct tcm_qla2xxx_nacl, se_node_acl);
	struct fc_port *qlat_sess = p;
	uint16_t loop_id = qlat_sess->loop_id;
	unsigned long flags;

	/*
	 * And now setup se_nacl and session pointers into HW lport internal
	 * mappings for fabric S_ID and LOOP_ID.
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	tcm_qla2xxx_set_sess_by_s_id(lport, new_se_nacl: se_nacl, nacl, se_sess, fc_port: qlat_sess,
				     s_id: port_id_to_be_id(port_id: qlat_sess->d_id));
	tcm_qla2xxx_set_sess_by_loop_id(lport, new_se_nacl: se_nacl, nacl,
					se_sess, fc_port: qlat_sess, loop_id);
	spin_unlock_irqrestore(lock: &ha->tgt.sess_lock, flags);

	return 0;
}
1393 | |
1394 | /* |
1395 | * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl() |
1396 | * to locate struct se_node_acl |
1397 | */ |
1398 | static int tcm_qla2xxx_check_initiator_node_acl( |
1399 | scsi_qla_host_t *vha, |
1400 | unsigned char *fc_wwpn, |
1401 | struct fc_port *qlat_sess) |
1402 | { |
1403 | struct qla_hw_data *ha = vha->hw; |
1404 | struct tcm_qla2xxx_lport *lport; |
1405 | struct tcm_qla2xxx_tpg *tpg; |
1406 | struct se_session *se_sess; |
1407 | unsigned char port_name[36]; |
1408 | int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count : |
1409 | TCM_QLA2XXX_DEFAULT_TAGS; |
1410 | |
1411 | lport = vha->vha_tgt.target_lport_ptr; |
1412 | if (!lport) { |
1413 | pr_err("Unable to locate struct tcm_qla2xxx_lport\n" ); |
1414 | dump_stack(); |
1415 | return -EINVAL; |
1416 | } |
1417 | /* |
1418 | * Locate the TPG=1 reference.. |
1419 | */ |
1420 | tpg = lport->tpg_1; |
1421 | if (!tpg) { |
1422 | pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n" ); |
1423 | return -EINVAL; |
1424 | } |
1425 | /* |
1426 | * Format the FCP Initiator port_name into colon seperated values to |
1427 | * match the format by tcm_qla2xxx explict ConfigFS NodeACLs. |
1428 | */ |
1429 | memset(&port_name, 0, 36); |
1430 | snprintf(buf: port_name, size: sizeof(port_name), fmt: "%8phC" , fc_wwpn); |
1431 | /* |
1432 | * Locate our struct se_node_acl either from an explict NodeACL created |
1433 | * via ConfigFS, or via running in TPG demo mode. |
1434 | */ |
1435 | se_sess = target_setup_session(&tpg->se_tpg, num_tags, |
1436 | sizeof(struct qla_tgt_cmd), |
1437 | TARGET_PROT_ALL, port_name, |
1438 | qlat_sess, callback: tcm_qla2xxx_session_cb); |
1439 | if (IS_ERR(ptr: se_sess)) |
1440 | return PTR_ERR(ptr: se_sess); |
1441 | |
1442 | return 0; |
1443 | } |
1444 | |
/*
 * LLD ->update_sess() hook: an initiator re-logged-in with a possibly new
 * loop_id and/or S_ID, so move its se_nacl to the new slots in both lookup
 * tables and record the new ids on the session.
 *
 * NOTE(review): presumably called with qla_hw_data->tgt.sess_lock held like
 * the other lookup-table updaters above — confirm against the caller.
 */
static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
	uint16_t loop_id, bool conf_compl_supported)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(pdev: ha->pdev);
	struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
			struct tcm_qla2xxx_nacl, se_node_acl);
	u32 key;


	if (sess->loop_id != loop_id || sess->d_id.b24 != s_id.b24)
		pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n" ,
		    sess, sess->port_name,
		    sess->loop_id, loop_id, sess->d_id.b.domain,
		    sess->d_id.b.area, sess->d_id.b.al_pa, s_id.b.domain,
		    s_id.b.area, s_id.b.al_pa);

	if (sess->loop_id != loop_id) {
		/*
		 * Because we can shuffle loop IDs around and we
		 * update different sessions non-atomically, we might
		 * have overwritten this session's old loop ID
		 * already, and we might end up overwriting some other
		 * session that will be updated later.  So we have to
		 * be extra careful and we can't warn about those things...
		 */
		if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
			lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;

		lport->lport_loopid_map[loop_id].se_nacl = se_nacl;

		sess->loop_id = loop_id;
	}

	if (sess->d_id.b24 != s_id.b24) {
		/* Old 24-bit S_ID key (domain:area:al_pa), as in sid_to_key(). */
		key = (((u32) sess->d_id.b.domain << 16) |
		       ((u32) sess->d_id.b.area << 8) |
		       ((u32) sess->d_id.b.al_pa));

		if (btree_lookup32(head: &lport->lport_fcport_map, key))
			WARN(btree_remove32(&lport->lport_fcport_map, key) !=
			    se_nacl, "Found wrong se_nacl when updating s_id %x:%x:%x\n" ,
			    sess->d_id.b.domain, sess->d_id.b.area,
			    sess->d_id.b.al_pa);
		else
			WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n" ,
			     sess->d_id.b.domain, sess->d_id.b.area,
			     sess->d_id.b.al_pa);

		/* New 24-bit S_ID key. */
		key = (((u32) s_id.b.domain << 16) |
		       ((u32) s_id.b.area << 8) |
		       ((u32) s_id.b.al_pa));

		if (btree_lookup32(head: &lport->lport_fcport_map, key)) {
			WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n" ,
			     s_id.b.domain, s_id.b.area, s_id.b.al_pa);
			btree_update32(head: &lport->lport_fcport_map, key, val: se_nacl);
		} else {
			btree_insert32(head: &lport->lport_fcport_map, key, val: se_nacl,
			    GFP_ATOMIC);
		}

		sess->d_id = s_id;
		nacl->nport_id = key;
	}

	sess->conf_compl_supported = conf_compl_supported;

}
1517 | |
1518 | /* |
1519 | * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path. |
1520 | */ |
static const struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
	/* Command / TMR dispatch from the LLD into target core. */
	.find_cmd_by_tag = tcm_qla2xxx_find_cmd_by_tag,
	.handle_cmd = tcm_qla2xxx_handle_cmd,
	.handle_data = tcm_qla2xxx_handle_data,
	.handle_tmr = tcm_qla2xxx_handle_tmr,
	.get_cmd = tcm_qla2xxx_get_cmd,
	.rel_cmd = tcm_qla2xxx_rel_cmd,
	.free_cmd = tcm_qla2xxx_free_cmd,
	.free_mcmd = tcm_qla2xxx_free_mcmd,
	/* Session lifecycle and lookup-table maintenance. */
	.free_session = tcm_qla2xxx_free_session,
	.update_sess = tcm_qla2xxx_update_sess,
	.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
	.find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
	.find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
	.put_sess = tcm_qla2xxx_put_sess,
	.shutdown_sess = tcm_qla2xxx_shutdown_sess,
	/* T10-DIF tag handling. */
	.get_dif_tags = tcm_qla2xxx_dif_tags,
	.chk_dif_tags = tcm_qla2xxx_chk_dif_tags,
};
1541 | |
1542 | static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) |
1543 | { |
1544 | int rc; |
1545 | size_t map_sz; |
1546 | |
1547 | rc = btree_init32(head: &lport->lport_fcport_map); |
1548 | if (rc) { |
1549 | pr_err("Unable to initialize lport->lport_fcport_map btree\n" ); |
1550 | return rc; |
1551 | } |
1552 | |
1553 | map_sz = array_size(65536, sizeof(struct tcm_qla2xxx_fc_loopid)); |
1554 | |
1555 | lport->lport_loopid_map = vzalloc(size: map_sz); |
1556 | if (!lport->lport_loopid_map) { |
1557 | pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n" , map_sz); |
1558 | btree_destroy32(head: &lport->lport_fcport_map); |
1559 | return -ENOMEM; |
1560 | } |
1561 | pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n" , map_sz); |
1562 | return 0; |
1563 | } |
1564 | |
1565 | static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha, |
1566 | void *target_lport_ptr, |
1567 | u64 npiv_wwpn, u64 npiv_wwnn) |
1568 | { |
1569 | struct qla_hw_data *ha = vha->hw; |
1570 | struct tcm_qla2xxx_lport *lport = |
1571 | (struct tcm_qla2xxx_lport *)target_lport_ptr; |
1572 | /* |
1573 | * Setup tgt_ops, local pointer to vha and target_lport_ptr |
1574 | */ |
1575 | ha->tgt.tgt_ops = &tcm_qla2xxx_template; |
1576 | vha->vha_tgt.target_lport_ptr = target_lport_ptr; |
1577 | lport->qla_vha = vha; |
1578 | |
1579 | return 0; |
1580 | } |
1581 | |
1582 | static struct se_wwn *tcm_qla2xxx_make_lport( |
1583 | struct target_fabric_configfs *tf, |
1584 | struct config_group *group, |
1585 | const char *name) |
1586 | { |
1587 | struct tcm_qla2xxx_lport *lport; |
1588 | u64 wwpn; |
1589 | int ret = -ENODEV; |
1590 | |
1591 | if (tcm_qla2xxx_parse_wwn(name, wwn: &wwpn, strict: 1) < 0) |
1592 | return ERR_PTR(error: -EINVAL); |
1593 | |
1594 | lport = kzalloc(size: sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); |
1595 | if (!lport) { |
1596 | pr_err("Unable to allocate struct tcm_qla2xxx_lport\n" ); |
1597 | return ERR_PTR(error: -ENOMEM); |
1598 | } |
1599 | lport->lport_wwpn = wwpn; |
1600 | tcm_qla2xxx_format_wwn(buf: &lport->lport_name[0], TCM_QLA2XXX_NAMELEN, |
1601 | wwn: wwpn); |
1602 | sprintf(buf: lport->lport_naa_name, fmt: "naa.%016llx" , (unsigned long long) wwpn); |
1603 | |
1604 | ret = tcm_qla2xxx_init_lport(lport); |
1605 | if (ret != 0) |
1606 | goto out; |
1607 | |
1608 | ret = qlt_lport_register(lport, wwpn, 0, 0, |
1609 | callback: tcm_qla2xxx_lport_register_cb); |
1610 | if (ret != 0) |
1611 | goto out_lport; |
1612 | |
1613 | return &lport->lport_wwn; |
1614 | out_lport: |
1615 | vfree(addr: lport->lport_loopid_map); |
1616 | btree_destroy32(head: &lport->lport_fcport_map); |
1617 | out: |
1618 | kfree(objp: lport); |
1619 | return ERR_PTR(error: ret); |
1620 | } |
1621 | |
/*
 * configfs rmdir hook for the physical-port WWN directory: finish target
 * shutdown, deregister from the LLD and free all lookup structures.
 */
static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct se_node_acl *node;
	u32 key = 0;

	/*
	 * Call into qla2x_target.c LLD logic to complete the
	 * shutdown of struct qla_tgt after the call to
	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
	 */
	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
		qlt_stop_phase2(vha->vha_tgt.qla_tgt);

	qlt_lport_deregister(vha);

	vfree(addr: lport->lport_loopid_map);
	/* Empty the fcport btree before destroying it. */
	btree_for_each_safe32(&lport->lport_fcport_map, key, node)
		btree_remove32(head: &lport->lport_fcport_map, key);
	btree_destroy32(head: &lport->lport_fcport_map);
	kfree(objp: lport);
}
1646 | |
/*
 * qlt_lport_register() callback for an NPIV port: validate that the base
 * port is in target mode with an enabled TPG, create the FC vport, and
 * wire the new NPIV vha to the lport.  Returns 0 or a negative errno.
 */
static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
					      void *target_lport_ptr,
					      u64 npiv_wwpn, u64 npiv_wwnn)
{
	struct fc_vport *vport;
	struct Scsi_Host *sh = base_vha->host;
	struct scsi_qla_host *npiv_vha;
	struct tcm_qla2xxx_lport *lport =
			(struct tcm_qla2xxx_lport *)target_lport_ptr;
	struct tcm_qla2xxx_lport *base_lport =
			(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
	struct fc_vport_identifiers vport_id;

	if (qla_ini_mode_enabled(ha: base_vha)) {
		pr_err("qla2xxx base_vha not enabled for target mode\n" );
		return -EPERM;
	}

	/* The base port's TPG=1 must already be configured and enabled. */
	if (!base_lport || !base_lport->tpg_1 ||
	    !atomic_read(v: &base_lport->tpg_1->lport_tpg_enabled)) {
		pr_err("qla2xxx base_lport or tpg_1 not available\n" );
		return -EPERM;
	}

	memset(&vport_id, 0, sizeof(vport_id));
	vport_id.port_name = npiv_wwpn;
	vport_id.node_name = npiv_wwnn;
	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
	vport_id.vport_type = FC_PORTTYPE_NPIV;
	vport_id.disable = false;

	vport = fc_vport_create(shost: sh, channel: 0, &vport_id);
	if (!vport) {
		pr_err("fc_vport_create failed for qla2xxx_npiv\n" );
		return -ENODEV;
	}
	/*
	 * Setup local pointer to NPIV vhba + target_lport_ptr
	 */
	npiv_vha = (struct scsi_qla_host *)vport->dd_data;
	npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
	lport->qla_vha = npiv_vha;
	/* Hold a host reference; dropped in tcm_qla2xxx_npiv_drop_lport(). */
	scsi_host_get(npiv_vha->host);
	return 0;
}
1692 | |
1693 | |
/*
 * configfs mkdir hook for an NPIV WWN directory.  The name has the form
 * "<phys_wwpn>@<npiv_wwpn>:<npiv_wwnn>": the part before '@' is the
 * physical port, the part after identifies the NPIV port to create.
 */
static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
	char *p, tmp[128];
	int ret;

	/* Work on a local copy so the name can be split at '@'. */
	snprintf(buf: tmp, size: 128, fmt: "%s" , name);

	p = strchr(tmp, '@');
	if (!p) {
		pr_err("Unable to locate NPIV '@' separator\n" );
		return ERR_PTR(error: -EINVAL);
	}
	*p++ = '\0';

	if (tcm_qla2xxx_parse_wwn(name: tmp, wwn: &phys_wwpn, strict: 1) < 0)
		return ERR_PTR(error: -EINVAL);

	if (tcm_qla2xxx_npiv_parse_wwn(name: p, strlen(p)+1,
				wwpn: &npiv_wwpn, wwnn: &npiv_wwnn) < 0)
		return ERR_PTR(error: -EINVAL);

	lport = kzalloc(size: sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n" );
		return ERR_PTR(error: -ENOMEM);
	}
	lport->lport_npiv_wwpn = npiv_wwpn;
	lport->lport_npiv_wwnn = npiv_wwnn;
	sprintf(buf: lport->lport_naa_name, fmt: "naa.%016llx" , (unsigned long long) npiv_wwpn);

	ret = tcm_qla2xxx_init_lport(lport);
	if (ret != 0)
		goto out;

	ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
				 callback: tcm_qla2xxx_lport_register_npiv_cb);
	if (ret != 0)
		goto out_lport;

	return &lport->lport_wwn;
out_lport:
	vfree(addr: lport->lport_loopid_map);
	btree_destroy32(head: &lport->lport_fcport_map);
out:
	kfree(objp: lport);
	return ERR_PTR(error: ret);
}
1746 | |
/*
 * configfs rmdir hook for an NPIV WWN directory: release the host
 * references and tear down the FC vport created at registration time.
 */
static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *npiv_vha = lport->qla_vha;
	struct qla_hw_data *ha = npiv_vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev: ha->pdev);

	/* Drop the reference taken in tcm_qla2xxx_lport_register_npiv_cb(). */
	scsi_host_put(t: npiv_vha->host);
	/*
	 * Notify libfc that we want to release the vha->fc_vport
	 */
	fc_vport_terminate(vport: npiv_vha->fc_vport);
	scsi_host_put(t: base_vha->host);
	kfree(objp: lport);
}
1763 | |
1764 | |
1765 | static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item, |
1766 | char *page) |
1767 | { |
1768 | return sprintf(buf: page, |
1769 | fmt: "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on %s\n" , |
1770 | QLA2XXX_VERSION, utsname()->sysname, |
1771 | utsname()->machine, utsname()->release); |
1772 | } |
1773 | |
/* WWN-level attributes shared by the qla2xxx and qla2xxx_npiv fabrics. */
CONFIGFS_ATTR_RO(tcm_qla2xxx_wwn_, version);

static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
	&tcm_qla2xxx_wwn_attr_version,
	NULL,
};
1780 | |
/* target core fabric ops for the physical-port "qla2xxx" fabric. */
static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
	.module = THIS_MODULE,
	.fabric_name = "qla2xxx" ,
	.node_acl_size = sizeof(struct tcm_qla2xxx_nacl),
	/*
	 * XXX: Limit assumes single page per scatter-gather-list entry.
	 * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096
	 */
	.max_data_sg_nents = 1200,
	.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
	.tpg_get_tag = tcm_qla2xxx_get_tag,
	.tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
					tcm_qla2xxx_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect =
					tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only,
	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free = tcm_qla2xxx_check_stop_free,
	.release_cmd = tcm_qla2xxx_release_cmd,
	.close_session = tcm_qla2xxx_close_session,
	.sess_get_initiator_sid = NULL,
	.write_pending = tcm_qla2xxx_write_pending,
	.get_cmd_state = tcm_qla2xxx_get_cmd_state,
	.queue_data_in = tcm_qla2xxx_queue_data_in,
	.queue_status = tcm_qla2xxx_queue_status,
	.queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
	.aborted_task = tcm_qla2xxx_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = tcm_qla2xxx_make_lport,
	.fabric_drop_wwn = tcm_qla2xxx_drop_lport,
	.fabric_make_tpg = tcm_qla2xxx_make_tpg,
	.fabric_enable_tpg = tcm_qla2xxx_enable_tpg,
	.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
	.fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl,

	.tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
	.tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs,
	.tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs,

	.default_submit_type = TARGET_DIRECT_SUBMIT,
	.direct_submit_supp = 1,
};
1829 | |
/* target core fabric ops for the "qla2xxx_npiv" (virtual port) fabric. */
static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
	.module = THIS_MODULE,
	.fabric_name = "qla2xxx_npiv" ,
	.node_acl_size = sizeof(struct tcm_qla2xxx_nacl),
	.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
	.tpg_get_tag = tcm_qla2xxx_get_tag,
	.tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
	.tpg_check_prod_mode_write_protect =
	    tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free = tcm_qla2xxx_check_stop_free,
	.release_cmd = tcm_qla2xxx_release_cmd,
	.close_session = tcm_qla2xxx_close_session,
	.sess_get_initiator_sid = NULL,
	.write_pending = tcm_qla2xxx_write_pending,
	.get_cmd_state = tcm_qla2xxx_get_cmd_state,
	.queue_data_in = tcm_qla2xxx_queue_data_in,
	.queue_status = tcm_qla2xxx_queue_status,
	.queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
	.aborted_task = tcm_qla2xxx_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
	.fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
	.fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
	.fabric_enable_tpg = tcm_qla2xxx_npiv_enable_tpg,
	.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
	.fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl,

	.tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,

	.default_submit_type = TARGET_DIRECT_SUBMIT,
	.direct_submit_supp = 1,
};
1869 | |
1870 | static int tcm_qla2xxx_register_configfs(void) |
1871 | { |
1872 | int ret; |
1873 | |
1874 | pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on %s\n" , |
1875 | QLA2XXX_VERSION, utsname()->sysname, |
1876 | utsname()->machine, utsname()->release); |
1877 | |
1878 | ret = target_register_template(fo: &tcm_qla2xxx_ops); |
1879 | if (ret) |
1880 | return ret; |
1881 | |
1882 | ret = target_register_template(fo: &tcm_qla2xxx_npiv_ops); |
1883 | if (ret) |
1884 | goto out_fabric; |
1885 | |
1886 | tcm_qla2xxx_free_wq = alloc_workqueue(fmt: "tcm_qla2xxx_free" , |
1887 | flags: WQ_MEM_RECLAIM, max_active: 0); |
1888 | if (!tcm_qla2xxx_free_wq) { |
1889 | ret = -ENOMEM; |
1890 | goto out_fabric_npiv; |
1891 | } |
1892 | |
1893 | return 0; |
1894 | |
1895 | out_fabric_npiv: |
1896 | target_unregister_template(fo: &tcm_qla2xxx_npiv_ops); |
1897 | out_fabric: |
1898 | target_unregister_template(fo: &tcm_qla2xxx_ops); |
1899 | return ret; |
1900 | } |
1901 | |
1902 | static void tcm_qla2xxx_deregister_configfs(void) |
1903 | { |
1904 | destroy_workqueue(wq: tcm_qla2xxx_free_wq); |
1905 | |
1906 | target_unregister_template(fo: &tcm_qla2xxx_ops); |
1907 | target_unregister_template(fo: &tcm_qla2xxx_npiv_ops); |
1908 | } |
1909 | |
1910 | static int __init tcm_qla2xxx_init(void) |
1911 | { |
1912 | int ret; |
1913 | |
1914 | BUILD_BUG_ON(sizeof(struct abts_recv_from_24xx) != 64); |
1915 | BUILD_BUG_ON(sizeof(struct abts_resp_from_24xx_fw) != 64); |
1916 | BUILD_BUG_ON(sizeof(struct atio7_fcp_cmnd) != 32); |
1917 | BUILD_BUG_ON(sizeof(struct atio_from_isp) != 64); |
1918 | BUILD_BUG_ON(sizeof(struct ba_acc_le) != 12); |
1919 | BUILD_BUG_ON(sizeof(struct ba_rjt_le) != 4); |
1920 | BUILD_BUG_ON(sizeof(struct ctio7_from_24xx) != 64); |
1921 | BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64); |
1922 | BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); |
1923 | BUILD_BUG_ON(sizeof(struct ctio_crc_from_fw) != 64); |
1924 | BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64); |
1925 | BUILD_BUG_ON(sizeof(struct fcp_hdr) != 24); |
1926 | BUILD_BUG_ON(sizeof(struct fcp_hdr_le) != 24); |
1927 | BUILD_BUG_ON(sizeof(struct nack_to_isp) != 64); |
1928 | |
1929 | ret = tcm_qla2xxx_register_configfs(); |
1930 | if (ret < 0) |
1931 | return ret; |
1932 | |
1933 | return 0; |
1934 | } |
1935 | |
/* Module exit point: tear down fabric templates and the free workqueue. */
static void __exit tcm_qla2xxx_exit(void)
{
	tcm_qla2xxx_deregister_configfs();
}
1940 | |
/* Module metadata and init/exit hookup. */
MODULE_DESCRIPTION("TCM QLA24XX+ series NPIV enabled fabric driver" );
MODULE_LICENSE("GPL" );
module_init(tcm_qla2xxx_init);
module_exit(tcm_qla2xxx_exit);
1945 | |