// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static int qla_start_scsi_type6(srb_t *sp);

/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
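 *
 * A Command Type 2 IOCB holds three DSDs and each Continuation Type 0
 * IOCB holds seven more, so, for example, @dsds = 12 needs
 * 1 + DIV_ROUND_UP(12 - 3, 7) = 3 entries.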
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
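 *
 * A Command Type 3 IOCB holds two DSDs and each Continuation Type 1
 * IOCB holds five more, so, for example, @dsds = 12 needs
 * 1 + DIV_ROUND_UP(12 - 2, 5) = 3 entries.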
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
	    CONTINUE_A64_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

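/*
 * qla24xx_configure_prot_mode() - Translate the command's SCSI protection
 * operation into firmware protection options.  Returns the number of
 * protection data segments (zero when the command carries no protection
 * information).
 */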
inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK))
		*fw_prot_opts |= PO_DISABLE_GUARD_CHECK;

	return scsi_prot_sg_count(cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
	uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd32 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
	cur_dsd = cmd_pkt->dsd32;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd32(&cur_dsd, sg);
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
	uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
	cur_dsd = cmd_pkt->dsd64;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
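 *
 * Returns a handle in the range [1, req->num_outstanding_cmds); handle 0
 * is reserved to indicate that no handle is available.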
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
	uint32_t index, handle = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}

	return 0;
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
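	/*
	 * Refresh the cached free-entry count from the hardware out pointer
	 * only when it looks insufficient; the two extra entries keep
	 * headroom on the request ring.
	 */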
	if (req->cnt < (req_cnt + 2)) {
		cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
		} else {
			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct req_que *req = qpair->req;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk24 = (struct mrk_entry_24xx *)mrk;

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}

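	/*
	 * Markers are not tracked in req->outstanding_cmds, so flag the
	 * handle for the completion path to skip.
	 */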
	if (IS_FWI2_CAPABLE(ha))
		mrk24->handle = QLA_SKIP_HANDLE;

	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	struct dsd64 *cur_dsd = NULL, *next_dsd;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE ||
	    tot_dsds == 0) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = &sp->u.scmd.ct6_ctx;

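	/*
	 * The first DSD list is referenced from the IOCB itself (fcp_dsd);
	 * each later list is chained from the spare slot left at the end of
	 * the previous one.
	 */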
	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&qpair->dsd_list, struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		qpair->dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		qpair->dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cmd_pkt->fcp_dsd.address);
			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
		} else {
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd++;
		}
		cur_dsd = next_dsd;
		while (avail_dsds) {
			append_dsd64(&cur_dsd, cur_seg);
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
	return 0;
}

/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->dsd;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

struct fw_dif_context {
	__le32 ref_tag;
	__le16 app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd));

	if (cmd->prot_flags & SCSI_PROT_REF_CHECK &&
	    qla2x00_hba_err_chk_enabled(sp)) {
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
	}

	pkt->app_tag = cpu_to_le16(0);
	pkt->app_tag_mask[0] = 0x0;
	pkt->app_tag_mask[1] = 0x0;
}

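/*
 * qla24xx_get_one_block_sg() - Advance through the data SG list one
 * protection-interval block at a time.  Returns 1 while another DMA piece
 * was produced (in sgx->dma_addr/dma_len) and 0 once all bytes have been
 * consumed; *partial is set when an SG element ends mid-block.
 */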
int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	struct dsd64 *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int;	/* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = scsi_prot_interval(cmd);

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
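			/*
			 * Each DSD entry is 12 bytes (64-bit address plus
			 * 32-bit length); the extra entry holds the chain
			 * pointer to the next list or the null terminator.
			 */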
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &sp->u.scmd.crc_ctx->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		put_unaligned_le64(sle_dma, &cur_dsd->address);
		cur_dsd->length = cpu_to_le32(sle_dma_len);
		cur_dsd++;
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	struct dsd64 *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &sp->u.scmd.crc_ctx->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
	struct scatterlist *sg, *sgl;
	struct crc_context *difctx = NULL;
	struct scsi_qla_host *vha;
	uint dsd_list_len;
	uint avail_dsds = 0;
	uint used_dsds = tot_dsds;
	bool dif_local_dma_alloc = false;
	bool direction_to_device = false;
	int i;

	if (sp) {
		struct scsi_cmnd *cmd = GET_CMD_SP(sp);

		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
		difctx = sp->u.scmd.crc_ctx;
		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
		    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
		    __func__, cmd, difctx, sp);
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
		difctx = tc->ctx;
		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
	    "%s: enter (write=%u)\n", __func__, direction_to_device);

	/* if initiator doing write or target doing read */
	if (direction_to_device) {
		for_each_sg(sgl, sg, tot_dsds, i) {
			u64 sle_phys = sg_phys(sg);

			/* If SGE addr + len flips bits in upper 32-bits */
			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
				    "%s: page boundary crossing (phys=%llx len=%x)\n",
				    __func__, sle_phys, sg->length);

				if (difctx) {
					ha->dif_bundle_crossed_pages++;
					dif_local_dma_alloc = true;
				} else {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe022,
					    "%s: difctx pointer is NULL\n",
					    __func__);
				}
				break;
			}
		}
		ha->dif_bundle_writes++;
	} else {
		ha->dif_bundle_reads++;
	}

	if (ql2xdifbundlinginternalbuffers)
		dif_local_dma_alloc = direction_to_device;

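	/*
	 * When a protection SGE straddles a 4GB address boundary (or the
	 * ql2xdifbundlinginternalbuffers module parameter forces it for
	 * writes), bounce the protection data through locally allocated
	 * DMA buffers carved into pool-sized chunks.
	 */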
	if (dif_local_dma_alloc) {
		u32 track_difbundl_buf = 0;
		u32 ldma_sg_len = 0;
		u8 ldma_needed = 1;

		difctx->no_dif_bundl = 0;
		difctx->dif_bundl_len = 0;

		/* Track DSD buffers */
		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
		/* Track local DMA buffers */
		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

		for_each_sg(sgl, sg, tot_dsds, i) {
			u32 sglen = sg_dma_len(sg);

			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
			    difctx->dif_bundl_len, ldma_needed);

			while (sglen) {
				u32 xfrlen = 0;

				if (ldma_needed) {
					/*
					 * Allocate list item to store
					 * the DMA buffers
					 */
					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
					    GFP_ATOMIC);
					if (!dsd_ptr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc dsd_ptr\n",
						    __func__);
						return 1;
					}
					ha->dif_bundle_kallocs++;

					/* allocate dma buffer */
					dsd_ptr->dsd_addr = dma_pool_alloc
						(ha->dif_bundl_pool, GFP_ATOMIC,
						 &dsd_ptr->dsd_list_dma);
					if (!dsd_ptr->dsd_addr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc ->dsd_addr\n",
						    __func__);
						/*
						 * need to cleanup only this
						 * dsd_ptr rest will be done
						 * by sp_free_dma()
						 */
						kfree(dsd_ptr);
						ha->dif_bundle_kallocs--;
						return 1;
					}
					ha->dif_bundle_dma_allocs++;
					ldma_needed = 0;
					difctx->no_dif_bundl++;
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dma_hndl_list);
				}

				/* xfrlen is min of dma pool size and sglen */
				xfrlen = (sglen >
				    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
				    sglen;

				/* replace with local allocated dma buffer */
				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
				    difctx->dif_bundl_len);
				difctx->dif_bundl_len += xfrlen;
				sglen -= xfrlen;
				ldma_sg_len += xfrlen;
				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
				    sg_is_last(sg)) {
					ldma_needed = 1;
					ldma_sg_len = 0;
				}
			}
		}

		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
		    difctx->dif_bundl_len, difctx->no_dif_bundl,
		    track_difbundl_buf);

		if (sp)
			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
		else
			tc->prot_flags = DIF_BUNDL_DMA_VALID;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			u32 sglen = (difctx->dif_bundl_len >
			    DIF_BUNDLING_DMA_POOL_SIZE) ?
			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

			BUG_ON(track_difbundl_buf == 0);

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
				    0xe024,
				    "%s: adding continuation iocb's\n",
				    __func__);
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc dsd_ptr\n",
					    __func__);
					return 1;
				}
				ha->dif_bundle_kallocs++;

				difctx->no_ldif_dsd++;
				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc ->dsd_addr\n",
					    __func__);
					/*
					 * need to cleanup only this dsd_ptr
					 * rest will be done by sp_free_dma()
					 */
					kfree(dsd_ptr);
					ha->dif_bundle_kallocs--;
					return 1;
				}
				ha->dif_bundle_dma_allocs++;

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
				    &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			put_unaligned_le64(dif_dsd->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(sglen);
			cur_dsd++;
			avail_dsds--;
			difctx->dif_bundl_len -= sglen;
			track_difbundl_buf--;
		}

		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
		    difctx->no_ldif_dsd, difctx->no_dif_bundl);
	} else {
		for_each_sg(sgl, sg, tot_dsds, i) {
			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe027,
					    "%s: failed alloc dsd_dma...\n",
					    __func__);
					return 1;
				}

				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					/* need to cleanup only this dsd_ptr */
					/* rest will be done by sp_free_dma() */
					kfree(dsd_ptr);
					return 1;
				}

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
				    &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			append_dsd64(&cur_dsd, sg);
			avail_dsds--;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	struct dsd64 *cur_dsd;
	__be32 *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

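	/*
	 * Bundling keeps separate data and protection DSD lists; it is only
	 * useful for pass-through operations, where both data and protection
	 * buffers live in host memory.  For insert/strip operations a single
	 * combined list is built instead.
	 */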
	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.crc_ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
	cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
	    &cmd_pkt->fcp_cmnd_dseg_address);
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0;	/* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
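	/*
	 * FCP_DL trails the (possibly extended) CDB inside the FCP_CMND
	 * payload and is big-endian on the wire, hence htonl() rather than
	 * cpu_to_le32().
	 */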
	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
		return qla28xx_start_scsi_edif(sp);

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

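	/*
	 * Reserve firmware IOCB and exchange resources up front; they are
	 * returned via qla_put_fw_resources() on the error path.
	 */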
	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
	sp->iores.exch_cnt = 1;
	sp->iores.iocb_cnt = req_cnt;
	if (qla_get_fw_resources(sp->qpair, &sp->iores))
		goto queuing_error;

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->qpair->cmd_cnt++;
	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	qla_put_fw_resources(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
		else
			return qla_start_scsi_type6(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
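			/*
			 * The HBA inserts or strips the protection data in
			 * flight, so every data segment must break on a
			 * logical-block boundary.  Walk the scatterlist one
			 * block at a time to get the real descriptor count.
			 */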
1758 | struct qla2_sgx sgx; |
1759 | uint32_t partial; |
1760 | |
1761 | memset(&sgx, 0, sizeof(struct qla2_sgx)); |
1762 | sgx.tot_bytes = scsi_bufflen(cmd); |
1763 | sgx.cur_sg = scsi_sglist(cmd); |
1764 | sgx.sp = sp; |
1765 | |
1766 | nseg = 0; |
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
1769 | nseg++; |
1770 | } |
1771 | } else |
1772 | nseg = 0; |
1773 | |
1774 | /* number of required data segments */ |
1775 | tot_dsds = nseg; |
1776 | |
1777 | /* Compute number of required protection segments */ |
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1779 | nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), |
1780 | scsi_prot_sg_count(cmd), cmd->sc_data_direction); |
1781 | if (unlikely(!nseg)) |
1782 | goto queuing_error; |
1783 | else |
1784 | sp->flags |= SRB_CRC_PROT_DMA_VALID; |
1785 | |
		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
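			/*
			 * With the HBA inserting or stripping the DIF tuples
			 * itself, budget one protection descriptor per
			 * logical block of the transfer.
			 */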
1788 | nseg = scsi_bufflen(cmd) / cmd->device->sector_size; |
1789 | } |
1790 | } else { |
1791 | nseg = 0; |
1792 | } |
1793 | |
1794 | req_cnt = 1; |
1795 | /* Total Data and protection sg segment(s) */ |
1796 | tot_prot_dsds = nseg; |
1797 | tot_dsds += nseg; |
1798 | |
1799 | sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; |
1800 | sp->iores.exch_cnt = 1; |
	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (qla_get_fw_resources(sp->qpair, &sp->iores))
1803 | goto queuing_error; |
1804 | |
1805 | if (req->cnt < (req_cnt + 2)) { |
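		/*
		 * The cached free count is stale; refresh it from the
		 * consumer index, using the DMA'ed shadow copy where
		 * supported to avoid a slow MMIO read.
		 */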
1806 | if (IS_SHADOW_REG_CAPABLE(ha)) { |
1807 | cnt = *req->out_ptr; |
1808 | } else { |
			cnt = rd_reg_dword_relaxed(req->req_q_out);
1810 | if (qla2x00_check_reg16_for_disconnect(vha, cnt)) |
1811 | goto queuing_error; |
1812 | } |
1813 | if (req->ring_index < cnt) |
1814 | req->cnt = cnt - req->ring_index; |
1815 | else |
1816 | req->cnt = req->length - |
1817 | (req->ring_index - cnt); |
1818 | if (req->cnt < (req_cnt + 2)) |
1819 | goto queuing_error; |
1820 | } |
1821 | |
1822 | status |= QDSS_GOT_Q_SPACE; |
1823 | |
1824 | /* Build header part of command packet (excluding the OPCODE). */ |
1825 | req->current_outstanding_cmd = handle; |
1826 | req->outstanding_cmds[handle] = sp; |
1827 | sp->handle = handle; |
1828 | cmd->host_scribble = (unsigned char *)(unsigned long)handle; |
1829 | req->cnt -= req_cnt; |
1830 | |
1831 | /* Fill-in common area */ |
1832 | cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; |
	cmd_pkt->handle = make_handle(req->id, handle);
1834 | |
1835 | clr_ptr = (uint32_t *)cmd_pkt + 2; |
1836 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); |
1837 | |
1838 | /* Set NPORT-ID and LUN number*/ |
1839 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
1840 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; |
1841 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; |
1842 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; |
1843 | |
1844 | int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); |
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1846 | |
1847 | /* Total Data and protection segment(s) */ |
1848 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); |
1849 | |
1850 | /* Build IOCB segments and adjust for data protection segments */ |
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
1854 | goto queuing_error; |
1855 | |
1856 | cmd_pkt->entry_count = (uint8_t)req_cnt; |
1857 | /* Specify response queue number where completion should happen */ |
1858 | cmd_pkt->entry_status = (uint8_t) rsp->id; |
1859 | cmd_pkt->timeout = cpu_to_le16(0); |
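	/* Commit the IOCB to memory before handing the ring index to HW. */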
1860 | wmb(); |
1861 | |
1862 | /* Adjust ring index. */ |
1863 | req->ring_index++; |
1864 | if (req->ring_index == req->length) { |
1865 | req->ring_index = 0; |
1866 | req->ring_ptr = req->ring; |
1867 | } else |
1868 | req->ring_ptr++; |
1869 | |
1870 | sp->qpair->cmd_cnt++; |
1871 | /* Set chip new ring index. */ |
	wrt_reg_dword(req->req_q_in, req->ring_index);
1873 | |
1874 | /* Manage unprocessed RIO/ZIO commands in response queue. */ |
1875 | if (vha->flags.process_response_queue && |
1876 | rsp->ring_ptr->signature != RESPONSE_PROCESSED) |
1877 | qla24xx_process_response_queue(vha, rsp); |
1878 | |
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1880 | |
1881 | return QLA_SUCCESS; |
1882 | |
1883 | queuing_error: |
1884 | if (status & QDSS_GOT_Q_SPACE) { |
1885 | req->outstanding_cmds[handle] = NULL; |
1886 | req->cnt += req_cnt; |
1887 | } |
1888 | /* Cleanup will be performed by the caller (queuecommand) */ |
1889 | |
	qla_put_fw_resources(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1892 | |
1893 | return QLA_FUNCTION_FAILED; |
1894 | } |
1895 | |
1896 | /** |
1897 | * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP |
1898 | * @sp: command to send to the ISP |
1899 | * |
1900 | * Returns non-zero if a failure occurred, else zero. |
1901 | */ |
1902 | static int |
1903 | qla2xxx_start_scsi_mq(srb_t *sp) |
1904 | { |
1905 | int nseg; |
1906 | unsigned long flags; |
1907 | uint32_t *clr_ptr; |
1908 | uint32_t handle; |
1909 | struct cmd_type_7 *cmd_pkt; |
1910 | uint16_t cnt; |
1911 | uint16_t req_cnt; |
1912 | uint16_t tot_dsds; |
1913 | struct req_que *req = NULL; |
1914 | struct rsp_que *rsp; |
1915 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); |
1916 | struct scsi_qla_host *vha = sp->fcport->vha; |
1917 | struct qla_hw_data *ha = vha->hw; |
1918 | struct qla_qpair *qpair = sp->qpair; |
1919 | |
1920 | if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE)) |
1921 | return qla28xx_start_scsi_edif(sp); |
1922 | |
1923 | /* Acquire qpair specific lock */ |
1924 | spin_lock_irqsave(&qpair->qp_lock, flags); |
1925 | |
1926 | /* Setup qpair pointers */ |
1927 | req = qpair->req; |
1928 | rsp = qpair->rsp; |
1929 | |
1930 | /* So we know we haven't pci_map'ed anything yet */ |
1931 | tot_dsds = 0; |
1932 | |
1933 | /* Send marker if required */ |
1934 | if (vha->marker_needed != 0) { |
		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
1938 | return QLA_FUNCTION_FAILED; |
1939 | } |
1940 | vha->marker_needed = 0; |
1941 | } |
1942 | |
1943 | handle = qla2xxx_get_next_handle(req); |
1944 | if (handle == 0) |
1945 | goto queuing_error; |
1946 | |
1947 | /* Map the sg table so we have an accurate count of sg entries needed */ |
1948 | if (scsi_sg_count(cmd)) { |
1949 | nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), |
1950 | scsi_sg_count(cmd), cmd->sc_data_direction); |
1951 | if (unlikely(!nseg)) |
1952 | goto queuing_error; |
1953 | } else |
1954 | nseg = 0; |
1955 | |
1956 | tot_dsds = nseg; |
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1958 | |
1959 | sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; |
1960 | sp->iores.exch_cnt = 1; |
1961 | sp->iores.iocb_cnt = req_cnt; |
	if (qla_get_fw_resources(sp->qpair, &sp->iores))
1963 | goto queuing_error; |
1964 | |
1965 | if (req->cnt < (req_cnt + 2)) { |
1966 | if (IS_SHADOW_REG_CAPABLE(ha)) { |
1967 | cnt = *req->out_ptr; |
1968 | } else { |
			cnt = rd_reg_dword_relaxed(req->req_q_out);
1970 | if (qla2x00_check_reg16_for_disconnect(vha, cnt)) |
1971 | goto queuing_error; |
1972 | } |
1973 | |
1974 | if (req->ring_index < cnt) |
1975 | req->cnt = cnt - req->ring_index; |
1976 | else |
1977 | req->cnt = req->length - |
1978 | (req->ring_index - cnt); |
1979 | if (req->cnt < (req_cnt + 2)) |
1980 | goto queuing_error; |
1981 | } |
1982 | |
1983 | /* Build command packet. */ |
1984 | req->current_outstanding_cmd = handle; |
1985 | req->outstanding_cmds[handle] = sp; |
1986 | sp->handle = handle; |
1987 | cmd->host_scribble = (unsigned char *)(unsigned long)handle; |
1988 | req->cnt -= req_cnt; |
1989 | |
1990 | cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; |
	cmd_pkt->handle = make_handle(req->id, handle);
1992 | |
1993 | /* Zero out remaining portion of packet. */ |
1994 | /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ |
1995 | clr_ptr = (uint32_t *)cmd_pkt + 2; |
1996 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); |
1997 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); |
1998 | |
1999 | /* Set NPORT-ID and LUN number*/ |
2000 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
2001 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; |
2002 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; |
2003 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; |
2004 | cmd_pkt->vp_index = sp->fcport->vha->vp_idx; |
2005 | |
2006 | int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); |
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2008 | |
2009 | cmd_pkt->task = TSK_SIMPLE; |
2010 | |
2011 | /* Load SCSI command packet. */ |
2012 | memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); |
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2014 | |
2015 | cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); |
2016 | |
2017 | /* Build IOCB segments */ |
2018 | qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); |
2019 | |
2020 | /* Set total data segment count. */ |
2021 | cmd_pkt->entry_count = (uint8_t)req_cnt; |
2022 | wmb(); |
2023 | /* Adjust ring index. */ |
2024 | req->ring_index++; |
2025 | if (req->ring_index == req->length) { |
2026 | req->ring_index = 0; |
2027 | req->ring_ptr = req->ring; |
2028 | } else |
2029 | req->ring_ptr++; |
2030 | |
2031 | sp->qpair->cmd_cnt++; |
2032 | sp->flags |= SRB_DMA_VALID; |
2033 | |
2034 | /* Set chip new ring index. */ |
	wrt_reg_dword(req->req_q_in, req->ring_index);
2036 | |
2037 | /* Manage unprocessed RIO/ZIO commands in response queue. */ |
2038 | if (vha->flags.process_response_queue && |
2039 | rsp->ring_ptr->signature != RESPONSE_PROCESSED) |
2040 | qla24xx_process_response_queue(vha, rsp); |
2041 | |
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2043 | return QLA_SUCCESS; |
2044 | |
2045 | queuing_error: |
2046 | if (tot_dsds) |
2047 | scsi_dma_unmap(cmd); |
2048 | |
	qla_put_fw_resources(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2051 | |
2052 | return QLA_FUNCTION_FAILED; |
2053 | } |
2054 | |
2055 | |
2056 | /** |
2057 | * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP |
2058 | * @sp: command to send to the ISP |
2059 | * |
2060 | * Returns non-zero if a failure occurred, else zero. |
2061 | */ |
2062 | int |
2063 | qla2xxx_dif_start_scsi_mq(srb_t *sp) |
2064 | { |
2065 | int nseg; |
2066 | unsigned long flags; |
2067 | uint32_t *clr_ptr; |
2068 | uint32_t handle; |
2069 | uint16_t cnt; |
2070 | uint16_t req_cnt = 0; |
2071 | uint16_t tot_dsds; |
2072 | uint16_t tot_prot_dsds; |
2073 | uint16_t fw_prot_opts = 0; |
2074 | struct req_que *req = NULL; |
2075 | struct rsp_que *rsp = NULL; |
2076 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); |
2077 | struct scsi_qla_host *vha = sp->fcport->vha; |
2078 | struct qla_hw_data *ha = vha->hw; |
2079 | struct cmd_type_crc_2 *cmd_pkt; |
2080 | uint32_t status = 0; |
2081 | struct qla_qpair *qpair = sp->qpair; |
2082 | |
2083 | #define QDSS_GOT_Q_SPACE BIT_0 |
2084 | |
2085 | /* Check for host side state */ |
2086 | if (!qpair->online) { |
2087 | cmd->result = DID_NO_CONNECT << 16; |
2088 | return QLA_INTERFACE_ERROR; |
2089 | } |
2090 | |
2091 | if (!qpair->difdix_supported && |
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2093 | cmd->result = DID_NO_CONNECT << 16; |
2094 | return QLA_INTERFACE_ERROR; |
2095 | } |
2096 | |
2097 | /* Only process protection or >16 cdb in this routine */ |
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2099 | if (cmd->cmd_len <= 16) |
2100 | return qla2xxx_start_scsi_mq(sp); |
2101 | else |
2102 | return qla_start_scsi_type6(sp); |
2103 | } |
2104 | |
2105 | spin_lock_irqsave(&qpair->qp_lock, flags); |
2106 | |
2107 | /* Setup qpair pointers */ |
2108 | rsp = qpair->rsp; |
2109 | req = qpair->req; |
2110 | |
2111 | /* So we know we haven't pci_map'ed anything yet */ |
2112 | tot_dsds = 0; |
2113 | |
2114 | /* Send marker if required */ |
2115 | if (vha->marker_needed != 0) { |
		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
2119 | return QLA_FUNCTION_FAILED; |
2120 | } |
2121 | vha->marker_needed = 0; |
2122 | } |
2123 | |
2124 | handle = qla2xxx_get_next_handle(req); |
2125 | if (handle == 0) |
2126 | goto queuing_error; |
2127 | |
2128 | /* Compute number of required data segments */ |
2129 | /* Map the sg table so we have an accurate count of sg entries needed */ |
2130 | if (scsi_sg_count(cmd)) { |
2131 | nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), |
2132 | scsi_sg_count(cmd), cmd->sc_data_direction); |
2133 | if (unlikely(!nseg)) |
2134 | goto queuing_error; |
2135 | else |
2136 | sp->flags |= SRB_DMA_VALID; |
2137 | |
		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2140 | struct qla2_sgx sgx; |
2141 | uint32_t partial; |
2142 | |
2143 | memset(&sgx, 0, sizeof(struct qla2_sgx)); |
2144 | sgx.tot_bytes = scsi_bufflen(cmd); |
2145 | sgx.cur_sg = scsi_sglist(cmd); |
2146 | sgx.sp = sp; |
2147 | |
2148 | nseg = 0; |
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
2151 | nseg++; |
2152 | } |
2153 | } else |
2154 | nseg = 0; |
2155 | |
2156 | /* number of required data segments */ |
2157 | tot_dsds = nseg; |
2158 | |
2159 | /* Compute number of required protection segments */ |
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2161 | nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), |
2162 | scsi_prot_sg_count(cmd), cmd->sc_data_direction); |
2163 | if (unlikely(!nseg)) |
2164 | goto queuing_error; |
2165 | else |
2166 | sp->flags |= SRB_CRC_PROT_DMA_VALID; |
2167 | |
		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2170 | nseg = scsi_bufflen(cmd) / cmd->device->sector_size; |
2171 | } |
2172 | } else { |
2173 | nseg = 0; |
2174 | } |
2175 | |
2176 | req_cnt = 1; |
2177 | /* Total Data and protection sg segment(s) */ |
2178 | tot_prot_dsds = nseg; |
2179 | tot_dsds += nseg; |
2180 | |
2181 | sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; |
2182 | sp->iores.exch_cnt = 1; |
	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (qla_get_fw_resources(sp->qpair, &sp->iores))
2185 | goto queuing_error; |
2186 | |
2187 | if (req->cnt < (req_cnt + 2)) { |
2188 | if (IS_SHADOW_REG_CAPABLE(ha)) { |
2189 | cnt = *req->out_ptr; |
2190 | } else { |
			cnt = rd_reg_dword_relaxed(req->req_q_out);
2192 | if (qla2x00_check_reg16_for_disconnect(vha, cnt)) |
2193 | goto queuing_error; |
2194 | } |
2195 | |
2196 | if (req->ring_index < cnt) |
2197 | req->cnt = cnt - req->ring_index; |
2198 | else |
2199 | req->cnt = req->length - |
2200 | (req->ring_index - cnt); |
2201 | if (req->cnt < (req_cnt + 2)) |
2202 | goto queuing_error; |
2203 | } |
2204 | |
2205 | status |= QDSS_GOT_Q_SPACE; |
2206 | |
2207 | /* Build header part of command packet (excluding the OPCODE). */ |
2208 | req->current_outstanding_cmd = handle; |
2209 | req->outstanding_cmds[handle] = sp; |
2210 | sp->handle = handle; |
2211 | cmd->host_scribble = (unsigned char *)(unsigned long)handle; |
2212 | req->cnt -= req_cnt; |
2213 | |
2214 | /* Fill-in common area */ |
2215 | cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; |
	cmd_pkt->handle = make_handle(req->id, handle);
2217 | |
2218 | clr_ptr = (uint32_t *)cmd_pkt + 2; |
2219 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); |
2220 | |
2221 | /* Set NPORT-ID and LUN number*/ |
2222 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
2223 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; |
2224 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; |
2225 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; |
2226 | |
2227 | int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); |
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2229 | |
2230 | /* Total Data and protection segment(s) */ |
2231 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); |
2232 | |
2233 | /* Build IOCB segments and adjust for data protection segments */ |
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
2237 | goto queuing_error; |
2238 | |
2239 | cmd_pkt->entry_count = (uint8_t)req_cnt; |
2240 | cmd_pkt->timeout = cpu_to_le16(0); |
2241 | wmb(); |
2242 | |
2243 | /* Adjust ring index. */ |
2244 | req->ring_index++; |
2245 | if (req->ring_index == req->length) { |
2246 | req->ring_index = 0; |
2247 | req->ring_ptr = req->ring; |
2248 | } else |
2249 | req->ring_ptr++; |
2250 | |
2251 | sp->qpair->cmd_cnt++; |
2252 | /* Set chip new ring index. */ |
	wrt_reg_dword(req->req_q_in, req->ring_index);
2254 | |
2255 | /* Manage unprocessed RIO/ZIO commands in response queue. */ |
2256 | if (vha->flags.process_response_queue && |
2257 | rsp->ring_ptr->signature != RESPONSE_PROCESSED) |
2258 | qla24xx_process_response_queue(vha, rsp); |
2259 | |
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2261 | |
2262 | return QLA_SUCCESS; |
2263 | |
2264 | queuing_error: |
2265 | if (status & QDSS_GOT_Q_SPACE) { |
2266 | req->outstanding_cmds[handle] = NULL; |
2267 | req->cnt += req_cnt; |
2268 | } |
2269 | /* Cleanup will be performed by the caller (queuecommand) */ |
2270 | |
	qla_put_fw_resources(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2273 | |
2274 | return QLA_FUNCTION_FAILED; |
2275 | } |
2276 | |
2277 | /* Generic Control-SRB manipulation functions. */ |
2278 | |
2279 | /* hardware_lock assumed to be held. */ |
2280 | |
2281 | void * |
2282 | __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) |
2283 | { |
2284 | scsi_qla_host_t *vha = qpair->vha; |
2285 | struct qla_hw_data *ha = vha->hw; |
2286 | struct req_que *req = qpair->req; |
2287 | device_reg_t *reg = ISP_QUE_REG(ha, req->id); |
2288 | uint32_t handle; |
2289 | request_t *pkt; |
2290 | uint16_t cnt, req_cnt; |
2291 | |
2292 | pkt = NULL; |
2293 | req_cnt = 1; |
2294 | handle = 0; |
2295 | |
2296 | if (sp && (sp->type != SRB_SCSI_CMD)) { |
2297 | /* Adjust entry-counts as needed. */ |
2298 | req_cnt = sp->iocbs; |
2299 | } |
2300 | |
2301 | /* Check for room on request queue. */ |
2302 | if (req->cnt < req_cnt + 2) { |
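		/*
		 * Refresh the cached free count from the consumer index;
		 * where that index lives depends on the ISP generation.
		 */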
2303 | if (qpair->use_shadow_reg) |
2304 | cnt = *req->out_ptr; |
2305 | else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || |
2306 | IS_QLA28XX(ha)) |
			cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = rd_reg_dword(reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = rd_reg_dword(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));
2317 | |
2318 | if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) { |
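			/*
			 * An all-ones readback means the PCI device has
			 * disconnected; punt to the EEH worker instead of
			 * queuing.
			 */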
2319 | qla_schedule_eeh_work(vha); |
2320 | return NULL; |
2321 | } |
2322 | |
2323 | if (req->ring_index < cnt) |
2324 | req->cnt = cnt - req->ring_index; |
2325 | else |
2326 | req->cnt = req->length - |
2327 | (req->ring_index - cnt); |
2328 | } |
2329 | if (req->cnt < req_cnt + 2) |
2330 | goto queuing_error; |
2331 | |
2332 | if (sp) { |
2333 | handle = qla2xxx_get_next_handle(req); |
2334 | if (handle == 0) { |
			ql_log(ql_log_warn, vha, 0x700b,
			    "No room on outstanding cmd array.\n");
2337 | goto queuing_error; |
2338 | } |
2339 | |
2340 | /* Prep command array. */ |
2341 | req->current_outstanding_cmd = handle; |
2342 | req->outstanding_cmds[handle] = sp; |
2343 | sp->handle = handle; |
2344 | } |
2345 | |
2346 | /* Prep packet */ |
2347 | req->cnt -= req_cnt; |
2348 | pkt = req->ring_ptr; |
2349 | memset(pkt, 0, REQUEST_ENTRY_SIZE); |
2350 | if (IS_QLAFX00(ha)) { |
		wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
		wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
2353 | } else { |
2354 | pkt->entry_count = req_cnt; |
2355 | pkt->handle = handle; |
2356 | } |
2357 | |
2358 | return pkt; |
2359 | |
2360 | queuing_error: |
2361 | qpair->tgt_counters.num_alloc_iocb_failed++; |
2362 | return pkt; |
2363 | } |
2364 | |
2365 | void * |
2366 | qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp) |
2367 | { |
2368 | scsi_qla_host_t *vha = qpair->vha; |
2369 | |
2370 | if (qla2x00_reset_active(vha)) |
2371 | return NULL; |
2372 | |
2373 | return __qla2x00_alloc_iocbs(qpair, sp); |
2374 | } |
2375 | |
2376 | void * |
2377 | qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp) |
2378 | { |
	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2380 | } |
2381 | |
2382 | static void |
2383 | qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio) |
2384 | { |
2385 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
2386 | |
2387 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; |
2388 | logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); |
2389 | if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) { |
2390 | logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI); |
2391 | if (sp->vha->flags.nvme_first_burst) |
2392 | logio->io_parameter[0] = |
2393 | cpu_to_le32(NVME_PRLI_SP_FIRST_BURST); |
2394 | if (sp->vha->flags.nvme2_enabled) { |
2395 | /* Set service parameter BIT_7 for NVME CONF support */ |
2396 | logio->io_parameter[0] |= |
2397 | cpu_to_le32(NVME_PRLI_SP_CONF); |
2398 | /* Set service parameter BIT_8 for SLER support */ |
2399 | logio->io_parameter[0] |= |
2400 | cpu_to_le32(NVME_PRLI_SP_SLER); |
2401 | /* Set service parameter BIT_9 for PI control support */ |
2402 | logio->io_parameter[0] |= |
2403 | cpu_to_le32(NVME_PRLI_SP_PI_CTRL); |
2404 | } |
2405 | } |
2406 | |
2407 | logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
2408 | logio->port_id[0] = sp->fcport->d_id.b.al_pa; |
2409 | logio->port_id[1] = sp->fcport->d_id.b.area; |
2410 | logio->port_id[2] = sp->fcport->d_id.b.domain; |
2411 | logio->vp_index = sp->vha->vp_idx; |
2412 | } |
2413 | |
2414 | static void |
2415 | qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) |
2416 | { |
2417 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
2418 | |
2419 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; |
2420 | logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); |
2421 | |
2422 | if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) { |
2423 | logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); |
2424 | } else { |
2425 | logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); |
2426 | if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI) |
2427 | logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); |
2428 | if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) |
2429 | logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); |
2430 | if (lio->u.logio.flags & SRB_LOGIN_FCSP) { |
2431 | logio->control_flags |= |
2432 | cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI); |
2433 | logio->io_parameter[0] = |
2434 | cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO); |
2435 | } |
2436 | } |
2437 | logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
2438 | logio->port_id[0] = sp->fcport->d_id.b.al_pa; |
2439 | logio->port_id[1] = sp->fcport->d_id.b.area; |
2440 | logio->port_id[2] = sp->fcport->d_id.b.domain; |
2441 | logio->vp_index = sp->vha->vp_idx; |
2442 | } |
2443 | |
2444 | static void |
2445 | qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) |
2446 | { |
2447 | struct qla_hw_data *ha = sp->vha->hw; |
2448 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
2449 | uint16_t opts; |
2450 | |
2451 | mbx->entry_type = MBX_IOCB_TYPE; |
2452 | SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); |
2453 | mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT); |
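	/* Mailbox options: BIT_0 = conditional PLOGI, BIT_1 = skip PRLI. */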
2454 | opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0; |
2455 | opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0; |
2456 | if (HAS_EXTENDED_IDS(ha)) { |
2457 | mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); |
2458 | mbx->mb10 = cpu_to_le16(opts); |
2459 | } else { |
2460 | mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); |
2461 | } |
2462 | mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); |
2463 | mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | |
2464 | sp->fcport->d_id.b.al_pa); |
2465 | mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); |
2466 | } |
2467 | |
2468 | static void |
2469 | qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) |
2470 | { |
	u16 control_flags = LCF_COMMAND_LOGO;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2473 | |
2474 | if (sp->fcport->explicit_logout) { |
2475 | control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; |
2476 | } else { |
2477 | control_flags |= LCF_IMPL_LOGO; |
2478 | |
2479 | if (!sp->fcport->keep_nport_handle) |
2480 | control_flags |= LCF_FREE_NPORT; |
2481 | } |
2482 | |
2483 | logio->control_flags = cpu_to_le16(control_flags); |
2484 | logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
2485 | logio->port_id[0] = sp->fcport->d_id.b.al_pa; |
2486 | logio->port_id[1] = sp->fcport->d_id.b.area; |
2487 | logio->port_id[2] = sp->fcport->d_id.b.domain; |
2488 | logio->vp_index = sp->vha->vp_idx; |
2489 | } |
2490 | |
2491 | static void |
2492 | qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) |
2493 | { |
2494 | struct qla_hw_data *ha = sp->vha->hw; |
2495 | |
2496 | mbx->entry_type = MBX_IOCB_TYPE; |
2497 | SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); |
2498 | mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); |
2499 | mbx->mb1 = HAS_EXTENDED_IDS(ha) ? |
2500 | cpu_to_le16(sp->fcport->loop_id) : |
2501 | cpu_to_le16(sp->fcport->loop_id << 8); |
2502 | mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); |
2503 | mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | |
2504 | sp->fcport->d_id.b.al_pa); |
2505 | mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); |
	/* Implicit: mbx->mb10 = 0. */
2507 | } |
2508 | |
2509 | static void |
2510 | qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) |
2511 | { |
2512 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; |
2513 | logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); |
2514 | logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
2515 | logio->vp_index = sp->vha->vp_idx; |
2516 | } |
2517 | |
2518 | static void |
2519 | qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) |
2520 | { |
2521 | struct qla_hw_data *ha = sp->vha->hw; |
2522 | |
2523 | mbx->entry_type = MBX_IOCB_TYPE; |
2524 | SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); |
2525 | mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE); |
2526 | if (HAS_EXTENDED_IDS(ha)) { |
2527 | mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); |
2528 | mbx->mb10 = cpu_to_le16(BIT_0); |
2529 | } else { |
2530 | mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0); |
2531 | } |
2532 | mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma)); |
2533 | mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); |
2534 | mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); |
2535 | mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); |
2536 | mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); |
2537 | } |
2538 | |
2539 | static void |
2540 | qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) |
2541 | { |
2542 | uint32_t flags; |
2543 | uint64_t lun; |
2544 | struct fc_port *fcport = sp->fcport; |
2545 | scsi_qla_host_t *vha = fcport->vha; |
2546 | struct qla_hw_data *ha = vha->hw; |
2547 | struct srb_iocb *iocb = &sp->u.iocb_cmd; |
2548 | struct req_que *req = sp->qpair->req; |
2549 | |
2550 | flags = iocb->u.tmf.flags; |
2551 | lun = iocb->u.tmf.lun; |
2552 | |
2553 | tsk->entry_type = TSK_MGMT_IOCB_TYPE; |
2554 | tsk->entry_count = 1; |
	tsk->handle = make_handle(req->id, tsk->handle);
2556 | tsk->nport_handle = cpu_to_le16(fcport->loop_id); |
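	/* Allow the firmware up to twice R_A_TOV to complete the TMF. */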
2557 | tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
2558 | tsk->control_flags = cpu_to_le32(flags); |
2559 | tsk->port_id[0] = fcport->d_id.b.al_pa; |
2560 | tsk->port_id[1] = fcport->d_id.b.area; |
2561 | tsk->port_id[2] = fcport->d_id.b.domain; |
2562 | tsk->vp_index = fcport->vha->vp_idx; |
2563 | |
	if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET |
	    TCF_CLEAR_TASK_SET | TCF_CLEAR_ACA)) {
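		/* Only LUN-scoped TMFs carry a LUN in the IOCB. */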
2566 | int_to_scsilun(lun, &tsk->lun); |
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
2569 | } |
2570 | } |
2571 | |
2572 | static void |
2573 | qla2x00_async_done(struct srb *sp, int res) |
2574 | { |
	if (del_timer(&sp->u.iocb_cmd.timer)) {
2576 | /* |
2577 | * Successfully cancelled the timeout handler |
2578 | * ref: TMR |
2579 | */ |
		if (kref_put(&sp->cmd_kref, qla2x00_sp_release))
2581 | return; |
2582 | } |
2583 | sp->async_done(sp, res); |
2584 | } |
2585 | |
2586 | void |
2587 | qla2x00_sp_release(struct kref *kref) |
2588 | { |
2589 | struct srb *sp = container_of(kref, struct srb, cmd_kref); |
2590 | |
2591 | sp->free(sp); |
2592 | } |
2593 | |
2594 | void |
2595 | qla2x00_init_async_sp(srb_t *sp, unsigned long tmo, |
2596 | void (*done)(struct srb *sp, int res)) |
2597 | { |
2598 | timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); |
2599 | sp->done = qla2x00_async_done; |
2600 | sp->async_done = done; |
2601 | sp->free = qla2x00_sp_free; |
2602 | sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; |
2603 | sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; |
2604 | if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD) |
		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2606 | sp->start_timer = 1; |
2607 | } |
2608 | |
2609 | static void qla2x00_els_dcmd_sp_free(srb_t *sp) |
2610 | { |
2611 | struct srb_iocb *elsio = &sp->u.iocb_cmd; |
2612 | |
	kfree(sp->fcport);
2614 | |
2615 | if (elsio->u.els_logo.els_logo_pyld) |
		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
		    elsio->u.els_logo.els_logo_pyld,
		    elsio->u.els_logo.els_logo_pyld_dma);
2619 | |
	del_timer(&elsio->timer);
2621 | qla2x00_rel_sp(sp); |
2622 | } |
2623 | |
2624 | static void |
2625 | qla2x00_els_dcmd_iocb_timeout(void *data) |
2626 | { |
2627 | srb_t *sp = data; |
2628 | fc_port_t *fcport = sp->fcport; |
2629 | struct scsi_qla_host *vha = sp->vha; |
2630 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
2631 | unsigned long flags = 0; |
2632 | int res, h; |
2633 | |
2634 | ql_dbg(ql_dbg_io, vha, 0x3069, |
	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2636 | sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, |
2637 | fcport->d_id.b.al_pa); |
2638 | |
2639 | /* Abort the exchange */ |
2640 | res = qla24xx_async_abort_cmd(sp, false); |
2641 | if (res) { |
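		/*
		 * The abort failed, so no completion will come back for this
		 * exchange: pull the SRB from the outstanding array by hand
		 * and wake the waiter.
		 */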
		ql_dbg(ql_dbg_io, vha, 0x3070,
		    "mbx abort_command failed.\n");
2644 | spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); |
2645 | for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { |
2646 | if (sp->qpair->req->outstanding_cmds[h] == sp) { |
2647 | sp->qpair->req->outstanding_cmds[h] = NULL; |
2648 | break; |
2649 | } |
2650 | } |
		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2652 | complete(&lio->u.els_logo.comp); |
2653 | } else { |
		ql_dbg(ql_dbg_io, vha, 0x3071,
		    "mbx abort_command success.\n");
2656 | } |
2657 | } |
2658 | |
2659 | static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res) |
2660 | { |
2661 | fc_port_t *fcport = sp->fcport; |
2662 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
2663 | struct scsi_qla_host *vha = sp->vha; |
2664 | |
2665 | ql_dbg(ql_dbg_io, vha, 0x3072, |
	    "%s hdl=%x, portid=%02x%02x%02x done\n",
2667 | sp->name, sp->handle, fcport->d_id.b.domain, |
2668 | fcport->d_id.b.area, fcport->d_id.b.al_pa); |
2669 | |
2670 | complete(&lio->u.els_logo.comp); |
2671 | } |
2672 | |
2673 | int |
2674 | qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, |
2675 | port_id_t remote_did) |
2676 | { |
2677 | srb_t *sp; |
2678 | fc_port_t *fcport = NULL; |
2679 | struct srb_iocb *elsio = NULL; |
2680 | struct qla_hw_data *ha = vha->hw; |
2681 | struct els_logo_payload logo_pyld; |
2682 | int rval = QLA_SUCCESS; |
2683 | |
2684 | fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); |
2685 | if (!fcport) { |
		ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2687 | return -ENOMEM; |
2688 | } |
2689 | |
2690 | /* Alloc SRB structure |
2691 | * ref: INIT |
2692 | */ |
2693 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); |
2694 | if (!sp) { |
		kfree(fcport);
		ql_log(ql_log_info, vha, 0x70e6,
		    "SRB allocation failed\n");
2698 | return -ENOMEM; |
2699 | } |
2700 | |
2701 | elsio = &sp->u.iocb_cmd; |
2702 | fcport->loop_id = 0xFFFF; |
2703 | fcport->d_id.b.domain = remote_did.b.domain; |
2704 | fcport->d_id.b.area = remote_did.b.area; |
2705 | fcport->d_id.b.al_pa = remote_did.b.al_pa; |
2706 | |
	ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2708 | fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); |
2709 | |
2710 | sp->type = SRB_ELS_DCMD; |
	sp->name = "ELS_DCMD";
2712 | sp->fcport = fcport; |
2713 | qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT, |
	    qla2x00_els_dcmd_sp_done);
2715 | sp->free = qla2x00_els_dcmd_sp_free; |
2716 | sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout; |
	init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2718 | |
	elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
	    DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
	    GFP_KERNEL);
2722 | |
2723 | if (!elsio->u.els_logo.els_logo_pyld) { |
2724 | /* ref: INIT */ |
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
2726 | return QLA_FUNCTION_FAILED; |
2727 | } |
2728 | |
2729 | memset(&logo_pyld, 0, sizeof(struct els_logo_payload)); |
2730 | |
2731 | elsio->u.els_logo.els_cmd = els_opcode; |
2732 | logo_pyld.opcode = els_opcode; |
2733 | logo_pyld.s_id[0] = vha->d_id.b.al_pa; |
2734 | logo_pyld.s_id[1] = vha->d_id.b.area; |
2735 | logo_pyld.s_id[2] = vha->d_id.b.domain; |
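	/* The payload goes on the wire big-endian; swap the S_ID word. */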
	host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2737 | memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE); |
2738 | |
2739 | memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, |
2740 | sizeof(struct els_logo_payload)); |
	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2742 | ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a, |
2743 | elsio->u.els_logo.els_logo_pyld, |
2744 | sizeof(*elsio->u.els_logo.els_logo_pyld)); |
2745 | |
2746 | rval = qla2x00_start_sp(sp); |
2747 | if (rval != QLA_SUCCESS) { |
2748 | /* ref: INIT */ |
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
2750 | return QLA_FUNCTION_FAILED; |
2751 | } |
2752 | |
2753 | ql_dbg(ql_dbg_io, vha, 0x3074, |
	    "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2755 | sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, |
2756 | fcport->d_id.b.area, fcport->d_id.b.al_pa); |
2757 | |
2758 | wait_for_completion(&elsio->u.els_logo.comp); |
2759 | |
2760 | /* ref: INIT */ |
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
2762 | return rval; |
2763 | } |
2764 | |
2765 | static void |
2766 | qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) |
2767 | { |
2768 | scsi_qla_host_t *vha = sp->vha; |
2769 | struct srb_iocb *elsio = &sp->u.iocb_cmd; |
2770 | |
2771 | els_iocb->entry_type = ELS_IOCB_TYPE; |
2772 | els_iocb->entry_count = 1; |
2773 | els_iocb->sys_define = 0; |
2774 | els_iocb->entry_status = 0; |
2775 | els_iocb->handle = sp->handle; |
2776 | els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
2777 | els_iocb->tx_dsd_count = cpu_to_le16(1); |
2778 | els_iocb->vp_index = vha->vp_idx; |
2779 | els_iocb->sof_type = EST_SOFI3; |
2780 | els_iocb->rx_dsd_count = 0; |
2781 | els_iocb->opcode = elsio->u.els_logo.els_cmd; |
2782 | |
2783 | els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; |
2784 | els_iocb->d_id[1] = sp->fcport->d_id.b.area; |
2785 | els_iocb->d_id[2] = sp->fcport->d_id.b.domain; |
2786 | /* For SID the byte order is different than DID */ |
2787 | els_iocb->s_id[1] = vha->d_id.b.al_pa; |
2788 | els_iocb->s_id[2] = vha->d_id.b.area; |
2789 | els_iocb->s_id[0] = vha->d_id.b.domain; |
2790 | |
2791 | if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { |
2792 | if (vha->hw->flags.edif_enabled) |
2793 | els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN); |
2794 | else |
2795 | els_iocb->control_flags = 0; |
2796 | els_iocb->tx_byte_count = els_iocb->tx_len = |
2797 | cpu_to_le32(sizeof(struct els_plogi_payload)); |
		put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
		    &els_iocb->tx_address);
2800 | els_iocb->rx_dsd_count = cpu_to_le16(1); |
2801 | els_iocb->rx_byte_count = els_iocb->rx_len = |
2802 | cpu_to_le32(sizeof(struct els_plogi_payload)); |
		put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
		    &els_iocb->rx_address);
2805 | |
2806 | ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, |
		    "PLOGI ELS IOCB:\n");
2808 | ql_dump_buffer(ql_log_info, vha, 0x0109, |
2809 | (uint8_t *)els_iocb, |
2810 | sizeof(*els_iocb)); |
2811 | } else { |
2812 | els_iocb->tx_byte_count = |
2813 | cpu_to_le32(sizeof(struct els_logo_payload)); |
		put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
		    &els_iocb->tx_address);
2816 | els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); |
2817 | |
2818 | els_iocb->rx_byte_count = 0; |
2819 | els_iocb->rx_address = 0; |
2820 | els_iocb->rx_len = 0; |
2821 | ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076, |
		    "LOGO ELS IOCB:");
2823 | ql_dump_buffer(ql_log_info, vha, 0x010b, |
2824 | els_iocb, |
2825 | sizeof(*els_iocb)); |
2826 | } |
2827 | |
2828 | sp->vha->qla_stats.control_requests++; |
2829 | } |
2830 | |
2831 | void |
2832 | qla2x00_els_dcmd2_iocb_timeout(void *data) |
2833 | { |
2834 | srb_t *sp = data; |
2835 | fc_port_t *fcport = sp->fcport; |
2836 | struct scsi_qla_host *vha = sp->vha; |
2837 | unsigned long flags = 0; |
2838 | int res, h; |
2839 | |
2840 | ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069, |
	    "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2842 | sp->name, sp->handle, fcport->port_name, fcport->d_id.b24); |
2843 | |
2844 | /* Abort the exchange */ |
2845 | res = qla24xx_async_abort_cmd(sp, false); |
2846 | ql_dbg(ql_dbg_io, vha, 0x3070, |
	    "mbx abort_command %s\n",
	    (res == QLA_SUCCESS) ? "successful" : "failed");
2849 | if (res) { |
2850 | spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); |
2851 | for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { |
2852 | if (sp->qpair->req->outstanding_cmds[h] == sp) { |
2853 | sp->qpair->req->outstanding_cmds[h] = NULL; |
2854 | break; |
2855 | } |
2856 | } |
		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2858 | sp->done(sp, QLA_FUNCTION_TIMEOUT); |
2859 | } |
2860 | } |
2861 | |
2862 | void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi) |
2863 | { |
	if (els_plogi->els_plogi_pyld)
		dma_free_coherent(&vha->hw->pdev->dev,
		    els_plogi->tx_size,
		    els_plogi->els_plogi_pyld,
		    els_plogi->els_plogi_pyld_dma);

	if (els_plogi->els_resp_pyld)
		dma_free_coherent(&vha->hw->pdev->dev,
		    els_plogi->rx_size,
		    els_plogi->els_resp_pyld,
		    els_plogi->els_resp_pyld_dma);
2875 | } |
2876 | |
2877 | static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) |
2878 | { |
2879 | fc_port_t *fcport = sp->fcport; |
2880 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
2881 | struct scsi_qla_host *vha = sp->vha; |
2882 | struct event_arg ea; |
2883 | struct qla_work_evt *e; |
2884 | struct fc_port *conflict_fcport; |
2885 | port_id_t cid; /* conflict Nport id */ |
2886 | const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; |
2887 | u16 lid; |
2888 | |
2889 | ql_dbg(ql_dbg_disc, vha, 0x3072, |
	    "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2891 | sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name); |
2892 | |
2893 | fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); |
2894 | /* For edif, set logout on delete to ensure any residual key from FW is flushed.*/ |
2895 | fcport->logout_on_delete = 1; |
2896 | fcport->chip_reset = vha->hw->base_qpair->chip_reset; |
2897 | |
2898 | if (sp->flags & SRB_WAKEUP_ON_COMP) |
2899 | complete(&lio->u.els_plogi.comp); |
2900 | else { |
2901 | switch (le32_to_cpu(fw_status[0])) { |
2902 | case CS_DATA_UNDERRUN: |
2903 | case CS_COMPLETE: |
2904 | memset(&ea, 0, sizeof(ea)); |
2905 | ea.fcport = fcport; |
2906 | ea.rc = res; |
			qla_handle_els_plogi_done(vha, &ea);
2908 | break; |
2909 | |
2910 | case CS_IOCB_ERROR: |
2911 | switch (le32_to_cpu(fw_status[1])) { |
2912 | case LSC_SCODE_PORTID_USED: |
2913 | lid = le32_to_cpu(fw_status[2]) & 0xffff; |
				qlt_find_sess_invalidate_other(vha,
				    wwn_to_u64(fcport->port_name),
				    fcport->d_id, lid, &conflict_fcport);
2917 | if (conflict_fcport) { |
2918 | /* |
2919 | * Another fcport shares the same |
2920 | * loop_id & nport id; conflict |
2921 | * fcport needs to finish cleanup |
2922 | * before this fcport can proceed |
2923 | * to login. |
2924 | */ |
2925 | conflict_fcport->conflict = fcport; |
2926 | fcport->login_pause = 1; |
2927 | ql_dbg(ql_dbg_disc, vha, 0x20ed, |
					    "%s %d %8phC pid %06x inuse with lid %#x.\n",
2929 | __func__, __LINE__, |
2930 | fcport->port_name, |
2931 | fcport->d_id.b24, lid); |
2932 | } else { |
2933 | ql_dbg(ql_dbg_disc, vha, 0x20ed, |
					    "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2935 | __func__, __LINE__, |
2936 | fcport->port_name, |
2937 | fcport->d_id.b24, lid); |
2938 | qla2x00_clear_loop_id(fcport); |
					set_bit(lid, vha->hw->loop_id_map);
2940 | fcport->loop_id = lid; |
2941 | fcport->keep_nport_handle = 0; |
2942 | qlt_schedule_sess_for_deletion(fcport); |
2943 | } |
2944 | break; |
2945 | |
2946 | case LSC_SCODE_NPORT_USED: |
2947 | cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16) |
2948 | & 0xff; |
2949 | cid.b.area = (le32_to_cpu(fw_status[2]) >> 8) |
2950 | & 0xff; |
2951 | cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff; |
2952 | cid.b.rsvd_1 = 0; |
2953 | |
2954 | ql_dbg(ql_dbg_disc, vha, 0x20ec, |
				    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2956 | __func__, __LINE__, fcport->port_name, |
2957 | fcport->loop_id, cid.b24); |
				set_bit(fcport->loop_id,
				    vha->hw->loop_id_map);
2960 | fcport->loop_id = FC_NO_LOOP_ID; |
2961 | qla24xx_post_gnl_work(vha, fcport); |
2962 | break; |
2963 | |
2964 | case LSC_SCODE_NOXCB: |
2965 | vha->hw->exch_starvation++; |
2966 | if (vha->hw->exch_starvation > 5) { |
2967 | ql_log(ql_log_warn, vha, 0xd046, |
					    "Exchange starvation. Resetting RISC\n");
2969 | vha->hw->exch_starvation = 0; |
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
2972 | qla2xxx_wake_dpc(vha); |
2973 | break; |
2974 | } |
2975 | fallthrough; |
2976 | default: |
2977 | ql_dbg(ql_dbg_disc, vha, 0x20eb, |
				    "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2979 | __func__, sp->fcport->port_name, |
2980 | fw_status[0], fw_status[1], fw_status[2]); |
2981 | |
2982 | fcport->flags &= ~FCF_ASYNC_SENT; |
2983 | qlt_schedule_sess_for_deletion(fcport); |
2984 | break; |
2985 | } |
2986 | break; |
2987 | |
2988 | default: |
2989 | ql_dbg(ql_dbg_disc, vha, 0x20eb, |
			    "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2991 | __func__, sp->fcport->port_name, |
2992 | fw_status[0], fw_status[1], fw_status[2]); |
2993 | |
2994 | sp->fcport->flags &= ~FCF_ASYNC_SENT; |
2995 | qlt_schedule_sess_for_deletion(fcport); |
2996 | break; |
2997 | } |
2998 | |
2999 | e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); |
3000 | if (!e) { |
3001 | struct srb_iocb *elsio = &sp->u.iocb_cmd; |
3002 | |
			qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
			/* ref: INIT */
			kref_put(&sp->cmd_kref, qla2x00_sp_release);
3006 | return; |
3007 | } |
3008 | e->u.iosb.sp = sp; |
3009 | qla2x00_post_work(vha, e); |
3010 | } |
3011 | } |
3012 | |
3013 | int |
3014 | qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, |
3015 | fc_port_t *fcport, bool wait) |
3016 | { |
3017 | srb_t *sp; |
3018 | struct srb_iocb *elsio = NULL; |
3019 | struct qla_hw_data *ha = vha->hw; |
3020 | int rval = QLA_SUCCESS; |
3021 | void *ptr, *resp_ptr; |
3022 | |
3023 | /* Alloc SRB structure |
3024 | * ref: INIT |
3025 | */ |
3026 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); |
3027 | if (!sp) { |
3028 | ql_log(ql_log_info, vha, 0x70e6, |
		    "SRB allocation failed\n");
3030 | fcport->flags &= ~FCF_ASYNC_ACTIVE; |
3031 | return -ENOMEM; |
3032 | } |
3033 | |
3034 | fcport->flags |= FCF_ASYNC_SENT; |
	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
3036 | elsio = &sp->u.iocb_cmd; |
3037 | ql_dbg(ql_dbg_io, vha, 0x3073, |
	    "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
3039 | |
3040 | if (wait) |
3041 | sp->flags = SRB_WAKEUP_ON_COMP; |
3042 | |
3043 | sp->type = SRB_ELS_DCMD; |
	sp->name = "ELS_DCMD";
3045 | sp->fcport = fcport; |
3046 | qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2, |
	    qla2x00_els_dcmd2_sp_done);
3048 | sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout; |
3049 | |
3050 | elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE; |
3051 | |
3052 | ptr = elsio->u.els_plogi.els_plogi_pyld = |
	    dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
	    &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
3055 | |
3056 | if (!elsio->u.els_plogi.els_plogi_pyld) { |
3057 | rval = QLA_FUNCTION_FAILED; |
3058 | goto out; |
3059 | } |
3060 | |
3061 | resp_ptr = elsio->u.els_plogi.els_resp_pyld = |
	    dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
	    &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
3064 | |
3065 | if (!elsio->u.els_plogi.els_resp_pyld) { |
3066 | rval = QLA_FUNCTION_FAILED; |
3067 | goto out; |
3068 | } |
3069 | |
	ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
3071 | |
3072 | memset(ptr, 0, sizeof(struct els_plogi_payload)); |
3073 | memset(resp_ptr, 0, sizeof(struct els_plogi_payload)); |
3074 | memcpy(elsio->u.els_plogi.els_plogi_pyld->data, |
3075 | (void *)&ha->plogi_els_payld + offsetof(struct fc_els_flogi, fl_csp), |
3076 | sizeof(ha->plogi_els_payld) - offsetof(struct fc_els_flogi, fl_csp)); |
3077 | |
3078 | elsio->u.els_plogi.els_cmd = els_opcode; |
3079 | elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; |
3080 | |
3081 | if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) { |
3082 | struct fc_els_flogi *p = ptr; |
3083 | |
3084 | p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC); |
3085 | } |
3086 | |
	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
3088 | ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, |
3089 | (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, |
3090 | sizeof(*elsio->u.els_plogi.els_plogi_pyld)); |
3091 | |
	init_completion(&elsio->u.els_plogi.comp);
3093 | rval = qla2x00_start_sp(sp); |
3094 | if (rval != QLA_SUCCESS) { |
3095 | rval = QLA_FUNCTION_FAILED; |
3096 | } else { |
3097 | ql_dbg(ql_dbg_disc, vha, 0x3074, |
		    "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
3099 | sp->name, sp->handle, fcport->loop_id, |
3100 | fcport->d_id.b24, vha->d_id.b24); |
3101 | } |
3102 | |
3103 | if (wait) { |
3104 | wait_for_completion(&elsio->u.els_plogi.comp); |
3105 | |
3106 | if (elsio->u.els_plogi.comp_status != CS_COMPLETE) |
3107 | rval = QLA_FUNCTION_FAILED; |
3108 | } else { |
3109 | goto done; |
3110 | } |
3111 | |
3112 | out: |
3113 | fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
	qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
3117 | done: |
3118 | return rval; |
3119 | } |
3120 | |
/* It is assumed that the qpair lock is held. */
3122 | void qla_els_pt_iocb(struct scsi_qla_host *vha, |
3123 | struct els_entry_24xx *els_iocb, |
3124 | struct qla_els_pt_arg *a) |
3125 | { |
3126 | els_iocb->entry_type = ELS_IOCB_TYPE; |
3127 | els_iocb->entry_count = 1; |
3128 | els_iocb->sys_define = 0; |
3129 | els_iocb->entry_status = 0; |
3130 | els_iocb->handle = QLA_SKIP_HANDLE; |
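	/*
	 * No SRB backs this passthrough IOCB, so QLA_SKIP_HANDLE tells the
	 * completion path not to look it up in the outstanding array.
	 */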
3131 | els_iocb->nport_handle = a->nport_handle; |
3132 | els_iocb->rx_xchg_address = a->rx_xchg_address; |
3133 | els_iocb->tx_dsd_count = cpu_to_le16(1); |
3134 | els_iocb->vp_index = a->vp_idx; |
3135 | els_iocb->sof_type = EST_SOFI3; |
3136 | els_iocb->rx_dsd_count = cpu_to_le16(0); |
3137 | els_iocb->opcode = a->els_opcode; |
3138 | |
3139 | els_iocb->d_id[0] = a->did.b.al_pa; |
3140 | els_iocb->d_id[1] = a->did.b.area; |
3141 | els_iocb->d_id[2] = a->did.b.domain; |
3142 | /* For SID the byte order is different than DID */ |
3143 | els_iocb->s_id[1] = vha->d_id.b.al_pa; |
3144 | els_iocb->s_id[2] = vha->d_id.b.area; |
3145 | els_iocb->s_id[0] = vha->d_id.b.domain; |
3146 | |
3147 | els_iocb->control_flags = cpu_to_le16(a->control_flags); |
3148 | |
3149 | els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count); |
3150 | els_iocb->tx_len = cpu_to_le32(a->tx_len); |
	put_unaligned_le64(a->tx_addr, &els_iocb->tx_address);
3152 | |
3153 | els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count); |
3154 | els_iocb->rx_len = cpu_to_le32(a->rx_len); |
	put_unaligned_le64(a->rx_addr, &els_iocb->rx_address);
3156 | } |
3157 | |
3158 | static void |
3159 | qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) |
3160 | { |
3161 | struct bsg_job *bsg_job = sp->u.bsg_job; |
3162 | struct fc_bsg_request *bsg_request = bsg_job->request; |
3163 | |
3164 | els_iocb->entry_type = ELS_IOCB_TYPE; |
3165 | els_iocb->entry_count = 1; |
3166 | els_iocb->sys_define = 0; |
3167 | els_iocb->entry_status = 0; |
3168 | els_iocb->handle = sp->handle; |
3169 | els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
3170 | els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); |
3171 | els_iocb->vp_index = sp->vha->vp_idx; |
3172 | els_iocb->sof_type = EST_SOFI3; |
3173 | els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); |
3174 | |
3175 | els_iocb->opcode = |
3176 | sp->type == SRB_ELS_CMD_RPT ? |
3177 | bsg_request->rqst_data.r_els.els_code : |
3178 | bsg_request->rqst_data.h_els.command_code; |
3179 | els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; |
3180 | els_iocb->d_id[1] = sp->fcport->d_id.b.area; |
3181 | els_iocb->d_id[2] = sp->fcport->d_id.b.domain; |
3182 | els_iocb->control_flags = 0; |
3183 | els_iocb->rx_byte_count = |
3184 | cpu_to_le32(bsg_job->reply_payload.payload_len); |
3185 | els_iocb->tx_byte_count = |
3186 | cpu_to_le32(bsg_job->request_payload.payload_len); |
3187 | |
3188 | put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), |
	    &els_iocb->tx_address);
3190 | els_iocb->tx_len = cpu_to_le32(sg_dma_len |
3191 | (bsg_job->request_payload.sg_list)); |
3192 | |
3193 | put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), |
	    &els_iocb->rx_address);
3195 | els_iocb->rx_len = cpu_to_le32(sg_dma_len |
3196 | (bsg_job->reply_payload.sg_list)); |
3197 | |
3198 | sp->vha->qla_stats.control_requests++; |
3199 | } |
3200 | |
3201 | static void |
3202 | qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) |
3203 | { |
3204 | uint16_t avail_dsds; |
3205 | struct dsd64 *cur_dsd; |
3206 | struct scatterlist *sg; |
3207 | int index; |
3208 | uint16_t tot_dsds; |
3209 | scsi_qla_host_t *vha = sp->vha; |
3210 | struct qla_hw_data *ha = vha->hw; |
3211 | struct bsg_job *bsg_job = sp->u.bsg_job; |
3212 | int entry_count = 1; |
3213 | |
3214 | memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); |
3215 | ct_iocb->entry_type = CT_IOCB_TYPE; |
3216 | ct_iocb->entry_status = 0; |
3217 | ct_iocb->handle1 = sp->handle; |
3218 | SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); |
3219 | ct_iocb->status = cpu_to_le16(0); |
3220 | ct_iocb->control_flags = cpu_to_le16(0); |
3221 | ct_iocb->timeout = 0; |
3222 | ct_iocb->cmd_dsd_count = |
3223 | cpu_to_le16(bsg_job->request_payload.sg_cnt); |
3224 | ct_iocb->total_dsd_count = |
3225 | cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); |
3226 | ct_iocb->req_bytecount = |
3227 | cpu_to_le32(bsg_job->request_payload.payload_len); |
3228 | ct_iocb->rsp_bytecount = |
3229 | cpu_to_le32(bsg_job->reply_payload.payload_len); |
3230 | |
3231 | put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), |
	    &ct_iocb->req_dsd.address);
3233 | ct_iocb->req_dsd.length = ct_iocb->req_bytecount; |
3234 | |
3235 | put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), |
	    &ct_iocb->rsp_dsd.address);
3237 | ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount; |
3238 | |
3239 | avail_dsds = 1; |
3240 | cur_dsd = &ct_iocb->rsp_dsd; |
3241 | index = 0; |
3242 | tot_dsds = bsg_job->reply_payload.sg_cnt; |
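	/*
	 * The MS IOCB itself carries the request DSD plus the first reply
	 * DSD; any further reply segments spill into continuation IOCBs.
	 */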
3243 | |
3244 | for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { |
3245 | cont_a64_entry_t *cont_pkt; |
3246 | |
3247 | /* Allocate additional continuation packets? */ |
3248 | if (avail_dsds == 0) { |
3249 | /* |
3250 | * Five DSDs are available in the Cont. |
3251 | * Type 1 IOCB. |
3252 | */ |
3253 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, |
			    vha->hw->req_q_map[0]);
3255 | cur_dsd = cont_pkt->dsd; |
3256 | avail_dsds = 5; |
3257 | entry_count++; |
3258 | } |
3259 | |
		append_dsd64(&cur_dsd, sg);
3261 | avail_dsds--; |
3262 | } |
3263 | ct_iocb->entry_count = entry_count; |
3264 | |
3265 | sp->vha->qla_stats.control_requests++; |
3266 | } |
3267 | |
3268 | static void |
3269 | qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) |
3270 | { |
3271 | uint16_t avail_dsds; |
3272 | struct dsd64 *cur_dsd; |
3273 | struct scatterlist *sg; |
3274 | int index; |
3275 | uint16_t cmd_dsds, rsp_dsds; |
3276 | scsi_qla_host_t *vha = sp->vha; |
3277 | struct qla_hw_data *ha = vha->hw; |
3278 | struct bsg_job *bsg_job = sp->u.bsg_job; |
3279 | int entry_count = 1; |
3280 | cont_a64_entry_t *cont_pkt = NULL; |
3281 | |
3282 | ct_iocb->entry_type = CT_IOCB_TYPE; |
3283 | ct_iocb->entry_status = 0; |
3284 | ct_iocb->sys_define = 0; |
3285 | ct_iocb->handle = sp->handle; |
3286 | |
3287 | ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
3288 | ct_iocb->vp_index = sp->vha->vp_idx; |
3289 | ct_iocb->comp_status = cpu_to_le16(0); |
3290 | |
3291 | cmd_dsds = bsg_job->request_payload.sg_cnt; |
3292 | rsp_dsds = bsg_job->reply_payload.sg_cnt; |
3293 | |
3294 | ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds); |
3295 | ct_iocb->timeout = 0; |
3296 | ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds); |
3297 | ct_iocb->cmd_byte_count = |
3298 | cpu_to_le32(bsg_job->request_payload.payload_len); |
3299 | |
3300 | avail_dsds = 2; |
3301 | cur_dsd = ct_iocb->dsd; |
3302 | index = 0; |
3303 | |
3304 | for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) { |
3305 | /* Allocate additional continuation packets? */ |
3306 | if (avail_dsds == 0) { |
3307 | /* |
3308 | * Five DSDs are available in the Cont. |
3309 | * Type 1 IOCB. |
3310 | */ |
			cont_pkt = qla2x00_prep_cont_type1_iocb(
			    vha, ha->req_q_map[0]);
3313 | cur_dsd = cont_pkt->dsd; |
3314 | avail_dsds = 5; |
3315 | entry_count++; |
3316 | } |
3317 | |
		append_dsd64(&cur_dsd, sg);
3319 | avail_dsds--; |
3320 | } |
3321 | |
3322 | index = 0; |
3323 | |
3324 | for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) { |
3325 | /* Allocate additional continuation packets? */ |
3326 | if (avail_dsds == 0) { |
3327 | /* |
3328 | * Five DSDs are available in the Cont. |
3329 | * Type 1 IOCB. |
3330 | */ |
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
3333 | cur_dsd = cont_pkt->dsd; |
3334 | avail_dsds = 5; |
3335 | entry_count++; |
3336 | } |
3337 | |
		append_dsd64(&cur_dsd, sg);
3339 | avail_dsds--; |
3340 | } |
3341 | ct_iocb->entry_count = entry_count; |
3342 | } |
3343 | |
3344 | /* |
3345 | * qla82xx_start_scsi() - Send a SCSI command to the ISP |
3346 | * @sp: command to send to the ISP |
3347 | * |
3348 | * Returns non-zero if a failure occurred, else zero. |
3349 | */ |
3350 | int |
3351 | qla82xx_start_scsi(srb_t *sp) |
3352 | { |
3353 | int nseg; |
3354 | unsigned long flags; |
3355 | struct scsi_cmnd *cmd; |
3356 | uint32_t *clr_ptr; |
3357 | uint32_t handle; |
3358 | uint16_t cnt; |
3359 | uint16_t req_cnt; |
3360 | uint16_t tot_dsds; |
3361 | struct device_reg_82xx __iomem *reg; |
3362 | uint32_t dbval; |
3363 | __be32 *fcp_dl; |
3364 | uint8_t additional_cdb_len; |
3365 | struct ct6_dsd *ctx; |
3366 | struct scsi_qla_host *vha = sp->vha; |
3367 | struct qla_hw_data *ha = vha->hw; |
3368 | struct req_que *req = NULL; |
3369 | struct rsp_que *rsp = NULL; |
3370 | struct qla_qpair *qpair = sp->qpair; |
3371 | |
3372 | /* Setup device pointers. */ |
3373 | reg = &ha->iobase->isp82; |
3374 | cmd = GET_CMD_SP(sp); |
3375 | req = vha->req; |
3376 | rsp = ha->rsp_q_map[0]; |
3377 | |
3378 | /* So we know we haven't pci_map'ed anything yet */ |
3379 | tot_dsds = 0; |
3380 | |
3381 | dbval = 0x04 | (ha->portnum << 5); |
3382 | |
3383 | /* Send marker if required */ |
3384 | if (vha->marker_needed != 0) { |
		if (qla2x00_marker(vha, ha->base_qpair,
		    0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
3389 | return QLA_FUNCTION_FAILED; |
3390 | } |
3391 | vha->marker_needed = 0; |
3392 | } |
3393 | |
3394 | /* Acquire ring specific lock */ |
3395 | spin_lock_irqsave(&ha->hardware_lock, flags); |
3396 | |
3397 | handle = qla2xxx_get_next_handle(req); |
3398 | if (handle == 0) |
3399 | goto queuing_error; |
3400 | |
3401 | /* Map the sg table so we have an accurate count of sg entries needed */ |
3402 | if (scsi_sg_count(cmd)) { |
3403 | nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), |
3404 | scsi_sg_count(cmd), cmd->sc_data_direction); |
3405 | if (unlikely(!nseg)) |
3406 | goto queuing_error; |
3407 | } else |
3408 | nseg = 0; |
3409 | |
3410 | tot_dsds = nseg; |
3411 | |
3412 | if (tot_dsds > ql2xshiftctondsd) { |
3413 | struct cmd_type_6 *cmd_pkt; |
3414 | uint16_t more_dsd_lists = 0; |
3415 | struct dsd_dma *dsd_ptr; |
3416 | uint16_t i; |
3417 | |
		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3419 | if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) { |
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
3424 | goto queuing_error; |
3425 | } |
3426 | |
3427 | if (more_dsd_lists <= qpair->dsd_avail) |
3428 | goto sufficient_dsds; |
3429 | else |
3430 | more_dsd_lists -= qpair->dsd_avail; |
3431 | |
3432 | for (i = 0; i < more_dsd_lists; i++) { |
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3434 | if (!dsd_ptr) { |
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
3438 | goto queuing_error; |
3439 | } |
3440 | |
			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3443 | if (!dsd_ptr->dsd_addr) { |
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
3448 | goto queuing_error; |
3449 | } |
			list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
3451 | qpair->dsd_avail++; |
3452 | } |
3453 | |
3454 | sufficient_dsds: |
3455 | req_cnt = 1; |
3456 | |
3457 | if (req->cnt < (req_cnt + 2)) { |
			cnt = (uint16_t)rd_reg_dword_relaxed(
			    &reg->req_q_out[0]);
3460 | if (req->ring_index < cnt) |
3461 | req->cnt = cnt - req->ring_index; |
3462 | else |
3463 | req->cnt = req->length - |
3464 | (req->ring_index - cnt); |
3465 | if (req->cnt < (req_cnt + 2)) |
3466 | goto queuing_error; |
3467 | } |
3468 | |
3469 | ctx = &sp->u.scmd.ct6_ctx; |
3470 | |
3471 | memset(ctx, 0, sizeof(struct ct6_dsd)); |
		ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3474 | if (!ctx->fcp_cmnd) { |
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3477 | goto queuing_error; |
3478 | } |
3479 | |
3480 | /* Initialize the DSD list and dma handle */ |
		INIT_LIST_HEAD(&ctx->dsd_list);
3482 | ctx->dsd_use_cnt = 0; |
3483 | |
3484 | if (cmd->cmd_len > 16) { |
3485 | additional_cdb_len = cmd->cmd_len - 16; |
3486 | if ((cmd->cmd_len % 4) != 0) { |
				/* A SCSI command bigger than 16 bytes must
				 * be a multiple of 4.
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
3493 | goto queuing_error_fcp_cmnd; |
3494 | } |
3495 | ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; |
3496 | } else { |
3497 | additional_cdb_len = 0; |
3498 | ctx->fcp_cmnd_len = 12 + 16 + 4; |
3499 | } |
3500 | |
3501 | cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; |
		cmd_pkt->handle = make_handle(req->id, handle);
3503 | |
3504 | /* Zero out remaining portion of packet. */ |
3505 | /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ |
3506 | clr_ptr = (uint32_t *)cmd_pkt + 2; |
3507 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); |
3508 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); |
3509 | |
3510 | /* Set NPORT-ID and LUN number*/ |
3511 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
3512 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; |
3513 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; |
3514 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; |
3515 | cmd_pkt->vp_index = sp->vha->vp_idx; |
3516 | |
3517 | /* Build IOCB segments */ |
3518 | if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) |
3519 | goto queuing_error_fcp_cmnd; |
3520 | |
3521 | int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); |
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3523 | |
3524 | /* build FCP_CMND IU */ |
3525 | int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); |
3526 | ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; |
3527 | |
3528 | if (cmd->sc_data_direction == DMA_TO_DEVICE) |
3529 | ctx->fcp_cmnd->additional_cdb_len |= 1; |
3530 | else if (cmd->sc_data_direction == DMA_FROM_DEVICE) |
3531 | ctx->fcp_cmnd->additional_cdb_len |= 2; |
3532 | |
3533 | /* Populate the FCP_PRIO. */ |
3534 | if (ha->flags.fcp_prio_enabled) |
3535 | ctx->fcp_cmnd->task_attribute |= |
3536 | sp->fcport->fcp_prio << 3; |
3537 | |
3538 | memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); |
3539 | |
3540 | fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + |
3541 | additional_cdb_len); |
3542 | *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); |
3543 | |
3544 | cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); |
		put_unaligned_le64(ctx->fcp_cmnd_dma,
		    &cmd_pkt->fcp_cmnd_dseg_address);
3547 | |
3548 | sp->flags |= SRB_FCP_CMND_DMA_VALID; |
3549 | cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); |
3550 | /* Set total data segment count. */ |
3551 | cmd_pkt->entry_count = (uint8_t)req_cnt; |
3552 | /* Specify response queue number where |
3553 | * completion should happen |
3554 | */ |
3555 | cmd_pkt->entry_status = (uint8_t) rsp->id; |
3556 | } else { |
3557 | struct cmd_type_7 *cmd_pkt; |
3558 | |
		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3560 | if (req->cnt < (req_cnt + 2)) { |
			cnt = (uint16_t)rd_reg_dword_relaxed(
			    &reg->req_q_out[0]);
3563 | if (req->ring_index < cnt) |
3564 | req->cnt = cnt - req->ring_index; |
3565 | else |
3566 | req->cnt = req->length - |
3567 | (req->ring_index - cnt); |
3568 | } |
3569 | if (req->cnt < (req_cnt + 2)) |
3570 | goto queuing_error; |
3571 | |
3572 | cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; |
		cmd_pkt->handle = make_handle(req->id, handle);
3574 | |
3575 | /* Zero out remaining portion of packet. */ |
3576 | /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ |
3577 | clr_ptr = (uint32_t *)cmd_pkt + 2; |
3578 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); |
3579 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); |
3580 | |
3581 | /* Set NPORT-ID and LUN number*/ |
3582 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
3583 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; |
3584 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; |
3585 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; |
3586 | cmd_pkt->vp_index = sp->vha->vp_idx; |
3587 | |
3588 | int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); |
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));
3591 | |
3592 | /* Populate the FCP_PRIO. */ |
3593 | if (ha->flags.fcp_prio_enabled) |
3594 | cmd_pkt->task |= sp->fcport->fcp_prio << 3; |
3595 | |
3596 | /* Load SCSI command packet. */ |
3597 | memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); |
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3599 | |
3600 | cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); |
3601 | |
3602 | /* Build IOCB segments */ |
3603 | qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); |
3604 | |
3605 | /* Set total data segment count. */ |
3606 | cmd_pkt->entry_count = (uint8_t)req_cnt; |
3607 | /* Specify response queue number where |
3608 | * completion should happen. |
3609 | */ |
3610 | cmd_pkt->entry_status = (uint8_t) rsp->id; |
3611 | |
3612 | } |
3613 | /* Build command packet. */ |
3614 | req->current_outstanding_cmd = handle; |
3615 | req->outstanding_cmds[handle] = sp; |
3616 | sp->handle = handle; |
3617 | cmd->host_scribble = (unsigned char *)(unsigned long)handle; |
3618 | req->cnt -= req_cnt; |
3619 | wmb(); |
3620 | |
3621 | /* Adjust ring index. */ |
3622 | req->ring_index++; |
3623 | if (req->ring_index == req->length) { |
3624 | req->ring_index = 0; |
3625 | req->ring_ptr = req->ring; |
3626 | } else |
3627 | req->ring_ptr++; |
3628 | |
3629 | sp->flags |= SRB_DMA_VALID; |
3630 | |
3631 | /* Set chip new ring index. */ |
3632 | /* write, read and verify logic */ |
3633 | dbval = dbval | (req->id << 8) | (req->ring_index << 16); |
3634 | if (ql2xdbwr) |
3635 | qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); |
3636 | else { |
		wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
		wmb();
		while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
			wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
			wmb();
3642 | } |
3643 | } |
3644 | |
3645 | /* Manage unprocessed RIO/ZIO commands in response queue. */ |
3646 | if (vha->flags.process_response_queue && |
3647 | rsp->ring_ptr->signature != RESPONSE_PROCESSED) |
3648 | qla24xx_process_response_queue(vha, rsp); |
3649 | |
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3651 | return QLA_SUCCESS; |
3652 | |
3653 | queuing_error_fcp_cmnd: |
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3655 | queuing_error: |
3656 | if (tot_dsds) |
3657 | scsi_dma_unmap(cmd); |
3658 | |
3659 | if (sp->u.scmd.crc_ctx) { |
		mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3661 | sp->u.scmd.crc_ctx = NULL; |
3662 | } |
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3664 | |
3665 | return QLA_FUNCTION_FAILED; |
3666 | } |
3667 | |
3668 | static void |
3669 | qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) |
3670 | { |
3671 | struct srb_iocb *aio = &sp->u.iocb_cmd; |
3672 | scsi_qla_host_t *vha = sp->vha; |
3673 | struct req_que *req = sp->qpair->req; |
3674 | srb_t *orig_sp = sp->cmd_sp; |
3675 | |
3676 | memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); |
3677 | abt_iocb->entry_type = ABORT_IOCB_TYPE; |
3678 | abt_iocb->entry_count = 1; |
	abt_iocb->handle = make_handle(req->id, sp->handle);
3680 | if (sp->fcport) { |
3681 | abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
3682 | abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; |
3683 | abt_iocb->port_id[1] = sp->fcport->d_id.b.area; |
3684 | abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; |
3685 | } |
3686 | abt_iocb->handle_to_abort = |
3687 | make_handle(le16_to_cpu(aio->u.abt.req_que_no), |
			aio->u.abt.cmd_hndl);
3689 | abt_iocb->vp_index = vha->vp_idx; |
3690 | abt_iocb->req_que_no = aio->u.abt.req_que_no; |
3691 | |
3692 | /* need to pass original sp */ |
3693 | if (orig_sp) |
		qla_nvme_abort_set_option(abt_iocb, orig_sp);
3695 | |
3696 | /* Send the command to the firmware */ |
3697 | wmb(); |
3698 | } |
3699 | |
3700 | static void |
3701 | qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx) |
3702 | { |
3703 | int i, sz; |
3704 | |
3705 | mbx->entry_type = MBX_IOCB_TYPE; |
3706 | mbx->handle = sp->handle; |
3707 | sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); |
3708 | |
3709 | for (i = 0; i < sz; i++) |
3710 | mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i]; |
3711 | } |
3712 | |
3713 | static void |
3714 | qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt) |
3715 | { |
3716 | sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt; |
3717 | qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg); |
3718 | ct_pkt->handle = sp->handle; |
3719 | } |
3720 | |
3721 | static void qla2x00_send_notify_ack_iocb(srb_t *sp, |
3722 | struct nack_to_isp *nack) |
3723 | { |
3724 | struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy; |
3725 | |
3726 | nack->entry_type = NOTIFY_ACK_TYPE; |
3727 | nack->entry_count = 1; |
3728 | nack->ox_id = ntfy->ox_id; |
3729 | |
3730 | nack->u.isp24.handle = sp->handle; |
3731 | nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; |
3732 | if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { |
3733 | nack->u.isp24.flags = ntfy->u.isp24.flags & |
3734 | cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); |
3735 | } |
3736 | nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; |
3737 | nack->u.isp24.status = ntfy->u.isp24.status; |
3738 | nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; |
3739 | nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; |
3740 | nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; |
3741 | nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; |
3742 | nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; |
3743 | nack->u.isp24.srr_flags = 0; |
3744 | nack->u.isp24.srr_reject_code = 0; |
3745 | nack->u.isp24.srr_reject_code_expl = 0; |
3746 | nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; |
3747 | |
3748 | if (ntfy->u.isp24.status_subcode == ELS_PLOGI && |
3749 | (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) && |
3750 | sp->vha->hw->flags.edif_enabled) { |
		ql_dbg(ql_dbg_disc, sp->vha, 0x3074,
		    "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n",
3753 | sp->name, sp->handle, sp->fcport->loop_id, |
3754 | sp->fcport->d_id.b24); |
3755 | nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP); |
3756 | } |
3757 | } |
3758 | |
3759 | /* |
3760 | * Build NVME LS request |
3761 | */ |
3762 | static void |
3763 | qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt) |
3764 | { |
3765 | struct srb_iocb *nvme; |
3766 | |
3767 | nvme = &sp->u.iocb_cmd; |
3768 | cmd_pkt->entry_type = PT_LS4_REQUEST; |
3769 | cmd_pkt->entry_count = 1; |
3770 | cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); |
3771 | cmd_pkt->vp_index = sp->fcport->vha->vp_idx; |
3772 | |
3773 | if (sp->unsol_rsp) { |
3774 | cmd_pkt->control_flags = |
3775 | cpu_to_le16(CF_LS4_RESPONDER << CF_LS4_SHIFT); |
3776 | cmd_pkt->nport_handle = nvme->u.nvme.nport_handle; |
3777 | cmd_pkt->exchange_address = nvme->u.nvme.exchange_address; |
3778 | } else { |
3779 | cmd_pkt->control_flags = |
3780 | cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT); |
3781 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
3782 | cmd_pkt->rx_dseg_count = cpu_to_le16(1); |
3783 | cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len; |
3784 | cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len; |
		put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3786 | } |
3787 | |
3788 | cmd_pkt->tx_dseg_count = cpu_to_le16(1); |
3789 | cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len; |
3790 | cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len; |
	put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3792 | } |
3793 | |
3794 | static void |
3795 | qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce) |
3796 | { |
3797 | int map, pos; |
3798 | |
3799 | vce->entry_type = VP_CTRL_IOCB_TYPE; |
3800 | vce->handle = sp->handle; |
3801 | vce->entry_count = 1; |
3802 | vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd); |
3803 | vce->vp_count = cpu_to_le16(1); |
3804 | |
3805 | /* |
3806 | * index map in firmware starts with 1; decrement index |
3807 | * this is ok as we never use index 0 |
3808 | */ |
3809 | map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8; |
3810 | pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7; |
3811 | vce->vp_idx_map[map] |= 1 << pos; |
3812 | } |
3813 | |
3814 | static void |
3815 | qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio) |
3816 | { |
3817 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; |
3818 | logio->control_flags = |
3819 | cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO); |
3820 | |
3821 | logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
3822 | logio->port_id[0] = sp->fcport->d_id.b.al_pa; |
3823 | logio->port_id[1] = sp->fcport->d_id.b.area; |
3824 | logio->port_id[2] = sp->fcport->d_id.b.domain; |
3825 | logio->vp_index = sp->fcport->vha->vp_idx; |
3826 | } |
3827 | |
3828 | static int qla_get_iocbs_resource(struct srb *sp) |
3829 | { |
3830 | bool get_exch; |
3831 | bool push_it_through = false; |
3832 | |
3833 | if (!ql2xenforce_iocb_limit) { |
3834 | sp->iores.res_type = RESOURCE_NONE; |
3835 | return 0; |
3836 | } |
3837 | sp->iores.res_type = RESOURCE_NONE; |
3838 | |
3839 | switch (sp->type) { |
3840 | case SRB_TM_CMD: |
3841 | case SRB_PRLI_CMD: |
3842 | case SRB_ADISC_CMD: |
3843 | push_it_through = true; |
3844 | fallthrough; |
3845 | case SRB_LOGIN_CMD: |
3846 | case SRB_ELS_CMD_RPT: |
3847 | case SRB_ELS_CMD_HST: |
3848 | case SRB_ELS_CMD_HST_NOLOGIN: |
3849 | case SRB_CT_CMD: |
3850 | case SRB_NVME_LS: |
3851 | case SRB_ELS_DCMD: |
3852 | get_exch = true; |
3853 | break; |
3854 | |
3855 | case SRB_FXIOCB_DCMD: |
3856 | case SRB_FXIOCB_BCMD: |
3857 | sp->iores.res_type = RESOURCE_NONE; |
3858 | return 0; |
3859 | |
3860 | case SRB_SA_UPDATE: |
3861 | case SRB_SA_REPLACE: |
3862 | case SRB_MB_IOCB: |
3863 | case SRB_ABT_CMD: |
3864 | case SRB_NACK_PLOGI: |
3865 | case SRB_NACK_PRLI: |
3866 | case SRB_NACK_LOGO: |
3867 | case SRB_LOGOUT_CMD: |
3868 | case SRB_CTRL_VP: |
3869 | case SRB_MARKER: |
3870 | default: |
3871 | push_it_through = true; |
3872 | get_exch = false; |
3873 | } |
3874 | |
3875 | sp->iores.res_type |= RESOURCE_IOCB; |
3876 | sp->iores.iocb_cnt = 1; |
3877 | if (get_exch) { |
3878 | sp->iores.res_type |= RESOURCE_EXCH; |
3879 | sp->iores.exch_cnt = 1; |
3880 | } |
3881 | if (push_it_through) |
3882 | sp->iores.res_type |= RESOURCE_FORCE; |
3883 | |
	return qla_get_fw_resources(sp->qpair, &sp->iores);
3885 | } |
3886 | |
3887 | static void |
3888 | qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk) |
3889 | { |
3890 | mrk->entry_type = MARKER_TYPE; |
3891 | mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier; |
	mrk->handle = make_handle(sp->qpair->req->id, sp->handle);
3893 | if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) { |
3894 | mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id); |
3895 | int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun); |
		host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
3897 | mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index; |
3898 | } |
3899 | } |
3900 | |
3901 | int |
3902 | qla2x00_start_sp(srb_t *sp) |
3903 | { |
3904 | int rval = QLA_SUCCESS; |
3905 | scsi_qla_host_t *vha = sp->vha; |
3906 | struct qla_hw_data *ha = vha->hw; |
3907 | struct qla_qpair *qp = sp->qpair; |
3908 | void *pkt; |
3909 | unsigned long flags; |
3910 | |
3911 | if (vha->hw->flags.eeh_busy) |
3912 | return -EIO; |
3913 | |
3914 | spin_lock_irqsave(qp->qp_lock_ptr, flags); |
3915 | rval = qla_get_iocbs_resource(sp); |
3916 | if (rval) { |
		spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3918 | return -EAGAIN; |
3919 | } |
3920 | |
	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3922 | if (!pkt) { |
3923 | rval = -EAGAIN; |
		ql_log(ql_log_warn, vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
3926 | goto done; |
3927 | } |
3928 | |
3929 | switch (sp->type) { |
3930 | case SRB_LOGIN_CMD: |
3931 | IS_FWI2_CAPABLE(ha) ? |
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
3934 | break; |
3935 | case SRB_PRLI_CMD: |
		qla24xx_prli_iocb(sp, pkt);
3937 | break; |
3938 | case SRB_LOGOUT_CMD: |
3939 | IS_FWI2_CAPABLE(ha) ? |
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
3942 | break; |
3943 | case SRB_ELS_CMD_RPT: |
3944 | case SRB_ELS_CMD_HST: |
		qla24xx_els_iocb(sp, pkt);
3946 | break; |
3947 | case SRB_ELS_CMD_HST_NOLOGIN: |
		qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg);
3949 | ((struct els_entry_24xx *)pkt)->handle = sp->handle; |
3950 | break; |
3951 | case SRB_CT_CMD: |
3952 | IS_FWI2_CAPABLE(ha) ? |
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
3955 | break; |
3956 | case SRB_ADISC_CMD: |
3957 | IS_FWI2_CAPABLE(ha) ? |
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
3960 | break; |
3961 | case SRB_TM_CMD: |
3962 | IS_QLAFX00(ha) ? |
3963 | qlafx00_tm_iocb(sp, pkt) : |
		    qla24xx_tm_iocb(sp, pkt);
3965 | break; |
3966 | case SRB_FXIOCB_DCMD: |
3967 | case SRB_FXIOCB_BCMD: |
3968 | qlafx00_fxdisc_iocb(sp, pkt); |
3969 | break; |
3970 | case SRB_NVME_LS: |
		qla_nvme_ls(sp, pkt);
3972 | break; |
3973 | case SRB_ABT_CMD: |
3974 | IS_QLAFX00(ha) ? |
3975 | qlafx00_abort_iocb(sp, pkt) : |
		    qla24xx_abort_iocb(sp, pkt);
3977 | break; |
3978 | case SRB_ELS_DCMD: |
		qla24xx_els_logo_iocb(sp, pkt);
3980 | break; |
3981 | case SRB_CT_PTHRU_CMD: |
		qla2x00_ctpthru_cmd_iocb(sp, pkt);
3983 | break; |
3984 | case SRB_MB_IOCB: |
		qla2x00_mb_iocb(sp, pkt);
3986 | break; |
3987 | case SRB_NACK_PLOGI: |
3988 | case SRB_NACK_PRLI: |
3989 | case SRB_NACK_LOGO: |
		qla2x00_send_notify_ack_iocb(sp, pkt);
3991 | break; |
3992 | case SRB_CTRL_VP: |
		qla25xx_ctrlvp_iocb(sp, pkt);
3994 | break; |
3995 | case SRB_PRLO_CMD: |
		qla24xx_prlo_iocb(sp, pkt);
3997 | break; |
3998 | case SRB_SA_UPDATE: |
		qla24xx_sa_update_iocb(sp, pkt);
4000 | break; |
4001 | case SRB_SA_REPLACE: |
		qla24xx_sa_replace_iocb(sp, pkt);
4003 | break; |
4004 | case SRB_MARKER: |
		qla_marker_iocb(sp, pkt);
4006 | break; |
4007 | default: |
4008 | break; |
4009 | } |
4010 | |
4011 | if (sp->start_timer) { |
		/* ref: TMR timer ref
		 * This code should sit just before the start_iocbs call;
		 * it ensures the caller does not have to do a kref_put
		 * even on failure.
		 */
		kref_get(&sp->cmd_kref);
		add_timer(&sp->u.iocb_cmd.timer);
4019 | } |
4020 | |
4021 | wmb(); |
	qla2x00_start_iocbs(vha, qp->req);
4023 | done: |
4024 | if (rval) |
		qla_put_fw_resources(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
4027 | return rval; |
4028 | } |
4029 | |
4030 | static void |
4031 | qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, |
4032 | struct cmd_bidir *cmd_pkt, uint32_t tot_dsds) |
4033 | { |
4034 | uint16_t avail_dsds; |
4035 | struct dsd64 *cur_dsd; |
4036 | uint32_t req_data_len = 0; |
4037 | uint32_t rsp_data_len = 0; |
4038 | struct scatterlist *sg; |
4039 | int index; |
4040 | int entry_count = 1; |
4041 | struct bsg_job *bsg_job = sp->u.bsg_job; |
4042 | |
	/* Update entry type to indicate bidir command */
	put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
4045 | |
	/* Set the transfer direction; in this case set both flags.
	 * Also set the BD_WRAP_BACK flag; the firmware will take care
	 * of assigning DID=SID for outgoing packets.
	 */
4050 | cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); |
4051 | cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); |
4052 | cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA | |
4053 | BD_WRAP_BACK); |
4054 | |
4055 | req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; |
4056 | cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len); |
4057 | cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len); |
4058 | cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2); |
4059 | |
4060 | vha->bidi_stats.transfer_bytes += req_data_len; |
4061 | vha->bidi_stats.io_count++; |
4062 | |
4063 | vha->qla_stats.output_bytes += req_data_len; |
4064 | vha->qla_stats.output_requests++; |
4065 | |
4066 | /* Only one dsd is available for bidirectional IOCB, remaining dsds |
4067 | * are bundled in continuation iocb |
4068 | */ |
4069 | avail_dsds = 1; |
4070 | cur_dsd = &cmd_pkt->fcp_dsd; |
4071 | |
4072 | index = 0; |
4073 | |
4074 | for_each_sg(bsg_job->request_payload.sg_list, sg, |
4075 | bsg_job->request_payload.sg_cnt, index) { |
4076 | cont_a64_entry_t *cont_pkt; |
4077 | |
4078 | /* Allocate additional continuation packets */ |
4079 | if (avail_dsds == 0) { |
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
4084 | cur_dsd = cont_pkt->dsd; |
4085 | avail_dsds = 5; |
4086 | entry_count++; |
4087 | } |
		append_dsd64(&cur_dsd, sg);
4089 | avail_dsds--; |
4090 | } |
	/* The read-request DSDs always go to a continuation IOCB and
	 * follow the write DSDs. If there is room in the current IOCB,
	 * they are added to it; otherwise a new continuation IOCB is
	 * allocated.
	 */
4096 | for_each_sg(bsg_job->reply_payload.sg_list, sg, |
4097 | bsg_job->reply_payload.sg_cnt, index) { |
4098 | cont_a64_entry_t *cont_pkt; |
4099 | |
4100 | /* Allocate additional continuation packets */ |
4101 | if (avail_dsds == 0) { |
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
4106 | cur_dsd = cont_pkt->dsd; |
4107 | avail_dsds = 5; |
4108 | entry_count++; |
4109 | } |
		append_dsd64(&cur_dsd, sg);
4111 | avail_dsds--; |
4112 | } |
4113 | /* This value should be same as number of IOCB required for this cmd */ |
4114 | cmd_pkt->entry_count = entry_count; |
4115 | } |
4116 | |
4117 | int |
4118 | qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) |
4119 | { |
4120 | |
4121 | struct qla_hw_data *ha = vha->hw; |
4122 | unsigned long flags; |
4123 | uint32_t handle; |
4124 | uint16_t req_cnt; |
4125 | uint16_t cnt; |
4126 | uint32_t *clr_ptr; |
4127 | struct cmd_bidir *cmd_pkt = NULL; |
4128 | struct rsp_que *rsp; |
4129 | struct req_que *req; |
4130 | int rval = EXT_STATUS_OK; |
4131 | |
4134 | rsp = ha->rsp_q_map[0]; |
4135 | req = vha->req; |
4136 | |
4137 | /* Send marker if required */ |
4138 | if (vha->marker_needed != 0) { |
		if (qla2x00_marker(vha, ha->base_qpair,
		    0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
4141 | return EXT_STATUS_MAILBOX; |
4142 | vha->marker_needed = 0; |
4143 | } |
4144 | |
4145 | /* Acquire ring specific lock */ |
4146 | spin_lock_irqsave(&ha->hardware_lock, flags); |
4147 | |
4148 | handle = qla2xxx_get_next_handle(req); |
4149 | if (handle == 0) { |
4150 | rval = EXT_STATUS_BUSY; |
4151 | goto queuing_error; |
4152 | } |
4153 | |
4154 | /* Calculate number of IOCB required */ |
4155 | req_cnt = qla24xx_calc_iocbs(vha, dsds: tot_dsds); |
4156 | |
4157 | /* Check for room on request queue. */ |
4158 | if (req->cnt < req_cnt + 2) { |
4159 | if (IS_SHADOW_REG_CAPABLE(ha)) { |
4160 | cnt = *req->out_ptr; |
4161 | } else { |
			cnt = rd_reg_dword_relaxed(req->req_q_out);
4163 | if (qla2x00_check_reg16_for_disconnect(vha, cnt)) |
4164 | goto queuing_error; |
4165 | } |
4166 | |
4167 | if (req->ring_index < cnt) |
4168 | req->cnt = cnt - req->ring_index; |
4169 | else |
4170 | req->cnt = req->length - |
4171 | (req->ring_index - cnt); |
4172 | } |
4173 | if (req->cnt < req_cnt + 2) { |
4174 | rval = EXT_STATUS_BUSY; |
4175 | goto queuing_error; |
4176 | } |
4177 | |
4178 | cmd_pkt = (struct cmd_bidir *)req->ring_ptr; |
	cmd_pkt->handle = make_handle(req->id, handle);
4180 | |
4181 | /* Zero out remaining portion of packet. */ |
4182 | /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ |
4183 | clr_ptr = (uint32_t *)cmd_pkt + 2; |
4184 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); |
4185 | |
4186 | /* Set NPORT-ID (of vha)*/ |
4187 | cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id); |
4188 | cmd_pkt->port_id[0] = vha->d_id.b.al_pa; |
4189 | cmd_pkt->port_id[1] = vha->d_id.b.area; |
4190 | cmd_pkt->port_id[2] = vha->d_id.b.domain; |
4191 | |
4192 | qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds); |
4193 | cmd_pkt->entry_status = (uint8_t) rsp->id; |
4194 | /* Build command packet. */ |
4195 | req->current_outstanding_cmd = handle; |
4196 | req->outstanding_cmds[handle] = sp; |
4197 | sp->handle = handle; |
4198 | req->cnt -= req_cnt; |
4199 | |
4200 | /* Send the command to the firmware */ |
4201 | wmb(); |
4202 | qla2x00_start_iocbs(vha, req); |
4203 | queuing_error: |
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4205 | |
4206 | return rval; |
4207 | } |
4208 | |
4209 | /** |
4210 | * qla_start_scsi_type6() - Send a SCSI command to the ISP |
4211 | * @sp: command to send to the ISP |
4212 | * |
4213 | * Returns non-zero if a failure occurred, else zero. |
4214 | */ |
4215 | static int |
4216 | qla_start_scsi_type6(srb_t *sp) |
4217 | { |
4218 | int nseg; |
4219 | unsigned long flags; |
4220 | uint32_t *clr_ptr; |
4221 | uint32_t handle; |
4222 | struct cmd_type_6 *cmd_pkt; |
4223 | uint16_t cnt; |
4224 | uint16_t req_cnt; |
4225 | uint16_t tot_dsds; |
4226 | struct req_que *req = NULL; |
4227 | struct rsp_que *rsp; |
4228 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); |
4229 | struct scsi_qla_host *vha = sp->fcport->vha; |
4230 | struct qla_hw_data *ha = vha->hw; |
4231 | struct qla_qpair *qpair = sp->qpair; |
4232 | uint16_t more_dsd_lists = 0; |
4233 | struct dsd_dma *dsd_ptr; |
4234 | uint16_t i; |
4235 | __be32 *fcp_dl; |
4236 | uint8_t additional_cdb_len; |
4237 | struct ct6_dsd *ctx; |
4238 | |
4239 | /* Acquire qpair specific lock */ |
4240 | spin_lock_irqsave(&qpair->qp_lock, flags); |
4241 | |
4242 | /* Setup qpair pointers */ |
4243 | req = qpair->req; |
4244 | rsp = qpair->rsp; |
4245 | |
4246 | /* So we know we haven't pci_map'ed anything yet */ |
4247 | tot_dsds = 0; |
4248 | |
4249 | /* Send marker if required */ |
4250 | if (vha->marker_needed != 0) { |
		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
4253 | return QLA_FUNCTION_FAILED; |
4254 | } |
4255 | vha->marker_needed = 0; |
4256 | } |
4257 | |
4258 | handle = qla2xxx_get_next_handle(req); |
4259 | if (handle == 0) |
4260 | goto queuing_error; |
4261 | |
4262 | /* Map the sg table so we have an accurate count of sg entries needed */ |
4263 | if (scsi_sg_count(cmd)) { |
4264 | nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), |
4265 | scsi_sg_count(cmd), cmd->sc_data_direction); |
4266 | if (unlikely(!nseg)) |
4267 | goto queuing_error; |
4268 | } else { |
4269 | nseg = 0; |
4270 | } |
4271 | |
4272 | tot_dsds = nseg; |
4273 | |
	/* Even though the driver needs only one Type 6 IOCB here, the
	 * firmware still converts the DSDs into Continuation IOCBs.
	 */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
4276 | |
4277 | sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; |
4278 | sp->iores.exch_cnt = 1; |
4279 | sp->iores.iocb_cnt = req_cnt; |
4280 | |
	if (qla_get_fw_resources(sp->qpair, &sp->iores))
4282 | goto queuing_error; |
4283 | |
	more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
4285 | if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) { |
		ql_dbg(ql_dbg_io, vha, 0x3028,
		    "Num of DSD list %d is more than %d for cmd=%p.\n",
		    more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN, cmd);
4289 | goto queuing_error; |
4290 | } |
4291 | |
4292 | if (more_dsd_lists <= qpair->dsd_avail) |
4293 | goto sufficient_dsds; |
4294 | else |
4295 | more_dsd_lists -= qpair->dsd_avail; |
4296 | |
4297 | for (i = 0; i < more_dsd_lists; i++) { |
		dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
4299 | if (!dsd_ptr) { |
			ql_log(ql_log_fatal, vha, 0x3029,
			    "Failed to allocate memory for dsd_dma for cmd=%p.\n", cmd);
4302 | goto queuing_error; |
4303 | } |
		INIT_LIST_HEAD(&dsd_ptr->list);
4305 | |
		dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
		    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
4308 | if (!dsd_ptr->dsd_addr) { |
			kfree(dsd_ptr);
			ql_log(ql_log_fatal, vha, 0x302a,
			    "Failed to allocate memory for dsd_addr for cmd=%p.\n", cmd);
4312 | goto queuing_error; |
4313 | } |
		list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
4315 | qpair->dsd_avail++; |
4316 | } |
4317 | |
4318 | sufficient_dsds: |
4319 | req_cnt = 1; |
4320 | |
4321 | if (req->cnt < (req_cnt + 2)) { |
4322 | if (IS_SHADOW_REG_CAPABLE(ha)) { |
4323 | cnt = *req->out_ptr; |
4324 | } else { |
			cnt = (uint16_t)rd_reg_dword_relaxed(req->req_q_out);
4326 | if (qla2x00_check_reg16_for_disconnect(vha, cnt)) |
4327 | goto queuing_error; |
4328 | } |
4329 | |
4330 | if (req->ring_index < cnt) |
4331 | req->cnt = cnt - req->ring_index; |
4332 | else |
4333 | req->cnt = req->length - (req->ring_index - cnt); |
4334 | if (req->cnt < (req_cnt + 2)) |
4335 | goto queuing_error; |
4336 | } |
4337 | |
4338 | ctx = &sp->u.scmd.ct6_ctx; |
4339 | |
4340 | memset(ctx, 0, sizeof(struct ct6_dsd)); |
	ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
	    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
4343 | if (!ctx->fcp_cmnd) { |
		ql_log(ql_log_fatal, vha, 0x3031,
		    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
4346 | goto queuing_error; |
4347 | } |
4348 | |
4349 | /* Initialize the DSD list and dma handle */ |
	INIT_LIST_HEAD(&ctx->dsd_list);
4351 | ctx->dsd_use_cnt = 0; |
4352 | |
4353 | if (cmd->cmd_len > 16) { |
4354 | additional_cdb_len = cmd->cmd_len - 16; |
4355 | if (cmd->cmd_len % 4 || |
4356 | cmd->cmd_len > QLA_CDB_BUF_SIZE) { |
4357 | /* |
4358 | * SCSI command bigger than 16 bytes must be |
4359 | * multiple of 4 or too big. |
4360 | */ |
4361 | ql_log(ql_log_warn, vha, 0x3033, |
4362 | fmt: "scsi cmd len %d not multiple of 4 for cmd=%p.\n" , |
4363 | cmd->cmd_len, cmd); |
4364 | goto queuing_error_fcp_cmnd; |
4365 | } |
4366 | ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; |
4367 | } else { |
4368 | additional_cdb_len = 0; |
4369 | ctx->fcp_cmnd_len = 12 + 16 + 4; |
4370 | } |
4371 | |
4372 | /* Build command packet. */ |
4373 | req->current_outstanding_cmd = handle; |
4374 | req->outstanding_cmds[handle] = sp; |
4375 | sp->handle = handle; |
4376 | cmd->host_scribble = (unsigned char *)(unsigned long)handle; |
4377 | req->cnt -= req_cnt; |
4378 | |
4379 | cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; |
	cmd_pkt->handle = make_handle(req->id, handle);
4381 | |
4382 | /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ |
4383 | clr_ptr = (uint32_t *)cmd_pkt + 2; |
4384 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); |
4385 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); |
4386 | |
4387 | /* Set NPORT-ID and LUN number */ |
4388 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
4389 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; |
4390 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; |
4391 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; |
4392 | cmd_pkt->vp_index = sp->vha->vp_idx; |
4393 | |
4394 | /* Build IOCB segments */ |
4395 | qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds); |
4396 | |
4397 | int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); |
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
4399 | |
4400 | /* build FCP_CMND IU */ |
4401 | int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); |
4402 | ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; |
4403 | |
4404 | if (cmd->sc_data_direction == DMA_TO_DEVICE) |
4405 | ctx->fcp_cmnd->additional_cdb_len |= 1; |
4406 | else if (cmd->sc_data_direction == DMA_FROM_DEVICE) |
4407 | ctx->fcp_cmnd->additional_cdb_len |= 2; |
4408 | |
4409 | /* Populate the FCP_PRIO. */ |
4410 | if (ha->flags.fcp_prio_enabled) |
4411 | ctx->fcp_cmnd->task_attribute |= |
4412 | sp->fcport->fcp_prio << 3; |
4413 | |
4414 | memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); |
4415 | |
4416 | fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + |
4417 | additional_cdb_len); |
4418 | *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); |
4419 | |
4420 | cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); |
	put_unaligned_le64(ctx->fcp_cmnd_dma,
	    &cmd_pkt->fcp_cmnd_dseg_address);
4423 | |
4424 | sp->flags |= SRB_FCP_CMND_DMA_VALID; |
4425 | cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); |
4426 | /* Set total data segment count. */ |
4427 | cmd_pkt->entry_count = (uint8_t)req_cnt; |
4428 | |
4429 | wmb(); |
4430 | /* Adjust ring index. */ |
4431 | req->ring_index++; |
4432 | if (req->ring_index == req->length) { |
4433 | req->ring_index = 0; |
4434 | req->ring_ptr = req->ring; |
4435 | } else { |
4436 | req->ring_ptr++; |
4437 | } |
4438 | |
4439 | sp->qpair->cmd_cnt++; |
4440 | sp->flags |= SRB_DMA_VALID; |
4441 | |
4442 | /* Set chip new ring index. */ |
	wrt_reg_dword(req->req_q_in, req->ring_index);
4444 | |
4445 | /* Manage unprocessed RIO/ZIO commands in response queue. */ |
4446 | if (vha->flags.process_response_queue && |
4447 | rsp->ring_ptr->signature != RESPONSE_PROCESSED) |
4448 | qla24xx_process_response_queue(vha, rsp); |
4449 | |
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
4451 | |
4452 | return QLA_SUCCESS; |
4453 | |
4454 | queuing_error_fcp_cmnd: |
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
4456 | |
4457 | queuing_error: |
4458 | if (tot_dsds) |
4459 | scsi_dma_unmap(cmd); |
4460 | |
	qla_put_fw_resources(sp->qpair, &sp->iores);
4462 | |
4463 | if (sp->u.scmd.crc_ctx) { |
		mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
4465 | sp->u.scmd.crc_ctx = NULL; |
4466 | } |
4467 | |
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
4469 | |
4470 | return QLA_FUNCTION_FAILED; |
4471 | } |
4472 | |