// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
	struct purex_item *item);
static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
	uint16_t size);
static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
	void *pkt);
static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp);
static void
qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
{
	void *pkt = &item->iocb;
	uint16_t pkt_size = item->size;

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
	       "%s: Enter\n", __func__);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
	       "-------- ELS REQ -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
		       pkt, pkt_size);

	fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt, 0);
}

const char *const port_state_str[] = {
	[FCS_UNKNOWN]		= "Unknown",
	[FCS_UNCONFIGURED]	= "UNCONFIGURED",
	[FCS_DEVICE_DEAD]	= "DEAD",
	[FCS_DEVICE_LOST]	= "LOST",
	[FCS_ONLINE]		= "ONLINE"
};

#define SFP_DISABLE_LASER_INITIATED	0x15	/* Sub code of 8070 AEN */
#define SFP_ENABLE_LASER_INITIATED	0x16	/* Sub code of 8070 AEN */

static inline void display_Laser_info(scsi_qla_host_t *vha,
				      u16 mb1, u16 mb2, u16 mb3)
{
	if (mb1 == SFP_DISABLE_LASER_INITIATED)
		ql_log(ql_log_warn, vha, 0xf0a2,
		       "SFP temperature (%d C) reached/exceeded the threshold (%d C). Laser is disabled.\n",
		       mb3, mb2);
	if (mb1 == SFP_ENABLE_LASER_INITIATED)
		ql_log(ql_log_warn, vha, 0xf0a3,
		       "SFP temperature (%d C) reached normal operating level. Laser is enabled.\n",
		       mb3);
}

static void
qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
{
	struct abts_entry_24xx *abts =
	    (struct abts_entry_24xx *)&pkt->iocb;
	struct qla_hw_data *ha = vha->hw;
	struct els_entry_24xx *rsp_els;
	struct abts_entry_24xx *abts_rsp;
	dma_addr_t dma;
	uint32_t fctl;
	int rval;

	ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);

	ql_log(ql_log_warn, vha, 0x0287,
	       "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
	       abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
	       abts->seq_id, abts->seq_cnt);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	       "-------- ABTS RCV -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
		       (uint8_t *)abts, sizeof(*abts));

	rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
				     GFP_KERNEL);
	if (!rsp_els) {
		ql_log(ql_log_warn, vha, 0x0287,
		       "Failed to allocate DMA buffer for ABTS/ELS RSP.\n");
		return;
	}

	/* terminate exchange */
	rsp_els->entry_type = ELS_IOCB_TYPE;
	rsp_els->entry_count = 1;
	rsp_els->nport_handle = cpu_to_le16(~0);
	rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
	rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
	ql_dbg(ql_dbg_init, vha, 0x0283,
	       "Sending ELS Response to terminate exchange %#x...\n",
	       abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	       "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
		       (uint8_t *)rsp_els, sizeof(*rsp_els));
	rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0288,
		       "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (rsp_els->comp_status) {
		ql_log(ql_log_warn, vha, 0x0289,
		       "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		       __func__, rsp_els->comp_status,
		       rsp_els->error_subcode_1, rsp_els->error_subcode_2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028a,
		       "%s: abort exchange done.\n", __func__);
	}

	/* send ABTS response */
	abts_rsp = (void *)rsp_els;
	memset(abts_rsp, 0, sizeof(*abts_rsp));
	abts_rsp->entry_type = ABTS_RSP_TYPE;
	abts_rsp->entry_count = 1;
	abts_rsp->nport_handle = abts->nport_handle;
	abts_rsp->vp_idx = abts->vp_idx;
	abts_rsp->sof_type = abts->sof_type & 0xf0;
	abts_rsp->rx_xch_addr = abts->rx_xch_addr;
	abts_rsp->d_id[0] = abts->s_id[0];
	abts_rsp->d_id[1] = abts->s_id[1];
	abts_rsp->d_id[2] = abts->s_id[2];
	abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
	abts_rsp->s_id[0] = abts->d_id[0];
	abts_rsp->s_id[1] = abts->d_id[1];
	abts_rsp->s_id[2] = abts->d_id[2];
	abts_rsp->cs_ctl = abts->cs_ctl;
	/*
	 * Build F_CTL for the BA_ACC: take the received ABTS's
	 * exchange-context bit (bit 23 of F_CTL, i.e. bit 7 of
	 * f_ctl[2]), flip it, clear the remaining bits of that byte,
	 * and OR in the last-sequence, end-sequence and
	 * sequence-initiative flags.
	 */
	fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
	    FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
	abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
	abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
	abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
	abts_rsp->type = FC_TYPE_BLD;
	abts_rsp->rx_id = abts->rx_id;
	abts_rsp->ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
	abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
	abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
	ql_dbg(ql_dbg_init, vha, 0x028b,
	       "Sending BA ACC response to ABTS %#x...\n",
	       abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	       "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
		       (uint8_t *)abts_rsp, sizeof(*abts_rsp));
	rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x028c,
		       "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (abts_rsp->comp_status) {
		ql_log(ql_log_warn, vha, 0x028d,
		       "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		       __func__, abts_rsp->comp_status,
		       abts_rsp->payload.error.subcode1,
		       abts_rsp->payload.error.subcode2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028e,
		       "%s: done.\n", __func__);
	}

	dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
}

/**
 * __qla_consume_iocb - tell the firmware that the driver has processed or
 * consumed the head IOCB, along with its continuation IOCBs, from the
 * provided response queue.
 * @vha: host adapter pointer
 * @pkt: pointer to current packet. On return, this pointer shall move
 * to the next packet.
 * @rsp: response queue pointer.
 *
 * It is assumed @pkt is the head IOCB, not a continuation IOCB.
 */
void __qla_consume_iocb(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp)
{
	struct rsp_que *rsp_q = *rsp;
	response_t *new_pkt;
	uint16_t entry_count_remaining;
	struct purex_entry_24xx *purex = *pkt;

	entry_count_remaining = purex->entry_count;
	while (entry_count_remaining > 0) {
		new_pkt = rsp_q->ring_ptr;
		*pkt = new_pkt;

		rsp_q->ring_index++;
		if (rsp_q->ring_index == rsp_q->length) {
			rsp_q->ring_index = 0;
			rsp_q->ring_ptr = rsp_q->ring;
		} else {
			rsp_q->ring_ptr++;
		}

		new_pkt->signature = RESPONSE_PROCESSED;
		/* flush signature */
		wmb();
		--entry_count_remaining;
	}
}
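
/*
 * Illustrative sketch only (hypothetical caller, not part of this
 * driver): even when a multi-IOCB purex packet cannot be processed,
 * it must still be consumed so the response ring stays in sync with
 * the firmware. An error path would look like:
 *
 *	if (!buf) {
 *		__qla_consume_iocb(vha, &pkt, &rsp);
 *		return -ENOMEM;
 *	}
 */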

/**
 * __qla_copy_purex_to_buffer - extract the ELS payload from a Purex IOCB
 * and save it to the provided buffer
 * @vha: host adapter pointer
 * @pkt: pointer to the Purex IOCB
 * @rsp: response queue
 * @buf: buffer to copy the extracted ELS payload into
 * @buf_len: buffer length
 */
int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
{
	struct purex_entry_24xx *purex = *pkt;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0;
	uint16_t entry_count_remaining;
	u16 tpad;

	entry_count_remaining = purex->entry_count;
	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
	    - PURX_ELS_HEADER_SIZE;

	/*
	 * The payload may not end on a 4-byte boundary. Round up /
	 * pad so there is room to byte-swap before saving the data.
	 */
	tpad = roundup(total_bytes, 4);

	if (buf_len < tpad) {
		ql_dbg(ql_dbg_async, vha, 0x5084,
		       "%s buffer is too small %d < %d\n",
		       __func__, buf_len, tpad);
		__qla_consume_iocb(vha, pkt, rsp);
		return -EIO;
	}

	pending_bytes = total_bytes = tpad;
	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
	    sizeof(purex->els_frame_payload) : pending_bytes;

	memcpy(buf, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	((response_t *)purex)->signature = RESPONSE_PROCESSED;
	/* flush signature */
	wmb();

	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				       "Unexpected IOCB type, partial data 0x%x\n",
				       buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
			    sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy((buf + buffer_copy_offset), new_pkt->data,
				       no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				       "Attempt to copy more than we got, optimizing..%x\n",
				       buffer_copy_offset);
				memcpy((buf + buffer_copy_offset), new_pkt->data,
				       total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			/* flush signature */
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			       "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
			       total_bytes, entry_count_remaining);
			return -EIO;
		}
	} while (entry_count_remaining > 0);

	be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);

	return 0;
}
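
/*
 * Illustrative usage sketch (hypothetical caller, not driver code):
 * because the copy is padded to a 4-byte boundary for the in-place
 * be32 swap above, the destination buffer must be sized with the same
 * roundup:
 *
 *	u32 len = roundup(payload_len, 4);
 *	u8 *buf = kzalloc(len, GFP_ATOMIC);
 *
 *	if (buf && !__qla_copy_purex_to_buffer(vha, &pkt, &rsp, buf, len))
 *		... parse the byte-swapped ELS payload ...
 */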

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		       "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = rd_reg_word(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared. Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (rd_reg_word(&reg->semaphore) & BIT_0) {
			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				       "Unrecognized interrupt type (%d).\n",
				       mb[0]);
			}
			/* Release mailbox registers. */
			wrt_reg_word(&reg->semaphore, 0);
			rd_reg_word(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			qla_schedule_eeh_work(vha);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		       "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = rd_reg_dword(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_word(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				       "Parity error -- HCCR=%x, Dumping "
				       "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				       "RISC paused -- HCCR=%x, Dumping "
				       "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared. Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			wrt_reg_word(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			       "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
		rd_reg_word_relaxed(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	__le16 __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = rd_reg_word(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
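
/*
 * Note on the loop above: @mboxes is a bitmap of the mailbox registers
 * the caller asked to read back (mcp->in_mb), consumed one bit per
 * iteration. Hypothetical example, not taken from this driver:
 *
 *	mcp->in_mb = MBX_2|MBX_1|MBX_0;
 *
 * would read back only mailbox_out[0..2]; reads of the remaining
 * mailbox registers are skipped.
 */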

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	__le16 __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = &reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = &reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = rd_reg_word(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	       "Inter-Driver Communication %s -- "
	       "%04x %04x %04x %04x %04x %04x %04x.\n",
	       event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	       mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		       "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		       vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			       "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		       "%lu Inter-Driver Communication %s -- "
		       "Extend timeout by=%d.\n",
		       vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}

#define LS_UNKNOWN	2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "64", "10"
	};
#define	QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}

static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	       "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	       mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	       "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	       "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
		     IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		       "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *	    Error-level 0x1 = Non-Fatal error
			 *	    Error-level 0x2 = Recoverable Fatal error
			 *	    Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			       "Register: protocol_engine_id=0x%x "
			       "fw_err_code=0x%x err_level=0x%x.\n",
			       protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			       "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				       "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				       "Recoverable Fatal error: Chip reset "
				       "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				       "Unrecoverable Fatal error: Set FAILED "
				       "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 * - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit 8      = N/W Interface Link-up
			 *	Bit 9      = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *	    SFP Status 0x0 = SFP+ transceiver not expected
			 *	    SFP Status 0x1 = SFP+ transceiver not present
			 *	    SFP Status 0x2 = SFP+ transceiver invalid
			 *	    SFP Status 0x3 = SFP+ transceiver present and
			 *	    valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit 15     = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *	    SFP info 0x0 = Unrecognized transceiver for
			 *	    Ethernet
			 *	    SFP info 0x1 = SFP+ brand validation failed
			 *	    SFP info 0x2 = SFP+ speed validation failed
			 *	    SFP info 0x3 = SFP+ access error
			 *	Bit 18     = SFP Multirate
			 *	Bit 19     = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *	    DCBX Status 0x0 = DCBX Disabled
			 *	    DCBX Status 0x1 = DCBX Enabled
			 *	    DCBX Status 0x2 = DCBX Exchange error
			 *	Bit 31     = Reserved
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			       "Peg-to-Fc Status Register:\n"
			       "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			       "nw_interface_signal_detect=0x%x"
			       "\nsfp_status=0x%x.\n", peg_fw_state,
			       nw_interface_link_up, nw_interface_signal_detect,
			       sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			       "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			       "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
			       htbt_counter, htbt_monitor_enable,
			       sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			       "sfp_tx_fault=0x%x, link_speed=0x%x, "
			       "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			       dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			       "Heartbeat Failure encountered, chip reset "
			       "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		       "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}

/**
 * qla27xx_copy_multiple_pkt() - Copy over purex/purls packets that can
 * span over multiple IOCBs.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 * @rsp: Response queue
 * @is_purls: true for an unsolicited received FC-NVMe LS rsp IOCB,
 *            false for an unsolicited received ELS IOCB
 * @byte_order: true to change the byte ordering of the iocb payload
 */
struct purex_item *
qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha, void **pkt,
			  struct rsp_que **rsp, bool is_purls,
			  bool byte_order)
{
	struct purex_entry_24xx *purex = NULL;
	struct pt_ls4_rx_unsol *purls = NULL;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0, payload_size = 0;
	uint16_t entry_count, entry_count_remaining;
	struct purex_item *item;
	void *iocb_pkt = NULL;

	if (is_purls) {
		purls = *pkt;
		total_bytes = (le16_to_cpu(purls->frame_size) & 0x0FFF) -
			      PURX_ELS_HEADER_SIZE;
		entry_count = entry_count_remaining = purls->entry_count;
		payload_size = sizeof(purls->payload);
	} else {
		purex = *pkt;
		total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) -
			      PURX_ELS_HEADER_SIZE;
		entry_count = entry_count_remaining = purex->entry_count;
		payload_size = sizeof(purex->els_frame_payload);
	}

	pending_bytes = total_bytes;
	no_bytes = (pending_bytes > payload_size) ? payload_size :
		   pending_bytes;
	ql_dbg(ql_dbg_async, vha, 0x509a,
	       "%s LS, frame_size 0x%x, entry count %d\n",
	       (is_purls ? "PURLS" : "FPIN"), total_bytes, entry_count);

	item = qla24xx_alloc_purex_item(vha, total_bytes);
	if (!item)
		return item;

	iocb_pkt = &item->iocb;

	if (is_purls)
		memcpy(iocb_pkt, &purls->payload[0], no_bytes);
	else
		memcpy(iocb_pkt, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	if (is_purls)
		((response_t *)purls)->signature = RESPONSE_PROCESSED;
	else
		((response_t *)purex)->signature = RESPONSE_PROCESSED;
	wmb();

	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
				ql_dbg(ql_dbg_async, vha, 0x5084,
				       "Ran out of IOCBs, partial data 0x%x\n",
				       buffer_copy_offset);
				cpu_relax();
				continue;
			}

			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				       "Unexpected IOCB type, partial data 0x%x\n",
				       buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
				   sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
				       new_pkt->data, no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				       "Attempt to copy more than we got, optimizing..%x\n",
				       buffer_copy_offset);
				memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
				       new_pkt->data,
				       total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			       "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
			       total_bytes, entry_count_remaining);
			qla24xx_free_purex_item(item);
			return NULL;
		}
	} while (entry_count_remaining > 0);

	if (byte_order)
		host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);

	return item;
}

int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}

fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
			       u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

/* Shall be called only on supported adapters. */
static void
qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;
	bool reset_isp_needed = false;

	ql_log(ql_log_warn, vha, 0x02f0,
	       "MPI Heartbeat stop. MPI reset is%s needed. "
	       "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
	       mb[1] & BIT_8 ? "" : " not",
	       mb[0], mb[1], mb[2], mb[3]);

	if ((mb[1] & BIT_8) == 0)
		return;

	ql_log(ql_log_warn, vha, 0x02f1,
	       "MPI Heartbeat stop. FW dump needed\n");

	if (ql2xfulldump_on_mpifail) {
		ha->isp_ops->fw_dump(vha);
		reset_isp_needed = true;
	}

	ha->isp_ops->mpi_fw_dump(vha, 1);

	if (reset_isp_needed) {
		vha->hw->flags.fw_init_done = 0;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}

static struct purex_item *
qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
{
	struct purex_item *item = NULL;
	uint8_t item_hdr_size = sizeof(*item);

	if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
		item = kzalloc(item_hdr_size +
		    (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
	} else {
		if (atomic_inc_return(&vha->default_item.in_use) == 1) {
			item = &vha->default_item;
			goto initialize_purex_header;
		} else {
			item = kzalloc(item_hdr_size, GFP_ATOMIC);
		}
	}
	if (!item) {
		ql_log(ql_log_warn, vha, 0x5092,
		       ">> Failed to allocate purex list item.\n");

		return NULL;
	}

initialize_purex_header:
	item->vha = vha;
	item->size = size;
	return item;
}
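
/*
 * The allocator above is paired with qla24xx_free_purex_item(), defined
 * elsewhere in the driver. A minimal sketch of the expected pairing,
 * assuming the default_item reuse scheme shown above (illustrative
 * only, not this driver's actual implementation):
 *
 *	if (item == &item->vha->default_item)
 *		atomic_set(&item->vha->default_item.in_use, 0);
 *	else
 *		kfree(item);
 */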

void
qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
			 void (*process_item)(struct scsi_qla_host *vha,
					      struct purex_item *pkt))
{
	struct purex_list *list = &vha->purex_list;
	ulong flags;

	pkt->process_item = process_item;

	spin_lock_irqsave(&list->lock, flags);
	list_add_tail(&pkt->list, &list->head);
	spin_unlock_irqrestore(&list->lock, flags);

	set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
}
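
/*
 * Illustrative wiring sketch (the actual call site lives in the
 * response-queue processing code, not shown in this section): a decoded
 * FPIN item is handed off for deferred DPC-context processing like so:
 *
 *	item = qla27xx_copy_fpin_pkt(vha, &pkt, &rsp);
 *	if (item)
 *		qla24xx_queue_purex_item(vha, item,
 *					 qla27xx_process_purex_fpin);
 */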

/**
 * qla24xx_copy_std_pkt() - Copy over a purex ELS packet that is
 * contained in a single IOCB.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 */
static struct purex_item
*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
{
	struct purex_item *item;

	item = qla24xx_alloc_purex_item(vha,
					QLA_DEFAULT_PAYLOAD_SIZE);
	if (!item)
		return item;

	memcpy(&item->iocb, pkt, sizeof(item->iocb));
	return item;
}

/**
 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can
 * span over multiple IOCBs.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 * @rsp: Response queue
 */
static struct purex_item *
qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
		      struct rsp_que **rsp)
{
	struct purex_entry_24xx *purex = *pkt;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0;
	uint16_t entry_count, entry_count_remaining;
	struct purex_item *item;
	void *fpin_pkt = NULL;

	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
	    - PURX_ELS_HEADER_SIZE;
	pending_bytes = total_bytes;
	entry_count = entry_count_remaining = purex->entry_count;
	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
		   sizeof(purex->els_frame_payload) : pending_bytes;
	ql_log(ql_log_info, vha, 0x509a,
	       "FPIN ELS, frame_size 0x%x, entry count %d\n",
	       total_bytes, entry_count);

	item = qla24xx_alloc_purex_item(vha, total_bytes);
	if (!item)
		return item;

	fpin_pkt = &item->iocb;

	memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	((response_t *)purex)->signature = RESPONSE_PROCESSED;
	wmb();

	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
				ql_dbg(ql_dbg_async, vha, 0x5084,
				       "Ran out of IOCBs, partial data 0x%x\n",
				       buffer_copy_offset);
				cpu_relax();
				continue;
			}

			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				       "Unexpected IOCB type, partial data 0x%x\n",
				       buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
				   sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy(((uint8_t *)fpin_pkt +
				       buffer_copy_offset), new_pkt->data,
				       no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				       "Attempt to copy more than we got, optimizing..%x\n",
				       buffer_copy_offset);
				memcpy(((uint8_t *)fpin_pkt +
				       buffer_copy_offset), new_pkt->data,
				       total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			       "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
			       total_bytes, entry_count_remaining);
			qla24xx_free_purex_item(item);
			return NULL;
		}
	} while (entry_count_remaining > 0);
	host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
	return item;
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t handle_cnt;
	uint16_t cnt, mbx;
	uint32_t handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t rscn_entry, host_pid;
	unsigned long flags;
	fc_port_t *fcport = NULL;

	if (!vha->hw->flags.fw_started) {
		ql_log(ql_log_warn, vha, 0x50ff,
		       "Dropping AEN - %04x %04x %04x %04x.\n",
		       mb[0], mb[1], mb[2], mb[3]);
		return;
	}

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = make_handle(mb[2], mb[1]);
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = make_handle(mb[2], mb[1]);
		handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
					 RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
							  handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		       "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = 0;

		vha->hw_err_cnt++;

		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			u16 m[4];

			m[0] = rd_reg_word(&reg24->mailbox4);
			m[1] = rd_reg_word(&reg24->mailbox5);
			m[2] = rd_reg_word(&reg24->mailbox6);
			mbx = m[3] = rd_reg_word(&reg24->mailbox7);

			ql_log(ql_log_warn, vha, 0x5003,
			       "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
			       mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
		} else
			ql_log(ql_log_warn, vha, 0x5003,
			       "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
			       mb[1], mb[2], mb[3]);

		if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
		    rd_reg_word(&reg24->mailbox7) & BIT_8)
			ha->isp_ops->mpi_fw_dump(vha, 1);
		ha->isp_ops->fw_dump(vha);
		ha->flags.fw_init_done = 0;
		QLA_FW_STOPPED(ha);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				       "Unrecoverable Hardware Error: adapter "
				       "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
						&vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			       "Unrecoverable Hardware Error: adapter marked "
			       "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		       "ISP Request Transfer Error (%x).\n", mb[1]);

		vha->hw_err_cnt++;

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		       "ISP Response Transfer Error (%x).\n", mb[1]);

		vha->hw_err_cnt++;

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		       "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		       "LOOP INIT ERROR (%x).\n", mb[1]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ha->flags.lip_ae = 1;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		       "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		       "LOOP UP detected (%s Gbps).\n",
		       qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mb[2] & BIT_0)
				ql_log(ql_log_info, vha, 0x11a0,
				       "FEC=enabled (link up).\n");
		}

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

		if (vha->link_down_time < vha->hw->port_down_retry_count) {
			vha->short_link_down_cnt++;
			vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
		}

		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		SAVE_TOPO(ha);
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;
		vha->link_down_time = 0;

		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? rd_reg_word(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		       "LOOP DOWN detected (%x %x %x %x).\n",
		       mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP
			 * Restore for Physical Port only
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled &&
				    (ha->current_topology == ISP_CFG_F)) {
					memcpy(vha->port_name, ha->port_name, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					       vha, 0x00d8, "LOOP DOWN detected,"
					       "restore WWPN %016llx\n",
					       wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		       "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		ha->flags.lip_ae = 0;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			       "DCBX Completed -- %04x %04x %04x.\n",
			       mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			       "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
					   LOOP_DOWN_TIME);
			if (!N2N_TOPO(ha))
				qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		       "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
					   LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 * Event is global, vp_idx is NOT all vps,
		 * vp_idx does not match
		 * Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		     (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			       "Port %s %04x %04x %04x.\n",
			       mb[1] == 0xffff ? "unavailable" : "logout",
			       mb[1], mb[2], mb[3]);

			if (mb[1] == 0xffff)
				goto global_port_update;

			if (mb[1] == NPH_SNS_LID(ha)) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* use handle_cnt for loop id/nport handle */
			if (IS_FWI2_CAPABLE(ha))
				handle_cnt = NPH_SNS;
			else
				handle_cnt = SIMPLE_NAME_SERVER;
			if (mb[1] == handle_cnt) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* Port logout */
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			       "Marking port lost loopid=%04x portid=%06x.\n",
			       fcport->loop_id, fcport->d_id.b24);
			if (qla_ini_mode_enabled(vha)) {
				fcport->logout_on_delete = 0;
				qlt_schedule_sess_for_deletion(fcport);
			}
			break;

global_port_update:
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
					   LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
						   FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event, etc. earlier indicating loop is down) then process
		 * it. Otherwise ignore it and wait for an RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    !ha->flags.n2n_ae &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			       "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			       mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		       "Port database changed %04x %04x %04x.\n",
		       mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);
		vha->scan.scan_retry = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(VP_CONFIG_OK, &vha->vp_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_log(ql_log_warn, vha, 0x5013,
		       "RSCN database changed -- %04x %04x %04x.\n",
		       mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
			   | vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			       "Ignoring RSCN update to local host "
			       "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
		{
			struct event_arg ea;

			memset(&ea, 0, sizeof(ea));
			ea.id.b24 = rscn_entry;
			ea.id.b.rsvd_1 = rscn_entry >> 24;
			qla2x00_handle_rscn(vha, &ea);
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		}
		break;
	case MBA_CONGN_NOTI_RECV:
		if (!ha->flags.scm_enabled ||
		    mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
			break;

		if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
			ql_dbg(ql_dbg_async, vha, 0x509b,
			       "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
		} else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
			ql_log(ql_log_warn, vha, 0x509b,
			       "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
		}
		break;
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		       "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		       "Discard RND Frame -- %04x %04x %04x.\n",
		       mb[1], mb[2], mb[3]);
		vha->interface_err_cnt++;
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		       "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		       "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		       mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			       "Alert 84XX: panic recovery %04x %04x.\n",
			       mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			       "Alert 84XX: firmware version %x.\n",
			       ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			       "Alert 84XX: diagnostic firmware version %x.\n",
			       ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = rd_reg_word(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
		fallthrough;
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
		fallthrough;
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			vha->hw_err_cnt++;
			qla27xx_handle_8200_aen(vha, mb);
		} else if (IS_QLA83XX(ha)) {
			mb[4] = rd_reg_word(&reg24->mailbox4);
			mb[5] = rd_reg_word(&reg24->mailbox5);
			mb[6] = rd_reg_word(&reg24->mailbox6);
			mb[7] = rd_reg_word(&reg24->mailbox7);
			qla83xx_handle_8200_aen(vha, mb);
		} else {
			ql_dbg(ql_dbg_async, vha, 0x5052,
			    "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
			    mb[0], mb[1], mb[2], mb[3]);
		}
		break;

	case MBA_DPORT_DIAGNOSTICS:
		if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR ||
		    (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR)
			vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
		memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			static char *results[] = {
			    "start", "done(pass)", "done(error)", "undefined" };
			static char *types[] = {
			    "none", "dynamic", "static", "other" };
			uint result = mb[1] >> 0 & 0x3;
			uint type = mb[1] >> 6 & 0x3;
			uint sw = mb[1] >> 15 & 0x1;
			ql_dbg(ql_dbg_async, vha, 0x5052,
			    "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
			    results[result], types[type], sw);
			if (result == 2) {
				static char *reasons[] = {
				    "reserved", "unexpected reject",
				    "unexpected phase", "retry exceeded",
				    "timed out", "not supported",
				    "user stopped" };
				uint reason = mb[2] >> 0 & 0xf;
				uint phase = mb[2] >> 12 & 0xf;
				ql_dbg(ql_dbg_async, vha, 0x5052,
				    "D-Port Diagnostics: reason=%s phase=%u\n",
				    reason < 7 ? reasons[reason] : "other",
				    phase >> 1);
			}
		}
		break;

	case MBA_TEMPERATURE_ALERT:
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
			display_Laser_info(vha, mb[1], mb[2], mb[3]);
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		break;

	case MBA_TRANS_INSERT:
		ql_dbg(ql_dbg_async, vha, 0x5091,
		    "Transceiver Insertion: %04x\n", mb[1]);
		set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
		break;

	case MBA_TRANS_REMOVE:
		ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				  struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

static srb_t *
qla_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
	struct req_que *req, void *iocb, u16 *ret_index)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp;
	uint16_t index;

	if (pkt->handle == QLA_SKIP_HANDLE)
		return NULL;

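	/* The low word of the IOCB handle indexes the outstanding_cmds array. */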
	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "%s: Invalid command index (%x) type %8ph.\n",
		    func, index, iocb);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return NULL;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "%s: Invalid completion handle (%x) -- timed-out.\n",
		    func, index);
		return NULL;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "%s: SRB handle (%x) mismatch %x.\n", func,
		    sp->handle, index);
		return NULL;
	}

	*ret_index = index;
	qla_put_fw_resources(sp->qpair, &sp->iores);
	return sp;
}

srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	uint16_t index;
	srb_t *sp;

	sp = qla_get_sp_from_handle(vha, func, req, iocb, &index);
	if (sp)
		req->outstanding_cmds[index] = NULL;

	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
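	/*
	 * A login IOCB may report status 0x30 even though mb0 says
	 * MBS_COMMAND_COMPLETE; treat that combination as success.
	 */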
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (sp->type == SRB_SCSI_CMD ||
	    sp->type == SRB_NVME_CMD ||
	    sp->type == SRB_TM_CMD) {
		ql_log(ql_log_warn, vha, 0x509d,
		    "Inconsistent event entry type %d\n", sp->type);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	si = &sp->u.iocb_cmd;
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

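	/* Copy back as many mailbox registers as both buffers can hold. */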
	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = pkt->mb[i];

	res = (si->u.mbx.in_mb[0] & MBS_MASK);

	sp->done(sp, res);
}

static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
		res = QLA_FUNCTION_FAILED;

	sp->done(sp, res);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
		bsg_job = sp->u.bsg_job;
		bsg_reply = bsg_job->reply;

		type = "ct pass-through";

		comp_status = le16_to_cpu(pkt->comp_status);

		/*
		 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
		 * fc payload to the caller
		 */
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);

		if (comp_status != CS_COMPLETE) {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				bsg_reply->reply_payload_rcv_len =
				    le16_to_cpu(pkt->rsp_info_len);

				ql_log(ql_log_warn, vha, 0x5048,
				    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				    type, comp_status,
				    bsg_reply->reply_payload_rcv_len);
			} else {
				ql_log(ql_log_warn, vha, 0x5049,
				    "CT pass-through-%s error comp_status=0x%x.\n",
				    type, comp_status);
				res = DID_ERROR << 16;
				bsg_reply->reply_payload_rcv_len = 0;
			}
			ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			    pkt, sizeof(*pkt));
		} else {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			bsg_job->reply_len = 0;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		break;
	}

	sp->done(sp, res);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	int res, logit = 1;
	struct srb_iocb *els;
	uint n;
	scsi_qla_host_t *vha;
	struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;

	sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;
	vha = sp->vha;

	type = NULL;

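	/* Collect the completion status and ELS error subcodes up front. */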
	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "rpt hst";
		break;
	case SRB_ELS_CMD_HST_NOLOGIN:
		type = "els";
		{
			struct els_entry_24xx *els = (void *)pkt;
			struct qla_bsg_auth_els_request *p =
			    (struct qla_bsg_auth_els_request *)bsg_job->request;

			ql_dbg(ql_dbg_user, vha, 0x700f,
			    "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
			    __func__, sc_to_str(p->e.sub_cmd),
			    e->d_id[2], e->d_id[1], e->d_id[0],
			    comp_status, p->e.extra_rx_xchg_address, bsg_job);

			if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
				if (sp->remap.remapped) {
					n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    sp->remap.rsp.buf,
					    sp->remap.rsp.len);
					ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
					    "%s: SG copied %x of %x\n",
					    __func__, n, sp->remap.rsp.len);
				} else {
					ql_dbg(ql_dbg_user, vha, 0x700f,
					    "%s: NOT REMAPPED (error)...!!!\n",
					    __func__);
				}
			}
		}
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		if (iocb_type != ELS_IOCB_TYPE) {
			ql_dbg(ql_dbg_user, vha, 0x5047,
			    "Completing %s: (%p) type=%d.\n",
			    type, sp, sp->type);
			sp->done(sp, 0);
			return;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	if (iocb_type == ELS_IOCB_TYPE) {
		els = &sp->u.iocb_cmd;
		els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
		els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
		els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
		els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
		if (comp_status == CS_COMPLETE) {
			res = DID_OK << 16;
		} else {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
				    ese->total_byte_count));

				if (sp->remap.remapped &&
				    ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
					ql_dbg(ql_dbg_user, vha, 0x503f,
					    "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
					    __func__, e->s_id[0], e->s_id[2], e->s_id[1],
					    e->d_id[2], e->d_id[1], e->d_id[0]);
					logit = 0;
				}

			} else if (comp_status == CS_PORT_LOGGED_OUT) {
				ql_dbg(ql_dbg_disc, vha, 0x911e,
				    "%s %d schedule session deletion\n",
				    __func__, __LINE__);

				els->u.els_plogi.len = 0;
				res = DID_IMM_RETRY << 16;
				qlt_schedule_sess_for_deletion(sp->fcport);
			} else {
				els->u.els_plogi.len = 0;
				res = DID_ERROR << 16;
			}

			if (sp->remap.remapped &&
			    ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
				if (logit) {
					ql_dbg(ql_dbg_user, vha, 0x503f,
					    "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
					    type, sp->handle, comp_status);

					ql_dbg(ql_dbg_user, vha, 0x503f,
					    "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
					    fw_status[1], fw_status[2],
					    le32_to_cpu(((struct els_sts_entry_24xx *)
						pkt)->total_byte_count),
					    e->s_id[0], e->s_id[2], e->s_id[1],
					    e->d_id[2], e->d_id[1], e->d_id[0]);
				}
				if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE &&
				    sp->type == SRB_ELS_CMD_HST_NOLOGIN) {
					ql_dbg(ql_dbg_edif, vha, 0x911e,
					    "%s rcv reject. Sched delete\n", __func__);
					qlt_schedule_sess_for_deletion(sp->fcport);
				}
			} else if (logit) {
				ql_log(ql_log_info, vha, 0x503f,
				    "%s IOCB Done hdl=%x comp_status=0x%x\n",
				    type, sp->handle, comp_status);
				ql_log(ql_log_info, vha, 0x503f,
				    "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
				    fw_status[1], fw_status[2],
				    le32_to_cpu(((struct els_sts_entry_24xx *)
					pkt)->total_byte_count),
				    e->s_id[0], e->s_id[2], e->s_id[1],
				    e->d_id[2], e->d_id[1], e->d_id[0]);
			}
		}
		goto els_ct_done;
	}

	/*
	 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    le32_to_cpu(ese->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le32_to_cpu(ese->total_byte_count));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le32_to_cpu(ese->error_subcode_1),
			    le32_to_cpu(ese->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
		}
		memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
		    fw_status, sizeof(fw_status));
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}
els_ct_done:

	sp->done(sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];
	int logit = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - %8phC hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, sp->vha, 0x5036,
		    "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
		    type, sp->handle, fcport->d_id.b24, fcport->port_name,
		    le32_to_cpu(logio->io_parameter[0]));

		vha->hw->exch_starvation = 0;
		data[0] = MBS_COMMAND_COMPLETE;

		if (sp->type == SRB_PRLI_CMD) {
			lio->u.logio.iop[0] =
			    le32_to_cpu(logio->io_parameter[0]);
			lio->u.logio.iop[1] =
			    le32_to_cpu(logio->io_parameter[1]);
			goto logio_done;
		}

		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

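		/* io_parameter[5] returns the common-features word from the PLOGI. */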
		lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
		if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
			fcport->flags |= FCF_FCSP_DEVICE;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	lio->u.logio.iop[0] = iop[0];
	lio->u.logio.iop[1] = iop[1];
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		logit = 0;
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		logit = 0;
		break;
	case LSC_SCODE_CMD_FAILED:
		if (iop[1] == 0x0606) {
			/*
			 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
			 * Target side acked.
			 */
			data[0] = MBS_COMMAND_COMPLETE;
			goto logio_done;
		}
		data[0] = MBS_COMMAND_ERROR;
		break;
	case LSC_SCODE_NOXCB:
		vha->hw->exch_starvation++;
		if (vha->hw->exch_starvation > 5) {
			ql_log(ql_log_warn, vha, 0xd046,
			    "Exchange starvation. Resetting RISC\n");

			vha->hw->exch_starvation = 0;

			if (IS_P3P_TYPE(vha->hw))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		fallthrough;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	if (logit)
		ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
		    "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
		    type, sp->handle, fcport->d_id.b24, fcport->port_name,
		    le16_to_cpu(logio->comp_status),
		    le32_to_cpu(logio->io_parameter[0]),
		    le32_to_cpu(logio->io_parameter[1]));
	else
		ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
		    "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
		    type, sp->handle, fcport->d_id.b24, fcport->port_name,
		    le16_to_cpu(logio->comp_status),
		    le32_to_cpu(logio->io_parameter[0]),
		    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	u16 comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	comp_status = le16_to_cpu(sts->comp_status);
	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
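		/* FCP RSP_INFO is present; byte 3 holds the response code. */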
		host_to_fcp_swap(sts->data, sizeof(sts->data));
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	switch (comp_status) {
	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_RESET:
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
			    "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n",
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa,
			    port_state_str[FCS_ONLINE],
			    comp_status);

			qlt_schedule_sess_for_deletion(fcport);
		}
		break;

	default:
		break;
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
		    sts, sizeof(*sts));

	sp->done(sp, 0);
}

static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    void *tsk, srb_t *sp)
{
	fc_port_t *fcport;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	uint16_t state_flags;
	struct nvmefc_fcp_req *fd;
	uint16_t ret = QLA_SUCCESS;
	__le16 comp_status = sts->comp_status;
	int logit = 0;

	iocb = &sp->u.iocb_cmd;
	fcport = sp->fcport;
	iocb->u.nvme.comp_status = comp_status;
	state_flags = le16_to_cpu(sts->state_flags);
	fd = iocb->u.nvme.desc;

	if (unlikely(iocb->u.nvme.aen_op))
		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
	else
		sp->qpair->cmd_completion_cnt++;

	if (unlikely(comp_status != CS_COMPLETE))
		logit = 1;

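	/* Firmware reports the untransferred byte count; derive what moved. */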
	fd->transferred_length = fd->payload_length -
	    le32_to_cpu(sts->residual_len);

	/*
	 * State flags: Bit 6 and 0.
	 * If 0 is set, we don't care about 6.
	 * In both cases the response was DMA'd to the host buffer.
	 * If both are 0, that is the good path case.
	 * If bit 6 is set and 0 is clear, we need to copy the response
	 * data from the status IOCB to the response buffer.
	 */
	if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
		iocb->u.nvme.rsp_pyld_len = 0;
	} else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
	    (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
		/* Response already DMA'd to fd->rspaddr. */
		iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
	} else if ((state_flags & SF_FCP_RSP_DMA)) {
		/*
		 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
		 * as an error.
		 */
		iocb->u.nvme.rsp_pyld_len = 0;
		fd->transferred_length = 0;
		ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
		    "Unexpected values in NVMe_RSP IU.\n");
		logit = 1;
	} else if (state_flags & SF_NVME_ERSP) {
		uint32_t *inbuf, *outbuf;
		uint16_t iter;

		inbuf = (uint32_t *)&sts->nvme_ersp_data;
		outbuf = (uint32_t *)fd->rspaddr;
		iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
		if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
		    sizeof(struct nvme_fc_ersp_iu))) {
			if (ql_mask_match(ql_dbg_io)) {
				WARN_ONCE(1, "Unexpected response payload length %u.\n",
				    iocb->u.nvme.rsp_pyld_len);
				ql_log(ql_log_warn, fcport->vha, 0x5100,
				    "Unexpected response payload length %u.\n",
				    iocb->u.nvme.rsp_pyld_len);
			}
			iocb->u.nvme.rsp_pyld_len =
			    cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
		}
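		/* Copy the ERSP IU into the response buffer, byte-swapping each dword. */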
		iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
		for (; iter; iter--)
			*outbuf++ = swab32(*inbuf++);
	}

	if (state_flags & SF_NVME_ERSP) {
		struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
		u32 tgt_xfer_len;

		tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
		if (fd->transferred_length != tgt_xfer_len) {
			ql_log(ql_log_warn, fcport->vha, 0x3079,
			    "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
			    tgt_xfer_len, fd->transferred_length);
			logit = 1;
		} else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
			/*
			 * Do not log if this is just an underflow and there
			 * is no data loss.
			 */
			logit = 0;
		}
	}

	if (unlikely(logit))
		ql_dbg(ql_dbg_io, fcport->vha, 0x5060,
		    "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
		    sp->name, sp->handle, comp_status,
		    fd->transferred_length, le32_to_cpu(sts->residual_len),
		    sts->ox_id);

	/*
	 * If transport error then Failure (HBA rejects request)
	 * otherwise transport will handle.
	 */
	switch (le16_to_cpu(comp_status)) {
	case CS_COMPLETE:
		break;

	case CS_RESET:
	case CS_PORT_UNAVAILABLE:
	case CS_PORT_LOGGED_OUT:
		fcport->nvme_flag |= NVME_FLAG_RESETTING;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
			    "Port to be marked lost on fcport=%06x, current "
			    "port state= %s comp_status %x.\n",
			    fcport->d_id.b24, port_state_str[FCS_ONLINE],
			    comp_status);

			qlt_schedule_sess_for_deletion(fcport);
		}
		fallthrough;
	case CS_ABORTED:
	case CS_PORT_BUSY:
		fd->transferred_length = 0;
		iocb->u.nvme.rsp_pyld_len = 0;
		ret = QLA_ABORTED;
		break;
	case CS_DATA_UNDERRUN:
		break;
	default:
		ret = QLA_FUNCTION_FAILED;
		break;
	}
	sp->done(sp, ret);
}

static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
    struct vp_ctrl_entry_24xx *vce)
{
	const char func[] = "CTRLVP-IOCB";
	srb_t *sp;
	int rval = QLA_SUCCESS;

	sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
	if (!sp)
		return;

	if (vce->entry_status != 0) {
		ql_dbg(ql_dbg_vport, vha, 0x10c4,
		    "%s: Failed to complete IOCB -- error status (%x)\n",
		    sp->name, vce->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_vport, vha, 0x10c5,
		    "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
		    sp->name, le16_to_cpu(vce->comp_status),
		    le16_to_cpu(vce->vp_idx_failed));
		rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_vport, vha, 0x10c6,
		    "Done %s.\n", __func__);
	}

	sp->rc = rval;
	sp->done(sp, rval);
}

/* Process a single response queue entry. */
static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
					   struct rsp_que *rsp,
					   sts_entry_t *pkt)
{
	sts21_entry_t *sts21_entry;
	sts22_entry_t *sts22_entry;
	uint16_t handle_cnt;
	uint16_t cnt;

	switch (pkt->entry_type) {
	case STATUS_TYPE:
		qla2x00_status_entry(vha, rsp, pkt);
		break;
	case STATUS_TYPE_21:
		sts21_entry = (sts21_entry_t *)pkt;
		handle_cnt = sts21_entry->handle_count;
		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
							  sts21_entry->handle[cnt]);
		break;
	case STATUS_TYPE_22:
		sts22_entry = (sts22_entry_t *)pkt;
		handle_cnt = sts22_entry->handle_count;
		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
							  sts22_entry->handle[cnt]);
		break;
	case STATUS_CONT_TYPE:
		qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
		break;
	case MBX_IOCB_TYPE:
		qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
		break;
	case CT_IOCB_TYPE:
		qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
		break;
	default:
		/* Type Not Supported. */
		ql_log(ql_log_warn, vha, 0x504a,
		    "Received unknown response pkt type %x entry status=%x.\n",
		    pkt->entry_type, pkt->entry_status);
		break;
	}
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

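	/* Walk the ring until we reach an entry already marked processed. */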
	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		qla2x00_process_response_entry(vha, rsp, pkt);
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

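	/*
	 * More sense data than this IOCB carries: stash the SRB so the
	 * remainder can be delivered via status continuation entries.
	 */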
	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t *ap = &sts24->data[12];
	uint8_t *ep = &sts24->data[20];
	uint32_t e_ref_tag, a_ref_tag;
	uint16_t e_app_tag, a_app_tag;
	uint16_t e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard = get_unaligned_le16(ap + 2);
	a_app_tag = get_unaligned_le16(ap + 0);
	a_ref_tag = get_unaligned_le32(ap + 4);
	e_guard = get_unaligned_le16(ep + 2);
	e_app_tag = get_unaligned_le16(ep + 0);
	e_ref_tag = get_unaligned_le32(ep + 4);

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
	    (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
	     a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct t10_pi_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = T10_PI_APP_ESCAPE;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = T10_PI_REF_ESCAPE;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
		set_host_byte(cmd, DID_ABORT);
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
		set_host_byte(cmd, DID_ABORT);
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
		set_host_byte(cmd, DID_ABORT);
		return 1;
	}

	return 1;
}

static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
				  struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct bsg_job *bsg_job = NULL;
	struct fc_bsg_request *bsg_request;
	struct fc_bsg_reply *bsg_reply;
	sts_entry_t *sts = pkt;
	struct sts_entry_24xx *sts24 = pkt;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	/* Free outstanding command slot. */
	req->outstanding_cmds[index] = NULL;
	bsg_job = sp->u.bsg_job;
	bsg_request = bsg_job->request;
	bsg_reply = bsg_job->reply;

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			vha->qla_stats.input_bytes +=
			    bsg_reply->reply_payload_rcv_len;
			vha->qla_stats.input_requests++;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;
	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	bsg_reply->reply_payload_rcv_len = 0;

done:
	/* Return the vendor specific reply to API */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/*
	 * Always return DID_OK, bsg will send the vendor specific response
	 * in this case only.
	 */
	sp->done(sp, DID_OK << 16);
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts = pkt;
	struct sts_entry_24xx *sts24 = pkt;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t ox_id;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t *rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;
	uint16_t state_flags = 0;
	uint16_t sts_qual = 0;

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
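	/*
	 * The status handle packs the request-queue number in its high
	 * word and the outstanding-command index in its low word.
	 */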
3329 handle = (uint32_t) LSW(sts->handle);
3330 que = MSW(sts->handle);
3331 req = ha->req_q_map[que];
3332
3333 /* Check for invalid queue pointer */
3334 if (req == NULL ||
3335 que >= find_first_zero_bit(addr: ha->req_qid_map, size: ha->max_req_queues)) {
3336 ql_dbg(ql_dbg_io, vha, 0x3059,
3337 fmt: "Invalid status handle (0x%x): Bad req pointer. req=%p, "
3338 "que=%u.\n", sts->handle, req, que);
3339 return;
3340 }
3341
3342 /* Validate handle. */
3343 if (handle < req->num_outstanding_cmds) {
3344 sp = req->outstanding_cmds[handle];
3345 if (!sp) {
3346 ql_dbg(ql_dbg_io, vha, 0x3075,
3347 fmt: "%s(%ld): Already returned command for status handle (0x%x).\n",
3348 __func__, vha->host_no, sts->handle);
3349 return;
3350 }
3351 } else {
3352 ql_dbg(ql_dbg_io, vha, 0x3017,
3353 fmt: "Invalid status handle, out of range (0x%x).\n",
3354 sts->handle);
3355
3356 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3357 if (IS_P3P_TYPE(ha))
3358 set_bit(FCOE_CTX_RESET_NEEDED, addr: &vha->dpc_flags);
3359 else
3360 set_bit(ISP_ABORT_NEEDED, addr: &vha->dpc_flags);
3361 qla2xxx_wake_dpc(vha);
3362 }
3363 return;
3364 }
3365 qla_put_fw_resources(qp: sp->qpair, iores: &sp->iores);
3366
3367 if (sp->cmd_type != TYPE_SRB) {
3368 req->outstanding_cmds[handle] = NULL;
3369 ql_dbg(ql_dbg_io, vha, 0x3015,
3370 fmt: "Unknown sp->cmd_type %x %p).\n",
3371 sp->cmd_type, sp);
3372 return;
3373 }
3374
3375 /* NVME completion. */
3376 if (sp->type == SRB_NVME_CMD) {
3377 req->outstanding_cmds[handle] = NULL;
3378 qla24xx_nvme_iocb_entry(vha, req, tsk: pkt, sp);
3379 return;
3380 }
3381
3382 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
3383 qla25xx_process_bidir_status_iocb(vha, pkt, req, index: handle);
3384 return;
3385 }
3386
3387 /* Task Management completion. */
3388 if (sp->type == SRB_TM_CMD) {
3389 qla24xx_tm_iocb_entry(vha, req, tsk: pkt);
3390 return;
3391 }
3392
3393 /* Fast path completion. */
3394 qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
3395 sp->qpair->cmd_completion_cnt++;
3396
3397 if (comp_status == CS_COMPLETE && scsi_status == 0) {
3398 qla2x00_process_completed_request(vha, req, index: handle);
3399
3400 return;
3401 }
3402
3403 cp = GET_CMD_SP(sp);
3404 if (cp == NULL) {
3405 ql_dbg(ql_dbg_io, vha, 0x3018,
3406 fmt: "Command already returned (0x%x/%p).\n",
3407 sts->handle, sp);
3408
3409 req->outstanding_cmds[handle] = NULL;
3410 return;
3411 }
3412
3413 lscsi_status = scsi_status & STATUS_MASK;
3414
3415 fcport = sp->fcport;
3416
3417 ox_id = 0;
3418 sense_len = par_sense_len = rsp_info_len = resid_len =
3419 fw_resid_len = 0;
3420 if (IS_FWI2_CAPABLE(ha)) {
3421 if (scsi_status & SS_SENSE_LEN_VALID)
3422 sense_len = le32_to_cpu(sts24->sense_len);
3423 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3424 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
3425 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
3426 resid_len = le32_to_cpu(sts24->rsp_residual_count);
3427 if (comp_status == CS_DATA_UNDERRUN)
3428 fw_resid_len = le32_to_cpu(sts24->residual_len);
3429 rsp_info = sts24->data;
3430 sense_data = sts24->data;
3431 host_to_fcp_swap(fcp: sts24->data, bsize: sizeof(sts24->data));
3432 ox_id = le16_to_cpu(sts24->ox_id);
3433 par_sense_len = sizeof(sts24->data);
3434 sts_qual = le16_to_cpu(sts24->status_qualifier);
3435 } else {
3436 if (scsi_status & SS_SENSE_LEN_VALID)
3437 sense_len = le16_to_cpu(sts->req_sense_length);
3438 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3439 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
3440 resid_len = le32_to_cpu(sts->residual_length);
3441 rsp_info = sts->rsp_info;
3442 sense_data = sts->req_sense_data;
3443 par_sense_len = sizeof(sts->req_sense_data);
3444 }
3445
3446 /* Check for any FCP transport errors. */
3447 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
3448 /* Sense data lies beyond any FCP RESPONSE data. */
3449 if (IS_FWI2_CAPABLE(ha)) {
3450 sense_data += rsp_info_len;
3451 par_sense_len -= rsp_info_len;
3452 }
3453 if (rsp_info_len > 3 && rsp_info[3]) {
3454 ql_dbg(ql_dbg_io, vha: fcport->vha, 0x3019,
3455 fmt: "FCP I/O protocol failure (0x%x/0x%x).\n",
3456 rsp_info_len, rsp_info[3]);
3457
3458 res = DID_BUS_BUSY << 16;
3459 goto out;
3460 }
3461 }
3462
3463 /* Check for overrun. */
3464 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
3465 scsi_status & SS_RESIDUAL_OVER)
3466 comp_status = CS_DATA_OVERRUN;
3467
3468 /*
3469 * Check retry_delay_timer value if we receive a busy or
3470 * queue full.
3471 */
3472 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
3473 lscsi_status == SAM_STAT_BUSY))
3474 qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
3475
3476 /*
3477 * Based on Host and scsi status generate status code for Linux
3478 */
3479 switch (comp_status) {
3480 case CS_COMPLETE:
3481 case CS_QUEUE_FULL:
3482 if (scsi_status == 0) {
3483 res = DID_OK << 16;
3484 break;
3485 }
3486 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
3487 resid = resid_len;
3488 scsi_set_resid(cmd: cp, resid);
3489
3490 if (!lscsi_status &&
3491 ((unsigned)(scsi_bufflen(cmd: cp) - resid) <
3492 cp->underflow)) {
3493 ql_dbg(ql_dbg_io, vha: fcport->vha, 0x301a,
3494 fmt: "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3495 resid, scsi_bufflen(cmd: cp));
3496
3497 res = DID_ERROR << 16;
3498 break;
3499 }
3500 }
3501 res = DID_OK << 16 | lscsi_status;
3502
3503 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3504 ql_dbg(ql_dbg_io, vha: fcport->vha, 0x301b,
3505 fmt: "QUEUE FULL detected.\n");
3506 break;
3507 }
3508 logit = 0;
3509 if (lscsi_status != SS_CHECK_CONDITION)
3510 break;
3511
3512 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3513 if (!(scsi_status & SS_SENSE_LEN_VALID))
3514 break;
3515
3516 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
3517 rsp, res);
3518 break;
3519
3520 case CS_DATA_UNDERRUN:
3521 /* Use F/W calculated residual length. */
3522 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
3523 scsi_set_resid(cmd: cp, resid);
3524 if (scsi_status & SS_RESIDUAL_UNDER) {
3525 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3526 ql_log(ql_log_warn, vha: fcport->vha, 0x301d,
3527 fmt: "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3528 resid, scsi_bufflen(cmd: cp));
3529
3530 res = DID_ERROR << 16 | lscsi_status;
3531 goto check_scsi_status;
3532 }
3533
3534 if (!lscsi_status &&
3535 ((unsigned)(scsi_bufflen(cmd: cp) - resid) <
3536 cp->underflow)) {
3537 ql_dbg(ql_dbg_io, vha: fcport->vha, 0x301e,
3538 fmt: "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3539 resid, scsi_bufflen(cmd: cp));
3540
3541 res = DID_ERROR << 16;
3542 break;
3543 }
3544 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
3545 lscsi_status != SAM_STAT_BUSY) {
3546 /*
3547 * scsi status of task set and busy are considered to be
3548 * task not completed.
3549 */
3550
3551 ql_log(ql_log_warn, vha: fcport->vha, 0x301f,
3552 fmt: "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3553 resid, scsi_bufflen(cmd: cp));
3554
3555 vha->interface_err_cnt++;
3556
3557 res = DID_ERROR << 16 | lscsi_status;
3558 goto check_scsi_status;
3559 } else {
3560 ql_dbg(ql_dbg_io, vha: fcport->vha, 0x3030,
3561 fmt: "scsi_status: 0x%x, lscsi_status: 0x%x\n",
3562 scsi_status, lscsi_status);
3563 }
3564
3565 res = DID_OK << 16 | lscsi_status;
3566 logit = 0;
3567
3568check_scsi_status:
3569 /*
3570 * Check to see if SCSI Status is non zero. If so report SCSI
3571 * Status.
3572 */
3573 if (lscsi_status != 0) {
3574 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3575 ql_dbg(ql_dbg_io, vha: fcport->vha, 0x3020,
3576 fmt: "QUEUE FULL detected.\n");
3577 logit = 1;
3578 break;
3579 }
3580 if (lscsi_status != SS_CHECK_CONDITION)
3581 break;
3582
3583 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3584 if (!(scsi_status & SS_SENSE_LEN_VALID))
3585 break;
3586
3587 qla2x00_handle_sense(sp, sense_data, par_sense_len,
3588 sense_len, rsp, res);
3589 }
3590 break;
3591
3592 case CS_PORT_LOGGED_OUT:
3593 case CS_PORT_CONFIG_CHG:
3594 case CS_PORT_BUSY:
3595 case CS_INCOMPLETE:
3596 case CS_PORT_UNAVAILABLE:
3597 case CS_TIMEOUT:
3598 case CS_RESET:
3599 case CS_EDIF_INV_REQ:
3600
3601 /*
3602 * We are going to have the fc class block the rport
3603 * while we try to recover so instruct the mid layer
3604 * to requeue until the class decides how to handle this.
3605 */
3606 res = DID_TRANSPORT_DISRUPTED << 16;
3607
3608 if (comp_status == CS_TIMEOUT) {
3609 if (IS_FWI2_CAPABLE(ha))
3610 break;
3611 else if ((le16_to_cpu(sts->status_flags) &
3612 SF_LOGOUT_SENT) == 0)
3613 break;
3614 }
3615
3616 if (atomic_read(v: &fcport->state) == FCS_ONLINE) {
3617 ql_dbg(ql_dbg_disc, vha: fcport->vha, 0x3021,
3618 fmt: "Port to be marked lost on fcport=%02x%02x%02x, current "
3619 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
3620 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3621 port_state_str[FCS_ONLINE],
3622 comp_status);
3623
3624 qlt_schedule_sess_for_deletion(fcport);
3625 }
3626
3627 break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;
		vha->hw_err_cnt++;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	case CS_DMA:
		ql_log(ql_log_info, fcport->vha, 0x3022,
		    "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b24,
		    ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);
		ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
		    pkt, sizeof(*sts24));
		res = DID_ERROR << 16;
		vha->hw_err_cnt++;
		break;
	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);

	if (rsp->status_srb == NULL)
		sp->done(sp, res);

	/* For I/Os, clearing outstanding_cmds[handle] means scsi_done was called. */
	req->outstanding_cmds[handle] = NULL;
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuation entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Handles extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);
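
	/*
	 * Example: a 96-byte sense buffer spans the original status
	 * IOCB plus one or more continuation entries; each entry
	 * consumes up to sizeof(pkt->data) bytes and advances the
	 * saved pointer and remaining length until sense_len is zero.
	 */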

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 * Return: 1 to allow further error analysis, 0 if no additional error
 * analysis is needed.
 */
static int
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;
	u16 index;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
	    pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
		return 0;

	switch (pkt->entry_type) {
	case NOTIFY_ACK_TYPE:
	case STATUS_CONT_TYPE:
	case LOGINOUT_PORT_IOCB_TYPE:
	case CT_IOCB_TYPE:
	case ELS_IOCB_TYPE:
	case ABORT_IOCB_TYPE:
	case MBX_IOCB_TYPE:
	default:
		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
		if (sp) {
			sp->done(sp, res);
			return 0;
		}
		break;

	case SA_UPDATE_IOCB_TYPE:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case CTIO_CRC2:
		return 1;
	case STATUS_TYPE:
		sp = qla_get_sp_from_handle(vha, func, req, pkt, &index);
		if (sp) {
			sp->done(sp, res);
			req->outstanding_cmds[index] = NULL;
			return 0;
		}
		break;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue (%04x).\n", que);
	return 0;
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	__le16 __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;
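
	/*
	 * Example: for a command with in_mb = MBX_2|MBX_0 (0x5), only
	 * mailbox_out[0] and mailbox_out[2] are loaded below; the other
	 * registers are skipped as the mask shifts right each pass.
	 */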

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = &reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = rd_reg_word(wptr);

		mboxes >>= 1;
		wptr++;
	}
}

static void
qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct abort_entry_24xx *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	srb_t *orig_sp = NULL;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	abt->u.abt.comp_status = pkt->comp_status;
	orig_sp = sp->cmd_sp;
	/* Need to pass original sp */
	if (orig_sp)
		qla_nvme_abort_process_comp_status(pkt, orig_sp);

	sp->done(sp, 0);
}

void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
    struct pt_ls4_request *pkt, struct req_que *req)
{
	srb_t *sp;
	const char func[] = "LS4_IOCB";
	uint16_t comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	comp_status = le16_to_cpu(pkt->status);
	sp->done(sp, comp_status);
}

/**
 * qla_chk_cont_iocb_avail - check that all continuation IOCBs have arrived
 *	before IOCB processing can start.
 * @vha: host adapter pointer
 * @rsp: response queue
 * @pkt: head IOCB describing how many continuation IOCBs follow
 * @rsp_q_in: hardware response-queue in pointer
 * Return: 0 if all IOCBs have arrived, -EIO if they have not.
 */
static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
{
	int start_pkt_ring_index;
	u32 iocb_cnt = 0;
	int rc = 0;

	if (pkt->entry_count == 1)
		return rc;

	/* ring_index was pre-incremented; set it back to the current pkt. */
	if (rsp->ring_index == 0)
		start_pkt_ring_index = rsp->length - 1;
	else
		start_pkt_ring_index = rsp->ring_index - 1;

	if (rsp_q_in < start_pkt_ring_index)
		/* The queue-in pointer has wrapped around the ring. */
		iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
	else
		iocb_cnt = rsp_q_in - start_pkt_ring_index;
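
	/*
	 * Worked example (hypothetical numbers): ring length 32,
	 * start_pkt_ring_index 30, rsp_q_in 1. The in pointer has
	 * wrapped, so iocb_cnt = 32 - 30 + 1 = 3 entries (indices
	 * 30, 31 and 0) are available for this multi-entry packet.
	 */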

	if (iocb_cnt < pkt->entry_count)
		rc = -EIO;

	ql_dbg(ql_dbg_init, vha, 0x5091,
	    "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
	    __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);

	return rc;
}

static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct mrk_entry_24xx *pkt)
{
	const char func[] = "MRK-IOCB";
	srb_t *sp;
	int res = QLA_SUCCESS;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->entry_status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n");
		res = QLA_COMMAND_ERROR;
	}
	sp->u.iocb_cmd.u.tmf.data = res;
	sp->done(sp, res);
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct purex_entry_24xx *purex_entry;
	struct purex_item *pure_item;
	struct pt_ls4_rx_unsol *p;
	u16 rsp_in = 0, cur_ring_index;
	int is_shadow_hba;

	if (!ha->flags.fw_started)
		return;

	if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
		rsp->qpair->rcv_intr = 1;

		if (!rsp->qpair->cpu_mapped)
			qla_cpu_update(rsp->qpair, raw_smp_processor_id());
	}
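
	/*
	 * The check above records the CPU this queue pair's interrupts
	 * land on, so submission and completion processing can stay on
	 * the same CPU where possible.
	 */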

#define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in)			\
	do {								\
		_rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr :		\
			rd_reg_dword_relaxed((_rsp)->rsp_q_in);		\
	} while (0)
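
	/*
	 * On shadow-register capable HBAs the firmware DMAs the queue-in
	 * index into host memory (*in_ptr), so reading it there avoids
	 * an MMIO read of rsp_q_in on every pass.
	 */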

	is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);

	__update_rsp_in(is_shadow_hba, rsp, rsp_in);

	while (rsp->ring_index != rsp_in &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
		cur_ring_index = rsp->ring_index;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
				goto process_err;

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
process_err:

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			if (qla_ini_mode_enabled(vha)) {
				pure_item = qla24xx_copy_std_pkt(vha, pkt);
				if (!pure_item)
					break;
				qla24xx_queue_purex_item(vha, pure_item,
				    qla24xx_process_abts);
				break;
			}
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
			    IS_QLA28XX(ha)) {
				/* Ensure that the ATIO queue is empty. */
				qlt_handle_abts_recv(vha, rsp,
				    (response_t *)pkt);
				break;
			} else {
				qlt_24xx_process_atio_queue(vha, 1);
			}
			fallthrough;
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case CTIO_CRC2:
			qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
			break;
		case PT_LS4_REQUEST:
			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
			    rsp->req);
			break;
		case NOTIFY_ACK_TYPE:
			if (pkt->handle == QLA_TGT_SKIP_HANDLE)
				qlt_response_pkt_all_vps(vha, rsp,
				    (response_t *)pkt);
			else
				qla24xxx_nack_iocb_entry(vha, rsp->req,
				    (struct nack_to_isp *)pkt);
			break;
		case MARKER_TYPE:
			qla_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx *)pkt);
			break;
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla24xx_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_24xx_entry *)pkt);
			break;
		case VP_CTRL_IOCB_TYPE:
			qla_ctrlvp_completed(vha, rsp->req,
			    (struct vp_ctrl_entry_24xx *)pkt);
			break;
		case PUREX_IOCB_TYPE:
			purex_entry = (void *)pkt;
			switch (purex_entry->els_frame_payload[3]) {
			case ELS_RDP:
				pure_item = qla24xx_copy_std_pkt(vha, pkt);
				if (!pure_item)
					break;
				qla24xx_queue_purex_item(vha, pure_item,
				    qla24xx_process_purex_rdp);
				break;
			case ELS_FPIN:
				if (!vha->hw->flags.scm_enabled) {
					ql_log(ql_log_warn, vha, 0x5094,
					    "SCM not active for this port\n");
					break;
				}
				pure_item = qla27xx_copy_fpin_pkt(vha,
				    (void **)&pkt, &rsp);
				__update_rsp_in(is_shadow_hba, rsp, rsp_in);
				if (!pure_item)
					break;
				qla24xx_queue_purex_item(vha, pure_item,
				    qla27xx_process_purex_fpin);
				break;

			case ELS_AUTH_ELS:
				if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
					/*
					 * ring_ptr and ring_index were
					 * pre-incremented above. Reset them
					 * back to current. Wait for next
					 * interrupt with all IOCBs to arrive
					 * and re-process.
					 */
					rsp->ring_ptr = (response_t *)pkt;
					rsp->ring_index = cur_ring_index;

					ql_dbg(ql_dbg_init, vha, 0x5091,
					    "Defer processing ELS opcode %#x...\n",
					    purex_entry->els_frame_payload[3]);
					return;
				}
				qla24xx_auth_els(vha, (void **)&pkt, &rsp);
				break;
			default:
				ql_log(ql_log_warn, vha, 0x509c,
				    "Discarding ELS Request opcode 0x%x\n",
				    purex_entry->els_frame_payload[3]);
			}
			break;
		case SA_UPDATE_IOCB_TYPE:
			qla28xx_sa_update_iocb_entry(vha, rsp->req,
			    (struct sa_update_28xx *)pkt);
			break;
		case PT_LS4_UNSOL:
			p = (void *)pkt;
			if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
				rsp->ring_ptr = (response_t *)pkt;
				rsp->ring_index = cur_ring_index;

				ql_dbg(ql_dbg_init, vha, 0x2124,
				    "Defer processing UNSOL LS req opcode %#x...\n",
				    p->payload[0]);
				return;
			}
			qla2xxx_process_purls_iocb((void **)&pkt, &rsp);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type 0x%x entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;

		wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
	} else {
		wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
	}
}

static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return;

	rval = QLA_SUCCESS;
	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_window, 0x0001);
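
	/*
	 * Poll (up to 10000 tries, 10us apart) for the window-select
	 * bit to latch; if it never does, retry with window 0x0003
	 * below before giving up.
	 */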
	for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			wrt_reg_dword(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	wrt_reg_dword(&reg->iobase_window, 0x0003);
	for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			wrt_reg_dword(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	wrt_reg_dword(&reg->iobase_window, 0x0000);
	rd_reg_dword(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by the system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;
	bool process_atio = false;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
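	/* Bound the loop so a stuck interrupt source cannot pin the CPU in hard-IRQ context. */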
	for (iter = 50; iter--; ) {
		stat = rd_reg_dword(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_dword(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = rd_reg_word(&reg->mailbox1);
			mb[2] = rd_reg_word(&reg->mailbox2);
			mb[3] = rd_reg_word(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE_27XX:
		case INTR_ATIO_QUE_UPDATE:
			process_atio = true;
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			process_atio = true;
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
		rd_reg_dword_relaxed(&reg->hccr);
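		/*
		 * Assumed hardware erratum: rev-1 ISP83xx parts need a
		 * short delay after clearing the RISC interrupt before
		 * the next host-status read.
		 */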
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (process_atio) {
		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
		qlt_24xx_process_atio_queue(vha, 0);
		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
	}

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
		rd_reg_dword_relaxed(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int status;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	unsigned long flags;
	bool process_atio = false;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = rd_reg_dword(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_dword(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);
			vha->hw_err_cnt++;

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = rd_reg_word(&reg->mailbox1);
			mb[2] = rd_reg_word(&reg->mailbox2);
			mb[3] = rd_reg_word(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE_27XX:
		case INTR_ATIO_QUE_UPDATE:
			process_atio = true;
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			process_atio = true;
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (process_atio) {
		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
		qlt_24xx_process_atio_queue(vha, 0);
		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
	}

	return IRQ_HANDLED;
}

irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	queue_work(ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

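/*
 * Handshake variant of qla2xxx_msix_rsp_q(): identical except that the
 * RISC interrupt must be acknowledged in the HCCR before the queue-pair
 * work is scheduled.
 */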
irqreturn_t
qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	reg = &ha->iobase->isp24;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	queue_work(ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static const struct qla_init_msix_entry msix_entries[] = {
	{ "default", qla24xx_msix_default },
	{ "rsp_q", qla24xx_msix_rsp_q },
	{ "atio_q", qla83xx_msix_atio_q },
	{ "qpair_multiq", qla2xxx_msix_rsp_q },
	{ "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
};

static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int i, ret;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int min_vecs = QLA_BASE_VECTORS;
	struct irq_affinity desc = {
		.pre_vectors = QLA_BASE_VECTORS,
	};

	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		desc.pre_vectors++;
		min_vecs++;
	}
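
	/*
	 * The first QLA_BASE_VECTORS vectors (default + rsp_q), plus the
	 * ATIO vector when target mode is active, are reserved as
	 * pre_vectors and excluded from the affinity spreading applied
	 * to the remaining queue-pair vectors.
	 */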

	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
		/* user wants to control IRQ setting for target mode */
		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
		    PCI_IRQ_MSIX);
	} else
		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
		    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
		    &desc);

	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		ql_log(ql_log_info, vha, 0x00c6,
		    "MSI-X: Using %d vectors\n", ret);
		ha->msix_count = ret;
		/* Recalculate queue values */
		if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOQ needs 1 vector. That's 1 less QPair */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			ha->max_qpairs = ha->max_req_queues - 1;
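
			/*
			 * Worked example: granted 8 vectors with target
			 * mode on: max_req_queues = 8 - 1 = 7, minus the
			 * ATIO vector = 6, so max_qpairs = 5.
			 */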
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Adjusted max number of queue pairs: %d.\n", ha->max_qpairs);
		}
	}
	vha->irq_offset = desc.pre_vectors;
	ha->msix_entries = kcalloc(ha->msix_count,
	    sizeof(struct qla_msix_entry),
	    GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto free_irqs;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = pci_irq_vector(ha->pdev, i);
		qentry->vector_base0 = i;
		qentry->entry = i;
		qentry->have_irq = 0;
		qentry->in_use = 0;
		qentry->handle = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < QLA_BASE_VECTORS; i++) {
		qentry = &ha->msix_entries[i];
		qentry->handle = rsp;
		rsp->msix = qentry;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, qentry->name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->in_use = 1;
	}

	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
		rsp->msix = qentry;
		qentry->handle = rsp;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no,
		    msix_entries[QLA_ATIO_VECTOR].name);
		qentry->in_use = 1;
		ret = request_irq(qentry->vector,
		    msix_entries[QLA_ATIO_VECTOR].handler,
		    0, qentry->name, rsp);
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla2x00_free_irqs(vha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_MQUE_CAPABLE(ha) &&
	    (ha->msixbase && ha->mqiobase && ha->max_qpairs))
		ha->mqenable = 1;
	else
		ha->mqenable = 0;

	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	return ret;

free_irqs:
	pci_free_irq_vectors(ha->pdev);
	goto msix_out;
}

int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
		goto skip_msi;

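	/* ql2xenablemsix == 2 selects MSI, skipping MSI-X entirely. */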
	if (ql2xenablemsix == 2)
		goto skip_msix;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back to MSI mode -- ret=%d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (ret > 0) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back to INTa mode -- ret=%d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d, already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
		/* Set max_qpairs to 0, as neither MSI-X nor MSI is enabled. */
		ha->max_qpairs = 0;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	wrt_reg_word(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;
	struct qla_msix_entry *qentry;
	int i;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		goto free_irqs;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled) {
		for (i = 0; i < ha->msix_count; i++) {
			qentry = &ha->msix_entries[i];
			if (qentry->have_irq) {
				irq_set_affinity_notifier(qentry->vector, NULL);
				free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
			}
		}
		kfree(ha->msix_entries);
		ha->msix_entries = NULL;
		ha->flags.msix_enabled = 0;
		ql_dbg(ql_dbg_init, vha, 0x0042,
		    "Disabled MSI-X.\n");
	} else {
		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
	}

free_irqs:
	pci_free_irq_vectors(ha->pdev);
}

int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
	struct qla_msix_entry *msix, int vector_type)
{
	const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	scnprintf(msix->name, sizeof(msix->name),
	    "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
	ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->handle = qpair;
	qla_mapq_init_qp_cpu_map(ha, msix, qpair);
	return ret;
}
