/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
#include <linux/blk-cgroup.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2
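
/* Note: both wait intervals above are expressed in seconds and are
 * scaled to jiffies at their points of use.
 */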

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};
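
/*
 * Note: this mirrors the 8-byte T10 Protection Information tuple that
 * accompanies each logical block: a 16-bit guard (a CRC16, or an IP
 * checksum on the host-memory side when the SCSI layer supplies one),
 * a 16-bit application tag, and a 32-bit reference tag that carries
 * the low 32 bits of the LBA for Type 1 protection.
 */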

static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;

	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
 * event per second, and wakes up the worker thread of @phba to process the
 * event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for
 * the worker thread. It reduces the queue depth of every scsi device on
 * each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

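	/*
	 * A sketch of the scaling below: with a device queue depth of
	 * 32, 8 resource errors and 24 successes, the depth is cut by
	 * 32 * 8 / (8 + 24) = 8, leaving a new depth of 24.  If the
	 * scaled cut rounds down to zero, the depth is reduced by one.
	 */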
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking fc_remote_port_delete() on every remote port. It is invoked
 * by EEH when a device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for a device with the SLI-3 interface
 * spec. Each scsi buffer contains all the information needed to initiate a
 * SCSI I/O. The non-DMAable buffer region holds the information used to
 * build the IOCB. The DMAable region holds memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are set up in the BPL and the BPL BDE is set up in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					    GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);
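
		/*
		 * Resulting layout of the single DMA allocation:
		 *
		 *   psb->data + 0                   : struct fcp_cmnd
		 *   psb->data + sizeof(fcp_cmnd)    : struct fcp_rsp
		 *   psb->data + sizeof(fcp_cmnd)
		 *             + sizeof(fcp_rsp)     : BPL (ulp_bde64 array)
		 */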

		/* Initialize local short-hand pointers. */
		bpl = (struct ulp_bde64 *)psb->dma_sgl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
		    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
							       unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
							sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
							putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
							putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_sgl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_sgl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.io_buf = psb;
		spin_lock_init(&psb->buf_lock);
		lpfc_release_scsi_buf_s3(phba, psb);
	}

	return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	int idx;

	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_for_each_entry_safe(psb, next_psb,
					 &qp->lpfc_abts_io_buf_list, list) {
			if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)
				continue;

			if (psb->rdata && psb->rdata->pnode &&
			    psb->rdata->pnode->vport == vport)
				psb->rdata = NULL;
		}
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into hdwq
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP or NVME aborted xri.
 **/
void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
			 struct sli4_wcqe_xri_aborted *axri, int idx)
{
	u16 xri = 0;
	u16 rxid = 0;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
	struct scsi_cmnd *cmd;
	int offline = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
	offline = pci_channel_offline(phba->pcidev);
	if (!offline) {
		xri = bf_get(lpfc_wcqe_xa_xri, axri);
		rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	}
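	/*
	 * When the PCI channel is offline no abort completion will ever
	 * arrive, so the loop below reclaims every buffer parked on the
	 * abort list by treating each buffer's own XRI as the aborted one.
	 */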
	qp = &phba->sli4_hba.hdwq[idx];
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_io_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				 &qp->lpfc_abts_io_buf_list, list) {
		if (offline)
			xri = psb->cur_iocbq.sli4_xritag;
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&psb->list);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			psb->status = IOSTAT_SUCCESS;
			if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) {
				qp->abts_nvme_io_bufs--;
				spin_unlock(&qp->abts_io_buf_list_lock);
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				if (!offline) {
					lpfc_sli4_nvme_xri_aborted(phba, axri,
								   psb);
					return;
				}
				lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
				spin_lock_irqsave(&phba->hbalock, iflag);
				spin_lock(&qp->abts_io_buf_list_lock);
				continue;
			}
			qp->abts_scsi_io_bufs--;
			spin_unlock(&qp->abts_io_buf_list_lock);

			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp && !offline) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}

			if (phba->cfg_fcp_wait_abts_rsp || offline) {
				spin_lock_irqsave(&psb->buf_lock, iflag);
				cmd = psb->pCmd;
				psb->pCmd = NULL;
				spin_unlock_irqrestore(&psb->buf_lock, iflag);

				/* The sdev is not guaranteed to be valid post
				 * scsi_done upcall.
				 */
				if (cmd)
					scsi_done(cmd);

				/*
				 * We expect there is an abort thread waiting
				 * for command completion; wake up the thread.
				 */
				spin_lock_irqsave(&psb->buf_lock, iflag);
				psb->cur_iocbq.cmd_flag &=
					~LPFC_DRIVER_ABORTED;
				if (psb->waitq)
					wake_up(psb->waitq);
				spin_unlock_irqrestore(&psb->buf_lock, iflag);
			}

			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			if (!offline)
				return;
			spin_lock_irqsave(&phba->hbalock, iflag);
			spin_lock(&qp->abts_io_buf_list_lock);
			continue;
		}
	}
	spin_unlock(&qp->abts_io_buf_list_lock);
	if (!offline) {
		for (i = 1; i <= phba->sli.last_iotag; i++) {
			iocbq = phba->sli.iocbq_lookup[i];

			if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
			    (iocbq->cmd_flag & LPFC_IO_LIBDFC))
				continue;
			if (iocbq->sli4_xritag != xri)
				continue;
			psb = container_of(iocbq, struct lpfc_io_buf,
					   cur_iocbq);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

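	/*
	 * Two lists reduce lock contention: consumers drain the "get"
	 * list while completions refill the "put" list.  Only when the
	 * get list runs dry are the two lists swapped under both locks.
	 */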
	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
			 list);
	if (!lpfc_cmd) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_io_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}

/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
 * and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_fcp_cmd;
	uint32_t cpu, idx;
	int tag;
	struct fcp_cmd_rsp_buf *tmp = NULL;

	cpu = raw_smp_processor_id();
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}
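	/*
	 * idx now selects the hardware queue: either derived from the
	 * block layer's hw context for this request (SCHED_BY_HDWQ) or
	 * taken from the per-CPU affinity map.
	 */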

	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
				   !phba->cfg_xri_rebalancing);
	if (!lpfc_cmd) {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
		return NULL;
	}

	/* Setup key fields in buffer that may have been changed
	 * if other protocols used this buffer.
	 */
	lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP;
	lpfc_cmd->prot_seg_cnt = 0;
	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->flags = 0;
	lpfc_cmd->start_time = jiffies;
	lpfc_cmd->waitq = NULL;
	lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	lpfc_cmd->prot_data_type = 0;
#endif
	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!tmp) {
		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
		return NULL;
	}

	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

	/*
	 * The first two SGEs are the FCP_CMD and FCP_RSP.
	 * The balance are sg list bdes.  Initialize the
	 * first two and leave the rest for queuecommand.
	 */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
	sgl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}

/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  struct scsi_cmnd *cmnd)
{
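	/*
	 * Dispatch through the SLI-rev specific handler installed at
	 * init time (lpfc_get_scsi_buf_s3 or lpfc_get_scsi_buf_s4).
	 */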
	return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.cmd_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @hdwq io_buf_list list.  For SLI4, XRIs are tied to the scsi buffer and
 * cannot be reused for at least RA_TOV if the exchange was aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	qp = psb->hdwq;
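
	/*
	 * An XBUSY buffer still owns its XRI on the wire; park it on the
	 * abort list until the XRI_ABORTED completion (handled in
	 * lpfc_sli4_io_xri_aborted) releases it.
	 */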
	if (psb->flags & LPFC_SBUF_XBUSY) {
		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		if (!phba->cfg_fcp_wait_abts_rsp)
			psb->pCmd = NULL;
		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
		qp->abts_scsi_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
	}
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
		atomic_dec(&psb->ndlp->cmd_pending);

	psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;
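
	/*
	 * struct fcp_cmnd is a whole number of 32-bit words, so the
	 * word-wise swap below covers the entire structure.
	 */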
	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping of the scatter-gather list of the
 * scsi_cmnd field of @lpfc_cmd for a device with the SLI-3 interface spec.
 * It scans through the sg elements and formats the bdes. This routine also
 * initializes all IOCB fields which are dependent on the scsi command
 * request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9064 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg.  Config %d, seg_cnt"
					" %d\n", __func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB.  If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->cmd_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->cmd_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	return 0;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK	0x20

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		   uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	u32 lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = scsi_prot_ref_tag(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = scsi_prot_interval(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if (phba->lpfc_injerr_lba < (u64)lba ||
		    (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - (u64)lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
		    (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
		    (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
			    sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						       0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						       0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				fallthrough;

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
#endif

/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		      uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

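	/*
	 * Opcode selection in brief: the wire side of each operation
	 * always carries a T10 CRC guard, while the host-memory side
	 * uses an IP checksum guard when the SCSI layer asks for one
	 * (SCSI_PROT_IP_CHECKSUM) and a CRC guard otherwise.
	 */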
1371 | if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { |
1372 | switch (scsi_get_prot_op(scmd: sc)) { |
1373 | case SCSI_PROT_READ_INSERT: |
1374 | case SCSI_PROT_WRITE_STRIP: |
1375 | *rxop = BG_OP_IN_NODIF_OUT_CSUM; |
1376 | *txop = BG_OP_IN_CSUM_OUT_NODIF; |
1377 | break; |
1378 | |
1379 | case SCSI_PROT_READ_STRIP: |
1380 | case SCSI_PROT_WRITE_INSERT: |
1381 | *rxop = BG_OP_IN_CRC_OUT_NODIF; |
1382 | *txop = BG_OP_IN_NODIF_OUT_CRC; |
1383 | break; |
1384 | |
1385 | case SCSI_PROT_READ_PASS: |
1386 | case SCSI_PROT_WRITE_PASS: |
1387 | *rxop = BG_OP_IN_CRC_OUT_CSUM; |
1388 | *txop = BG_OP_IN_CSUM_OUT_CRC; |
1389 | break; |
1390 | |
1391 | case SCSI_PROT_NORMAL: |
1392 | default: |
1393 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1394 | "9063 BLKGRD: Bad op/guard:%d/IP combination\n" , |
1395 | scsi_get_prot_op(sc)); |
1396 | ret = 1; |
1397 | break; |
1398 | |
1399 | } |
1400 | } else { |
1401 | switch (scsi_get_prot_op(scmd: sc)) { |
1402 | case SCSI_PROT_READ_STRIP: |
1403 | case SCSI_PROT_WRITE_INSERT: |
1404 | *rxop = BG_OP_IN_CRC_OUT_NODIF; |
1405 | *txop = BG_OP_IN_NODIF_OUT_CRC; |
1406 | break; |
1407 | |
1408 | case SCSI_PROT_READ_PASS: |
1409 | case SCSI_PROT_WRITE_PASS: |
1410 | *rxop = BG_OP_IN_CRC_OUT_CRC; |
1411 | *txop = BG_OP_IN_CRC_OUT_CRC; |
1412 | break; |
1413 | |
1414 | case SCSI_PROT_READ_INSERT: |
1415 | case SCSI_PROT_WRITE_STRIP: |
1416 | *rxop = BG_OP_IN_NODIF_OUT_CRC; |
1417 | *txop = BG_OP_IN_CRC_OUT_NODIF; |
1418 | break; |
1419 | |
1420 | case SCSI_PROT_NORMAL: |
1421 | default: |
1422 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1423 | "9075 BLKGRD: Bad op/guard:%d/CRC combination\n" , |
1424 | scsi_get_prot_op(sc)); |
1425 | ret = 1; |
1426 | break; |
1427 | } |
1428 | } |
1429 | |
1430 | return ret; |
1431 | } |
1432 | |
1433 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1434 | /** |
1435 | * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with |
1436 | * the specified SCSI command in order to force a guard tag error. |
1437 | * @phba: The Hba for which this call is being executed. |
1438 | * @sc: The SCSI command to examine |
1439 | * @txop: (out) BlockGuard operation for transmitted data |
1440 | * @rxop: (out) BlockGuard operation for received data |
1441 | * |
1442 | * Returns: zero on success; non-zero if tx and/or rx op cannot be determined |
1443 | * |
1444 | **/ |
1445 | static int |
1446 | lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
1447 | uint8_t *txop, uint8_t *rxop) |
1448 | { |
1449 | |
1450 | if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { |
1451 | switch (scsi_get_prot_op(scmd: sc)) { |
1452 | case SCSI_PROT_READ_INSERT: |
1453 | case SCSI_PROT_WRITE_STRIP: |
1454 | *rxop = BG_OP_IN_NODIF_OUT_CRC; |
1455 | *txop = BG_OP_IN_CRC_OUT_NODIF; |
1456 | break; |
1457 | |
1458 | case SCSI_PROT_READ_STRIP: |
1459 | case SCSI_PROT_WRITE_INSERT: |
1460 | *rxop = BG_OP_IN_CSUM_OUT_NODIF; |
1461 | *txop = BG_OP_IN_NODIF_OUT_CSUM; |
1462 | break; |
1463 | |
1464 | case SCSI_PROT_READ_PASS: |
1465 | case SCSI_PROT_WRITE_PASS: |
1466 | *rxop = BG_OP_IN_CSUM_OUT_CRC; |
1467 | *txop = BG_OP_IN_CRC_OUT_CSUM; |
1468 | break; |
1469 | |
1470 | case SCSI_PROT_NORMAL: |
1471 | default: |
1472 | break; |
1473 | |
1474 | } |
1475 | } else { |
1476 | switch (scsi_get_prot_op(scmd: sc)) { |
1477 | case SCSI_PROT_READ_STRIP: |
1478 | case SCSI_PROT_WRITE_INSERT: |
1479 | *rxop = BG_OP_IN_CSUM_OUT_NODIF; |
1480 | *txop = BG_OP_IN_NODIF_OUT_CSUM; |
1481 | break; |
1482 | |
1483 | case SCSI_PROT_READ_PASS: |
1484 | case SCSI_PROT_WRITE_PASS: |
1485 | *rxop = BG_OP_IN_CSUM_OUT_CSUM; |
1486 | *txop = BG_OP_IN_CSUM_OUT_CSUM; |
1487 | break; |
1488 | |
1489 | case SCSI_PROT_READ_INSERT: |
1490 | case SCSI_PROT_WRITE_STRIP: |
1491 | *rxop = BG_OP_IN_NODIF_OUT_CSUM; |
1492 | *txop = BG_OP_IN_CSUM_OUT_NODIF; |
1493 | break; |
1494 | |
1495 | case SCSI_PROT_NORMAL: |
1496 | default: |
1497 | break; |
1498 | } |
1499 | } |
1500 | |
1501 | return 0; |
1502 | } |
1503 | #endif |
1504 | |
1505 | /** |
1506 | * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data |
1507 | * @phba: The Hba for which this call is being executed. |
1508 | * @sc: pointer to scsi command we're working on |
1509 | * @bpl: pointer to buffer list for protection groups |
1510 | * @datasegcnt: number of segments of data that have been dma mapped |
1511 | * |
1512 | * This function sets up BPL buffer list for protection groups of |
1513 | * type LPFC_PG_TYPE_NO_DIF |
1514 | * |
1515 | * This is usually used when the HBA is instructed to generate |
1516 | * DIFs and insert them into data stream (or strip DIF from |
1517 | * incoming data stream) |
1518 | * |
1519 | * The buffer list consists of just one protection group described |
1520 | * below: |
1521 | * +-------------------------+ |
1522 | * start of prot group --> | PDE_5 | |
1523 | * +-------------------------+ |
1524 | * | PDE_6 | |
1525 | * +-------------------------+ |
1526 | * | Data BDE | |
1527 | * +-------------------------+ |
1528 | * |more Data BDE's ... (opt)| |
1529 | * +-------------------------+ |
1530 | * |
1531 | * |
1532 | * Note: Data s/g buffers have been dma mapped |
1533 | * |
1534 | * Returns the number of BDEs added to the BPL. |
1535 | **/ |
1536 | static int |
1537 | lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
1538 | struct ulp_bde64 *bpl, int datasegcnt) |
1539 | { |
1540 | struct scatterlist *sgde = NULL; /* s/g data entry */ |
1541 | struct lpfc_pde5 *pde5 = NULL; |
1542 | struct lpfc_pde6 *pde6 = NULL; |
1543 | dma_addr_t physaddr; |
1544 | int i = 0, num_bde = 0, status; |
1545 | int datadir = sc->sc_data_direction; |
1546 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1547 | uint32_t rc; |
1548 | #endif |
1549 | uint32_t checking = 1; |
1550 | uint32_t reftag; |
1551 | uint8_t txop, rxop; |
1552 | |
1553 | status = lpfc_sc_to_bg_opcodes(phba, sc, txop: &txop, rxop: &rxop); |
1554 | if (status) |
1555 | goto out; |
1556 | |
1557 | /* extract some info from the scsi command for pde*/ |
1558 | reftag = scsi_prot_ref_tag(scmd: sc); |
1559 | |
1560 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1561 | rc = lpfc_bg_err_inject(phba, sc, reftag: &reftag, NULL, new_guard: 1); |
1562 | if (rc) { |
1563 | if (rc & BG_ERR_SWAP) |
1564 | lpfc_bg_err_opcodes(phba, sc, txop: &txop, rxop: &rxop); |
1565 | if (rc & BG_ERR_CHECK) |
1566 | checking = 0; |
1567 | } |
1568 | #endif |
1569 | |
1570 | /* setup PDE5 with what we have */ |
1571 | pde5 = (struct lpfc_pde5 *) bpl; |
1572 | memset(pde5, 0, sizeof(struct lpfc_pde5)); |
1573 | bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); |
1574 | |
1575 | /* Endianness conversion if necessary for PDE5 */ |
1576 | pde5->word0 = cpu_to_le32(pde5->word0); |
1577 | pde5->reftag = cpu_to_le32(reftag); |
1578 | |
1579 | /* advance bpl and increment bde count */ |
1580 | num_bde++; |
1581 | bpl++; |
1582 | pde6 = (struct lpfc_pde6 *) bpl; |
1583 | |
1584 | /* setup PDE6 with the rest of the info */ |
1585 | memset(pde6, 0, sizeof(struct lpfc_pde6)); |
1586 | bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); |
1587 | bf_set(pde6_optx, pde6, txop); |
1588 | bf_set(pde6_oprx, pde6, rxop); |
1589 | |
1590 | /* |
1591 | * We only need to check the data on READs, for WRITEs |
1592 | * protection data is automatically generated, not checked. |
1593 | */ |
1594 | if (datadir == DMA_FROM_DEVICE) { |
1595 | if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) |
1596 | bf_set(pde6_ce, pde6, checking); |
1597 | else |
1598 | bf_set(pde6_ce, pde6, 0); |
1599 | |
1600 | if (sc->prot_flags & SCSI_PROT_REF_CHECK) |
1601 | bf_set(pde6_re, pde6, checking); |
1602 | else |
1603 | bf_set(pde6_re, pde6, 0); |
1604 | } |
1605 | bf_set(pde6_ai, pde6, 1); |
1606 | bf_set(pde6_ae, pde6, 0); |
1607 | bf_set(pde6_apptagval, pde6, 0); |
1608 | |
1609 | /* Endianness conversion if necessary for PDE6 */ |
1610 | pde6->word0 = cpu_to_le32(pde6->word0); |
1611 | pde6->word1 = cpu_to_le32(pde6->word1); |
1612 | pde6->word2 = cpu_to_le32(pde6->word2); |
1613 | |
1614 | /* advance bpl and increment bde count */ |
1615 | num_bde++; |
1616 | bpl++; |
1617 | |
1618 | /* assumption: caller has already run dma_map_sg on command data */ |
1619 | scsi_for_each_sg(sc, sgde, datasegcnt, i) { |
1620 | physaddr = sg_dma_address(sgde); |
1621 | bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); |
1622 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); |
1623 | bpl->tus.f.bdeSize = sg_dma_len(sgde); |
1624 | if (datadir == DMA_TO_DEVICE) |
1625 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
1626 | else |
1627 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; |
1628 | bpl->tus.w = le32_to_cpu(bpl->tus.w); |
1629 | bpl++; |
1630 | num_bde++; |
1631 | } |
1632 | |
1633 | out: |
1634 | return num_bde; |
1635 | } |
1636 | |
1637 | /** |
1638 | * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data |
1639 | * @phba: The Hba for which this call is being executed. |
1640 | * @sc: pointer to scsi command we're working on |
1641 | * @bpl: pointer to buffer list for protection groups |
1642 | * @datacnt: number of segments of data that have been dma mapped |
1643 | * @protcnt: number of segment of protection data that have been dma mapped |
1644 | * |
1645 | * This function sets up BPL buffer list for protection groups of |
1646 | * type LPFC_PG_TYPE_DIF |
1647 | * |
1648 | * This is usually used when DIFs are in their own buffers, |
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream. For read operations,
 * the HBA can extract the DIFs and place them in DIF buffers.
1652 | * |
1653 | * The buffer list for this type consists of one or more of the |
1654 | * protection groups described below: |
1655 | * +-------------------------+ |
1656 | * start of first prot group --> | PDE_5 | |
1657 | * +-------------------------+ |
1658 | * | PDE_6 | |
1659 | * +-------------------------+ |
1660 | * | PDE_7 (Prot BDE) | |
1661 | * +-------------------------+ |
1662 | * | Data BDE | |
1663 | * +-------------------------+ |
1664 | * |more Data BDE's ... (opt)| |
1665 | * +-------------------------+ |
1666 | * start of new prot group --> | PDE_5 | |
1667 | * +-------------------------+ |
1668 | * | ... | |
1669 | * +-------------------------+ |
1670 | * |
1671 | * Note: It is assumed that both data and protection s/g buffers have been |
1672 | * mapped for DMA |
1673 | * |
1674 | * Returns the number of BDEs added to the BPL. |
1675 | **/ |
1676 | static int |
1677 | lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
1678 | struct ulp_bde64 *bpl, int datacnt, int protcnt) |
1679 | { |
1680 | struct scatterlist *sgde = NULL; /* s/g data entry */ |
1681 | struct scatterlist *sgpe = NULL; /* s/g prot entry */ |
1682 | struct lpfc_pde5 *pde5 = NULL; |
1683 | struct lpfc_pde6 *pde6 = NULL; |
1684 | struct lpfc_pde7 *pde7 = NULL; |
1685 | dma_addr_t dataphysaddr, protphysaddr; |
1686 | unsigned short curr_prot = 0; |
1687 | unsigned int split_offset; |
1688 | unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; |
1689 | unsigned int protgrp_blks, protgrp_bytes; |
1690 | unsigned int remainder, subtotal; |
1691 | int status; |
1692 | int datadir = sc->sc_data_direction; |
1693 | unsigned char pgdone = 0, alldone = 0; |
1694 | unsigned blksize; |
1695 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1696 | uint32_t rc; |
1697 | #endif |
1698 | uint32_t checking = 1; |
1699 | uint32_t reftag; |
1700 | uint8_t txop, rxop; |
1701 | int num_bde = 0; |
1702 | |
	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);
1705 | |
1706 | if (!sgpe || !sgde) { |
1707 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1708 | "9020 Invalid s/g entry: data=x%px prot=x%px\n" , |
1709 | sgpe, sgde); |
1710 | return 0; |
1711 | } |
1712 | |
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1714 | if (status) |
1715 | goto out; |
1716 | |
1717 | /* extract some info from the scsi command */ |
	blksize = scsi_prot_interval(sc);
	reftag = scsi_prot_ref_tag(sc);
1720 | |
1721 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1726 | if (rc & BG_ERR_CHECK) |
1727 | checking = 0; |
1728 | } |
1729 | #endif |
1730 | |
1731 | split_offset = 0; |
1732 | do { |
1733 | /* Check to see if we ran out of space */ |
1734 | if (num_bde >= (phba->cfg_total_seg_cnt - 2)) |
1735 | return num_bde + 3; |
1736 | |
1737 | /* setup PDE5 with what we have */ |
1738 | pde5 = (struct lpfc_pde5 *) bpl; |
1739 | memset(pde5, 0, sizeof(struct lpfc_pde5)); |
1740 | bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); |
1741 | |
1742 | /* Endianness conversion if necessary for PDE5 */ |
1743 | pde5->word0 = cpu_to_le32(pde5->word0); |
1744 | pde5->reftag = cpu_to_le32(reftag); |
1745 | |
1746 | /* advance bpl and increment bde count */ |
1747 | num_bde++; |
1748 | bpl++; |
1749 | pde6 = (struct lpfc_pde6 *) bpl; |
1750 | |
1751 | /* setup PDE6 with the rest of the info */ |
1752 | memset(pde6, 0, sizeof(struct lpfc_pde6)); |
1753 | bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); |
1754 | bf_set(pde6_optx, pde6, txop); |
1755 | bf_set(pde6_oprx, pde6, rxop); |
1756 | |
1757 | if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) |
1758 | bf_set(pde6_ce, pde6, checking); |
1759 | else |
1760 | bf_set(pde6_ce, pde6, 0); |
1761 | |
1762 | if (sc->prot_flags & SCSI_PROT_REF_CHECK) |
1763 | bf_set(pde6_re, pde6, checking); |
1764 | else |
1765 | bf_set(pde6_re, pde6, 0); |
1766 | |
1767 | bf_set(pde6_ai, pde6, 1); |
1768 | bf_set(pde6_ae, pde6, 0); |
1769 | bf_set(pde6_apptagval, pde6, 0); |
1770 | |
1771 | /* Endianness conversion if necessary for PDE6 */ |
1772 | pde6->word0 = cpu_to_le32(pde6->word0); |
1773 | pde6->word1 = cpu_to_le32(pde6->word1); |
1774 | pde6->word2 = cpu_to_le32(pde6->word2); |
1775 | |
1776 | /* advance bpl and increment bde count */ |
1777 | num_bde++; |
1778 | bpl++; |
1779 | |
1780 | /* setup the first BDE that points to protection buffer */ |
1781 | protphysaddr = sg_dma_address(sgpe) + protgroup_offset; |
1782 | protgroup_len = sg_dma_len(sgpe) - protgroup_offset; |
1783 | |
1784 | /* must be integer multiple of the DIF block length */ |
1785 | BUG_ON(protgroup_len % 8); |
1786 | |
1787 | pde7 = (struct lpfc_pde7 *) bpl; |
1788 | memset(pde7, 0, sizeof(struct lpfc_pde7)); |
1789 | bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); |
1790 | |
1791 | pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); |
1792 | pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); |
1793 | |
1794 | protgrp_blks = protgroup_len / 8; |
1795 | protgrp_bytes = protgrp_blks * blksize; |
1796 | |
1797 | /* check if this pde is crossing the 4K boundary; if so split */ |
1798 | if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { |
1799 | protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); |
1800 | protgroup_offset += protgroup_remainder; |
1801 | protgrp_blks = protgroup_remainder / 8; |
1802 | protgrp_bytes = protgrp_blks * blksize; |
1803 | } else { |
1804 | protgroup_offset = 0; |
1805 | curr_prot++; |
1806 | } |
1807 | |
1808 | num_bde++; |
1809 | |
1810 | /* setup BDE's for data blocks associated with DIF data */ |
1811 | pgdone = 0; |
1812 | subtotal = 0; /* total bytes processed for current prot grp */ |
1813 | while (!pgdone) { |
1814 | /* Check to see if we ran out of space */ |
1815 | if (num_bde >= phba->cfg_total_seg_cnt) |
1816 | return num_bde + 1; |
1817 | |
1818 | if (!sgde) { |
1819 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1820 | "9065 BLKGRD:%s Invalid data segment\n" , |
1821 | __func__); |
1822 | return 0; |
1823 | } |
1824 | bpl++; |
1825 | dataphysaddr = sg_dma_address(sgde) + split_offset; |
1826 | bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); |
1827 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); |
1828 | |
1829 | remainder = sg_dma_len(sgde) - split_offset; |
1830 | |
1831 | if ((subtotal + remainder) <= protgrp_bytes) { |
1832 | /* we can use this whole buffer */ |
1833 | bpl->tus.f.bdeSize = remainder; |
1834 | split_offset = 0; |
1835 | |
1836 | if ((subtotal + remainder) == protgrp_bytes) |
1837 | pgdone = 1; |
1838 | } else { |
1839 | /* must split this buffer with next prot grp */ |
1840 | bpl->tus.f.bdeSize = protgrp_bytes - subtotal; |
1841 | split_offset += bpl->tus.f.bdeSize; |
1842 | } |
1843 | |
1844 | subtotal += bpl->tus.f.bdeSize; |
1845 | |
1846 | if (datadir == DMA_TO_DEVICE) |
1847 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
1848 | else |
1849 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; |
1850 | bpl->tus.w = le32_to_cpu(bpl->tus.w); |
1851 | |
1852 | num_bde++; |
1853 | |
1854 | if (split_offset) |
1855 | break; |
1856 | |
1857 | /* Move to the next s/g segment if possible */ |
1858 | sgde = sg_next(sgde); |
1859 | |
1860 | } |
1861 | |
1862 | if (protgroup_offset) { |
1863 | /* update the reference tag */ |
1864 | reftag += protgrp_blks; |
1865 | bpl++; |
1866 | continue; |
1867 | } |
1868 | |
1869 | /* are we done ? */ |
1870 | if (curr_prot == protcnt) { |
1871 | alldone = 1; |
1872 | } else if (curr_prot < protcnt) { |
1873 | /* advance to next prot buffer */ |
1874 | sgpe = sg_next(sgpe); |
1875 | bpl++; |
1876 | |
1877 | /* update the reference tag */ |
1878 | reftag += protgrp_blks; |
1879 | } else { |
1880 | /* if we're here, we have a bug */ |
1881 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1882 | "9054 BLKGRD: bug in %s\n" , __func__); |
1883 | } |
1884 | |
1885 | } while (!alldone); |
1886 | out: |
1887 | |
1888 | return num_bde; |
1889 | } |
1890 | |
1891 | /** |
1892 | * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data |
1893 | * @phba: The Hba for which this call is being executed. |
1894 | * @sc: pointer to scsi command we're working on |
1895 | * @sgl: pointer to buffer list for protection groups |
1896 | * @datasegcnt: number of segments of data that have been dma mapped |
1897 | * @lpfc_cmd: lpfc scsi command object pointer. |
1898 | * |
1899 | * This function sets up SGL buffer list for protection groups of |
1900 | * type LPFC_PG_TYPE_NO_DIF |
1901 | * |
1902 | * This is usually used when the HBA is instructed to generate |
 * DIFs and insert them into the data stream (or strip DIFs from
 * the incoming data stream).
1905 | * |
1906 | * The buffer list consists of just one protection group described |
1907 | * below: |
1908 | * +-------------------------+ |
1909 | * start of prot group --> | DI_SEED | |
1910 | * +-------------------------+ |
1911 | * | Data SGE | |
1912 | * +-------------------------+ |
1913 | * |more Data SGE's ... (opt)| |
1914 | * +-------------------------+ |
1915 | * |
1916 | * |
1917 | * Note: Data s/g buffers have been dma mapped |
1918 | * |
1919 | * Returns the number of SGEs added to the SGL. |
1920 | **/ |
1921 | static int |
1922 | lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
1923 | struct sli4_sge *sgl, int datasegcnt, |
1924 | struct lpfc_io_buf *lpfc_cmd) |
1925 | { |
1926 | struct scatterlist *sgde = NULL; /* s/g data entry */ |
1927 | struct sli4_sge_diseed *diseed = NULL; |
1928 | dma_addr_t physaddr; |
1929 | int i = 0, num_sge = 0, status; |
1930 | uint32_t reftag; |
1931 | uint8_t txop, rxop; |
1932 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1933 | uint32_t rc; |
1934 | #endif |
1935 | uint32_t checking = 1; |
1936 | uint32_t dma_len; |
1937 | uint32_t dma_offset = 0; |
1938 | struct sli4_hybrid_sgl *sgl_xtra = NULL; |
1939 | int j; |
1940 | bool lsp_just_set = false; |
1941 | |
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1943 | if (status) |
1944 | goto out; |
1945 | |
	/* extract some info from the scsi command for pde */
	reftag = scsi_prot_ref_tag(sc);
1948 | |
1949 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1954 | if (rc & BG_ERR_CHECK) |
1955 | checking = 0; |
1956 | } |
1957 | #endif |
1958 | |
1959 | /* setup DISEED with what we have */ |
1960 | diseed = (struct sli4_sge_diseed *) sgl; |
1961 | memset(diseed, 0, sizeof(struct sli4_sge_diseed)); |
1962 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); |
1963 | |
1964 | /* Endianness conversion if necessary */ |
1965 | diseed->ref_tag = cpu_to_le32(reftag); |
1966 | diseed->ref_tag_tran = diseed->ref_tag; |
1967 | |
1968 | /* |
1969 | * We only need to check the data on READs, for WRITEs |
1970 | * protection data is automatically generated, not checked. |
1971 | */ |
1972 | if (sc->sc_data_direction == DMA_FROM_DEVICE) { |
1973 | if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) |
1974 | bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); |
1975 | else |
1976 | bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); |
1977 | |
1978 | if (sc->prot_flags & SCSI_PROT_REF_CHECK) |
1979 | bf_set(lpfc_sli4_sge_dif_re, diseed, checking); |
1980 | else |
1981 | bf_set(lpfc_sli4_sge_dif_re, diseed, 0); |
1982 | } |
1983 | |
1984 | /* setup DISEED with the rest of the info */ |
1985 | bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); |
1986 | bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); |
1987 | |
1988 | bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); |
1989 | bf_set(lpfc_sli4_sge_dif_me, diseed, 0); |
1990 | |
1991 | /* Endianness conversion if necessary for DISEED */ |
1992 | diseed->word2 = cpu_to_le32(diseed->word2); |
1993 | diseed->word3 = cpu_to_le32(diseed->word3); |
1994 | |
1995 | /* advance bpl and increment sge count */ |
1996 | num_sge++; |
1997 | sgl++; |
1998 | |
1999 | /* assumption: caller has already run dma_map_sg on command data */ |
	sgde = scsi_sglist(sc);
2001 | j = 3; |
2002 | for (i = 0; i < datasegcnt; i++) { |
2003 | /* clear it */ |
2004 | sgl->word2 = 0; |
2005 | |
2006 | /* do we need to expand the segment */ |
2007 | if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && |
2008 | ((datasegcnt - 1) != i)) { |
2009 | /* set LSP type */ |
2010 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); |
2011 | |
			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2013 | |
2014 | if (unlikely(!sgl_xtra)) { |
2015 | lpfc_cmd->seg_cnt = 0; |
2016 | return 0; |
2017 | } |
2018 | sgl->addr_lo = cpu_to_le32(putPaddrLow( |
2019 | sgl_xtra->dma_phys_sgl)); |
2020 | sgl->addr_hi = cpu_to_le32(putPaddrHigh( |
2021 | sgl_xtra->dma_phys_sgl)); |
2022 | |
2023 | } else { |
2024 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); |
2025 | } |
2026 | |
2027 | if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { |
2028 | if ((datasegcnt - 1) == i) |
2029 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
2030 | physaddr = sg_dma_address(sgde); |
2031 | dma_len = sg_dma_len(sgde); |
2032 | sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); |
2033 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); |
2034 | |
2035 | bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); |
2036 | sgl->word2 = cpu_to_le32(sgl->word2); |
2037 | sgl->sge_len = cpu_to_le32(dma_len); |
2038 | |
2039 | dma_offset += dma_len; |
2040 | sgde = sg_next(sgde); |
2041 | |
2042 | sgl++; |
2043 | num_sge++; |
2044 | lsp_just_set = false; |
2045 | |
2046 | } else { |
2047 | sgl->word2 = cpu_to_le32(sgl->word2); |
2048 | sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); |
2049 | |
2050 | sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; |
2051 | i = i - 1; |
2052 | |
2053 | lsp_just_set = true; |
2054 | } |
2055 | |
2056 | j++; |
2057 | |
2058 | } |
2059 | |
2060 | out: |
2061 | return num_sge; |
2062 | } |
2063 | |
2064 | /** |
2065 | * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data |
2066 | * @phba: The Hba for which this call is being executed. |
2067 | * @sc: pointer to scsi command we're working on |
2068 | * @sgl: pointer to buffer list for protection groups |
2069 | * @datacnt: number of segments of data that have been dma mapped |
2070 | * @protcnt: number of segment of protection data that have been dma mapped |
2071 | * @lpfc_cmd: lpfc scsi command object pointer. |
2072 | * |
2073 | * This function sets up SGL buffer list for protection groups of |
2074 | * type LPFC_PG_TYPE_DIF |
2075 | * |
2076 | * This is usually used when DIFs are in their own buffers, |
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream. For read operations,
 * the HBA can extract the DIFs and place them in DIF buffers.
2080 | * |
2081 | * The buffer list for this type consists of one or more of the |
2082 | * protection groups described below: |
2083 | * +-------------------------+ |
2084 | * start of first prot group --> | DISEED | |
2085 | * +-------------------------+ |
2086 | * | DIF (Prot SGE) | |
2087 | * +-------------------------+ |
2088 | * | Data SGE | |
2089 | * +-------------------------+ |
2090 | * |more Data SGE's ... (opt)| |
2091 | * +-------------------------+ |
2092 | * start of new prot group --> | DISEED | |
2093 | * +-------------------------+ |
2094 | * | ... | |
2095 | * +-------------------------+ |
2096 | * |
2097 | * Note: It is assumed that both data and protection s/g buffers have been |
2098 | * mapped for DMA |
2099 | * |
2100 | * Returns the number of SGEs added to the SGL. |
2101 | **/ |
2102 | static int |
2103 | lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
2104 | struct sli4_sge *sgl, int datacnt, int protcnt, |
2105 | struct lpfc_io_buf *lpfc_cmd) |
2106 | { |
2107 | struct scatterlist *sgde = NULL; /* s/g data entry */ |
2108 | struct scatterlist *sgpe = NULL; /* s/g prot entry */ |
2109 | struct sli4_sge_diseed *diseed = NULL; |
2110 | dma_addr_t dataphysaddr, protphysaddr; |
2111 | unsigned short curr_prot = 0; |
2112 | unsigned int split_offset; |
2113 | unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; |
2114 | unsigned int protgrp_blks, protgrp_bytes; |
2115 | unsigned int remainder, subtotal; |
2116 | int status; |
2117 | unsigned char pgdone = 0, alldone = 0; |
2118 | unsigned blksize; |
2119 | uint32_t reftag; |
2120 | uint8_t txop, rxop; |
2121 | uint32_t dma_len; |
2122 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
2123 | uint32_t rc; |
2124 | #endif |
2125 | uint32_t checking = 1; |
2126 | uint32_t dma_offset = 0; |
2127 | int num_sge = 0, j = 2; |
2128 | struct sli4_hybrid_sgl *sgl_xtra = NULL; |
2129 | |
	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);
2132 | |
2133 | if (!sgpe || !sgde) { |
2134 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2135 | "9082 Invalid s/g entry: data=x%px prot=x%px\n" , |
2136 | sgpe, sgde); |
2137 | return 0; |
2138 | } |
2139 | |
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2141 | if (status) |
2142 | goto out; |
2143 | |
2144 | /* extract some info from the scsi command */ |
	blksize = scsi_prot_interval(sc);
	reftag = scsi_prot_ref_tag(sc);
2147 | |
2148 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2153 | if (rc & BG_ERR_CHECK) |
2154 | checking = 0; |
2155 | } |
2156 | #endif |
2157 | |
2158 | split_offset = 0; |
2159 | do { |
2160 | /* Check to see if we ran out of space */ |
2161 | if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) && |
2162 | !(phba->cfg_xpsgl)) |
2163 | return num_sge + 3; |
2164 | |
2165 | /* DISEED and DIF have to be together */ |
2166 | if (!((j + 1) % phba->border_sge_num) || |
2167 | !((j + 2) % phba->border_sge_num) || |
2168 | !((j + 3) % phba->border_sge_num)) { |
2169 | sgl->word2 = 0; |
2170 | |
2171 | /* set LSP type */ |
2172 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); |
2173 | |
			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2175 | |
2176 | if (unlikely(!sgl_xtra)) { |
2177 | goto out; |
2178 | } else { |
2179 | sgl->addr_lo = cpu_to_le32(putPaddrLow( |
2180 | sgl_xtra->dma_phys_sgl)); |
2181 | sgl->addr_hi = cpu_to_le32(putPaddrHigh( |
2182 | sgl_xtra->dma_phys_sgl)); |
2183 | } |
2184 | |
2185 | sgl->word2 = cpu_to_le32(sgl->word2); |
2186 | sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); |
2187 | |
2188 | sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; |
2189 | j = 0; |
2190 | } |
2191 | |
2192 | /* setup DISEED with what we have */ |
2193 | diseed = (struct sli4_sge_diseed *) sgl; |
2194 | memset(diseed, 0, sizeof(struct sli4_sge_diseed)); |
2195 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); |
2196 | |
2197 | /* Endianness conversion if necessary */ |
2198 | diseed->ref_tag = cpu_to_le32(reftag); |
2199 | diseed->ref_tag_tran = diseed->ref_tag; |
2200 | |
2201 | if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) { |
2202 | bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); |
2203 | } else { |
2204 | bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); |
2205 | /* |
2206 | * When in this mode, the hardware will replace |
2207 | * the guard tag from the host with a |
2208 | * newly generated good CRC for the wire. |
2209 | * Switch to raw mode here to avoid this |
2210 | * behavior. What the host sends gets put on the wire. |
2211 | */ |
2212 | if (txop == BG_OP_IN_CRC_OUT_CRC) { |
2213 | txop = BG_OP_RAW_MODE; |
2214 | rxop = BG_OP_RAW_MODE; |
2215 | } |
2216 | } |
2217 | |
2218 | |
2219 | if (sc->prot_flags & SCSI_PROT_REF_CHECK) |
2220 | bf_set(lpfc_sli4_sge_dif_re, diseed, checking); |
2221 | else |
2222 | bf_set(lpfc_sli4_sge_dif_re, diseed, 0); |
2223 | |
2224 | /* setup DISEED with the rest of the info */ |
2225 | bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); |
2226 | bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); |
2227 | |
2228 | bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); |
2229 | bf_set(lpfc_sli4_sge_dif_me, diseed, 0); |
2230 | |
2231 | /* Endianness conversion if necessary for DISEED */ |
2232 | diseed->word2 = cpu_to_le32(diseed->word2); |
2233 | diseed->word3 = cpu_to_le32(diseed->word3); |
2234 | |
2235 | /* advance sgl and increment bde count */ |
2236 | num_sge++; |
2237 | |
2238 | sgl++; |
2239 | j++; |
2240 | |
2241 | /* setup the first BDE that points to protection buffer */ |
2242 | protphysaddr = sg_dma_address(sgpe) + protgroup_offset; |
2243 | protgroup_len = sg_dma_len(sgpe) - protgroup_offset; |
2244 | |
2245 | /* must be integer multiple of the DIF block length */ |
2246 | BUG_ON(protgroup_len % 8); |
2247 | |
2248 | /* Now setup DIF SGE */ |
2249 | sgl->word2 = 0; |
2250 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF); |
2251 | sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); |
2252 | sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); |
2253 | sgl->word2 = cpu_to_le32(sgl->word2); |
2254 | sgl->sge_len = 0; |
2255 | |
2256 | protgrp_blks = protgroup_len / 8; |
2257 | protgrp_bytes = protgrp_blks * blksize; |
2258 | |
2259 | /* check if DIF SGE is crossing the 4K boundary; if so split */ |
2260 | if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) { |
2261 | protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff); |
2262 | protgroup_offset += protgroup_remainder; |
2263 | protgrp_blks = protgroup_remainder / 8; |
2264 | protgrp_bytes = protgrp_blks * blksize; |
2265 | } else { |
2266 | protgroup_offset = 0; |
2267 | curr_prot++; |
2268 | } |
2269 | |
2270 | num_sge++; |
2271 | |
2272 | /* setup SGE's for data blocks associated with DIF data */ |
2273 | pgdone = 0; |
2274 | subtotal = 0; /* total bytes processed for current prot grp */ |
2275 | |
2276 | sgl++; |
2277 | j++; |
2278 | |
2279 | while (!pgdone) { |
2280 | /* Check to see if we ran out of space */ |
2281 | if ((num_sge >= phba->cfg_total_seg_cnt) && |
2282 | !phba->cfg_xpsgl) |
2283 | return num_sge + 1; |
2284 | |
2285 | if (!sgde) { |
2286 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2287 | "9086 BLKGRD:%s Invalid data segment\n" , |
2288 | __func__); |
2289 | return 0; |
2290 | } |
2291 | |
2292 | if (!((j + 1) % phba->border_sge_num)) { |
2293 | sgl->word2 = 0; |
2294 | |
2295 | /* set LSP type */ |
2296 | bf_set(lpfc_sli4_sge_type, sgl, |
2297 | LPFC_SGE_TYPE_LSP); |
2298 | |
				sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
								 lpfc_cmd);
2301 | |
2302 | if (unlikely(!sgl_xtra)) { |
2303 | goto out; |
2304 | } else { |
2305 | sgl->addr_lo = cpu_to_le32( |
2306 | putPaddrLow(sgl_xtra->dma_phys_sgl)); |
2307 | sgl->addr_hi = cpu_to_le32( |
2308 | putPaddrHigh(sgl_xtra->dma_phys_sgl)); |
2309 | } |
2310 | |
2311 | sgl->word2 = cpu_to_le32(sgl->word2); |
2312 | sgl->sge_len = cpu_to_le32( |
2313 | phba->cfg_sg_dma_buf_size); |
2314 | |
2315 | sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; |
2316 | } else { |
2317 | dataphysaddr = sg_dma_address(sgde) + |
2318 | split_offset; |
2319 | |
2320 | remainder = sg_dma_len(sgde) - split_offset; |
2321 | |
2322 | if ((subtotal + remainder) <= protgrp_bytes) { |
2323 | /* we can use this whole buffer */ |
2324 | dma_len = remainder; |
2325 | split_offset = 0; |
2326 | |
2327 | if ((subtotal + remainder) == |
2328 | protgrp_bytes) |
2329 | pgdone = 1; |
2330 | } else { |
2331 | /* must split this buffer with next |
2332 | * prot grp |
2333 | */ |
2334 | dma_len = protgrp_bytes - subtotal; |
2335 | split_offset += dma_len; |
2336 | } |
2337 | |
2338 | subtotal += dma_len; |
2339 | |
2340 | sgl->word2 = 0; |
2341 | sgl->addr_lo = cpu_to_le32(putPaddrLow( |
2342 | dataphysaddr)); |
2343 | sgl->addr_hi = cpu_to_le32(putPaddrHigh( |
2344 | dataphysaddr)); |
2345 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
2346 | bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); |
2347 | bf_set(lpfc_sli4_sge_type, sgl, |
2348 | LPFC_SGE_TYPE_DATA); |
2349 | |
2350 | sgl->sge_len = cpu_to_le32(dma_len); |
2351 | dma_offset += dma_len; |
2352 | |
2353 | num_sge++; |
2354 | |
2355 | if (split_offset) { |
2356 | sgl++; |
2357 | j++; |
2358 | break; |
2359 | } |
2360 | |
2361 | /* Move to the next s/g segment if possible */ |
2362 | sgde = sg_next(sgde); |
2363 | |
2364 | sgl++; |
2365 | } |
2366 | |
2367 | j++; |
2368 | } |
2369 | |
2370 | if (protgroup_offset) { |
2371 | /* update the reference tag */ |
2372 | reftag += protgrp_blks; |
2373 | continue; |
2374 | } |
2375 | |
2376 | /* are we done ? */ |
2377 | if (curr_prot == protcnt) { |
2378 | /* mark the last SGL */ |
2379 | sgl--; |
2380 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
2381 | alldone = 1; |
2382 | } else if (curr_prot < protcnt) { |
2383 | /* advance to next prot buffer */ |
2384 | sgpe = sg_next(sgpe); |
2385 | |
2386 | /* update the reference tag */ |
2387 | reftag += protgrp_blks; |
2388 | } else { |
2389 | /* if we're here, we have a bug */ |
2390 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2391 | "9085 BLKGRD: bug in %s\n" , __func__); |
2392 | } |
2393 | |
2394 | } while (!alldone); |
2395 | |
2396 | out: |
2397 | |
2398 | return num_sge; |
2399 | } |
2400 | |
2401 | /** |
 * lpfc_prot_group_type - Get protection group type of SCSI command
2403 | * @phba: The Hba for which this call is being executed. |
2404 | * @sc: pointer to scsi command we're working on |
2405 | * |
2406 | * Given a SCSI command that supports DIF, determine composition of protection |
2407 | * groups involved in setting up buffer lists |
2408 | * |
2409 | * Returns: Protection group type (with or without DIF) |
2410 | * |
2411 | **/ |
2412 | static int |
2413 | lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) |
2414 | { |
2415 | int ret = LPFC_PG_TYPE_INVALID; |
	unsigned char op = scsi_get_prot_op(sc);
2417 | |
2418 | switch (op) { |
2419 | case SCSI_PROT_READ_STRIP: |
2420 | case SCSI_PROT_WRITE_INSERT: |
2421 | ret = LPFC_PG_TYPE_NO_DIF; |
2422 | break; |
2423 | case SCSI_PROT_READ_INSERT: |
2424 | case SCSI_PROT_WRITE_STRIP: |
2425 | case SCSI_PROT_READ_PASS: |
2426 | case SCSI_PROT_WRITE_PASS: |
2427 | ret = LPFC_PG_TYPE_DIF_BUF; |
2428 | break; |
2429 | default: |
2430 | if (phba) |
2431 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2432 | "9021 Unsupported protection op:%d\n" , |
2433 | op); |
2434 | break; |
2435 | } |
2436 | return ret; |
2437 | } |
2438 | |
2439 | /** |
2440 | * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard |
2441 | * @phba: The Hba for which this call is being executed. |
2442 | * @lpfc_cmd: The scsi buffer which is going to be adjusted. |
2443 | * |
2444 | * Adjust the data length to account for how much data |
2445 | * is actually on the wire. |
2446 | * |
2447 | * returns the adjusted data length |
2448 | **/ |
2449 | static int |
2450 | lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, |
2451 | struct lpfc_io_buf *lpfc_cmd) |
2452 | { |
2453 | struct scsi_cmnd *sc = lpfc_cmd->pCmd; |
2454 | int fcpdl; |
2455 | |
	fcpdl = scsi_bufflen(sc);
2457 | |
2458 | /* Check if there is protection data on the wire */ |
2459 | if (sc->sc_data_direction == DMA_FROM_DEVICE) { |
2460 | /* Read check for protection data */ |
		if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2462 | return fcpdl; |
2463 | |
2464 | } else { |
2465 | /* Write check for protection data */ |
		if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2467 | return fcpdl; |
2468 | } |
2469 | |
2470 | /* |
	 * If we are in DIF Type 1 mode every data block has an 8 byte
	 * DIF (trailer) attached to it. Must adjust the FCP data length
	 * to account for the protection data.
2474 | */ |
	fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;
2476 | |
2477 | return fcpdl; |
2478 | } |
2479 | |
2480 | /** |
2481 | * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec |
2482 | * @phba: The Hba for which this call is being executed. |
2483 | * @lpfc_cmd: The scsi buffer which is going to be prep'ed. |
2484 | * |
2485 | * This is the protection/DIF aware version of |
2486 | * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the |
2487 | * two functions eventually, but for now, it's here. |
2488 | * RETURNS 0 - SUCCESS, |
2489 | * 1 - Failed DMA map, retry. |
 * 2 - Invalid scsi cmd or prot-type. Do not retry.
2491 | **/ |
2492 | static int |
2493 | lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, |
2494 | struct lpfc_io_buf *lpfc_cmd) |
2495 | { |
2496 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
2497 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
2498 | struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; |
2499 | IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; |
2500 | uint32_t num_bde = 0; |
2501 | int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; |
2502 | int prot_group_type = 0; |
2503 | int fcpdl; |
2504 | int ret = 1; |
2505 | struct lpfc_vport *vport = phba->pport; |
2506 | |
2507 | /* |
	 * Start the lpfc command prep by bumping the bpl beyond the fcp_cmnd
	 * and fcp_rsp regions to the first data bde entry.
2510 | */ |
2511 | bpl += 2; |
	if (scsi_sg_count(scsi_cmnd)) {
2513 | /* |
2514 | * The driver stores the segment count returned from dma_map_sg |
		 * because this is a count of dma-mappings used to map the use_sg
2516 | * pages. They are not guaranteed to be the same for those |
2517 | * architectures that implement an IOMMU. |
2518 | */ |
2519 | datasegcnt = dma_map_sg(&phba->pcidev->dev, |
2520 | scsi_sglist(scsi_cmnd), |
2521 | scsi_sg_count(scsi_cmnd), datadir); |
2522 | if (unlikely(!datasegcnt)) |
2523 | return 1; |
2524 | |
2525 | lpfc_cmd->seg_cnt = datasegcnt; |
2526 | |
2527 | /* First check if data segment count from SCSI Layer is good */ |
2528 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { |
2529 | WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); |
2530 | ret = 2; |
2531 | goto err; |
2532 | } |
2533 | |
		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2535 | |
2536 | switch (prot_group_type) { |
2537 | case LPFC_PG_TYPE_NO_DIF: |
2538 | |
2539 | /* Here we need to add a PDE5 and PDE6 to the count */ |
2540 | if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) { |
2541 | ret = 2; |
2542 | goto err; |
2543 | } |
2544 | |
			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
2547 | /* we should have 2 or more entries in buffer list */ |
2548 | if (num_bde < 2) { |
2549 | ret = 2; |
2550 | goto err; |
2551 | } |
2552 | break; |
2553 | |
2554 | case LPFC_PG_TYPE_DIF_BUF: |
2555 | /* |
2556 | * This type indicates that protection buffers are |
2557 | * passed to the driver, so that needs to be prepared |
2558 | * for DMA |
2559 | */ |
2560 | protsegcnt = dma_map_sg(&phba->pcidev->dev, |
2561 | scsi_prot_sglist(scsi_cmnd), |
2562 | scsi_prot_sg_count(scsi_cmnd), datadir); |
2563 | if (unlikely(!protsegcnt)) { |
				scsi_dma_unmap(scsi_cmnd);
2565 | return 1; |
2566 | } |
2567 | |
2568 | lpfc_cmd->prot_seg_cnt = protsegcnt; |
2569 | |
2570 | /* |
			 * There is a minimum of 4 BPLs used for every
2572 | * protection data segment. |
2573 | */ |
2574 | if ((lpfc_cmd->prot_seg_cnt * 4) > |
2575 | (phba->cfg_total_seg_cnt - 2)) { |
2576 | ret = 2; |
2577 | goto err; |
2578 | } |
2579 | |
			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
2582 | /* we should have 3 or more entries in buffer list */ |
2583 | if ((num_bde < 3) || |
2584 | (num_bde > phba->cfg_total_seg_cnt)) { |
2585 | ret = 2; |
2586 | goto err; |
2587 | } |
2588 | break; |
2589 | |
2590 | case LPFC_PG_TYPE_INVALID: |
2591 | default: |
			scsi_dma_unmap(scsi_cmnd);
2593 | lpfc_cmd->seg_cnt = 0; |
2594 | |
2595 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2596 | "9022 Unexpected protection group %i\n" , |
2597 | prot_group_type); |
2598 | return 2; |
2599 | } |
2600 | } |
2601 | |
2602 | /* |
2603 | * Finish initializing those IOCB fields that are dependent on the |
2604 | * scsi_cmnd request_buffer. Note that the bdeSize is explicitly |
2605 | * reinitialized since all iocb memory resources are used many times |
2606 | * for transmit, receive, and continuation bpl's. |
2607 | */ |
2608 | iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); |
2609 | iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64)); |
2610 | iocb_cmd->ulpBdeCount = 1; |
2611 | iocb_cmd->ulpLe = 1; |
2612 | |
2613 | fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); |
2614 | fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); |
2615 | |
2616 | /* |
2617 | * Due to difference in data length between DIF/non-DIF paths, |
2618 | * we need to set word 4 of IOCB here |
2619 | */ |
2620 | iocb_cmd->un.fcpi.fcpi_parm = fcpdl; |
2621 | |
2622 | /* |
2623 | * For First burst, we may need to adjust the initial transfer |
2624 | * length for DIF |
2625 | */ |
2626 | if (iocb_cmd->un.fcpi.fcpi_XRdy && |
2627 | (fcpdl < vport->cfg_first_burst_size)) |
2628 | iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl; |
2629 | |
2630 | return 0; |
2631 | err: |
2632 | if (lpfc_cmd->seg_cnt) |
		scsi_dma_unmap(scsi_cmnd);
2634 | if (lpfc_cmd->prot_seg_cnt) |
2635 | dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), |
2636 | scsi_prot_sg_count(scsi_cmnd), |
2637 | scsi_cmnd->sc_data_direction); |
2638 | |
2639 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2640 | "9023 Cannot setup S/G List for HBA" |
2641 | "IO segs %d/%d BPL %d SCSI %d: %d %d\n" , |
2642 | lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, |
2643 | phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, |
2644 | prot_group_type, num_bde); |
2645 | |
2646 | lpfc_cmd->seg_cnt = 0; |
2647 | lpfc_cmd->prot_seg_cnt = 0; |
2648 | return ret; |
2649 | } |
2650 | |
2651 | /* |
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CRC algorithm
 * via crc_t10dif.
2655 | */ |
2656 | static uint16_t |
2657 | lpfc_bg_crc(uint8_t *data, int count) |
2658 | { |
2659 | uint16_t crc = 0; |
2660 | uint16_t x; |
2661 | |
2662 | crc = crc_t10dif(data, count); |
2663 | x = cpu_to_be16(crc); |
2664 | return x; |
2665 | } |
2666 | |
2667 | /* |
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CSUM algorithm
 * via ip_compute_csum.
2671 | */ |
2672 | static uint16_t |
2673 | lpfc_bg_csum(uint8_t *data, int count) |
2674 | { |
2675 | uint16_t ret; |
2676 | |
	ret = ip_compute_csum(data, count);
2678 | return ret; |
2679 | } |
2680 | |
2681 | /* |
2682 | * This function examines the protection data to try to determine |
2683 | * what type of T10-DIF error occurred. |
2684 | */ |
2685 | static void |
2686 | lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) |
2687 | { |
2688 | struct scatterlist *sgpe; /* s/g prot entry */ |
2689 | struct scatterlist *sgde; /* s/g data entry */ |
2690 | struct scsi_cmnd *cmd = lpfc_cmd->pCmd; |
2691 | struct scsi_dif_tuple *src = NULL; |
2692 | uint8_t *data_src = NULL; |
2693 | uint16_t guard_tag; |
2694 | uint16_t start_app_tag, app_tag; |
2695 | uint32_t start_ref_tag, ref_tag; |
2696 | int prot, protsegcnt; |
2697 | int err_type, len, data_len; |
2698 | int chk_ref, chk_app, chk_guard; |
2699 | uint16_t sum; |
2700 | unsigned blksize; |
2701 | |
2702 | err_type = BGS_GUARD_ERR_MASK; |
2703 | sum = 0; |
2704 | guard_tag = 0; |
2705 | |
2706 | /* First check to see if there is protection data to examine */ |
	prot = scsi_get_prot_op(cmd);
2708 | if ((prot == SCSI_PROT_READ_STRIP) || |
2709 | (prot == SCSI_PROT_WRITE_INSERT) || |
2710 | (prot == SCSI_PROT_NORMAL)) |
2711 | goto out; |
2712 | |
2713 | /* Currently the driver just supports ref_tag and guard_tag checking */ |
2714 | chk_ref = 1; |
2715 | chk_app = 0; |
2716 | chk_guard = 0; |
2717 | |
2718 | /* Setup a ptr to the protection data provided by the SCSI host */ |
2719 | sgpe = scsi_prot_sglist(cmd); |
2720 | protsegcnt = lpfc_cmd->prot_seg_cnt; |
2721 | |
2722 | if (sgpe && protsegcnt) { |
2723 | |
2724 | /* |
2725 | * We will only try to verify guard tag if the segment |
2726 | * data length is a multiple of the blksize. |
2727 | */ |
2728 | sgde = scsi_sglist(cmd); |
		blksize = scsi_prot_interval(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
2731 | data_len = sgde->length; |
2732 | if ((data_len & (blksize - 1)) == 0) |
2733 | chk_guard = 1; |
2734 | |
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = scsi_prot_ref_tag(cmd);
2737 | start_app_tag = src->app_tag; |
2738 | len = sgpe->length; |
2739 | while (src && protsegcnt) { |
2740 | while (len) { |
2741 | |
2742 | /* |
2743 | * First check to see if a protection data |
2744 | * check is valid |
2745 | */ |
2746 | if ((src->ref_tag == T10_PI_REF_ESCAPE) || |
2747 | (src->app_tag == T10_PI_APP_ESCAPE)) { |
2748 | start_ref_tag++; |
2749 | goto skipit; |
2750 | } |
2751 | |
2752 | /* First Guard Tag checking */ |
2753 | if (chk_guard) { |
2754 | guard_tag = src->guard_tag; |
2755 | if (cmd->prot_flags |
2756 | & SCSI_PROT_IP_CHECKSUM) |
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
2762 | if ((guard_tag != sum)) { |
2763 | err_type = BGS_GUARD_ERR_MASK; |
2764 | goto out; |
2765 | } |
2766 | } |
2767 | |
2768 | /* Reference Tag checking */ |
2769 | ref_tag = be32_to_cpu(src->ref_tag); |
2770 | if (chk_ref && (ref_tag != start_ref_tag)) { |
2771 | err_type = BGS_REFTAG_ERR_MASK; |
2772 | goto out; |
2773 | } |
2774 | start_ref_tag++; |
2775 | |
2776 | /* App Tag checking */ |
2777 | app_tag = src->app_tag; |
2778 | if (chk_app && (app_tag != start_app_tag)) { |
2779 | err_type = BGS_APPTAG_ERR_MASK; |
2780 | goto out; |
2781 | } |
2782 | skipit: |
2783 | len -= sizeof(struct scsi_dif_tuple); |
2784 | if (len < 0) |
2785 | len = 0; |
2786 | src++; |
2787 | |
2788 | data_src += blksize; |
2789 | data_len -= blksize; |
2790 | |
2791 | /* |
2792 | * Are we at the end of the Data segment? |
2793 | * The data segment is only used for Guard |
2794 | * tag checking. |
2795 | */ |
2796 | if (chk_guard && (data_len == 0)) { |
2797 | chk_guard = 0; |
2798 | sgde = sg_next(sgde); |
2799 | if (!sgde) |
2800 | goto out; |
2801 | |
					data_src = (uint8_t *)sg_virt(sgde);
2803 | data_len = sgde->length; |
2804 | if ((data_len & (blksize - 1)) == 0) |
2805 | chk_guard = 1; |
2806 | } |
2807 | } |
2808 | |
		/* Go to the next protection data segment */
2810 | sgpe = sg_next(sgpe); |
2811 | if (sgpe) { |
			src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2813 | len = sgpe->length; |
2814 | } else { |
2815 | src = NULL; |
2816 | } |
2817 | protsegcnt--; |
2818 | } |
2819 | } |
2820 | out: |
2821 | if (err_type == BGS_GUARD_ERR_MASK) { |
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
		set_host_byte(cmd, DID_ABORT);
2824 | phba->bg_guard_err_cnt++; |
2825 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
2826 | "9069 BLKGRD: reftag %x grd_tag err %x != %x\n" , |
2827 | scsi_prot_ref_tag(cmd), |
2828 | sum, guard_tag); |
2829 | |
2830 | } else if (err_type == BGS_REFTAG_ERR_MASK) { |
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
		set_host_byte(cmd, DID_ABORT);
2833 | |
2834 | phba->bg_reftag_err_cnt++; |
2835 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
2836 | "9066 BLKGRD: reftag %x ref_tag err %x != %x\n" , |
2837 | scsi_prot_ref_tag(cmd), |
2838 | ref_tag, start_ref_tag); |
2839 | |
2840 | } else if (err_type == BGS_APPTAG_ERR_MASK) { |
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
		set_host_byte(cmd, DID_ABORT);
2843 | |
2844 | phba->bg_apptag_err_cnt++; |
2845 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
2846 | "9041 BLKGRD: reftag %x app_tag err %x != %x\n" , |
2847 | scsi_prot_ref_tag(cmd), |
2848 | app_tag, start_app_tag); |
2849 | } |
2850 | } |
2851 | |
2852 | /* |
2853 | * This function checks for BlockGuard errors detected by |
2854 | * the HBA. In case of errors, the ASC/ASCQ fields in the |
2855 | * sense buffer will be set accordingly, paired with |
2856 | * ILLEGAL_REQUEST to signal to the kernel that the HBA |
2857 | * detected corruption. |
2858 | * |
2859 | * Returns: |
2860 | * 0 - No error found |
2861 | * 1 - BlockGuard error found |
2862 | * -1 - Internal error (bad profile, ...etc) |
2863 | */ |
2864 | static int |
2865 | lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, |
2866 | struct lpfc_iocbq *pIocbOut) |
2867 | { |
2868 | struct scsi_cmnd *cmd = lpfc_cmd->pCmd; |
2869 | struct sli3_bg_fields *bgf; |
2870 | int ret = 0; |
2871 | struct lpfc_wcqe_complete *wcqe; |
2872 | u32 status; |
2873 | u32 bghm = 0; |
2874 | u32 bgstat = 0; |
2875 | u64 failing_sector = 0; |
2876 | |
2877 | if (phba->sli_rev == LPFC_SLI_REV4) { |
2878 | wcqe = &pIocbOut->wcqe_cmpl; |
2879 | status = bf_get(lpfc_wcqe_c_status, wcqe); |
2880 | |
2881 | if (status == CQE_STATUS_DI_ERROR) { |
2882 | /* Guard Check failed */ |
2883 | if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) |
2884 | bgstat |= BGS_GUARD_ERR_MASK; |
2885 | |
2886 | /* AppTag Check failed */ |
2887 | if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) |
2888 | bgstat |= BGS_APPTAG_ERR_MASK; |
2889 | |
2890 | /* RefTag Check failed */ |
2891 | if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) |
2892 | bgstat |= BGS_REFTAG_ERR_MASK; |
2893 | |
2894 | /* Check to see if there was any good data before the |
2895 | * error |
2896 | */ |
2897 | if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { |
2898 | bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK; |
2899 | bghm = wcqe->total_data_placed; |
2900 | } |
2901 | |
2902 | /* |
2903 | * Set ALL the error bits to indicate we don't know what |
2904 | * type of error it is. |
2905 | */ |
2906 | if (!bgstat) |
2907 | bgstat |= (BGS_REFTAG_ERR_MASK | |
2908 | BGS_APPTAG_ERR_MASK | |
2909 | BGS_GUARD_ERR_MASK); |
2910 | } |
2911 | |
2912 | } else { |
2913 | bgf = &pIocbOut->iocb.unsli3.sli3_bg; |
2914 | bghm = bgf->bghm; |
2915 | bgstat = bgf->bgstat; |
2916 | } |
2917 | |
2918 | if (lpfc_bgs_get_invalid_prof(bgstat)) { |
2919 | cmd->result = DID_ERROR << 16; |
2920 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
2921 | "9072 BLKGRD: Invalid BG Profile in cmd " |
2922 | "0x%x reftag 0x%x blk cnt 0x%x " |
2923 | "bgstat=x%x bghm=x%x\n" , cmd->cmnd[0], |
2924 | scsi_prot_ref_tag(cmd), |
2925 | scsi_logical_block_count(cmd), bgstat, bghm); |
2926 | ret = (-1); |
2927 | goto out; |
2928 | } |
2929 | |
2930 | if (lpfc_bgs_get_uninit_dif_block(bgstat)) { |
2931 | cmd->result = DID_ERROR << 16; |
2932 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
2933 | "9073 BLKGRD: Invalid BG PDIF Block in cmd " |
2934 | "0x%x reftag 0x%x blk cnt 0x%x " |
2935 | "bgstat=x%x bghm=x%x\n" , cmd->cmnd[0], |
2936 | scsi_prot_ref_tag(cmd), |
2937 | scsi_logical_block_count(cmd), bgstat, bghm); |
2938 | ret = (-1); |
2939 | goto out; |
2940 | } |
2941 | |
2942 | if (lpfc_bgs_get_guard_err(bgstat)) { |
2943 | ret = 1; |
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
		set_host_byte(cmd, DID_ABORT);
2946 | phba->bg_guard_err_cnt++; |
2947 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
2948 | "9055 BLKGRD: Guard Tag error in cmd " |
2949 | "0x%x reftag 0x%x blk cnt 0x%x " |
2950 | "bgstat=x%x bghm=x%x\n" , cmd->cmnd[0], |
2951 | scsi_prot_ref_tag(cmd), |
2952 | scsi_logical_block_count(cmd), bgstat, bghm); |
2953 | } |
2954 | |
2955 | if (lpfc_bgs_get_reftag_err(bgstat)) { |
2956 | ret = 1; |
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
		set_host_byte(cmd, DID_ABORT);
2959 | phba->bg_reftag_err_cnt++; |
2960 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
2961 | "9056 BLKGRD: Ref Tag error in cmd " |
2962 | "0x%x reftag 0x%x blk cnt 0x%x " |
2963 | "bgstat=x%x bghm=x%x\n" , cmd->cmnd[0], |
2964 | scsi_prot_ref_tag(cmd), |
2965 | scsi_logical_block_count(cmd), bgstat, bghm); |
2966 | } |
2967 | |
2968 | if (lpfc_bgs_get_apptag_err(bgstat)) { |
2969 | ret = 1; |
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
		set_host_byte(cmd, DID_ABORT);
2972 | phba->bg_apptag_err_cnt++; |
2973 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
2974 | "9061 BLKGRD: App Tag error in cmd " |
2975 | "0x%x reftag 0x%x blk cnt 0x%x " |
2976 | "bgstat=x%x bghm=x%x\n" , cmd->cmnd[0], |
2977 | scsi_prot_ref_tag(cmd), |
2978 | scsi_logical_block_count(cmd), bgstat, bghm); |
2979 | } |
2980 | |
2981 | if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { |
2982 | /* |
2983 | * setup sense data descriptor 0 per SPC-4 as an information |
2984 | * field, and put the failing LBA in it. |
2985 | * This code assumes there was also a guard/app/ref tag error |
2986 | * indication. |
2987 | */ |
2988 | cmd->sense_buffer[7] = 0xc; /* Additional sense length */ |
2989 | cmd->sense_buffer[8] = 0; /* Information descriptor type */ |
2990 | cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ |
2991 | cmd->sense_buffer[10] = 0x80; /* Validity bit */ |
2992 | |
2993 | /* bghm is a "on the wire" FC frame based count */ |
2994 | switch (scsi_get_prot_op(scmd: cmd)) { |
2995 | case SCSI_PROT_READ_INSERT: |
2996 | case SCSI_PROT_WRITE_STRIP: |
2997 | bghm /= cmd->device->sector_size; |
2998 | break; |
2999 | case SCSI_PROT_READ_STRIP: |
3000 | case SCSI_PROT_WRITE_INSERT: |
3001 | case SCSI_PROT_READ_PASS: |
3002 | case SCSI_PROT_WRITE_PASS: |
3003 | bghm /= (cmd->device->sector_size + |
3004 | sizeof(struct scsi_dif_tuple)); |
3005 | break; |
3006 | } |
3007 | |
		failing_sector = scsi_get_lba(cmd);
3009 | failing_sector += bghm; |
3010 | |
3011 | /* Descriptor Information */ |
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3013 | } |
3014 | |
3015 | if (!ret) { |
3016 | /* No error was reported - problem in FW? */ |
3017 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
3018 | "9057 BLKGRD: Unknown error in cmd " |
3019 | "0x%x reftag 0x%x blk cnt 0x%x " |
3020 | "bgstat=x%x bghm=x%x\n" , cmd->cmnd[0], |
3021 | scsi_prot_ref_tag(cmd), |
3022 | scsi_logical_block_count(cmd), bgstat, bghm); |
3023 | |
3024 | /* Calculate what type of error it was */ |
3025 | lpfc_calc_bg_err(phba, lpfc_cmd); |
3026 | } |
3027 | out: |
3028 | return ret; |
3029 | } |
3030 | |
3031 | /** |
3032 | * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec |
3033 | * @phba: The Hba for which this call is being executed. |
3034 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
3035 | * |
3036 | * This routine does the pci dma mapping for scatter-gather list of scsi cmnd |
3037 | * field of @lpfc_cmd for device with SLI-4 interface spec. |
3038 | * |
3039 | * Return codes: |
3040 | * 2 - Error - Do not retry |
3041 | * 1 - Error - Retry |
3042 | * 0 - Success |
3043 | **/ |
3044 | static int |
3045 | lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) |
3046 | { |
3047 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
3048 | struct scatterlist *sgel = NULL; |
3049 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
3050 | struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; |
3051 | struct sli4_sge *first_data_sgl; |
3052 | struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; |
3053 | struct lpfc_vport *vport = phba->pport; |
3054 | union lpfc_wqe128 *wqe = &pwqeq->wqe; |
3055 | dma_addr_t physaddr; |
3056 | uint32_t dma_len; |
3057 | uint32_t dma_offset = 0; |
3058 | int nseg, i, j; |
3059 | struct ulp_bde64 *bde; |
3060 | bool lsp_just_set = false; |
3061 | struct sli4_hybrid_sgl *sgl_xtra = NULL; |
3062 | |
3063 | /* |
3064 | * There are three possibilities here - use scatter-gather segment, use |
3065 | * the single mapping, or neither. Start the lpfc command prep by |
3066 | * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first |
3067 | * data bde entry. |
3068 | */ |
	if (scsi_sg_count(scsi_cmnd)) {
3070 | /* |
3071 | * The driver stores the segment count returned from dma_map_sg |
		 * because this is a count of dma-mappings used to map the use_sg
3073 | * pages. They are not guaranteed to be the same for those |
3074 | * architectures that implement an IOMMU. |
3075 | */ |
3076 | |
		nseg = scsi_dma_map(scsi_cmnd);
3078 | if (unlikely(nseg <= 0)) |
3079 | return 1; |
3080 | sgl += 1; |
3081 | /* clear the last flag in the fcp_rsp map entry */ |
3082 | sgl->word2 = le32_to_cpu(sgl->word2); |
3083 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
3084 | sgl->word2 = cpu_to_le32(sgl->word2); |
3085 | sgl += 1; |
3086 | first_data_sgl = sgl; |
3087 | lpfc_cmd->seg_cnt = nseg; |
3088 | if (!phba->cfg_xpsgl && |
3089 | lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { |
3090 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
3091 | "9074 BLKGRD:" |
3092 | " %s: Too many sg segments from " |
3093 | "dma_map_sg. Config %d, seg_cnt %d\n" , |
3094 | __func__, phba->cfg_sg_seg_cnt, |
3095 | lpfc_cmd->seg_cnt); |
3096 | WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); |
3097 | lpfc_cmd->seg_cnt = 0; |
			scsi_dma_unmap(scsi_cmnd);
3099 | return 2; |
3100 | } |
3101 | |
3102 | /* |
3103 | * The driver established a maximum scatter-gather segment count |
3104 | * during probe that limits the number of sg elements in any |
3105 | * single scsi command. Just run through the seg_cnt and format |
3106 | * the sge's. |
3107 | * When using SLI-3 the driver will try to fit all the BDEs into |
3108 | * the IOCB. If it can't then the BDEs get added to a BPL as it |
3109 | * does for SLI-2 mode. |
3110 | */ |
3111 | |
3112 | /* for tracking segment boundaries */ |
		sgel = scsi_sglist(scsi_cmnd);
3114 | j = 2; |
3115 | for (i = 0; i < nseg; i++) { |
3116 | sgl->word2 = 0; |
3117 | if (nseg == 1) { |
3118 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
3119 | bf_set(lpfc_sli4_sge_type, sgl, |
3120 | LPFC_SGE_TYPE_DATA); |
3121 | } else { |
3122 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
3123 | |
3124 | /* do we need to expand the segment */ |
3125 | if (!lsp_just_set && |
3126 | !((j + 1) % phba->border_sge_num) && |
3127 | ((nseg - 1) != i)) { |
3128 | /* set LSP type */ |
3129 | bf_set(lpfc_sli4_sge_type, sgl, |
3130 | LPFC_SGE_TYPE_LSP); |
3131 | |
					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_cmd);
3134 | |
3135 | if (unlikely(!sgl_xtra)) { |
3136 | lpfc_cmd->seg_cnt = 0; |
						scsi_dma_unmap(scsi_cmnd);
3138 | return 1; |
3139 | } |
3140 | sgl->addr_lo = cpu_to_le32(putPaddrLow( |
3141 | sgl_xtra->dma_phys_sgl)); |
3142 | sgl->addr_hi = cpu_to_le32(putPaddrHigh( |
3143 | sgl_xtra->dma_phys_sgl)); |
3144 | |
3145 | } else { |
3146 | bf_set(lpfc_sli4_sge_type, sgl, |
3147 | LPFC_SGE_TYPE_DATA); |
3148 | } |
3149 | } |
3150 | |
3151 | if (!(bf_get(lpfc_sli4_sge_type, sgl) & |
3152 | LPFC_SGE_TYPE_LSP)) { |
3153 | if ((nseg - 1) == i) |
3154 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
3155 | |
3156 | physaddr = sg_dma_address(sgel); |
3157 | dma_len = sg_dma_len(sgel); |
3158 | sgl->addr_lo = cpu_to_le32(putPaddrLow( |
3159 | physaddr)); |
3160 | sgl->addr_hi = cpu_to_le32(putPaddrHigh( |
3161 | physaddr)); |
3162 | |
3163 | bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); |
3164 | sgl->word2 = cpu_to_le32(sgl->word2); |
3165 | sgl->sge_len = cpu_to_le32(dma_len); |
3166 | |
3167 | dma_offset += dma_len; |
3168 | sgel = sg_next(sgel); |
3169 | |
3170 | sgl++; |
3171 | lsp_just_set = false; |
3172 | |
3173 | } else { |
3174 | sgl->word2 = cpu_to_le32(sgl->word2); |
3175 | sgl->sge_len = cpu_to_le32( |
3176 | phba->cfg_sg_dma_buf_size); |
3177 | |
3178 | sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; |
3179 | i = i - 1; |
3180 | |
3181 | lsp_just_set = true; |
3182 | } |
3183 | |
3184 | j++; |
3185 | } |
3186 | |
3187 | /* PBDE support for first data SGE only. |
3188 | * For FCoE, we key off Performance Hints. |
3189 | * For FC, we key off lpfc_enable_pbde. |
3190 | */ |
3191 | if (nseg == 1 && |
3192 | ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || |
3193 | phba->cfg_enable_pbde)) { |
3194 | /* Words 13-15 */ |
3195 | bde = (struct ulp_bde64 *) |
3196 | &wqe->words[13]; |
3197 | bde->addrLow = first_data_sgl->addr_lo; |
3198 | bde->addrHigh = first_data_sgl->addr_hi; |
3199 | bde->tus.f.bdeSize = |
3200 | le32_to_cpu(first_data_sgl->sge_len); |
3201 | bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
3202 | bde->tus.w = cpu_to_le32(bde->tus.w); |
3203 | |
3204 | /* Word 11 - set PBDE bit */ |
3205 | bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); |
3206 | } else { |
3207 | memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); |
3208 | /* Word 11 - PBDE bit disabled by default template */ |
3209 | } |
3210 | } else { |
3211 | sgl += 1; |
3212 | /* set the last flag in the fcp_rsp map entry */ |
3213 | sgl->word2 = le32_to_cpu(sgl->word2); |
3214 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
3215 | sgl->word2 = cpu_to_le32(sgl->word2); |
3216 | |
3217 | if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || |
3218 | phba->cfg_enable_pbde) { |
3219 | bde = (struct ulp_bde64 *) |
3220 | &wqe->words[13]; |
3221 | memset(bde, 0, (sizeof(uint32_t) * 3)); |
3222 | } |
3223 | } |
3224 | |
3225 | /* |
3226 | * Finish initializing those IOCB fields that are dependent on the |
3227 | * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is |
* explicitly reinitialized since all iocb memory resources are
* reused.
3230 | */ |
3231 | fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); |
3232 | /* Set first-burst provided it was successfully negotiated */ |
3233 | if (!(phba->hba_flag & HBA_FCOE_MODE) && |
3234 | vport->cfg_first_burst_size && |
3235 | scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { |
3236 | u32 init_len, total_len; |
3237 | |
3238 | total_len = be32_to_cpu(fcp_cmnd->fcpDl); |
3239 | init_len = min(total_len, vport->cfg_first_burst_size); |
3240 | |
3241 | /* Word 4 & 5 */ |
3242 | wqe->fcp_iwrite.initial_xfer_len = init_len; |
3243 | wqe->fcp_iwrite.total_xfer_len = total_len; |
3244 | } else { |
3245 | /* Word 4 */ |
3246 | wqe->fcp_iwrite.total_xfer_len = |
3247 | be32_to_cpu(fcp_cmnd->fcpDl); |
3248 | } |
3249 | |
3250 | /* |
3251 | * If the OAS driver feature is enabled and the lun is enabled for |
3252 | * OAS, set the oas iocb related flags. |
3253 | */ |
3254 | if ((phba->cfg_fof) && ((struct lpfc_device_data *) |
3255 | scsi_cmnd->device->hostdata)->oas_enabled) { |
3256 | lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); |
3257 | lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) |
3258 | scsi_cmnd->device->hostdata)->priority; |
3259 | |
3260 | /* Word 10 */ |
3261 | bf_set(wqe_oas, &wqe->generic.wqe_com, 1); |
3262 | bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); |
3263 | |
3264 | if (lpfc_cmd->cur_iocbq.priority) |
3265 | bf_set(wqe_ccp, &wqe->generic.wqe_com, |
3266 | (lpfc_cmd->cur_iocbq.priority << 1)); |
3267 | else |
3268 | bf_set(wqe_ccp, &wqe->generic.wqe_com, |
3269 | (phba->cfg_XLanePriority << 1)); |
3270 | } |
3271 | |
3272 | return 0; |
3273 | } |
3274 | |
3275 | /** |
3276 | * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec |
3277 | * @phba: The Hba for which this call is being executed. |
3278 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
3279 | * |
3280 | * This is the protection/DIF aware version of |
3281 | * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the |
* two functions eventually, but for now, it's here.
3283 | * Return codes: |
3284 | * 2 - Error - Do not retry |
3285 | * 1 - Error - Retry |
3286 | * 0 - Success |
3287 | **/ |
3288 | static int |
3289 | lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, |
3290 | struct lpfc_io_buf *lpfc_cmd) |
3291 | { |
3292 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
3293 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
3294 | struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl); |
3295 | struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; |
3296 | union lpfc_wqe128 *wqe = &pwqeq->wqe; |
3297 | uint32_t num_sge = 0; |
3298 | int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; |
3299 | int prot_group_type = 0; |
3300 | int fcpdl; |
3301 | int ret = 1; |
3302 | struct lpfc_vport *vport = phba->pport; |
3303 | |
3304 | /* |
* Start the lpfc command prep by bumping the sgl beyond the fcp_cmnd
* and fcp_rsp regions to the first data sge entry
3307 | */ |
if (scsi_sg_count(scsi_cmnd)) {
3309 | /* |
3310 | * The driver stores the segment count returned from dma_map_sg |
* because this is a count of dma-mappings used to map the use_sg
3312 | * pages. They are not guaranteed to be the same for those |
3313 | * architectures that implement an IOMMU. |
3314 | */ |
3315 | datasegcnt = dma_map_sg(&phba->pcidev->dev, |
3316 | scsi_sglist(scsi_cmnd), |
3317 | scsi_sg_count(scsi_cmnd), datadir); |
3318 | if (unlikely(!datasegcnt)) |
3319 | return 1; |
3320 | |
3321 | sgl += 1; |
3322 | /* clear the last flag in the fcp_rsp map entry */ |
3323 | sgl->word2 = le32_to_cpu(sgl->word2); |
3324 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
3325 | sgl->word2 = cpu_to_le32(sgl->word2); |
3326 | |
3327 | sgl += 1; |
3328 | lpfc_cmd->seg_cnt = datasegcnt; |
3329 | |
3330 | /* First check if data segment count from SCSI Layer is good */ |
3331 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt && |
3332 | !phba->cfg_xpsgl) { |
3333 | WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); |
3334 | ret = 2; |
3335 | goto err; |
3336 | } |
3337 | |
prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3339 | |
3340 | switch (prot_group_type) { |
3341 | case LPFC_PG_TYPE_NO_DIF: |
3342 | /* Here we need to add a DISEED to the count */ |
3343 | if (((lpfc_cmd->seg_cnt + 1) > |
3344 | phba->cfg_total_seg_cnt) && |
3345 | !phba->cfg_xpsgl) { |
3346 | ret = 2; |
3347 | goto err; |
3348 | } |
3349 | |
num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3351 | datasegcnt, lpfc_cmd); |
3352 | |
3353 | /* we should have 2 or more entries in buffer list */ |
3354 | if (num_sge < 2) { |
3355 | ret = 2; |
3356 | goto err; |
3357 | } |
3358 | break; |
3359 | |
3360 | case LPFC_PG_TYPE_DIF_BUF: |
3361 | /* |
3362 | * This type indicates that protection buffers are |
3363 | * passed to the driver, so that needs to be prepared |
3364 | * for DMA |
3365 | */ |
3366 | protsegcnt = dma_map_sg(&phba->pcidev->dev, |
3367 | scsi_prot_sglist(scsi_cmnd), |
3368 | scsi_prot_sg_count(scsi_cmnd), datadir); |
3369 | if (unlikely(!protsegcnt)) { |
scsi_dma_unmap(scsi_cmnd);
3371 | return 1; |
3372 | } |
3373 | |
3374 | lpfc_cmd->prot_seg_cnt = protsegcnt; |
3375 | /* |
* There is a minimum of 3 SGEs used for every
3377 | * protection data segment. |
3378 | */ |
3379 | if (((lpfc_cmd->prot_seg_cnt * 3) > |
3380 | (phba->cfg_total_seg_cnt - 2)) && |
3381 | !phba->cfg_xpsgl) { |
3382 | ret = 2; |
3383 | goto err; |
3384 | } |
3385 | |
num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
datasegcnt, protsegcnt, lpfc_cmd);
3388 | |
3389 | /* we should have 3 or more entries in buffer list */ |
3390 | if (num_sge < 3 || |
3391 | (num_sge > phba->cfg_total_seg_cnt && |
3392 | !phba->cfg_xpsgl)) { |
3393 | ret = 2; |
3394 | goto err; |
3395 | } |
3396 | break; |
3397 | |
3398 | case LPFC_PG_TYPE_INVALID: |
3399 | default: |
scsi_dma_unmap(scsi_cmnd);
lpfc_cmd->seg_cnt = 0;

lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9083 Unexpected protection group %i\n",
3405 | prot_group_type); |
3406 | return 2; |
3407 | } |
3408 | } |
3409 | |
switch (scsi_get_prot_op(scsi_cmnd)) {
3411 | case SCSI_PROT_WRITE_STRIP: |
3412 | case SCSI_PROT_READ_STRIP: |
3413 | lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP; |
3414 | break; |
3415 | case SCSI_PROT_WRITE_INSERT: |
3416 | case SCSI_PROT_READ_INSERT: |
3417 | lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT; |
3418 | break; |
3419 | case SCSI_PROT_WRITE_PASS: |
3420 | case SCSI_PROT_READ_PASS: |
3421 | lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS; |
3422 | break; |
3423 | } |
3424 | |
3425 | fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); |
fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
3427 | |
3428 | /* Set first-burst provided it was successfully negotiated */ |
3429 | if (!(phba->hba_flag & HBA_FCOE_MODE) && |
3430 | vport->cfg_first_burst_size && |
3431 | scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { |
3432 | u32 init_len, total_len; |
3433 | |
3434 | total_len = be32_to_cpu(fcp_cmnd->fcpDl); |
3435 | init_len = min(total_len, vport->cfg_first_burst_size); |
3436 | |
3437 | /* Word 4 & 5 */ |
3438 | wqe->fcp_iwrite.initial_xfer_len = init_len; |
3439 | wqe->fcp_iwrite.total_xfer_len = total_len; |
3440 | } else { |
3441 | /* Word 4 */ |
3442 | wqe->fcp_iwrite.total_xfer_len = |
3443 | be32_to_cpu(fcp_cmnd->fcpDl); |
3444 | } |
3445 | |
3446 | /* |
3447 | * If the OAS driver feature is enabled and the lun is enabled for |
3448 | * OAS, set the oas iocb related flags. |
3449 | */ |
3450 | if ((phba->cfg_fof) && ((struct lpfc_device_data *) |
3451 | scsi_cmnd->device->hostdata)->oas_enabled) { |
3452 | lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); |
3453 | |
3454 | /* Word 10 */ |
3455 | bf_set(wqe_oas, &wqe->generic.wqe_com, 1); |
3456 | bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); |
3457 | bf_set(wqe_ccp, &wqe->generic.wqe_com, |
3458 | (phba->cfg_XLanePriority << 1)); |
3459 | } |
3460 | |
3461 | /* Word 7. DIF Flags */ |
3462 | if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS) |
3463 | bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); |
3464 | else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP) |
3465 | bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); |
3466 | else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT) |
3467 | bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); |
3468 | |
3469 | lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS | |
3470 | LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT); |
3471 | |
3472 | return 0; |
3473 | err: |
3474 | if (lpfc_cmd->seg_cnt) |
scsi_dma_unmap(scsi_cmnd);
3476 | if (lpfc_cmd->prot_seg_cnt) |
3477 | dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), |
3478 | scsi_prot_sg_count(scsi_cmnd), |
3479 | scsi_cmnd->sc_data_direction); |
3480 | |
3481 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
3482 | "9084 Cannot setup S/G List for HBA " |
3483 | "IO segs %d/%d SGL %d SCSI %d: %d %d %d\n" , |
3484 | lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, |
3485 | phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, |
3486 | prot_group_type, num_sge, ret); |
3487 | |
3488 | lpfc_cmd->seg_cnt = 0; |
3489 | lpfc_cmd->prot_seg_cnt = 0; |
3490 | return ret; |
3491 | } |
3492 | |
3493 | /** |
3494 | * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer |
3495 | * @phba: The Hba for which this call is being executed. |
3496 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
3497 | * |
3498 | * This routine wraps the actual DMA mapping function pointer from the |
3499 | * lpfc_hba struct. |
3500 | * |
3501 | * Return codes: |
3502 | * 1 - Error |
3503 | * 0 - Success |
3504 | **/ |
3505 | static inline int |
3506 | lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) |
3507 | { |
3508 | return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); |
3509 | } |
3510 | |
3511 | /** |
3512 | * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer |
3513 | * using BlockGuard. |
3514 | * @phba: The Hba for which this call is being executed. |
3515 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
3516 | * |
3517 | * This routine wraps the actual DMA mapping function pointer from the |
3518 | * lpfc_hba struct. |
3519 | * |
3520 | * Return codes: |
3521 | * 1 - Error |
3522 | * 0 - Success |
3523 | **/ |
3524 | static inline int |
3525 | lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) |
3526 | { |
3527 | return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); |
3528 | } |
3529 | |
3530 | /** |
3531 | * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi |
3532 | * buffer |
3533 | * @vport: Pointer to vport object. |
3534 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
3535 | * @tmo: Timeout value for IO |
3536 | * |
3537 | * This routine initializes IOCB/WQE data structure from scsi command |
3538 | * |
3539 | * Return codes: |
3540 | * 1 - Error |
3541 | * 0 - Success |
3542 | **/ |
3543 | static inline int |
3544 | lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, |
3545 | uint8_t tmo) |
3546 | { |
3547 | return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo); |
3548 | } |
3549 | |
3550 | /** |
3551 | * lpfc_send_scsi_error_event - Posts an event when there is SCSI error |
3552 | * @phba: Pointer to hba context object. |
3553 | * @vport: Pointer to vport object. |
3554 | * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. |
3555 | * @fcpi_parm: FCP Initiator parameter. |
3556 | * |
3557 | * This function posts an event when there is a SCSI command reporting |
3558 | * error from the scsi device. |
3559 | **/ |
3560 | static void |
3561 | lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, |
3562 | struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) { |
3563 | struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; |
3564 | struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; |
3565 | uint32_t resp_info = fcprsp->rspStatus2; |
3566 | uint32_t scsi_status = fcprsp->rspStatus3; |
3567 | struct lpfc_fast_path_event *fast_path_evt = NULL; |
3568 | struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; |
3569 | unsigned long flags; |
3570 | |
3571 | if (!pnode) |
3572 | return; |
3573 | |
3574 | /* If there is queuefull or busy condition send a scsi event */ |
3575 | if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || |
3576 | (cmnd->result == SAM_STAT_BUSY)) { |
3577 | fast_path_evt = lpfc_alloc_fast_evt(phba); |
3578 | if (!fast_path_evt) |
3579 | return; |
3580 | fast_path_evt->un.scsi_evt.event_type = |
3581 | FC_REG_SCSI_EVENT; |
3582 | fast_path_evt->un.scsi_evt.subcategory = |
3583 | (cmnd->result == SAM_STAT_TASK_SET_FULL) ? |
3584 | LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; |
3585 | fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; |
3586 | memcpy(&fast_path_evt->un.scsi_evt.wwpn, |
3587 | &pnode->nlp_portname, sizeof(struct lpfc_name)); |
3588 | memcpy(&fast_path_evt->un.scsi_evt.wwnn, |
3589 | &pnode->nlp_nodename, sizeof(struct lpfc_name)); |
3590 | } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && |
3591 | ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { |
3592 | fast_path_evt = lpfc_alloc_fast_evt(phba); |
3593 | if (!fast_path_evt) |
3594 | return; |
3595 | fast_path_evt->un.check_cond_evt.scsi_event.event_type = |
3596 | FC_REG_SCSI_EVENT; |
3597 | fast_path_evt->un.check_cond_evt.scsi_event.subcategory = |
3598 | LPFC_EVENT_CHECK_COND; |
3599 | fast_path_evt->un.check_cond_evt.scsi_event.lun = |
3600 | cmnd->device->lun; |
3601 | memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, |
3602 | &pnode->nlp_portname, sizeof(struct lpfc_name)); |
3603 | memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, |
3604 | &pnode->nlp_nodename, sizeof(struct lpfc_name)); |
3605 | fast_path_evt->un.check_cond_evt.sense_key = |
3606 | cmnd->sense_buffer[2] & 0xf; |
3607 | fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; |
3608 | fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; |
3609 | } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && |
3610 | fcpi_parm && |
3611 | ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || |
3612 | ((scsi_status == SAM_STAT_GOOD) && |
3613 | !(resp_info & (RESID_UNDER | RESID_OVER))))) { |
3614 | /* |
* If status is good or resid does not match with fcpi_parm and
3616 | * there is valid fcpi_parm, then there is a read_check error |
3617 | */ |
3618 | fast_path_evt = lpfc_alloc_fast_evt(phba); |
3619 | if (!fast_path_evt) |
3620 | return; |
3621 | fast_path_evt->un.read_check_error.header.event_type = |
3622 | FC_REG_FABRIC_EVENT; |
3623 | fast_path_evt->un.read_check_error.header.subcategory = |
3624 | LPFC_EVENT_FCPRDCHKERR; |
3625 | memcpy(&fast_path_evt->un.read_check_error.header.wwpn, |
3626 | &pnode->nlp_portname, sizeof(struct lpfc_name)); |
3627 | memcpy(&fast_path_evt->un.read_check_error.header.wwnn, |
3628 | &pnode->nlp_nodename, sizeof(struct lpfc_name)); |
3629 | fast_path_evt->un.read_check_error.lun = cmnd->device->lun; |
3630 | fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; |
3631 | fast_path_evt->un.read_check_error.fcpiparam = |
3632 | fcpi_parm; |
3633 | } else |
3634 | return; |
3635 | |
3636 | fast_path_evt->vport = vport; |
3637 | spin_lock_irqsave(&phba->hbalock, flags); |
list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
3640 | lpfc_worker_wake_up(phba); |
3641 | return; |
3642 | } |
3643 | |
3644 | /** |
3645 | * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev |
3646 | * @phba: The HBA for which this call is being executed. |
3647 | * @psb: The scsi buffer which is going to be un-mapped. |
3648 | * |
* This routine does DMA un-mapping of the scatter-gather list of the scsi
* command field of @psb.
3651 | **/ |
3652 | static void |
3653 | lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) |
3654 | { |
3655 | /* |
3656 | * There are only two special cases to consider. (1) the scsi command |
3657 | * requested scatter-gather usage or (2) the scsi command allocated |
3658 | * a request buffer, but did not request use_sg. There is a third |
3659 | * case, but it does not require resource deallocation. |
3660 | */ |
3661 | if (psb->seg_cnt > 0) |
scsi_dma_unmap(psb->pCmd);
3663 | if (psb->prot_seg_cnt > 0) |
3664 | dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), |
3665 | scsi_prot_sg_count(psb->pCmd), |
3666 | psb->pCmd->sc_data_direction); |
3667 | } |
3668 | |
3669 | /** |
3670 | * lpfc_unblock_requests - allow further commands to be queued. |
3671 | * @phba: pointer to phba object |
3672 | * |
3673 | * For single vport, just call scsi_unblock_requests on physical port. |
3674 | * For multiple vports, send scsi_unblock_requests for all the vports. |
3675 | */ |
3676 | void |
3677 | lpfc_unblock_requests(struct lpfc_hba *phba) |
3678 | { |
3679 | struct lpfc_vport **vports; |
3680 | struct Scsi_Host *shost; |
3681 | int i; |
3682 | |
3683 | if (phba->sli_rev == LPFC_SLI_REV4 && |
3684 | !phba->sli4_hba.max_cfg_param.vpi_used) { |
shost = lpfc_shost_from_vport(phba->pport);
3686 | scsi_unblock_requests(shost); |
3687 | return; |
3688 | } |
3689 | |
3690 | vports = lpfc_create_vport_work_array(phba); |
3691 | if (vports != NULL) |
3692 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
shost = lpfc_shost_from_vport(vports[i]);
3694 | scsi_unblock_requests(shost); |
3695 | } |
3696 | lpfc_destroy_vport_work_array(phba, vports); |
3697 | } |
3698 | |
3699 | /** |
3700 | * lpfc_block_requests - prevent further commands from being queued. |
3701 | * @phba: pointer to phba object |
3702 | * |
3703 | * For single vport, just call scsi_block_requests on physical port. |
3704 | * For multiple vports, send scsi_block_requests for all the vports. |
3705 | */ |
3706 | void |
3707 | lpfc_block_requests(struct lpfc_hba *phba) |
3708 | { |
3709 | struct lpfc_vport **vports; |
3710 | struct Scsi_Host *shost; |
3711 | int i; |
3712 | |
if (atomic_read(&phba->cmf_stop_io))
3714 | return; |
3715 | |
3716 | if (phba->sli_rev == LPFC_SLI_REV4 && |
3717 | !phba->sli4_hba.max_cfg_param.vpi_used) { |
shost = lpfc_shost_from_vport(phba->pport);
3719 | scsi_block_requests(shost); |
3720 | return; |
3721 | } |
3722 | |
3723 | vports = lpfc_create_vport_work_array(phba); |
3724 | if (vports != NULL) |
3725 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
shost = lpfc_shost_from_vport(vports[i]);
3727 | scsi_block_requests(shost); |
3728 | } |
3729 | lpfc_destroy_vport_work_array(phba, vports); |
3730 | } |
3731 | |
3732 | /** |
3733 | * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion |
3734 | * @phba: The HBA for which this call is being executed. |
3735 | * @time: The latency of the IO that completed (in ns) |
3736 | * @size: The size of the IO that completed |
* @shost: SCSI host the IO completed on (NULL for an NVME IO)
3738 | * |
3739 | * The routine adjusts the various Burst and Bandwidth counters used in |
3740 | * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT, |
3741 | * that means the IO was never issued to the HBA, so this routine is |
3742 | * just being called to cleanup the counter from a previous |
3743 | * lpfc_update_cmf_cmd call. |
3744 | */ |
3745 | int |
3746 | lpfc_update_cmf_cmpl(struct lpfc_hba *phba, |
3747 | uint64_t time, uint32_t size, struct Scsi_Host *shost) |
3748 | { |
3749 | struct lpfc_cgn_stat *cgs; |
3750 | |
3751 | if (time != LPFC_CGN_NOT_SENT) { |
3752 | /* lat is ns coming in, save latency in us */ |
3753 | if (time < 1000) |
3754 | time = 1; |
3755 | else |
time = div_u64(time + 500, 1000); /* round it */
3757 | |
3758 | cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id()); |
atomic64_add(size, &cgs->rcv_bytes);
atomic64_add(time, &cgs->rx_latency);
atomic_inc(&cgs->rx_io_cnt);
3762 | } |
3763 | return 0; |
3764 | } |
3765 | |
3766 | /** |
3767 | * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission |
3768 | * @phba: The HBA for which this call is being executed. |
3769 | * @size: The size of the IO that will be issued |
3770 | * |
3771 | * The routine adjusts the various Burst and Bandwidth counters used in |
3772 | * Congestion management and E2E. |
3773 | */ |
3774 | int |
3775 | lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size) |
3776 | { |
3777 | uint64_t total; |
3778 | struct lpfc_cgn_stat *cgs; |
3779 | int cpu; |
3780 | |
3781 | /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */ |
3782 | if (phba->cmf_active_mode == LPFC_CFG_MANAGED && |
3783 | phba->cmf_max_bytes_per_interval) { |
3784 | total = 0; |
3785 | for_each_present_cpu(cpu) { |
3786 | cgs = per_cpu_ptr(phba->cmf_stat, cpu); |
total += atomic64_read(&cgs->total_bytes);
3788 | } |
3789 | if (total >= phba->cmf_max_bytes_per_interval) { |
if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
3791 | lpfc_block_requests(phba); |
3792 | phba->cmf_last_ts = |
3793 | lpfc_calc_cmf_latency(phba); |
3794 | } |
atomic_inc(&phba->cmf_busy);
3796 | return -EBUSY; |
3797 | } |
if (size > atomic_read(&phba->rx_max_read_cnt))
atomic_set(&phba->rx_max_read_cnt, size);
3800 | } |
3801 | |
3802 | cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id()); |
atomic64_add(size, &cgs->total_bytes);
3804 | return 0; |
3805 | } |
3806 | |
3807 | /** |
3808 | * lpfc_handle_fcp_err - FCP response handler |
3809 | * @vport: The virtual port for which this call is being executed. |
3810 | * @lpfc_cmd: Pointer to lpfc_io_buf data structure. |
3811 | * @fcpi_parm: FCP Initiator parameter. |
3812 | * |
3813 | * This routine is called to process response IOCB with status field |
3814 | * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command |
3815 | * based upon SCSI and FCP error. |
3816 | **/ |
3817 | static void |
3818 | lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, |
3819 | uint32_t fcpi_parm) |
3820 | { |
3821 | struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; |
3822 | struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; |
3823 | struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; |
3824 | uint32_t resp_info = fcprsp->rspStatus2; |
3825 | uint32_t scsi_status = fcprsp->rspStatus3; |
3826 | uint32_t *lp; |
3827 | uint32_t host_status = DID_OK; |
3828 | uint32_t rsplen = 0; |
3829 | uint32_t fcpDl; |
3830 | uint32_t logit = LOG_FCP | LOG_FCP_ERROR; |
3831 | |
3832 | |
3833 | /* |
3834 | * If this is a task management command, there is no |
3835 | * scsi packet associated with this lpfc_cmd. The driver |
3836 | * consumes it. |
3837 | */ |
3838 | if (fcpcmd->fcpCntl2) { |
3839 | scsi_status = 0; |
3840 | goto out; |
3841 | } |
3842 | |
3843 | if (resp_info & RSP_LEN_VALID) { |
3844 | rsplen = be32_to_cpu(fcprsp->rspRspLen); |
3845 | if (rsplen != 0 && rsplen != 4 && rsplen != 8) { |
3846 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
3847 | "2719 Invalid response length: " |
3848 | "tgt x%x lun x%llx cmnd x%x rsplen " |
3849 | "x%x\n" , cmnd->device->id, |
3850 | cmnd->device->lun, cmnd->cmnd[0], |
3851 | rsplen); |
3852 | host_status = DID_ERROR; |
3853 | goto out; |
3854 | } |
3855 | if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { |
3856 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
3857 | "2757 Protocol failure detected during " |
3858 | "processing of FCP I/O op: " |
3859 | "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n" , |
3860 | cmnd->device->id, |
3861 | cmnd->device->lun, cmnd->cmnd[0], |
3862 | fcprsp->rspInfo3); |
3863 | host_status = DID_ERROR; |
3864 | goto out; |
3865 | } |
3866 | } |
3867 | |
3868 | if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { |
3869 | uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); |
3870 | if (snslen > SCSI_SENSE_BUFFERSIZE) |
3871 | snslen = SCSI_SENSE_BUFFERSIZE; |
3872 | |
3873 | if (resp_info & RSP_LEN_VALID) |
3874 | rsplen = be32_to_cpu(fcprsp->rspRspLen); |
3875 | memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); |
3876 | } |
3877 | lp = (uint32_t *)cmnd->sense_buffer; |
3878 | |
3879 | /* special handling for under run conditions */ |
3880 | if (!scsi_status && (resp_info & RESID_UNDER)) { |
3881 | /* don't log under runs if fcp set... */ |
3882 | if (vport->cfg_log_verbose & LOG_FCP) |
3883 | logit = LOG_FCP_ERROR; |
3884 | /* unless operator says so */ |
3885 | if (vport->cfg_log_verbose & LOG_FCP_UNDER) |
3886 | logit = LOG_FCP_UNDER; |
3887 | } |
3888 | |
3889 | lpfc_printf_vlog(vport, KERN_WARNING, logit, |
3890 | "9024 FCP command x%x failed: x%x SNS x%x x%x " |
3891 | "Data: x%x x%x x%x x%x x%x\n" , |
3892 | cmnd->cmnd[0], scsi_status, |
3893 | be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, |
3894 | be32_to_cpu(fcprsp->rspResId), |
3895 | be32_to_cpu(fcprsp->rspSnsLen), |
3896 | be32_to_cpu(fcprsp->rspRspLen), |
3897 | fcprsp->rspInfo3); |
3898 | |
scsi_set_resid(cmnd, 0);
fcpDl = be32_to_cpu(fcpcmd->fcpDl);
if (resp_info & RESID_UNDER) {
scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3903 | |
3904 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, |
3905 | "9025 FCP Underrun, expected %d, " |
3906 | "residual %d Data: x%x x%x x%x\n" , |
3907 | fcpDl, |
3908 | scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], |
3909 | cmnd->underflow); |
3910 | |
3911 | /* |
3912 | * If there is an under run, check if under run reported by |
3913 | * storage array is same as the under run reported by HBA. |
3914 | * If this is not same, there is a dropped frame. |
3915 | */ |
if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3917 | lpfc_printf_vlog(vport, KERN_WARNING, |
3918 | LOG_FCP | LOG_FCP_ERROR, |
3919 | "9026 FCP Read Check Error " |
3920 | "and Underrun Data: x%x x%x x%x x%x\n" , |
3921 | fcpDl, |
3922 | scsi_get_resid(cmnd), fcpi_parm, |
3923 | cmnd->cmnd[0]); |
scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3925 | host_status = DID_ERROR; |
3926 | } |
3927 | /* |
3928 | * The cmnd->underflow is the minimum number of bytes that must |
3929 | * be transferred for this command. Provided a sense condition |
3930 | * is not present, make sure the actual amount transferred is at |
3931 | * least the underflow value or fail. |
3932 | */ |
3933 | if (!(resp_info & SNS_LEN_VALID) && |
3934 | (scsi_status == SAM_STAT_GOOD) && |
(scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3936 | < cmnd->underflow)) { |
3937 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
3938 | "9027 FCP command x%x residual " |
3939 | "underrun converted to error " |
3940 | "Data: x%x x%x x%x\n" , |
3941 | cmnd->cmnd[0], scsi_bufflen(cmnd), |
3942 | scsi_get_resid(cmnd), cmnd->underflow); |
3943 | host_status = DID_ERROR; |
3944 | } |
3945 | } else if (resp_info & RESID_OVER) { |
3946 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
3947 | "9028 FCP command x%x residual overrun error. " |
3948 | "Data: x%x x%x\n" , cmnd->cmnd[0], |
3949 | scsi_bufflen(cmnd), scsi_get_resid(cmnd)); |
3950 | host_status = DID_ERROR; |
3951 | |
3952 | /* |
3953 | * Check SLI validation that all the transfer was actually done |
3954 | * (fcpi_parm should be zero). Apply check only to reads. |
3955 | */ |
3956 | } else if (fcpi_parm) { |
3957 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, |
3958 | "9029 FCP %s Check Error Data: " |
3959 | "x%x x%x x%x x%x x%x\n" , |
3960 | ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ? |
3961 | "Read" : "Write" ), |
3962 | fcpDl, be32_to_cpu(fcprsp->rspResId), |
3963 | fcpi_parm, cmnd->cmnd[0], scsi_status); |
3964 | |
3965 | /* There is some issue with the LPe12000 that causes it |
3966 | * to miscalculate the fcpi_parm and falsely trip this |
3967 | * recovery logic. Detect this case and don't error when true. |
3968 | */ |
3969 | if (fcpi_parm > fcpDl) |
3970 | goto out; |
3971 | |
3972 | switch (scsi_status) { |
3973 | case SAM_STAT_GOOD: |
3974 | case SAM_STAT_CHECK_CONDITION: |
3975 | /* Fabric dropped a data frame. Fail any successful |
3976 | * command in which we detected dropped frames. |
3977 | * A status of good or some check conditions could |
3978 | * be considered a successful command. |
3979 | */ |
3980 | host_status = DID_ERROR; |
3981 | break; |
3982 | } |
scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3984 | } |
3985 | |
3986 | out: |
3987 | cmnd->result = host_status << 16 | scsi_status; |
lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
3989 | } |
3990 | |
3991 | /** |
3992 | * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO |
3993 | * @phba: The hba for which this call is being executed. |
3994 | * @pwqeIn: The command WQE for the scsi cmnd. |
3995 | * @pwqeOut: Pointer to driver response WQE object. |
3996 | * |
3997 | * This routine assigns scsi command result by looking into response WQE |
3998 | * status field appropriately. This routine handles QUEUE FULL condition as |
3999 | * well by ramping down device queue depth. |
4000 | **/ |
4001 | static void |
4002 | lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, |
4003 | struct lpfc_iocbq *pwqeOut) |
4004 | { |
4005 | struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf; |
4006 | struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl; |
4007 | struct lpfc_vport *vport = pwqeIn->vport; |
4008 | struct lpfc_rport_data *rdata; |
4009 | struct lpfc_nodelist *ndlp; |
4010 | struct scsi_cmnd *cmd; |
4011 | unsigned long flags; |
4012 | struct lpfc_fast_path_event *fast_path_evt; |
4013 | struct Scsi_Host *shost; |
4014 | u32 logit = LOG_FCP; |
4015 | u32 idx; |
4016 | u32 lat; |
4017 | u8 wait_xb_clr = 0; |
4018 | |
4019 | /* Sanity check on return of outstanding command */ |
4020 | if (!lpfc_cmd) { |
4021 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
4022 | "9032 Null lpfc_cmd pointer. No " |
4023 | "release, skip completion\n" ); |
4024 | return; |
4025 | } |
4026 | |
4027 | rdata = lpfc_cmd->rdata; |
4028 | ndlp = rdata->pnode; |
4029 | |
4030 | /* Sanity check on return of outstanding command */ |
4031 | cmd = lpfc_cmd->pCmd; |
4032 | if (!cmd) { |
4033 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
4034 | "9042 I/O completion: Not an active IO\n" ); |
4035 | lpfc_release_scsi_buf(phba, psb: lpfc_cmd); |
4036 | return; |
4037 | } |
4038 | /* Guard against abort handler being called at same time */ |
spin_lock(&lpfc_cmd->buf_lock);
4040 | idx = lpfc_cmd->cur_iocbq.hba_wqidx; |
4041 | if (phba->sli4_hba.hdwq) |
4042 | phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; |
4043 | |
4044 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
4045 | if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) |
4046 | this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); |
4047 | #endif |
4048 | shost = cmd->device->host; |
4049 | |
4050 | lpfc_cmd->status = bf_get(lpfc_wcqe_c_status, wcqe); |
4051 | lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); |
4052 | |
4053 | lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; |
4054 | if (bf_get(lpfc_wcqe_c_xb, wcqe)) { |
4055 | lpfc_cmd->flags |= LPFC_SBUF_XBUSY; |
4056 | if (phba->cfg_fcp_wait_abts_rsp) |
4057 | wait_xb_clr = 1; |
4058 | } |
4059 | |
4060 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
4061 | if (lpfc_cmd->prot_data_type) { |
4062 | struct scsi_dif_tuple *src = NULL; |
4063 | |
4064 | src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; |
4065 | /* |
4066 | * Used to restore any changes to protection |
4067 | * data for error injection. |
4068 | */ |
4069 | switch (lpfc_cmd->prot_data_type) { |
4070 | case LPFC_INJERR_REFTAG: |
4071 | src->ref_tag = |
4072 | lpfc_cmd->prot_data; |
4073 | break; |
4074 | case LPFC_INJERR_APPTAG: |
4075 | src->app_tag = |
4076 | (uint16_t)lpfc_cmd->prot_data; |
4077 | break; |
4078 | case LPFC_INJERR_GUARD: |
4079 | src->guard_tag = |
4080 | (uint16_t)lpfc_cmd->prot_data; |
4081 | break; |
4082 | default: |
4083 | break; |
4084 | } |
4085 | |
4086 | lpfc_cmd->prot_data = 0; |
4087 | lpfc_cmd->prot_data_type = 0; |
4088 | lpfc_cmd->prot_data_segment = NULL; |
4089 | } |
4090 | #endif |
4091 | if (unlikely(lpfc_cmd->status)) { |
4092 | if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && |
4093 | !lpfc_cmd->fcp_rsp->rspStatus3 && |
4094 | (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && |
4095 | !(vport->cfg_log_verbose & LOG_FCP_UNDER)) |
4096 | logit = 0; |
4097 | else |
4098 | logit = LOG_FCP | LOG_FCP_UNDER; |
4099 | lpfc_printf_vlog(vport, KERN_WARNING, logit, |
4100 | "9034 FCP cmd x%x failed <%d/%lld> " |
4101 | "status: x%x result: x%x " |
4102 | "sid: x%x did: x%x oxid: x%x " |
4103 | "Data: x%x x%x x%x\n" , |
4104 | cmd->cmnd[0], |
4105 | cmd->device ? cmd->device->id : 0xffff, |
4106 | cmd->device ? cmd->device->lun : 0xffff, |
4107 | lpfc_cmd->status, lpfc_cmd->result, |
4108 | vport->fc_myDID, |
4109 | (ndlp) ? ndlp->nlp_DID : 0, |
4110 | lpfc_cmd->cur_iocbq.sli4_xritag, |
4111 | wcqe->parameter, wcqe->total_data_placed, |
4112 | lpfc_cmd->cur_iocbq.iotag); |
4113 | } |
4114 | |
4115 | switch (lpfc_cmd->status) { |
4116 | case CQE_STATUS_SUCCESS: |
4117 | cmd->result = DID_OK << 16; |
4118 | break; |
4119 | case CQE_STATUS_FCP_RSP_FAILURE: |
4120 | lpfc_handle_fcp_err(vport, lpfc_cmd, |
pwqeIn->wqe.fcp_iread.total_xfer_len -
4122 | wcqe->total_data_placed); |
4123 | break; |
4124 | case CQE_STATUS_NPORT_BSY: |
4125 | case CQE_STATUS_FABRIC_BSY: |
4126 | cmd->result = DID_TRANSPORT_DISRUPTED << 16; |
4127 | fast_path_evt = lpfc_alloc_fast_evt(phba); |
4128 | if (!fast_path_evt) |
4129 | break; |
4130 | fast_path_evt->un.fabric_evt.event_type = |
4131 | FC_REG_FABRIC_EVENT; |
4132 | fast_path_evt->un.fabric_evt.subcategory = |
4133 | (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? |
4134 | LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; |
4135 | if (ndlp) { |
4136 | memcpy(&fast_path_evt->un.fabric_evt.wwpn, |
4137 | &ndlp->nlp_portname, |
4138 | sizeof(struct lpfc_name)); |
4139 | memcpy(&fast_path_evt->un.fabric_evt.wwnn, |
4140 | &ndlp->nlp_nodename, |
4141 | sizeof(struct lpfc_name)); |
4142 | } |
4143 | fast_path_evt->vport = vport; |
4144 | fast_path_evt->work_evt.evt = |
4145 | LPFC_EVT_FASTPATH_MGMT_EVT; |
4146 | spin_lock_irqsave(&phba->hbalock, flags); |
list_add_tail(&fast_path_evt->work_evt.evt_listp,
&phba->work_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
4150 | lpfc_worker_wake_up(phba); |
4151 | lpfc_printf_vlog(vport, KERN_WARNING, logit, |
4152 | "9035 Fabric/Node busy FCP cmd x%x failed" |
4153 | " <%d/%lld> " |
4154 | "status: x%x result: x%x " |
4155 | "sid: x%x did: x%x oxid: x%x " |
4156 | "Data: x%x x%x x%x\n" , |
4157 | cmd->cmnd[0], |
4158 | cmd->device ? cmd->device->id : 0xffff, |
4159 | cmd->device ? cmd->device->lun : 0xffff, |
4160 | lpfc_cmd->status, lpfc_cmd->result, |
4161 | vport->fc_myDID, |
4162 | (ndlp) ? ndlp->nlp_DID : 0, |
4163 | lpfc_cmd->cur_iocbq.sli4_xritag, |
4164 | wcqe->parameter, |
4165 | wcqe->total_data_placed, |
4166 | lpfc_cmd->cur_iocbq.iocb.ulpIoTag); |
4167 | break; |
4168 | case CQE_STATUS_DI_ERROR: |
4169 | if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) |
4170 | lpfc_cmd->result = IOERR_RX_DMA_FAILED; |
4171 | else |
4172 | lpfc_cmd->result = IOERR_TX_DMA_FAILED; |
4173 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_BG, |
4174 | "9048 DI Error xri x%x status x%x DI ext " |
4175 | "status x%x data placed x%x\n" , |
4176 | lpfc_cmd->cur_iocbq.sli4_xritag, |
4177 | lpfc_cmd->status, wcqe->parameter, |
4178 | wcqe->total_data_placed); |
if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4180 | /* BG enabled cmd. Parse BG error */ |
lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
4182 | break; |
4183 | } |
4184 | cmd->result = DID_ERROR << 16; |
4185 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
4186 | "9040 DI Error on unprotected cmd\n" ); |
4187 | break; |
4188 | case CQE_STATUS_REMOTE_STOP: |
4189 | if (ndlp) { |
4190 | /* This I/O was aborted by the target, we don't |
4191 | * know the rxid and because we did not send the |
* ABTS we cannot generate an RRQ.
4193 | */ |
4194 | lpfc_set_rrq_active(phba, ndlp, |
4195 | lpfc_cmd->cur_iocbq.sli4_lxritag, |
4196 | 0, 0); |
4197 | } |
4198 | fallthrough; |
4199 | case CQE_STATUS_LOCAL_REJECT: |
4200 | if (lpfc_cmd->result & IOERR_DRVR_MASK) |
4201 | lpfc_cmd->status = IOSTAT_DRIVER_REJECT; |
4202 | if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || |
4203 | lpfc_cmd->result == |
4204 | IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || |
4205 | lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || |
4206 | lpfc_cmd->result == |
4207 | IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { |
4208 | cmd->result = DID_NO_CONNECT << 16; |
4209 | break; |
4210 | } |
4211 | if (lpfc_cmd->result == IOERR_INVALID_RPI || |
4212 | lpfc_cmd->result == IOERR_LINK_DOWN || |
4213 | lpfc_cmd->result == IOERR_NO_RESOURCES || |
4214 | lpfc_cmd->result == IOERR_ABORT_REQUESTED || |
4215 | lpfc_cmd->result == IOERR_RPI_SUSPENDED || |
4216 | lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { |
4217 | cmd->result = DID_TRANSPORT_DISRUPTED << 16; |
4218 | break; |
4219 | } |
4220 | lpfc_printf_vlog(vport, KERN_WARNING, logit, |
4221 | "9036 Local Reject FCP cmd x%x failed" |
4222 | " <%d/%lld> " |
4223 | "status: x%x result: x%x " |
4224 | "sid: x%x did: x%x oxid: x%x " |
4225 | "Data: x%x x%x x%x\n" , |
4226 | cmd->cmnd[0], |
4227 | cmd->device ? cmd->device->id : 0xffff, |
4228 | cmd->device ? cmd->device->lun : 0xffff, |
4229 | lpfc_cmd->status, lpfc_cmd->result, |
4230 | vport->fc_myDID, |
4231 | (ndlp) ? ndlp->nlp_DID : 0, |
4232 | lpfc_cmd->cur_iocbq.sli4_xritag, |
4233 | wcqe->parameter, |
4234 | wcqe->total_data_placed, |
4235 | lpfc_cmd->cur_iocbq.iocb.ulpIoTag); |
4236 | fallthrough; |
4237 | default: |
4238 | cmd->result = DID_ERROR << 16; |
4239 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
4240 | "9037 FCP Completion Error: xri %x " |
4241 | "status x%x result x%x [x%x] " |
4242 | "placed x%x\n" , |
4243 | lpfc_cmd->cur_iocbq.sli4_xritag, |
4244 | lpfc_cmd->status, lpfc_cmd->result, |
4245 | wcqe->parameter, |
4246 | wcqe->total_data_placed); |
4247 | } |
4248 | if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { |
4249 | u32 *lp = (u32 *)cmd->sense_buffer; |
4250 | |
4251 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
4252 | "9039 Iodone <%d/%llu> cmd x%px, error " |
4253 | "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n" , |
4254 | cmd->device->id, cmd->device->lun, cmd, |
4255 | cmd->result, *lp, *(lp + 3), |
4256 | (cmd->device->sector_size) ? |
4257 | (u64)scsi_get_lba(cmd) : 0, |
4258 | cmd->retries, scsi_get_resid(cmd)); |
4259 | } |
4260 | |
4261 | if (vport->cfg_max_scsicmpl_time && |
4262 | time_after(jiffies, lpfc_cmd->start_time + |
4263 | msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { |
4264 | spin_lock_irqsave(shost->host_lock, flags); |
4265 | if (ndlp) { |
4266 | if (ndlp->cmd_qdepth > |
atomic_read(&ndlp->cmd_pending) &&
(atomic_read(&ndlp->cmd_pending) >
4269 | LPFC_MIN_TGT_QDEPTH) && |
4270 | (cmd->cmnd[0] == READ_10 || |
4271 | cmd->cmnd[0] == WRITE_10)) |
4272 | ndlp->cmd_qdepth = |
atomic_read(&ndlp->cmd_pending);
4274 | |
4275 | ndlp->last_change_time = jiffies; |
4276 | } |
spin_unlock_irqrestore(shost->host_lock, flags);
4278 | } |
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4280 | |
4281 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
4282 | if (lpfc_cmd->ts_cmd_start) { |
4283 | lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp; |
4284 | lpfc_cmd->ts_data_io = ktime_get_ns(); |
4285 | phba->ktime_last_cmd = lpfc_cmd->ts_data_io; |
lpfc_io_ktime(phba, lpfc_cmd);
4287 | } |
4288 | #endif |
4289 | if (likely(!wait_xb_clr)) |
4290 | lpfc_cmd->pCmd = NULL; |
spin_unlock(&lpfc_cmd->buf_lock);
4292 | |
4293 | /* Check if IO qualified for CMF */ |
4294 | if (phba->cmf_active_mode != LPFC_CFG_OFF && |
4295 | cmd->sc_data_direction == DMA_FROM_DEVICE && |
4296 | (scsi_sg_count(cmd))) { |
4297 | /* Used when calculating average latency */ |
4298 | lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start; |
lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
4300 | } |
4301 | |
4302 | if (wait_xb_clr) |
4303 | goto out; |
4304 | |
4305 | /* The sdev is not guaranteed to be valid post scsi_done upcall. */ |
4306 | scsi_done(cmd); |
4307 | |
4308 | /* |
4309 | * If there is an abort thread waiting for command completion |
4310 | * wake up the thread. |
4311 | */ |
spin_lock(&lpfc_cmd->buf_lock);
4313 | lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; |
4314 | if (lpfc_cmd->waitq) |
4315 | wake_up(lpfc_cmd->waitq); |
spin_unlock(&lpfc_cmd->buf_lock);
4317 | out: |
lpfc_release_scsi_buf(phba, lpfc_cmd);
4319 | } |
4320 | |
4321 | /** |
4322 | * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine |
4323 | * @phba: The Hba for which this call is being executed. |
4324 | * @pIocbIn: The command IOCBQ for the scsi cmnd. |
4325 | * @pIocbOut: The response IOCBQ for the scsi cmnd. |
4326 | * |
4327 | * This routine assigns scsi command result by looking into response IOCB |
4328 | * status field appropriately. This routine handles QUEUE FULL condition as |
4329 | * well by ramping down device queue depth. |
4330 | **/ |
4331 | static void |
4332 | lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, |
4333 | struct lpfc_iocbq *pIocbOut) |
4334 | { |
4335 | struct lpfc_io_buf *lpfc_cmd = |
4336 | (struct lpfc_io_buf *) pIocbIn->io_buf; |
4337 | struct lpfc_vport *vport = pIocbIn->vport; |
4338 | struct lpfc_rport_data *rdata = lpfc_cmd->rdata; |
4339 | struct lpfc_nodelist *pnode = rdata->pnode; |
4340 | struct scsi_cmnd *cmd; |
4341 | unsigned long flags; |
4342 | struct lpfc_fast_path_event *fast_path_evt; |
4343 | struct Scsi_Host *shost; |
4344 | int idx; |
4345 | uint32_t logit = LOG_FCP; |
4346 | |
4347 | /* Guard against abort handler being called at same time */ |
spin_lock(&lpfc_cmd->buf_lock);
4349 | |
4350 | /* Sanity check on return of outstanding command */ |
4351 | cmd = lpfc_cmd->pCmd; |
4352 | if (!cmd || !phba) { |
4353 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
4354 | "2621 IO completion: Not an active IO\n" ); |
4355 | spin_unlock(lock: &lpfc_cmd->buf_lock); |
4356 | return; |
4357 | } |
4358 | |
4359 | idx = lpfc_cmd->cur_iocbq.hba_wqidx; |
4360 | if (phba->sli4_hba.hdwq) |
4361 | phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; |
4362 | |
4363 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
4364 | if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) |
4365 | this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); |
4366 | #endif |
4367 | shost = cmd->device->host; |
4368 | |
4369 | lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); |
4370 | lpfc_cmd->status = pIocbOut->iocb.ulpStatus; |
4371 | /* pick up SLI4 exchange busy status from HBA */ |
4372 | lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; |
4373 | if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY) |
4374 | lpfc_cmd->flags |= LPFC_SBUF_XBUSY; |
4375 | |
4376 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
4377 | if (lpfc_cmd->prot_data_type) { |
4378 | struct scsi_dif_tuple *src = NULL; |
4379 | |
4380 | src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; |
4381 | /* |
4382 | * Used to restore any changes to protection |
4383 | * data for error injection. |
4384 | */ |
4385 | switch (lpfc_cmd->prot_data_type) { |
4386 | case LPFC_INJERR_REFTAG: |
4387 | src->ref_tag = |
4388 | lpfc_cmd->prot_data; |
4389 | break; |
4390 | case LPFC_INJERR_APPTAG: |
4391 | src->app_tag = |
4392 | (uint16_t)lpfc_cmd->prot_data; |
4393 | break; |
4394 | case LPFC_INJERR_GUARD: |
4395 | src->guard_tag = |
4396 | (uint16_t)lpfc_cmd->prot_data; |
4397 | break; |
4398 | default: |
4399 | break; |
4400 | } |
4401 | |
4402 | lpfc_cmd->prot_data = 0; |
4403 | lpfc_cmd->prot_data_type = 0; |
4404 | lpfc_cmd->prot_data_segment = NULL; |
4405 | } |
4406 | #endif |
4407 | |
4408 | if (unlikely(lpfc_cmd->status)) { |
4409 | if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && |
4410 | (lpfc_cmd->result & IOERR_DRVR_MASK)) |
4411 | lpfc_cmd->status = IOSTAT_DRIVER_REJECT; |
4412 | else if (lpfc_cmd->status >= IOSTAT_CNT) |
4413 | lpfc_cmd->status = IOSTAT_DEFAULT; |
4414 | if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && |
4415 | !lpfc_cmd->fcp_rsp->rspStatus3 && |
4416 | (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && |
4417 | !(vport->cfg_log_verbose & LOG_FCP_UNDER)) |
4418 | logit = 0; |
4419 | else |
4420 | logit = LOG_FCP | LOG_FCP_UNDER; |
4421 | lpfc_printf_vlog(vport, KERN_WARNING, logit, |
4422 | "9030 FCP cmd x%x failed <%d/%lld> " |
4423 | "status: x%x result: x%x " |
4424 | "sid: x%x did: x%x oxid: x%x " |
4425 | "Data: x%x x%x\n" , |
4426 | cmd->cmnd[0], |
4427 | cmd->device ? cmd->device->id : 0xffff, |
4428 | cmd->device ? cmd->device->lun : 0xffff, |
4429 | lpfc_cmd->status, lpfc_cmd->result, |
4430 | vport->fc_myDID, |
4431 | (pnode) ? pnode->nlp_DID : 0, |
4432 | phba->sli_rev == LPFC_SLI_REV4 ? |
4433 | lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, |
4434 | pIocbOut->iocb.ulpContext, |
4435 | lpfc_cmd->cur_iocbq.iocb.ulpIoTag); |
4436 | |
4437 | switch (lpfc_cmd->status) { |
4438 | case IOSTAT_FCP_RSP_ERROR: |
4439 | /* Call FCP RSP handler to determine result */ |
4440 | lpfc_handle_fcp_err(vport, lpfc_cmd, |
pIocbOut->iocb.un.fcpi.fcpi_parm);
4442 | break; |
4443 | case IOSTAT_NPORT_BSY: |
4444 | case IOSTAT_FABRIC_BSY: |
4445 | cmd->result = DID_TRANSPORT_DISRUPTED << 16; |
4446 | fast_path_evt = lpfc_alloc_fast_evt(phba); |
4447 | if (!fast_path_evt) |
4448 | break; |
4449 | fast_path_evt->un.fabric_evt.event_type = |
4450 | FC_REG_FABRIC_EVENT; |
4451 | fast_path_evt->un.fabric_evt.subcategory = |
4452 | (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? |
4453 | LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; |
4454 | if (pnode) { |
4455 | memcpy(&fast_path_evt->un.fabric_evt.wwpn, |
4456 | &pnode->nlp_portname, |
4457 | sizeof(struct lpfc_name)); |
4458 | memcpy(&fast_path_evt->un.fabric_evt.wwnn, |
4459 | &pnode->nlp_nodename, |
4460 | sizeof(struct lpfc_name)); |
4461 | } |
4462 | fast_path_evt->vport = vport; |
4463 | fast_path_evt->work_evt.evt = |
4464 | LPFC_EVT_FASTPATH_MGMT_EVT; |
4465 | spin_lock_irqsave(&phba->hbalock, flags); |
list_add_tail(&fast_path_evt->work_evt.evt_listp,
&phba->work_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
4469 | lpfc_worker_wake_up(phba); |
4470 | break; |
4471 | case IOSTAT_LOCAL_REJECT: |
4472 | case IOSTAT_REMOTE_STOP: |
4473 | if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || |
4474 | lpfc_cmd->result == |
4475 | IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || |
4476 | lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || |
4477 | lpfc_cmd->result == |
4478 | IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { |
4479 | cmd->result = DID_NO_CONNECT << 16; |
4480 | break; |
4481 | } |
4482 | if (lpfc_cmd->result == IOERR_INVALID_RPI || |
4483 | lpfc_cmd->result == IOERR_NO_RESOURCES || |
4484 | lpfc_cmd->result == IOERR_ABORT_REQUESTED || |
4485 | lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { |
4486 | cmd->result = DID_TRANSPORT_DISRUPTED << 16; |
4487 | break; |
4488 | } |
4489 | if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || |
4490 | lpfc_cmd->result == IOERR_TX_DMA_FAILED) && |
4491 | pIocbOut->iocb.unsli3.sli3_bg.bgstat) { |
if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4493 | /* |
4494 | * This is a response for a BG enabled |
4495 | * cmd. Parse BG error |
4496 | */ |
4497 | lpfc_parse_bg_err(phba, lpfc_cmd, |
4498 | pIocbOut); |
4499 | break; |
4500 | } else { |
4501 | lpfc_printf_vlog(vport, KERN_WARNING, |
4502 | LOG_BG, |
4503 | "9031 non-zero BGSTAT " |
4504 | "on unprotected cmd\n" ); |
4505 | } |
4506 | } |
4507 | if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) |
4508 | && (phba->sli_rev == LPFC_SLI_REV4) |
4509 | && pnode) { |
4510 | /* This IO was aborted by the target, we don't |
4511 | * know the rxid and because we did not send the |
* ABTS we cannot generate an RRQ.
4513 | */ |
4514 | lpfc_set_rrq_active(phba, pnode, |
4515 | lpfc_cmd->cur_iocbq.sli4_lxritag, |
4516 | 0, 0); |
4517 | } |
4518 | fallthrough; |
4519 | default: |
4520 | cmd->result = DID_ERROR << 16; |
4521 | break; |
4522 | } |
4523 | |
4524 | if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) |
4525 | cmd->result = DID_TRANSPORT_DISRUPTED << 16 | |
4526 | SAM_STAT_BUSY; |
4527 | } else |
4528 | cmd->result = DID_OK << 16; |
4529 | |
4530 | if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { |
4531 | uint32_t *lp = (uint32_t *)cmd->sense_buffer; |
4532 | |
4533 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
4534 | "0710 Iodone <%d/%llu> cmd x%px, error " |
4535 | "x%x SNS x%x x%x Data: x%x x%x\n" , |
4536 | cmd->device->id, cmd->device->lun, cmd, |
4537 | cmd->result, *lp, *(lp + 3), cmd->retries, |
4538 | scsi_get_resid(cmd)); |
4539 | } |
4540 | |
4541 | if (vport->cfg_max_scsicmpl_time && |
4542 | time_after(jiffies, lpfc_cmd->start_time + |
4543 | msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { |
4544 | spin_lock_irqsave(shost->host_lock, flags); |
4545 | if (pnode) { |
4546 | if (pnode->cmd_qdepth > |
atomic_read(&pnode->cmd_pending) &&
(atomic_read(&pnode->cmd_pending) >
4549 | LPFC_MIN_TGT_QDEPTH) && |
4550 | ((cmd->cmnd[0] == READ_10) || |
4551 | (cmd->cmnd[0] == WRITE_10))) |
4552 | pnode->cmd_qdepth = |
atomic_read(&pnode->cmd_pending);
4554 | |
4555 | pnode->last_change_time = jiffies; |
4556 | } |
spin_unlock_irqrestore(shost->host_lock, flags);
4558 | } |
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4560 | |
4561 | lpfc_cmd->pCmd = NULL; |
spin_unlock(&lpfc_cmd->buf_lock);
4563 | |
4564 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
4565 | if (lpfc_cmd->ts_cmd_start) { |
4566 | lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp; |
4567 | lpfc_cmd->ts_data_io = ktime_get_ns(); |
4568 | phba->ktime_last_cmd = lpfc_cmd->ts_data_io; |
lpfc_io_ktime(phba, lpfc_cmd);
4570 | } |
4571 | #endif |
4572 | |
4573 | /* The sdev is not guaranteed to be valid post scsi_done upcall. */ |
4574 | scsi_done(cmd); |
4575 | |
4576 | /* |
4577 | * If there is an abort thread waiting for command completion |
4578 | * wake up the thread. |
4579 | */ |
spin_lock(&lpfc_cmd->buf_lock);
4581 | lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; |
4582 | if (lpfc_cmd->waitq) |
4583 | wake_up(lpfc_cmd->waitq); |
spin_unlock(&lpfc_cmd->buf_lock);
4585 | |
lpfc_release_scsi_buf(phba, lpfc_cmd);
4587 | } |
4588 | |
4589 | /** |
4590 | * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO |
4591 | * @vport: Pointer to vport object. |
4592 | * @lpfc_cmd: The scsi buffer which is going to be prep'ed. |
4593 | * @tmo: timeout value for the IO |
4594 | * |
4595 | * Based on the data-direction of the command, initialize IOCB |
4596 | * in the I/O buffer. Fill in the IOCB fields which are independent |
4597 | * of the scsi buffer |
4598 | * |
4599 | * RETURNS 0 - SUCCESS, |
4600 | **/ |
4601 | static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, |
4602 | struct lpfc_io_buf *lpfc_cmd, |
4603 | uint8_t tmo) |
4604 | { |
4605 | IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; |
4606 | struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq; |
4607 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
4608 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
4609 | struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; |
4610 | int datadir = scsi_cmnd->sc_data_direction; |
4611 | u32 fcpdl; |
4612 | |
4613 | piocbq->iocb.un.fcpi.fcpi_XRdy = 0; |
4614 | |
4615 | /* |
4616 | * There are three possibilities here - use scatter-gather segment, use |
4617 | * the single mapping, or neither. Start the lpfc command prep by |
4618 | * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first |
4619 | * data bde entry. |
4620 | */ |
if (scsi_sg_count(scsi_cmnd)) {
4622 | if (datadir == DMA_TO_DEVICE) { |
4623 | iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; |
4624 | iocb_cmd->ulpPU = PARM_READ_CHECK; |
4625 | if (vport->cfg_first_burst_size && |
4626 | (pnode->nlp_flag & NLP_FIRSTBURST)) { |
4627 | u32 xrdy_len; |
4628 | |
fcpdl = scsi_bufflen(scsi_cmnd);
4630 | xrdy_len = min(fcpdl, |
4631 | vport->cfg_first_burst_size); |
4632 | piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len; |
4633 | } |
4634 | fcp_cmnd->fcpCntl3 = WRITE_DATA; |
4635 | } else { |
4636 | iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; |
4637 | iocb_cmd->ulpPU = PARM_READ_CHECK; |
4638 | fcp_cmnd->fcpCntl3 = READ_DATA; |
4639 | } |
4640 | } else { |
4641 | iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; |
4642 | iocb_cmd->un.fcpi.fcpi_parm = 0; |
4643 | iocb_cmd->ulpPU = 0; |
4644 | fcp_cmnd->fcpCntl3 = 0; |
4645 | } |
4646 | |
4647 | /* |
4648 | * Finish initializing those IOCB fields that are independent |
4649 | * of the scsi_cmnd request_buffer |
4650 | */ |
4651 | piocbq->iocb.ulpContext = pnode->nlp_rpi; |
4652 | if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) |
4653 | piocbq->iocb.ulpFCP2Rcvy = 1; |
4654 | else |
4655 | piocbq->iocb.ulpFCP2Rcvy = 0; |
4656 | |
4657 | piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); |
4658 | piocbq->io_buf = lpfc_cmd; |
4659 | if (!piocbq->cmd_cmpl) |
4660 | piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl; |
4661 | piocbq->iocb.ulpTimeout = tmo; |
4662 | piocbq->vport = vport; |
4663 | return 0; |
4664 | } |
4665 | |
4666 | /** |
4667 | * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO |
4668 | * @vport: Pointer to vport object. |
4669 | * @lpfc_cmd: The scsi buffer which is going to be prep'ed. |
4670 | * @tmo: timeout value for the IO |
4671 | * |
4672 | * Based on the data-direction of the command copy WQE template |
4673 | * to I/O buffer WQE. Fill in the WQE fields which are independent |
4674 | * of the scsi buffer |
4675 | * |
4676 | * RETURNS 0 - SUCCESS, |
4677 | **/ |
4678 | static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, |
4679 | struct lpfc_io_buf *lpfc_cmd, |
4680 | uint8_t tmo) |
4681 | { |
4682 | struct lpfc_hba *phba = vport->phba; |
4683 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
4684 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
4685 | struct lpfc_sli4_hdw_queue *hdwq = NULL; |
4686 | struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; |
4687 | struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; |
4688 | union lpfc_wqe128 *wqe = &pwqeq->wqe; |
4689 | u16 idx = lpfc_cmd->hdwq_no; |
4690 | int datadir = scsi_cmnd->sc_data_direction; |
4691 | |
4692 | hdwq = &phba->sli4_hba.hdwq[idx]; |
4693 | |
/* Zero the entire 128-byte WQE before filling it in */
4695 | memset(wqe, 0, sizeof(union lpfc_wqe128)); |
4696 | |
4697 | /* |
4698 | * There are three possibilities here - use scatter-gather segment, use |
4699 | * the single mapping, or neither. |
4700 | */ |
if (scsi_sg_count(scsi_cmnd)) {
4702 | if (datadir == DMA_TO_DEVICE) { |
4703 | /* From the iwrite template, initialize words 7 - 11 */ |
4704 | memcpy(&wqe->words[7], |
4705 | &lpfc_iwrite_cmd_template.words[7], |
4706 | sizeof(uint32_t) * 5); |
4707 | |
4708 | fcp_cmnd->fcpCntl3 = WRITE_DATA; |
4709 | if (hdwq) |
4710 | hdwq->scsi_cstat.output_requests++; |
4711 | } else { |
4712 | /* From the iread template, initialize words 7 - 11 */ |
4713 | memcpy(&wqe->words[7], |
4714 | &lpfc_iread_cmd_template.words[7], |
4715 | sizeof(uint32_t) * 5); |
4716 | |
4717 | /* Word 7 */ |
4718 | bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo); |
4719 | |
4720 | fcp_cmnd->fcpCntl3 = READ_DATA; |
4721 | if (hdwq) |
4722 | hdwq->scsi_cstat.input_requests++; |
4723 | |
4724 | /* For a CMF Managed port, iod must be zero'ed */ |
4725 | if (phba->cmf_active_mode == LPFC_CFG_MANAGED) |
4726 | bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, |
4727 | LPFC_WQE_IOD_NONE); |
4728 | } |
4729 | } else { |
4730 | /* From the icmnd template, initialize words 4 - 11 */ |
4731 | memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], |
4732 | sizeof(uint32_t) * 8); |
4733 | |
4734 | /* Word 7 */ |
4735 | bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo); |
4736 | |
4737 | fcp_cmnd->fcpCntl3 = 0; |
4738 | if (hdwq) |
4739 | hdwq->scsi_cstat.control_requests++; |
4740 | } |
4741 | |
4742 | /* |
4743 | * Finish initializing those WQE fields that are independent |
4744 | * of the request_buffer |
4745 | */ |
4746 | |
4747 | /* Word 3 */ |
4748 | bf_set(payload_offset_len, &wqe->fcp_icmd, |
4749 | sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); |
4750 | |
4751 | /* Word 6 */ |
4752 | bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, |
4753 | phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); |
4754 | bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); |
4755 | |
	/* Word 7 */
4757 | if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) |
4758 | bf_set(wqe_erp, &wqe->generic.wqe_com, 1); |
4759 | |
4760 | bf_set(wqe_class, &wqe->generic.wqe_com, |
4761 | (pnode->nlp_fcp_info & 0x0f)); |
4762 | |
4763 | /* Word 8 */ |
4764 | wqe->generic.wqe_com.abort_tag = pwqeq->iotag; |
4765 | |
4766 | /* Word 9 */ |
4767 | bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); |
4768 | |
4769 | pwqeq->vport = vport; |
4770 | pwqeq->io_buf = lpfc_cmd; |
4771 | pwqeq->hba_wqidx = lpfc_cmd->hdwq_no; |
4772 | pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl; |
4773 | |
4774 | return 0; |
4775 | } |
4776 | |
4777 | /** |
 * lpfc_scsi_prep_cmnd - Wrapper routine to convert a scsi cmnd to an FCP info unit
4779 | * @vport: The virtual port for which this call is being executed. |
4780 | * @lpfc_cmd: The scsi command which needs to send. |
4781 | * @pnode: Pointer to lpfc_nodelist. |
4782 | * |
 * This routine initializes the fcp_cmnd and iocb/wqe data structures from
 * the scsi command and then calls the SLI-revision specific routine to
 * finish preparing the transfer.
4785 | **/ |
4786 | static int |
4787 | lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, |
4788 | struct lpfc_nodelist *pnode) |
4789 | { |
4790 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
4791 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
4792 | u8 *ptr; |
4793 | |
4794 | if (!pnode) |
4795 | return 0; |
4796 | |
4797 | lpfc_cmd->fcp_rsp->rspSnsLen = 0; |
4798 | /* clear task management bits */ |
4799 | lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; |
4800 | |
4801 | int_to_scsilun(lpfc_cmd->pCmd->device->lun, |
4802 | &lpfc_cmd->fcp_cmnd->fcp_lun); |
4803 | |
4804 | ptr = &fcp_cmnd->fcpCdb[0]; |
4805 | memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); |
4806 | if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) { |
4807 | ptr += scsi_cmnd->cmd_len; |
4808 | memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); |
4809 | } |
4810 | |
4811 | fcp_cmnd->fcpCntl1 = SIMPLE_Q; |
4812 | |
	lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
4814 | |
4815 | return 0; |
4816 | } |
4817 | |
4818 | /** |
4819 | * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit |
4820 | * @vport: The virtual port for which this call is being executed. |
4821 | * @lpfc_cmd: Pointer to lpfc_io_buf data structure. |
4822 | * @lun: Logical unit number. |
4823 | * @task_mgmt_cmd: SCSI task management command. |
4824 | * |
4825 | * This routine creates FCP information unit corresponding to @task_mgmt_cmd |
4826 | * for device with SLI-3 interface spec. |
4827 | * |
4828 | * Return codes: |
4829 | * 0 - Error |
4830 | * 1 - Success |
4831 | **/ |
4832 | static int |
4833 | lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, |
4834 | struct lpfc_io_buf *lpfc_cmd, |
4835 | u64 lun, u8 task_mgmt_cmd) |
4836 | { |
4837 | struct lpfc_iocbq *piocbq; |
4838 | IOCB_t *piocb; |
4839 | struct fcp_cmnd *fcp_cmnd; |
4840 | struct lpfc_rport_data *rdata = lpfc_cmd->rdata; |
4841 | struct lpfc_nodelist *ndlp = rdata->pnode; |
4842 | |
4843 | if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) |
4844 | return 0; |
4845 | |
4846 | piocbq = &(lpfc_cmd->cur_iocbq); |
4847 | piocbq->vport = vport; |
4848 | |
4849 | piocb = &piocbq->iocb; |
4850 | |
4851 | fcp_cmnd = lpfc_cmd->fcp_cmnd; |
4852 | /* Clear out any old data in the FCP command area */ |
4853 | memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); |
4854 | int_to_scsilun(lun, &fcp_cmnd->fcp_lun); |
4855 | fcp_cmnd->fcpCntl2 = task_mgmt_cmd; |
4856 | if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) |
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4858 | piocb->ulpCommand = CMD_FCP_ICMND64_CR; |
4859 | piocb->ulpContext = ndlp->nlp_rpi; |
4860 | piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; |
4861 | piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); |
4862 | piocb->ulpPU = 0; |
4863 | piocb->un.fcpi.fcpi_parm = 0; |
4864 | |
4865 | /* ulpTimeout is only one byte */ |
4866 | if (lpfc_cmd->timeout > 0xff) { |
4867 | /* |
4868 | * Do not timeout the command at the firmware level. |
4869 | * The driver will provide the timeout mechanism. |
4870 | */ |
4871 | piocb->ulpTimeout = 0; |
4872 | } else |
4873 | piocb->ulpTimeout = lpfc_cmd->timeout; |
4874 | |
4875 | return 1; |
4876 | } |
4877 | |
4878 | /** |
4879 | * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit |
4880 | * @vport: The virtual port for which this call is being executed. |
4881 | * @lpfc_cmd: Pointer to lpfc_io_buf data structure. |
4882 | * @lun: Logical unit number. |
4883 | * @task_mgmt_cmd: SCSI task management command. |
4884 | * |
4885 | * This routine creates FCP information unit corresponding to @task_mgmt_cmd |
4886 | * for device with SLI-4 interface spec. |
4887 | * |
4888 | * Return codes: |
4889 | * 0 - Error |
4890 | * 1 - Success |
4891 | **/ |
4892 | static int |
4893 | lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, |
4894 | struct lpfc_io_buf *lpfc_cmd, |
4895 | u64 lun, u8 task_mgmt_cmd) |
4896 | { |
4897 | struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; |
4898 | union lpfc_wqe128 *wqe = &pwqeq->wqe; |
4899 | struct fcp_cmnd *fcp_cmnd; |
4900 | struct lpfc_rport_data *rdata = lpfc_cmd->rdata; |
4901 | struct lpfc_nodelist *ndlp = rdata->pnode; |
4902 | |
4903 | if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) |
4904 | return 0; |
4905 | |
4906 | pwqeq->vport = vport; |
	/* Zero the entire 128-byte WQE */
4908 | memset(wqe, 0, sizeof(union lpfc_wqe128)); |
4909 | |
4910 | /* From the icmnd template, initialize words 4 - 11 */ |
4911 | memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], |
4912 | sizeof(uint32_t) * 8); |
4913 | |
4914 | fcp_cmnd = lpfc_cmd->fcp_cmnd; |
4915 | /* Clear out any old data in the FCP command area */ |
4916 | memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); |
4917 | int_to_scsilun(lun, &fcp_cmnd->fcp_lun); |
4918 | fcp_cmnd->fcpCntl3 = 0; |
4919 | fcp_cmnd->fcpCntl2 = task_mgmt_cmd; |
4920 | |
4921 | bf_set(payload_offset_len, &wqe->fcp_icmd, |
4922 | sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); |
4923 | bf_set(cmd_buff_len, &wqe->fcp_icmd, 0); |
4924 | bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, /* ulpContext */ |
4925 | vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); |
4926 | bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, |
4927 | ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0)); |
4928 | bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, |
4929 | (ndlp->nlp_fcp_info & 0x0f)); |
4930 | |
4931 | /* ulpTimeout is only one byte */ |
4932 | if (lpfc_cmd->timeout > 0xff) { |
4933 | /* |
4934 | * Do not timeout the command at the firmware level. |
4935 | * The driver will provide the timeout mechanism. |
4936 | */ |
4937 | bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0); |
4938 | } else { |
4939 | bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout); |
4940 | } |
4941 | |
	lpfc_prep_embed_io(vport->phba, lpfc_cmd);
4943 | bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); |
4944 | wqe->generic.wqe_com.abort_tag = pwqeq->iotag; |
4945 | bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); |
4946 | |
	lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4948 | |
4949 | return 1; |
4950 | } |
4951 | |
4952 | /** |
4953 | * lpfc_scsi_api_table_setup - Set up scsi api function jump table |
4954 | * @phba: The hba struct for which this call is being executed. |
4955 | * @dev_grp: The HBA PCI-Device group number. |
4956 | * |
4957 | * This routine sets up the SCSI interface API function jump table in @phba |
4958 | * struct. |
4959 | * Returns: 0 - success, -ENODEV - failure. |
4960 | **/ |
4961 | int |
4962 | lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) |
4963 | { |
4964 | |
4965 | phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; |
4966 | |
4967 | switch (dev_grp) { |
4968 | case LPFC_PCI_DEV_LP: |
4969 | phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; |
4970 | phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3; |
4971 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; |
4972 | phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3; |
4973 | phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3; |
4974 | phba->lpfc_scsi_prep_task_mgmt_cmd = |
4975 | lpfc_scsi_prep_task_mgmt_cmd_s3; |
4976 | break; |
4977 | case LPFC_PCI_DEV_OC: |
4978 | phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; |
4979 | phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4; |
4980 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; |
4981 | phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; |
4982 | phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4; |
4983 | phba->lpfc_scsi_prep_task_mgmt_cmd = |
4984 | lpfc_scsi_prep_task_mgmt_cmd_s4; |
4985 | break; |
4986 | default: |
4987 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
4988 | "1418 Invalid HBA PCI-device group: 0x%x\n" , |
4989 | dev_grp); |
4990 | return -ENODEV; |
4991 | } |
4992 | phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; |
4993 | return 0; |
4994 | } |
4995 | |
4996 | /** |
4997 | * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command |
4998 | * @phba: The Hba for which this call is being executed. |
4999 | * @cmdiocbq: Pointer to lpfc_iocbq data structure. |
5000 | * @rspiocbq: Pointer to lpfc_iocbq data structure. |
5001 | * |
 * This routine is the IOCB completion routine for the device reset and
 * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
5004 | **/ |
5005 | static void |
5006 | lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, |
5007 | struct lpfc_iocbq *cmdiocbq, |
5008 | struct lpfc_iocbq *rspiocbq) |
5009 | { |
5010 | struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf; |
5011 | if (lpfc_cmd) |
		lpfc_release_scsi_buf(phba, lpfc_cmd);
5013 | return; |
5014 | } |
5015 | |
5016 | /** |
5017 | * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check |
5018 | * if issuing a pci_bus_reset is possibly unsafe |
5019 | * @phba: lpfc_hba pointer. |
5020 | * |
5021 | * Description: |
 * Walks the bus_list to ensure it contains only PCI devices with the
 * Emulex vendor id, device ids that support hot reset, and at most one
 * occurrence of function 0.
5025 | * |
5026 | * Returns: |
5027 | * -EBADSLT, detected invalid device |
5028 | * 0, successful |
5029 | */ |
5030 | int |
5031 | lpfc_check_pci_resettable(struct lpfc_hba *phba) |
5032 | { |
5033 | const struct pci_dev *pdev = phba->pcidev; |
5034 | struct pci_dev *ptr = NULL; |
5035 | u8 counter = 0; |
5036 | |
5037 | /* Walk the list of devices on the pci_dev's bus */ |
5038 | list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { |
5039 | /* Check for Emulex Vendor ID */ |
5040 | if (ptr->vendor != PCI_VENDOR_ID_EMULEX) { |
5041 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
5042 | "8346 Non-Emulex vendor found: " |
5043 | "0x%04x\n" , ptr->vendor); |
5044 | return -EBADSLT; |
5045 | } |
5046 | |
5047 | /* Check for valid Emulex Device ID */ |
5048 | if (phba->sli_rev != LPFC_SLI_REV4 || |
5049 | phba->hba_flag & HBA_FCOE_MODE) { |
5050 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
5051 | "8347 Incapable PCI reset device: " |
5052 | "0x%04x\n" , ptr->device); |
5053 | return -EBADSLT; |
5054 | } |
5055 | |
5056 | /* Check for only one function 0 ID to ensure only one HBA on |
5057 | * secondary bus |
5058 | */ |
5059 | if (ptr->devfn == 0) { |
5060 | if (++counter > 1) { |
5061 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
5062 | "8348 More than one device on " |
5063 | "secondary bus found\n" ); |
5064 | return -EBADSLT; |
5065 | } |
5066 | } |
5067 | } |
5068 | |
5069 | return 0; |
5070 | } |
5071 | |
5072 | /** |
5073 | * lpfc_info - Info entry point of scsi_host_template data structure |
5074 | * @host: The scsi host for which this call is being executed. |
5075 | * |
5076 | * This routine provides module information about hba. |
5077 | * |
 * Return code:
5079 | * Pointer to char - Success. |
5080 | **/ |
5081 | const char * |
5082 | lpfc_info(struct Scsi_Host *host) |
5083 | { |
5084 | struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; |
5085 | struct lpfc_hba *phba = vport->phba; |
5086 | int link_speed = 0; |
5087 | static char lpfcinfobuf[384]; |
5088 | char tmp[384] = {0}; |
5089 | |
5090 | memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf)); |
	if (phba && phba->pcidev) {
		/* Model Description */
		scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
		    sizeof(lpfcinfobuf))
			goto buffer_done;

		/* PCI Info */
		scnprintf(tmp, sizeof(tmp),
			  " on PCI bus %02x device %02x irq %d",
			  phba->pcidev->bus->number, phba->pcidev->devfn,
			  phba->pcidev->irq);
		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
		    sizeof(lpfcinfobuf))
			goto buffer_done;

		/* Port Number */
		if (phba->Port[0]) {
			scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
			    sizeof(lpfcinfobuf))
				goto buffer_done;
		}

		/* Link Speed */
		link_speed = lpfc_sli_port_speed_get(phba);
		if (link_speed != 0) {
			scnprintf(tmp, sizeof(tmp),
				  " Logical Link Speed: %d Mbps", link_speed);
			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
			    sizeof(lpfcinfobuf))
				goto buffer_done;
		}

		/* PCI resettable */
		if (!lpfc_check_pci_resettable(phba)) {
			scnprintf(tmp, sizeof(tmp), " PCI resettable");
			strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
		}
5130 | } |
5131 | |
5132 | buffer_done: |
5133 | return lpfcinfobuf; |
5134 | } |
5135 | |
5136 | /** |
5137 | * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba |
5138 | * @phba: The Hba for which this call is being executed. |
5139 | * |
 * This routine rearms the fcp_poll_timer of @phba to expire cfg_poll_tmo
 * milliseconds from now. The default value of cfg_poll_tmo is 10
 * milliseconds.
5142 | **/ |
static inline void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
5144 | { |
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
5151 | } |
5152 | |
5153 | /** |
5154 | * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA |
5155 | * @phba: The Hba for which this call is being executed. |
5156 | * |
5157 | * This routine starts the fcp_poll_timer of @phba. |
5158 | **/ |
void lpfc_poll_start_timer(struct lpfc_hba *phba)
5160 | { |
5161 | lpfc_poll_rearm_timer(phba); |
5162 | } |
5163 | |
5164 | /** |
5165 | * lpfc_poll_timeout - Restart polling timer |
5166 | * @t: Timer construct where lpfc_hba data structure pointer is obtained. |
5167 | * |
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
5170 | **/ |
5171 | void lpfc_poll_timeout(struct timer_list *t) |
5172 | { |
5173 | struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); |
5174 | |
5175 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
5176 | lpfc_sli_handle_fast_ring_event(phba, |
5177 | &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); |
5178 | |
5179 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
5180 | lpfc_poll_rearm_timer(phba); |
5181 | } |
5182 | } |
5183 | |
5184 | /* |
5185 | * lpfc_is_command_vm_io - get the UUID from blk cgroup |
5186 | * @cmd: Pointer to scsi_cmnd data structure |
5187 | * Returns UUID if present, otherwise NULL |
5188 | */ |
5189 | static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd) |
5190 | { |
	struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
5192 | |
5193 | if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio) |
5194 | return NULL; |
5195 | return blkcg_get_fc_appid(bio); |
5196 | } |
5197 | |
5198 | /** |
5199 | * lpfc_queuecommand - scsi_host_template queuecommand entry point |
5200 | * @shost: kernel scsi host pointer. |
5201 | * @cmnd: Pointer to scsi_cmnd data structure. |
5202 | * |
 * The driver registers this routine with the scsi midlayer as the entry
 * point for submitting a @cmnd for processing. It prepares an IOCB/WQE from
 * the scsi command and hands it to the firmware; scsi_done() is invoked
 * once the driver has finished processing the command.
5206 | * |
5207 | * Return value : |
5208 | * 0 - Success |
5209 | * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. |
5210 | **/ |
5211 | static int |
5212 | lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) |
5213 | { |
5214 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
5215 | struct lpfc_hba *phba = vport->phba; |
5216 | struct lpfc_iocbq *cur_iocbq = NULL; |
5217 | struct lpfc_rport_data *rdata; |
5218 | struct lpfc_nodelist *ndlp; |
5219 | struct lpfc_io_buf *lpfc_cmd; |
5220 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); |
5221 | int err, idx; |
5222 | u8 *uuid = NULL; |
5223 | uint64_t start; |
5224 | |
5225 | start = ktime_get_ns(); |
	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5227 | |
5228 | /* sanity check on references */ |
5229 | if (unlikely(!rdata) || unlikely(!rport)) |
5230 | goto out_fail_command; |
5231 | |
5232 | err = fc_remote_port_chkready(rport); |
5233 | if (err) { |
5234 | cmnd->result = err; |
5235 | goto out_fail_command; |
5236 | } |
5237 | ndlp = rdata->pnode; |
5238 | |
	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
5240 | (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { |
5241 | |
5242 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
5243 | "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" |
5244 | " op:%02x str=%s without registering for" |
5245 | " BlockGuard - Rejecting command\n" , |
5246 | cmnd->cmnd[0], scsi_get_prot_op(cmnd), |
5247 | dif_op_str[scsi_get_prot_op(cmnd)]); |
5248 | goto out_fail_command; |
5249 | } |
5250 | |
5251 | /* |
5252 | * Catch race where our node has transitioned, but the |
5253 | * transport is still transitioning. |
5254 | */ |
5255 | if (!ndlp) |
5256 | goto out_tgt_busy1; |
5257 | |
5258 | /* Check if IO qualifies for CMF */ |
5259 | if (phba->cmf_active_mode != LPFC_CFG_OFF && |
5260 | cmnd->sc_data_direction == DMA_FROM_DEVICE && |
	    (scsi_sg_count(cmnd))) {
		/* Latency start time saved in rx_cmd_start later in routine */
		err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
5264 | if (err) |
5265 | goto out_tgt_busy1; |
5266 | } |
5267 | |
5268 | if (lpfc_ndlp_check_qdepth(phba, ndlp)) { |
		if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
5270 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, |
5271 | "3377 Target Queue Full, scsi Id:%d " |
5272 | "Qdepth:%d Pending command:%d" |
5273 | " WWNN:%02x:%02x:%02x:%02x:" |
5274 | "%02x:%02x:%02x:%02x, " |
5275 | " WWPN:%02x:%02x:%02x:%02x:" |
5276 | "%02x:%02x:%02x:%02x" , |
5277 | ndlp->nlp_sid, ndlp->cmd_qdepth, |
5278 | atomic_read(&ndlp->cmd_pending), |
5279 | ndlp->nlp_nodename.u.wwn[0], |
5280 | ndlp->nlp_nodename.u.wwn[1], |
5281 | ndlp->nlp_nodename.u.wwn[2], |
5282 | ndlp->nlp_nodename.u.wwn[3], |
5283 | ndlp->nlp_nodename.u.wwn[4], |
5284 | ndlp->nlp_nodename.u.wwn[5], |
5285 | ndlp->nlp_nodename.u.wwn[6], |
5286 | ndlp->nlp_nodename.u.wwn[7], |
5287 | ndlp->nlp_portname.u.wwn[0], |
5288 | ndlp->nlp_portname.u.wwn[1], |
5289 | ndlp->nlp_portname.u.wwn[2], |
5290 | ndlp->nlp_portname.u.wwn[3], |
5291 | ndlp->nlp_portname.u.wwn[4], |
5292 | ndlp->nlp_portname.u.wwn[5], |
5293 | ndlp->nlp_portname.u.wwn[6], |
5294 | ndlp->nlp_portname.u.wwn[7]); |
5295 | goto out_tgt_busy2; |
5296 | } |
5297 | } |
5298 | |
5299 | lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd); |
5300 | if (lpfc_cmd == NULL) { |
5301 | lpfc_rampdown_queue_depth(phba); |
5302 | |
5303 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, |
5304 | "0707 driver's buffer pool is empty, " |
5305 | "IO busied\n" ); |
5306 | goto out_host_busy; |
5307 | } |
5308 | lpfc_cmd->rx_cmd_start = start; |
5309 | |
5310 | cur_iocbq = &lpfc_cmd->cur_iocbq; |
5311 | /* |
5312 | * Store the midlayer's command structure for the completion phase |
5313 | * and complete the command initialization. |
5314 | */ |
5315 | lpfc_cmd->pCmd = cmnd; |
5316 | lpfc_cmd->rdata = rdata; |
5317 | lpfc_cmd->ndlp = ndlp; |
5318 | cur_iocbq->cmd_cmpl = NULL; |
5319 | cmnd->host_scribble = (unsigned char *)lpfc_cmd; |
5320 | |
	err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
5322 | if (err) |
5323 | goto out_host_busy_release_buf; |
5324 | |
	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
5326 | if (vport->phba->cfg_enable_bg) { |
5327 | lpfc_printf_vlog(vport, |
5328 | KERN_INFO, LOG_SCSI_CMD, |
5329 | "9033 BLKGRD: rcvd %s cmd:x%x " |
5330 | "reftag x%x cnt %u pt %x\n" , |
5331 | dif_op_str[scsi_get_prot_op(cmnd)], |
5332 | cmnd->cmnd[0], |
5333 | scsi_prot_ref_tag(cmnd), |
5334 | scsi_logical_block_count(cmnd), |
5335 | (cmnd->cmnd[1]>>5)); |
5336 | } |
5337 | err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); |
5338 | } else { |
5339 | if (vport->phba->cfg_enable_bg) { |
5340 | lpfc_printf_vlog(vport, |
5341 | KERN_INFO, LOG_SCSI_CMD, |
5342 | "9038 BLKGRD: rcvd PROT_NORMAL cmd: " |
5343 | "x%x reftag x%x cnt %u pt %x\n" , |
5344 | cmnd->cmnd[0], |
5345 | scsi_prot_ref_tag(cmnd), |
5346 | scsi_logical_block_count(cmnd), |
5347 | (cmnd->cmnd[1]>>5)); |
5348 | } |
5349 | err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); |
5350 | } |
5351 | |
5352 | if (unlikely(err)) { |
5353 | if (err == 2) { |
5354 | cmnd->result = DID_ERROR << 16; |
5355 | goto out_fail_command_release_buf; |
5356 | } |
5357 | goto out_host_busy_free_buf; |
5358 | } |
5359 | |
5360 | /* check the necessary and sufficient condition to support VMID */ |
5361 | if (lpfc_is_vmid_enabled(phba) && |
5362 | (ndlp->vmid_support || |
5363 | phba->pport->vmid_priority_tagging == |
5364 | LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { |
		/* if the I/O was generated by a VM, get the associated
		 * virtual entity id
		 */
		uuid = lpfc_is_command_vm_io(cmnd);
5368 | |
5369 | if (uuid) { |
			err = lpfc_vmid_get_appid(vport, uuid,
						  cmnd->sc_data_direction,
						  (union lpfc_vmid_io_tag *)
						  &cur_iocbq->vmid_tag);
5374 | if (!err) |
5375 | cur_iocbq->cmd_flag |= LPFC_IO_VMID; |
5376 | } |
5377 | } |
5378 | |
5379 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
5380 | if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) |
5381 | this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); |
5382 | #endif |
5383 | /* Issue I/O to adapter */ |
	err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq,
				    SLI_IOCB_RET_IOCB);
5386 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
5387 | if (start) { |
5388 | lpfc_cmd->ts_cmd_start = start; |
5389 | lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd; |
5390 | lpfc_cmd->ts_cmd_wqput = ktime_get_ns(); |
5391 | } else { |
5392 | lpfc_cmd->ts_cmd_start = 0; |
5393 | } |
5394 | #endif |
5395 | if (err) { |
5396 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5397 | "3376 FCP could not issue iocb err %x " |
5398 | "FCP cmd x%x <%d/%llu> " |
5399 | "sid: x%x did: x%x oxid: x%x " |
5400 | "Data: x%x x%x x%x x%x\n" , |
5401 | err, cmnd->cmnd[0], |
5402 | cmnd->device ? cmnd->device->id : 0xffff, |
5403 | cmnd->device ? cmnd->device->lun : (u64)-1, |
5404 | vport->fc_myDID, ndlp->nlp_DID, |
5405 | phba->sli_rev == LPFC_SLI_REV4 ? |
5406 | cur_iocbq->sli4_xritag : 0xffff, |
5407 | phba->sli_rev == LPFC_SLI_REV4 ? |
5408 | phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] : |
5409 | cur_iocbq->iocb.ulpContext, |
5410 | cur_iocbq->iotag, |
5411 | phba->sli_rev == LPFC_SLI_REV4 ? |
5412 | bf_get(wqe_tmo, |
5413 | &cur_iocbq->wqe.generic.wqe_com) : |
5414 | cur_iocbq->iocb.ulpTimeout, |
5415 | (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000)); |
5416 | |
5417 | goto out_host_busy_free_buf; |
5418 | } |
5419 | |
5420 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
5421 | lpfc_sli_handle_fast_ring_event(phba, |
5422 | &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); |
5423 | |
5424 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
5425 | lpfc_poll_rearm_timer(phba); |
5426 | } |
5427 | |
5428 | if (phba->cfg_xri_rebalancing) |
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
5430 | |
5431 | return 0; |
5432 | |
5433 | out_host_busy_free_buf: |
5434 | idx = lpfc_cmd->hdwq_no; |
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
5436 | if (phba->sli4_hba.hdwq) { |
5437 | switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { |
5438 | case WRITE_DATA: |
5439 | phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--; |
5440 | break; |
5441 | case READ_DATA: |
5442 | phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--; |
5443 | break; |
5444 | default: |
5445 | phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--; |
5446 | } |
5447 | } |
5448 | out_host_busy_release_buf: |
	lpfc_release_scsi_buf(phba, lpfc_cmd);
5450 | out_host_busy: |
	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
			     shost);
5453 | return SCSI_MLQUEUE_HOST_BUSY; |
5454 | |
5455 | out_tgt_busy2: |
	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
			     shost);
5458 | out_tgt_busy1: |
5459 | return SCSI_MLQUEUE_TARGET_BUSY; |
5460 | |
5461 | out_fail_command_release_buf: |
	lpfc_release_scsi_buf(phba, lpfc_cmd);
	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5464 | shost); |
5465 | |
5466 | out_fail_command: |
	scsi_done(cmnd);
5468 | return 0; |
5469 | } |
5470 | |
5471 | /* |
5472 | * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport |
5473 | * @vport: The virtual port for which this call is being executed. |
5474 | */ |
5475 | void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport) |
5476 | { |
5477 | u32 bucket; |
5478 | struct lpfc_vmid *cur; |
5479 | |
	if (vport->port_type == LPFC_PHYSICAL_PORT)
		del_timer_sync(&vport->phba->inactive_vmid_poll);

	kfree(vport->qfpa_res);
	kfree(vport->vmid_priority.vmid_range);
	kfree(vport->vmid);

	if (!hash_empty(vport->hash_table))
		hash_for_each(vport->hash_table, bucket, cur, hnode)
			hash_del(&cur->hnode);
5490 | |
5491 | vport->qfpa_res = NULL; |
5492 | vport->vmid_priority.vmid_range = NULL; |
5493 | vport->vmid = NULL; |
5494 | vport->cur_vmid_cnt = 0; |
5495 | } |
5496 | |
5497 | /** |
5498 | * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point |
5499 | * @cmnd: Pointer to scsi_cmnd data structure. |
5500 | * |
5501 | * This routine aborts @cmnd pending in base driver. |
5502 | * |
5503 | * Return code : |
5504 | * 0x2003 - Error |
5505 | * 0x2002 - Success |
5506 | **/ |
5507 | static int |
5508 | lpfc_abort_handler(struct scsi_cmnd *cmnd) |
5509 | { |
5510 | struct Scsi_Host *shost = cmnd->device->host; |
5511 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); |
5512 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
5513 | struct lpfc_hba *phba = vport->phba; |
5514 | struct lpfc_iocbq *iocb; |
5515 | struct lpfc_io_buf *lpfc_cmd; |
5516 | int ret = SUCCESS, status = 0; |
5517 | struct lpfc_sli_ring *pring_s4 = NULL; |
5518 | struct lpfc_sli_ring *pring = NULL; |
5519 | int ret_val; |
5520 | unsigned long flags; |
5521 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); |
5522 | |
5523 | status = fc_block_rport(rport); |
5524 | if (status != 0 && status != SUCCESS) |
5525 | return status; |
5526 | |
5527 | lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble; |
5528 | if (!lpfc_cmd) |
5529 | return ret; |
5530 | |
5531 | /* Guard against IO completion being called at same time */ |
5532 | spin_lock_irqsave(&lpfc_cmd->buf_lock, flags); |
5533 | |
	spin_lock(&phba->hbalock);
5535 | /* driver queued commands are in process of being flushed */ |
5536 | if (phba->hba_flag & HBA_IOQ_FLUSH) { |
5537 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
5538 | "3168 SCSI Layer abort requested I/O has been " |
5539 | "flushed by LLD.\n" ); |
5540 | ret = FAILED; |
5541 | goto out_unlock_hba; |
5542 | } |
5543 | |
5544 | if (!lpfc_cmd->pCmd) { |
5545 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
5546 | "2873 SCSI Layer I/O Abort Request IO CMPL Status " |
5547 | "x%x ID %d LUN %llu\n" , |
5548 | SUCCESS, cmnd->device->id, cmnd->device->lun); |
5549 | goto out_unlock_hba; |
5550 | } |
5551 | |
5552 | iocb = &lpfc_cmd->cur_iocbq; |
5553 | if (phba->sli_rev == LPFC_SLI_REV4) { |
5554 | pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; |
5555 | if (!pring_s4) { |
5556 | ret = FAILED; |
5557 | goto out_unlock_hba; |
5558 | } |
		spin_lock(&pring_s4->ring_lock);
5560 | } |
5561 | /* the command is in process of being cancelled */ |
5562 | if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { |
5563 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
5564 | "3169 SCSI Layer abort requested I/O has been " |
5565 | "cancelled by LLD.\n" ); |
5566 | ret = FAILED; |
5567 | goto out_unlock_ring; |
5568 | } |
5569 | /* |
5570 | * If pCmd field of the corresponding lpfc_io_buf structure |
5571 | * points to a different SCSI command, then the driver has |
5572 | * already completed this command, but the midlayer did not |
5573 | * see the completion before the eh fired. Just return SUCCESS. |
5574 | */ |
5575 | if (lpfc_cmd->pCmd != cmnd) { |
5576 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
5577 | "3170 SCSI Layer abort requested I/O has been " |
5578 | "completed by LLD.\n" ); |
5579 | goto out_unlock_ring; |
5580 | } |
5581 | |
5582 | WARN_ON(iocb->io_buf != lpfc_cmd); |
5583 | |
5584 | /* abort issued in recovery is still in progress */ |
5585 | if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) { |
5586 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
5587 | "3389 SCSI Layer I/O Abort Request is pending\n" ); |
5588 | if (phba->sli_rev == LPFC_SLI_REV4) |
5589 | spin_unlock(lock: &pring_s4->ring_lock); |
5590 | spin_unlock(lock: &phba->hbalock); |
5591 | spin_unlock_irqrestore(lock: &lpfc_cmd->buf_lock, flags); |
5592 | goto wait_for_cmpl; |
5593 | } |
5594 | |
5595 | lpfc_cmd->waitq = &waitq; |
5596 | if (phba->sli_rev == LPFC_SLI_REV4) { |
		spin_unlock(&pring_s4->ring_lock);
		ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
						      lpfc_sli_abort_fcp_cmpl);
5600 | } else { |
5601 | pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; |
5602 | ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, |
5603 | lpfc_sli_abort_fcp_cmpl); |
5604 | } |
5605 | |
5606 | /* Make sure HBA is alive */ |
5607 | lpfc_issue_hb_tmo(phba); |
5608 | |
5609 | if (ret_val != IOCB_SUCCESS) { |
5610 | /* Indicate the IO is not being aborted by the driver. */ |
5611 | lpfc_cmd->waitq = NULL; |
5612 | ret = FAILED; |
5613 | goto out_unlock_hba; |
5614 | } |
5615 | |
5616 | /* no longer need the lock after this point */ |
	spin_unlock(&phba->hbalock);
	spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5619 | |
5620 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
5621 | lpfc_sli_handle_fast_ring_event(phba, |
5622 | &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); |
5623 | |
5624 | wait_for_cmpl: |
5625 | /* |
5626 | * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait |
5627 | * for abort to complete. |
5628 | */ |
5629 | wait_event_timeout(waitq, |
5630 | (lpfc_cmd->pCmd != cmnd), |
5631 | msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); |
5632 | |
	spin_lock(&lpfc_cmd->buf_lock);
5634 | |
5635 | if (lpfc_cmd->pCmd == cmnd) { |
5636 | ret = FAILED; |
5637 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
5638 | "0748 abort handler timed out waiting " |
5639 | "for aborting I/O (xri:x%x) to complete: " |
5640 | "ret %#x, ID %d, LUN %llu\n" , |
5641 | iocb->sli4_xritag, ret, |
5642 | cmnd->device->id, cmnd->device->lun); |
5643 | } |
5644 | |
5645 | lpfc_cmd->waitq = NULL; |
5646 | |
	spin_unlock(&lpfc_cmd->buf_lock);
5648 | goto out; |
5649 | |
5650 | out_unlock_ring: |
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring_s4->ring_lock);
out_unlock_hba:
	spin_unlock(&phba->hbalock);
	spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5656 | out: |
5657 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
5658 | "0749 SCSI Layer I/O Abort Request Status x%x ID %d " |
5659 | "LUN %llu\n" , ret, cmnd->device->id, |
5660 | cmnd->device->lun); |
5661 | return ret; |
5662 | } |
5663 | |
5664 | static char * |
5665 | lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) |
5666 | { |
5667 | switch (task_mgmt_cmd) { |
	case FCP_ABORT_TASK_SET:
		return "ABORT_TASK_SET";
	case FCP_CLEAR_TASK_SET:
		return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
	case FCP_TARGET_RESET:
		return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
	case FCP_TERMINATE_TASK:
		return "FCP_TERMINATE_TASK";
	default:
		return "unknown";
5684 | } |
5685 | } |
5686 | |
5687 | |
5688 | /** |
5689 | * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed |
5690 | * @vport: The virtual port for which this call is being executed. |
5691 | * @lpfc_cmd: Pointer to lpfc_io_buf data structure. |
5692 | * |
 * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded.
5694 | * |
5695 | * Return code : |
5696 | * 0x2003 - Error |
5697 | * 0x2002 - Success |
5698 | **/ |
5699 | static int |
5700 | lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) |
5701 | { |
5702 | struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; |
5703 | uint32_t rsp_info; |
5704 | uint32_t rsp_len; |
5705 | uint8_t rsp_info_code; |
5706 | int ret = FAILED; |
5707 | |
5708 | |
5709 | if (fcprsp == NULL) |
5710 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5711 | "0703 fcp_rsp is missing\n" ); |
5712 | else { |
5713 | rsp_info = fcprsp->rspStatus2; |
5714 | rsp_len = be32_to_cpu(fcprsp->rspRspLen); |
5715 | rsp_info_code = fcprsp->rspInfo3; |
5716 | |
5717 | |
5718 | lpfc_printf_vlog(vport, KERN_INFO, |
5719 | LOG_FCP, |
5720 | "0706 fcp_rsp valid 0x%x," |
5721 | " rsp len=%d code 0x%x\n" , |
5722 | rsp_info, |
5723 | rsp_len, rsp_info_code); |
5724 | |
5725 | /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN |
5726 | * field specifies the number of valid bytes of FCP_RSP_INFO. |
5727 | * The FCP_RSP_LEN field shall be set to 0x04 or 0x08 |
5728 | */ |
5729 | if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && |
5730 | ((rsp_len == 8) || (rsp_len == 4))) { |
5731 | switch (rsp_info_code) { |
5732 | case RSP_NO_FAILURE: |
5733 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5734 | "0715 Task Mgmt No Failure\n" ); |
5735 | ret = SUCCESS; |
5736 | break; |
5737 | case RSP_TM_NOT_SUPPORTED: /* TM rejected */ |
5738 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5739 | "0716 Task Mgmt Target " |
5740 | "reject\n" ); |
5741 | break; |
5742 | case RSP_TM_NOT_COMPLETED: /* TM failed */ |
5743 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5744 | "0717 Task Mgmt Target " |
5745 | "failed TM\n" ); |
5746 | break; |
5747 | case RSP_TM_INVALID_LU: /* TM to invalid LU! */ |
5748 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5749 | "0718 Task Mgmt to invalid " |
5750 | "LUN\n" ); |
5751 | break; |
5752 | } |
5753 | } |
5754 | } |
5755 | return ret; |
5756 | } |
5757 | |
5758 | |
5759 | /** |
5760 | * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler |
5761 | * @vport: The virtual port for which this call is being executed. |
5762 | * @rport: Pointer to remote port |
5763 | * @tgt_id: Target ID of remote device. |
5764 | * @lun_id: Lun number for the TMF |
5765 | * @task_mgmt_cmd: type of TMF to send |
5766 | * |
5767 | * This routine builds and sends a TMF (SCSI Task Mgmt Function) to |
5768 | * a remote port. |
5769 | * |
5770 | * Return Code: |
5771 | * 0x2003 - Error |
5772 | * 0x2002 - Success. |
5773 | **/ |
5774 | static int |
5775 | lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport, |
5776 | unsigned int tgt_id, uint64_t lun_id, |
5777 | uint8_t task_mgmt_cmd) |
5778 | { |
5779 | struct lpfc_hba *phba = vport->phba; |
5780 | struct lpfc_io_buf *lpfc_cmd; |
5781 | struct lpfc_iocbq *iocbq; |
5782 | struct lpfc_iocbq *iocbqrsp; |
5783 | struct lpfc_rport_data *rdata; |
5784 | struct lpfc_nodelist *pnode; |
5785 | int ret; |
5786 | int status; |
5787 | |
5788 | rdata = rport->dd_data; |
5789 | if (!rdata || !rdata->pnode) |
5790 | return FAILED; |
5791 | pnode = rdata->pnode; |
5792 | |
	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL);
5794 | if (lpfc_cmd == NULL) |
5795 | return FAILED; |
5796 | lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; |
5797 | lpfc_cmd->rdata = rdata; |
5798 | lpfc_cmd->pCmd = NULL; |
5799 | lpfc_cmd->ndlp = pnode; |
5800 | |
5801 | status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, |
5802 | task_mgmt_cmd); |
5803 | if (!status) { |
		lpfc_release_scsi_buf(phba, lpfc_cmd);
5805 | return FAILED; |
5806 | } |
5807 | |
5808 | iocbq = &lpfc_cmd->cur_iocbq; |
5809 | iocbqrsp = lpfc_sli_get_iocbq(phba); |
5810 | if (iocbqrsp == NULL) { |
		lpfc_release_scsi_buf(phba, lpfc_cmd);
5812 | return FAILED; |
5813 | } |
5814 | iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl; |
5815 | iocbq->vport = vport; |
5816 | |
5817 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5818 | "0702 Issue %s to TGT %d LUN %llu " |
5819 | "rpi x%x nlp_flag x%x Data: x%x x%x\n" , |
5820 | lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, |
5821 | pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, |
5822 | iocbq->cmd_flag); |
5823 | |
5824 | status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, |
5825 | iocbq, iocbqrsp, lpfc_cmd->timeout); |
5826 | if ((status != IOCB_SUCCESS) || |
	    (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) {
		if (status != IOCB_SUCCESS ||
		    get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR)
5830 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
5831 | "0727 TMF %s to TGT %d LUN %llu " |
5832 | "failed (%d, %d) cmd_flag x%x\n" , |
5833 | lpfc_taskmgmt_name(task_mgmt_cmd), |
5834 | tgt_id, lun_id, |
5835 | get_job_ulpstatus(phba, iocbqrsp), |
5836 | get_job_word4(phba, iocbqrsp), |
5837 | iocbq->cmd_flag); |
		/* Reaching here with status == IOCB_SUCCESS means the
		 * ulpstatus was something other than IOSTAT_SUCCESS.
		 */
		if (status == IOCB_SUCCESS) {
			if (get_job_ulpstatus(phba, iocbqrsp) ==
5841 | IOSTAT_FCP_RSP_ERROR) |
5842 | /* Something in the FCP_RSP was invalid. |
5843 | * Check conditions */ |
5844 | ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); |
5845 | else |
5846 | ret = FAILED; |
5847 | } else if ((status == IOCB_TIMEDOUT) || |
5848 | (status == IOCB_ABORTED)) { |
5849 | ret = TIMEOUT_ERROR; |
5850 | } else { |
5851 | ret = FAILED; |
5852 | } |
5853 | } else |
5854 | ret = SUCCESS; |
5855 | |
5856 | lpfc_sli_release_iocbq(phba, iocbqrsp); |
5857 | |
5858 | if (status != IOCB_TIMEDOUT) |
		lpfc_release_scsi_buf(phba, lpfc_cmd);
5860 | |
5861 | return ret; |
5862 | } |
5863 | |
5864 | /** |
 * lpfc_chk_tgt_mapped - Check that the scsi target (rport) is mapped
5866 | * @vport: The virtual port to check on |
5867 | * @rport: Pointer to fc_rport data structure. |
5868 | * |
5869 | * This routine delays until the scsi target (aka rport) for the |
5870 | * command exists (is present and logged in) or we declare it non-existent. |
5871 | * |
5872 | * Return code : |
5873 | * 0x2003 - Error |
5874 | * 0x2002 - Success |
5875 | **/ |
5876 | static int |
5877 | lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport) |
5878 | { |
5879 | struct lpfc_rport_data *rdata; |
5880 | struct lpfc_nodelist *pnode = NULL; |
5881 | unsigned long later; |
5882 | |
5883 | rdata = rport->dd_data; |
5884 | if (!rdata) { |
5885 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5886 | "0797 Tgt Map rport failure: rdata x%px\n" , rdata); |
5887 | return FAILED; |
5888 | } |
5889 | pnode = rdata->pnode; |
5890 | |
5891 | /* |
5892 | * If target is not in a MAPPED state, delay until |
5893 | * target is rediscovered or devloss timeout expires. |
5894 | */ |
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5896 | while (time_after(later, jiffies)) { |
5897 | if (!pnode) |
5898 | return FAILED; |
5899 | if (pnode->nlp_state == NLP_STE_MAPPED_NODE) |
5900 | return SUCCESS; |
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5902 | rdata = rport->dd_data; |
5903 | if (!rdata) |
5904 | return FAILED; |
5905 | pnode = rdata->pnode; |
5906 | } |
5907 | if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) |
5908 | return FAILED; |
5909 | return SUCCESS; |
5910 | } |
5911 | |
5912 | /** |
 * lpfc_reset_flush_io_context - Flush outstanding I/O contexts after a reset
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
5916 | * @lun_id: If aborting by Lun context - specifies the lun id |
5917 | * @context: specifies the context level to flush at. |
5918 | * |
5919 | * After a reset condition via TMF, we need to flush orphaned i/o |
5920 | * contexts from the adapter. This routine aborts any contexts |
5921 | * outstanding, then waits for their completions. The wait is |
5922 | * bounded by devloss_tmo though. |
5923 | * |
5924 | * Return code : |
5925 | * 0x2003 - Error |
5926 | * 0x2002 - Success |
5927 | **/ |
5928 | static int |
5929 | lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, |
5930 | uint64_t lun_id, lpfc_ctx_cmd context) |
5931 | { |
5932 | struct lpfc_hba *phba = vport->phba; |
5933 | unsigned long later; |
5934 | int cnt; |
5935 | |
5936 | cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); |
5937 | if (cnt) |
5938 | lpfc_sli_abort_taskmgmt(vport, |
5939 | &phba->sli.sli3_ring[LPFC_FCP_RING], |
5940 | tgt_id, lun_id, context); |
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5942 | while (time_after(later, jiffies) && cnt) { |
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5944 | cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); |
5945 | } |
5946 | if (cnt) { |
5947 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
5948 | "0724 I/O flush failure for context %s : cnt x%x\n" , |
5949 | ((context == LPFC_CTX_LUN) ? "LUN" : |
5950 | ((context == LPFC_CTX_TGT) ? "TGT" : |
5951 | ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown" ))), |
5952 | cnt); |
5953 | return FAILED; |
5954 | } |
5955 | return SUCCESS; |
5956 | } |
5957 | |
5958 | /** |
5959 | * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point |
5960 | * @cmnd: Pointer to scsi_cmnd data structure. |
5961 | * |
5962 | * This routine does a device reset by sending a LUN_RESET task management |
5963 | * command. |
5964 | * |
5965 | * Return code : |
5966 | * 0x2003 - Error |
5967 | * 0x2002 - Success |
5968 | **/ |
5969 | static int |
5970 | lpfc_device_reset_handler(struct scsi_cmnd *cmnd) |
5971 | { |
5972 | struct Scsi_Host *shost = cmnd->device->host; |
5973 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); |
5974 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
5975 | struct lpfc_rport_data *rdata; |
5976 | struct lpfc_nodelist *pnode; |
5977 | unsigned tgt_id = cmnd->device->id; |
5978 | uint64_t lun_id = cmnd->device->lun; |
5979 | struct lpfc_scsi_event_header scsi_event; |
5980 | int status; |
5981 | u32 logit = LOG_FCP; |
5982 | |
5983 | if (!rport) |
5984 | return FAILED; |
5985 | |
5986 | rdata = rport->dd_data; |
5987 | if (!rdata || !rdata->pnode) { |
5988 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
5989 | "0798 Device Reset rdata failure: rdata x%px\n" , |
5990 | rdata); |
5991 | return FAILED; |
5992 | } |
5993 | pnode = rdata->pnode; |
5994 | status = fc_block_rport(rport); |
5995 | if (status != 0 && status != SUCCESS) |
5996 | return status; |
5997 | |
5998 | status = lpfc_chk_tgt_mapped(vport, rport); |
5999 | if (status == FAILED) { |
6000 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
6001 | "0721 Device Reset rport failure: rdata x%px\n" , rdata); |
6002 | return FAILED; |
6003 | } |
6004 | |
6005 | scsi_event.event_type = FC_REG_SCSI_EVENT; |
6006 | scsi_event.subcategory = LPFC_EVENT_LUNRESET; |
6007 | scsi_event.lun = lun_id; |
6008 | memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); |
6009 | memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); |
6010 | |
	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6013 | |
6014 | status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, |
6015 | FCP_LUN_RESET); |
6016 | if (status != SUCCESS) |
6017 | logit = LOG_TRACE_EVENT; |
6018 | |
6019 | lpfc_printf_vlog(vport, KERN_ERR, logit, |
6020 | "0713 SCSI layer issued Device Reset (%d, %llu) " |
6021 | "return x%x\n" , tgt_id, lun_id, status); |
6022 | |
6023 | /* |
6024 | * We have to clean up i/o as : they may be orphaned by the TMF; |
6025 | * or if the TMF failed, they may be in an indeterminate state. |
6026 | * So, continue on. |
6027 | * We will report success if all the i/o aborts successfully. |
6028 | */ |
6029 | if (status == SUCCESS) |
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						     LPFC_CTX_LUN);
6032 | |
6033 | return status; |
6034 | } |
6035 | |
6036 | /** |
6037 | * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point |
6038 | * @cmnd: Pointer to scsi_cmnd data structure. |
6039 | * |
6040 | * This routine does a target reset by sending a TARGET_RESET task management |
6041 | * command. |
6042 | * |
6043 | * Return code : |
6044 | * 0x2003 - Error |
6045 | * 0x2002 - Success |
6046 | **/ |
6047 | static int |
6048 | lpfc_target_reset_handler(struct scsi_cmnd *cmnd) |
6049 | { |
6050 | struct Scsi_Host *shost = cmnd->device->host; |
6051 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); |
6052 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
6053 | struct lpfc_rport_data *rdata; |
6054 | struct lpfc_nodelist *pnode; |
6055 | unsigned tgt_id = cmnd->device->id; |
6056 | uint64_t lun_id = cmnd->device->lun; |
6057 | struct lpfc_scsi_event_header scsi_event; |
6058 | int status; |
6059 | u32 logit = LOG_FCP; |
6060 | u32 dev_loss_tmo = vport->cfg_devloss_tmo; |
6061 | unsigned long flags; |
6062 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); |
6063 | |
6064 | if (!rport) |
6065 | return FAILED; |
6066 | |
6067 | rdata = rport->dd_data; |
6068 | if (!rdata || !rdata->pnode) { |
6069 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
6070 | "0799 Target Reset rdata failure: rdata x%px\n" , |
6071 | rdata); |
6072 | return FAILED; |
6073 | } |
6074 | pnode = rdata->pnode; |
6075 | status = fc_block_rport(rport); |
6076 | if (status != 0 && status != SUCCESS) |
6077 | return status; |
6078 | |
6079 | status = lpfc_chk_tgt_mapped(vport, rport); |
6080 | if (status == FAILED) { |
6081 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
6082 | "0722 Target Reset rport failure: rdata x%px\n" , rdata); |
6083 | if (pnode) { |
6084 | spin_lock_irqsave(&pnode->lock, flags); |
6085 | pnode->nlp_flag &= ~NLP_NPR_ADISC; |
6086 | pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; |
			spin_unlock_irqrestore(&pnode->lock, flags);
6088 | } |
		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					    LPFC_CTX_TGT);
6091 | return FAST_IO_FAIL; |
6092 | } |
6093 | |
6094 | scsi_event.event_type = FC_REG_SCSI_EVENT; |
6095 | scsi_event.subcategory = LPFC_EVENT_TGTRESET; |
6096 | scsi_event.lun = 0; |
6097 | memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); |
6098 | memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); |
6099 | |
	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6102 | |
6103 | status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, |
6104 | FCP_TARGET_RESET); |
6105 | if (status != SUCCESS) { |
6106 | logit = LOG_TRACE_EVENT; |
6107 | |
6108 | /* Issue LOGO, if no LOGO is outstanding */ |
6109 | spin_lock_irqsave(&pnode->lock, flags); |
6110 | if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) && |
6111 | !pnode->logo_waitq) { |
6112 | pnode->logo_waitq = &waitq; |
6113 | pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; |
6114 | pnode->nlp_flag |= NLP_ISSUE_LOGO; |
6115 | pnode->save_flags |= NLP_WAIT_FOR_LOGO; |
			spin_unlock_irqrestore(&pnode->lock, flags);
6117 | lpfc_unreg_rpi(vport, pnode); |
6118 | wait_event_timeout(waitq, |
6119 | (!(pnode->save_flags & |
6120 | NLP_WAIT_FOR_LOGO)), |
6121 | msecs_to_jiffies(dev_loss_tmo * |
6122 | 1000)); |
6123 | |
6124 | if (pnode->save_flags & NLP_WAIT_FOR_LOGO) { |
6125 | lpfc_printf_vlog(vport, KERN_ERR, logit, |
6126 | "0725 SCSI layer TGTRST " |
6127 | "failed & LOGO TMO (%d, %llu) " |
6128 | "return x%x\n" , |
6129 | tgt_id, lun_id, status); |
6130 | spin_lock_irqsave(&pnode->lock, flags); |
6131 | pnode->save_flags &= ~NLP_WAIT_FOR_LOGO; |
6132 | } else { |
6133 | spin_lock_irqsave(&pnode->lock, flags); |
6134 | } |
6135 | pnode->logo_waitq = NULL; |
			spin_unlock_irqrestore(&pnode->lock, flags);
6137 | status = SUCCESS; |
6138 | |
6139 | } else { |
			spin_unlock_irqrestore(&pnode->lock, flags);
6141 | status = FAILED; |
6142 | } |
6143 | } |
6144 | |
6145 | lpfc_printf_vlog(vport, KERN_ERR, logit, |
6146 | "0723 SCSI layer issued Target Reset (%d, %llu) " |
6147 | "return x%x\n" , tgt_id, lun_id, status); |
6148 | |
6149 | /* |
6150 | * We have to clean up i/o as : they may be orphaned by the TMF; |
6151 | * or if the TMF failed, they may be in an indeterminate state. |
6152 | * So, continue on. |
6153 | * We will report success if all the i/o aborts successfully. |
6154 | */ |
6155 | if (status == SUCCESS) |
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						     LPFC_CTX_TGT);
6158 | return status; |
6159 | } |
6160 | |
6161 | /** |
6162 | * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt |
6163 | * @cmnd: Pointer to scsi_cmnd data structure. |
6164 | * |
 * This routine does a host reset of the adapter port. It brings the HBA
 * offline, performs a board restart, and then brings the board back online.
 * The lpfc_offline calls lpfc_sli_hba_down, which aborts and locally rejects
 * all outstanding SCSI commands to the host, with the errors returned to
 * the SCSI mid-level. As this is the SCSI mid-level's last resort of error
 * handling, it only returns error if resetting the adapter is not
 * successful; in all other cases it returns success.
6172 | * |
6173 | * Return code : |
6174 | * 0x2003 - Error |
6175 | * 0x2002 - Success |
6176 | **/ |
6177 | static int |
6178 | lpfc_host_reset_handler(struct scsi_cmnd *cmnd) |
6179 | { |
6180 | struct Scsi_Host *shost = cmnd->device->host; |
6181 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
6182 | struct lpfc_hba *phba = vport->phba; |
6183 | int rc, ret = SUCCESS; |
6184 | |
6185 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
6186 | "3172 SCSI layer issued Host Reset Data:\n" ); |
6187 | |
6188 | lpfc_offline_prep(phba, LPFC_MBX_WAIT); |
6189 | lpfc_offline(phba); |
6190 | rc = lpfc_sli_brdrestart(phba); |
6191 | if (rc) |
6192 | goto error; |
6193 | |
6194 | /* Wait for successful restart of adapter */ |
6195 | if (phba->sli_rev < LPFC_SLI_REV4) { |
6196 | rc = lpfc_sli_chipset_init(phba); |
6197 | if (rc) |
6198 | goto error; |
6199 | } |
6200 | |
6201 | rc = lpfc_online(phba); |
6202 | if (rc) |
6203 | goto error; |
6204 | |
6205 | lpfc_unblock_mgmt_io(phba); |
6206 | |
6207 | return ret; |
6208 | error: |
6209 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
6210 | "3323 Failed host reset\n" ); |
6211 | lpfc_unblock_mgmt_io(phba); |
6212 | return FAILED; |
6213 | } |
6214 | |
6215 | /** |
6216 | * lpfc_slave_alloc - scsi_host_template slave_alloc entry point |
6217 | * @sdev: Pointer to scsi_device. |
6218 | * |
 * This routine populates cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers. It also ensures no more scsi
 * buffers are allocated than the HBA limit conveyed to the midlayer. This
 * list of scsi buffers exists for the lifetime of the driver.
6223 | * |
6224 | * Return codes: |
6225 | * non-0 - Error |
6226 | * 0 - Success |
6227 | **/ |
6228 | static int |
6229 | lpfc_slave_alloc(struct scsi_device *sdev) |
6230 | { |
6231 | struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; |
6232 | struct lpfc_hba *phba = vport->phba; |
6233 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); |
6234 | uint32_t total = 0; |
6235 | uint32_t num_to_alloc = 0; |
6236 | int num_allocated = 0; |
6237 | uint32_t sdev_cnt; |
6238 | struct lpfc_device_data *device_data; |
6239 | unsigned long flags; |
6240 | struct lpfc_name target_wwpn; |
6241 | |
6242 | if (!rport || fc_remote_port_chkready(rport)) |
6243 | return -ENXIO; |
6244 | |
6245 | if (phba->cfg_fof) { |
6246 | |
6247 | /* |
6248 | * Check to see if the device data structure for the lun |
6249 | * exists. If not, create one. |
6250 | */ |
6251 | |
		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
6253 | spin_lock_irqsave(&phba->devicelock, flags); |
		device_data = __lpfc_get_device_data(phba,
						     &phba->luns,
						     &vport->fc_portname,
						     &target_wwpn,
						     sdev->lun);
6259 | if (!device_data) { |
			spin_unlock_irqrestore(&phba->devicelock, flags);
6261 | device_data = lpfc_create_device_data(phba, |
6262 | &vport->fc_portname, |
6263 | &target_wwpn, |
6264 | sdev->lun, |
6265 | phba->cfg_XLanePriority, |
6266 | true); |
6267 | if (!device_data) |
6268 | return -ENOMEM; |
6269 | spin_lock_irqsave(&phba->devicelock, flags); |
			list_add_tail(&device_data->listentry, &phba->luns);
6271 | } |
6272 | device_data->rport_data = rport->dd_data; |
6273 | device_data->available = true; |
		spin_unlock_irqrestore(&phba->devicelock, flags);
6275 | sdev->hostdata = device_data; |
6276 | } else { |
6277 | sdev->hostdata = rport->dd_data; |
6278 | } |
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
6280 | |
6281 | /* For SLI4, all IO buffers are pre-allocated */ |
6282 | if (phba->sli_rev == LPFC_SLI_REV4) |
6283 | return 0; |
6284 | |
6285 | /* This code path is now ONLY for SLI3 adapters */ |
6286 | |
6287 | /* |
6288 | * Populate the cmds_per_lun count scsi_bufs into this host's globally |
6289 | * available list of scsi buffers. Don't allocate more than the |
6290 | * HBA limit conveyed to the midlayer via the host structure. The |
6291 | * formula accounts for the lun_queue_depth + error handlers + 1 |
6292 | * extra. This list of scsi bufs exists for the lifetime of the driver. |
6293 | */ |
6294 | total = phba->total_scsi_bufs; |
6295 | num_to_alloc = vport->cfg_lun_queue_depth + 2; |
6296 | |
6297 | /* If allocated buffers are enough do nothing */ |
6298 | if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total) |
6299 | return 0; |
6300 | |
6301 | /* Allow some exchanges to be available always to complete discovery */ |
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
6303 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
6304 | "0704 At limitation of %d preallocated " |
6305 | "command buffers\n" , total); |
6306 | return 0; |
6308 | } else if (total + num_to_alloc > |
		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
6310 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
6311 | "0705 Allocation request of %d " |
6312 | "command buffers will exceed max of %d. " |
6313 | "Reducing allocation request to %d.\n" , |
6314 | num_to_alloc, phba->cfg_hba_queue_depth, |
6315 | (phba->cfg_hba_queue_depth - total)); |
6316 | num_to_alloc = phba->cfg_hba_queue_depth - total; |
6317 | } |
6318 | num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc); |
6319 | if (num_to_alloc != num_allocated) { |
6320 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
6321 | "0708 Allocation request of %d " |
6322 | "command buffers did not succeed. " |
6323 | "Allocated %d buffers.\n" , |
6324 | num_to_alloc, num_allocated); |
6325 | } |
6326 | if (num_allocated > 0) |
6327 | phba->total_scsi_bufs += num_allocated; |
6328 | return 0; |
6329 | } |
6330 | |
6331 | /** |
6332 | * lpfc_slave_configure - scsi_host_template slave_configure entry point |
6333 | * @sdev: Pointer to scsi_device. |
6334 | * |
 * This routine configures the following items
6336 | * - Tag command queuing support for @sdev if supported. |
6337 | * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. |
6338 | * |
6339 | * Return codes: |
6340 | * 0 - Success |
6341 | **/ |
6342 | static int |
6343 | lpfc_slave_configure(struct scsi_device *sdev) |
6344 | { |
6345 | struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; |
6346 | struct lpfc_hba *phba = vport->phba; |
6347 | |
6348 | scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth); |
6349 | |
6350 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
6351 | lpfc_sli_handle_fast_ring_event(phba, |
6352 | &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); |
6353 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
6354 | lpfc_poll_rearm_timer(phba); |
6355 | } |
6356 | |
6357 | return 0; |
6358 | } |
6359 | |
6360 | /** |
6361 | * lpfc_slave_destroy - slave_destroy entry point of SHT data structure |
6362 | * @sdev: Pointer to scsi_device. |
6363 | * |
 * This routine sets the @sdev hostdata field to NULL.
6365 | **/ |
6366 | static void |
6367 | lpfc_slave_destroy(struct scsi_device *sdev) |
6368 | { |
6369 | struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; |
6370 | struct lpfc_hba *phba = vport->phba; |
6371 | unsigned long flags; |
6372 | struct lpfc_device_data *device_data = sdev->hostdata; |
6373 | |
	atomic_dec(&phba->sdev_cnt);
	if ((phba->cfg_fof) && (device_data)) {
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data->available = false;
		if (!device_data->oas_enabled)
			lpfc_delete_device_data(phba, device_data);
		spin_unlock_irqrestore(&phba->devicelock, flags);
6381 | } |
6382 | sdev->hostdata = NULL; |
6383 | return; |
6384 | } |
6385 | |
6386 | /** |
6387 | * lpfc_create_device_data - creates and initializes device data structure for OAS |
6388 | * @phba: Pointer to host bus adapter structure. |
6389 | * @vport_wwpn: Pointer to vport's wwpn information |
6390 | * @target_wwpn: Pointer to target's wwpn information |
6391 | * @lun: Lun on target |
6392 | * @pri: Priority |
6393 | * @atomic_create: Flag to indicate if memory should be allocated using the |
6394 | * GFP_ATOMIC flag or not. |
6395 | * |
6396 | * This routine creates a device data structure which will contain identifying |
6397 | * information for the device (host wwpn, target wwpn, lun), state of OAS, |
 * whether or not the corresponding lun is available to the system,
6399 | * and pointer to the rport data. |
6400 | * |
6401 | * Return codes: |
6402 | * NULL - Error |
6403 | * Pointer to lpfc_device_data - Success |
6404 | **/ |
6405 | struct lpfc_device_data* |
6406 | lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, |
6407 | struct lpfc_name *target_wwpn, uint64_t lun, |
6408 | uint32_t pri, bool atomic_create) |
6409 | { |
6410 | |
6411 | struct lpfc_device_data *lun_info; |
6412 | int memory_flags; |
6413 | |
6414 | if (unlikely(!phba) || !vport_wwpn || !target_wwpn || |
6415 | !(phba->cfg_fof)) |
6416 | return NULL; |
6417 | |
6418 | /* Attempt to create the device data to contain lun info */ |
6419 | |
6420 | if (atomic_create) |
6421 | memory_flags = GFP_ATOMIC; |
6422 | else |
6423 | memory_flags = GFP_KERNEL; |
	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
	if (!lun_info)
		return NULL;
	INIT_LIST_HEAD(&lun_info->listentry);
6428 | lun_info->rport_data = NULL; |
6429 | memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn, |
6430 | sizeof(struct lpfc_name)); |
6431 | memcpy(&lun_info->device_id.target_wwpn, target_wwpn, |
6432 | sizeof(struct lpfc_name)); |
6433 | lun_info->device_id.lun = lun; |
6434 | lun_info->oas_enabled = false; |
6435 | lun_info->priority = pri; |
6436 | lun_info->available = false; |
6437 | return lun_info; |
6438 | } |
6439 | |
6440 | /** |
6441 | * lpfc_delete_device_data - frees a device data structure for OAS |
6442 | * @phba: Pointer to host bus adapter structure. |
6443 | * @lun_info: Pointer to device data structure to free. |
6444 | * |
6445 | * This routine frees the previously allocated device data structure passed. |
6446 | * |
6447 | **/ |
6448 | void |
6449 | lpfc_delete_device_data(struct lpfc_hba *phba, |
6450 | struct lpfc_device_data *lun_info) |
6451 | { |
6452 | |
6453 | if (unlikely(!phba) || !lun_info || |
6454 | !(phba->cfg_fof)) |
6455 | return; |
6456 | |
	if (!list_empty(&lun_info->listentry))
		list_del(&lun_info->listentry);
	mempool_free(lun_info, phba->device_data_mem_pool);
6460 | return; |
6461 | } |
6462 | |
6463 | /** |
6464 | * __lpfc_get_device_data - returns the device data for the specified lun |
6465 | * @phba: Pointer to host bus adapter structure. |
 * @list: Pointer to the list to search.
6467 | * @vport_wwpn: Pointer to vport's wwpn information |
6468 | * @target_wwpn: Pointer to target's wwpn information |
6469 | * @lun: Lun on target |
6470 | * |
6471 | * This routine searches the list passed for the specified lun's device data. |
6472 | * This function does not hold locks, it is the responsibility of the caller |
6473 | * to ensure the proper lock is held before calling the function. |
6474 | * |
6475 | * Return codes: |
6476 | * NULL - Error |
6477 | * Pointer to lpfc_device_data - Success |
6478 | **/ |
6479 | struct lpfc_device_data* |
6480 | __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, |
6481 | struct lpfc_name *vport_wwpn, |
6482 | struct lpfc_name *target_wwpn, uint64_t lun) |
6483 | { |
6484 | |
6485 | struct lpfc_device_data *lun_info; |
6486 | |
6487 | if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || |
6488 | !phba->cfg_fof) |
6489 | return NULL; |
6490 | |
6491 | /* Check to see if the lun is already enabled for OAS. */ |
6492 | |
6493 | list_for_each_entry(lun_info, list, listentry) { |
		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
6498 | (lun_info->device_id.lun == lun)) |
6499 | return lun_info; |
6500 | } |
6501 | |
6502 | return NULL; |
6503 | } |
6504 | |
6505 | /** |
6506 | * lpfc_find_next_oas_lun - searches for the next oas lun |
6507 | * @phba: Pointer to host bus adapter structure. |
6508 | * @vport_wwpn: Pointer to vport's wwpn information |
6509 | * @target_wwpn: Pointer to target's wwpn information |
6510 | * @starting_lun: Pointer to the lun to start searching for |
6511 | * @found_vport_wwpn: Pointer to the found lun's vport wwpn information |
6512 | * @found_target_wwpn: Pointer to the found lun's target wwpn information |
6513 | * @found_lun: Pointer to the found lun. |
6514 | * @found_lun_status: Pointer to status of the found lun. |
6515 | * @found_lun_pri: Pointer to priority of the found lun. |
6516 | * |
6517 | * This routine searches the luns list for the specified lun |
6518 | * or the first lun for the vport/target. If the vport wwpn contains |
6519 | * a zero value then a specific vport is not specified. In this case |
6520 | * any vport which contains the lun will be considered a match. If the |
6521 | * target wwpn contains a zero value then a specific target is not specified. |
6522 | * In this case any target which contains the lun will be considered a |
6523 | * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status |
6524 | * are returned. The function will also return the next lun if available. |
 * If the next lun is not found, the starting_lun parameter will be set to
6526 | * NO_MORE_OAS_LUN. |
6527 | * |
 * Return codes:
 *   true - Found a matching OAS lun
 *   false - No matching OAS lun was found
6531 | **/ |
6532 | bool |
6533 | lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, |
6534 | struct lpfc_name *target_wwpn, uint64_t *starting_lun, |
6535 | struct lpfc_name *found_vport_wwpn, |
6536 | struct lpfc_name *found_target_wwpn, |
6537 | uint64_t *found_lun, |
6538 | uint32_t *found_lun_status, |
6539 | uint32_t *found_lun_pri) |
6540 | { |
6541 | |
6542 | unsigned long flags; |
6543 | struct lpfc_device_data *lun_info; |
6544 | struct lpfc_device_id *device_id; |
6545 | uint64_t lun; |
6546 | bool found = false; |
6547 | |
6548 | if (unlikely(!phba) || !vport_wwpn || !target_wwpn || |
6549 | !starting_lun || !found_vport_wwpn || |
6550 | !found_target_wwpn || !found_lun || !found_lun_status || |
6551 | (*starting_lun == NO_MORE_OAS_LUN) || |
6552 | !phba->cfg_fof) |
6553 | return false; |
6554 | |
6555 | lun = *starting_lun; |
6556 | *found_lun = NO_MORE_OAS_LUN; |
6557 | *starting_lun = NO_MORE_OAS_LUN; |
6558 | |
	/* Search for lun or the lun closest in value */
6560 | |
6561 | spin_lock_irqsave(&phba->devicelock, flags); |
6562 | list_for_each_entry(lun_info, &phba->luns, listentry) { |
		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			     sizeof(struct lpfc_name)) == 0)) &&
		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			     sizeof(struct lpfc_name)) == 0)) &&
6569 | (lun_info->oas_enabled)) { |
6570 | device_id = &lun_info->device_id; |
6571 | if ((!found) && |
6572 | ((lun == FIND_FIRST_OAS_LUN) || |
6573 | (device_id->lun == lun))) { |
6574 | *found_lun = device_id->lun; |
6575 | memcpy(found_vport_wwpn, |
6576 | &device_id->vport_wwpn, |
6577 | sizeof(struct lpfc_name)); |
6578 | memcpy(found_target_wwpn, |
6579 | &device_id->target_wwpn, |
6580 | sizeof(struct lpfc_name)); |
6581 | if (lun_info->available) |
6582 | *found_lun_status = |
6583 | OAS_LUN_STATUS_EXISTS; |
6584 | else |
6585 | *found_lun_status = 0; |
6586 | *found_lun_pri = lun_info->priority; |
6587 | if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) |
6588 | memset(vport_wwpn, 0x0, |
6589 | sizeof(struct lpfc_name)); |
6590 | if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) |
6591 | memset(target_wwpn, 0x0, |
6592 | sizeof(struct lpfc_name)); |
6593 | found = true; |
6594 | } else if (found) { |
6595 | *starting_lun = device_id->lun; |
6596 | memcpy(vport_wwpn, &device_id->vport_wwpn, |
6597 | sizeof(struct lpfc_name)); |
6598 | memcpy(target_wwpn, &device_id->target_wwpn, |
6599 | sizeof(struct lpfc_name)); |
6600 | break; |
6601 | } |
6602 | } |
6603 | } |
	spin_unlock_irqrestore(&phba->devicelock, flags);
6605 | return found; |
6606 | } |
6607 | |
6608 | /** |
6609 | * lpfc_enable_oas_lun - enables a lun for OAS operations |
6610 | * @phba: Pointer to host bus adapter structure. |
6611 | * @vport_wwpn: Pointer to vport's wwpn information |
6612 | * @target_wwpn: Pointer to target's wwpn information |
6613 | * @lun: Lun |
6614 | * @pri: Priority |
6615 | * |
 * This routine enables a lun for OAS operations. The routine does so by
 * doing the following:
6618 | * |
6619 | * 1) Checks to see if the device data for the lun has been created. |
6620 | * 2) If found, sets the OAS enabled flag if not set and returns. |
6621 | * 3) Otherwise, creates a device data structure. |
 * 4) If successfully created, indicates the device data is for an OAS lun,
 * indicates the lun is not available, and adds it to the list of luns.
6624 | * |
6625 | * Return codes: |
6626 | * false - Error |
6627 | * true - Success |
6628 | **/ |
6629 | bool |
6630 | lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, |
6631 | struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) |
6632 | { |
6633 | |
6634 | struct lpfc_device_data *lun_info; |
6635 | unsigned long flags; |
6636 | |
6637 | if (unlikely(!phba) || !vport_wwpn || !target_wwpn || |
6638 | !phba->cfg_fof) |
6639 | return false; |
6640 | |
6641 | spin_lock_irqsave(&phba->devicelock, flags); |
6642 | |
6643 | /* Check to see if the device data for the lun has been created */ |
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		if (!lun_info->oas_enabled)
			lun_info->oas_enabled = true;
		lun_info->priority = pri;
		spin_unlock_irqrestore(&phba->devicelock, flags);
6651 | return true; |
6652 | } |
6653 | |
	/* Create a lun info structure and add it to the list of luns */
	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
					   pri, true);
6657 | if (lun_info) { |
6658 | lun_info->oas_enabled = true; |
6659 | lun_info->priority = pri; |
6660 | lun_info->available = false; |
		list_add_tail(&lun_info->listentry, &phba->luns);
		spin_unlock_irqrestore(&phba->devicelock, flags);
6663 | return true; |
6664 | } |
	spin_unlock_irqrestore(&phba->devicelock, flags);
6666 | return false; |
6667 | } |
6668 | |
6669 | /** |
6670 | * lpfc_disable_oas_lun - disables a lun for OAS operations |
6671 | * @phba: Pointer to host bus adapter structure. |
6672 | * @vport_wwpn: Pointer to vport's wwpn information |
6673 | * @target_wwpn: Pointer to target's wwpn information |
6674 | * @lun: Lun |
6675 | * @pri: Priority |
6676 | * |
 * This routine disables a lun for OAS operations. The routine does so by
 * doing the following:
6679 | * |
6680 | * 1) Checks to see if the device data for the lun is created. |
6681 | * 2) If present, clears the flag indicating this lun is for OAS. |
 * 3) If the lun is not available to the system, the device data is
6683 | * freed. |
6684 | * |
6685 | * Return codes: |
6686 | * false - Error |
6687 | * true - Success |
6688 | **/ |
6689 | bool |
6690 | lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, |
6691 | struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) |
6692 | { |
6693 | |
6694 | struct lpfc_device_data *lun_info; |
6695 | unsigned long flags; |
6696 | |
6697 | if (unlikely(!phba) || !vport_wwpn || !target_wwpn || |
6698 | !phba->cfg_fof) |
6699 | return false; |
6700 | |
6701 | spin_lock_irqsave(&phba->devicelock, flags); |
6702 | |
6703 | /* Check to see if the lun is available. */ |
	lun_info = __lpfc_get_device_data(phba,
					  &phba->luns, vport_wwpn,
					  target_wwpn, lun);
6707 | if (lun_info) { |
6708 | lun_info->oas_enabled = false; |
6709 | lun_info->priority = pri; |
6710 | if (!lun_info->available) |
6711 | lpfc_delete_device_data(phba, lun_info); |
		spin_unlock_irqrestore(&phba->devicelock, flags);
6713 | return true; |
6714 | } |
6715 | |
	spin_unlock_irqrestore(&phba->devicelock, flags);
6717 | return false; |
6718 | } |
6719 | |
6720 | static int |
6721 | lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) |
6722 | { |
6723 | return SCSI_MLQUEUE_HOST_BUSY; |
6724 | } |
6725 | |
6726 | static int |
6727 | lpfc_no_slave(struct scsi_device *sdev) |
6728 | { |
6729 | return -ENODEV; |
6730 | } |
6731 | |
6732 | struct scsi_host_template lpfc_template_nvme = { |
6733 | .module = THIS_MODULE, |
6734 | .name = LPFC_DRIVER_NAME, |
6735 | .proc_name = LPFC_DRIVER_NAME, |
6736 | .info = lpfc_info, |
6737 | .queuecommand = lpfc_no_command, |
6738 | .slave_alloc = lpfc_no_slave, |
6739 | .slave_configure = lpfc_no_slave, |
6740 | .scan_finished = lpfc_scan_finished, |
6741 | .this_id = -1, |
6742 | .sg_tablesize = 1, |
6743 | .cmd_per_lun = 1, |
6744 | .shost_groups = lpfc_hba_groups, |
6745 | .max_sectors = 0xFFFFFFFF, |
6746 | .vendor_id = LPFC_NL_VENDOR_ID, |
6747 | .track_queue_depth = 0, |
6748 | }; |
6749 | |
6750 | struct scsi_host_template lpfc_template = { |
6751 | .module = THIS_MODULE, |
6752 | .name = LPFC_DRIVER_NAME, |
6753 | .proc_name = LPFC_DRIVER_NAME, |
6754 | .info = lpfc_info, |
6755 | .queuecommand = lpfc_queuecommand, |
6756 | .eh_timed_out = fc_eh_timed_out, |
6757 | .eh_should_retry_cmd = fc_eh_should_retry_cmd, |
6758 | .eh_abort_handler = lpfc_abort_handler, |
6759 | .eh_device_reset_handler = lpfc_device_reset_handler, |
6760 | .eh_target_reset_handler = lpfc_target_reset_handler, |
6761 | .eh_host_reset_handler = lpfc_host_reset_handler, |
6762 | .slave_alloc = lpfc_slave_alloc, |
6763 | .slave_configure = lpfc_slave_configure, |
6764 | .slave_destroy = lpfc_slave_destroy, |
6765 | .scan_finished = lpfc_scan_finished, |
6766 | .this_id = -1, |
6767 | .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, |
6768 | .cmd_per_lun = LPFC_CMD_PER_LUN, |
6769 | .shost_groups = lpfc_hba_groups, |
6770 | .max_sectors = 0xFFFFFFFF, |
6771 | .vendor_id = LPFC_NL_VENDOR_ID, |
6772 | .change_queue_depth = scsi_change_queue_depth, |
6773 | .track_queue_depth = 1, |
6774 | }; |
6775 | |
6776 | struct scsi_host_template lpfc_vport_template = { |
6777 | .module = THIS_MODULE, |
6778 | .name = LPFC_DRIVER_NAME, |
6779 | .proc_name = LPFC_DRIVER_NAME, |
6780 | .info = lpfc_info, |
6781 | .queuecommand = lpfc_queuecommand, |
6782 | .eh_timed_out = fc_eh_timed_out, |
6783 | .eh_should_retry_cmd = fc_eh_should_retry_cmd, |
6784 | .eh_abort_handler = lpfc_abort_handler, |
6785 | .eh_device_reset_handler = lpfc_device_reset_handler, |
6786 | .eh_target_reset_handler = lpfc_target_reset_handler, |
6787 | .eh_bus_reset_handler = NULL, |
6788 | .eh_host_reset_handler = NULL, |
6789 | .slave_alloc = lpfc_slave_alloc, |
6790 | .slave_configure = lpfc_slave_configure, |
6791 | .slave_destroy = lpfc_slave_destroy, |
6792 | .scan_finished = lpfc_scan_finished, |
6793 | .this_id = -1, |
6794 | .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, |
6795 | .cmd_per_lun = LPFC_CMD_PER_LUN, |
6796 | .shost_groups = lpfc_vport_groups, |
6797 | .max_sectors = 0xFFFFFFFF, |
6798 | .vendor_id = 0, |
6799 | .change_queue_depth = scsi_change_queue_depth, |
6800 | .track_queue_depth = 1, |
6801 | }; |
6802 | |