/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_async_xchg_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_async_xchg_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_async_xchg_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_async_xchg_ctx *,
					    uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_async_xchg_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;
/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* TSEND template */
	wqe = &lpfc_tsend_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is zero */

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - wqe_ar is variable */
	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is zero */

	/* TRECEIVE template */
	wqe = &lpfc_treceive_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 */
	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is variable */

	/* TRSP template */
	wqe = &lpfc_trsp_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - response_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 wqes, xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

	/* Word 11 irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
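
/*
 * Usage sketch (illustrative only -- the real per-IO setup lives in the
 * lpfc_nvmet_prep_*_wqe() routines): each prep routine is expected to
 * start from the matching template and then patch only the words marked
 * "variable" above, e.g.
 *
 *	memcpy(&nvmewqe->wqe, &lpfc_tsend_cmd_template,
 *	       sizeof(union lpfc_wqe128));
 *	// then fill the BDE (words 0-2), relative_offset (word 4),
 *	// tags (words 6, 8, 9) and fcp_data_len (word 12) per exchange
 */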

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
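/*
 * lpfc_nvmet_get_ctx_for_xri - look up an active exchange context by XRI.
 * Walks the t_active_ctx_list under t_active_list_lock and returns the
 * matching context, or NULL if none is found. Note the pointer is used
 * after the lock is dropped, so callers must tolerate state changes.
 */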
static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}

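/*
 * lpfc_nvmet_get_ctx_for_oxid - as above, but keyed by the OX_ID and S_ID
 * of the original command instead of the local XRI.
 */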
static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}
#endif

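/*
 * lpfc_nvmet_defer_release - mark a context for deferred release.
 * Called with ctxp->ctxlock held. Sets LPFC_NVME_CTX_RLS and moves the
 * context from the t_active list to lpfc_abts_nvmet_ctx_list; the abort /
 * XRI-aborted completion path then performs the final ctxbuf post.
 */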
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba,
			 struct lpfc_async_xchg_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	if (ctxp->flag & LPFC_NVME_CTX_RLS)
		return;

	ctxp->flag |= LPFC_NVME_CTX_RLS;
	spin_lock(&phba->sli4_hba.t_active_list_lock);
	list_del(&ctxp->list);
	spin_unlock(&phba->sli4_hba.t_active_list_lock);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}

/**
 * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
 *         transmission of an NVME LS response.
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @rspwqe: Pointer to driver response WQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. The function frees memory resources used for the command
 * used to send the NVME LS RSP.
 **/
void
__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			   struct lpfc_iocbq *rspwqe)
{
	struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
	struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6410 NVMEx LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
	}

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
			 axchg->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
			status, result, axchg->oxid);

	lpfc_nlp_put(cmdwqe->ndlp);
	cmdwqe->context_un.axchg = NULL;
	cmdwqe->bpl_dmabuf = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	ls_rsp->done(ls_rsp);
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
			status, axchg->oxid);
	kfree(axchg);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @rspwqe: Pointer to driver response WQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function updates any states and statistics, then calls the
 * generic completion handler to free resources.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_iocbq *rspwqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

	if (!phba->targetport)
		goto finish;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

finish:
	__lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA buffer is associated with
 * @ctx_buf: ctx buffer context
 *
 * Description: Frees the given buffer by reposting it to its associated RQ
 * so it can be reused. If other commands are waiting for a context, the
 * context is immediately reused for the next waiting command; otherwise it
 * is returned to the per-CPU context list.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;
		/* check if freed in another path whilst acquiring lock */
		if (nvmebuf) {
			ctxp->rqb_buffer = NULL;
			if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
				ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
								    nvmebuf);
			} else {
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				/* repost */
				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
			}
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
	}
	ctxp->state = LPFC_NVME_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVME_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* NOTE: isr time stamp is stale when context is re-assigned */
		if (ctxp->ts_isr_cmd) {
			ctxp->ts_cmd_nvme = 0;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* Indicate that a replacement buffer has been posted */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6181 Unable to queue deferred work "
					"for oxid x%x. "
					"FCP Drop IO [x%x x%x x%x]\n",
					ctxp->oxid,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_del_init(&ctxp->list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	cpu = raw_smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_async_xchg_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	uint64_t segsum;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
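	/*
	 * Each segment below is computed as a delta from ts_isr_cmd with
	 * the running sum of earlier segments subtracted back out, which
	 * is algebraically just the adjacent-timestamp delta, e.g.:
	 *
	 *	seg2 = (ts_nvme_data - ts_isr_cmd) - seg1
	 *	     = ts_nvme_data - ts_cmd_nvme
	 *
	 * The monotonicity checks above keep every subtraction
	 * non-negative for these unsigned values.
	 */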
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	segsum = seg1;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
	if (segsum > seg2)
		return;
	seg2 -= segsum;
	segsum += seg2;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
	if (segsum > seg4)
		return;
	seg4 -= segsum;
	segsum += seg4;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
	if (segsum > seg5)
		return;
	seg5 -= segsum;
	segsum += seg5;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
		if (segsum > seg6)
			return;
		seg6 -= segsum;
		segsum += seg6;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
		if (segsum > seg7)
			return;
		seg7 -= segsum;
		segsum += seg7;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
		if (segsum > seg8)
			return;
		seg8 -= segsum;
		segsum += seg8;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
		if (segsum > seg9)
			return;
		seg9 -= segsum;
		segsum += seg9;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			 ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;
	if (!seg6)
		goto out;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;
out:
	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @rspwqe: Pointer to driver response WQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_iocbq *rspwqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_async_xchg_ctx *ctxp;
	uint32_t status, result, op, logerr;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	ctxp = cmdwqe->context_un.axchg;
	ctxp->flag &= ~LPFC_NVME_IO_INP;

	rsp = &ctxp->hdlrctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVME_XBUSY;
			logerr |= LOG_NVME_ABTS;
			if (tgtp)
				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

		} else {
			ctxp->flag &= ~LPFC_NVME_XBUSY;
		}

		lpfc_printf_log(phba, KERN_INFO, logerr,
				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
				"XBUSY:x%x\n",
				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
				status, result, ctxp->flag);

	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVME_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		memset_startat(cmdwqe, 0, cmd_flag);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
#endif
		rsp->done(rsp);
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (ctxp->cpu != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6704 CPU Check cmdcmpl: "
					"cpu %d expect %d\n",
					id, ctxp->cpu);
	}
#endif
}

/**
 * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit
 *         an NVME LS response for a prior NVME LS request that was received.
 * @axchg: pointer to exchange context for the NVME LS request the response
 *         is for.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
 *
 * This routine is used to format and send a WQE to transmit an NVME LS
 * Response. The response is for a prior NVME LS request that was
 * received and posted to the transport.
 *
 * Returns:
 *  0 : if response successfully transmitted
 *  non-zero : if response failed to transmit, of the form -Exxx.
 **/
int
__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
		       struct nvmefc_ls_rsp *ls_rsp,
		       void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
					      struct lpfc_iocbq *cmdwqe,
					      struct lpfc_iocbq *rspwqe))
{
	struct lpfc_hba *phba = axchg->phba;
	struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);

	if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6412 NVMEx LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
		return -EALREADY;
	}
	axchg->state = LPFC_NVME_STE_LS_RSP;
	axchg->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
					  ls_rsp->rsplen);
	if (nvmewqeq == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6150 NVMEx LS Drop Rsp x%x: Prep\n",
				axchg->oxid);
		rc = -ENOMEM;
		goto out_free_buf;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->num_bdes = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->bpl_dmabuf = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = ls_rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);
	/*
	 * Note: although we're using stack space for the dmabuf, the
	 * call to lpfc_sli4_issue_wqe is synchronous, so it will not
	 * be referenced after it returns back to this routine.
	 */

	nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp;
	nvmewqeq->context_un.axchg = axchg;

	lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
			 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);

	/* clear to be sure there's no reference */
	nvmewqeq->bpl_dmabuf = NULL;

	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6151 NVMEx LS RSP x%x: failed to transmit %d\n",
			axchg->oxid, rc);

	rc = -ENXIO;

	lpfc_nlp_put(nvmewqeq->ndlp);

out_free_buf:
	/* Give back resources */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	/*
	 * As transport doesn't track completions of responses, if the rsp
	 * fails to send, the transport will effectively ignore the rsp
	 * and consider the LS done. However, the driver has an active
	 * exchange open for the LS - so be sure to abort the exchange
	 * if the response isn't sent.
	 */
	lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
	return rc;
}

/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
 * @tgtport: pointer to target port that the NVME LS is to be transmitted from.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 *
 * Driver registers this routine to transmit responses for received NVME
 * LS requests.
 *
 * This routine is used to format and send a WQE to transmit an NVME LS
 * Response. The ls_rsp is used to reverse-map the LS to the original
 * NVME LS request sequence, which provides addressing information for
 * the remote port the LS is to be sent to, as well as the exchange id
 * that the LS is bound to.
 *
 * Returns:
 *  0 : if response successfully transmitted
 *  non-zero : if response failed to transmit, of the form -Exxx.
 **/
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_ls_rsp *ls_rsp)
{
	struct lpfc_async_xchg_ctx *axchg =
		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	int rc;

	if (axchg->phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);

	if (rc) {
		atomic_inc(&nvmep->xmt_ls_drop);
		/*
		 * unless the failure is due to having already sent
		 * the response, an abort will be generated for the
		 * exchange if the rsp can't be sent.
		 */
		if (rc != -EALREADY)
			atomic_inc(&nvmep->xmt_ls_abort);
		return rc;
	}

	atomic_inc(&nvmep->xmt_ls_rsp);
	return 0;
}

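/*
 * lpfc_nvmet_xmt_fcp_op - transport entry point to perform an FCP data
 * transfer or response operation (TSEND/TRECEIVE/TRSP) for a received
 * command. If the work queue is full, the WQE is parked on the
 * wqfull_list and replayed once WQE release CQEs arrive.
 */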
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}

	/* Setup the hdw queue if not already set */
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		if (rsp->hwqid != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6705 CPU Check OP: "
					"cpu %d expect %d\n",
					id, rsp->hwqid);
		ctxp->cpu = id; /* Setup cpu for cmpl check */
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVME_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6102 IO oxid x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->context_un.axchg = ctxp;
	nvmewqeq->cmd_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVME_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	if (rc == -EBUSY) {
		/*
		 * WQ was full, so queue nvmewqeq to be sent after
		 * WQE release CQE
		 */
		ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
		wq = ctxp->hdwq->io_wq;
		pring = wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
		wq->q_flag |= HBA_NVMET_WQFULL;
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		atomic_inc(&lpfc_nvmep->defer_wqfull);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context_un.axchg = NULL;
	nvmewqeq->bpl_dmabuf = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}

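/*
 * lpfc_nvmet_targetport_delete - invoked by the nvmet transport once all
 * references to the targetport are gone; wakes any thread waiting in the
 * driver's unregister path.
 */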
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	if (tport->phba->targetport)
		complete(tport->tport_unreg_cmp);
}

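/*
 * lpfc_nvmet_xmt_fcp_abort - transport entry point to abort an outstanding
 * FCP exchange. Issues an unsolicited abort if no IO WQE has been started
 * on the exchange yet, otherwise a solicited abort, and flushes any WQE
 * still parked on the wqfull_list.
 */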
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	unsigned long flags;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in the process of aborting the IO
	 */
	if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVME_ABORT_OP;

	if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = ctxp->hdwq->io_wq;
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* A state of LPFC_NVME_STE_RCV means we have just received
	 * the NVME command and have not yet started processing it
	 * (no IO WQEs have been issued on this exchange).
	 */
	if (ctxp->state == LPFC_NVME_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
}

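/*
 * lpfc_nvmet_xmt_fcp_release - transport entry point invoked when the
 * transport is finished with an IO. The context is reposted immediately
 * unless an abort is still outstanding (LPFC_NVME_ABORT_OP or
 * LPFC_NVME_XBUSY), in which case release is deferred to the abort path.
 */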
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_XBUSY)
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6027 NVMET release with XBUSY flag x%x"
				" oxid x%x\n",
				ctxp->flag, ctxp->oxid);
	else if (ctxp->state != LPFC_NVME_STE_DONE &&
		 ctxp->state != LPFC_NVME_STE_ABORT)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);

	if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVME_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
			 ctxp->oxid, ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
	ctxp->flag &= ~LPFC_NVME_TNOTIFY;

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

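/*
 * lpfc_nvmet_defer_rcv - transport entry point signalling that a command
 * whose receive was previously deferred (rcv returned -EOVERFLOW) has now
 * been accepted. The saved receive buffer can be freed here because
 * lpfc_nvmet_ctxbuf_post() already posted a replacement to the RQ
 * (LPFC_NVME_CTX_REUSE_WQ).
 */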
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long iflag;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, raw_smp_processor_id());

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6425 Defer rcv: no buffer oxid x%x: "
				"flg %x ste %x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		return;
	}

	tgtp = phba->targetport->private;
	if (tgtp)
		atomic_inc(&tgtp->rcv_fcp_cmd_defer);

	/* Free the nvmebuf since a new buffer already replaced it */
	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	ctxp->rqb_buffer = NULL;
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}

/**
 * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
 * @phba: Pointer to HBA context object
 * @cmdwqe: Pointer to driver command WQE object.
 * @rspwqe: Pointer to driver response WQE object.
 *
 * This function is the completion handler for NVME LS requests.
 * The function updates any states and statistics, then calls the
 * generic completion handler to finish completion of the request.
 **/
static void
lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		      struct lpfc_iocbq *rspwqe)
{
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

	__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
}

/**
 * lpfc_nvmet_ls_req - Issue a Link Service request
 * @targetport: pointer to target instance registered with nvmet transport.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *              Driver sets this value to the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
		  void *hosthandle,
		  struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	int ret;
	u32 hstate;

	if (!lpfc_nvmet)
		return -EINVAL;

	phba = lpfc_nvmet->phba;
	if (phba->pport->load_flag & FC_UNLOADING)
		return -EINVAL;

	hstate = atomic_read(&lpfc_nvmet->state);
	if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
		return -EACCES;

	ndlp = (struct lpfc_nodelist *)hosthandle;

	ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
				 lpfc_nvmet_ls_req_cmp);

	return ret;
}

/**
 * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
 * @targetport: Transport targetport, that LS was issued from.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *              Driver sets this value to the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
		    void *hosthandle,
		    struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	int ret;

	phba = lpfc_nvmet->phba;
	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	ndlp = (struct lpfc_nodelist *)hosthandle;

	ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lpfc_nvmet->xmt_ls_abort);
}

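/*
 * lpfc_nvmet_host_release - invoked when the transport has no further use
 * for the hosthandle (the ndlp). Drops the node reference taken when the
 * handle was handed to the transport and clears the invalidate-host state.
 */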
static void
lpfc_nvmet_host_release(void *hosthandle)
{
	struct lpfc_nodelist *ndlp = hosthandle;
	struct lpfc_hba *phba = ndlp->phba;
	struct lpfc_nvmet_tgtport *tgtp;

	if (!phba->targetport || !phba->targetport->private)
		return;

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6202 NVMET XPT releasing hosthandle x%px "
			"DID x%x xflags x%x refcnt %d\n",
			hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags,
			kref_read(&ndlp->kref));
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irq(&ndlp->lock);
	ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH;
	spin_unlock_irq(&ndlp->lock);
	lpfc_nlp_put(ndlp);
	atomic_set(&tgtp->state, 0);
}

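/*
 * lpfc_nvmet_discovery_event - invoked when the nvmet subsystem
 * configuration changes; the driver notifies the fabric by sending an
 * RSCN so that initiators rediscover the target.
 */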
static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_hba *phba;
	uint32_t rc;

	tgtp = tgtport->private;
	phba = tgtp->phba;

	rc = lpfc_issue_els_rscn(phba->pport, 0);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6420 NVMET subsystem change: Notification %s\n",
			(rc) ? "Failed" : "Sent");
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
	.defer_rcv = lpfc_nvmet_defer_rcv,
	.discovery_event = lpfc_nvmet_discovery_event,
	.ls_req = lpfc_nvmet_ls_req,
	.ls_abort = lpfc_nvmet_ls_abort,
	.host_release = lpfc_nvmet_host_release,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
	.lsrqst_priv_sz = 0,
};
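
/*
 * Note: the queue/segment limits above are placeholders; the registration
 * path (lpfc_nvmet_create_targetport()) is expected to overwrite
 * max_hw_queues and the SGL segment counts from the cfg_* module
 * parameters before calling nvmet_fc_register_targetport().
 */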

static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		spin_lock(&phba->hbalock);
		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		spin_unlock(&phba->hbalock);

		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for_each_present_cpu(j) {
			infop = lpfc_get_ctx_list(phba, j, i);
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}
1482 | |
1483 | static int |
1484 | lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) |
1485 | { |
1486 | struct lpfc_nvmet_ctxbuf *ctx_buf; |
1487 | struct lpfc_iocbq *nvmewqe; |
1488 | union lpfc_wqe128 *wqe; |
1489 | struct lpfc_nvmet_ctx_info *last_infop; |
1490 | struct lpfc_nvmet_ctx_info *infop; |
1491 | int i, j, idx, cpu; |
1492 | |
1493 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME, |
1494 | "6403 Allocate NVMET resources for %d XRIs\n" , |
1495 | phba->sli4_hba.nvmet_xri_cnt); |
1496 | |
1497 | phba->sli4_hba.nvmet_ctx_info = kcalloc( |
1498 | n: phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq, |
1499 | size: sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL); |
1500 | if (!phba->sli4_hba.nvmet_ctx_info) { |
1501 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1502 | "6419 Failed allocate memory for " |
1503 | "nvmet context lists\n" ); |
1504 | return -ENOMEM; |
1505 | } |
1506 | |
1507 | /* |
1508 | * Assuming X CPUs in the system, and Y MRQs, allocate some |
1509 | * lpfc_nvmet_ctx_info structures as follows: |
1510 | * |
1511 | * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0 |
1512 | * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1 |
1513 | * ... |
1514 | * cpuX/mrqY cpuX/mrqY ... cpuX/mrqY |
1515 | * |
1516 | * Each line represents a MRQ "silo" containing an entry for |
1517 | * every CPU. |
1518 | * |
1519 | * MRQ X is initially assumed to be associated with CPU X, thus |
1520 | * contexts are initially distributed across all MRQs using |
1521 | * the MRQ index (N) as follows cpuN/mrqN. When contexts are |
1522 | * freed, the are freed to the MRQ silo based on the CPU number |
1523 | * of the IO completion. Thus a context that was allocated for MRQ A |
1524 | * whose IO completed on CPU B will be freed to cpuB/mrqA. |
1525 | */ |
1526 | for_each_possible_cpu(i) { |
1527 | for (j = 0; j < phba->cfg_nvmet_mrq; j++) { |
1528 | infop = lpfc_get_ctx_list(phba, i, j); |
1529 | INIT_LIST_HEAD(list: &infop->nvmet_ctx_list); |
1530 | spin_lock_init(&infop->nvmet_ctx_list_lock); |
1531 | infop->nvmet_ctx_list_cnt = 0; |
1532 | } |
1533 | } |
1534 | |
1535 | /* |
1536 | * Setup the next CPU context info ptr for each MRQ. |
1537 | * MRQ 0 will cycle thru CPUs 0 - X separately from |
1538 | * MRQ 1 cycling thru CPUs 0 - X, and so on. |
1539 | */ |
1540 | for (j = 0; j < phba->cfg_nvmet_mrq; j++) { |
1541 | last_infop = lpfc_get_ctx_list(phba, |
1542 | cpumask_first(cpu_present_mask), |
1543 | j); |
1544 | for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) { |
1545 | infop = lpfc_get_ctx_list(phba, i, j); |
1546 | infop->nvmet_ctx_next_cpu = last_infop; |
1547 | last_infop = infop; |
1548 | } |
1549 | } |
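| /*
|  * Each per-MRQ chain of context info structures now forms a ring:
|  * cpu0 -> cpu1 -> ... -> cpuX -> cpu0. A replenish scan for an MRQ
|  * can therefore start at any CPU and still visit every CPU's silo.
|  */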
1550 | |
1551 | /* For all nvmet xris, allocate resources needed to process a |
1552 | * received command on a per xri basis. |
1553 | */ |
1554 | idx = 0; |
1555 | cpu = cpumask_first(cpu_present_mask); |
1556 | for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { |
1557 | ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1558 | if (!ctx_buf) {
1559 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1560 | "6404 Ran out of memory for NVMET\n");
1561 | return -ENOMEM; |
1562 | } |
1563 | |
1564 | ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1565 | GFP_KERNEL);
1566 | if (!ctx_buf->context) {
1567 | kfree(ctx_buf);
1568 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1569 | "6405 Ran out of NVMET "
1570 | "context memory\n");
1571 | return -ENOMEM; |
1572 | } |
1573 | ctx_buf->context->ctxbuf = ctx_buf; |
1574 | ctx_buf->context->state = LPFC_NVME_STE_FREE; |
1575 | |
1576 | ctx_buf->iocbq = lpfc_sli_get_iocbq(phba); |
1577 | if (!ctx_buf->iocbq) { |
1578 | kfree(ctx_buf->context);
1579 | kfree(ctx_buf);
1580 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1581 | "6406 Ran out of NVMET iocb/WQEs\n");
1582 | return -ENOMEM; |
1583 | } |
1584 | ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET; |
1585 | nvmewqe = ctx_buf->iocbq; |
1586 | wqe = &nvmewqe->wqe; |
1587 | |
1588 | /* Initialize WQE */ |
1589 | memset(wqe, 0, sizeof(union lpfc_wqe)); |
1590 | |
1591 | ctx_buf->iocbq->cmd_dmabuf = NULL; |
1592 | spin_lock(&phba->sli4_hba.sgl_list_lock);
1593 | ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1594 | spin_unlock(&phba->sli4_hba.sgl_list_lock);
1595 | if (!ctx_buf->sglq) {
1596 | lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1597 | kfree(ctx_buf->context);
1598 | kfree(ctx_buf);
1599 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1600 | "6407 Ran out of NVMET XRIs\n");
1601 | return -ENOMEM; |
1602 | } |
1603 | INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work); |
1604 | |
1605 | /* |
1606 | * Add ctx to MRQidx context list. Our initial assumption |
1607 | * is MRQidx will be associated with CPUidx. This association |
1608 | * can change on the fly. |
1609 | */ |
1610 | infop = lpfc_get_ctx_list(phba, cpu, idx); |
1611 | spin_lock(&infop->nvmet_ctx_list_lock);
1612 | list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1613 | infop->nvmet_ctx_list_cnt++;
1614 | spin_unlock(&infop->nvmet_ctx_list_lock);
1615 | |
1616 | /* Spread ctx structures evenly across all MRQs */ |
1617 | idx++; |
1618 | if (idx >= phba->cfg_nvmet_mrq) { |
1619 | idx = 0; |
1620 | cpu = cpumask_first(cpu_present_mask); |
1621 | continue; |
1622 | } |
1623 | cpu = lpfc_next_present_cpu(cpu);
1624 | } |
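| /*
|  * Note the loop above advances cpu and idx in lockstep, so the
|  * initial contexts land only on the matching cpuN/mrqN silos
|  * (the Nth present CPU paired with MRQ N); all other silos start
|  * empty and fill as IOs complete on other CPUs.
|  */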
1625 | |
1626 | for_each_present_cpu(i) { |
1627 | for (j = 0; j < phba->cfg_nvmet_mrq; j++) { |
1628 | infop = lpfc_get_ctx_list(phba, i, j); |
1629 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, |
1630 | "6408 TOTAL NVMET ctx for CPU %d " |
1631 | "MRQ %d: cnt %d nextcpu x%px\n",
1632 | i, j, infop->nvmet_ctx_list_cnt, |
1633 | infop->nvmet_ctx_next_cpu); |
1634 | } |
1635 | } |
1636 | return 0; |
1637 | } |
1638 | |
1639 | int |
1640 | lpfc_nvmet_create_targetport(struct lpfc_hba *phba) |
1641 | { |
1642 | struct lpfc_vport *vport = phba->pport; |
1643 | struct lpfc_nvmet_tgtport *tgtp; |
1644 | struct nvmet_fc_port_info pinfo; |
1645 | int error; |
1646 | |
1647 | if (phba->targetport) |
1648 | return 0; |
1649 | |
1650 | error = lpfc_nvmet_setup_io_context(phba); |
1651 | if (error) |
1652 | return error; |
1653 | |
1654 | memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); |
1655 | pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1656 | pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1657 | pinfo.port_id = vport->fc_myDID; |
1658 | |
1659 | /* We need to tell the transport layer + 1 because it takes page |
1660 | * alignment into account. When space for the SGL is allocated we |
1661 | * allocate + 3: one for the cmd, one for the rsp, and one for this alignment.
1662 | */ |
1663 | lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; |
1664 | lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue; |
1665 | lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP; |
1666 | |
1667 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
1668 | error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1669 | &phba->pcidev->dev,
1670 | &phba->targetport);
1671 | #else |
1672 | error = -ENOENT; |
1673 | #endif |
1674 | if (error) { |
1675 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1676 | "6025 Cannot register NVME targetport x%x: " |
1677 | "portnm %llx nodenm %llx segs %d qs %d\n",
1678 | error, |
1679 | pinfo.port_name, pinfo.node_name, |
1680 | lpfc_tgttemplate.max_sgl_segments, |
1681 | lpfc_tgttemplate.max_hw_queues); |
1682 | phba->targetport = NULL; |
1683 | phba->nvmet_support = 0; |
1684 | |
1685 | lpfc_nvmet_cleanup_io_context(phba); |
1686 | |
1687 | } else { |
1688 | tgtp = (struct lpfc_nvmet_tgtport *) |
1689 | phba->targetport->private; |
1690 | tgtp->phba = phba; |
1691 | |
1692 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, |
1693 | "6026 Registered NVME " |
1694 | "targetport: x%px, private x%px " |
1695 | "portnm %llx nodenm %llx segs %d qs %d\n",
1696 | phba->targetport, tgtp, |
1697 | pinfo.port_name, pinfo.node_name, |
1698 | lpfc_tgttemplate.max_sgl_segments, |
1699 | lpfc_tgttemplate.max_hw_queues); |
1700 | |
1701 | atomic_set(&tgtp->rcv_ls_req_in, 0);
1702 | atomic_set(&tgtp->rcv_ls_req_out, 0);
1703 | atomic_set(&tgtp->rcv_ls_req_drop, 0);
1704 | atomic_set(&tgtp->xmt_ls_abort, 0);
1705 | atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1706 | atomic_set(&tgtp->xmt_ls_rsp, 0);
1707 | atomic_set(&tgtp->xmt_ls_drop, 0);
1708 | atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1709 | atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1710 | atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1711 | atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1712 | atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1713 | atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1714 | atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1715 | atomic_set(&tgtp->xmt_fcp_drop, 0);
1716 | atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1717 | atomic_set(&tgtp->xmt_fcp_read, 0);
1718 | atomic_set(&tgtp->xmt_fcp_write, 0);
1719 | atomic_set(&tgtp->xmt_fcp_rsp, 0);
1720 | atomic_set(&tgtp->xmt_fcp_release, 0);
1721 | atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1722 | atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1723 | atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1724 | atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1725 | atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1726 | atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1727 | atomic_set(&tgtp->xmt_fcp_abort, 0);
1728 | atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1729 | atomic_set(&tgtp->xmt_abort_unsol, 0);
1730 | atomic_set(&tgtp->xmt_abort_sol, 0);
1731 | atomic_set(&tgtp->xmt_abort_rsp, 0);
1732 | atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1733 | atomic_set(&tgtp->defer_ctx, 0);
1734 | atomic_set(&tgtp->defer_fod, 0);
1735 | atomic_set(&tgtp->defer_wqfull, 0);
1736 | } |
1737 | return error; |
1738 | } |
1739 | |
1740 | int |
1741 | lpfc_nvmet_update_targetport(struct lpfc_hba *phba) |
1742 | { |
1743 | struct lpfc_vport *vport = phba->pport; |
1744 | |
1745 | if (!phba->targetport) |
1746 | return 0; |
1747 | |
1748 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, |
1749 | "6007 Update NVMET port x%px did x%x\n",
1750 | phba->targetport, vport->fc_myDID); |
1751 | |
1752 | phba->targetport->port_id = vport->fc_myDID; |
1753 | return 0; |
1754 | } |
1755 | |
1756 | /** |
1757 | * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort |
1758 | * @phba: pointer to lpfc hba data structure. |
1759 | * @axri: pointer to the nvmet xri abort wcqe structure. |
1760 | * |
1761 | * This routine is invoked by the worker thread to process a SLI4 fast-path |
1762 | * NVMET aborted xri. |
1763 | **/ |
1764 | void |
1765 | lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, |
1766 | struct sli4_wcqe_xri_aborted *axri) |
1767 | { |
1768 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
1769 | uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); |
1770 | uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); |
1771 | struct lpfc_async_xchg_ctx *ctxp, *next_ctxp; |
1772 | struct lpfc_nvmet_tgtport *tgtp; |
1773 | struct nvmefc_tgt_fcp_req *req = NULL; |
1774 | struct lpfc_nodelist *ndlp; |
1775 | unsigned long iflag = 0; |
1776 | int rrq_empty = 0; |
1777 | bool released = false; |
1778 | |
1779 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
1780 | "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1781 | |
1782 | if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) |
1783 | return; |
1784 | |
1785 | if (phba->targetport) { |
1786 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
1787 | atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1788 | } |
1789 | |
1790 | spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); |
1791 | list_for_each_entry_safe(ctxp, next_ctxp, |
1792 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, |
1793 | list) { |
1794 | if (ctxp->ctxbuf->sglq->sli4_xritag != xri) |
1795 | continue; |
1796 | |
1797 | spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
1798 | iflag);
1799 | |
1800 | spin_lock_irqsave(&ctxp->ctxlock, iflag); |
1801 | /* Check if we already received a free context call |
1802 | * and we have completed processing an abort situation. |
1803 | */ |
1804 | if (ctxp->flag & LPFC_NVME_CTX_RLS && |
1805 | !(ctxp->flag & LPFC_NVME_ABORT_OP)) { |
1806 | spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1807 | list_del_init(&ctxp->list);
1808 | spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1809 | released = true;
1810 | }
1811 | ctxp->flag &= ~LPFC_NVME_XBUSY;
1812 | spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1813 | 
1814 | rrq_empty = list_empty(&phba->active_rrq_list);
1815 | ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); |
1816 | if (ndlp && |
1817 | (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || |
1818 | ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { |
1819 | lpfc_set_rrq_active(phba, ndlp, |
1820 | ctxp->ctxbuf->sglq->sli4_lxritag, |
1821 | rxid, 1); |
1822 | lpfc_sli4_abts_err_handler(phba, ndlp, axri); |
1823 | } |
1824 | |
1825 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
1826 | "6318 XB aborted oxid x%x flg x%x (%x)\n",
1827 | ctxp->oxid, ctxp->flag, released); |
1828 | if (released) |
1829 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1830 | |
1831 | if (rrq_empty) |
1832 | lpfc_worker_wake_up(phba); |
1833 | return; |
1834 | } |
1835 | spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1836 | ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri); |
1837 | if (ctxp) { |
1838 | /* |
1839 | * Abort already done by FW, so BA_ACC sent. |
1840 | * However, the transport may be unaware. |
1841 | */ |
1842 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
1843 | "6323 NVMET Rcv ABTS xri x%x ctxp state x%x " |
1844 | "flag x%x oxid x%x rxid x%x\n",
1845 | xri, ctxp->state, ctxp->flag, ctxp->oxid, |
1846 | rxid); |
1847 | |
1848 | spin_lock_irqsave(&ctxp->ctxlock, iflag); |
1849 | ctxp->flag |= LPFC_NVME_ABTS_RCV; |
1850 | ctxp->state = LPFC_NVME_STE_ABORT; |
1851 | spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1852 | |
1853 | lpfc_nvmeio_data(phba, |
1854 | "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1855 | xri, raw_smp_processor_id(), 0); |
1856 | |
1857 | req = &ctxp->hdlrctx.fcp_req; |
1858 | if (req) |
1859 | nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1860 | } |
1861 | #endif |
1862 | } |
1863 | |
1864 | int |
1865 | lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, |
1866 | struct fc_frame_header *fc_hdr) |
1867 | { |
1868 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
1869 | struct lpfc_hba *phba = vport->phba; |
1870 | struct lpfc_async_xchg_ctx *ctxp, *next_ctxp; |
1871 | struct nvmefc_tgt_fcp_req *rsp; |
1872 | uint32_t sid; |
1873 | uint16_t oxid, xri; |
1874 | unsigned long iflag = 0; |
1875 | |
1876 | sid = sli4_sid_from_fc_hdr(fc_hdr); |
1877 | oxid = be16_to_cpu(fc_hdr->fh_ox_id); |
1878 | |
1879 | spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); |
1880 | list_for_each_entry_safe(ctxp, next_ctxp, |
1881 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, |
1882 | list) { |
1883 | if (ctxp->oxid != oxid || ctxp->sid != sid) |
1884 | continue; |
1885 | |
1886 | xri = ctxp->ctxbuf->sglq->sli4_xritag; |
1887 | |
1888 | spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
1889 | iflag);
1890 | spin_lock_irqsave(&ctxp->ctxlock, iflag);
1891 | ctxp->flag |= LPFC_NVME_ABTS_RCV;
1892 | spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1893 | |
1894 | lpfc_nvmeio_data(phba, |
1895 | "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1896 | xri, raw_smp_processor_id(), 0); |
1897 | |
1898 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
1899 | "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1900 | |
1901 | rsp = &ctxp->hdlrctx.fcp_req; |
1902 | nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1903 | |
1904 | /* Respond with BA_ACC accordingly */ |
1905 | lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1906 | return 0; |
1907 | } |
1908 | spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1909 | /* check the wait list */ |
1910 | if (phba->sli4_hba.nvmet_io_wait_cnt) { |
1911 | struct rqb_dmabuf *nvmebuf; |
1912 | struct fc_frame_header *fc_hdr_tmp; |
1913 | u32 sid_tmp; |
1914 | u16 oxid_tmp; |
1915 | bool found = false; |
1916 | |
1917 | spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); |
1918 | |
1919 | /* match by oxid and s_id */ |
1920 | list_for_each_entry(nvmebuf, |
1921 | &phba->sli4_hba.lpfc_nvmet_io_wait_list, |
1922 | hbuf.list) { |
1923 | fc_hdr_tmp = (struct fc_frame_header *) |
1924 | (nvmebuf->hbuf.virt); |
1925 | oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id); |
1926 | sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp); |
1927 | if (oxid_tmp != oxid || sid_tmp != sid) |
1928 | continue; |
1929 | |
1930 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
1931 | "6321 NVMET Rcv ABTS oxid x%x from x%x " |
1932 | "is waiting for a ctxp\n",
1933 | oxid, sid); |
1934 | |
1935 | list_del_init(&nvmebuf->hbuf.list);
1936 | phba->sli4_hba.nvmet_io_wait_cnt--; |
1937 | found = true; |
1938 | break; |
1939 | } |
1940 | spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1941 | iflag);
1942 | |
1943 | /* free buffer since already posted a new DMA buffer to RQ */ |
1944 | if (found) { |
1945 | nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); |
1946 | /* Respond with BA_ACC accordingly */ |
1947 | lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1948 | return 0; |
1949 | } |
1950 | } |
1951 | |
1952 | /* check active list */ |
1953 | ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid); |
1954 | if (ctxp) { |
1955 | xri = ctxp->ctxbuf->sglq->sli4_xritag; |
1956 | |
1957 | spin_lock_irqsave(&ctxp->ctxlock, iflag); |
1958 | ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP); |
1959 | spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1960 | |
1961 | lpfc_nvmeio_data(phba, |
1962 | "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1963 | xri, raw_smp_processor_id(), 0); |
1964 | |
1965 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
1966 | "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x " |
1967 | "flag x%x state x%x\n",
1968 | ctxp->oxid, xri, ctxp->flag, ctxp->state); |
1969 | |
1970 | if (ctxp->flag & LPFC_NVME_TNOTIFY) { |
1971 | /* Notify the transport */ |
1972 | nvmet_fc_rcv_fcp_abort(phba->targetport,
1973 | &ctxp->hdlrctx.fcp_req);
1974 | } else {
1975 | cancel_work_sync(&ctxp->ctxbuf->defer_work);
1976 | spin_lock_irqsave(&ctxp->ctxlock, iflag);
1977 | lpfc_nvmet_defer_release(phba, ctxp);
1978 | spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1979 | } |
1980 | lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, |
1981 | ctxp->oxid); |
1982 | |
1983 | lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1984 | return 0; |
1985 | } |
1986 | |
1987 | lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1988 | oxid, raw_smp_processor_id(), 1); |
1989 | |
1990 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
1991 | "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
1992 | |
1993 | /* Respond with BA_RJT accordingly */ |
1994 | lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1995 | #endif |
1996 | return 0; |
1997 | } |
1998 | |
1999 | static void |
2000 | lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, |
2001 | struct lpfc_async_xchg_ctx *ctxp) |
2002 | { |
2003 | struct lpfc_sli_ring *pring; |
2004 | struct lpfc_iocbq *nvmewqeq; |
2005 | struct lpfc_iocbq *next_nvmewqeq; |
2006 | unsigned long iflags; |
2007 | struct lpfc_wcqe_complete wcqe; |
2008 | struct lpfc_wcqe_complete *wcqep; |
2009 | |
2010 | pring = wq->pring; |
2011 | wcqep = &wcqe; |
2012 | |
2013 | /* Fake an ABORT error code back to cmpl routine */ |
2014 | memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete)); |
2015 | bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT); |
2016 | wcqep->parameter = IOERR_ABORT_REQUESTED; |
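| /*
|  * Each WQE flushed below is run through its normal completion
|  * handler with this faked WCQE, so cleanup takes the same path
|  * as a genuine firmware-reported abort.
|  */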
2017 | |
2018 | spin_lock_irqsave(&pring->ring_lock, iflags); |
2019 | list_for_each_entry_safe(nvmewqeq, next_nvmewqeq, |
2020 | &wq->wqfull_list, list) { |
2021 | if (ctxp) { |
2022 | /* Checking for a specific IO to flush */ |
2023 | if (nvmewqeq->context_un.axchg == ctxp) { |
2024 | list_del(&nvmewqeq->list);
2025 | spin_unlock_irqrestore(&pring->ring_lock,
2026 | iflags);
2027 | memcpy(&nvmewqeq->wcqe_cmpl, wcqep,
2028 | sizeof(*wcqep));
2029 | lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
2030 | nvmewqeq);
2031 | return; |
2032 | } |
2033 | continue; |
2034 | } else { |
2035 | /* Flush all IOs */ |
2036 | list_del(&nvmewqeq->list);
2037 | spin_unlock_irqrestore(&pring->ring_lock, iflags);
2038 | memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep));
2039 | lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq);
2040 | spin_lock_irqsave(&pring->ring_lock, iflags); |
2041 | } |
2042 | } |
2043 | if (!ctxp) |
2044 | wq->q_flag &= ~HBA_NVMET_WQFULL; |
2045 | spin_unlock_irqrestore(&pring->ring_lock, iflags);
2046 | } |
2047 | |
2048 | void |
2049 | lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, |
2050 | struct lpfc_queue *wq) |
2051 | { |
2052 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
2053 | struct lpfc_sli_ring *pring; |
2054 | struct lpfc_iocbq *nvmewqeq; |
2055 | struct lpfc_async_xchg_ctx *ctxp; |
2056 | unsigned long iflags; |
2057 | int rc; |
2058 | |
2059 | /* |
2060 | * Some WQE slots are available, so try to re-issue anything |
2061 | * on the WQ wqfull_list. |
2062 | */ |
2063 | pring = wq->pring; |
2064 | spin_lock_irqsave(&pring->ring_lock, iflags); |
2065 | while (!list_empty(&wq->wqfull_list)) {
2066 | list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq, |
2067 | list); |
2068 | spin_unlock_irqrestore(&pring->ring_lock, iflags);
2069 | ctxp = nvmewqeq->context_un.axchg;
2070 | rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
2071 | spin_lock_irqsave(&pring->ring_lock, iflags);
2072 | if (rc == -EBUSY) {
2073 | /* WQ was full again; put it back at the head to keep order */
2074 | list_add(&nvmewqeq->list, &wq->wqfull_list);
2075 | spin_unlock_irqrestore(&pring->ring_lock, iflags);
2076 | return; |
2077 | } |
2078 | if (rc == WQE_SUCCESS) { |
2079 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
2080 | if (ctxp->ts_cmd_nvme) { |
2081 | if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP) |
2082 | ctxp->ts_status_wqput = ktime_get_ns(); |
2083 | else |
2084 | ctxp->ts_data_wqput = ktime_get_ns(); |
2085 | } |
2086 | #endif |
2087 | } else { |
2088 | WARN_ON(rc); |
2089 | } |
2090 | } |
2091 | wq->q_flag &= ~HBA_NVMET_WQFULL; |
2092 | spin_unlock_irqrestore(&pring->ring_lock, iflags);
2093 | |
2094 | #endif |
2095 | } |
2096 | |
2097 | void |
2098 | lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) |
2099 | { |
2100 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
2101 | struct lpfc_nvmet_tgtport *tgtp; |
2102 | struct lpfc_queue *wq; |
2103 | uint32_t qidx; |
2104 | DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp); |
2105 | |
2106 | if (phba->nvmet_support == 0) |
2107 | return; |
2108 | if (phba->targetport) { |
2109 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
2110 | for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { |
2111 | wq = phba->sli4_hba.hdwq[qidx].io_wq; |
2112 | lpfc_nvmet_wqfull_flush(phba, wq, NULL); |
2113 | } |
2114 | tgtp->tport_unreg_cmp = &tport_unreg_cmp; |
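| /*
|  * The transport's targetport_delete callback completes
|  * tport_unreg_cmp once the final reference on the targetport is
|  * dropped; the wait below is bounded so a stuck unregister cannot
|  * hang the unload path indefinitely.
|  */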
2115 | nvmet_fc_unregister_targetport(phba->targetport);
2116 | if (!wait_for_completion_timeout(&tport_unreg_cmp,
2117 | msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
2118 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2119 | "6179 Unreg targetport x%px timeout "
2120 | "reached.\n", phba->targetport);
2121 | lpfc_nvmet_cleanup_io_context(phba); |
2122 | } |
2123 | phba->targetport = NULL; |
2124 | #endif |
2125 | } |
2126 | |
2127 | /** |
2128 | * lpfc_nvmet_handle_lsreq - Process an NVME LS request |
2129 | * @phba: pointer to lpfc hba data structure. |
2130 | * @axchg: pointer to exchange context for the NVME LS request |
2131 | * |
2132 | * This routine is used for processing an asynchronously received NVME LS
2133 | * request. Any remaining validation is done and the LS is then forwarded |
2134 | * to the nvmet-fc transport via nvmet_fc_rcv_ls_req(). |
2135 | * |
2136 | * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing) |
2137 | * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done. |
2138 | * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg. |
2139 | * |
2140 | * Returns 0 if LS was handled and delivered to the transport |
2141 | * Returns 1 if LS failed to be handled and should be dropped |
2142 | */ |
2143 | int |
2144 | lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba, |
2145 | struct lpfc_async_xchg_ctx *axchg) |
2146 | { |
2147 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
2148 | struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private; |
2149 | uint32_t *payload = axchg->payload; |
2150 | int rc; |
2151 | |
2152 | atomic_inc(&tgtp->rcv_ls_req_in);
2153 | |
2154 | /* |
2155 | * Driver passes the ndlp as the hosthandle argument allowing |
2156 | * the transport to generate LS requests for any associations
2157 | * that are created. |
2158 | */ |
2159 | rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
2160 | axchg->payload, axchg->size);
2161 | |
2162 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, |
2163 | "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x " |
2164 | "%08x %08x %08x\n", axchg->size, rc,
2165 | *payload, *(payload+1), *(payload+2), |
2166 | *(payload+3), *(payload+4), *(payload+5)); |
2167 | |
2168 | if (!rc) { |
2169 | atomic_inc(&tgtp->rcv_ls_req_out);
2170 | return 0; |
2171 | } |
2172 | |
2173 | atomic_inc(&tgtp->rcv_ls_req_drop);
2174 | #endif |
2175 | return 1; |
2176 | } |
2177 | |
2178 | static void |
2179 | lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) |
2180 | { |
2181 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
2182 | struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context; |
2183 | struct lpfc_hba *phba = ctxp->phba; |
2184 | struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; |
2185 | struct lpfc_nvmet_tgtport *tgtp; |
2186 | uint32_t *payload, qno; |
2187 | uint32_t rc; |
2188 | unsigned long iflags; |
2189 | |
2190 | if (!nvmebuf) { |
2191 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2192 | "6159 process_rcv_fcp_req, nvmebuf is NULL, " |
2193 | "oxid: x%x flg: x%x state: x%x\n",
2194 | ctxp->oxid, ctxp->flag, ctxp->state); |
2195 | spin_lock_irqsave(&ctxp->ctxlock, iflags); |
2196 | lpfc_nvmet_defer_release(phba, ctxp); |
2197 | spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2198 | lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, |
2199 | ctxp->oxid); |
2200 | return; |
2201 | } |
2202 | |
2203 | if (ctxp->flag & LPFC_NVME_ABTS_RCV) { |
2204 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2205 | "6324 IO oxid x%x aborted\n",
2206 | ctxp->oxid); |
2207 | return; |
2208 | } |
2209 | |
2210 | payload = (uint32_t *)(nvmebuf->dbuf.virt); |
2211 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
2212 | ctxp->flag |= LPFC_NVME_TNOTIFY; |
2213 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
2214 | if (ctxp->ts_isr_cmd) |
2215 | ctxp->ts_cmd_nvme = ktime_get_ns(); |
2216 | #endif |
2217 | /* |
2218 | * The calling sequence should be: |
2219 | * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2220 | * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2221 | * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
2222 | * the NVME command / FC header has been saved off.
2223 | * A buffer has already been reposted for this IO, so just free
2224 | * the nvmebuf.
2225 | */
2226 | rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
2227 | payload, ctxp->size);
2228 | /* Process FCP command */ |
2229 | if (rc == 0) { |
2230 | atomic_inc(&tgtp->rcv_fcp_cmd_out);
2231 | spin_lock_irqsave(&ctxp->ctxlock, iflags); |
2232 | if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) || |
2233 | (nvmebuf != ctxp->rqb_buffer)) { |
2234 | spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2235 | return; |
2236 | } |
2237 | ctxp->rqb_buffer = NULL; |
2238 | spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2239 | lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2240 | return; |
2241 | } |
2242 | |
2243 | /* Processing of FCP command is deferred */ |
2244 | if (rc == -EOVERFLOW) { |
2245 | lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2246 | "from %06x\n",
2247 | ctxp->oxid, ctxp->size, ctxp->sid);
2248 | atomic_inc(&tgtp->rcv_fcp_cmd_out);
2249 | atomic_inc(&tgtp->defer_fod);
2250 | spin_lock_irqsave(&ctxp->ctxlock, iflags); |
2251 | if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) { |
2252 | spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2253 | return; |
2254 | } |
2255 | spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2256 | /* |
2257 | * Post a replacement DMA buffer to RQ and defer |
2258 | * freeing rcv buffer till .defer_rcv callback |
2259 | */ |
2260 | qno = nvmebuf->idx; |
2261 | lpfc_post_rq_buffer(
2262 | phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2263 | phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2264 | return; |
2265 | } |
2266 | ctxp->flag &= ~LPFC_NVME_TNOTIFY; |
2267 | atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2268 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2269 | "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2270 | ctxp->oxid, rc, |
2271 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
2272 | atomic_read(&tgtp->rcv_fcp_cmd_out), |
2273 | atomic_read(&tgtp->xmt_fcp_release)); |
2274 | lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2275 | ctxp->oxid, ctxp->size, ctxp->sid); |
2276 | spin_lock_irqsave(&ctxp->ctxlock, iflags); |
2277 | lpfc_nvmet_defer_release(phba, ctxp); |
2278 | spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2279 | lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); |
2280 | #endif |
2281 | } |
2282 | |
2283 | static void |
2284 | lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work) |
2285 | { |
2286 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
2287 | struct lpfc_nvmet_ctxbuf *ctx_buf = |
2288 | container_of(work, struct lpfc_nvmet_ctxbuf, defer_work); |
2289 | |
2290 | lpfc_nvmet_process_rcv_fcp_req(ctx_buf); |
2291 | #endif |
2292 | } |
2293 | |
2294 | static struct lpfc_nvmet_ctxbuf * |
2295 | lpfc_nvmet_replenish_context(struct lpfc_hba *phba, |
2296 | struct lpfc_nvmet_ctx_info *current_infop) |
2297 | { |
2298 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
2299 | struct lpfc_nvmet_ctxbuf *ctx_buf = NULL; |
2300 | struct lpfc_nvmet_ctx_info *get_infop; |
2301 | int i; |
2302 | |
2303 | /* |
2304 | * The current_infop for the MRQ an NVME command IU was received
2305 | * on is empty. Our goal is to replenish this MRQ's context
2306 | * list from another CPU's list for the same MRQ.
2307 | *
2308 | * First we need to pick a context list to start looking on.
2309 | * nvmet_ctx_start_cpu is the CPU that had contexts available the
2310 | * last time we needed to replenish this CPU, while nvmet_ctx_next_cpu
2311 | * is just the next sequential CPU for this MRQ.
2312 | */ |
2313 | if (current_infop->nvmet_ctx_start_cpu) |
2314 | get_infop = current_infop->nvmet_ctx_start_cpu; |
2315 | else |
2316 | get_infop = current_infop->nvmet_ctx_next_cpu; |
2317 | |
2318 | for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) { |
2319 | if (get_infop == current_infop) { |
2320 | get_infop = get_infop->nvmet_ctx_next_cpu; |
2321 | continue; |
2322 | } |
2323 | spin_lock(&get_infop->nvmet_ctx_list_lock);
2324 | |
2325 | /* Just take the entire context list, if there are any */ |
2326 | if (get_infop->nvmet_ctx_list_cnt) { |
2327 | list_splice_init(&get_infop->nvmet_ctx_list,
2328 | &current_infop->nvmet_ctx_list);
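| /* One ctx_buf is removed from the spliced list below and
|  * returned to the caller, hence count is the donor's count - 1.
|  */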
2329 | current_infop->nvmet_ctx_list_cnt = |
2330 | get_infop->nvmet_ctx_list_cnt - 1; |
2331 | get_infop->nvmet_ctx_list_cnt = 0; |
2332 | spin_unlock(&get_infop->nvmet_ctx_list_lock);
2333 | |
2334 | current_infop->nvmet_ctx_start_cpu = get_infop; |
2335 | list_remove_head(¤t_infop->nvmet_ctx_list, |
2336 | ctx_buf, struct lpfc_nvmet_ctxbuf, |
2337 | list); |
2338 | return ctx_buf; |
2339 | } |
2340 | |
2341 | /* Otherwise, move on to the next CPU for this MRQ */ |
2342 | spin_unlock(&get_infop->nvmet_ctx_list_lock);
2343 | get_infop = get_infop->nvmet_ctx_next_cpu; |
2344 | } |
2345 | |
2346 | #endif |
2347 | /* Nothing found, all contexts for the MRQ are in-flight */ |
2348 | return NULL; |
2349 | } |
2350 | |
2351 | /** |
2352 | * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer |
2353 | * @phba: pointer to lpfc hba data structure. |
2354 | * @idx: relative index of MRQ vector |
2355 | * @nvmebuf: pointer to lpfc nvme command HBQ data structure. |
2356 | * @isr_timestamp: in jiffies. |
2357 | * @cqflag: cq processing information regarding workload. |
2358 | * |
2359 | * This routine is used for processing the WQE associated with an unsolicited
2360 | * event. It allocates a free NVMET context buffer for the MRQ (replenishing
2361 | * from another CPU's list if this CPU's list is empty), initializes the
2362 | * exchange context from the received FC header and command IU, and forwards
2363 | * the FCP command to the nvmet-fc transport, either inline or via deferred
2364 | * work depending on @cqflag. If no context is available, the IO is queued.
2365 | **/ |
2366 | static void |
2367 | lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, |
2368 | uint32_t idx, |
2369 | struct rqb_dmabuf *nvmebuf, |
2370 | uint64_t isr_timestamp, |
2371 | uint8_t cqflag) |
2372 | { |
2373 | struct lpfc_async_xchg_ctx *ctxp; |
2374 | struct lpfc_nvmet_tgtport *tgtp; |
2375 | struct fc_frame_header *fc_hdr; |
2376 | struct lpfc_nvmet_ctxbuf *ctx_buf; |
2377 | struct lpfc_nvmet_ctx_info *current_infop; |
2378 | uint32_t size, oxid, sid, qno; |
2379 | unsigned long iflag; |
2380 | int current_cpu; |
2381 | |
2382 | if (!IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
2383 | return; |
2384 | |
2385 | ctx_buf = NULL; |
2386 | if (!nvmebuf || !phba->targetport) { |
2387 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2388 | "6157 NVMET FCP Drop IO\n");
2389 | if (nvmebuf)
2390 | lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2391 | return; |
2392 | } |
2393 | |
2394 | /* |
2395 | * Get a pointer to the context list for this MRQ based on |
2396 | * the CPU this MRQ IRQ is associated with. If the CPU association |
2397 | * changes from our initial assumption, the context list could |
2398 | * be empty, thus it would need to be replenished with the |
2399 | * context list from another CPU for this MRQ. |
2400 | */ |
2401 | current_cpu = raw_smp_processor_id(); |
2402 | current_infop = lpfc_get_ctx_list(phba, current_cpu, idx); |
2403 | spin_lock_irqsave(¤t_infop->nvmet_ctx_list_lock, iflag); |
2404 | if (current_infop->nvmet_ctx_list_cnt) { |
2405 | list_remove_head(¤t_infop->nvmet_ctx_list, |
2406 | ctx_buf, struct lpfc_nvmet_ctxbuf, list); |
2407 | current_infop->nvmet_ctx_list_cnt--; |
2408 | } else { |
2409 | ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop); |
2410 | } |
2411 | spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2412 | |
2413 | fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); |
2414 | oxid = be16_to_cpu(fc_hdr->fh_ox_id); |
2415 | size = nvmebuf->bytes_recv; |
2416 | |
2417 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
2418 | if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { |
2419 | this_cpu_inc(phba->sli4_hba.c_stat->rcv_io); |
2420 | if (idx != current_cpu) |
2421 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, |
2422 | "6703 CPU Check rcv: " |
2423 | "cpu %d expect %d\n",
2424 | current_cpu, idx); |
2425 | } |
2426 | #endif |
2427 | |
2428 | lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
2429 | oxid, size, raw_smp_processor_id()); |
2430 | |
2431 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
2432 | |
2433 | if (!ctx_buf) { |
2434 | /* Queue this NVME IO to process later */ |
2435 | spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); |
2436 | list_add_tail(&nvmebuf->hbuf.list,
2437 | &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2438 | phba->sli4_hba.nvmet_io_wait_cnt++; |
2439 | phba->sli4_hba.nvmet_io_wait_total++; |
2440 | spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2441 | iflag);
2442 | |
2443 | /* Post a brand new DMA buffer to RQ */ |
2444 | qno = nvmebuf->idx; |
2445 | lpfc_post_rq_buffer(
2446 | phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2447 | phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2448 | |
2449 | atomic_inc(&tgtp->defer_ctx);
2450 | return; |
2451 | } |
2452 | |
2453 | sid = sli4_sid_from_fc_hdr(fc_hdr); |
2454 | |
2455 | ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context; |
2456 | spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); |
2457 | list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2458 | spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2459 | if (ctxp->state != LPFC_NVME_STE_FREE) { |
2460 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2461 | "6414 NVMET Context corrupt %d %d oxid x%x\n",
2462 | ctxp->state, ctxp->entry_cnt, ctxp->oxid); |
2463 | } |
2464 | ctxp->wqeq = NULL; |
2465 | ctxp->offset = 0; |
2466 | ctxp->phba = phba; |
2467 | ctxp->size = size; |
2468 | ctxp->oxid = oxid; |
2469 | ctxp->sid = sid; |
2470 | ctxp->idx = idx; |
2471 | ctxp->state = LPFC_NVME_STE_RCV; |
2472 | ctxp->entry_cnt = 1; |
2473 | ctxp->flag = 0; |
2474 | ctxp->ctxbuf = ctx_buf; |
2475 | ctxp->rqb_buffer = (void *)nvmebuf; |
2476 | ctxp->hdwq = NULL; |
2477 | spin_lock_init(&ctxp->ctxlock); |
2478 | |
2479 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
2480 | if (isr_timestamp) |
2481 | ctxp->ts_isr_cmd = isr_timestamp; |
2482 | ctxp->ts_cmd_nvme = 0; |
2483 | ctxp->ts_nvme_data = 0; |
2484 | ctxp->ts_data_wqput = 0; |
2485 | ctxp->ts_isr_data = 0; |
2486 | ctxp->ts_data_nvme = 0; |
2487 | ctxp->ts_nvme_status = 0; |
2488 | ctxp->ts_status_wqput = 0; |
2489 | ctxp->ts_isr_status = 0; |
2490 | ctxp->ts_status_nvme = 0; |
2491 | #endif |
2492 | |
2493 | atomic_inc(&tgtp->rcv_fcp_cmd_in);
2494 | /* check for cq processing load */ |
2495 | if (!cqflag) { |
2496 | lpfc_nvmet_process_rcv_fcp_req(ctx_buf); |
2497 | return; |
2498 | } |
2499 | |
2500 | if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2501 | atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2502 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2503 | "6325 Unable to queue work for oxid x%x. " |
2504 | "FCP Drop IO [x%x x%x x%x]\n",
2505 | ctxp->oxid, |
2506 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
2507 | atomic_read(&tgtp->rcv_fcp_cmd_out), |
2508 | atomic_read(&tgtp->xmt_fcp_release)); |
2509 | |
2510 | spin_lock_irqsave(&ctxp->ctxlock, iflag); |
2511 | lpfc_nvmet_defer_release(phba, ctxp); |
2512 | spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2513 | lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); |
2514 | } |
2515 | } |
2516 | |
2517 | /** |
2518 | * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport |
2519 | * @phba: pointer to lpfc hba data structure. |
2520 | * @idx: relative index of MRQ vector |
2521 | * @nvmebuf: pointer to received nvme data structure. |
2522 | * @isr_timestamp: in jiffies. |
2523 | * @cqflag: cq processing information regarding workload. |
2524 | * |
2525 | * This routine is used to process an unsolicited event received from a SLI |
2526 | * (Service Level Interface) ring. The actual processing of the data buffer |
2527 | * associated with the unsolicited event is done by invoking the routine |
2528 | * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2529 | * SLI RQ on which the unsolicited event was received. |
2530 | **/ |
2531 | void |
2532 | lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, |
2533 | uint32_t idx, |
2534 | struct rqb_dmabuf *nvmebuf, |
2535 | uint64_t isr_timestamp, |
2536 | uint8_t cqflag) |
2537 | { |
2538 | if (!nvmebuf) { |
2539 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2540 | "3167 NVMET FCP Drop IO\n");
2541 | return; |
2542 | } |
2543 | if (phba->nvmet_support == 0) { |
2544 | lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2545 | return; |
2546 | } |
2547 | lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag); |
2548 | } |
2549 | |
2550 | /** |
2551 | * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure |
2552 | * @phba: pointer to a host N_Port data structure. |
2553 | * @ctxp: Context info for NVME LS Request |
2554 | * @rspbuf: DMA buffer of NVME command. |
2555 | * @rspsize: size of the NVME command. |
2556 | * |
2557 | * This routine is used for allocating a lpfc-WQE data structure from |
2558 | * the driver lpfc-WQE free-list and prepare the WQE with the parameters |
2559 | * passed into the routine for the discovery state machine to issue an
2560 | * Extended Link Service (NVME) command. It is a generic lpfc-WQE allocation
2561 | * and preparation routine that is used by all the discovery state machine
2562 | * routines, and the NVME command-specific fields will be set up later by
2563 | * the individual discovery machine routines after calling this routine
2564 | * to allocate and prepare a generic WQE data structure. It fills in the
2565 | * Buffer Descriptor Entries (BDEs), allocates buffers for both command |
2566 | * payload and response payload (if expected). The reference count on the |
2567 | * ndlp is incremented by 1 and the reference to the ndlp is put into |
2568 | * context1 of the WQE data structure for this WQE to hold the ndlp |
2569 | * reference for the command's callback function to access later. |
2570 | * |
2571 | * Return code |
2572 | * Pointer to the newly allocated/prepared nvme wqe data structure |
2573 | * NULL - when nvme wqe data structure allocation/preparation failed |
2574 | **/ |
2575 | static struct lpfc_iocbq * |
2576 | lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, |
2577 | struct lpfc_async_xchg_ctx *ctxp, |
2578 | dma_addr_t rspbuf, uint16_t rspsize) |
2579 | { |
2580 | struct lpfc_nodelist *ndlp; |
2581 | struct lpfc_iocbq *nvmewqe; |
2582 | union lpfc_wqe128 *wqe; |
2583 | |
2584 | if (!lpfc_is_link_up(phba)) { |
2585 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2586 | "6104 NVMET prep LS wqe: link err: " |
2587 | "NPORT x%x oxid:x%x ste %d\n",
2588 | ctxp->sid, ctxp->oxid, ctxp->state); |
2589 | return NULL; |
2590 | } |
2591 | |
2592 | /* Allocate buffer for command wqe */ |
2593 | nvmewqe = lpfc_sli_get_iocbq(phba); |
2594 | if (nvmewqe == NULL) { |
2595 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2596 | "6105 NVMET prep LS wqe: No WQE: " |
2597 | "NPORT x%x oxid x%x ste %d\n",
2598 | ctxp->sid, ctxp->oxid, ctxp->state); |
2599 | return NULL; |
2600 | } |
2601 | |
2602 | ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); |
2603 | if (!ndlp || |
2604 | ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && |
2605 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { |
2606 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2607 | "6106 NVMET prep LS wqe: No ndlp: " |
2608 | "NPORT x%x oxid x%x ste %d\n",
2609 | ctxp->sid, ctxp->oxid, ctxp->state); |
2610 | goto nvme_wqe_free_wqeq_exit; |
2611 | } |
2612 | ctxp->wqeq = nvmewqe; |
2613 | |
2614 | /* prevent preparing wqe with NULL ndlp reference */ |
2615 | nvmewqe->ndlp = lpfc_nlp_get(ndlp); |
2616 | if (!nvmewqe->ndlp) |
2617 | goto nvme_wqe_free_wqeq_exit; |
2618 | nvmewqe->context_un.axchg = ctxp; |
2619 | |
2620 | wqe = &nvmewqe->wqe; |
2621 | memset(wqe, 0, sizeof(union lpfc_wqe)); |
2622 | |
2623 | /* Words 0 - 2 */ |
2624 | wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
2625 | wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize; |
2626 | wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf)); |
2627 | wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf)); |
2628 | |
2629 | /* Word 3 */ |
2630 | |
2631 | /* Word 4 */ |
2632 | |
2633 | /* Word 5 */ |
2634 | bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); |
2635 | bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1); |
2636 | bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0); |
2637 | bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP); |
2638 | bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME); |
2639 | |
2640 | /* Word 6 */ |
2641 | bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, |
2642 | phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); |
2643 | bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag); |
2644 | |
2645 | /* Word 7 */ |
2646 | bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, |
2647 | CMD_XMIT_SEQUENCE64_WQE); |
2648 | bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI); |
2649 | bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); |
2650 | bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); |
2651 | |
2652 | /* Word 8 */ |
2653 | wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag; |
2654 | |
2655 | /* Word 9 */ |
2656 | bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag); |
2657 | /* Needs to be set by caller */ |
2658 | bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid); |
2659 | |
2660 | /* Word 10 */ |
2661 | bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); |
2662 | bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); |
2663 | bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, |
2664 | LPFC_WQE_LENLOC_WORD12); |
2665 | bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); |
2666 | |
2667 | /* Word 11 */ |
2668 | bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com, |
2669 | LPFC_WQE_CQ_ID_DEFAULT); |
2670 | bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com, |
2671 | OTHER_COMMAND); |
2672 | |
2673 | /* Word 12 */ |
2674 | wqe->xmit_sequence.xmit_len = rspsize; |
2675 | |
2676 | nvmewqe->retry = 1; |
2677 | nvmewqe->vport = phba->pport; |
2678 | nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; |
2679 | nvmewqe->cmd_flag |= LPFC_IO_NVME_LS; |
2680 | |
2681 | /* Xmit NVMET response to remote NPORT <did> */ |
2682 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, |
2683 | "6039 Xmit NVMET LS response to remote " |
2684 | "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2685 | ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid, |
2686 | rspsize); |
2687 | return nvmewqe; |
2688 | |
2689 | nvme_wqe_free_wqeq_exit: |
2690 | nvmewqe->context_un.axchg = NULL; |
2691 | nvmewqe->ndlp = NULL; |
2692 | nvmewqe->bpl_dmabuf = NULL; |
2693 | lpfc_sli_release_iocbq(phba, nvmewqe); |
2694 | return NULL; |
2695 | } |
2696 | |
2697 | |
2698 | static struct lpfc_iocbq * |
2699 | lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, |
2700 | struct lpfc_async_xchg_ctx *ctxp) |
2701 | { |
2702 | struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req; |
2703 | struct lpfc_nvmet_tgtport *tgtp; |
2704 | struct sli4_sge *sgl; |
2705 | struct lpfc_nodelist *ndlp; |
2706 | struct lpfc_iocbq *nvmewqe; |
2707 | struct scatterlist *sgel; |
2708 | union lpfc_wqe128 *wqe; |
2709 | struct ulp_bde64 *bde; |
2710 | dma_addr_t physaddr; |
2711 | int i, cnt, nsegs; |
2712 | bool use_pbde = false; |
2713 | int xc = 1; |
2714 | |
2715 | if (!lpfc_is_link_up(phba)) { |
2716 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2717 | "6107 NVMET prep FCP wqe: link err: "
2718 | "NPORT x%x oxid x%x ste %d\n",
2719 | ctxp->sid, ctxp->oxid, ctxp->state); |
2720 | return NULL; |
2721 | } |
2722 | |
2723 | ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); |
2724 | if (!ndlp || |
2725 | ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && |
2726 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { |
2727 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2728 | "6108 NVMET prep FCP wqe: no ndlp: " |
2729 | "NPORT x%x oxid x%x ste %d\n",
2730 | ctxp->sid, ctxp->oxid, ctxp->state); |
2731 | return NULL; |
2732 | } |
2733 | |
2734 | if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) { |
2735 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2736 | "6109 NVMET prep FCP wqe: seg cnt err: " |
2737 | "NPORT x%x oxid x%x ste %d cnt %d\n",
2738 | ctxp->sid, ctxp->oxid, ctxp->state, |
2739 | phba->cfg_nvme_seg_cnt); |
2740 | return NULL; |
2741 | } |
2742 | nsegs = rsp->sg_cnt; |
2743 | |
2744 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
2745 | nvmewqe = ctxp->wqeq; |
2746 | if (nvmewqe == NULL) { |
2747 | /* Allocate buffer for command wqe */ |
2748 | nvmewqe = ctxp->ctxbuf->iocbq; |
2749 | if (nvmewqe == NULL) { |
2750 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2751 | "6110 NVMET prep FCP wqe: No " |
2752 | "WQE: NPORT x%x oxid x%x ste %d\n",
2753 | ctxp->sid, ctxp->oxid, ctxp->state); |
2754 | return NULL; |
2755 | } |
2756 | ctxp->wqeq = nvmewqe; |
2757 | xc = 0; /* create new XRI */ |
2758 | nvmewqe->sli4_lxritag = NO_XRI; |
2759 | nvmewqe->sli4_xritag = NO_XRI; |
2760 | } |
2761 | |
2762 | /* Sanity check */ |
2763 | if (((ctxp->state == LPFC_NVME_STE_RCV) && |
2764 | (ctxp->entry_cnt == 1)) || |
2765 | (ctxp->state == LPFC_NVME_STE_DATA)) { |
2766 | wqe = &nvmewqe->wqe; |
2767 | } else { |
2768 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2769 | "6111 Wrong state NVMET FCP: %d cnt %d\n",
2770 | ctxp->state, ctxp->entry_cnt); |
2771 | return NULL; |
2772 | } |
2773 | |
2774 | sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl; |
2775 | switch (rsp->op) { |
2776 | case NVMET_FCOP_READDATA: |
2777 | case NVMET_FCOP_READDATA_RSP: |
2778 | /* From the tsend template, initialize words 7 - 11 */ |
2779 | memcpy(&wqe->words[7], |
2780 | &lpfc_tsend_cmd_template.words[7], |
2781 | sizeof(uint32_t) * 5); |
2782 | |
2783 | /* Words 0 - 2 : The first sg segment */ |
2784 | sgel = &rsp->sg[0]; |
2785 | physaddr = sg_dma_address(sgel); |
2786 | wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
2787 | wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel); |
2788 | wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr)); |
2789 | wqe->fcp_tsend.bde.addrHigh = |
2790 | cpu_to_le32(putPaddrHigh(physaddr)); |
2791 | |
2792 | /* Word 3 */ |
2793 | wqe->fcp_tsend.payload_offset_len = 0; |
2794 | |
2795 | /* Word 4 */ |
2796 | wqe->fcp_tsend.relative_offset = ctxp->offset; |
2797 | |
2798 | /* Word 5 */ |
2799 | wqe->fcp_tsend.reserved = 0; |
2800 | |
2801 | /* Word 6 */ |
2802 | bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, |
2803 | phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); |
2804 | bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, |
2805 | nvmewqe->sli4_xritag); |
2806 | |
2807 | /* Word 7 - set ar later */ |
2808 | |
2809 | /* Word 8 */ |
2810 | wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag; |
2811 | |
2812 | /* Word 9 */ |
2813 | bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag); |
2814 | bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid); |
2815 | |
2816 | /* Word 10 - set wqes later, in template xc=1 */ |
2817 | if (!xc) |
2818 | bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0); |
2819 | |
2820 | /* Word 12 */ |
2821 | wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; |
2822 | |
2823 | /* Setup 2 SKIP SGEs */ |
2824 | sgl->addr_hi = 0; |
2825 | sgl->addr_lo = 0; |
2826 | sgl->word2 = 0; |
2827 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); |
2828 | sgl->word2 = cpu_to_le32(sgl->word2); |
2829 | sgl->sge_len = 0; |
2830 | sgl++; |
2831 | sgl->addr_hi = 0; |
2832 | sgl->addr_lo = 0; |
2833 | sgl->word2 = 0; |
2834 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); |
2835 | sgl->word2 = cpu_to_le32(sgl->word2); |
2836 | sgl->sge_len = 0; |
2837 | sgl++; |
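| /* The first two SGEs of an NVMET exchange SGL are reserved for
|  * the NVME cmd and rsp IUs (see the "+ 3" note in
|  * lpfc_nvmet_create_targetport), so both are marked SKIP here and
|  * the data SGEs are filled in starting with the third entry.
|  */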
2838 | if (rsp->op == NVMET_FCOP_READDATA_RSP) { |
2839 | atomic_inc(&tgtp->xmt_fcp_read_rsp);
2840 | |
2841 | /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */ |
2842 | |
2843 | if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { |
2844 | if (ndlp->nlp_flag & NLP_SUPPRESS_RSP) |
2845 | bf_set(wqe_sup, |
2846 | &wqe->fcp_tsend.wqe_com, 1); |
2847 | } else { |
2848 | bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1); |
2849 | bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1); |
2850 | bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, |
2851 | ((rsp->rsplen >> 2) - 1)); |
2852 | memcpy(&wqe->words[16], rsp->rspaddr, |
2853 | rsp->rsplen); |
2854 | } |
2855 | } else { |
2856 | atomic_inc(&tgtp->xmt_fcp_read);
2857 | |
2858 | /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */ |
2859 | bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0); |
2860 | } |
2861 | break; |
2862 | |
2863 | case NVMET_FCOP_WRITEDATA: |
2864 | /* From the treceive template, initialize words 3 - 11 */ |
2865 | memcpy(&wqe->words[3], |
2866 | &lpfc_treceive_cmd_template.words[3], |
2867 | sizeof(uint32_t) * 9); |
2868 | |
2869 | /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */ |
2870 | wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP; |
2871 | wqe->fcp_treceive.bde.tus.f.bdeSize = 0; |
2872 | wqe->fcp_treceive.bde.addrLow = 0; |
2873 | wqe->fcp_treceive.bde.addrHigh = 0; |
2874 | |
2875 | /* Word 4 */ |
2876 | wqe->fcp_treceive.relative_offset = ctxp->offset; |
2877 | |
2878 | /* Word 6 */ |
2879 | bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com, |
2880 | phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); |
2881 | bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com, |
2882 | nvmewqe->sli4_xritag); |
2883 | |
2884 | /* Word 7 */ |
2885 | |
2886 | /* Word 8 */ |
2887 | wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag; |
2888 | |
2889 | /* Word 9 */ |
2890 | bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag); |
2891 | bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid); |
2892 | |
2893 | /* Word 10 - in template xc=1 */ |
2894 | if (!xc) |
2895 | bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0); |
2896 | |
2897 | /* Word 11 - check for pbde */ |
2898 | if (nsegs == 1 && phba->cfg_enable_pbde) { |
2899 | use_pbde = true; |
2900 | /* Word 11 - PBDE bit already preset by template */ |
2901 | } else { |
2902 | /* Overwrite default template setting */ |
2903 | bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0); |
2904 | } |
2905 | |
2906 | /* Word 12 */ |
2907 | wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; /* fcp_data_len occupies word 12 in both the tsend and treceive union layouts */
2908 | |
2909 | /* Setup 2 SKIP SGEs */ |
2910 | sgl->addr_hi = 0; |
2911 | sgl->addr_lo = 0; |
2912 | sgl->word2 = 0; |
2913 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); |
2914 | sgl->word2 = cpu_to_le32(sgl->word2); |
2915 | sgl->sge_len = 0; |
2916 | sgl++; |
2917 | sgl->addr_hi = 0; |
2918 | sgl->addr_lo = 0; |
2919 | sgl->word2 = 0; |
2920 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); |
2921 | sgl->word2 = cpu_to_le32(sgl->word2); |
2922 | sgl->sge_len = 0; |
2923 | sgl++; |
2924 | atomic_inc(&tgtp->xmt_fcp_write);
2925 | break; |
2926 | |
2927 | case NVMET_FCOP_RSP: |
2928 | /* From the trsp template, initialize words 4 - 11 */
2929 | memcpy(&wqe->words[4], |
2930 | &lpfc_trsp_cmd_template.words[4], |
2931 | sizeof(uint32_t) * 8); |
2932 | |
2933 | /* Words 0 - 2 */ |
2934 | physaddr = rsp->rspdma; |
2935 | wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
2936 | wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen; |
2937 | wqe->fcp_trsp.bde.addrLow = |
2938 | cpu_to_le32(putPaddrLow(physaddr)); |
2939 | wqe->fcp_trsp.bde.addrHigh = |
2940 | cpu_to_le32(putPaddrHigh(physaddr)); |
2941 | |
2942 | /* Word 3 */ |
2943 | wqe->fcp_trsp.response_len = rsp->rsplen; |
2944 | |
2945 | /* Word 6 */ |
2946 | bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com, |
2947 | phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); |
2948 | bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com, |
2949 | nvmewqe->sli4_xritag); |
2950 | |
2951 | /* Word 7 */ |
2952 | |
2953 | /* Word 8 */ |
2954 | wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag; |
2955 | |
2956 | /* Word 9 */ |
2957 | bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag); |
2958 | bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid); |
2959 | |
2960 | /* Word 10 */ |
2961 | if (xc) |
2962 | bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1); |
2963 | |
2964 | /* Word 11 */ |
2965 | /* In template wqes=0 irsp=0 irsplen=0 - good response */ |
2966 | if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) { |
2967 | /* Bad response - embed it */ |
2968 | bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1); |
2969 | bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1); |
2970 | bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, |
2971 | ((rsp->rsplen >> 2) - 1)); |
2972 | memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen); |
2973 | } |
2974 | |
2975 | /* Word 12 */ |
2976 | wqe->fcp_trsp.rsvd_12_15[0] = 0; |
2977 | |
2978 | /* Use rspbuf, NOT sg list */ |
2979 | nsegs = 0; |
2980 | sgl->word2 = 0; |
2981 | atomic_inc(&tgtp->xmt_fcp_rsp);
2982 | break; |
2983 | |
2984 | default: |
2985 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, |
2986 | "6064 Unknown Rsp Op %d\n",
2987 | rsp->op); |
2988 | return NULL; |
2989 | } |
2990 | |
2991 | nvmewqe->retry = 1; |
2992 | nvmewqe->vport = phba->pport; |
2993 | nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; |
2994 | nvmewqe->ndlp = ndlp; |
2995 | |
2996 | for_each_sg(rsp->sg, sgel, nsegs, i) { |
2997 | physaddr = sg_dma_address(sgel); |
2998 | cnt = sg_dma_len(sgel); |
2999 | sgl->addr_hi = putPaddrHigh(physaddr); |
3000 | sgl->addr_lo = putPaddrLow(physaddr); |
3001 | sgl->word2 = 0; |
3002 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); |
3003 | bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset); |
3004 | if ((i+1) == rsp->sg_cnt) |
3005 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
3006 | sgl->word2 = cpu_to_le32(sgl->word2); |
3007 | sgl->sge_len = cpu_to_le32(cnt); |
3008 | sgl++; |
3009 | ctxp->offset += cnt; |
3010 | } |
3011 | |
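| /* When only one data segment is in play and cfg_enable_pbde is
|  * set, mirror that SGE into WQE words 13-15 as a PBDE so the
|  * adapter can start the transfer without first fetching the SGL
|  * entry; otherwise words 13-15 are cleared.
|  */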
3012 | bde = (struct ulp_bde64 *)&wqe->words[13]; |
3013 | if (use_pbde) { |
3014 | /* decrement sgl ptr backwards once to first data sge */ |
3015 | sgl--; |
3016 | |
3017 | /* Words 13-15 (PBDE) */ |
3018 | bde->addrLow = sgl->addr_lo; |
3019 | bde->addrHigh = sgl->addr_hi; |
3020 | bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len); |
3021 | bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
3022 | bde->tus.w = cpu_to_le32(bde->tus.w); |
3023 | } else { |
3024 | memset(bde, 0, sizeof(struct ulp_bde64)); |
3025 | } |
3026 | ctxp->state = LPFC_NVME_STE_DATA; |
3027 | ctxp->entry_cnt++; |
3028 | return nvmewqe; |
3029 | } |
3030 | |
/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @rspwqe: Pointer to driver response WQE object.
 *
 * This function is called from the SLI ring event handler with no lock
 * held. It is the completion handler for an NVME ABTS issued for a
 * solicited FCP command and frees the memory resources used for that
 * command.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_iocbq *rspwqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;
	unsigned long flags;
	bool released = false;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

	ctxp = cmdwqe->context_un.axchg;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVME_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->state = LPFC_NVME_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->rsp_dmabuf = NULL;
	cmdwqe->bpl_dmabuf = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @rspwqe: Pointer to driver response WQE object.
 *
 * This function is called from the SLI ring event handler with no lock
 * held. It is the completion handler for an NVME ABTS issued for an
 * unsolicited FCP command and frees the memory resources used for that
 * command.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_iocbq *rspwqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t result;
	bool released = false;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

	ctxp = cmdwqe->context_un.axchg;
	result = wcqe->parameter;

	if (!ctxp) {
		/* if context is clear, the related I/O already completed */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVME_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVME_STE_DONE;
	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl oxid x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->rsp_dmabuf = NULL;
	cmdwqe->bpl_dmabuf = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @rspwqe: Pointer to driver response WQE object.
 *
 * This function is called from the SLI ring event handler with no lock
 * held. It is the completion handler for an NVME ABTS issued for an LS
 * command and frees the memory resources used for that command.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_iocbq *rspwqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

	ctxp = cmdwqe->context_un.axchg;
	result = wcqe->parameter;

	if (phba->nvmet_support) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_ls_abort_cmpl);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}

	if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->rsp_dmabuf = NULL;
	cmdwqe->bpl_dmabuf = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}

static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_async_xchg_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp = NULL;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	if (phba->nvmet_support && phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		if (tgtp)
			atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

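	/*
	 * The ABTS for an unsolicited exchange is sent as an
	 * XMIT_SEQUENCE64 WQE carrying a BLS ABTS frame (R_CTL
	 * FC_RCTL_BA_ABTS, TYPE FC_TYPE_BLS), built word by word below.
	 */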
	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->ndlp = ndlp;
	abts_wqeq->context_un.axchg = ctxp;
	abts_wqeq->bpl_dmabuf = NULL;
	abts_wqeq->num_bdes = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}

static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	bool ia;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

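	/*
	 * Unlike the unsolicited path, which reuses ctxp->wqeq, the
	 * solicited abort is issued on a freshly allocated iocbq so the
	 * original command WQE stays outstanding until the ABTS completes.
	 */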
	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	ctxp->state = LPFC_NVME_STE_ABORT;
	ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* Announce entry into the abort submit path. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

3392 | |
3393 | /* If the hba is getting reset, this flag is set. It is |
3394 | * cleared when the reset is complete and rings reestablished. |
3395 | */ |
3396 | spin_lock_irqsave(&phba->hbalock, flags); |
3397 | /* driver queued commands are in process of being flushed */ |
3398 | if (phba->hba_flag & HBA_IOQ_FLUSH) { |
3399 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
3400 | atomic_inc(v: &tgtp->xmt_abort_rsp_error); |
3401 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
3402 | "6163 Driver in reset cleanup - flushing " |
3403 | "NVME Req now. hba_flag x%x oxid x%x\n" , |
3404 | phba->hba_flag, ctxp->oxid); |
3405 | lpfc_sli_release_iocbq(phba, abts_wqeq); |
3406 | spin_lock_irqsave(&ctxp->ctxlock, flags); |
3407 | ctxp->flag &= ~LPFC_NVME_ABORT_OP; |
3408 | spin_unlock_irqrestore(lock: &ctxp->ctxlock, flags); |
3409 | return 0; |
3410 | } |
3411 | |
3412 | /* Outstanding abort is in progress */ |
3413 | if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) { |
3414 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
3415 | atomic_inc(v: &tgtp->xmt_abort_rsp_error); |
3416 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
3417 | "6164 Outstanding NVME I/O Abort Request " |
3418 | "still pending on oxid x%x\n" , |
3419 | ctxp->oxid); |
3420 | lpfc_sli_release_iocbq(phba, abts_wqeq); |
3421 | spin_lock_irqsave(&ctxp->ctxlock, flags); |
3422 | ctxp->flag &= ~LPFC_NVME_ABORT_OP; |
3423 | spin_unlock_irqrestore(lock: &ctxp->ctxlock, flags); |
3424 | return 0; |
3425 | } |
3426 | |
3427 | /* Ready - mark outstanding as aborted by driver. */ |
3428 | abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED; |
3429 | |
3430 | lpfc_sli_prep_abort_xri(phba, cmdiocbq: abts_wqeq, ulp_context: ctxp->wqeq->sli4_xritag, |
3431 | iotag: abts_wqeq->iotag, CLASS3, |
3432 | LPFC_WQE_CQ_ID_DEFAULT, ia, wqec: true); |
3433 | |
3434 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ |
3435 | abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; |
3436 | abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; |
3437 | abts_wqeq->cmd_flag |= LPFC_IO_NVME; |
3438 | abts_wqeq->context_un.axchg = ctxp; |
3439 | abts_wqeq->vport = phba->pport; |
3440 | if (!ctxp->hdwq) |
3441 | ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; |
3442 | |
3443 | rc = lpfc_sli4_issue_wqe(phba, qp: ctxp->hdwq, pwqe: abts_wqeq); |
3444 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
3445 | if (rc == WQE_SUCCESS) { |
3446 | atomic_inc(v: &tgtp->xmt_abort_sol); |
3447 | return 0; |
3448 | } |
3449 | |
3450 | atomic_inc(v: &tgtp->xmt_abort_rsp_error); |
3451 | spin_lock_irqsave(&ctxp->ctxlock, flags); |
3452 | ctxp->flag &= ~LPFC_NVME_ABORT_OP; |
3453 | spin_unlock_irqrestore(lock: &ctxp->ctxlock, flags); |
3454 | lpfc_sli_release_iocbq(phba, abts_wqeq); |
3455 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
3456 | "6166 Failed ABORT issue_wqe with status x%x " |
3457 | "for oxid x%x.\n" , |
3458 | rc, ctxp->oxid); |
3459 | return 1; |
3460 | } |
3461 | |
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_async_xchg_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	bool released = false;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVME_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

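	/*
	 * lpfc_nvmet_unsol_issue_abort() built the ABTS WQE in ctxp->wqeq;
	 * queue it on the same hardware queue as the exchange being
	 * aborted.
	 */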
	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->cmd_flag |= LPFC_IO_NVMET;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS)
		return 0;

aerr:
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_CTX_RLS) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
			"(%x)\n",
			ctxp->oxid, rc, released);
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}

/**
 * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
 *        via async frame receive where the frame is not handled.
 * @phba: pointer to adapter structure
 * @ctxp: pointer to the asynchronously received sequence
 * @sid: address of the remote port to send the ABTS to
 * @xri: oxid value for the ABTS (the other side's exchange id).
 **/
int
lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp = NULL;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
	}

	if (phba->nvmet_support && phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

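	/*
	 * The ABTS WQE is ready; post it under hbalock and mark the iocbq
	 * as an NVME LS request so completion is routed to
	 * lpfc_nvmet_xmt_ls_abort_cmp().
	 */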
	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->cmd_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		if (tgtp)
			atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	if (tgtp)
		atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->rsp_dmabuf = NULL;
	abts_wqeq->bpl_dmabuf = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 1;
}

/**
 * lpfc_nvmet_invalidate_host - invalidate an NVME host on this target
 * @phba: pointer to the driver instance bound to an adapter port.
 * @ndlp: pointer to an lpfc_nodelist type
 *
 * This routine upcalls the nvmet transport to invalidate an NVME
 * host to which this target instance had active connections.
 */
void
lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	u32 ndlp_has_hh;
	struct lpfc_nvmet_tgtport *tgtp;

	lpfc_printf_log(phba, KERN_INFO,
			LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
			"6203 Invalidating hosthandle x%px\n",
			ndlp);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);

	spin_lock_irq(&ndlp->lock);
	ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH;
	spin_unlock_irq(&ndlp->lock);

	/* Do not invalidate any nodes that do not have a hosthandle.
	 * The host_release callback will cause a node reference
	 * count imbalance and a crash.
	 */
	if (!ndlp_has_hh) {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
				"6204 Skip invalidate on node x%px DID x%x\n",
				ndlp, ndlp->nlp_DID);
		return;
	}

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	/* Need to get the nvmet_fc_target_port pointer here. */
	nvmet_fc_invalidate_host(phba->targetport, ndlp);
#endif
}