1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23/* See Fibre Channel protocol T11 FC-LS for details */
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/slab.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29
30#include <scsi/scsi.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport_fc.h>
34#include <uapi/scsi/fc/fc_fs.h>
35#include <uapi/scsi/fc/fc_els.h>
36
37#include "lpfc_hw4.h"
38#include "lpfc_hw.h"
39#include "lpfc_sli.h"
40#include "lpfc_sli4.h"
41#include "lpfc_nl.h"
42#include "lpfc_disc.h"
43#include "lpfc_scsi.h"
44#include "lpfc.h"
45#include "lpfc_logmsg.h"
46#include "lpfc_crtn.h"
47#include "lpfc_vport.h"
48#include "lpfc_debugfs.h"
49
50static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
51 struct lpfc_iocbq *);
52static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
53 struct lpfc_iocbq *);
54static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
55static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
56 struct lpfc_nodelist *ndlp, uint8_t retry);
57static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
58 struct lpfc_iocbq *iocb);
59static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
60 struct lpfc_iocbq *cmdiocb,
61 struct lpfc_iocbq *rspiocb);
62static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
63 struct lpfc_iocbq *);
64
65static int lpfc_max_els_tries = 3;
66
67static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
68static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
69static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);
70
71/**
72 * lpfc_els_chk_latt - Check host link attention event for a vport
73 * @vport: pointer to a host virtual N_Port data structure.
74 *
75 * This routine checks whether there is an outstanding host link
76 * attention event during the discovery process with the @vport. It is done
77 * by reading the HBA's Host Attention (HA) register. If there is any host
78 * link attention events during this @vport's discovery process, the @vport
79 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
80 * be issued if the link state is not already in host link cleared state,
81 * and a return code shall indicate whether the host link attention event
82 * had happened.
83 *
84 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
85 * state in LPFC_VPORT_READY, the request for checking host link attention
86 * event will be ignored and a return code shall indicate no host link
87 * attention event had happened.
88 *
89 * Return codes
90 * 0 - no host link attention event happened
91 * 1 - host link attention event happened
92 **/
93int
94lpfc_els_chk_latt(struct lpfc_vport *vport)
95{
96 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
97 struct lpfc_hba *phba = vport->phba;
98 uint32_t ha_copy;
99
100 if (vport->port_state >= LPFC_VPORT_READY ||
101 phba->link_state == LPFC_LINK_DOWN ||
102 phba->sli_rev > LPFC_SLI_REV3)
103 return 0;
104
105 /* Read the HBA Host Attention Register */
106 if (lpfc_readl(addr: phba->HAregaddr, data: &ha_copy))
107 return 1;
108
109 if (!(ha_copy & HA_LATT))
110 return 0;
111
112 /* Pending Link Event during Discovery */
113 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
114 "0237 Pending Link Event during "
115 "Discovery: State x%x\n",
116 phba->pport->port_state);
117
118 /* CLEAR_LA should re-enable link attention events and
119 * we should then immediately take a LATT event. The
120 * LATT processing should call lpfc_linkdown() which
121 * will cleanup any left over in-progress discovery
122 * events.
123 */
124 spin_lock_irq(lock: shost->host_lock);
125 vport->fc_flag |= FC_ABORT_DISCOVERY;
126 spin_unlock_irq(lock: shost->host_lock);
127
128 if (phba->link_state != LPFC_CLEAR_LA)
129 lpfc_issue_clear_la(phba, vport);
130
131 return 1;
132}
133
134static bool lpfc_is_els_acc_rsp(struct lpfc_dmabuf *buf)
135{
136 struct fc_els_ls_acc *rsp = buf->virt;
137
138 if (rsp && rsp->la_cmd == ELS_LS_ACC)
139 return true;
140 return false;
141}
142
143/**
144 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
145 * @vport: pointer to a host virtual N_Port data structure.
146 * @expect_rsp: flag indicating whether response is expected.
147 * @cmd_size: size of the ELS command.
148 * @retry: number of retries to the command when it fails.
149 * @ndlp: pointer to a node-list data structure.
150 * @did: destination identifier.
151 * @elscmd: the ELS command code.
152 *
153 * This routine is used for allocating a lpfc-IOCB data structure from
154 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
155 * passed into the routine for discovery state machine to issue an Extended
156 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
157 * and preparation routine that is used by all the discovery state machine
158 * routines and the ELS command-specific fields will be later set up by
159 * the individual discovery machine routines after calling this routine
160 * allocating and preparing a generic IOCB data structure. It fills in the
161 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
162 * payload and response payload (if expected). The reference count on the
163 * ndlp is incremented by 1 and the reference to the ndlp is put into
164 * ndlp of the IOCB data structure for this IOCB to hold the ndlp
165 * reference for the command's callback function to access later.
166 *
167 * Return code
168 * Pointer to the newly allocated/prepared els iocb data structure
169 * NULL - when els iocb data structure allocation/preparation failed
170 **/
171struct lpfc_iocbq *
172lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
173 u16 cmd_size, u8 retry,
174 struct lpfc_nodelist *ndlp, u32 did,
175 u32 elscmd)
176{
177 struct lpfc_hba *phba = vport->phba;
178 struct lpfc_iocbq *elsiocb;
179 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
180 struct ulp_bde64_le *bpl;
181 u32 timeout = 0;
182
183 if (!lpfc_is_link_up(phba))
184 return NULL;
185
186 /* Allocate buffer for command iocb */
187 elsiocb = lpfc_sli_get_iocbq(phba);
188 if (!elsiocb)
189 return NULL;
190
191 /*
192 * If this command is for fabric controller and HBA running
193 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
194 */
195 if ((did == Fabric_DID) &&
196 (phba->hba_flag & HBA_FIP_SUPPORT) &&
197 ((elscmd == ELS_CMD_FLOGI) ||
198 (elscmd == ELS_CMD_FDISC) ||
199 (elscmd == ELS_CMD_LOGO)))
200 switch (elscmd) {
201 case ELS_CMD_FLOGI:
202 elsiocb->cmd_flag |=
203 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
204 & LPFC_FIP_ELS_ID_MASK);
205 break;
206 case ELS_CMD_FDISC:
207 elsiocb->cmd_flag |=
208 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
209 & LPFC_FIP_ELS_ID_MASK);
210 break;
211 case ELS_CMD_LOGO:
212 elsiocb->cmd_flag |=
213 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
214 & LPFC_FIP_ELS_ID_MASK);
215 break;
216 }
217 else
218 elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
219
220 /* fill in BDEs for command */
221 /* Allocate buffer for command payload */
222 pcmd = kmalloc(size: sizeof(*pcmd), GFP_KERNEL);
223 if (pcmd)
224 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
225 if (!pcmd || !pcmd->virt)
226 goto els_iocb_free_pcmb_exit;
227
228 INIT_LIST_HEAD(list: &pcmd->list);
229
230 /* Allocate buffer for response payload */
231 if (expect_rsp) {
232 prsp = kmalloc(size: sizeof(*prsp), GFP_KERNEL);
233 if (prsp)
234 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
235 &prsp->phys);
236 if (!prsp || !prsp->virt)
237 goto els_iocb_free_prsp_exit;
238 INIT_LIST_HEAD(list: &prsp->list);
239 } else {
240 prsp = NULL;
241 }
242
243 /* Allocate buffer for Buffer ptr list */
244 pbuflist = kmalloc(size: sizeof(*pbuflist), GFP_KERNEL);
245 if (pbuflist)
246 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
247 &pbuflist->phys);
248 if (!pbuflist || !pbuflist->virt)
249 goto els_iocb_free_pbuf_exit;
250
251 INIT_LIST_HEAD(list: &pbuflist->list);
252
253 if (expect_rsp) {
254 switch (elscmd) {
255 case ELS_CMD_FLOGI:
256 timeout = FF_DEF_RATOV * 2;
257 break;
258 case ELS_CMD_LOGO:
259 timeout = phba->fc_ratov;
260 break;
261 default:
262 timeout = phba->fc_ratov * 2;
263 }
264
265 /* Fill SGE for the num bde count */
266 elsiocb->num_bdes = 2;
267 }
268
269 if (phba->sli_rev == LPFC_SLI_REV4)
270 bmp = pcmd;
271 else
272 bmp = pbuflist;
273
274 lpfc_sli_prep_els_req_rsp(phba, cmdiocbq: elsiocb, vport, bmp, cmd_size, did,
275 elscmd, tmo: timeout, expect_rsp);
276
277 bpl = (struct ulp_bde64_le *)pbuflist->virt;
278 bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
279 bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
280 bpl->type_size = cpu_to_le32(cmd_size);
281 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
282
283 if (expect_rsp) {
284 bpl++;
285 bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
286 bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
287 bpl->type_size = cpu_to_le32(FCELSSIZE);
288 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
289 }
290
291 elsiocb->cmd_dmabuf = pcmd;
292 elsiocb->bpl_dmabuf = pbuflist;
293 elsiocb->retry = retry;
294 elsiocb->vport = vport;
295 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
296
297 if (prsp)
298 list_add(new: &prsp->list, head: &pcmd->list);
299 if (expect_rsp) {
300 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
301 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
302 "0116 Xmit ELS command x%x to remote "
303 "NPORT x%x I/O tag: x%x, port state:x%x "
304 "rpi x%x fc_flag:x%x\n",
305 elscmd, did, elsiocb->iotag,
306 vport->port_state, ndlp->nlp_rpi,
307 vport->fc_flag);
308 } else {
309 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
310 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
311 "0117 Xmit ELS response x%x to remote "
312 "NPORT x%x I/O tag: x%x, size: x%x "
313 "port_state x%x rpi x%x fc_flag x%x\n",
314 elscmd, ndlp->nlp_DID, elsiocb->iotag,
315 cmd_size, vport->port_state,
316 ndlp->nlp_rpi, vport->fc_flag);
317 }
318
319 return elsiocb;
320
321els_iocb_free_pbuf_exit:
322 if (expect_rsp)
323 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
324 kfree(objp: pbuflist);
325
326els_iocb_free_prsp_exit:
327 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
328 kfree(objp: prsp);
329
330els_iocb_free_pcmb_exit:
331 kfree(objp: pcmd);
332 lpfc_sli_release_iocbq(phba, elsiocb);
333 return NULL;
334}
335
336/**
337 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
338 * @vport: pointer to a host virtual N_Port data structure.
339 *
340 * This routine issues a fabric registration login for a @vport. An
341 * active ndlp node with Fabric_DID must already exist for this @vport.
342 * The routine invokes two mailbox commands to carry out fabric registration
343 * login through the HBA firmware: the first mailbox command requests the
344 * HBA to perform link configuration for the @vport; and the second mailbox
345 * command requests the HBA to perform the actual fabric registration login
346 * with the @vport.
347 *
348 * Return code
349 * 0 - successfully issued fabric registration login for @vport
350 * -ENXIO -- failed to issue fabric registration login for @vport
351 **/
352int
353lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
354{
355 struct lpfc_hba *phba = vport->phba;
356 LPFC_MBOXQ_t *mbox;
357 struct lpfc_nodelist *ndlp;
358 struct serv_parm *sp;
359 int rc;
360 int err = 0;
361
362 sp = &phba->fc_fabparam;
363 ndlp = lpfc_findnode_did(vport, Fabric_DID);
364 if (!ndlp) {
365 err = 1;
366 goto fail;
367 }
368
369 mbox = mempool_alloc(pool: phba->mbox_mem_pool, GFP_KERNEL);
370 if (!mbox) {
371 err = 2;
372 goto fail;
373 }
374
375 vport->port_state = LPFC_FABRIC_CFG_LINK;
376 lpfc_config_link(phba, mbox);
377 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
378 mbox->vport = vport;
379
380 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
381 if (rc == MBX_NOT_FINISHED) {
382 err = 3;
383 goto fail_free_mbox;
384 }
385
386 mbox = mempool_alloc(pool: phba->mbox_mem_pool, GFP_KERNEL);
387 if (!mbox) {
388 err = 4;
389 goto fail;
390 }
391 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
392 ndlp->nlp_rpi);
393 if (rc) {
394 err = 5;
395 goto fail_free_mbox;
396 }
397
398 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
399 mbox->vport = vport;
400 /* increment the reference count on ndlp to hold reference
401 * for the callback routine.
402 */
403 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
404 if (!mbox->ctx_ndlp) {
405 err = 6;
406 goto fail_free_mbox;
407 }
408
409 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
410 if (rc == MBX_NOT_FINISHED) {
411 err = 7;
412 goto fail_issue_reg_login;
413 }
414
415 return 0;
416
417fail_issue_reg_login:
418 /* decrement the reference count on ndlp just incremented
419 * for the failed mbox command.
420 */
421 lpfc_nlp_put(ndlp);
422fail_free_mbox:
423 lpfc_mbox_rsrc_cleanup(phba, mbox, locked: MBOX_THD_UNLOCKED);
424fail:
425 lpfc_vport_set_state(vport, new_state: FC_VPORT_FAILED);
426 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
427 "0249 Cannot issue Register Fabric login: Err %d\n",
428 err);
429 return -ENXIO;
430}
431
432/**
433 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
434 * @vport: pointer to a host virtual N_Port data structure.
435 *
436 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
437 * the @vport. This mailbox command is necessary for SLI4 port only.
438 *
439 * Return code
440 * 0 - successfully issued REG_VFI for @vport
441 * A failure code otherwise.
442 **/
443int
444lpfc_issue_reg_vfi(struct lpfc_vport *vport)
445{
446 struct lpfc_hba *phba = vport->phba;
447 LPFC_MBOXQ_t *mboxq = NULL;
448 struct lpfc_nodelist *ndlp;
449 struct lpfc_dmabuf *dmabuf = NULL;
450 int rc = 0;
451
452 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
453 if ((phba->sli_rev == LPFC_SLI_REV4) &&
454 !(phba->link_flag & LS_LOOPBACK_MODE) &&
455 !(vport->fc_flag & FC_PT2PT)) {
456 ndlp = lpfc_findnode_did(vport, Fabric_DID);
457 if (!ndlp) {
458 rc = -ENODEV;
459 goto fail;
460 }
461 }
462
463 mboxq = mempool_alloc(pool: phba->mbox_mem_pool, GFP_KERNEL);
464 if (!mboxq) {
465 rc = -ENOMEM;
466 goto fail;
467 }
468
469 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
470 if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
471 rc = lpfc_mbox_rsrc_prep(phba, mbox: mboxq);
472 if (rc) {
473 rc = -ENOMEM;
474 goto fail_mbox;
475 }
476 dmabuf = mboxq->ctx_buf;
477 memcpy(dmabuf->virt, &phba->fc_fabparam,
478 sizeof(struct serv_parm));
479 }
480
481 vport->port_state = LPFC_FABRIC_CFG_LINK;
482 if (dmabuf) {
483 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
484 /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */
485 mboxq->ctx_buf = dmabuf;
486 } else {
487 lpfc_reg_vfi(mboxq, vport, 0);
488 }
489
490 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
491 mboxq->vport = vport;
492 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
493 if (rc == MBX_NOT_FINISHED) {
494 rc = -ENXIO;
495 goto fail_mbox;
496 }
497 return 0;
498
499fail_mbox:
500 lpfc_mbox_rsrc_cleanup(phba, mbox: mboxq, locked: MBOX_THD_UNLOCKED);
501fail:
502 lpfc_vport_set_state(vport, new_state: FC_VPORT_FAILED);
503 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
504 "0289 Issue Register VFI failed: Err %d\n", rc);
505 return rc;
506}
507
508/**
509 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
510 * @vport: pointer to a host virtual N_Port data structure.
511 *
512 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
513 * the @vport. This mailbox command is necessary for SLI4 port only.
514 *
515 * Return code
516 * 0 - successfully issued REG_VFI for @vport
517 * A failure code otherwise.
518 **/
519int
520lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
521{
522 struct lpfc_hba *phba = vport->phba;
523 struct Scsi_Host *shost;
524 LPFC_MBOXQ_t *mboxq;
525 int rc;
526
527 mboxq = mempool_alloc(pool: phba->mbox_mem_pool, GFP_KERNEL);
528 if (!mboxq) {
529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
530 "2556 UNREG_VFI mbox allocation failed"
531 "HBA state x%x\n", phba->pport->port_state);
532 return -ENOMEM;
533 }
534
535 lpfc_unreg_vfi(mboxq, vport);
536 mboxq->vport = vport;
537 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
538
539 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
540 if (rc == MBX_NOT_FINISHED) {
541 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
542 "2557 UNREG_VFI issue mbox failed rc x%x "
543 "HBA state x%x\n",
544 rc, phba->pport->port_state);
545 mempool_free(element: mboxq, pool: phba->mbox_mem_pool);
546 return -EIO;
547 }
548
549 shost = lpfc_shost_from_vport(vport);
550 spin_lock_irq(lock: shost->host_lock);
551 vport->fc_flag &= ~FC_VFI_REGISTERED;
552 spin_unlock_irq(lock: shost->host_lock);
553 return 0;
554}
555
556/**
557 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
558 * @vport: pointer to a host virtual N_Port data structure.
559 * @sp: pointer to service parameter data structure.
560 *
561 * This routine is called from FLOGI/FDISC completion handler functions.
562 * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric
563 * node nodename is changed in the completion service parameter else return
564 * 0. This function also set flag in the vport data structure to delay
565 * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit
566 * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric
567 * node nodename is changed in the completion service parameter.
568 *
569 * Return code
570 * 0 - FCID and Fabric Nodename and Fabric portname is not changed.
571 * 1 - FCID or Fabric Nodename or Fabric portname is changed.
572 *
573 **/
574static uint8_t
575lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
576 struct serv_parm *sp)
577{
578 struct lpfc_hba *phba = vport->phba;
579 uint8_t fabric_param_changed = 0;
580 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
581
582 if ((vport->fc_prevDID != vport->fc_myDID) ||
583 memcmp(p: &vport->fabric_portname, q: &sp->portName,
584 size: sizeof(struct lpfc_name)) ||
585 memcmp(p: &vport->fabric_nodename, q: &sp->nodeName,
586 size: sizeof(struct lpfc_name)) ||
587 (vport->vport_flag & FAWWPN_PARAM_CHG)) {
588 fabric_param_changed = 1;
589 vport->vport_flag &= ~FAWWPN_PARAM_CHG;
590 }
591 /*
592 * Word 1 Bit 31 in common service parameter is overloaded.
593 * Word 1 Bit 31 in FLOGI request is multiple NPort request
594 * Word 1 Bit 31 in FLOGI response is clean address bit
595 *
596 * If fabric parameter is changed and clean address bit is
597 * cleared delay nport discovery if
598 * - vport->fc_prevDID != 0 (not initial discovery) OR
599 * - lpfc_delay_discovery module parameter is set.
600 */
601 if (fabric_param_changed && !sp->cmn.clean_address_bit &&
602 (vport->fc_prevDID || phba->cfg_delay_discovery)) {
603 spin_lock_irq(lock: shost->host_lock);
604 vport->fc_flag |= FC_DISC_DELAYED;
605 spin_unlock_irq(lock: shost->host_lock);
606 }
607
608 return fabric_param_changed;
609}
610
611
612/**
613 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
614 * @vport: pointer to a host virtual N_Port data structure.
615 * @ndlp: pointer to a node-list data structure.
616 * @sp: pointer to service parameter data structure.
617 * @ulp_word4: command response value
618 *
619 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
620 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
621 * port in a fabric topology. It properly sets up the parameters to the @ndlp
622 * from the IOCB response. It also check the newly assigned N_Port ID to the
623 * @vport against the previously assigned N_Port ID. If it is different from
624 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
625 * is invoked on all the remaining nodes with the @vport to unregister the
626 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
627 * is invoked to register login to the fabric.
628 *
629 * Return code
630 * 0 - Success (currently, always return 0)
631 **/
632static int
633lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
634 struct serv_parm *sp, uint32_t ulp_word4)
635{
636 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
637 struct lpfc_hba *phba = vport->phba;
638 struct lpfc_nodelist *np;
639 struct lpfc_nodelist *next_np;
640 uint8_t fabric_param_changed;
641
642 spin_lock_irq(lock: shost->host_lock);
643 vport->fc_flag |= FC_FABRIC;
644 spin_unlock_irq(lock: shost->host_lock);
645
646 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
647 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
648 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
649
650 phba->fc_edtovResol = sp->cmn.edtovResolution;
651 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
652
653 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
654 spin_lock_irq(lock: shost->host_lock);
655 vport->fc_flag |= FC_PUBLIC_LOOP;
656 spin_unlock_irq(lock: shost->host_lock);
657 }
658
659 vport->fc_myDID = ulp_word4 & Mask_DID;
660 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
661 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
662 ndlp->nlp_class_sup = 0;
663 if (sp->cls1.classValid)
664 ndlp->nlp_class_sup |= FC_COS_CLASS1;
665 if (sp->cls2.classValid)
666 ndlp->nlp_class_sup |= FC_COS_CLASS2;
667 if (sp->cls3.classValid)
668 ndlp->nlp_class_sup |= FC_COS_CLASS3;
669 if (sp->cls4.classValid)
670 ndlp->nlp_class_sup |= FC_COS_CLASS4;
671 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
672 sp->cmn.bbRcvSizeLsb;
673
674 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
675 if (fabric_param_changed) {
676 /* Reset FDMI attribute masks based on config parameter */
677 if (phba->cfg_enable_SmartSAN ||
678 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
679 /* Setup appropriate attribute masks */
680 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
681 if (phba->cfg_enable_SmartSAN)
682 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
683 else
684 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
685 } else {
686 vport->fdmi_hba_mask = 0;
687 vport->fdmi_port_mask = 0;
688 }
689
690 }
691 memcpy(&vport->fabric_portname, &sp->portName,
692 sizeof(struct lpfc_name));
693 memcpy(&vport->fabric_nodename, &sp->nodeName,
694 sizeof(struct lpfc_name));
695 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
696
697 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
698 if (sp->cmn.response_multiple_NPort) {
699 lpfc_printf_vlog(vport, KERN_WARNING,
700 LOG_ELS | LOG_VPORT,
701 "1816 FLOGI NPIV supported, "
702 "response data 0x%x\n",
703 sp->cmn.response_multiple_NPort);
704 spin_lock_irq(lock: &phba->hbalock);
705 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
706 spin_unlock_irq(lock: &phba->hbalock);
707 } else {
708 /* Because we asked f/w for NPIV it still expects us
709 to call reg_vnpid at least for the physical host */
710 lpfc_printf_vlog(vport, KERN_WARNING,
711 LOG_ELS | LOG_VPORT,
712 "1817 Fabric does not support NPIV "
713 "- configuring single port mode.\n");
714 spin_lock_irq(lock: &phba->hbalock);
715 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
716 spin_unlock_irq(lock: &phba->hbalock);
717 }
718 }
719
720 /*
721 * For FC we need to do some special processing because of the SLI
722 * Port's default settings of the Common Service Parameters.
723 */
724 if ((phba->sli_rev == LPFC_SLI_REV4) &&
725 (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
726 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
727 if (fabric_param_changed)
728 lpfc_unregister_fcf_prep(phba);
729
730 /* This should just update the VFI CSPs*/
731 if (vport->fc_flag & FC_VFI_REGISTERED)
732 lpfc_issue_reg_vfi(vport);
733 }
734
735 if (fabric_param_changed &&
736 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
737
738 /* If our NportID changed, we need to ensure all
739 * remaining NPORTs get unreg_login'ed.
740 */
741 list_for_each_entry_safe(np, next_np,
742 &vport->fc_nodes, nlp_listp) {
743 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
744 !(np->nlp_flag & NLP_NPR_ADISC))
745 continue;
746 spin_lock_irq(lock: &np->lock);
747 np->nlp_flag &= ~NLP_NPR_ADISC;
748 spin_unlock_irq(lock: &np->lock);
749 lpfc_unreg_rpi(vport, np);
750 }
751 lpfc_cleanup_pending_mbox(vport);
752
753 if (phba->sli_rev == LPFC_SLI_REV4) {
754 lpfc_sli4_unreg_all_rpis(vport);
755 lpfc_mbx_unreg_vpi(vport);
756 spin_lock_irq(lock: shost->host_lock);
757 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
758 spin_unlock_irq(lock: shost->host_lock);
759 }
760
761 /*
762 * For SLI3 and SLI4, the VPI needs to be reregistered in
763 * response to this fabric parameter change event.
764 */
765 spin_lock_irq(lock: shost->host_lock);
766 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
767 spin_unlock_irq(lock: shost->host_lock);
768 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
769 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
770 /*
771 * Driver needs to re-reg VPI in order for f/w
772 * to update the MAC address.
773 */
774 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
775 lpfc_register_new_vport(phba, vport, ndlp);
776 return 0;
777 }
778
779 if (phba->sli_rev < LPFC_SLI_REV4) {
780 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
781 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
782 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
783 lpfc_register_new_vport(phba, vport, ndlp);
784 else
785 lpfc_issue_fabric_reglogin(vport);
786 } else {
787 ndlp->nlp_type |= NLP_FABRIC;
788 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
789 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
790 (vport->vpi_state & LPFC_VPI_REGISTERED)) {
791 lpfc_start_fdiscs(phba);
792 lpfc_do_scr_ns_plogi(phba, vport);
793 } else if (vport->fc_flag & FC_VFI_REGISTERED)
794 lpfc_issue_init_vpi(vport);
795 else {
796 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
797 "3135 Need register VFI: (x%x/%x)\n",
798 vport->fc_prevDID, vport->fc_myDID);
799 lpfc_issue_reg_vfi(vport);
800 }
801 }
802 return 0;
803}
804
805/**
806 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
807 * @vport: pointer to a host virtual N_Port data structure.
808 * @ndlp: pointer to a node-list data structure.
809 * @sp: pointer to service parameter data structure.
810 *
811 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
812 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
813 * in a point-to-point topology. First, the @vport's N_Port Name is compared
814 * with the received N_Port Name: if the @vport's N_Port Name is greater than
815 * the received N_Port Name lexicographically, this node shall assign local
816 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
817 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
818 * this node shall just wait for the remote node to issue PLOGI and assign
819 * N_Port IDs.
820 *
821 * Return code
822 * 0 - Success
823 * -ENXIO - Fail
824 **/
825static int
826lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
827 struct serv_parm *sp)
828{
829 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
830 struct lpfc_hba *phba = vport->phba;
831 LPFC_MBOXQ_t *mbox;
832 int rc;
833
834 spin_lock_irq(lock: shost->host_lock);
835 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
836 vport->fc_flag |= FC_PT2PT;
837 spin_unlock_irq(lock: shost->host_lock);
838
839 /* If we are pt2pt with another NPort, force NPIV off! */
840 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
841
842 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
843 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
844 lpfc_unregister_fcf_prep(phba);
845
846 spin_lock_irq(lock: shost->host_lock);
847 vport->fc_flag &= ~FC_VFI_REGISTERED;
848 spin_unlock_irq(lock: shost->host_lock);
849 phba->fc_topology_changed = 0;
850 }
851
852 rc = memcmp(p: &vport->fc_portname, q: &sp->portName,
853 size: sizeof(vport->fc_portname));
854
855 if (rc >= 0) {
856 /* This side will initiate the PLOGI */
857 spin_lock_irq(lock: shost->host_lock);
858 vport->fc_flag |= FC_PT2PT_PLOGI;
859 spin_unlock_irq(lock: shost->host_lock);
860
861 /*
862 * N_Port ID cannot be 0, set our Id to LocalID
863 * the other side will be RemoteID.
864 */
865
866 /* not equal */
867 if (rc)
868 vport->fc_myDID = PT2PT_LocalID;
869
870 /* If not registered with a transport, decrement ndlp reference
871 * count indicating that ndlp can be safely released when other
872 * references are removed.
873 */
874 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
875 lpfc_nlp_put(ndlp);
876
877 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
878 if (!ndlp) {
879 /*
880 * Cannot find existing Fabric ndlp, so allocate a
881 * new one
882 */
883 ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
884 if (!ndlp)
885 goto fail;
886 }
887
888 memcpy(&ndlp->nlp_portname, &sp->portName,
889 sizeof(struct lpfc_name));
890 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
891 sizeof(struct lpfc_name));
892 /* Set state will put ndlp onto node list if not already done */
893 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
894 spin_lock_irq(lock: &ndlp->lock);
895 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
896 spin_unlock_irq(lock: &ndlp->lock);
897
898 mbox = mempool_alloc(pool: phba->mbox_mem_pool, GFP_KERNEL);
899 if (!mbox)
900 goto fail;
901
902 lpfc_config_link(phba, mbox);
903
904 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
905 mbox->vport = vport;
906 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
907 if (rc == MBX_NOT_FINISHED) {
908 mempool_free(element: mbox, pool: phba->mbox_mem_pool);
909 goto fail;
910 }
911 } else {
912 /* This side will wait for the PLOGI. If not registered with
913 * a transport, decrement node reference count indicating that
914 * ndlp can be released when other references are removed.
915 */
916 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
917 lpfc_nlp_put(ndlp);
918
919 /* Start discovery - this should just do CLEAR_LA */
920 lpfc_disc_start(vport);
921 }
922
923 return 0;
924fail:
925 return -ENXIO;
926}
927
928/**
929 * lpfc_cmpl_els_flogi - Completion callback function for flogi
930 * @phba: pointer to lpfc hba data structure.
931 * @cmdiocb: pointer to lpfc command iocb data structure.
932 * @rspiocb: pointer to lpfc response iocb data structure.
933 *
934 * This routine is the top-level completion callback function for issuing
935 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
936 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
937 * retry has been made (either immediately or delayed with lpfc_els_retry()
938 * returning 1), the command IOCB will be released and function returned.
939 * If the retry attempt has been given up (possibly reach the maximum
940 * number of retries), one additional decrement of ndlp reference shall be
941 * invoked before going out after releasing the command IOCB. This will
942 * actually release the remote node (Note, lpfc_els_free_iocb() will also
943 * invoke one decrement of ndlp reference count). If no error reported in
944 * the IOCB status, the command Port ID field is used to determine whether
945 * this is a point-to-point topology or a fabric topology: if the Port ID
946 * field is assigned, it is a fabric topology; otherwise, it is a
947 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
948 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
949 * specific topology completion conditions.
950 **/
951static void
952lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
953 struct lpfc_iocbq *rspiocb)
954{
955 struct lpfc_vport *vport = cmdiocb->vport;
956 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
957 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
958 IOCB_t *irsp;
959 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
960 struct serv_parm *sp;
961 uint16_t fcf_index;
962 int rc;
963 u32 ulp_status, ulp_word4, tmo;
964 bool flogi_in_retry = false;
965
966 /* Check to see if link went down during discovery */
967 if (lpfc_els_chk_latt(vport)) {
968 /* One additional decrement on node reference count to
969 * trigger the release of the node
970 */
971 if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
972 lpfc_nlp_put(ndlp);
973 goto out;
974 }
975
976 ulp_status = get_job_ulpstatus(phba, iocbq: rspiocb);
977 ulp_word4 = get_job_word4(phba, iocbq: rspiocb);
978
979 if (phba->sli_rev == LPFC_SLI_REV4) {
980 tmo = get_wqe_tmo(cmdiocb);
981 } else {
982 irsp = &rspiocb->iocb;
983 tmo = irsp->ulpTimeout;
984 }
985
986 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
987 "FLOGI cmpl: status:x%x/x%x state:x%x",
988 ulp_status, ulp_word4,
989 vport->port_state);
990
991 if (ulp_status) {
992 /*
993 * In case of FIP mode, perform roundrobin FCF failover
994 * due to new FCF discovery
995 */
996 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
997 (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
998 if (phba->link_state < LPFC_LINK_UP)
999 goto stop_rr_fcf_flogi;
1000 if ((phba->fcoe_cvl_eventtag_attn ==
1001 phba->fcoe_cvl_eventtag) &&
1002 (ulp_status == IOSTAT_LOCAL_REJECT) &&
1003 ((ulp_word4 & IOERR_PARAM_MASK) ==
1004 IOERR_SLI_ABORTED))
1005 goto stop_rr_fcf_flogi;
1006 else
1007 phba->fcoe_cvl_eventtag_attn =
1008 phba->fcoe_cvl_eventtag;
1009 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1010 "2611 FLOGI failed on FCF (x%x), "
1011 "status:x%x/x%x, tmo:x%x, perform "
1012 "roundrobin FCF failover\n",
1013 phba->fcf.current_rec.fcf_indx,
1014 ulp_status, ulp_word4, tmo);
1015 lpfc_sli4_set_fcf_flogi_fail(phba,
1016 phba->fcf.current_rec.fcf_indx);
1017 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
1018 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
1019 if (rc)
1020 goto out;
1021 }
1022
1023stop_rr_fcf_flogi:
1024 /* FLOGI failure */
1025 if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
1026 ((ulp_word4 & IOERR_PARAM_MASK) ==
1027 IOERR_LOOP_OPEN_FAILURE)))
1028 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1029 "2858 FLOGI failure Status:x%x/x%x TMO"
1030 ":x%x Data x%x x%x\n",
1031 ulp_status, ulp_word4, tmo,
1032 phba->hba_flag, phba->fcf.fcf_flag);
1033
1034 /* Check for retry */
1035 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1036 /* Address a timing race with dev_loss. If dev_loss
1037 * is active on this FPort node, put the initial ref
1038 * count back to stop premature node release actions.
1039 */
1040 lpfc_check_nlp_post_devloss(vport, ndlp);
1041 flogi_in_retry = true;
1042 goto out;
1043 }
1044
1045 /* The FLOGI will not be retried. If the FPort node is not
1046 * registered with the SCSI transport, remove the initial
1047 * reference to trigger node release.
1048 */
1049 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) &&
1050 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
1051 lpfc_nlp_put(ndlp);
1052
1053 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
1054 "0150 FLOGI failure Status:x%x/x%x "
1055 "xri x%x TMO:x%x refcnt %d\n",
1056 ulp_status, ulp_word4, cmdiocb->sli4_xritag,
1057 tmo, kref_read(&ndlp->kref));
1058
1059 /* If this is not a loop open failure, bail out */
1060 if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
1061 ((ulp_word4 & IOERR_PARAM_MASK) ==
1062 IOERR_LOOP_OPEN_FAILURE))) {
1063 /* FLOGI failure */
1064 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1065 "0100 FLOGI failure Status:x%x/x%x "
1066 "TMO:x%x\n",
1067 ulp_status, ulp_word4, tmo);
1068 goto flogifail;
1069 }
1070
1071 /* FLOGI failed, so there is no fabric */
1072 spin_lock_irq(lock: shost->host_lock);
1073 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
1074 FC_PT2PT_NO_NVME);
1075 spin_unlock_irq(lock: shost->host_lock);
1076
1077 /* If private loop, then allow max outstanding els to be
1078 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
1079 * alpa map would take too long otherwise.
1080 */
1081 if (phba->alpa_map[0] == 0)
1082 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1083 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1084 (!(vport->fc_flag & FC_VFI_REGISTERED) ||
1085 (vport->fc_prevDID != vport->fc_myDID) ||
1086 phba->fc_topology_changed)) {
1087 if (vport->fc_flag & FC_VFI_REGISTERED) {
1088 if (phba->fc_topology_changed) {
1089 lpfc_unregister_fcf_prep(phba);
1090 spin_lock_irq(lock: shost->host_lock);
1091 vport->fc_flag &= ~FC_VFI_REGISTERED;
1092 spin_unlock_irq(lock: shost->host_lock);
1093 phba->fc_topology_changed = 0;
1094 } else {
1095 lpfc_sli4_unreg_all_rpis(vport);
1096 }
1097 }
1098
1099 /* Do not register VFI if the driver aborted FLOGI */
1100 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
1101 lpfc_issue_reg_vfi(vport);
1102
1103 goto out;
1104 }
1105 goto flogifail;
1106 }
1107 spin_lock_irq(lock: shost->host_lock);
1108 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
1109 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
1110 spin_unlock_irq(lock: shost->host_lock);
1111
1112 /*
1113 * The FLOGI succeeded. Sync the data for the CPU before
1114 * accessing it.
1115 */
1116 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1117 if (!prsp)
1118 goto out;
1119 if (!lpfc_is_els_acc_rsp(buf: prsp))
1120 goto out;
1121 sp = prsp->virt + sizeof(uint32_t);
1122
1123 /* FLOGI completes successfully */
1124 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1125 "0101 FLOGI completes successfully, I/O tag:x%x "
1126 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
1127 cmdiocb->iotag, cmdiocb->sli4_xritag,
1128 ulp_word4, sp->cmn.e_d_tov,
1129 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1130 vport->port_state, vport->fc_flag,
1131 sp->cmn.priority_tagging, kref_read(&ndlp->kref));
1132
1133 if (sp->cmn.priority_tagging)
1134 vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
1135 LPFC_VMID_TYPE_PRIO);
1136 /* reinitialize the VMID datastructure before returning */
1137 if (lpfc_is_vmid_enabled(phba))
1138 lpfc_reinit_vmid(vport);
1139
1140 /*
1141 * Address a timing race with dev_loss. If dev_loss is active on
1142 * this FPort node, put the initial ref count back to stop premature
1143 * node release actions.
1144 */
1145 lpfc_check_nlp_post_devloss(vport, ndlp);
1146 if (vport->port_state == LPFC_FLOGI) {
1147 /*
1148 * If Common Service Parameters indicate Nport
1149 * we are point to point, if Fport we are Fabric.
1150 */
1151 if (sp->cmn.fPort)
1152 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
1153 ulp_word4);
1154 else if (!(phba->hba_flag & HBA_FCOE_MODE))
1155 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
1156 else {
1157 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1158 "2831 FLOGI response with cleared Fabric "
1159 "bit fcf_index 0x%x "
1160 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
1161 "Fabric Name "
1162 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
1163 phba->fcf.current_rec.fcf_indx,
1164 phba->fcf.current_rec.switch_name[0],
1165 phba->fcf.current_rec.switch_name[1],
1166 phba->fcf.current_rec.switch_name[2],
1167 phba->fcf.current_rec.switch_name[3],
1168 phba->fcf.current_rec.switch_name[4],
1169 phba->fcf.current_rec.switch_name[5],
1170 phba->fcf.current_rec.switch_name[6],
1171 phba->fcf.current_rec.switch_name[7],
1172 phba->fcf.current_rec.fabric_name[0],
1173 phba->fcf.current_rec.fabric_name[1],
1174 phba->fcf.current_rec.fabric_name[2],
1175 phba->fcf.current_rec.fabric_name[3],
1176 phba->fcf.current_rec.fabric_name[4],
1177 phba->fcf.current_rec.fabric_name[5],
1178 phba->fcf.current_rec.fabric_name[6],
1179 phba->fcf.current_rec.fabric_name[7]);
1180
1181 lpfc_nlp_put(ndlp);
1182 spin_lock_irq(lock: &phba->hbalock);
1183 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1184 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1185 spin_unlock_irq(lock: &phba->hbalock);
1186 phba->fcf.fcf_redisc_attempted = 0; /* reset */
1187 goto out;
1188 }
1189 if (!rc) {
1190 /* Mark the FCF discovery process done */
1191 if (phba->hba_flag & HBA_FIP_SUPPORT)
1192 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
1193 LOG_ELS,
1194 "2769 FLOGI to FCF (x%x) "
1195 "completed successfully\n",
1196 phba->fcf.current_rec.fcf_indx);
1197 spin_lock_irq(lock: &phba->hbalock);
1198 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1199 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1200 spin_unlock_irq(lock: &phba->hbalock);
1201 phba->fcf.fcf_redisc_attempted = 0; /* reset */
1202 goto out;
1203 }
1204 } else if (vport->port_state > LPFC_FLOGI &&
1205 vport->fc_flag & FC_PT2PT) {
1206 /*
1207 * In a p2p topology, it is possible that discovery has
1208 * already progressed, and this completion can be ignored.
1209 * Recheck the indicated topology.
1210 */
1211 if (!sp->cmn.fPort)
1212 goto out;
1213 }
1214
1215flogifail:
1216 spin_lock_irq(lock: &phba->hbalock);
1217 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1218 spin_unlock_irq(lock: &phba->hbalock);
1219
1220 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
1221 /* FLOGI failed, so just use loop map to make discovery list */
1222 lpfc_disc_list_loopmap(vport);
1223
1224 /* Start discovery */
1225 lpfc_disc_start(vport);
1226 } else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
1227 (((ulp_word4 & IOERR_PARAM_MASK) !=
1228 IOERR_SLI_ABORTED) &&
1229 ((ulp_word4 & IOERR_PARAM_MASK) !=
1230 IOERR_SLI_DOWN))) &&
1231 (phba->link_state != LPFC_CLEAR_LA)) {
1232 /* If FLOGI failed enable link interrupt. */
1233 lpfc_issue_clear_la(phba, vport);
1234 }
1235out:
1236 if (!flogi_in_retry)
1237 phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
1238
1239 lpfc_els_free_iocb(phba, cmdiocb);
1240 lpfc_nlp_put(ndlp);
1241}
1242
1243/**
1244 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
1245 * aborted during a link down
1246 * @phba: pointer to lpfc hba data structure.
1247 * @cmdiocb: pointer to lpfc command iocb data structure.
1248 * @rspiocb: pointer to lpfc response iocb data structure.
1249 *
1250 */
1251static void
1252lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1253 struct lpfc_iocbq *rspiocb)
1254{
1255 uint32_t *pcmd;
1256 uint32_t cmd;
1257 u32 ulp_status, ulp_word4;
1258
1259 pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
1260 cmd = *pcmd;
1261
1262 ulp_status = get_job_ulpstatus(phba, iocbq: rspiocb);
1263 ulp_word4 = get_job_word4(phba, iocbq: rspiocb);
1264
1265 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1266 "6445 ELS completes after LINK_DOWN: "
1267 " Status %x/%x cmd x%x flg x%x\n",
1268 ulp_status, ulp_word4, cmd,
1269 cmdiocb->cmd_flag);
1270
1271 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
1272 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
1273 atomic_dec(v: &phba->fabric_iocb_count);
1274 }
1275 lpfc_els_free_iocb(phba, cmdiocb);
1276}
1277
1278/**
1279 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1280 * @vport: pointer to a host virtual N_Port data structure.
1281 * @ndlp: pointer to a node-list data structure.
1282 * @retry: number of retries to the command IOCB.
1283 *
1284 * This routine issues a Fabric Login (FLOGI) Request ELS command
1285 * for a @vport. The initiator service parameters are put into the payload
1286 * of the FLOGI Request IOCB and the top-level callback function pointer
1287 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
1288 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
1289 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
1290 *
1291 * Note that the ndlp reference count will be incremented by 1 for holding the
1292 * ndlp and the reference to ndlp will be stored into the ndlp field of
1293 * the IOCB for the completion callback function to the FLOGI ELS command.
1294 *
1295 * Return code
1296 * 0 - successfully issued flogi iocb for @vport
1297 * 1 - failed to issue flogi iocb for @vport
1298 **/
1299static int
1300lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1301 uint8_t retry)
1302{
1303 struct lpfc_hba *phba = vport->phba;
1304 struct serv_parm *sp;
1305 union lpfc_wqe128 *wqe = NULL;
1306 IOCB_t *icmd = NULL;
1307 struct lpfc_iocbq *elsiocb;
1308 struct lpfc_iocbq defer_flogi_acc;
1309 u8 *pcmd, ct;
1310 uint16_t cmdsize;
1311 uint32_t tmo, did;
1312 int rc;
1313
1314 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1315 elsiocb = lpfc_prep_els_iocb(vport, expect_rsp: 1, cmd_size: cmdsize, retry, ndlp,
1316 did: ndlp->nlp_DID, ELS_CMD_FLOGI);
1317
1318 if (!elsiocb)
1319 return 1;
1320
1321 wqe = &elsiocb->wqe;
1322 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
1323 icmd = &elsiocb->iocb;
1324
1325 /* For FLOGI request, remainder of payload is service parameters */
1326 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1327 pcmd += sizeof(uint32_t);
1328 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1329 sp = (struct serv_parm *) pcmd;
1330
1331 /* Setup CSPs accordingly for Fabric */
1332 sp->cmn.e_d_tov = 0;
1333 sp->cmn.w2.r_a_tov = 0;
1334 sp->cmn.virtual_fabric_support = 0;
1335 sp->cls1.classValid = 0;
1336 if (sp->cmn.fcphLow < FC_PH3)
1337 sp->cmn.fcphLow = FC_PH3;
1338 if (sp->cmn.fcphHigh < FC_PH3)
1339 sp->cmn.fcphHigh = FC_PH3;
1340
1341 /* Determine if switch supports priority tagging */
1342 if (phba->cfg_vmid_priority_tagging) {
1343 sp->cmn.priority_tagging = 1;
1344 /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
1345 if (!memchr_inv(p: vport->lpfc_vmid_host_uuid, c: 0,
1346 size: sizeof(vport->lpfc_vmid_host_uuid))) {
1347 memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
1348 sizeof(phba->wwpn));
1349 memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
1350 sizeof(phba->wwnn));
1351 }
1352 }
1353
1354 if (phba->sli_rev == LPFC_SLI_REV4) {
1355 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1356 LPFC_SLI_INTF_IF_TYPE_0) {
1357 /* FLOGI needs to be 3 for WQE FCFI */
1358 ct = SLI4_CT_FCFI;
1359 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
1360
1361 /* Set the fcfi to the fcfi we registered with */
1362 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
1363 phba->fcf.fcfi);
1364 }
1365
1366 /* Can't do SLI4 class2 without support sequence coalescing */
1367 sp->cls2.classValid = 0;
1368 sp->cls2.seqDelivery = 0;
1369 } else {
1370 /* Historical, setting sequential-delivery bit for SLI3 */
1371 sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
1372 sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
1373 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1374 sp->cmn.request_multiple_Nport = 1;
1375 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1376 icmd->ulpCt_h = 1;
1377 icmd->ulpCt_l = 0;
1378 } else {
1379 sp->cmn.request_multiple_Nport = 0;
1380 }
1381
1382 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1383 icmd->un.elsreq64.myID = 0;
1384 icmd->un.elsreq64.fl = 1;
1385 }
1386 }
1387
1388 tmo = phba->fc_ratov;
1389 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1390 lpfc_set_disctmo(vport);
1391 phba->fc_ratov = tmo;
1392
1393 phba->fc_stat.elsXmitFLOGI++;
1394 elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;
1395
1396 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1397 "Issue FLOGI: opt:x%x",
1398 phba->sli3_options, 0, 0);
1399
1400 elsiocb->ndlp = lpfc_nlp_get(ndlp);
1401 if (!elsiocb->ndlp) {
1402 lpfc_els_free_iocb(phba, elsiocb);
1403 return 1;
1404 }
1405
1406 /* Avoid race with FLOGI completion and hba_flags. */
1407 phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
1408
1409 rc = lpfc_issue_fabric_iocb(phba, iocb: elsiocb);
1410 if (rc == IOCB_ERROR) {
1411 phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
1412 lpfc_els_free_iocb(phba, elsiocb);
1413 lpfc_nlp_put(ndlp);
1414 return 1;
1415 }
1416
1417 /* Clear external loopback plug detected flag */
1418 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
1419
1420 /* Check for a deferred FLOGI ACC condition */
1421 if (phba->defer_flogi_acc_flag) {
1422 /* lookup ndlp for received FLOGI */
1423 ndlp = lpfc_findnode_did(vport, 0);
1424 if (!ndlp)
1425 return 0;
1426
1427 did = vport->fc_myDID;
1428 vport->fc_myDID = Fabric_DID;
1429
1430 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
1431
1432 if (phba->sli_rev == LPFC_SLI_REV4) {
1433 bf_set(wqe_ctxt_tag,
1434 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
1435 phba->defer_flogi_acc_rx_id);
1436 bf_set(wqe_rcvoxid,
1437 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
1438 phba->defer_flogi_acc_ox_id);
1439 } else {
1440 icmd = &defer_flogi_acc.iocb;
1441 icmd->ulpContext = phba->defer_flogi_acc_rx_id;
1442 icmd->unsli3.rcvsli3.ox_id =
1443 phba->defer_flogi_acc_ox_id;
1444 }
1445
1446 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1447 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
1448 " ox_id: x%x, hba_flag x%x\n",
1449 phba->defer_flogi_acc_rx_id,
1450 phba->defer_flogi_acc_ox_id, phba->hba_flag);
1451
1452 /* Send deferred FLOGI ACC */
1453 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
1454 ndlp, NULL);
1455
1456 phba->defer_flogi_acc_flag = false;
1457 vport->fc_myDID = did;
1458
1459 /* Decrement ndlp reference count to indicate the node can be
1460 * released when other references are removed.
1461 */
1462 lpfc_nlp_put(ndlp);
1463 }
1464
1465 return 0;
1466}
1467
1468/**
1469 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1470 * @phba: pointer to lpfc hba data structure.
1471 *
1472 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1473 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1474 * list and issues an abort IOCB commond on each outstanding IOCB that
1475 * contains a active Fabric_DID ndlp. Note that this function is to issue
1476 * the abort IOCB command on all the outstanding IOCBs, thus when this
1477 * function returns, it does not guarantee all the IOCBs are actually aborted.
1478 *
1479 * Return code
1480 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
1481 **/
1482int
1483lpfc_els_abort_flogi(struct lpfc_hba *phba)
1484{
1485 struct lpfc_sli_ring *pring;
1486 struct lpfc_iocbq *iocb, *next_iocb;
1487 struct lpfc_nodelist *ndlp;
1488 u32 ulp_command;
1489
1490 /* Abort outstanding I/O on NPort <nlp_DID> */
1491 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1492 "0201 Abort outstanding I/O on NPort x%x\n",
1493 Fabric_DID);
1494
1495 pring = lpfc_phba_elsring(phba);
1496 if (unlikely(!pring))
1497 return -EIO;
1498
1499 /*
1500 * Check the txcmplq for an iocb that matches the nport the driver is
1501 * searching for.
1502 */
1503 spin_lock_irq(lock: &phba->hbalock);
1504 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1505 ulp_command = get_job_cmnd(phba, iocbq: iocb);
1506 if (ulp_command == CMD_ELS_REQUEST64_CR) {
1507 ndlp = iocb->ndlp;
1508 if (ndlp && ndlp->nlp_DID == Fabric_DID) {
1509 if ((phba->pport->fc_flag & FC_PT2PT) &&
1510 !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
1511 iocb->fabric_cmd_cmpl =
1512 lpfc_ignore_els_cmpl;
1513 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
1514 NULL);
1515 }
1516 }
1517 }
1518 /* Make sure HBA is alive */
1519 lpfc_issue_hb_tmo(phba);
1520
1521 spin_unlock_irq(lock: &phba->hbalock);
1522
1523 return 0;
1524}
1525
1526/**
1527 * lpfc_initial_flogi - Issue an initial fabric login for a vport
1528 * @vport: pointer to a host virtual N_Port data structure.
1529 *
1530 * This routine issues an initial Fabric Login (FLOGI) for the @vport
1531 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1532 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1533 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1534 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1535 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1536 * @vport.
1537 *
1538 * Return code
1539 * 0 - failed to issue initial flogi for @vport
1540 * 1 - successfully issued initial flogi for @vport
1541 **/
1542int
1543lpfc_initial_flogi(struct lpfc_vport *vport)
1544{
1545 struct lpfc_nodelist *ndlp;
1546
1547 vport->port_state = LPFC_FLOGI;
1548 lpfc_set_disctmo(vport);
1549
1550 /* First look for the Fabric ndlp */
1551 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1552 if (!ndlp) {
1553 /* Cannot find existing Fabric ndlp, so allocate a new one */
1554 ndlp = lpfc_nlp_init(vport, Fabric_DID);
1555 if (!ndlp)
1556 return 0;
1557 /* Set the node type */
1558 ndlp->nlp_type |= NLP_FABRIC;
1559
1560 /* Put ndlp onto node list */
1561 lpfc_enqueue_node(vport, ndlp);
1562 }
1563
1564 /* Reset the Fabric flag, topology change may have happened */
1565 vport->fc_flag &= ~FC_FABRIC;
1566 if (lpfc_issue_els_flogi(vport, ndlp, retry: 0)) {
1567 /* A node reference should be retained while registered with a
1568 * transport or dev-loss-evt work is pending.
1569 * Otherwise, decrement node reference to trigger release.
1570 */
1571 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
1572 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
1573 lpfc_nlp_put(ndlp);
1574 return 0;
1575 }
1576 return 1;
1577}
1578
1579/**
1580 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1581 * @vport: pointer to a host virtual N_Port data structure.
1582 *
1583 * This routine issues an initial Fabric Discover (FDISC) for the @vport
1584 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1585 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1586 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1587 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1588 * is then invoked with the @vport and the ndlp to perform the FDISC for the
1589 * @vport.
1590 *
1591 * Return code
1592 * 0 - failed to issue initial fdisc for @vport
1593 * 1 - successfully issued initial fdisc for @vport
1594 **/
1595int
1596lpfc_initial_fdisc(struct lpfc_vport *vport)
1597{
1598 struct lpfc_nodelist *ndlp;
1599
1600 /* First look for the Fabric ndlp */
1601 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1602 if (!ndlp) {
1603 /* Cannot find existing Fabric ndlp, so allocate a new one */
1604 ndlp = lpfc_nlp_init(vport, Fabric_DID);
1605 if (!ndlp)
1606 return 0;
1607
1608 /* NPIV is only supported in Fabrics. */
1609 ndlp->nlp_type |= NLP_FABRIC;
1610
1611 /* Put ndlp onto node list */
1612 lpfc_enqueue_node(vport, ndlp);
1613 }
1614
1615 if (lpfc_issue_els_fdisc(vport, ndlp, retry: 0)) {
1616 /* A node reference should be retained while registered with a
1617 * transport or dev-loss-evt work is pending.
1618 * Otherwise, decrement node reference to trigger release.
1619 */
1620 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
1621 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
1622 lpfc_nlp_put(ndlp);
1623 return 0;
1624 }
1625 return 1;
1626}
1627
1628/**
1629 * lpfc_more_plogi - Check and issue remaining plogis for a vport
1630 * @vport: pointer to a host virtual N_Port data structure.
1631 *
1632 * This routine checks whether there are more remaining Port Logins
1633 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1634 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1635 * to issue ELS PLOGIs up to the configured discover threads with the
1636 * @vport (@vport->cfg_discovery_threads). The function also decrement
1637 * the @vport's num_disc_node by 1 if it is not already 0.
1638 **/
1639void
1640lpfc_more_plogi(struct lpfc_vport *vport)
1641{
1642 if (vport->num_disc_nodes)
1643 vport->num_disc_nodes--;
1644
1645 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
1646 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1647 "0232 Continue discovery with %d PLOGIs to go "
1648 "Data: x%x x%x x%x\n",
1649 vport->num_disc_nodes, vport->fc_plogi_cnt,
1650 vport->fc_flag, vport->port_state);
1651 /* Check to see if there are more PLOGIs to be sent */
1652 if (vport->fc_flag & FC_NLP_MORE)
1653 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
1654 lpfc_els_disc_plogi(vport);
1655
1656 return;
1657}
1658
1659/**
1660 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
1661 * @phba: pointer to lpfc hba data structure.
1662 * @prsp: pointer to response IOCB payload.
1663 * @ndlp: pointer to a node-list data structure.
1664 *
1665 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1666 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
1667 * The following cases are considered N_Port confirmed:
1668 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1669 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1670 * it does not have WWPN assigned either. If the WWPN is confirmed, the
1671 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1672 * 1) if there is a node on vport list other than the @ndlp with the same
1673 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1674 * on that node to release the RPI associated with the node; 2) if there is
1675 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1676 * into, a new node shall be allocated (or activated). In either case, the
1677 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1678 * be released and the new_ndlp shall be put on to the vport node list and
1679 * its pointer returned as the confirmed node.
1680 *
1681 * Note that before the @ndlp is "released", the keepDID from the not-matching
1682 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1683 * of the @ndlp. This is because the release of @ndlp actually puts it
1684 * into an inactive state on the vport node list, and the vport node list
1685 * management algorithm does not allow two nodes with the same DID.
1686 *
1687 * Return code
1688 * pointer to the PLOGI N_Port @ndlp
1689 **/
1690static struct lpfc_nodelist *
1691lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1692 struct lpfc_nodelist *ndlp)
1693{
1694 struct lpfc_vport *vport = ndlp->vport;
1695 struct lpfc_nodelist *new_ndlp;
1696 struct serv_parm *sp;
1697 uint8_t name[sizeof(struct lpfc_name)];
1698 uint32_t keepDID = 0, keep_nlp_flag = 0;
1699 uint32_t keep_new_nlp_flag = 0;
1700 uint16_t keep_nlp_state;
1701 u32 keep_nlp_fc4_type = 0;
1702 struct lpfc_nvme_rport *keep_nrport = NULL;
1703 unsigned long *active_rrqs_xri_bitmap = NULL;
1704
1705 /* Fabric nodes can have the same WWPN so we don't bother searching
1706 * by WWPN. Just return the ndlp that was given to us.
1707 */
1708 if (ndlp->nlp_type & NLP_FABRIC)
1709 return ndlp;
1710
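 /* The service parameters follow the 4-byte ELS command code in the
 * PLOGI ACC payload. The name buffer stays zeroed so the memcmp
 * further down can detect an ndlp with no portname assigned yet.
 */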
1711 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1712 memset(name, 0, sizeof(struct lpfc_name));
1713
1714 /* Now we find out if the NPort we are logging into, matches the WWPN
1715 * we have for that ndlp. If not, we have some work to do.
1716 */
1717 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1718
1719 /* return immediately if the WWPN matches ndlp */
1720 if (!new_ndlp || (new_ndlp == ndlp))
1721 return ndlp;
1722
1723 /*
1724 * Unregister from backend if not done yet. Could have been skipped
1725 * due to ADISC
1726 */
1727 lpfc_nlp_unreg_node(vport, new_ndlp);
1728
1729 if (phba->sli_rev == LPFC_SLI_REV4) {
1730 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1731 GFP_KERNEL);
1732 if (active_rrqs_xri_bitmap)
1733 memset(active_rrqs_xri_bitmap, 0,
1734 phba->cfg_rrq_xri_bitmap_sz);
1735 }
1736
1737 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1738 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
1739 "new_ndlp x%x x%x x%x\n",
1740 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
1741 (new_ndlp ? new_ndlp->nlp_DID : 0),
1742 (new_ndlp ? new_ndlp->nlp_flag : 0),
1743 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
1744
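 /* Save the DID of the node already on the vport list; it is written
 * back to the original ndlp below so that no two nodes on the list
 * ever carry the same DID.
 */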
1745 keepDID = new_ndlp->nlp_DID;
1746
1747 if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
1748 memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
1749 phba->cfg_rrq_xri_bitmap_sz);
1750
1751 /* At this point in this routine, we know new_ndlp will be
1752 * returned. However, any previous GID_FTs that were done
1753 * would have updated nlp_fc4_type in ndlp, so we must ensure
1754 * new_ndlp has the right value.
1755 */
1756 if (vport->fc_flag & FC_FABRIC) {
1757 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
1758 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
1759 }
1760
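 /* Release new_ndlp's RPI before overwriting its DID, previous state,
 * and RRQ bitmap with values from the inflight ndlp.
 */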
1761 lpfc_unreg_rpi(vport, new_ndlp);
1762 new_ndlp->nlp_DID = ndlp->nlp_DID;
1763 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1764 if (phba->sli_rev == LPFC_SLI_REV4)
1765 memcpy(new_ndlp->active_rrqs_xri_bitmap,
1766 ndlp->active_rrqs_xri_bitmap,
1767 phba->cfg_rrq_xri_bitmap_sz);
1768
1769 /* Lock both ndlps */
1770 spin_lock_irq(&ndlp->lock);
1771 spin_lock_irq(&new_ndlp->lock);
1772 keep_new_nlp_flag = new_ndlp->nlp_flag;
1773 keep_nlp_flag = ndlp->nlp_flag;
1774 new_ndlp->nlp_flag = ndlp->nlp_flag;
1775
1776 /* if new_ndlp had NLP_UNREG_INP set, keep it */
1777 if (keep_new_nlp_flag & NLP_UNREG_INP)
1778 new_ndlp->nlp_flag |= NLP_UNREG_INP;
1779 else
1780 new_ndlp->nlp_flag &= ~NLP_UNREG_INP;
1781
1782 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
1783 if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
1784 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1785 else
1786 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1787
1788 /*
1789 * Retain the DROPPED flag. This will take care of the init
1790 * refcount when affecting the state change
1791 */
1792 if (keep_new_nlp_flag & NLP_DROPPED)
1793 new_ndlp->nlp_flag |= NLP_DROPPED;
1794 else
1795 new_ndlp->nlp_flag &= ~NLP_DROPPED;
1796
1797 ndlp->nlp_flag = keep_new_nlp_flag;
1798
1799 /* if ndlp had NLP_UNREG_INP set, keep it */
1800 if (keep_nlp_flag & NLP_UNREG_INP)
1801 ndlp->nlp_flag |= NLP_UNREG_INP;
1802 else
1803 ndlp->nlp_flag &= ~NLP_UNREG_INP;
1804
1805 /* if ndlp had NLP_RPI_REGISTERED set, keep it */
1806 if (keep_nlp_flag & NLP_RPI_REGISTERED)
1807 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1808 else
1809 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1810
1811 /*
1812 * Retain the DROPPED flag. This will take care of the init
1813 * refcount when affecting the state change
1814 */
1815 if (keep_nlp_flag & NLP_DROPPED)
1816 ndlp->nlp_flag |= NLP_DROPPED;
1817 else
1818 ndlp->nlp_flag &= ~NLP_DROPPED;
1819
1820 spin_unlock_irq(&new_ndlp->lock);
1821 spin_unlock_irq(&ndlp->lock);
1822
1823 /* Set nlp_states accordingly */
1824 keep_nlp_state = new_ndlp->nlp_state;
1825 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1826
1827 /* interchange the nvme remoteport structs */
1828 keep_nrport = new_ndlp->nrport;
1829 new_ndlp->nrport = ndlp->nrport;
1830
1831 /* Move this back to NPR state */
1832 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1833 /* The ndlp doesn't have a portname yet, but does have an
1834 * NPort ID. The new_ndlp portname matches the Rport's
1835 * portname. Reinstantiate the new_ndlp and reset the ndlp.
1836 */
1837 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1838 "3179 PLOGI confirm NEW: %x %x\n",
1839 new_ndlp->nlp_DID, keepDID);
1840
1841 /* Two ndlps cannot have the same did on the nodelist.
1842 * The KeepDID and keep_nlp_fc4_type need to be swapped
1843 * because ndlp is inflight with no WWPN.
1844 */
1845 ndlp->nlp_DID = keepDID;
1846 ndlp->nlp_fc4_type = keep_nlp_fc4_type;
1847 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1848 if (phba->sli_rev == LPFC_SLI_REV4 &&
1849 active_rrqs_xri_bitmap)
1850 memcpy(ndlp->active_rrqs_xri_bitmap,
1851 active_rrqs_xri_bitmap,
1852 phba->cfg_rrq_xri_bitmap_sz);
1853
1854 } else {
1855 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1856 "3180 PLOGI confirm SWAP: %x %x\n",
1857 new_ndlp->nlp_DID, keepDID);
1858
1859 lpfc_unreg_rpi(vport, ndlp);
1860
1861 /* The ndlp and new_ndlp both have WWPNs but are swapping
1862 * NPort Ids and attributes.
1863 */
1864 ndlp->nlp_DID = keepDID;
1865 ndlp->nlp_fc4_type = keep_nlp_fc4_type;
1866
1867 if (phba->sli_rev == LPFC_SLI_REV4 &&
1868 active_rrqs_xri_bitmap)
1869 memcpy(ndlp->active_rrqs_xri_bitmap,
1870 active_rrqs_xri_bitmap,
1871 phba->cfg_rrq_xri_bitmap_sz);
1872
1873 /* Since we are switching over to the new_ndlp,
1874 * reset the old ndlp state
1875 */
1876 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1877 (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
1878 keep_nlp_state = NLP_STE_NPR_NODE;
1879 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1880 ndlp->nrport = keep_nrport;
1881 }
1882
1883 /*
1884 * If ndlp is not associated with any rport we can drop it here else
1885 * let dev_loss_tmo_callbk trigger DEVICE_RM event
1886 */
1887 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE))
1888 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
1889
1890 if (phba->sli_rev == LPFC_SLI_REV4 &&
1891 active_rrqs_xri_bitmap)
1892 mempool_free(active_rrqs_xri_bitmap,
1893 phba->active_rrq_pool);
1894
1895 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1896 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
1897 new_ndlp->nlp_DID, new_ndlp->nlp_flag,
1898 new_ndlp->nlp_fc4_type);
1899
1900 return new_ndlp;
1901}
1902
1903/**
1904 * lpfc_end_rscn - Check and handle more rscn for a vport
1905 * @vport: pointer to a host virtual N_Port data structure.
1906 *
1907 * This routine checks whether more Registration State Change
1908 * Notifications (RSCNs) came in while the discovery state machine was in
1909 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1910 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1911 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1912 * handling the RSCNs.
1913 **/
1914void
1915lpfc_end_rscn(struct lpfc_vport *vport)
1916{
1917 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1918
1919 if (vport->fc_flag & FC_RSCN_MODE) {
1920 /*
1921 * Check to see if more RSCNs came in while we were
1922 * processing this one.
1923 */
1924 if (vport->fc_rscn_id_cnt ||
1925 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1926 lpfc_els_handle_rscn(vport);
1927 else {
1928 spin_lock_irq(shost->host_lock);
1929 vport->fc_flag &= ~FC_RSCN_MODE;
1930 spin_unlock_irq(shost->host_lock);
1931 }
1932 }
1933}
1934
1935/**
1936 * lpfc_cmpl_els_rrq - Completion handled for els RRQs.
1937 * @phba: pointer to lpfc hba data structure.
1938 * @cmdiocb: pointer to lpfc command iocb data structure.
1939 * @rspiocb: pointer to lpfc response iocb data structure.
1940 *
1941 * This routine will call the clear rrq function to free the rrq and
1942 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1943 * exist then the clear_rrq is still called because the rrq needs to
1944 * be freed.
1945 **/
1946
1947static void
1948lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1949 struct lpfc_iocbq *rspiocb)
1950{
1951 struct lpfc_vport *vport = cmdiocb->vport;
1952 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
1953 struct lpfc_node_rrq *rrq;
1954 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1955 u32 ulp_word4 = get_job_word4(phba, rspiocb);
1956
1957 /* we pass cmdiocb to state machine which needs rspiocb as well */
1958 rrq = cmdiocb->context_un.rrq;
1959 cmdiocb->rsp_iocb = rspiocb;
1960
1961 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1962 "RRQ cmpl: status:x%x/x%x did:x%x",
1963 ulp_status, ulp_word4,
1964 get_job_els_rsp64_did(phba, cmdiocb));
1965
1966
1967 /* rrq completes to NPort <nlp_DID> */
1968 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1969 "2880 RRQ completes to DID x%x "
1970 "Data: x%x x%x x%x x%x x%x\n",
1971 ndlp->nlp_DID, ulp_status, ulp_word4,
1972 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
1973
1974 if (ulp_status) {
1975 /* Check for retry */
1976 /* RRQ failed Don't print the vport to vport rjts */
1977 if (ulp_status != IOSTAT_LS_RJT ||
1978 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
1979 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
1980 (phba)->pport->cfg_log_verbose & LOG_ELS)
1981 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1982 "2881 RRQ failure DID:%06X Status:"
1983 "x%x/x%x\n",
1984 ndlp->nlp_DID, ulp_status,
1985 ulp_word4);
1986 }
1987
1988 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1989 lpfc_els_free_iocb(phba, cmdiocb);
1990 lpfc_nlp_put(ndlp);
1991 return;
1992}
1993/**
1994 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1995 * @phba: pointer to lpfc hba data structure.
1996 * @cmdiocb: pointer to lpfc command iocb data structure.
1997 * @rspiocb: pointer to lpfc response iocb data structure.
1998 *
1999 * This routine is the completion callback function for issuing the Port
2000 * Login (PLOGI) command. For PLOGI completion, there must be an active
2001 * ndlp on the vport node list that matches the remote node ID from the
2002 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
2003 * ignored and command IOCB released. The PLOGI response IOCB status is
2004 * checked for error conditions. If there is error status reported, PLOGI
2005 * retry shall be attempted by invoking the lpfc_els_retry() routine.
2006 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
2007 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
2008 * (DSM) is set for this PLOGI completion. Finally, it checks whether
2009 * there are additional N_Port nodes with the vport that need to perform
2010 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
2011 * PLOGIs.
2012 **/
2013static void
2014lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2015 struct lpfc_iocbq *rspiocb)
2016{
2017 struct lpfc_vport *vport = cmdiocb->vport;
2018 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2019 IOCB_t *irsp;
2020 struct lpfc_nodelist *ndlp, *free_ndlp;
2021 struct lpfc_dmabuf *prsp;
2022 int disc;
2023 struct serv_parm *sp = NULL;
2024 u32 ulp_status, ulp_word4, did, iotag;
2025 bool release_node = false;
2026
2027 /* we pass cmdiocb to state machine which needs rspiocb as well */
2028 cmdiocb->rsp_iocb = rspiocb;
2029
2030 ulp_status = get_job_ulpstatus(phba, rspiocb);
2031 ulp_word4 = get_job_word4(phba, rspiocb);
2032 did = get_job_els_rsp64_did(phba, cmdiocb);
2033
2034 if (phba->sli_rev == LPFC_SLI_REV4) {
2035 iotag = get_wqe_reqtag(cmdiocb);
2036 } else {
2037 irsp = &rspiocb->iocb;
2038 iotag = irsp->ulpIoTag;
2039 }
2040
2041 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2042 "PLOGI cmpl: status:x%x/x%x did:x%x",
2043 ulp_status, ulp_word4, did);
2044
2045 ndlp = lpfc_findnode_did(vport, did);
2046 if (!ndlp) {
2047 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2048 "0136 PLOGI completes to NPort x%x "
2049 "with no ndlp. Data: x%x x%x x%x\n",
2050 did, ulp_status, ulp_word4, iotag);
2051 goto out_freeiocb;
2052 }
2053
2054 /* Since ndlp can be freed in the disc state machine, note if this node
2055 * is being used during discovery.
2056 */
2057 spin_lock_irq(&ndlp->lock);
2058 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2059 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2060 spin_unlock_irq(&ndlp->lock);
2061
2062 /* PLOGI completes to NPort <nlp_DID> */
2063 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2064 "0102 PLOGI completes to NPort x%06x "
2065 "Data: x%x x%x x%x x%x x%x\n",
2066 ndlp->nlp_DID, ndlp->nlp_fc4_type,
2067 ulp_status, ulp_word4,
2068 disc, vport->num_disc_nodes);
2069
2070 /* Check to see if link went down during discovery */
2071 if (lpfc_els_chk_latt(vport)) {
2072 spin_lock_irq(&ndlp->lock);
2073 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2074 spin_unlock_irq(&ndlp->lock);
2075 goto out;
2076 }
2077
2078 if (ulp_status) {
2079 /* Check for retry */
2080 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2081 /* ELS command is being retried */
2082 if (disc) {
2083 spin_lock_irq(&ndlp->lock);
2084 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2085 spin_unlock_irq(&ndlp->lock);
2086 }
2087 goto out;
2088 }
2089 /* PLOGI failed Don't print the vport to vport rjts */
2090 if (ulp_status != IOSTAT_LS_RJT ||
2091 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
2092 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
2093 (phba)->pport->cfg_log_verbose & LOG_ELS)
2094 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2095 "2753 PLOGI failure DID:%06X "
2096 "Status:x%x/x%x\n",
2097 ndlp->nlp_DID, ulp_status,
2098 ulp_word4);
2099
2100 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2101 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
2102 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2103 NLP_EVT_CMPL_PLOGI);
2104
2105 /* If a PLOGI collision occurred, the node needs to continue
2106 * with the reglogin process.
2107 */
2108 spin_lock_irq(&ndlp->lock);
2109 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) &&
2110 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
2111 spin_unlock_irq(&ndlp->lock);
2112 goto out;
2113 }
2114
2115 /* No PLOGI collision and the node is not registered with the
2116 * scsi or nvme transport. It is no longer an active node. Just
2117 * start the device remove process.
2118 */
2119 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2120 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2121 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
2122 release_node = true;
2123 }
2124 spin_unlock_irq(&ndlp->lock);
2125
2126 if (release_node)
2127 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2128 NLP_EVT_DEVICE_RM);
2129 } else {
2130 /* Good status, call state machine */
2131 prsp = list_entry(cmdiocb->cmd_dmabuf->list.next,
2132 struct lpfc_dmabuf, list);
2133 if (!prsp)
2134 goto out;
2135 if (!lpfc_is_els_acc_rsp(prsp))
2136 goto out;
2137 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
2138
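 /* Service parameters start after the 4-byte ELS command code in
 * the ACC payload.
 */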
2139 sp = (struct serv_parm *)((u8 *)prsp->virt +
2140 sizeof(u32));
2141
2142 ndlp->vmid_support = 0;
2143 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) ||
2144 (phba->cfg_vmid_priority_tagging &&
2145 sp->cmn.priority_tagging)) {
2146 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS,
2147 "4018 app_hdr_support %d tagging %d DID x%x\n",
2148 sp->cmn.app_hdr_support,
2149 sp->cmn.priority_tagging,
2150 ndlp->nlp_DID);
2151 /* if the dest port supports VMID, mark it in ndlp */
2152 ndlp->vmid_support = 1;
2153 }
2154
2155 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2156 NLP_EVT_CMPL_PLOGI);
2157 }
2158
2159 if (disc && vport->num_disc_nodes) {
2160 /* Check to see if there are more PLOGIs to be sent */
2161 lpfc_more_plogi(vport);
2162
2163 if (vport->num_disc_nodes == 0) {
2164 spin_lock_irq(shost->host_lock);
2165 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2166 spin_unlock_irq(shost->host_lock);
2167
2168 lpfc_can_disctmo(vport);
2169 lpfc_end_rscn(vport);
2170 }
2171 }
2172
2173out:
2174 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2175 "PLOGI Cmpl PUT: did:x%x refcnt %d",
2176 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2177
2178out_freeiocb:
2179 /* Release the reference on the original I/O request. */
2180 free_ndlp = cmdiocb->ndlp;
2181
2182 lpfc_els_free_iocb(phba, cmdiocb);
2183 lpfc_nlp_put(free_ndlp);
2184 return;
2185}
2186
2187/**
2188 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
2189 * @vport: pointer to a host virtual N_Port data structure.
2190 * @did: destination port identifier.
2191 * @retry: number of retries to the command IOCB.
2192 *
2193 * This routine issues a Port Login (PLOGI) command to a remote N_Port
2194 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
2195 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
2196 * This routine constructs the proper fields of the PLOGI IOCB and invokes
2197 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
2198 *
2199 * Note that the ndlp reference count will be incremented by 1 for holding
2200 * the ndlp and the reference to ndlp will be stored into the ndlp field
2201 * of the IOCB for the completion callback function to the PLOGI ELS command.
2202 *
2203 * Return code
2204 * 0 - Successfully issued a plogi for @vport
2205 * 1 - failed to issue a plogi for @vport
2206 **/
2207int
2208lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
2209{
2210 struct lpfc_hba *phba = vport->phba;
2211 struct serv_parm *sp;
2212 struct lpfc_nodelist *ndlp;
2213 struct lpfc_iocbq *elsiocb;
2214 uint8_t *pcmd;
2215 uint16_t cmdsize;
2216 int ret;
2217
2218 ndlp = lpfc_findnode_did(vport, did);
2219 if (!ndlp)
2220 return 1;
2221
2222 /* Defer issuing the PLOGI until the outstanding UNREG_RPI
2223 * mbox command completes, unless we are going offline. This
2224 * logic does not apply to Fabric DIDs.
2225 */
2226 if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) &&
2227 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
2228 !(vport->fc_flag & FC_OFFLINE_MODE)) {
2229 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2230 "4110 Issue PLOGI x%x deferred "
2231 "on NPort x%x rpi x%x flg x%x Data:"
2232 " x%px\n",
2233 ndlp->nlp_defer_did, ndlp->nlp_DID,
2234 ndlp->nlp_rpi, ndlp->nlp_flag, ndlp);
2235
2236 /* We can only defer 1st PLOGI */
2237 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
2238 ndlp->nlp_defer_did = did;
2239 return 0;
2240 }
2241
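 /* PLOGI payload: 4-byte ELS command code followed by the common
 * service parameters.
 */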
2242 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
2243 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
2244 ELS_CMD_PLOGI);
2245 if (!elsiocb)
2246 return 1;
2247
2248 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2249
2250 /* For PLOGI request, remainder of payload is service parameters */
2251 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
2252 pcmd += sizeof(uint32_t);
2253 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
2254 sp = (struct serv_parm *) pcmd;
2255
2256 /*
2257 * If we are an N_Port connected to a Fabric, fix up parameters so logins
2258 * to devices on remote loops work.
2259 */
2260 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
2261 sp->cmn.altBbCredit = 1;
2262
2263 if (sp->cmn.fcphLow < FC_PH_4_3)
2264 sp->cmn.fcphLow = FC_PH_4_3;
2265
2266 if (sp->cmn.fcphHigh < FC_PH3)
2267 sp->cmn.fcphHigh = FC_PH3;
2268
2269 sp->cmn.valid_vendor_ver_level = 0;
2270 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
2271 sp->cmn.bbRcvSizeMsb &= 0xF;
2272
2273 /* Check if the destination port supports VMID */
2274 ndlp->vmid_support = 0;
2275 if (vport->vmid_priority_tagging)
2276 sp->cmn.priority_tagging = 1;
2277 else if (phba->cfg_vmid_app_header &&
2278 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags))
2279 sp->cmn.app_hdr_support = 1;
2280
2281 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2282 "Issue PLOGI: did:x%x",
2283 did, 0, 0);
2284
2285 /* If our firmware supports this feature, convey that
2286 * information to the target using the vendor specific field.
2287 */
2288 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
2289 sp->cmn.valid_vendor_ver_level = 1;
2290 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
2291 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
2292 }
2293
2294 phba->fc_stat.elsXmitPLOGI++;
2295 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi;
2296
2297 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2298 "Issue PLOGI: did:x%x refcnt %d",
2299 did, kref_read(&ndlp->kref), 0);
2300 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2301 if (!elsiocb->ndlp) {
2302 lpfc_els_free_iocb(phba, elsiocb);
2303 return 1;
2304 }
2305
2306 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2307 if (ret) {
2308 lpfc_els_free_iocb(phba, elsiocb);
2309 lpfc_nlp_put(ndlp);
2310 return 1;
2311 }
2312
2313 return 0;
2314}
2315
2316/**
2317 * lpfc_cmpl_els_prli - Completion callback function for prli
2318 * @phba: pointer to lpfc hba data structure.
2319 * @cmdiocb: pointer to lpfc command iocb data structure.
2320 * @rspiocb: pointer to lpfc response iocb data structure.
2321 *
2322 * This routine is the completion callback function for a Process Login
2323 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
2324 * status. If there is error status reported, PRLI retry shall be attempted
2325 * by invoking the lpfc_els_retry() routine. Otherwise, the state
2326 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
2327 * ndlp to mark the PRLI completion.
2328 **/
2329static void
2330lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2331 struct lpfc_iocbq *rspiocb)
2332{
2333 struct lpfc_vport *vport = cmdiocb->vport;
2334 struct lpfc_nodelist *ndlp;
2335 char *mode;
2336 u32 loglevel;
2337 u32 ulp_status;
2338 u32 ulp_word4;
2339 bool release_node = false;
2340
2341 /* we pass cmdiocb to state machine which needs rspiocb as well */
2342 cmdiocb->rsp_iocb = rspiocb;
2343
2344 ndlp = cmdiocb->ndlp;
2345
2346 ulp_status = get_job_ulpstatus(phba, rspiocb);
2347 ulp_word4 = get_job_word4(phba, rspiocb);
2348
2349 spin_lock_irq(&ndlp->lock);
2350 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2351
2352 /* Driver supports multiple FC4 types. Counters matter. */
2353 vport->fc_prli_sent--;
2354 ndlp->fc4_prli_sent--;
2355 spin_unlock_irq(&ndlp->lock);
2356
2357 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2358 "PRLI cmpl: status:x%x/x%x did:x%x",
2359 ulp_status, ulp_word4,
2360 ndlp->nlp_DID);
2361
2362 /* PRLI completes to NPort <nlp_DID> */
2363 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2364 "0103 PRLI completes to NPort x%06x "
2365 "Data: x%x x%x x%x x%x\n",
2366 ndlp->nlp_DID, ulp_status, ulp_word4,
2367 vport->num_disc_nodes, ndlp->fc4_prli_sent);
2368
2369 /* Check to see if link went down during discovery */
2370 if (lpfc_els_chk_latt(vport))
2371 goto out;
2372
2373 if (ulp_status) {
2374 /* Check for retry */
2375 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2376 /* ELS command is being retried */
2377 goto out;
2378 }
2379
2380 /* If we don't send GFT_ID to Fabric, a PRLI error
2381 * could be expected.
2382 */
2383 if ((vport->fc_flag & FC_FABRIC) ||
2384 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) {
2385 mode = KERN_ERR;
2386 loglevel = LOG_TRACE_EVENT;
2387 } else {
2388 mode = KERN_INFO;
2389 loglevel = LOG_ELS;
2390 }
2391
2392 /* PRLI failed */
2393 lpfc_printf_vlog(vport, mode, loglevel,
2394 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
2395 "data: x%x x%x x%x\n",
2396 ndlp->nlp_DID, ulp_status,
2397 ulp_word4, ndlp->nlp_state,
2398 ndlp->fc4_prli_sent, ndlp->nlp_flag);
2399
2400 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2401 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
2402 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2403 NLP_EVT_CMPL_PRLI);
2404
2405 /* The following condition catches an inflight transition
2406 * mismatch typically caused by an RSCN. Skip any
2407 * processing to allow recovery.
2408 */
2409 if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
2410 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) ||
2411 (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2412 ndlp->nlp_flag & NLP_DELAY_TMO)) {
2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
2414 "2784 PRLI cmpl: Allow Node recovery "
2415 "DID x%06x nstate x%x nflag x%x\n",
2416 ndlp->nlp_DID, ndlp->nlp_state,
2417 ndlp->nlp_flag);
2418 goto out;
2419 }
2420
2421 /*
2422 * For P2P topology, retain the node so that PLOGI can be
2423 * attempted on it again.
2424 */
2425 if (vport->fc_flag & FC_PT2PT)
2426 goto out;
2427
2428 /* As long as this node is not registered with the SCSI
2429 * or NVMe transport and no other PRLIs are outstanding,
2430 * it is no longer an active node. Otherwise devloss
2431 * handles the final cleanup.
2432 */
2433 spin_lock_irq(&ndlp->lock);
2434 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
2435 !ndlp->fc4_prli_sent) {
2436 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2437 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
2438 release_node = true;
2439 }
2440 spin_unlock_irq(&ndlp->lock);
2441
2442 if (release_node)
2443 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2444 NLP_EVT_DEVICE_RM);
2445 } else {
2446 /* Good status, call state machine. However, if another
2447 * PRLI is outstanding, don't call the state machine
2448 * because final disposition to Mapped or Unmapped is
2449 * completed there.
2450 */
2451 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2452 NLP_EVT_CMPL_PRLI);
2453 }
2454
2455out:
2456 lpfc_els_free_iocb(phba, cmdiocb);
2457 lpfc_nlp_put(ndlp);
2458 return;
2459}
2460
2461/**
2462 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
2463 * @vport: pointer to a host virtual N_Port data structure.
2464 * @ndlp: pointer to a node-list data structure.
2465 * @retry: number of retries to the command IOCB.
2466 *
2467 * This routine issues a Process Login (PRLI) ELS command for the
2468 * @vport. The PRLI service parameters are set up in the payload of the
2469 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
2470 * is put to the IOCB completion callback func field before invoking the
2471 * routine lpfc_sli_issue_iocb() to send out PRLI command.
2472 *
2473 * Note that the ndlp reference count will be incremented by 1 for holding the
2474 * ndlp and the reference to ndlp will be stored into the ndlp field of
2475 * the IOCB for the completion callback function to the PRLI ELS command.
2476 *
2477 * Return code
2478 * 0 - successfully issued prli iocb command for @vport
2479 * 1 - failed to issue prli iocb command for @vport
2480 **/
2481int
2482lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2483 uint8_t retry)
2484{
2485 int rc = 0;
2486 struct lpfc_hba *phba = vport->phba;
2487 PRLI *npr;
2488 struct lpfc_nvme_prli *npr_nvme;
2489 struct lpfc_iocbq *elsiocb;
2490 uint8_t *pcmd;
2491 uint16_t cmdsize;
2492 u32 local_nlp_type, elscmd;
2493
2494 /*
2495 * If we are in RSCN mode, the FC4 types supported from a
2496 * previous GFT_ID command may not be accurate. So, if we
2497 * are an NVME Initiator, always look for the possibility of
2498 * the remote NPort being an NVME Target.
2499 */
2500 if (phba->sli_rev == LPFC_SLI_REV4 &&
2501 vport->fc_flag & FC_RSCN_MODE &&
2502 vport->nvmei_support)
2503 ndlp->nlp_fc4_type |= NLP_FC4_NVME;
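 /* Work from a local copy of the FC4 type mask; each bit is cleared
 * below once the PRLI for that type has been built.
 */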
2504 local_nlp_type = ndlp->nlp_fc4_type;
2505
2506 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
2507 * fields here before any of them can complete.
2508 */
2509 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
2510 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
2511 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
2512 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
2513 ndlp->nvme_fb_size = 0;
2514
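 /* One PRLI is built per FC4 type. On SLI4, after the first PRLI is
 * issued the code jumps back to this label if the other type (FCP or
 * NVME) still needs a PRLI.
 */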
2515 send_next_prli:
2516 if (local_nlp_type & NLP_FC4_FCP) {
2517 /* Payload is 4 + 16 = 20 (0x14) bytes. */
2518 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2519 elscmd = ELS_CMD_PRLI;
2520 } else if (local_nlp_type & NLP_FC4_NVME) {
2521 /* Payload is 4 + 20 = 24 (0x18) bytes. */
2522 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
2523 elscmd = ELS_CMD_NVMEPRLI;
2524 } else {
2525 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2526 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
2527 ndlp->nlp_fc4_type, ndlp->nlp_DID);
2528 return 1;
2529 }
2530
2531 /* SLI3 ports don't support NVME. If this rport is a strict NVME
2532 * FC4 type, implicitly LOGO.
2533 */
2534 if (phba->sli_rev == LPFC_SLI_REV3 &&
2535 ndlp->nlp_fc4_type == NLP_FC4_NVME) {
2536 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2537 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
2538 ndlp->nlp_type);
2539 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
2540 return 1;
2541 }
2542
2543 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2544 ndlp->nlp_DID, elscmd);
2545 if (!elsiocb)
2546 return 1;
2547
2548 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2549
2550 /* For PRLI request, remainder of payload is service parameters */
2551 memset(pcmd, 0, cmdsize);
2552
2553 if (local_nlp_type & NLP_FC4_FCP) {
2554 /* Remainder of payload is FCP PRLI parameter page.
2555 * Note: this data structure is defined as
2556 * BE/LE in the structure definition so no
2557 * byte swap call is made.
2558 */
2559 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI;
2560 pcmd += sizeof(uint32_t);
2561 npr = (PRLI *)pcmd;
2562
2563 /*
2564 * If our firmware version is 3.20 or later,
2565 * set the following bits for FC-TAPE support.
2566 */
2567 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
2568 npr->ConfmComplAllowed = 1;
2569 npr->Retry = 1;
2570 npr->TaskRetryIdReq = 1;
2571 }
2572 npr->estabImagePair = 1;
2573 npr->readXferRdyDis = 1;
2574 if (vport->cfg_first_burst_size)
2575 npr->writeXferRdyDis = 1;
2576
2577 /* For FCP support */
2578 npr->prliType = PRLI_FCP_TYPE;
2579 npr->initiatorFunc = 1;
2580 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ;
2581
2582 /* Remove FCP type - processed. */
2583 local_nlp_type &= ~NLP_FC4_FCP;
2584 } else if (local_nlp_type & NLP_FC4_NVME) {
2585 /* Remainder of payload is NVME PRLI parameter page.
2586 * This data structure is the newer definition that
2587 * uses bf macros so a byte swap is required.
2588 */
2589 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI;
2590 pcmd += sizeof(uint32_t);
2591 npr_nvme = (struct lpfc_nvme_prli *)pcmd;
2592 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
2593 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
2594 if (phba->nsler) {
2595 bf_set(prli_nsler, npr_nvme, 1);
2596 bf_set(prli_conf, npr_nvme, 1);
2597 }
2598
2599 /* Only initiators request first burst. */
2600 if ((phba->cfg_nvme_enable_fb) &&
2601 !phba->nvmet_support)
2602 bf_set(prli_fba, npr_nvme, 1);
2603
2604 if (phba->nvmet_support) {
2605 bf_set(prli_tgt, npr_nvme, 1);
2606 bf_set(prli_disc, npr_nvme, 1);
2607 } else {
2608 bf_set(prli_init, npr_nvme, 1);
2609 bf_set(prli_conf, npr_nvme, 1);
2610 }
2611
2612 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
2613 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
2614 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ;
2615
2616 /* Remove NVME type - processed. */
2617 local_nlp_type &= ~NLP_FC4_NVME;
2618 }
2619
2620 phba->fc_stat.elsXmitPRLI++;
2621 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli;
2622
2623 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2624 "Issue PRLI: did:x%x refcnt %d",
2625 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2626 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2627 if (!elsiocb->ndlp) {
2628 lpfc_els_free_iocb(phba, elsiocb);
2629 return 1;
2630 }
2631
2632 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2633 if (rc == IOCB_ERROR) {
2634 lpfc_els_free_iocb(phba, elsiocb);
2635 lpfc_nlp_put(ndlp);
2636 return 1;
2637 }
2638
2639 /* The vport counters are used for lpfc_scan_finished, but
2640 * the ndlp is used to track outstanding PRLIs for different
2641 * FC4 types.
2642 */
2643 spin_lock_irq(&ndlp->lock);
2644 ndlp->nlp_flag |= NLP_PRLI_SND;
2645 vport->fc_prli_sent++;
2646 ndlp->fc4_prli_sent++;
2647 spin_unlock_irq(&ndlp->lock);
2648
2649 /* The driver supports 2 FC4 types. Make sure
2650 * a PRLI is issued for all types before exiting.
2651 */
2652 if (phba->sli_rev == LPFC_SLI_REV4 &&
2653 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
2654 goto send_next_prli;
2655 else
2656 return 0;
2657}
2658
2659/**
2660 * lpfc_rscn_disc - Perform rscn discovery for a vport
2661 * @vport: pointer to a host virtual N_Port data structure.
2662 *
2663 * This routine performs Registration State Change Notification (RSCN)
2664 * discovery for a @vport. If the @vport's node port recovery count is not
2665 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
2666 * the nodes that need recovery. If none of the PLOGI were needed through
2667 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
2668 * invoked to check and handle possible more RSCN came in during the period
2669 * of processing the current ones.
2670 **/
2671static void
2672lpfc_rscn_disc(struct lpfc_vport *vport)
2673{
2674 lpfc_can_disctmo(vport);
2675
2676 /* RSCN discovery */
2677 /* go thru NPR nodes and issue ELS PLOGIs */
2678 if (vport->fc_npr_cnt)
2679 if (lpfc_els_disc_plogi(vport))
2680 return;
2681
2682 lpfc_end_rscn(vport);
2683}
2684
2685/**
2686 * lpfc_adisc_done - Complete the adisc phase of discovery
2687 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2688 *
2689 * This function is called when the final ADISC is completed during discovery.
2690 * This function handles clearing link attention or issuing reg_vpi depending
2691 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2692 * discovery.
2693 * This function is called with no locks held.
2694 **/
2695static void
2696lpfc_adisc_done(struct lpfc_vport *vport)
2697{
2698 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2699 struct lpfc_hba *phba = vport->phba;
2700
2701 /*
2702 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2703 * and continue discovery.
2704 */
2705 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2706 !(vport->fc_flag & FC_RSCN_MODE) &&
2707 (phba->sli_rev < LPFC_SLI_REV4)) {
2708
2709 /*
2710 * If link is down, clear_la and reg_vpi will be done after
2711 * flogi following a link up event
2712 */
2713 if (!lpfc_is_link_up(phba))
2714 return;
2715
2716 /* The ADISCs are complete. Doesn't matter if they
2717 * succeeded or failed because the ADISC completion
2718 * routine guarantees to call the state machine and
2719 * the RPI is either unregistered (failed ADISC response)
2720 * or the RPI is still valid and the node is marked
2721 * mapped for a target. The exchanges should be in the
2722 * correct state. This code is specific to SLI3.
2723 */
2724 lpfc_issue_clear_la(phba, vport);
2725 lpfc_issue_reg_vpi(phba, vport);
2726 return;
2727 }
2728 /*
2729 * For SLI2, we need to set port_state to READY
2730 * and continue discovery.
2731 */
2732 if (vport->port_state < LPFC_VPORT_READY) {
2733 /* If we get here, there is nothing to ADISC */
2734 lpfc_issue_clear_la(phba, vport);
2735 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2736 vport->num_disc_nodes = 0;
2737 /* go thru NPR list, issue ELS PLOGIs */
2738 if (vport->fc_npr_cnt)
2739 lpfc_els_disc_plogi(vport);
2740 if (!vport->num_disc_nodes) {
2741 spin_lock_irq(shost->host_lock);
2742 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2743 spin_unlock_irq(shost->host_lock);
2744 lpfc_can_disctmo(vport);
2745 lpfc_end_rscn(vport);
2746 }
2747 }
2748 vport->port_state = LPFC_VPORT_READY;
2749 } else
2750 lpfc_rscn_disc(vport);
2751}
2752
2753/**
2754 * lpfc_more_adisc - Issue more adisc as needed
2755 * @vport: pointer to a host virtual N_Port data structure.
2756 *
2757 * This routine determines whether there are more ndlps on a @vport
2758 * node list need to have Address Discover (ADISC) issued. If so, it will
2759 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2760 * remaining nodes which need to have ADISC sent.
2761 **/
2762void
2763lpfc_more_adisc(struct lpfc_vport *vport)
2764{
2765 if (vport->num_disc_nodes)
2766 vport->num_disc_nodes--;
2767 /* Continue discovery with <num_disc_nodes> ADISCs to go */
2768 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2769 "0210 Continue discovery with %d ADISCs to go "
2770 "Data: x%x x%x x%x\n",
2771 vport->num_disc_nodes, vport->fc_adisc_cnt,
2772 vport->fc_flag, vport->port_state);
2773 /* Check to see if there are more ADISCs to be sent */
2774 if (vport->fc_flag & FC_NLP_MORE) {
2775 lpfc_set_disctmo(vport);
2776 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2777 lpfc_els_disc_adisc(vport);
2778 }
2779 if (!vport->num_disc_nodes)
2780 lpfc_adisc_done(vport);
2781 return;
2782}
2783
2784/**
2785 * lpfc_cmpl_els_adisc - Completion callback function for adisc
2786 * @phba: pointer to lpfc hba data structure.
2787 * @cmdiocb: pointer to lpfc command iocb data structure.
2788 * @rspiocb: pointer to lpfc response iocb data structure.
2789 *
2790 * This routine is the completion function for issuing the Address Discover
2791 * (ADISC) command. It first checks to see whether link went down during
2792 * the discovery process. If so, the node will be marked as node port
2793 * recovery for issuing discover IOCB by the link attention handler and
2794 * exit. Otherwise, the response status is checked. If error was reported
2795 * in the response status, the ADISC command shall be retried by invoking
2796 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
2797 * the response status, the state machine is invoked to set transition
2798 * with respect to NLP_EVT_CMPL_ADISC event.
2799 **/
2800static void
2801lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2802 struct lpfc_iocbq *rspiocb)
2803{
2804 struct lpfc_vport *vport = cmdiocb->vport;
2805 IOCB_t *irsp;
2806 struct lpfc_nodelist *ndlp;
2807 int disc;
2808 u32 ulp_status, ulp_word4, tmo;
2809 bool release_node = false;
2810
2811 /* we pass cmdiocb to state machine which needs rspiocb as well */
2812 cmdiocb->rsp_iocb = rspiocb;
2813
2814 ndlp = cmdiocb->ndlp;
2815
2816 ulp_status = get_job_ulpstatus(phba, rspiocb);
2817 ulp_word4 = get_job_word4(phba, rspiocb);
2818
2819 if (phba->sli_rev == LPFC_SLI_REV4) {
2820 tmo = get_wqe_tmo(cmdiocb);
2821 } else {
2822 irsp = &rspiocb->iocb;
2823 tmo = irsp->ulpTimeout;
2824 }
2825
2826 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2827 "ADISC cmpl: status:x%x/x%x did:x%x",
2828 ulp_status, ulp_word4,
2829 ndlp->nlp_DID);
2830
2831 /* Since ndlp can be freed in the disc state machine, note if this node
2832 * is being used during discovery.
2833 */
2834 spin_lock_irq(&ndlp->lock);
2835 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2836 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2837 spin_unlock_irq(&ndlp->lock);
2838 /* ADISC completes to NPort <nlp_DID> */
2839 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2840 "0104 ADISC completes to NPort x%x "
2841 "Data: x%x x%x x%x x%x x%x\n",
2842 ndlp->nlp_DID, ulp_status, ulp_word4,
2843 tmo, disc, vport->num_disc_nodes);
2844 /* Check to see if link went down during discovery */
2845 if (lpfc_els_chk_latt(vport)) {
2846 spin_lock_irq(&ndlp->lock);
2847 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2848 spin_unlock_irq(&ndlp->lock);
2849 goto out;
2850 }
2851
2852 if (ulp_status) {
2853 /* Check for retry */
2854 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2855 /* ELS command is being retried */
2856 if (disc) {
2857 spin_lock_irq(&ndlp->lock);
2858 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2859 spin_unlock_irq(&ndlp->lock);
2860 lpfc_set_disctmo(vport);
2861 }
2862 goto out;
2863 }
2864 /* ADISC failed */
2865 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2866 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2867 ndlp->nlp_DID, ulp_status,
2868 ulp_word4);
2869 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2870 NLP_EVT_CMPL_ADISC);
2871
2872 /* As long as this node is not registered with the SCSI or NVMe
2873 * transport, it is no longer an active node. Otherwise
2874 * devloss handles the final cleanup.
2875 */
2876 spin_lock_irq(&ndlp->lock);
2877 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2878 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2879 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
2880 release_node = true;
2881 }
2882 spin_unlock_irq(&ndlp->lock);
2883
2884 if (release_node)
2885 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2886 NLP_EVT_DEVICE_RM);
2887 } else
2888 /* Good status, call state machine */
2889 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2890 NLP_EVT_CMPL_ADISC);
2891
2892 /* Check to see if there are more ADISCs to be sent */
2893 if (disc && vport->num_disc_nodes)
2894 lpfc_more_adisc(vport);
2895out:
2896 lpfc_els_free_iocb(phba, cmdiocb);
2897 lpfc_nlp_put(ndlp);
2898 return;
2899}
2900
2901/**
2902 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
2903 * @vport: pointer to a virtual N_Port data structure.
2904 * @ndlp: pointer to a node-list data structure.
2905 * @retry: number of retries to the command IOCB.
2906 *
2907 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2908 * @vport. It prepares the payload of the ADISC ELS command, updates the
2909 * state flags of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2910 * to issue the ADISC ELS command.
2911 *
2912 * Note that the ndlp reference count will be incremented by 1 for holding the
2913 * ndlp and the reference to ndlp will be stored into the ndlp field of
2914 * the IOCB for the completion callback function to the ADISC ELS command.
2915 *
2916 * Return code
2917 * 0 - successfully issued adisc
2918 * 1 - failed to issue adisc
2919 **/
2920int
2921lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2922 uint8_t retry)
2923{
2924 int rc = 0;
2925 struct lpfc_hba *phba = vport->phba;
2926 ADISC *ap;
2927 struct lpfc_iocbq *elsiocb;
2928 uint8_t *pcmd;
2929 uint16_t cmdsize;
2930
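 /* ADISC payload: 4-byte ELS command code followed by the ADISC
 * parameter block (hard ALPA, port name, node name, N_Port ID).
 */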
2931 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2932 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2933 ndlp->nlp_DID, ELS_CMD_ADISC);
2934 if (!elsiocb)
2935 return 1;
2936
2937 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2938
2939 /* For ADISC request, remainder of payload is service parameters */
2940 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2941 pcmd += sizeof(uint32_t);
2942
2943 /* Fill in ADISC payload */
2944 ap = (ADISC *) pcmd;
2945 ap->hardAL_PA = phba->fc_pref_ALPA;
2946 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2947 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2948 ap->DID = be32_to_cpu(vport->fc_myDID);
2949
2950 phba->fc_stat.elsXmitADISC++;
2951 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
2952 spin_lock_irq(&ndlp->lock);
2953 ndlp->nlp_flag |= NLP_ADISC_SND;
2954 spin_unlock_irq(&ndlp->lock);
2955 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2956 if (!elsiocb->ndlp) {
2957 lpfc_els_free_iocb(phba, elsiocb);
2958 goto err;
2959 }
2960
2961 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2962 "Issue ADISC: did:x%x refcnt %d",
2963 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2964
2965 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2966 if (rc == IOCB_ERROR) {
2967 lpfc_els_free_iocb(phba, elsiocb);
2968 lpfc_nlp_put(ndlp);
2969 goto err;
2970 }
2971
2972 return 0;
2973
2974err:
2975 spin_lock_irq(&ndlp->lock);
2976 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2977 spin_unlock_irq(&ndlp->lock);
2978 return 1;
2979}
2980
2981/**
2982 * lpfc_cmpl_els_logo - Completion callback function for logo
2983 * @phba: pointer to lpfc hba data structure.
2984 * @cmdiocb: pointer to lpfc command iocb data structure.
2985 * @rspiocb: pointer to lpfc response iocb data structure.
2986 *
2987 * This routine is the completion function for issuing the ELS Logout (LOGO)
2988 * command. If no error status was reported from the LOGO response, the
2989 * state machine of the associated ndlp shall be invoked for transition with
2990 * respect to NLP_EVT_CMPL_LOGO event.
2991 **/
2992static void
2993 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2994 struct lpfc_iocbq *rspiocb)
2995{
2996 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
2997 struct lpfc_vport *vport = ndlp->vport;
2998 IOCB_t *irsp;
2999 unsigned long flags;
3000 uint32_t skip_recovery = 0;
3001 int wake_up_waiter = 0;
3002 u32 ulp_status;
3003 u32 ulp_word4;
3004 u32 tmo;
3005
3006 /* we pass cmdiocb to state machine which needs rspiocb as well */
3007 cmdiocb->rsp_iocb = rspiocb;
3008
3009 ulp_status = get_job_ulpstatus(phba, rspiocb);
3010 ulp_word4 = get_job_word4(phba, rspiocb);
3011
3012 if (phba->sli_rev == LPFC_SLI_REV4) {
3013 tmo = get_wqe_tmo(cmdiocb);
3014 } else {
3015 irsp = &rspiocb->iocb;
3016 tmo = irsp->ulpTimeout;
3017 }
3018
3019 spin_lock_irq(&ndlp->lock);
3020 ndlp->nlp_flag &= ~NLP_LOGO_SND;
3021 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
3022 wake_up_waiter = 1;
3023 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
3024 }
3025 spin_unlock_irq(&ndlp->lock);
3026
3027 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3028 "LOGO cmpl: status:x%x/x%x did:x%x",
3029 ulp_status, ulp_word4,
3030 ndlp->nlp_DID);
3031
3032 /* LOGO completes to NPort <nlp_DID> */
3033 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3034 "0105 LOGO completes to NPort x%x "
3035 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n",
3036 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
3037 ulp_status, ulp_word4,
3038 tmo, vport->num_disc_nodes);
3039
3040 if (lpfc_els_chk_latt(vport)) {
3041 skip_recovery = 1;
3042 goto out;
3043 }
3044
3045 /* The LOGO will not be retried on failure. A LOGO was
3046 * issued to the remote rport and an ACC, an RJT, or no answer are
3047 * all acceptable. Note the failure and move forward with
3048 * discovery. The PLOGI will retry.
3049 */
3050 if (ulp_status) {
3051 /* LOGO failed */
3052 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3053 "2756 LOGO failure, No Retry DID:%06X "
3054 "Status:x%x/x%x\n",
3055 ndlp->nlp_DID, ulp_status,
3056 ulp_word4);
3057
3058 if (lpfc_error_lost_link(vport, ulp_status, ulp_word4))
3059 skip_recovery = 1;
3060 }
3061
3062 /* Call state machine. This will unregister the rpi if needed. */
3063 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
3064
3065 if (skip_recovery)
3066 goto out;
3067
3068 /* The driver sets this flag for an NPIV instance that doesn't want to
3069 * log into the remote port.
3070 */
3071 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
3072 spin_lock_irq(&ndlp->lock);
3073 if (phba->sli_rev == LPFC_SLI_REV4)
3074 ndlp->nlp_flag |= NLP_RELEASE_RPI;
3075 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3076 spin_unlock_irq(&ndlp->lock);
3077 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3078 NLP_EVT_DEVICE_RM);
3079 goto out_rsrc_free;
3080 }
3081
3082out:
3083 /* At this point, the LOGO processing is complete. NOTE: For a
3084 * pt2pt topology, we are assuming the NPortID will only change
3085 * on link up processing. For a LOGO / PLOGI initiated by the
3086 * Initiator, we are assuming the NPortID is not going to change.
3087 */
3088
3089 if (wake_up_waiter && ndlp->logo_waitq)
3090 wake_up(ndlp->logo_waitq);
3091 /*
3092 * If the node is a target, the handling attempts to recover the port.
3093 * For any other port type, the rpi is unregistered as an implicit
3094 * LOGO.
3095 */
3096 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
3097 skip_recovery == 0) {
3098 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3099 spin_lock_irqsave(&ndlp->lock, flags);
3100 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3101 spin_unlock_irqrestore(&ndlp->lock, flags);
3102
3103 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3104 "3187 LOGO completes to NPort x%x: Start "
3105 "Recovery Data: x%x x%x x%x x%x\n",
3106 ndlp->nlp_DID, ulp_status,
3107 ulp_word4, tmo,
3108 vport->num_disc_nodes);
3109
3110 lpfc_els_free_iocb(phba, cmdiocb);
3111 lpfc_nlp_put(ndlp);
3112
3113 lpfc_disc_start(vport);
3114 return;
3115 }
3116
3117 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
3118 * driver sends a LOGO to the rport to cleanup. For fabric and
3119 * initiator ports, clean up the node as long as the node is not
3120 * registered with the transport.
3121 */
3122 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
3123 spin_lock_irq(&ndlp->lock);
3124 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3125 spin_unlock_irq(&ndlp->lock);
3126 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3127 NLP_EVT_DEVICE_RM);
3128 }
3129out_rsrc_free:
3130 /* Driver is done with the I/O. */
3131 lpfc_els_free_iocb(phba, cmdiocb);
3132 lpfc_nlp_put(ndlp);
3133}
3134
3135/**
3136 * lpfc_issue_els_logo - Issue a logo to a node on a vport
3137 * @vport: pointer to a virtual N_Port data structure.
3138 * @ndlp: pointer to a node-list data structure.
3139 * @retry: number of retries to the command IOCB.
3140 *
3141 * This routine constructs and issues an ELS Logout (LOGO) iocb command
3142 * to a remote node, referred by an @ndlp on a @vport. It constructs the
3143 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
3144 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
3145 *
3146 * Note that the ndlp reference count will be incremented by 1 for holding the
3147 * ndlp and the reference to ndlp will be stored into the ndlp field of
3148 * the IOCB for the completion callback function to the LOGO ELS command.
3149 *
3150 * Callers of this routine are expected to unregister the RPI first
3151 *
3152 * Return code
3153 * 0 - successfully issued logo
3154 * 1 - failed to issue logo
3155 **/
3156int
3157 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3158 uint8_t retry)
3159{
3160 struct lpfc_hba *phba = vport->phba;
3161 struct lpfc_iocbq *elsiocb;
3162 uint8_t *pcmd;
3163 uint16_t cmdsize;
3164 int rc;
3165
3166 spin_lock_irq(&ndlp->lock);
3167 if (ndlp->nlp_flag & NLP_LOGO_SND) {
3168 spin_unlock_irq(&ndlp->lock);
3169 return 0;
3170 }
3171 spin_unlock_irq(&ndlp->lock);
3172
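 /* LOGO payload: 4-byte ELS command code, the local N_Port ID, and
 * the local port name.
 */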
3173 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
3174 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3175 ndlp->nlp_DID, ELS_CMD_LOGO);
3176 if (!elsiocb)
3177 return 1;
3178
3179 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3180 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
3181 pcmd += sizeof(uint32_t);
3182
3183 /* Fill in LOGO payload */
3184 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
3185 pcmd += sizeof(uint32_t);
3186 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
3187
3188 phba->fc_stat.elsXmitLOGO++;
3189 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo;
3190 spin_lock_irq(&ndlp->lock);
3191 ndlp->nlp_flag |= NLP_LOGO_SND;
3192 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
3193 spin_unlock_irq(&ndlp->lock);
3194 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3195 if (!elsiocb->ndlp) {
3196 lpfc_els_free_iocb(phba, elsiocb);
3197 goto err;
3198 }
3199
3200 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3201 "Issue LOGO: did:x%x refcnt %d",
3202 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3203
3204 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3205 if (rc == IOCB_ERROR) {
3206 lpfc_els_free_iocb(phba, elsiocb);
3207 lpfc_nlp_put(ndlp);
3208 goto err;
3209 }
3210
3211 spin_lock_irq(&ndlp->lock);
3212 ndlp->nlp_prev_state = ndlp->nlp_state;
3213 spin_unlock_irq(&ndlp->lock);
3214 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3215 return 0;
3216
3217err:
3218 spin_lock_irq(&ndlp->lock);
3219 ndlp->nlp_flag &= ~NLP_LOGO_SND;
3220 spin_unlock_irq(&ndlp->lock);
3221 return 1;
3222}
3223
3224/**
3225 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
3226 * @phba: pointer to lpfc hba data structure.
3227 * @cmdiocb: pointer to lpfc command iocb data structure.
3228 * @rspiocb: pointer to lpfc response iocb data structure.
3229 *
3230 * This routine is a generic completion callback function for ELS commands.
3231 * Specifically, it is the callback function which does not need to perform
3232 * any command specific operations. It is currently used by the ELS command
3233 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel
3234 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr().
3235 * Other than certain debug logging, this callback function simply invokes the
3236 * lpfc_els_chk_latt() routine to check whether link went down during the
3237 * discovery process.
3238 **/
3239static void
3240lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3241 struct lpfc_iocbq *rspiocb)
3242{
3243 struct lpfc_vport *vport = cmdiocb->vport;
3244 struct lpfc_nodelist *free_ndlp;
3245 IOCB_t *irsp;
3246 u32 ulp_status, ulp_word4, tmo, did, iotag;
3247
3248 ulp_status = get_job_ulpstatus(phba, rspiocb);
3249 ulp_word4 = get_job_word4(phba, rspiocb);
3250 did = get_job_els_rsp64_did(phba, cmdiocb);
3251
3252 if (phba->sli_rev == LPFC_SLI_REV4) {
3253 tmo = get_wqe_tmo(cmdiocb);
3254 iotag = get_wqe_reqtag(cmdiocb);
3255 } else {
3256 irsp = &rspiocb->iocb;
3257 tmo = irsp->ulpTimeout;
3258 iotag = irsp->ulpIoTag;
3259 }
3260
3261 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3262 "ELS cmd cmpl: status:x%x/x%x did:x%x",
3263 ulp_status, ulp_word4, did);
3264
3265 /* ELS cmd tag <ulpIoTag> completes */
3266 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3267 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
3268 iotag, ulp_status, ulp_word4, tmo);
3269
3270 /* Check to see if link went down during discovery */
3271 lpfc_els_chk_latt(vport);
3272
3273 free_ndlp = cmdiocb->ndlp;
3274
3275 lpfc_els_free_iocb(phba, cmdiocb);
3276 lpfc_nlp_put(free_ndlp);
3277}
3278
3279/**
3280 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node.
3281 * @vport: pointer to lpfc_vport data structure.
3282 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node.
3283 *
3284 * This routine registers the rpi assigned to the fabric controller
3285 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED
3286 * state triggering a registration with the SCSI transport.
3287 *
3288 * This routine is singled out because the fabric controller node
3289 * does not receive a PLOGI. This routine is consumed by the
3290 * SCR and RDF ELS commands. Callers are expected to qualify
3291 * with SLI4 first.
3292 **/
3293static int
3294lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
3295{
3296 int rc = 0;
3297 struct lpfc_hba *phba = vport->phba;
3298 struct lpfc_nodelist *ns_ndlp;
3299 LPFC_MBOXQ_t *mbox;
3300
3301 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED)
3302 return rc;
3303
3304 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
3305 if (!ns_ndlp)
3306 return -ENODEV;
3307
3308 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3309 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n",
3310 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID,
3311 ns_ndlp->nlp_state);
3312 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
3313 return -ENODEV;
3314
3315 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3316 if (!mbox) {
3317 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3318 "0936 %s: no memory for reg_login "
3319 "Data: x%x x%x x%x x%x\n", __func__,
3320 fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
3321 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
3322 return -ENOMEM;
3323 }
3324 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID,
3325 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi);
3326 if (rc) {
3327 rc = -EACCES;
3328 goto out;
3329 }
3330
3331 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
3332 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login;
3333 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp);
3334 if (!mbox->ctx_ndlp) {
3335 rc = -ENOMEM;
3336 goto out;
3337 }
3338
3339 mbox->vport = vport;
3340 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3341 if (rc == MBX_NOT_FINISHED) {
3342 rc = -ENODEV;
3343 lpfc_nlp_put(fc_ndlp);
3344 goto out;
3345 }
3346 /* Success path. Exit. */
3347 lpfc_nlp_set_state(vport, fc_ndlp,
3348 NLP_STE_REG_LOGIN_ISSUE);
3349 return 0;
3350
3351 out:
3352 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
3353 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3354 "0938 %s: failed to format reg_login "
3355 "Data: x%x x%x x%x x%x\n", __func__,
3356 fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
3357 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
3358 return rc;
3359}
3360
3361/**
3362 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd
3363 * @phba: pointer to lpfc hba data structure.
3364 * @cmdiocb: pointer to lpfc command iocb data structure.
3365 * @rspiocb: pointer to lpfc response iocb data structure.
3366 *
3367 * This routine is a generic completion callback function for Discovery ELS cmd.
3368 * Currently used by the ELS command issuing routines for the ELS State Change
3369 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
3370 * These commands will be retried once only for ELS timeout errors.
3371 **/
3372static void
3373lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3374 struct lpfc_iocbq *rspiocb)
3375{
3376 struct lpfc_vport *vport = cmdiocb->vport;
3377 IOCB_t *irsp;
3378 struct lpfc_els_rdf_rsp *prdf;
3379 struct lpfc_dmabuf *pcmd, *prsp;
3380 u32 *pdata;
3381 u32 cmd;
3382 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
3383 u32 ulp_status, ulp_word4, tmo, did, iotag;
3384
3385 ulp_status = get_job_ulpstatus(phba, rspiocb);
3386 ulp_word4 = get_job_word4(phba, rspiocb);
3387 did = get_job_els_rsp64_did(phba, cmdiocb);
3388
3389 if (phba->sli_rev == LPFC_SLI_REV4) {
3390 tmo = get_wqe_tmo(cmdiocb);
3391 iotag = get_wqe_reqtag(cmdiocb);
3392 } else {
3393 irsp = &rspiocb->iocb;
3394 tmo = irsp->ulpTimeout;
3395 iotag = irsp->ulpIoTag;
3396 }
3397
3398 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3399 "ELS cmd cmpl: status:x%x/x%x did:x%x",
3400 ulp_status, ulp_word4, did);
3401
3402 /* ELS cmd tag <ulpIoTag> completes */
3403 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3404 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n",
3405 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry);
3406
3407 pcmd = cmdiocb->cmd_dmabuf;
3408 if (!pcmd)
3409 goto out;
3410
3411 pdata = (u32 *)pcmd->virt;
3412 if (!pdata)
3413 goto out;
3414 cmd = *pdata;
3415
3416 /* Only 1 retry for ELS Timeout only */
3417 if (ulp_status == IOSTAT_LOCAL_REJECT &&
3418 ((ulp_word4 & IOERR_PARAM_MASK) ==
3419 IOERR_SEQUENCE_TIMEOUT)) {
3420 cmdiocb->retry++;
3421 if (cmdiocb->retry <= 1) {
3422 switch (cmd) {
3423 case ELS_CMD_SCR:
3424 lpfc_issue_els_scr(vport, cmdiocb->retry);
3425 break;
3426 case ELS_CMD_EDC:
3427 lpfc_issue_els_edc(vport, cmdiocb->retry);
3428 break;
3429 case ELS_CMD_RDF:
3430 lpfc_issue_els_rdf(vport, cmdiocb->retry);
3431 break;
3432 }
3433 goto out;
3434 }
3435 phba->fc_stat.elsRetryExceeded++;
3436 }
3437 if (cmd == ELS_CMD_EDC) {
3438 /* must be called before checking ulp_status and returning */
3439 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
3440 return;
3441 }
3442 if (ulp_status) {
3443 /* ELS discovery cmd completes with error */
3444 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
3445 "4203 ELS cmd x%x error: x%x x%X\n", cmd,
3446 ulp_status, ulp_word4);
3447 goto out;
3448 }
3449
3450 /* The RDF response doesn't have any impact on the running driver
3451 * but the notification descriptors are dumped here for support.
3452 */
3453 if (cmd == ELS_CMD_RDF) {
3454 int i;
3455
3456 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
3457 if (!prsp)
3458 goto out;
3459
3460 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
3461 if (!prdf)
3462 goto out;
3463 if (!lpfc_is_els_acc_rsp(prsp))
3464 goto out;
3465
3466 for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
3467 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
3468 lpfc_printf_vlog(vport, KERN_INFO,
3469 LOG_ELS | LOG_CGN_MGMT,
3470 "4677 Fabric RDF Notification Grant "
3471 "Data: 0x%08x Reg: %x %x\n",
3472 be32_to_cpu(
3473 prdf->reg_d1.desc_tags[i]),
3474 phba->cgn_reg_signal,
3475 phba->cgn_reg_fpin);
3476 }
3477
3478out:
3479 /* Check to see if link went down during discovery */
3480 lpfc_els_chk_latt(vport);
3481 lpfc_els_free_iocb(phba, cmdiocb);
3482 lpfc_nlp_put(ndlp);
3483 return;
3484}
3485
3486/**
3487 * lpfc_issue_els_scr - Issue a SCR to a node on a vport
3488 * @vport: pointer to a host virtual N_Port data structure.
3489 * @retry: retry counter for the command IOCB.
3490 *
3491 * This routine issues a State Change Request (SCR) to a fabric node
3492 * on a @vport. The remote node is Fabric Controller (0xfffffd). It
3493 * first searches the @vport node list to find the matching ndlp. If no such
3494 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
3495 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
3496 * routine is invoked to send the SCR IOCB.
3497 *
3498 * Note that the ndlp reference count will be incremented by 1 for holding the
3499 * ndlp and the reference to ndlp will be stored into the ndlp field of
3500 * the IOCB for the completion callback function to the SCR ELS command.
3501 *
3502 * Return code
3503 * 0 - Successfully issued scr command
3504 * 1 - Failed to issue scr command
3505 **/
3506int
3507lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
3508{
3509 int rc = 0;
3510 struct lpfc_hba *phba = vport->phba;
3511 struct lpfc_iocbq *elsiocb;
3512 uint8_t *pcmd;
3513 uint16_t cmdsize;
3514 struct lpfc_nodelist *ndlp;
3515
3516 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
3517
3518 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
3519 if (!ndlp) {
3520 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
3521 if (!ndlp)
3522 return 1;
3523 lpfc_enqueue_node(vport, ndlp);
3524 }
3525
3526 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3527 ndlp->nlp_DID, ELS_CMD_SCR);
3528 if (!elsiocb)
3529 return 1;
3530
3531 if (phba->sli_rev == LPFC_SLI_REV4) {
3532 rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
3533 if (rc) {
3534 lpfc_els_free_iocb(phba, elsiocb);
3535 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3536 "0937 %s: Failed to reg fc node, rc %d\n",
3537 __func__, rc);
3538 return 1;
3539 }
3540 }
3541 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3542
3543 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
3544 pcmd += sizeof(uint32_t);
3545
3546 /* For SCR, remainder of payload is SCR parameter page */
3547 memset(pcmd, 0, sizeof(SCR));
3548 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
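 /* SCR_FUNC_FULL requests full registration, so the fabric reports all
  * RSCN events to this port rather than a filtered subset.
  */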
3549
3550 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3551 "Issue SCR: did:x%x",
3552 ndlp->nlp_DID, 0, 0);
3553
3554 phba->fc_stat.elsXmitSCR++;
3555 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
3556 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3557 if (!elsiocb->ndlp) {
3558 lpfc_els_free_iocb(phba, elsiocb);
3559 return 1;
3560 }
3561
3562 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3563 "Issue SCR: did:x%x refcnt %d",
3564 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3565
3566 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3567 if (rc == IOCB_ERROR) {
3568 lpfc_els_free_iocb(phba, elsiocb);
3569 lpfc_nlp_put(ndlp);
3570 return 1;
3571 }
3572
3573 return 0;
3574}
3575
3576/**
3577 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric)
3578 * or the other nport (pt2pt).
3579 * @vport: pointer to a host virtual N_Port data structure.
3580 * @retry: number of retries to the command IOCB.
3581 *
3582 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD)
3583 * when connected to a fabric, or to the remote port when connected
3584 * in point-to-point mode. When sent to the Fabric Controller, it will
3585 * replay the RSCN to registered recipients.
3586 *
3587 * Note that the ndlp reference count will be incremented by 1 for holding the
3588 * ndlp and the reference to ndlp will be stored into the ndlp field of
3589 * the IOCB for the completion callback function to the RSCN ELS command.
3590 *
3591 * Return code
3592 * 0 - Successfully issued RSCN command
3593 * 1 - Failed to issue RSCN command
3594 **/
3595int
3596lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
3597{
3598 int rc = 0;
3599 struct lpfc_hba *phba = vport->phba;
3600 struct lpfc_iocbq *elsiocb;
3601 struct lpfc_nodelist *ndlp;
3602 struct {
3603 struct fc_els_rscn rscn;
3604 struct fc_els_rscn_page portid;
3605 } *event;
3606 uint32_t nportid;
3607 uint16_t cmdsize = sizeof(*event);
3608
3609 /* Not supported for private loop */
3610 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
3611 !(vport->fc_flag & FC_PUBLIC_LOOP))
3612 return 1;
3613
3614 if (vport->fc_flag & FC_PT2PT) {
3615 /* find any mapped nport - that would be the other nport */
3616 ndlp = lpfc_findnode_mapped(vport);
3617 if (!ndlp)
3618 return 1;
3619 } else {
3620 nportid = FC_FID_FCTRL;
3621 /* find the fabric controller node */
3622 ndlp = lpfc_findnode_did(vport, nportid);
3623 if (!ndlp) {
3624 /* if one didn't exist, make one */
3625 ndlp = lpfc_nlp_init(vport, nportid);
3626 if (!ndlp)
3627 return 1;
3628 lpfc_enqueue_node(vport, ndlp);
3629 }
3630 }
3631
3632 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3633 ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
3634
3635 if (!elsiocb)
3636 return 1;
3637
3638 event = elsiocb->cmd_dmabuf->virt;
3639
3640 event->rscn.rscn_cmd = ELS_RSCN;
3641 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
3642 event->rscn.rscn_plen = cpu_to_be16(cmdsize);
3643
3644 nportid = vport->fc_myDID;
3645 /* appears that page flags must be 0 for fabric to broadcast RSCN */
3646 event->portid.rscn_page_flags = 0;
3647 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16;
3648 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
3649 event->portid.rscn_fid[2] = nportid & 0x000000FF;
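 /* The 24-bit local N_Port ID is packed big-endian into the three
  * rscn_fid bytes of the single RSCN page built above.
  */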
3650
3651 phba->fc_stat.elsXmitRSCN++;
3652 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
3653 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3654 if (!elsiocb->ndlp) {
3655 lpfc_els_free_iocb(phba, elsiocb);
3656 return 1;
3657 }
3658
3659 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3660 "Issue RSCN: did:x%x",
3661 ndlp->nlp_DID, 0, 0);
3662
3663 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3664 if (rc == IOCB_ERROR) {
3665 lpfc_els_free_iocb(phba, elsiocb);
3666 lpfc_nlp_put(ndlp);
3667 return 1;
3668 }
3669
3670 return 0;
3671}
3672
3673/**
3674 * lpfc_issue_els_farpr - Issue a FARPR to a node on a vport
3675 * @vport: pointer to a host virtual N_Port data structure.
3676 * @nportid: N_Port identifier to the remote node.
3677 * @retry: number of retries to the command IOCB.
3678 *
3679 * This routine issues a Fibre Channel Address Resolution Response
3680 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
3681 * is passed into the function. It first searches the @vport node list to find
3682 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
3683 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
3684 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
3685 *
3686 * Note that the ndlp reference count will be incremented by 1 for holding the
3687 * ndlp and the reference to ndlp will be stored into the ndlp field of
3688 * the IOCB for the completion callback function to the FARPR ELS command.
3689 *
3690 * Return code
3691 * 0 - Successfully issued farpr command
3692 * 1 - Failed to issue farpr command
3693 **/
3694static int
3695lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
3696{
3697 int rc = 0;
3698 struct lpfc_hba *phba = vport->phba;
3699 struct lpfc_iocbq *elsiocb;
3700 FARP *fp;
3701 uint8_t *pcmd;
3702 uint32_t *lp;
3703 uint16_t cmdsize;
3704 struct lpfc_nodelist *ondlp;
3705 struct lpfc_nodelist *ndlp;
3706
3707 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
3708
3709 ndlp = lpfc_findnode_did(vport, nportid);
3710 if (!ndlp) {
3711 ndlp = lpfc_nlp_init(vport, nportid);
3712 if (!ndlp)
3713 return 1;
3714 lpfc_enqueue_node(vport, ndlp);
3715 }
3716
3717 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3718 ndlp->nlp_DID, ELS_CMD_FARPR);
3719 if (!elsiocb)
3720 return 1;
3721
3722 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3723
3724 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
3725 pcmd += sizeof(uint32_t);
3726
3727 /* Fill in FARPR payload */
3728 fp = (FARP *) (pcmd);
3729 memset(fp, 0, sizeof(FARP));
3730 lp = (uint32_t *) pcmd;
3731 *lp++ = be32_to_cpu(nportid);
3732 *lp++ = be32_to_cpu(vport->fc_myDID);
3733 fp->Rflags = 0;
3734 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
3735
3736 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
3737 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3738 ondlp = lpfc_findnode_did(vport, nportid);
3739 if (ondlp) {
3740 memcpy(&fp->OportName, &ondlp->nlp_portname,
3741 sizeof(struct lpfc_name));
3742 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
3743 sizeof(struct lpfc_name));
3744 }
3745
3746 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3747 "Issue FARPR: did:x%x",
3748 ndlp->nlp_DID, 0, 0);
3749
3750 phba->fc_stat.elsXmitFARPR++;
3751 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
3752 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3753 if (!elsiocb->ndlp) {
3754 lpfc_els_free_iocb(phba, elsiocb);
3755 return 1;
3756 }
3757
3758 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3759 if (rc == IOCB_ERROR) {
3760 /* The additional lpfc_nlp_put will cause the following
3761 * lpfc_els_free_iocb routine to trigger the release of
3762 * the node.
3763 */
3764 lpfc_els_free_iocb(phba, elsiocb);
3765 lpfc_nlp_put(ndlp);
3766 return 1;
3767 }
3768 /* This will cause the callback-function lpfc_cmpl_els_cmd to
3769 * trigger the release of the node.
3770 */
3771 /* Don't release reference count as RDF is likely outstanding */
3772 return 0;
3773}
3774
3775/**
3776 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
3777 * @vport: pointer to a host virtual N_Port data structure.
3778 * @retry: retry counter for the command IOCB.
3779 *
3780 * This routine issues an ELS RDF to the Fabric Controller to register
3781 * for diagnostic functions.
3782 *
3783 * Note that the ndlp reference count will be incremented by 1 for holding the
3784 * ndlp and the reference to ndlp will be stored into the ndlp field of
3785 * the IOCB for the completion callback function to the RDF ELS command.
3786 *
3787 * Return code
3788 * 0 - Successfully issued rdf command
3789 * 1 - Failed to issue rdf command
3790 **/
3791int
3792lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
3793{
3794 struct lpfc_hba *phba = vport->phba;
3795 struct lpfc_iocbq *elsiocb;
3796 struct lpfc_els_rdf_req *prdf;
3797 struct lpfc_nodelist *ndlp;
3798 uint16_t cmdsize;
3799 int rc;
3800
3801 cmdsize = sizeof(*prdf);
3802
3803 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
3804 if (!ndlp) {
3805 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
3806 if (!ndlp)
3807 return -ENODEV;
3808 lpfc_enqueue_node(vport, ndlp);
3809 }
3810
3811 /* RDF ELS is not required on an NPIV VN_Port. */
3812 if (vport->port_type == LPFC_NPIV_PORT)
3813 return -EACCES;
3814
3815 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3816 ndlp->nlp_DID, ELS_CMD_RDF);
3817 if (!elsiocb)
3818 return -ENOMEM;
3819
3820 /* Configure the payload for the supported FPIN events. */
3821 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
3822 memset(prdf, 0, cmdsize);
3823 prdf->rdf.fpin_cmd = ELS_RDF;
3824 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
3825 sizeof(struct fc_els_rdf));
3826 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
3827 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
3828 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
3829 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
3830 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
3831 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
3832 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
3833 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
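 /* The four registered descriptor tags ask the fabric for FPINs covering
  * link integrity, delivery, peer congestion, and congestion events.
  */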
3834
3835 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3836 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n",
3837 ndlp->nlp_DID, phba->cgn_reg_signal,
3838 phba->cgn_reg_fpin);
3839
3840 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
3841 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
3842 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3843 if (!elsiocb->ndlp) {
3844 lpfc_els_free_iocb(phba, elsiocb);
3845 return -EIO;
3846 }
3847
3848 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3849 "Issue RDF: did:x%x refcnt %d",
3850 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3851
3852 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3853 if (rc == IOCB_ERROR) {
3854 lpfc_els_free_iocb(phba, elsiocb);
3855 lpfc_nlp_put(ndlp);
3856 return -EIO;
3857 }
3858 return 0;
3859}
3860
3861 /**
3862 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric.
3863 * @vport: pointer to a host virtual N_Port data structure.
3864 * @cmdiocb: pointer to lpfc command iocb data structure.
3865 * @ndlp: pointer to a node-list data structure.
3866 *
3867 * A received RDF implies a possible change to fabric supported diagnostic
3868 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new
3869 * RDF request to reregister for supported diagnostic functions.
3870 *
3871 * Return code
3872 * 0 - Success
3873 * -EIO - Failed to process received RDF
3874 **/
3875static int
3876lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3877 struct lpfc_nodelist *ndlp)
3878{
3879 /* Send LS_ACC */
3880 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
3881 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3882 "1623 Failed to RDF_ACC from x%x for x%x\n",
3883 ndlp->nlp_DID, vport->fc_myDID);
3884 return -EIO;
3885 }
3886
3887 /* Issue new RDF for reregistering */
3888 if (lpfc_issue_els_rdf(vport, 0)) {
3889 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3890 "2623 Failed to re register RDF for x%x\n",
3891 vport->fc_myDID);
3892 return -EIO;
3893 }
3894
3895 return 0;
3896}
3897
3898/**
3899 * lpfc_least_capable_settings - helper function for EDC rsp processing
3900 * @phba: pointer to lpfc hba data structure.
3901 * @pcgd: pointer to congestion detection descriptor in EDC rsp.
3902 *
3903 * This helper routine determines the least capable setting for
3904 * congestion signals, signal freq, including scale, from the
3905 * congestion detection descriptor in the EDC rsp. The routine
3906 * sets @phba values in preparation for a set_features mailbox.
3907 **/
3908static void
3909lpfc_least_capable_settings(struct lpfc_hba *phba,
3910 struct fc_diag_cg_sig_desc *pcgd)
3911{
3912 u32 rsp_sig_cap = 0, drv_sig_cap = 0;
3913 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
3914
3915 /* Get rsp signal and frequency capabilities. */
3916 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
3917 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
3918 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
3919
3920 /* If the Fport does not support signals, set FPIN only */
3921 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
3922 goto out_no_support;
3923
3924 /* Apply the xmt scale to the xmt cycle to get the correct frequency.
3925 * Adapter default is 100 milliseconds. Convert all xmt cycle values
3926 * to milliseconds.
3927 */
3928 switch (rsp_sig_freq_scale) {
3929 case EDC_CG_SIGFREQ_SEC:
3930 rsp_sig_freq_cyc *= MSEC_PER_SEC;
3931 break;
3932 case EDC_CG_SIGFREQ_MSEC:
3933 rsp_sig_freq_cyc = 1;
3934 break;
3935 default:
3936 goto out_no_support;
3937 }
3938
3939 /* Convenient shorthand. */
3940 drv_sig_cap = phba->cgn_reg_signal;
3941
3942 /* Choose the least capable frequency. */
3943 if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
3944 phba->cgn_sig_freq = rsp_sig_freq_cyc;
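 /* Example: a fabric response of 1 second scales to 1000 ms above and,
  * being slower than the adapter default of 100 ms, becomes the
  * negotiated (least capable) signal frequency here.
  */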
3945
3946 /* There should be some common signal support. Settle on the least capable
3947 * signal and adjust FPIN values. Initialize defaults to ease the
3948 * decision.
3949 */
3950 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
3951 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
3952 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY &&
3953 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY ||
3954 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) {
3955 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
3956 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
3957 }
3958 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) {
3959 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) {
3960 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM;
3961 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE;
3962 }
3963 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) {
3964 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
3965 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
3966 }
3967 }
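 /* Net effect: WARN_ALARM is kept only when both sides support it;
  * otherwise the match degrades to WARN_ONLY, and anything not covered
  * by signals remains registered for FPIN notifications.
  */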
3968
3969 /* We are NOT recording signal frequency in congestion info buffer */
3970 return;
3971
3972out_no_support:
3973 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
3974 phba->cgn_sig_freq = 0;
3975 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
3976}
3977
3978DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
3979 FC_LS_TLV_DTAG_INIT);
3980
3981/**
3982 * lpfc_cmpl_els_edc - Completion callback function for EDC
3983 * @phba: pointer to lpfc hba data structure.
3984 * @cmdiocb: pointer to lpfc command iocb data structure.
3985 * @rspiocb: pointer to lpfc response iocb data structure.
3986 *
3987 * This routine is the completion callback function for issuing the Exchange
3988 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to
3989 * notify the FPort of its Congestion and Link Fault capabilities. This
3990 * routine parses the FPort's response and decides on the least common
3991 * values applicable to both FPort and NPort for Warnings and Alarms that
3992 * are communicated via hardware signals.
3993 **/
3994static void
3995lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3996 struct lpfc_iocbq *rspiocb)
3997{
3998 IOCB_t *irsp_iocb;
3999 struct fc_els_edc_resp *edc_rsp;
4000 struct fc_tlv_desc *tlv;
4001 struct fc_diag_cg_sig_desc *pcgd;
4002 struct fc_diag_lnkflt_desc *plnkflt;
4003 struct lpfc_dmabuf *pcmd, *prsp;
4004 const char *dtag_nm;
4005 u32 *pdata, dtag;
4006 int desc_cnt = 0, bytes_remain;
4007 bool rcv_cap_desc = false;
4008 struct lpfc_nodelist *ndlp;
4009 u32 ulp_status, ulp_word4, tmo, did, iotag;
4010
4011 ndlp = cmdiocb->ndlp;
4012
4013 ulp_status = get_job_ulpstatus(phba, rspiocb);
4014 ulp_word4 = get_job_word4(phba, rspiocb);
4015 did = get_job_els_rsp64_did(phba, rspiocb);
4016
4017 if (phba->sli_rev == LPFC_SLI_REV4) {
4018 tmo = get_wqe_tmo(rspiocb);
4019 iotag = get_wqe_reqtag(rspiocb);
4020 } else {
4021 irsp_iocb = &rspiocb->iocb;
4022 tmo = irsp_iocb->ulpTimeout;
4023 iotag = irsp_iocb->ulpIoTag;
4024 }
4025
4026 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
4027 "EDC cmpl: status:x%x/x%x did:x%x",
4028 ulp_status, ulp_word4, did);
4029
4030 /* ELS cmd tag <ulpIoTag> completes */
4031 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4032 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n",
4033 iotag, ulp_status, ulp_word4, tmo);
4034
4035 pcmd = cmdiocb->cmd_dmabuf;
4036 if (!pcmd)
4037 goto out;
4038
4039 pdata = (u32 *)pcmd->virt;
4040 if (!pdata)
4041 goto out;
4042
4043 /* Need to clear signal values, send features MB and RDF with FPIN. */
4044 if (ulp_status)
4045 goto out;
4046
4047 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
4048 if (!prsp)
4049 goto out;
4050
4051 edc_rsp = prsp->virt;
4052 if (!edc_rsp)
4053 goto out;
4054
4055 /* ELS cmd tag <ulpIoTag> completes */
4056 lpfc_printf_log(phba, KERN_INFO,
4057 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
4058 "4676 Fabric EDC Rsp: "
4059 "0x%02x, 0x%08x\n",
4060 edc_rsp->acc_hdr.la_cmd,
4061 be32_to_cpu(edc_rsp->desc_list_len));
4062
4063 if (!lpfc_is_els_acc_rsp(prsp))
4064 goto out;
4065
4066 /*
4067 * Payload length in bytes is the response descriptor list
4068 * length minus the 12 bytes of Link Service Request
4069 * Information descriptor in the reply.
4070 */
4071 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) -
4072 sizeof(struct fc_els_lsri_desc);
4073 if (bytes_remain <= 0)
4074 goto out;
4075
4076 tlv = edc_rsp->desc;
4077
4078 /*
4079 * cycle through EDC diagnostic descriptors to find the
4080 * congestion signaling capability descriptor
4081 */
4082 while (bytes_remain) {
4083 if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
4084 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
4085 "6461 Truncated TLV hdr on "
4086 "Diagnostic descriptor[%d]\n",
4087 desc_cnt);
4088 goto out;
4089 }
4090
4091 dtag = be32_to_cpu(tlv->desc_tag);
4092 switch (dtag) {
4093 case ELS_DTAG_LNK_FAULT_CAP:
4094 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
4095 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
4096 sizeof(struct fc_diag_lnkflt_desc)) {
4097 lpfc_printf_log(phba, KERN_WARNING,
4098 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
4099 "6462 Truncated Link Fault Diagnostic "
4100 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
4101 desc_cnt, bytes_remain,
4102 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
4103 sizeof(struct fc_diag_lnkflt_desc));
4104 goto out;
4105 }
4106 plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
4107 lpfc_printf_log(phba, KERN_INFO,
4108 LOG_ELS | LOG_LDS_EVENT,
4109 "4617 Link Fault Desc Data: 0x%08x 0x%08x "
4110 "0x%08x 0x%08x 0x%08x\n",
4111 be32_to_cpu(plnkflt->desc_tag),
4112 be32_to_cpu(plnkflt->desc_len),
4113 be32_to_cpu(
4114 plnkflt->degrade_activate_threshold),
4115 be32_to_cpu(
4116 plnkflt->degrade_deactivate_threshold),
4117 be32_to_cpu(plnkflt->fec_degrade_interval));
4118 break;
4119 case ELS_DTAG_CG_SIGNAL_CAP:
4120 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
4121 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
4122 sizeof(struct fc_diag_cg_sig_desc)) {
4123 lpfc_printf_log(
4124 phba, KERN_WARNING, LOG_CGN_MGMT,
4125 "6463 Truncated Cgn Signal Diagnostic "
4126 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
4127 desc_cnt, bytes_remain,
4128 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
4129 sizeof(struct fc_diag_cg_sig_desc));
4130 goto out;
4131 }
4132
4133 pcgd = (struct fc_diag_cg_sig_desc *)tlv;
4134 lpfc_printf_log(
4135 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4136 "4616 CGN Desc Data: 0x%08x 0x%08x "
4137 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n",
4138 be32_to_cpu(pcgd->desc_tag),
4139 be32_to_cpu(pcgd->desc_len),
4140 be32_to_cpu(pcgd->xmt_signal_capability),
4141 be16_to_cpu(pcgd->xmt_signal_frequency.count),
4142 be16_to_cpu(pcgd->xmt_signal_frequency.units),
4143 be32_to_cpu(pcgd->rcv_signal_capability),
4144 be16_to_cpu(pcgd->rcv_signal_frequency.count),
4145 be16_to_cpu(pcgd->rcv_signal_frequency.units));
4146
4147 /* Compare driver and Fport capabilities and choose
4148 * least common.
4149 */
4150 lpfc_least_capable_settings(phba, pcgd);
4151 rcv_cap_desc = true;
4152 break;
4153 default:
4154 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
4155 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
4156 "4919 unknown Diagnostic "
4157 "Descriptor[%d]: tag x%x (%s)\n",
4158 desc_cnt, dtag, dtag_nm);
4159 }
4160
4161 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
4162 tlv = fc_tlv_next_desc(tlv);
4163 desc_cnt++;
4164 }
4165
4166out:
4167 if (!rcv_cap_desc) {
4168 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
4169 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
4170 phba->cgn_sig_freq = 0;
4171 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
4172 "4202 EDC rsp error - sending RDF "
4173 "for FPIN only.\n");
4174 }
4175
4176 lpfc_config_cgn_signal(phba);
4177
4178 /* Check to see if link went down during discovery */
4179 lpfc_els_chk_latt(phba->pport);
4180 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
4181 "EDC Cmpl: did:x%x refcnt %d",
4182 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
4183 lpfc_els_free_iocb(phba, cmdiocb);
4184 lpfc_nlp_put(ndlp);
4185}
4186
4187static void
4188lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
4189{
4190 struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv;
4191
4192 lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP);
4193 lft->desc_len = cpu_to_be32(
4194 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc));
4195
4196 lft->degrade_activate_threshold =
4197 cpu_to_be32(phba->degrade_activate_threshold);
4198 lft->degrade_deactivate_threshold =
4199 cpu_to_be32(phba->degrade_deactivate_threshold);
4200 lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval);
4201}
4202
4203static void
4204lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
4205{
4206 struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv;
4207
4208 /* We are assuming cgd was zero'ed before calling this routine */
4209
4210 /* Configure the congestion detection capability */
4211 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP);
4212
4213 /* Descriptor len doesn't include the tag or len fields. */
4214 cgd->desc_len = cpu_to_be32(
4215 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc));
4216
4217 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
4218 * xmt_signal_frequency.count already set to 0.
4219 * xmt_signal_frequency.units already set to 0.
4220 */
4221
4222 if (phba->cmf_active_mode == LPFC_CFG_OFF) {
4223 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
4224 * rcv_signal_frequency.count already set to 0.
4225 * rcv_signal_frequency.units already set to 0.
4226 */
4227 phba->cgn_sig_freq = 0;
4228 return;
4229 }
4230 switch (phba->cgn_reg_signal) {
4231 case EDC_CG_SIG_WARN_ONLY:
4232 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY);
4233 break;
4234 case EDC_CG_SIG_WARN_ALARM:
4235 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM);
4236 break;
4237 default:
4238 /* rcv_signal_capability left 0 thus no support */
4239 break;
4240 }
4241
4242 /* We start negotiation with lpfc_fabric_cgn_frequency, after
4243 * the completion we settle on the higher frequency.
4244 */
4245 cgd->rcv_signal_frequency.count =
4246 cpu_to_be16(lpfc_fabric_cgn_frequency);
4247 cgd->rcv_signal_frequency.units =
4248 cpu_to_be16(EDC_CG_SIGFREQ_MSEC);
4249}
4250
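/* Link Degrade Signaling is only advertised when the adapter and link are
 * 64Gb capable on an SLI-4 port; trunked configurations qualify on the
 * configured physical link speed, non-trunked ones on the negotiated speed.
 */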
4251static bool
4252lpfc_link_is_lds_capable(struct lpfc_hba *phba)
4253{
4254 if (!(phba->lmt & LMT_64Gb))
4255 return false;
4256 if (phba->sli_rev != LPFC_SLI_REV4)
4257 return false;
4258
4259 if (phba->sli4_hba.conf_trunk) {
4260 if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G)
4261 return true;
4262 } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) {
4263 return true;
4264 }
4265 return false;
4266}
4267
4268 /**
4269 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric.
4270 * @vport: pointer to a host virtual N_Port data structure.
4271 * @retry: retry counter for the command iocb.
4272 *
4273 * This routine issues an ELS EDC to the F-Port Controller to communicate
4274 * this N_Port's support of hardware signals in its Congestion
4275 * Capabilities Descriptor.
4276 *
4277 * Note: This routine does not check if one or more signals are
4278 * set in the cgn_reg_signal parameter. The caller makes the
4279 * decision to enforce cgn_reg_signal as nonzero or zero depending
4280 * on the conditions. During Fabric requests, the driver
4281 * requires cgn_reg_signals to be nonzero. But a dynamic request
4282 * to set the congestion mode to OFF from Monitor or Manage
4283 * would correctly issue an EDC with no signals enabled to
4284 * turn off switch functionality and then update the FW.
4285 *
4286 * Return code
4287 * 0 - Successfully issued edc command
4288 * 1 - Failed to issue edc command
4289 **/
4290int
4291lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
4292{
4293 struct lpfc_hba *phba = vport->phba;
4294 struct lpfc_iocbq *elsiocb;
4295 struct fc_els_edc *edc_req;
4296 struct fc_tlv_desc *tlv;
4297 u16 cmdsize;
4298 struct lpfc_nodelist *ndlp;
4299 u8 *pcmd = NULL;
4300 u32 cgn_desc_size, lft_desc_size;
4301 int rc;
4302
4303 if (vport->port_type == LPFC_NPIV_PORT)
4304 return -EACCES;
4305
4306 ndlp = lpfc_findnode_did(vport, Fabric_DID);
4307 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
4308 return -ENODEV;
4309
4310 cgn_desc_size = (phba->cgn_init_reg_signal) ?
4311 sizeof(struct fc_diag_cg_sig_desc) : 0;
4312 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
4313 sizeof(struct fc_diag_lnkflt_desc) : 0;
4314 cmdsize = cgn_desc_size + lft_desc_size;
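 /* cmdsize so far counts only the optional descriptors; the fc_els_edc
  * header is added below once at least one descriptor applies.
  */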
4315
4316 /* Skip EDC if no applicable descriptors */
4317 if (!cmdsize)
4318 goto try_rdf;
4319
4320 cmdsize += sizeof(struct fc_els_edc);
4321 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
4322 ndlp->nlp_DID, ELS_CMD_EDC);
4323 if (!elsiocb)
4324 goto try_rdf;
4325
4326 /* Configure the payload for the supported Diagnostics capabilities. */
4327 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
4328 memset(pcmd, 0, cmdsize);
4329 edc_req = (struct fc_els_edc *)pcmd;
4330 edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size);
4331 edc_req->edc_cmd = ELS_EDC;
4332 tlv = edc_req->desc;
4333
4334 if (cgn_desc_size) {
4335 lpfc_format_edc_cgn_desc(phba, tlv);
4336 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
4337 tlv = fc_tlv_next_desc(tlv);
4338 }
4339
4340 if (lft_desc_size)
4341 lpfc_format_edc_lft_desc(phba, tlv);
4342
4343 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4344 "4623 Xmit EDC to remote "
4345 "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
4346 ndlp->nlp_DID, phba->cgn_reg_signal,
4347 phba->cgn_reg_fpin);
4348
4349 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
4350 elsiocb->ndlp = lpfc_nlp_get(ndlp);
4351 if (!elsiocb->ndlp) {
4352 lpfc_els_free_iocb(phba, elsiocb);
4353 return -EIO;
4354 }
4355
4356 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4357 "Issue EDC: did:x%x refcnt %d",
4358 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
4359 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4360 if (rc == IOCB_ERROR) {
4361 /* The additional lpfc_nlp_put will cause the following
4362 * lpfc_els_free_iocb routine to trigger the release of
4363 * the node.
4364 */
4365 lpfc_els_free_iocb(phba, elsiocb);
4366 lpfc_nlp_put(ndlp);
4367 goto try_rdf;
4368 }
4369 return 0;
4370try_rdf:
4371 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
4372 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
4373 rc = lpfc_issue_els_rdf(vport, 0);
4374 return rc;
4375}
4376
4377/**
4378 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
4379 * @vport: pointer to a host virtual N_Port data structure.
4380 * @nlp: pointer to a node-list data structure.
4381 *
4382 * This routine cancels the timer with a delayed IOCB-command retry for
4383 * a @vport's @ndlp. It stops the timer for the delayed function retry and
4384 * removes the ELS retry event if one is present. In addition, if the
4385 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
4386 * commands are sent for the @vport's nodes that require issuing discovery
4387 * ADISC.
4388 **/
4389void
4390lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
4391{
4392 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4393 struct lpfc_work_evt *evtp;
4394
4395 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
4396 return;
4397 spin_lock_irq(&nlp->lock);
4398 nlp->nlp_flag &= ~NLP_DELAY_TMO;
4399 spin_unlock_irq(&nlp->lock);
4400 del_timer_sync(&nlp->nlp_delayfunc);
4401 nlp->nlp_last_elscmd = 0;
4402 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
4403 list_del_init(&nlp->els_retry_evt.evt_listp);
4404 /* Decrement nlp reference count held for the delayed retry */
4405 evtp = &nlp->els_retry_evt;
4406 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
4407 }
4408 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
4409 spin_lock_irq(&nlp->lock);
4410 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
4411 spin_unlock_irq(&nlp->lock);
4412 if (vport->num_disc_nodes) {
4413 if (vport->port_state < LPFC_VPORT_READY) {
4414 /* Check if there are more ADISCs to be sent */
4415 lpfc_more_adisc(vport);
4416 } else {
4417 /* Check if there are more PLOGIs to be sent */
4418 lpfc_more_plogi(vport);
4419 if (vport->num_disc_nodes == 0) {
4420 spin_lock_irq(shost->host_lock);
4421 vport->fc_flag &= ~FC_NDISC_ACTIVE;
4422 spin_unlock_irq(shost->host_lock);
4423 lpfc_can_disctmo(vport);
4424 lpfc_end_rscn(vport);
4425 }
4426 }
4427 }
4428 }
4429 return;
4430}
4431
4432/**
4433 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
4434 * @t: pointer to the timer function associated data (ndlp).
4435 *
4436 * This routine is invoked by the ndlp delayed-function timer to check
4437 * whether there is any pending ELS retry event(s) with the node. If not, it
4438 * simply returns. Otherwise, if there is at least one ELS delayed event, it
4439 * adds the delayed events to the HBA work list and invokes the
4440 * lpfc_worker_wake_up() routine to wake up worker thread to process the
4441 * event. Note that lpfc_nlp_get() is called before posting the event to
4442 * the work list to hold reference count of ndlp so that it guarantees the
4443 * reference to ndlp will still be available when the worker thread gets
4444 * to the event associated with the ndlp.
4445 **/
4446void
4447lpfc_els_retry_delay(struct timer_list *t)
4448{
4449 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
4450 struct lpfc_vport *vport = ndlp->vport;
4451 struct lpfc_hba *phba = vport->phba;
4452 unsigned long flags;
4453 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
4454
4455 spin_lock_irqsave(&phba->hbalock, flags);
4456 if (!list_empty(&evtp->evt_listp)) {
4457 spin_unlock_irqrestore(&phba->hbalock, flags);
4458 return;
4459 }
4460
4461 /* We need to hold the node by incrementing the reference
4462 * count until the queued work is done
4463 */
4464 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
4465 if (evtp->evt_arg1) {
4466 evtp->evt = LPFC_EVT_ELS_RETRY;
4467 list_add_tail(&evtp->evt_listp, &phba->work_list);
4468 lpfc_worker_wake_up(phba);
4469 }
4470 spin_unlock_irqrestore(&phba->hbalock, flags);
4471 return;
4472}
4473
4474/**
4475 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
4476 * @ndlp: pointer to a node-list data structure.
4477 *
4478 * This routine is the worker-thread handler for processing the @ndlp delayed
4479 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
4480 * the last ELS command from the associated ndlp and invokes the proper ELS
4481 * function according to the delayed ELS command to retry the command.
4482 **/
4483void
4484lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
4485{
4486 struct lpfc_vport *vport = ndlp->vport;
4487 uint32_t cmd, retry;
4488
4489 spin_lock_irq(&ndlp->lock);
4490 cmd = ndlp->nlp_last_elscmd;
4491 ndlp->nlp_last_elscmd = 0;
4492
4493 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
4494 spin_unlock_irq(&ndlp->lock);
4495 return;
4496 }
4497
4498 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
4499 spin_unlock_irq(&ndlp->lock);
4500 /*
4501 * If a discovery event readded nlp_delayfunc after timer
4502 * firing and before processing the timer, cancel the
4503 * nlp_delayfunc.
4504 */
4505 del_timer_sync(&ndlp->nlp_delayfunc);
4506 retry = ndlp->nlp_retry;
4507 ndlp->nlp_retry = 0;
4508
4509 switch (cmd) {
4510 case ELS_CMD_FLOGI:
4511 lpfc_issue_els_flogi(vport, ndlp, retry);
4512 break;
4513 case ELS_CMD_PLOGI:
4514 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
4515 ndlp->nlp_prev_state = ndlp->nlp_state;
4516 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4517 }
4518 break;
4519 case ELS_CMD_ADISC:
4520 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
4521 ndlp->nlp_prev_state = ndlp->nlp_state;
4522 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4523 }
4524 break;
4525 case ELS_CMD_PRLI:
4526 case ELS_CMD_NVMEPRLI:
4527 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
4528 ndlp->nlp_prev_state = ndlp->nlp_state;
4529 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
4530 }
4531 break;
4532 case ELS_CMD_LOGO:
4533 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
4534 ndlp->nlp_prev_state = ndlp->nlp_state;
4535 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
4536 }
4537 break;
4538 case ELS_CMD_FDISC:
4539 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
4540 lpfc_issue_els_fdisc(vport, ndlp, retry);
4541 break;
4542 }
4543 return;
4544}
4545
4546/**
4547 * lpfc_link_reset - Issue link reset
4548 * @vport: pointer to a virtual N_Port data structure.
4549 *
4550 * This routine performs link reset by sending INIT_LINK mailbox command.
4551 * For SLI-3 adapter, link attention interrupt is enabled before issuing
4552 * INIT_LINK mailbox command.
4553 *
4554 * Return code
4555 * 0 - Link reset initiated successfully
4556 * 1 - Failed to initiate link reset
4557 **/
4558int
4559lpfc_link_reset(struct lpfc_vport *vport)
4560{
4561 struct lpfc_hba *phba = vport->phba;
4562 LPFC_MBOXQ_t *mbox;
4563 uint32_t control;
4564 int rc;
4565
4566 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4567 "2851 Attempt link reset\n");
4568 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4569 if (!mbox) {
4570 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4571 "2852 Failed to allocate mbox memory");
4572 return 1;
4573 }
4574
4575 /* Enable Link attention interrupts */
4576 if (phba->sli_rev <= LPFC_SLI_REV3) {
4577 spin_lock_irq(&phba->hbalock);
4578 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4579 control = readl(phba->HCregaddr);
4580 control |= HC_LAINT_ENA;
4581 writel(control, phba->HCregaddr);
4582 readl(phba->HCregaddr); /* flush */
4583 spin_unlock_irq(&phba->hbalock);
4584 }
4585
4586 lpfc_init_link(phba, mbox, phba->cfg_topology,
4587 phba->cfg_link_speed);
4588 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4589 mbox->vport = vport;
4590 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4591 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4592 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4593 "2853 Failed to issue INIT_LINK "
4594 "mbox command, rc:x%x\n", rc);
4595 mempool_free(mbox, phba->mbox_mem_pool);
4596 return 1;
4597 }
4598
4599 return 0;
4600}
4601
4602/**
4603 * lpfc_els_retry - Make retry decision on an els command iocb
4604 * @phba: pointer to lpfc hba data structure.
4605 * @cmdiocb: pointer to lpfc command iocb data structure.
4606 * @rspiocb: pointer to lpfc response iocb data structure.
4607 *
4608 * This routine makes a retry decision on an ELS command IOCB, which has
4609 * failed. The following ELS IOCBs use this function for retrying the command
4610 * when a previously issued command responded with an error status: FLOGI,
4611 * PLOGI, PRLI, ADISC and FDISC. Based on the ELS command type and the
4612 * returned error status, it makes the decision whether a retry shall be
4613 * issued for the command, and whether a retry shall be made immediately or
4614 * delayed. In the former case, the corresponding ELS command issuing-function
4615 * is called to retry the command. In the latter case, the ELS command shall
4616 * be posted to the ndlp delayed event and the delayed function timer set on
4617 * the ndlp for issuing the delayed command later.
4618 *
4619 * Return code
4620 * 0 - No retry of els command is made
4621 * 1 - Immediate or delayed retry of els command is made
4622 **/
4623static int
4624lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4625 struct lpfc_iocbq *rspiocb)
4626{
4627 struct lpfc_vport *vport = cmdiocb->vport;
4628 union lpfc_wqe128 *irsp = &rspiocb->wqe;
4629 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
4630 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
4631 uint32_t *elscmd;
4632 struct ls_rjt stat;
4633 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
4634 int logerr = 0;
4635 uint32_t cmd = 0;
4636 uint32_t did;
4637 int link_reset = 0, rc;
4638 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
4639 u32 ulp_word4 = get_job_word4(phba, rspiocb);
4640
4641
4642 /* Note: cmd_dmabuf may be 0 for internal driver abort
4643 * of a delayed ELS command.
4644 */
4645
4646 if (pcmd && pcmd->virt) {
4647 elscmd = (uint32_t *) (pcmd->virt);
4648 cmd = *elscmd++;
4649 }
4650
4651 if (ndlp)
4652 did = ndlp->nlp_DID;
4653 else {
4654 /* We should only hit this case for retrying PLOGI */
4655 did = get_job_els_rsp64_did(phba, rspiocb);
4656 ndlp = lpfc_findnode_did(vport, did);
4657 if (!ndlp && (cmd != ELS_CMD_PLOGI))
4658 return 0;
4659 }
4660
4661 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4662 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
4663 *(((uint32_t *)irsp) + 7), ulp_word4, did);
4664
4665 switch (ulp_status) {
4666 case IOSTAT_FCP_RSP_ERROR:
4667 break;
4668 case IOSTAT_REMOTE_STOP:
4669 if (phba->sli_rev == LPFC_SLI_REV4) {
4670 /* This IO was aborted by the target, we don't
4671 * know the rxid and because we did not send the
4672 * ABTS we cannot generate an RRQ.
4673 */
4674 lpfc_set_rrq_active(phba, ndlp,
4675 cmdiocb->sli4_lxritag, 0, 0);
4676 }
4677 break;
4678 case IOSTAT_LOCAL_REJECT:
4679 switch ((ulp_word4 & IOERR_PARAM_MASK)) {
4680 case IOERR_LOOP_OPEN_FAILURE:
4681 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
4682 delay = 1000;
4683 retry = 1;
4684 break;
4685
4686 case IOERR_ILLEGAL_COMMAND:
4687 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4688 "0124 Retry illegal cmd x%x "
4689 "retry:x%x delay:x%x\n",
4690 cmd, cmdiocb->retry, delay);
4691 retry = 1;
4692 /* All command's retry policy */
4693 maxretry = 8;
4694 if (cmdiocb->retry > 2)
4695 delay = 1000;
4696 break;
4697
4698 case IOERR_NO_RESOURCES:
4699 logerr = 1; /* HBA out of resources */
4700 retry = 1;
4701 if (cmdiocb->retry > 100)
4702 delay = 100;
4703 maxretry = 250;
4704 break;
4705
4706 case IOERR_ILLEGAL_FRAME:
4707 delay = 100;
4708 retry = 1;
4709 break;
4710
4711 case IOERR_INVALID_RPI:
4712 if (cmd == ELS_CMD_PLOGI &&
4713 did == NameServer_DID) {
4714 /* Continue forever if plogi to */
4715 /* the nameserver fails */
4716 maxretry = 0;
4717 delay = 100;
4718 } else if (cmd == ELS_CMD_PRLI &&
4719 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
4720 /* State-command disagreement. The PRLI was
4721 * failed with an invalid rpi meaning there was
4722 * some unexpected state change. Don't retry.
4723 */
4724 maxretry = 0;
4725 retry = 0;
4726 break;
4727 }
4728 retry = 1;
4729 break;
4730
4731 case IOERR_SEQUENCE_TIMEOUT:
4732 if (cmd == ELS_CMD_PLOGI &&
4733 did == NameServer_DID &&
4734 (cmdiocb->retry + 1) == maxretry) {
4735 /* Reset the Link */
4736 link_reset = 1;
4737 break;
4738 }
4739 retry = 1;
4740 delay = 100;
4741 break;
4742 case IOERR_SLI_ABORTED:
4743 /* Retry ELS PLOGI command?
4744 * Possibly the rport just wasn't ready.
4745 */
4746 if (cmd == ELS_CMD_PLOGI) {
4747 /* No retry if state change */
4748 if (ndlp &&
4749 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
4750 goto out_retry;
4751 retry = 1;
4752 maxretry = 2;
4753 }
4754 break;
4755 }
4756 break;
4757
4758 case IOSTAT_NPORT_RJT:
4759 case IOSTAT_FABRIC_RJT:
4760 if (ulp_word4 & RJT_UNAVAIL_TEMP) {
4761 retry = 1;
4762 break;
4763 }
4764 break;
4765
4766 case IOSTAT_NPORT_BSY:
4767 case IOSTAT_FABRIC_BSY:
4768 logerr = 1; /* Fabric / Remote NPort out of resources */
4769 retry = 1;
4770 break;
4771
4772 case IOSTAT_LS_RJT:
4773 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
4774 /* Added for vendor specific support
4775 * Just keep retrying for these Rsn / Exp codes
4776 */
4777 if ((vport->fc_flag & FC_PT2PT) &&
4778 cmd == ELS_CMD_NVMEPRLI) {
4779 switch (stat.un.b.lsRjtRsnCode) {
4780 case LSRJT_UNABLE_TPC:
4781 case LSRJT_INVALID_CMD:
4782 case LSRJT_LOGICAL_ERR:
4783 case LSRJT_CMD_UNSUPPORTED:
4784 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
4785 "0168 NVME PRLI LS_RJT "
4786 "reason %x port doesn't "
4787 "support NVME, disabling NVME\n",
4788 stat.un.b.lsRjtRsnCode);
4789 retry = 0;
4790 vport->fc_flag |= FC_PT2PT_NO_NVME;
4791 goto out_retry;
4792 }
4793 }
4794 switch (stat.un.b.lsRjtRsnCode) {
4795 case LSRJT_UNABLE_TPC:
4796 /* Special case for PRLI LS_RJTs. Recall that lpfc
4797 * uses a single routine to issue both PRLI FC4 types.
4798 * If the PRLI is rejected because that FC4 type
4799 * isn't really supported, don't retry and cause
4800 * multiple transport registrations. Otherwise, parse
4801 * the reason code/reason code explanation and take the
4802 * appropriate action.
4803 */
4804 lpfc_printf_vlog(vport, KERN_INFO,
4805 LOG_DISCOVERY | LOG_ELS | LOG_NODE,
4806 "0153 ELS cmd x%x LS_RJT by x%x. "
4807 "RsnCode x%x RsnCodeExp x%x\n",
4808 cmd, did, stat.un.b.lsRjtRsnCode,
4809 stat.un.b.lsRjtRsnCodeExp);
4810
4811 switch (stat.un.b.lsRjtRsnCodeExp) {
4812 case LSEXP_CANT_GIVE_DATA:
4813 case LSEXP_CMD_IN_PROGRESS:
4814 if (cmd == ELS_CMD_PLOGI) {
4815 delay = 1000;
4816 maxretry = 48;
4817 }
4818 retry = 1;
4819 break;
4820 case LSEXP_REQ_UNSUPPORTED:
4821 case LSEXP_NO_RSRC_ASSIGN:
4822 /* These explanation codes get no retry. */
4823 if (cmd == ELS_CMD_PRLI ||
4824 cmd == ELS_CMD_NVMEPRLI)
4825 break;
4826 fallthrough;
4827 default:
4828 /* Limit the delay and retry action to a limited
4829 * cmd set. There are other ELS commands where
4830 * a retry is not expected.
4831 */
4832 if (cmd == ELS_CMD_PLOGI ||
4833 cmd == ELS_CMD_PRLI ||
4834 cmd == ELS_CMD_NVMEPRLI) {
4835 delay = 1000;
4836 maxretry = lpfc_max_els_tries + 1;
4837 retry = 1;
4838 }
4839 break;
4840 }
4841
4842 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4843 (cmd == ELS_CMD_FDISC) &&
4844 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
4845 lpfc_printf_vlog(vport, KERN_ERR,
4846 LOG_TRACE_EVENT,
4847 "0125 FDISC Failed (x%x). "
4848 "Fabric out of resources\n",
4849 stat.un.lsRjtError);
4850 lpfc_vport_set_state(vport,
4851 FC_VPORT_NO_FABRIC_RSCS);
4852 }
4853 break;
4854
4855 case LSRJT_LOGICAL_BSY:
4856 if ((cmd == ELS_CMD_PLOGI) ||
4857 (cmd == ELS_CMD_PRLI) ||
4858 (cmd == ELS_CMD_NVMEPRLI)) {
4859 delay = 1000;
4860 maxretry = 48;
4861 } else if (cmd == ELS_CMD_FDISC) {
4862 /* FDISC retry policy */
4863 maxretry = 48;
4864 if (cmdiocb->retry >= 32)
4865 delay = 1000;
4866 }
4867 retry = 1;
4868 break;
4869
4870 case LSRJT_LOGICAL_ERR:
4871 /* There are some cases where switches return this
4872 * error when they are not ready and should be returning
4873 * Logical Busy. We should delay every time.
4874 */
4875 if (cmd == ELS_CMD_FDISC &&
4876 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
4877 maxretry = 3;
4878 delay = 1000;
4879 retry = 1;
4880 } else if (cmd == ELS_CMD_FLOGI &&
4881 stat.un.b.lsRjtRsnCodeExp ==
4882 LSEXP_NOTHING_MORE) {
4883 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
4884 retry = 1;
4885 lpfc_printf_vlog(vport, KERN_ERR,
4886 LOG_TRACE_EVENT,
4887 "0820 FLOGI Failed (x%x). "
4888 "BBCredit Not Supported\n",
4889 stat.un.lsRjtError);
4890 }
4891 break;
4892
4893 case LSRJT_PROTOCOL_ERR:
4894 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4895 (cmd == ELS_CMD_FDISC) &&
4896 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
4897 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
4898 ) {
4899 lpfc_printf_vlog(vport, KERN_ERR,
4900 LOG_TRACE_EVENT,
4901 "0122 FDISC Failed (x%x). "
4902 "Fabric Detected Bad WWN\n",
4903 stat.un.lsRjtError);
4904 lpfc_vport_set_state(vport,
4905 FC_VPORT_FABRIC_REJ_WWN);
4906 }
4907 break;
4908 case LSRJT_VENDOR_UNIQUE:
4909 if ((stat.un.b.vendorUnique == 0x45) &&
4910 (cmd == ELS_CMD_FLOGI)) {
4911 goto out_retry;
4912 }
4913 break;
4914 case LSRJT_CMD_UNSUPPORTED:
4915 /* lpfc nvmet returns this type of LS_RJT when it
4916 * receives an FCP PRLI because lpfc nvmet only
4917 * support NVME. ELS request is terminated for FCP4
4918 * on this rport.
4919 */
4920 if (stat.un.b.lsRjtRsnCodeExp ==
4921 LSEXP_REQ_UNSUPPORTED) {
4922 if (cmd == ELS_CMD_PRLI)
4923 goto out_retry;
4924 }
4925 break;
4926 }
4927 break;
4928
4929 case IOSTAT_INTERMED_RSP:
4930 case IOSTAT_BA_RJT:
4931 break;
4932
4933 default:
4934 break;
4935 }
4936
4937 if (link_reset) {
4938 rc = lpfc_link_reset(vport);
4939 if (rc) {
4940 /* Do not give up. Retry PLOGI one more time and attempt
4941 * link reset if PLOGI fails again.
4942 */
4943 retry = 1;
4944 delay = 100;
4945 goto out_retry;
4946 }
4947 return 1;
4948 }
4949
4950 if (did == FDMI_DID)
4951 retry = 1;
4952
4953 if ((cmd == ELS_CMD_FLOGI) &&
4954 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
4955 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
4956 /* FLOGI retry policy */
4957 retry = 1;
4958 /* retry FLOGI forever */
4959 if (phba->link_flag != LS_LOOPBACK_MODE)
4960 maxretry = 0;
4961 else
4962 maxretry = 2;
4963
4964 if (cmdiocb->retry >= 100)
4965 delay = 5000;
4966 else if (cmdiocb->retry >= 32)
4967 delay = 1000;
4968 } else if ((cmd == ELS_CMD_FDISC) &&
4969 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
4970 /* retry FDISCs every second up to devloss */
4971 retry = 1;
4972 maxretry = vport->cfg_devloss_tmo;
4973 delay = 1000;
4974 }
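 /* FLOGI on fabric topologies is retried indefinitely (unless in loopback
  * mode) with the delay escalating to 1 and then 5 seconds; FDISC is
  * retried once per second up to the devloss timeout.
  */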
4975
4976 cmdiocb->retry++;
4977 if (maxretry && (cmdiocb->retry >= maxretry)) {
4978 phba->fc_stat.elsRetryExceeded++;
4979 retry = 0;
4980 }
4981
4982 if ((vport->load_flag & FC_UNLOADING) != 0)
4983 retry = 0;
4984
4985out_retry:
4986 if (retry) {
4987 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
4988 /* Stop retrying PLOGI and FDISC if in FCF discovery */
4989 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4990 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4991 "2849 Stop retry ELS command "
4992 "x%x to remote NPORT x%x, "
4993 "Data: x%x x%x\n", cmd, did,
4994 cmdiocb->retry, delay);
4995 return 0;
4996 }
4997 }
4998
4999 /* Retry ELS command <elsCmd> to remote NPORT <did> */
5000 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5001 "0107 Retry ELS command x%x to remote "
5002 "NPORT x%x Data: x%x x%x\n",
5003 cmd, did, cmdiocb->retry, delay);
5004
5005 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
5006 ((ulp_status != IOSTAT_LOCAL_REJECT) ||
5007 ((ulp_word4 & IOERR_PARAM_MASK) !=
5008 IOERR_NO_RESOURCES))) {
5009 /* Don't reset timer for no resources */
5010
5011 /* If discovery / RSCN timer is running, reset it */
			if (timer_pending(&vport->fc_disctmo) ||
5013 (vport->fc_flag & FC_RSCN_MODE))
5014 lpfc_set_disctmo(vport);
5015 }
5016
5017 phba->fc_stat.elsXmitRetry++;
5018 if (ndlp && delay) {
5019 phba->fc_stat.elsDelayRetry++;
5020 ndlp->nlp_retry = cmdiocb->retry;
5021
5022 /* delay is specified in milliseconds */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(delay));
			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(&ndlp->lock);
5028
5029 ndlp->nlp_prev_state = ndlp->nlp_state;
5030 if ((cmd == ELS_CMD_PRLI) ||
5031 (cmd == ELS_CMD_NVMEPRLI))
5032 lpfc_nlp_set_state(vport, ndlp,
5033 NLP_STE_PRLI_ISSUE);
5034 else if (cmd != ELS_CMD_ADISC)
5035 lpfc_nlp_set_state(vport, ndlp,
5036 NLP_STE_NPR_NODE);
5037 ndlp->nlp_last_elscmd = cmd;
5038
5039 return 1;
5040 }
5041 switch (cmd) {
5042 case ELS_CMD_FLOGI:
			lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
5044 return 1;
5045 case ELS_CMD_FDISC:
			lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
5047 return 1;
5048 case ELS_CMD_PLOGI:
5049 if (ndlp) {
5050 ndlp->nlp_prev_state = ndlp->nlp_state;
5051 lpfc_nlp_set_state(vport, ndlp,
5052 NLP_STE_PLOGI_ISSUE);
5053 }
			lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
5055 return 1;
5056 case ELS_CMD_ADISC:
5057 ndlp->nlp_prev_state = ndlp->nlp_state;
5058 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
5060 return 1;
5061 case ELS_CMD_PRLI:
5062 case ELS_CMD_NVMEPRLI:
5063 ndlp->nlp_prev_state = ndlp->nlp_state;
5064 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
			lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
5066 return 1;
5067 case ELS_CMD_LOGO:
5068 ndlp->nlp_prev_state = ndlp->nlp_state;
5069 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
			lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
5071 return 1;
5072 }
5073 }
5074 /* No retry ELS command <elsCmd> to remote NPORT <did> */
5075 if (logerr) {
5076 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5077 "0137 No retry ELS command x%x to remote "
5078 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
5079 cmd, did, ulp_status,
5080 ulp_word4);
	} else {
5083 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5084 "0108 No retry ELS command x%x to remote "
5085 "NPORT x%x Retried:%d Error:x%x/%x\n",
5086 cmd, did, cmdiocb->retry, ulp_status,
5087 ulp_word4);
5088 }
5089 return 0;
5090}
5091
5092/**
5093 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
5094 * @phba: pointer to lpfc hba data structure.
5095 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
5096 *
5097 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
5098 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
5099 * checks to see whether there is a lpfc DMA buffer associated with the
5100 * response of the command IOCB. If so, it will be released before releasing
5101 * the lpfc DMA buffer associated with the IOCB itself.
5102 *
5103 * Return code
5104 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
5105 **/
5106static int
5107lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
5108{
5109 struct lpfc_dmabuf *buf_ptr;
5110
5111 /* Free the response before processing the command. */
	if (!list_empty(&buf_ptr1->list)) {
		list_remove_head(&buf_ptr1->list, buf_ptr,
				 struct lpfc_dmabuf,
				 list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}
	lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
	kfree(buf_ptr1);
5121 return 0;
5122}
5123
5124/**
5125 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
5126 * @phba: pointer to lpfc hba data structure.
5127 * @buf_ptr: pointer to the lpfc dma buffer data structure.
5128 *
5129 * This routine releases the lpfc Direct Memory Access (DMA) buffer
5130 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
5131 * pool.
5132 *
5133 * Return code
5134 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
5135 **/
5136static int
5137lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
5138{
5139 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
	kfree(buf_ptr);
5141 return 0;
5142}
5143
5144/**
5145 * lpfc_els_free_iocb - Free a command iocb and its associated resources
5146 * @phba: pointer to lpfc hba data structure.
5147 * @elsiocb: pointer to lpfc els command iocb data structure.
5148 *
5149 * This routine frees a command IOCB and its associated resources. The
5150 * command IOCB data structure contains the reference to various associated
5151 * resources, these fields must be set to NULL if the associated reference
5152 * not present:
5153 * cmd_dmabuf - reference to cmd.
5154 * cmd_dmabuf->next - reference to rsp
5155 * rsp_dmabuf - unused
5156 * bpl_dmabuf - reference to bpl
5157 *
5158 * It first properly decrements the reference count held on ndlp for the
5159 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
5160 * set, it invokes the lpfc_els_free_data() routine to release the Direct
5161 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
 * adds the DMA buffer to the @phba data structure for delayed release.
5163 * If reference to the Buffer Pointer List (BPL) is present, the
5164 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
5165 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
5166 * invoked to release the IOCB data structure back to @phba IOCBQ list.
5167 *
5168 * Return code
5169 * 0 - Success (currently, always return 0)
5170 **/
5171int
5172lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
5173{
5174 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
5175
5176 /* The I/O iocb is complete. Clear the node and first dmbuf */
5177 elsiocb->ndlp = NULL;
5178
5179 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
5180 if (elsiocb->cmd_dmabuf) {
5181 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
5182 /* Firmware could still be in progress of DMAing
5183 * payload, so don't free data buffer till after
5184 * a hbeat.
5185 */
5186 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
5187 buf_ptr = elsiocb->cmd_dmabuf;
5188 elsiocb->cmd_dmabuf = NULL;
5189 if (buf_ptr) {
5190 buf_ptr1 = NULL;
				spin_lock_irq(&phba->hbalock);
				if (!list_empty(&buf_ptr->list)) {
					list_remove_head(&buf_ptr->list,
						buf_ptr1, struct lpfc_dmabuf,
						list);
					INIT_LIST_HEAD(&buf_ptr1->list);
					list_add_tail(&buf_ptr1->list,
						      &phba->elsbuf);
					phba->elsbuf_cnt++;
				}
				INIT_LIST_HEAD(&buf_ptr->list);
				list_add_tail(&buf_ptr->list, &phba->elsbuf);
				phba->elsbuf_cnt++;
				spin_unlock_irq(&phba->hbalock);
5205 }
5206 } else {
5207 buf_ptr1 = elsiocb->cmd_dmabuf;
5208 lpfc_els_free_data(phba, buf_ptr1);
5209 elsiocb->cmd_dmabuf = NULL;
5210 }
5211 }
5212
5213 if (elsiocb->bpl_dmabuf) {
5214 buf_ptr = elsiocb->bpl_dmabuf;
5215 lpfc_els_free_bpl(phba, buf_ptr);
5216 elsiocb->bpl_dmabuf = NULL;
5217 }
5218 lpfc_sli_release_iocbq(phba, elsiocb);
5219 return 0;
5220}
5221
5222/**
5223 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
5224 * @phba: pointer to lpfc hba data structure.
5225 * @cmdiocb: pointer to lpfc command iocb data structure.
5226 * @rspiocb: pointer to lpfc response iocb data structure.
5227 *
5228 * This routine is the completion callback function to the Logout (LOGO)
5229 * Accept (ACC) Response ELS command. This routine is invoked to indicate
5230 * the completion of the LOGO process. If the node has transitioned to NPR,
5231 * this routine unregisters the RPI if it is still registered. The
5232 * lpfc_els_free_iocb() is invoked to release the IOCB data structure.
5233 **/
5234static void
5235lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5236 struct lpfc_iocbq *rspiocb)
5237{
5238 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5239 struct lpfc_vport *vport = cmdiocb->vport;
5240 u32 ulp_status, ulp_word4;
5241
	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);
5244
5245 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5246 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
5247 ulp_status, ulp_word4, ndlp->nlp_DID);
5248 /* ACC to LOGO completes to NPort <nlp_DID> */
5249 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5250 "0109 ACC to LOGO completes to NPort x%x refcnt %d "
5251 "Data: x%x x%x x%x\n",
5252 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
5253 ndlp->nlp_state, ndlp->nlp_rpi);
5254
5255 /* This clause allows the LOGO ACC to complete and free resources
5256 * for the Fabric Domain Controller. It does deliberately skip
5257 * the unreg_rpi and release rpi because some fabrics send RDP
5258 * requests after logging out from the initiator.
5259 */
5260 if (ndlp->nlp_type & NLP_FABRIC &&
5261 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
5262 goto out;
5263
5264 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
5265 /* If PLOGI is being retried, PLOGI completion will cleanup the
5266 * node. The NLP_NPR_2B_DISC flag needs to be retained to make
5267 * progress on nodes discovered from last RSCN.
5268 */
5269 if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
5270 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
5271 goto out;
5272
5273 if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
5274 lpfc_unreg_rpi(vport, ndlp);
5275
5276 }
5277 out:
5278 /*
5279 * The driver received a LOGO from the rport and has ACK'd it.
5280 * At this point, the driver is done so release the IOCB
5281 */
	lpfc_els_free_iocb(phba, cmdiocb);
5283 lpfc_nlp_put(ndlp);
5284}
5285
5286/**
5287 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
5288 * @phba: pointer to lpfc hba data structure.
5289 * @pmb: pointer to the driver internal queue element for mailbox command.
5290 *
5291 * This routine is the completion callback function for unregister default
5292 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
5293 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
5294 * decrements the ndlp reference count held for this completion callback
5295 * function. After that, it invokes the lpfc_drop_node to check
5296 * whether it is appropriate to release the node.
5297 **/
5298void
5299lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5300{
5301 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
5302 u32 mbx_flag = pmb->mbox_flag;
5303 u32 mbx_cmd = pmb->u.mb.mbxCommand;
5304
5305 if (ndlp) {
5306 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
5307 "0006 rpi x%x DID:%x flg:%x %d x%px "
5308 "mbx_cmd x%x mbx_flag x%x x%px\n",
5309 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5310 kref_read(&ndlp->kref), ndlp, mbx_cmd,
5311 mbx_flag, pmb);
5312
5313 /* This ends the default/temporary RPI cleanup logic for this
5314 * ndlp and the node and rpi needs to be released. Free the rpi
5315 * first on an UNREG_LOGIN and then release the final
5316 * references.
5317 */
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
		if (mbx_cmd == MBX_UNREG_LOGIN)
			ndlp->nlp_flag &= ~NLP_UNREG_INP;
		spin_unlock_irq(&ndlp->lock);
5323 lpfc_nlp_put(ndlp);
5324 lpfc_drop_node(ndlp->vport, ndlp);
5325 }
5326
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5328}
5329
5330/**
5331 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
5332 * @phba: pointer to lpfc hba data structure.
5333 * @cmdiocb: pointer to lpfc command iocb data structure.
5334 * @rspiocb: pointer to lpfc response iocb data structure.
5335 *
5336 * This routine is the completion callback function for ELS Response IOCB
 * command. In the normal case, this callback function properly sets the
 * nlp_flag bitmap in the ndlp data structure. If the mbox command reference
 * field in the command IOCB is not NULL, the referenced mailbox command will
 * be sent out, and then the lpfc_els_free_iocb() routine is invoked to
 * release the IOCB.
5342 **/
5343static void
5344lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5345 struct lpfc_iocbq *rspiocb)
5346{
5347 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5348 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
5349 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
5350 IOCB_t *irsp;
5351 LPFC_MBOXQ_t *mbox = NULL;
5352 u32 ulp_status, ulp_word4, tmo, did, iotag;
5353
5354 if (!vport) {
5355 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5356 "3177 ELS response failed\n");
5357 goto out;
5358 }
5359 if (cmdiocb->context_un.mbox)
5360 mbox = cmdiocb->context_un.mbox;
5361
	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);
	did = get_job_els_rsp64_did(phba, cmdiocb);
5365
5366 if (phba->sli_rev == LPFC_SLI_REV4) {
5367 tmo = get_wqe_tmo(cmdiocb);
5368 iotag = get_wqe_reqtag(cmdiocb);
5369 } else {
5370 irsp = &rspiocb->iocb;
5371 tmo = irsp->ulpTimeout;
5372 iotag = irsp->ulpIoTag;
5373 }
5374
5375 /* Check to see if link went down during discovery */
5376 if (!ndlp || lpfc_els_chk_latt(vport)) {
5377 if (mbox)
			lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
5379 goto out;
5380 }
5381
5382 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5383 "ELS rsp cmpl: status:x%x/x%x did:x%x",
5384 ulp_status, ulp_word4, did);
5385 /* ELS response tag <ulpIoTag> completes */
5386 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5387 "0110 ELS response tag x%x completes "
5388 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n",
5389 iotag, ulp_status, ulp_word4, tmo,
5390 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5391 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp);
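	/* A mailbox command (e.g. REG_LOGIN) may have been staged while
	 * building this ELS response; issue it now if the response completed
	 * successfully, otherwise release it.
	 */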
5392 if (mbox) {
5393 if (ulp_status == 0
5394 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
5395 if (!lpfc_unreg_rpi(vport, ndlp) &&
5396 (!(vport->fc_flag & FC_PT2PT))) {
5397 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5398 ndlp->nlp_state ==
5399 NLP_STE_REG_LOGIN_ISSUE) {
5400 lpfc_printf_vlog(vport, KERN_INFO,
5401 LOG_DISCOVERY,
5402 "0314 PLOGI recov "
5403 "DID x%x "
5404 "Data: x%x x%x x%x\n",
5405 ndlp->nlp_DID,
5406 ndlp->nlp_state,
5407 ndlp->nlp_rpi,
5408 ndlp->nlp_flag);
5409 goto out_free_mbox;
5410 }
5411 }
5412
5413 /* Increment reference count to ndlp to hold the
5414 * reference to ndlp for the callback function.
5415 */
5416 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
5417 if (!mbox->ctx_ndlp)
5418 goto out_free_mbox;
5419
5420 mbox->vport = vport;
5421 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
5422 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
5423 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			} else {
5426 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
5427 ndlp->nlp_prev_state = ndlp->nlp_state;
5428 lpfc_nlp_set_state(vport, ndlp,
5429 NLP_STE_REG_LOGIN_ISSUE);
5430 }
5431
5432 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
5433 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5434 != MBX_NOT_FINISHED)
5435 goto out;
5436
5437 /* Decrement the ndlp reference count we
5438 * set for this failed mailbox command.
5439 */
5440 lpfc_nlp_put(ndlp);
5441 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
5442
5443 /* ELS rsp: Cannot issue reg_login for <NPortid> */
5444 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5445 "0138 ELS rsp: Cannot issue reg_login for x%x "
5446 "Data: x%x x%x x%x\n",
5447 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5448 ndlp->nlp_rpi);
5449 }
5450out_free_mbox:
		lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
5452 }
5453out:
5454 if (ndlp && shost) {
		spin_lock_irq(&ndlp->lock);
		if (mbox)
			ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
		ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
		spin_unlock_irq(&ndlp->lock);
5460 }
5461
5462 /* An SLI4 NPIV instance wants to drop the node at this point under
5463 * these conditions and release the RPI.
5464 */
5465 if (phba->sli_rev == LPFC_SLI_REV4 &&
5466 vport && vport->port_type == LPFC_NPIV_PORT &&
5467 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
5468 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
5469 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
5470 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
5471 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
				spin_lock_irq(&ndlp->lock);
				ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
				ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
				spin_unlock_irq(&ndlp->lock);
5476 }
5477 lpfc_drop_node(vport, ndlp);
5478 } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
5479 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE &&
5480 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
5481 /* Drop ndlp if there is no planned or outstanding
5482 * issued PRLI.
5483 *
5484 * In cases when the ndlp is acting as both an initiator
5485 * and target function, let our issued PRLI determine
5486 * the final ndlp kref drop.
5487 */
5488 lpfc_drop_node(vport, ndlp);
5489 }
5490 }
5491
5492 /* Release the originating I/O reference. */
	lpfc_els_free_iocb(phba, cmdiocb);
5494 lpfc_nlp_put(ndlp);
5495 return;
5496}
5497
5498/**
5499 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
5500 * @vport: pointer to a host virtual N_Port data structure.
5501 * @flag: the els command code to be accepted.
5502 * @oldiocb: pointer to the original lpfc command iocb data structure.
5503 * @ndlp: pointer to a node-list data structure.
5504 * @mbox: pointer to the driver internal queue element for mailbox command.
5505 *
5506 * This routine prepares and issues an Accept (ACC) response IOCB
5507 * command. It uses the @flag to properly set up the IOCB field for the
5508 * specific ACC response command to be issued and invokes the
5509 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
5510 * @mbox pointer is passed in, it will be put into the context_un.mbox
5511 * field of the IOCB for the completion callback function to issue the
5512 * mailbox command to the HBA later when callback is invoked.
5513 *
5514 * Note that the ndlp reference count will be incremented by 1 for holding the
5515 * ndlp and the reference to ndlp will be stored into the ndlp field of
5516 * the IOCB for the completion callback function to the corresponding
5517 * response ELS IOCB command.
5518 *
5519 * Return code
5520 * 0 - Successfully issued acc response
5521 * 1 - Failed to issue acc response
5522 **/
5523int
5524lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
5525 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
5526 LPFC_MBOXQ_t *mbox)
5527{
5528 struct lpfc_hba *phba = vport->phba;
5529 IOCB_t *icmd;
5530 IOCB_t *oldcmd;
5531 union lpfc_wqe128 *wqe;
5532 union lpfc_wqe128 *oldwqe = &oldiocb->wqe;
5533 struct lpfc_iocbq *elsiocb;
5534 uint8_t *pcmd;
5535 struct serv_parm *sp;
5536 uint16_t cmdsize;
5537 int rc;
5538 ELS_PKT *els_pkt_ptr;
5539 struct fc_els_rdf_resp *rdf_resp;
5540
5541 switch (flag) {
5542 case ELS_CMD_ACC:
5543 cmdsize = sizeof(uint32_t);
		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
		if (!elsiocb) {
			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
			spin_unlock_irq(&ndlp->lock);
5550 return 1;
5551 }
5552
5553 if (phba->sli_rev == LPFC_SLI_REV4) {
5554 wqe = &elsiocb->wqe;
5555 /* XRI / rx_id */
5556 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5557 bf_get(wqe_ctxt_tag,
5558 &oldwqe->xmit_els_rsp.wqe_com));
5559
5560 /* oxid */
5561 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5562 bf_get(wqe_rcvoxid,
5563 &oldwqe->xmit_els_rsp.wqe_com));
5564 } else {
5565 icmd = &elsiocb->iocb;
5566 oldcmd = &oldiocb->iocb;
5567 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5568 icmd->unsli3.rcvsli3.ox_id =
5569 oldcmd->unsli3.rcvsli3.ox_id;
5570 }
5571
5572 pcmd = elsiocb->cmd_dmabuf->virt;
5573 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5574 pcmd += sizeof(uint32_t);
5575
5576 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5577 "Issue ACC: did:x%x flg:x%x",
5578 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5579 break;
5580 case ELS_CMD_FLOGI:
5581 case ELS_CMD_PLOGI:
5582 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5585 if (!elsiocb)
5586 return 1;
5587
5588 if (phba->sli_rev == LPFC_SLI_REV4) {
5589 wqe = &elsiocb->wqe;
5590 /* XRI / rx_id */
5591 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5592 bf_get(wqe_ctxt_tag,
5593 &oldwqe->xmit_els_rsp.wqe_com));
5594
5595 /* oxid */
5596 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5597 bf_get(wqe_rcvoxid,
5598 &oldwqe->xmit_els_rsp.wqe_com));
5599 } else {
5600 icmd = &elsiocb->iocb;
5601 oldcmd = &oldiocb->iocb;
5602 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5603 icmd->unsli3.rcvsli3.ox_id =
5604 oldcmd->unsli3.rcvsli3.ox_id;
5605 }
5606
5607 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
5608
5609 if (mbox)
5610 elsiocb->context_un.mbox = mbox;
5611
5612 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5613 pcmd += sizeof(uint32_t);
5614 sp = (struct serv_parm *)pcmd;
5615
5616 if (flag == ELS_CMD_FLOGI) {
5617 /* Copy the received service parameters back */
5618 memcpy(sp, &phba->fc_fabparam,
5619 sizeof(struct serv_parm));
5620
5621 /* Clear the F_Port bit */
5622 sp->cmn.fPort = 0;
5623
5624 /* Mark all class service parameters as invalid */
5625 sp->cls1.classValid = 0;
5626 sp->cls2.classValid = 0;
5627 sp->cls3.classValid = 0;
5628 sp->cls4.classValid = 0;
5629
5630 /* Copy our worldwide names */
5631 memcpy(&sp->portName, &vport->fc_sparam.portName,
5632 sizeof(struct lpfc_name));
5633 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
5634 sizeof(struct lpfc_name));
5635 } else {
5636 memcpy(pcmd, &vport->fc_sparam,
5637 sizeof(struct serv_parm));
5638
5639 sp->cmn.valid_vendor_ver_level = 0;
5640 memset(sp->un.vendorVersion, 0,
5641 sizeof(sp->un.vendorVersion));
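			/* Only the low nibble of this byte carries the
			 * BB receive data field size MSB; keep the upper
			 * bits clear.
			 */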
5642 sp->cmn.bbRcvSizeMsb &= 0xF;
5643
5644 /* If our firmware supports this feature, convey that
5645 * info to the target using the vendor specific field.
5646 */
5647 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
5648 sp->cmn.valid_vendor_ver_level = 1;
5649 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
5650 sp->un.vv.flags =
5651 cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
5652 }
5653 }
5654
5655 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5656 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
5657 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5658 break;
5659 case ELS_CMD_PRLO:
5660 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
					     ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
5663 if (!elsiocb)
5664 return 1;
5665
5666 if (phba->sli_rev == LPFC_SLI_REV4) {
5667 wqe = &elsiocb->wqe;
5668 /* XRI / rx_id */
5669 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5670 bf_get(wqe_ctxt_tag,
5671 &oldwqe->xmit_els_rsp.wqe_com));
5672
5673 /* oxid */
5674 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5675 bf_get(wqe_rcvoxid,
5676 &oldwqe->xmit_els_rsp.wqe_com));
5677 } else {
5678 icmd = &elsiocb->iocb;
5679 oldcmd = &oldiocb->iocb;
5680 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5681 icmd->unsli3.rcvsli3.ox_id =
5682 oldcmd->unsli3.rcvsli3.ox_id;
5683 }
5684
5685 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt;
5686
5687 memcpy(pcmd, oldiocb->cmd_dmabuf->virt,
5688 sizeof(uint32_t) + sizeof(PRLO));
5689 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
5690 els_pkt_ptr = (ELS_PKT *) pcmd;
5691 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
5692
5693 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5694 "Issue ACC PRLO: did:x%x flg:x%x",
5695 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5696 break;
5697 case ELS_CMD_RDF:
5698 cmdsize = sizeof(*rdf_resp);
		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5701 if (!elsiocb)
5702 return 1;
5703
5704 if (phba->sli_rev == LPFC_SLI_REV4) {
5705 wqe = &elsiocb->wqe;
5706 /* XRI / rx_id */
5707 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5708 bf_get(wqe_ctxt_tag,
5709 &oldwqe->xmit_els_rsp.wqe_com));
5710
5711 /* oxid */
5712 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5713 bf_get(wqe_rcvoxid,
5714 &oldwqe->xmit_els_rsp.wqe_com));
5715 } else {
5716 icmd = &elsiocb->iocb;
5717 oldcmd = &oldiocb->iocb;
5718 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5719 icmd->unsli3.rcvsli3.ox_id =
5720 oldcmd->unsli3.rcvsli3.ox_id;
5721 }
5722
5723 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
5724 rdf_resp = (struct fc_els_rdf_resp *)pcmd;
5725 memset(rdf_resp, 0, sizeof(*rdf_resp));
5726 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC;
5727
5728 /* FC-LS-5 specifies desc_list_len shall be set to 12 */
5729 rdf_resp->desc_list_len = cpu_to_be32(12);
5730
5731 /* FC-LS-5 specifies LS REQ Information descriptor */
5732 rdf_resp->lsri.desc_tag = cpu_to_be32(1);
5733 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32));
5734 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF;
5735 break;
5736 default:
5737 return 1;
5738 }
5739 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
		spin_lock_irq(&ndlp->lock);
		if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
		      ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
		spin_unlock_irq(&ndlp->lock);
5745 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc;
5746 } else {
5747 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5748 }
5749
5750 phba->fc_stat.elsXmitACC++;
5751 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5752 if (!elsiocb->ndlp) {
5753 lpfc_els_free_iocb(phba, elsiocb);
5754 return 1;
5755 }
5756
5757 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5758 if (rc == IOCB_ERROR) {
5759 lpfc_els_free_iocb(phba, elsiocb);
5760 lpfc_nlp_put(ndlp);
5761 return 1;
5762 }
5763
5764 /* Xmit ELS ACC response tag <ulpIoTag> */
5765 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5766 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
5767 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
5768 "RPI: x%x, fc_flag x%x refcnt %d\n",
5769 rc, elsiocb->iotag, elsiocb->sli4_xritag,
5770 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5771 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
5772 return 0;
5773}
5774
5775/**
5776 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
5777 * @vport: pointer to a virtual N_Port data structure.
5778 * @rejectError: reject response to issue
5779 * @oldiocb: pointer to the original lpfc command iocb data structure.
5780 * @ndlp: pointer to a node-list data structure.
5781 * @mbox: pointer to the driver internal queue element for mailbox command.
5782 *
 * This routine prepares and issues a Reject (RJT) response IOCB
5784 * command. If a @mbox pointer is passed in, it will be put into the
5785 * context_un.mbox field of the IOCB for the completion callback function
5786 * to issue to the HBA later.
5787 *
5788 * Note that the ndlp reference count will be incremented by 1 for holding the
5789 * ndlp and the reference to ndlp will be stored into the ndlp field of
5790 * the IOCB for the completion callback function to the reject response
5791 * ELS IOCB command.
5792 *
5793 * Return code
5794 * 0 - Successfully issued reject response
5795 * 1 - Failed to issue reject response
5796 **/
5797int
5798lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
5799 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
5800 LPFC_MBOXQ_t *mbox)
5801{
5802 int rc;
5803 struct lpfc_hba *phba = vport->phba;
5804 IOCB_t *icmd;
5805 IOCB_t *oldcmd;
5806 union lpfc_wqe128 *wqe;
5807 struct lpfc_iocbq *elsiocb;
5808 uint8_t *pcmd;
5809 uint16_t cmdsize;
5810
5811 cmdsize = 2 * sizeof(uint32_t);
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
5814 if (!elsiocb)
5815 return 1;
5816
5817 if (phba->sli_rev == LPFC_SLI_REV4) {
5818 wqe = &elsiocb->wqe;
5819 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
5820 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
5821 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5822 get_job_rcvoxid(phba, oldiocb));
5823 } else {
5824 icmd = &elsiocb->iocb;
5825 oldcmd = &oldiocb->iocb;
5826 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5827 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5828 }
5829
5830 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
5831
5832 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
5833 pcmd += sizeof(uint32_t);
5834 *((uint32_t *) (pcmd)) = rejectError;
5835
5836 if (mbox)
5837 elsiocb->context_un.mbox = mbox;
5838
5839 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
5840 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5841 "0129 Xmit ELS RJT x%x response tag x%x "
5842 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
5843 "rpi x%x\n",
5844 rejectError, elsiocb->iotag,
5845 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID,
5846 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
5847 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5848 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
5849 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
5850
5851 phba->fc_stat.elsXmitLSRJT++;
5852 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5853 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5854 if (!elsiocb->ndlp) {
5855 lpfc_els_free_iocb(phba, elsiocb);
5856 return 1;
5857 }
5858
5859 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the
5860 * node's assigned RPI gets released provided this node is not already
5861 * registered with the transport.
5862 */
5863 if (phba->sli_rev == LPFC_SLI_REV4 &&
5864 vport->port_type == LPFC_NPIV_PORT &&
5865 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_RELEASE_RPI;
		spin_unlock_irq(&ndlp->lock);
5869 }
5870
5871 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5872 if (rc == IOCB_ERROR) {
5873 lpfc_els_free_iocb(phba, elsiocb);
5874 lpfc_nlp_put(ndlp);
5875 return 1;
5876 }
5877
5878 return 0;
5879}
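/*
 * Typical usage (illustrative sketch only): an unsolicited ELS receive
 * handler that cannot service a request builds an LS_RJT reason code and
 * hands it to lpfc_els_rsp_reject(), e.g.
 *
 *	struct ls_rjt stat;
 *
 *	memset(&stat, 0, sizeof(struct ls_rjt));
 *	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 *	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 */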
5880
5881 /**
5882 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric.
5883 * @vport: pointer to a host virtual N_Port data structure.
5884 * @cmdiocb: pointer to the original lpfc command iocb data structure.
5885 * @ndlp: NPort to where rsp is directed
5886 *
5887 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate
5888 * this N_Port's support of hardware signals in its Congestion
5889 * Capabilities Descriptor.
5890 *
5891 * Return code
5892 * 0 - Successfully issued edc rsp command
5893 * 1 - Failed to issue edc rsp command
5894 **/
5895static int
5896lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5897 struct lpfc_nodelist *ndlp)
5898{
5899 struct lpfc_hba *phba = vport->phba;
5900 struct fc_els_edc_resp *edc_rsp;
5901 struct fc_tlv_desc *tlv;
5902 struct lpfc_iocbq *elsiocb;
5903 IOCB_t *icmd, *cmd;
5904 union lpfc_wqe128 *wqe;
5905 u32 cgn_desc_size, lft_desc_size;
5906 u16 cmdsize;
5907 uint8_t *pcmd;
5908 int rc;
5909
5910 cmdsize = sizeof(struct fc_els_edc_resp);
5911 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
5912 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
5913 sizeof(struct fc_diag_lnkflt_desc) : 0;
5914 cmdsize += cgn_desc_size + lft_desc_size;
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry,
				     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5917 if (!elsiocb)
5918 return 1;
5919
5920 if (phba->sli_rev == LPFC_SLI_REV4) {
5921 wqe = &elsiocb->wqe;
5922 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
5923 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */
5924 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5925 get_job_rcvoxid(phba, cmdiocb));
5926 } else {
5927 icmd = &elsiocb->iocb;
5928 cmd = &cmdiocb->iocb;
5929 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */
5930 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
5931 }
5932
5933 pcmd = elsiocb->cmd_dmabuf->virt;
5934 memset(pcmd, 0, cmdsize);
5935
5936 edc_rsp = (struct fc_els_edc_resp *)pcmd;
5937 edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC;
5938 edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) +
5939 cgn_desc_size + lft_desc_size);
5940 edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
5941 edc_rsp->lsri.desc_len = cpu_to_be32(
5942 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc));
5943 edc_rsp->lsri.rqst_w0.cmd = ELS_EDC;
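	/* Fill the congestion signaling descriptor first, then append the
	 * optional link fault descriptor when lft_desc_size is non-zero.
	 */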
5944 tlv = edc_rsp->desc;
5945 lpfc_format_edc_cgn_desc(phba, tlv);
	tlv = fc_tlv_next_desc(tlv);
5947 if (lft_desc_size)
5948 lpfc_format_edc_lft_desc(phba, tlv);
5949
5950 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5951 "Issue EDC ACC: did:x%x flg:x%x refcnt %d",
5952 ndlp->nlp_DID, ndlp->nlp_flag,
			      kref_read(&ndlp->kref));
5954 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5955
5956 phba->fc_stat.elsXmitACC++;
5957 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5958 if (!elsiocb->ndlp) {
5959 lpfc_els_free_iocb(phba, elsiocb);
5960 return 1;
5961 }
5962
5963 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5964 if (rc == IOCB_ERROR) {
5965 lpfc_els_free_iocb(phba, elsiocb);
5966 lpfc_nlp_put(ndlp);
5967 return 1;
5968 }
5969
5970 /* Xmit ELS ACC response tag <ulpIoTag> */
5971 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5972 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, "
5973 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
5974 "RPI: x%x, fc_flag x%x\n",
5975 rc, elsiocb->iotag, elsiocb->sli4_xritag,
5976 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5977 ndlp->nlp_rpi, vport->fc_flag);
5978
5979 return 0;
5980}
5981
5982/**
5983 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
5984 * @vport: pointer to a virtual N_Port data structure.
5985 * @oldiocb: pointer to the original lpfc command iocb data structure.
5986 * @ndlp: pointer to a node-list data structure.
5987 *
5988 * This routine prepares and issues an Accept (ACC) response to Address
5989 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
5990 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
5991 *
5992 * Note that the ndlp reference count will be incremented by 1 for holding the
5993 * ndlp and the reference to ndlp will be stored into the ndlp field of
5994 * the IOCB for the completion callback function to the ADISC Accept response
5995 * ELS IOCB command.
5996 *
5997 * Return code
5998 * 0 - Successfully issued acc adisc response
5999 * 1 - Failed to issue adisc acc response
6000 **/
6001int
6002lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
6003 struct lpfc_nodelist *ndlp)
6004{
6005 struct lpfc_hba *phba = vport->phba;
6006 ADISC *ap;
6007 IOCB_t *icmd, *oldcmd;
6008 union lpfc_wqe128 *wqe;
6009 struct lpfc_iocbq *elsiocb;
6010 uint8_t *pcmd;
6011 uint16_t cmdsize;
6012 int rc;
6013 u32 ulp_context;
6014
6015 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_ACC);
6018 if (!elsiocb)
6019 return 1;
6020
6021 if (phba->sli_rev == LPFC_SLI_REV4) {
6022 wqe = &elsiocb->wqe;
6023 /* XRI / rx_id */
6024 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6025 get_job_ulpcontext(phba, oldiocb));
		ulp_context = get_job_ulpcontext(phba, elsiocb);
6027 /* oxid */
6028 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6029 get_job_rcvoxid(phba, oldiocb));
6030 } else {
6031 icmd = &elsiocb->iocb;
6032 oldcmd = &oldiocb->iocb;
6033 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6034 ulp_context = elsiocb->iocb.ulpContext;
6035 icmd->unsli3.rcvsli3.ox_id =
6036 oldcmd->unsli3.rcvsli3.ox_id;
6037 }
6038
6039 /* Xmit ADISC ACC response tag <ulpIoTag> */
6040 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6041 "0130 Xmit ADISC ACC response iotag x%x xri: "
6042 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
6043 elsiocb->iotag, ulp_context,
6044 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6045 ndlp->nlp_rpi);
6046 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6047
6048 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6049 pcmd += sizeof(uint32_t);
6050
6051 ap = (ADISC *) (pcmd);
6052 ap->hardAL_PA = phba->fc_pref_ALPA;
6053 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
6054 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
6055 ap->DID = be32_to_cpu(vport->fc_myDID);
6056
6057 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6058 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d",
			      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6060
6061 phba->fc_stat.elsXmitACC++;
6062 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6063 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6064 if (!elsiocb->ndlp) {
6065 lpfc_els_free_iocb(phba, elsiocb);
6066 return 1;
6067 }
6068
6069 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6070 if (rc == IOCB_ERROR) {
6071 lpfc_els_free_iocb(phba, elsiocb);
6072 lpfc_nlp_put(ndlp);
6073 return 1;
6074 }
6075
6076 return 0;
6077}
6078
6079/**
6080 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
6081 * @vport: pointer to a virtual N_Port data structure.
6082 * @oldiocb: pointer to the original lpfc command iocb data structure.
6083 * @ndlp: pointer to a node-list data structure.
6084 *
6085 * This routine prepares and issues an Accept (ACC) response to Process
6086 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
6087 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
6088 *
6089 * Note that the ndlp reference count will be incremented by 1 for holding the
6090 * ndlp and the reference to ndlp will be stored into the ndlp field of
6091 * the IOCB for the completion callback function to the PRLI Accept response
6092 * ELS IOCB command.
6093 *
6094 * Return code
6095 * 0 - Successfully issued acc prli response
6096 * 1 - Failed to issue acc prli response
6097 **/
6098int
6099lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
6100 struct lpfc_nodelist *ndlp)
6101{
6102 struct lpfc_hba *phba = vport->phba;
6103 PRLI *npr;
6104 struct lpfc_nvme_prli *npr_nvme;
6105 lpfc_vpd_t *vpd;
6106 IOCB_t *icmd;
6107 IOCB_t *oldcmd;
6108 union lpfc_wqe128 *wqe;
6109 struct lpfc_iocbq *elsiocb;
6110 uint8_t *pcmd;
6111 uint16_t cmdsize;
6112 uint32_t prli_fc4_req, *req_payload;
6113 struct lpfc_dmabuf *req_buf;
6114 int rc;
6115 u32 elsrspcmd, ulp_context;
6116
6117 /* Need the incoming PRLI payload to determine if the ACC is for an
6118 * FC4 or NVME PRLI type. The PRLI type is at word 1.
6119 */
6120 req_buf = oldiocb->cmd_dmabuf;
6121 req_payload = (((uint32_t *)req_buf->virt) + 1);
6122
6123 /* PRLI type payload is at byte 3 for FCP or NVME. */
6124 prli_fc4_req = be32_to_cpu(*req_payload);
6125 prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
6126 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6127 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n",
6128 prli_fc4_req, *((uint32_t *)req_payload));
6129
6130 if (prli_fc4_req == PRLI_FCP_TYPE) {
6131 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
6132 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
6133 } else if (prli_fc4_req == PRLI_NVME_TYPE) {
6134 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
6135 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
6136 } else {
6137 return 1;
6138 }
6139
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, elsrspcmd);
6142 if (!elsiocb)
6143 return 1;
6144
6145 if (phba->sli_rev == LPFC_SLI_REV4) {
6146 wqe = &elsiocb->wqe;
6147 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6148 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
		ulp_context = get_job_ulpcontext(phba, elsiocb);
6150 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6151 get_job_rcvoxid(phba, oldiocb));
6152 } else {
6153 icmd = &elsiocb->iocb;
6154 oldcmd = &oldiocb->iocb;
6155 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6156 ulp_context = elsiocb->iocb.ulpContext;
6157 icmd->unsli3.rcvsli3.ox_id =
6158 oldcmd->unsli3.rcvsli3.ox_id;
6159 }
6160
6161 /* Xmit PRLI ACC response tag <ulpIoTag> */
6162 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6163 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
6164 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
6165 elsiocb->iotag, ulp_context,
6166 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6167 ndlp->nlp_rpi);
6168 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6169 memset(pcmd, 0, cmdsize);
6170
6171 *((uint32_t *)(pcmd)) = elsrspcmd;
6172 pcmd += sizeof(uint32_t);
6173
6174 /* For PRLI, remainder of payload is PRLI parameter page */
6175 vpd = &phba->vpd;
6176
6177 if (prli_fc4_req == PRLI_FCP_TYPE) {
6178 /*
6179 * If the remote port is a target and our firmware version
6180 * is 3.20 or later, set the following bits for FC-TAPE
6181 * support.
6182 */
6183 npr = (PRLI *) pcmd;
6184 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
6185 (vpd->rev.feaLevelHigh >= 0x02)) {
6186 npr->ConfmComplAllowed = 1;
6187 npr->Retry = 1;
6188 npr->TaskRetryIdReq = 1;
6189 }
6190 npr->acceptRspCode = PRLI_REQ_EXECUTED;
6191
6192 /* Set image pair for complementary pairs only. */
6193 if (ndlp->nlp_type & NLP_FCP_TARGET)
6194 npr->estabImagePair = 1;
6195 else
6196 npr->estabImagePair = 0;
6197 npr->readXferRdyDis = 1;
6198 npr->ConfmComplAllowed = 1;
6199 npr->prliType = PRLI_FCP_TYPE;
6200 npr->initiatorFunc = 1;
6201
6202 /* Xmit PRLI ACC response tag <ulpIoTag> */
6203 lpfc_printf_vlog(vport, KERN_INFO,
6204 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
6205 "6014 FCP issue PRLI ACC imgpair %d "
6206 "retry %d task %d\n",
6207 npr->estabImagePair,
6208 npr->Retry, npr->TaskRetryIdReq);
6209
6210 } else if (prli_fc4_req == PRLI_NVME_TYPE) {
6211 /* Respond with an NVME PRLI Type */
6212 npr_nvme = (struct lpfc_nvme_prli *) pcmd;
6213 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
6214 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
6215 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
6216 if (phba->nvmet_support) {
6217 bf_set(prli_tgt, npr_nvme, 1);
6218 bf_set(prli_disc, npr_nvme, 1);
6219 if (phba->cfg_nvme_enable_fb) {
6220 bf_set(prli_fba, npr_nvme, 1);
6221
6222 /* TBD. Target mode needs to post buffers
6223 * that support the configured first burst
6224 * byte size.
6225 */
6226 bf_set(prli_fb_sz, npr_nvme,
6227 phba->cfg_nvmet_fb_size);
6228 }
6229 } else {
6230 bf_set(prli_init, npr_nvme, 1);
6231 }
6232
6233 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
6234 "6015 NVME issue PRLI ACC word1 x%08x "
6235 "word4 x%08x word5 x%08x flag x%x, "
6236 "fcp_info x%x nlp_type x%x\n",
6237 npr_nvme->word1, npr_nvme->word4,
6238 npr_nvme->word5, ndlp->nlp_flag,
6239 ndlp->nlp_fcp_info, ndlp->nlp_type);
6240 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
6241 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
6242 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5);
6243 } else
6244 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6245 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
6246 prli_fc4_req, ndlp->nlp_fc4_type,
6247 ndlp->nlp_DID);
6248
6249 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6250 "Issue ACC PRLI: did:x%x flg:x%x",
			      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6252
6253 phba->fc_stat.elsXmitACC++;
6254 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6255 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6256 if (!elsiocb->ndlp) {
6257 lpfc_els_free_iocb(phba, elsiocb);
6258 return 1;
6259 }
6260
6261 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6262 if (rc == IOCB_ERROR) {
6263 lpfc_els_free_iocb(phba, elsiocb);
6264 lpfc_nlp_put(ndlp);
6265 return 1;
6266 }
6267
6268 return 0;
6269}
6270
6271/**
6272 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
6273 * @vport: pointer to a virtual N_Port data structure.
6274 * @format: rnid command format.
6275 * @oldiocb: pointer to the original lpfc command iocb data structure.
6276 * @ndlp: pointer to a node-list data structure.
6277 *
6278 * This routine issues a Request Node Identification Data (RNID) Accept
6279 * (ACC) response. It constructs the RNID ACC response command according to
6280 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
6281 * issue the response.
6282 *
6283 * Note that the ndlp reference count will be incremented by 1 for holding the
6284 * ndlp and the reference to ndlp will be stored into the ndlp field of
6285 * the IOCB for the completion callback function.
6286 *
6287 * Return code
6288 * 0 - Successfully issued acc rnid response
6289 * 1 - Failed to issue acc rnid response
6290 **/
6291static int
6292lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
6293 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
6294{
6295 struct lpfc_hba *phba = vport->phba;
6296 RNID *rn;
6297 IOCB_t *icmd, *oldcmd;
6298 union lpfc_wqe128 *wqe;
6299 struct lpfc_iocbq *elsiocb;
6300 uint8_t *pcmd;
6301 uint16_t cmdsize;
6302 int rc;
6303 u32 ulp_context;
6304
6305 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
6306 + (2 * sizeof(struct lpfc_name));
6307 if (format)
6308 cmdsize += sizeof(RNID_TOP_DISC);
6309
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_ACC);
6312 if (!elsiocb)
6313 return 1;
6314
6315 if (phba->sli_rev == LPFC_SLI_REV4) {
6316 wqe = &elsiocb->wqe;
6317 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6318 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
		ulp_context = get_job_ulpcontext(phba, elsiocb);
6320 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6321 get_job_rcvoxid(phba, oldiocb));
6322 } else {
6323 icmd = &elsiocb->iocb;
6324 oldcmd = &oldiocb->iocb;
6325 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6326 ulp_context = elsiocb->iocb.ulpContext;
6327 icmd->unsli3.rcvsli3.ox_id =
6328 oldcmd->unsli3.rcvsli3.ox_id;
6329 }
6330
6331 /* Xmit RNID ACC response tag <ulpIoTag> */
6332 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6333 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
6334 elsiocb->iotag, ulp_context);
6335 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6336 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6337 pcmd += sizeof(uint32_t);
6338
6339 memset(pcmd, 0, sizeof(RNID));
6340 rn = (RNID *) (pcmd);
6341 rn->Format = format;
6342 rn->CommonLen = (2 * sizeof(struct lpfc_name));
6343 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
6344 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
6345 switch (format) {
6346 case 0:
6347 rn->SpecificLen = 0;
6348 break;
6349 case RNID_TOPOLOGY_DISC:
6350 rn->SpecificLen = sizeof(RNID_TOP_DISC);
6351 memcpy(&rn->un.topologyDisc.portName,
6352 &vport->fc_portname, sizeof(struct lpfc_name));
6353 rn->un.topologyDisc.unitType = RNID_HBA;
6354 rn->un.topologyDisc.physPort = 0;
6355 rn->un.topologyDisc.attachedNodes = 0;
6356 break;
6357 default:
6358 rn->CommonLen = 0;
6359 rn->SpecificLen = 0;
6360 break;
6361 }
6362
6363 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6364 "Issue ACC RNID: did:x%x flg:x%x refcnt %d",
			      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6366
6367 phba->fc_stat.elsXmitACC++;
6368 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6369 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6370 if (!elsiocb->ndlp) {
6371 lpfc_els_free_iocb(phba, elsiocb);
6372 return 1;
6373 }
6374
6375 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6376 if (rc == IOCB_ERROR) {
6377 lpfc_els_free_iocb(phba, elsiocb);
6378 lpfc_nlp_put(ndlp);
6379 return 1;
6380 }
6381
6382 return 0;
6383}
6384
6385/**
 * lpfc_els_clear_rrq - Clear the rrq that this received RRQ describes.
6387 * @vport: pointer to a virtual N_Port data structure.
6388 * @iocb: pointer to the lpfc command iocb data structure.
6389 * @ndlp: pointer to a node-list data structure.
6390 *
 * Return: none
6392 **/
6393static void
6394lpfc_els_clear_rrq(struct lpfc_vport *vport,
6395 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
6396{
6397 struct lpfc_hba *phba = vport->phba;
6398 uint8_t *pcmd;
6399 struct RRQ *rrq;
6400 uint16_t rxid;
6401 uint16_t xri;
6402 struct lpfc_node_rrq *prrq;
6403
6404
6405 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt;
6406 pcmd += sizeof(uint32_t);
6407 rrq = (struct RRQ *)pcmd;
6408 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
6409 rxid = bf_get(rrq_rxid, rrq);
6410
6411 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6412 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
6413 " x%x x%x\n",
6414 be32_to_cpu(bf_get(rrq_did, rrq)),
6415 bf_get(rrq_oxid, rrq),
6416 rxid,
6417 get_wqe_reqtag(iocb),
6418 get_job_ulpcontext(phba, iocb));
6419
6420 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6421 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
6422 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
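	/* If this port originated the exchange named by the RRQ, its local
	 * XRI is the OX_ID; otherwise it is the locally assigned RX_ID.
	 */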
6423 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
6424 xri = bf_get(rrq_oxid, rrq);
6425 else
6426 xri = rxid;
6427 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
6428 if (prrq)
6429 lpfc_clr_rrq_active(phba, xri, prrq);
6430 return;
6431}
6432
6433/**
6434 * lpfc_els_rsp_echo_acc - Issue echo acc response
6435 * @vport: pointer to a virtual N_Port data structure.
6436 * @data: pointer to echo data to return in the accept.
6437 * @oldiocb: pointer to the original lpfc command iocb data structure.
6438 * @ndlp: pointer to a node-list data structure.
6439 *
6440 * Return code
6441 * 0 - Successfully issued acc echo response
6442 * 1 - Failed to issue acc echo response
6443 **/
6444static int
6445lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
6446 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
6447{
6448 struct lpfc_hba *phba = vport->phba;
6449 IOCB_t *icmd, *oldcmd;
6450 union lpfc_wqe128 *wqe;
6451 struct lpfc_iocbq *elsiocb;
6452 uint8_t *pcmd;
6453 uint16_t cmdsize;
6454 int rc;
6455 u32 ulp_context;
6456
6457 if (phba->sli_rev == LPFC_SLI_REV4)
6458 cmdsize = oldiocb->wcqe_cmpl.total_data_placed;
6459 else
6460 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
6461
6462 /* The accumulated length can exceed the BPL_SIZE. For
6463 * now, use this as the limit
6464 */
6465 if (cmdsize > LPFC_BPL_SIZE)
6466 cmdsize = LPFC_BPL_SIZE;
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_ACC);
6469 if (!elsiocb)
6470 return 1;
6471
6472 if (phba->sli_rev == LPFC_SLI_REV4) {
6473 wqe = &elsiocb->wqe;
6474 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6475 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
		ulp_context = get_job_ulpcontext(phba, elsiocb);
6477 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6478 get_job_rcvoxid(phba, oldiocb));
6479 } else {
6480 icmd = &elsiocb->iocb;
6481 oldcmd = &oldiocb->iocb;
6482 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6483 ulp_context = elsiocb->iocb.ulpContext;
6484 icmd->unsli3.rcvsli3.ox_id =
6485 oldcmd->unsli3.rcvsli3.ox_id;
6486 }
6487
6488 /* Xmit ECHO ACC response tag <ulpIoTag> */
6489 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6490 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
6491 elsiocb->iotag, ulp_context);
6492 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6493 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6494 pcmd += sizeof(uint32_t);
6495 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
6496
6497 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6498 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
			      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6500
6501 phba->fc_stat.elsXmitACC++;
6502 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6503 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6504 if (!elsiocb->ndlp) {
6505 lpfc_els_free_iocb(phba, elsiocb);
6506 return 1;
6507 }
6508
6509 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6510 if (rc == IOCB_ERROR) {
6511 lpfc_els_free_iocb(phba, elsiocb);
6512 lpfc_nlp_put(ndlp);
6513 return 1;
6514 }
6515
6516 return 0;
6517}
6518
6519/**
6520 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
6521 * @vport: pointer to a host virtual N_Port data structure.
6522 *
6523 * This routine issues Address Discover (ADISC) ELS commands to those
6524 * N_Ports which are in node port recovery state and ADISC has not been issued
6525 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
 * lpfc_issue_els_adisc() routine, the per-@vport discover count
 * (num_disc_nodes) shall be incremented. If num_disc_nodes reaches a
 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
 * be marked with the FC_NLP_MORE bit and issuing of the remaining ADISC
 * IOCBs is deferred for later pick up. On the other hand, if no ADISC IOCB
 * is issued after walking through all the ndlps on the @vport, the
 * FC_NLP_MORE bit in the @vport fc_flag shall be cleared, indicating there
 * are no more ADISCs to be sent.
6534 *
6535 * Return code
6536 * The number of N_Ports with adisc issued.
6537 **/
6538int
6539lpfc_els_disc_adisc(struct lpfc_vport *vport)
6540{
6541 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6542 struct lpfc_nodelist *ndlp, *next_ndlp;
6543 int sentadisc = 0;
6544
6545 /* go thru NPR nodes and issue any remaining ELS ADISCs */
6546 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
6547
6548 if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
6549 !(ndlp->nlp_flag & NLP_NPR_ADISC))
6550 continue;
6551
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(&ndlp->lock);
6555
6556 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
6557 /* This node was marked for ADISC but was not picked
6558 * for discovery. This is possible if the node was
6559 * missing in gidft response.
6560 *
6561 * At time of marking node for ADISC, we skipped unreg
6562 * from backend
6563 */
6564 lpfc_nlp_unreg_node(vport, ndlp);
6565 lpfc_unreg_rpi(vport, ndlp);
6566 continue;
6567 }
6568
6569 ndlp->nlp_prev_state = ndlp->nlp_state;
6570 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
		lpfc_issue_els_adisc(vport, ndlp, 0);
6572 sentadisc++;
6573 vport->num_disc_nodes++;
6574 if (vport->num_disc_nodes >=
6575 vport->cfg_discovery_threads) {
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_NLP_MORE;
			spin_unlock_irq(shost->host_lock);
6579 break;
6580 }
6581
6582 }
6583 if (sentadisc == 0) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NLP_MORE;
		spin_unlock_irq(shost->host_lock);
6587 }
6588 return sentadisc;
6589}
6590
6591/**
6592 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
6593 * @vport: pointer to a host virtual N_Port data structure.
6594 *
6595 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
 * which are in node port recovery state on a @vport. Each time an ELS
 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
 * the per-@vport discover count (num_disc_nodes) shall be
 * incremented. If num_disc_nodes reaches a pre-configured threshold
 * (cfg_discovery_threads), the @vport fc_flag will be marked with the
 * FC_NLP_MORE bit and issuing of the remaining PLOGI IOCBs is deferred for
 * later pick up. On the other hand, if no PLOGI IOCB is issued after
 * walking through all the ndlps on the @vport, the FC_NLP_MORE bit in the
 * @vport fc_flag shall be cleared, indicating there are no more PLOGIs to
 * be sent.
6606 *
6607 * Return code
6608 * The number of N_Ports with plogi issued.
6609 **/
6610int
6611lpfc_els_disc_plogi(struct lpfc_vport *vport)
6612{
6613 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6614 struct lpfc_nodelist *ndlp, *next_ndlp;
6615 int sentplogi = 0;
6616
6617 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
6618 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
6619 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
6620 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
6621 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
6622 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
6623 ndlp->nlp_prev_state = ndlp->nlp_state;
6624 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
6626 sentplogi++;
6627 vport->num_disc_nodes++;
6628 if (vport->num_disc_nodes >=
6629 vport->cfg_discovery_threads) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag |= FC_NLP_MORE;
				spin_unlock_irq(shost->host_lock);
6633 break;
6634 }
6635 }
6636 }
6637
6638 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6639 "6452 Discover PLOGI %d flag x%x\n",
6640 sentplogi, vport->fc_flag);
6641
	if (sentplogi) {
		lpfc_set_disctmo(vport);
	} else {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NLP_MORE;
		spin_unlock_irq(shost->host_lock);
	}
6650 return sentplogi;
6651}
6652
6653static uint32_t
6654lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
6655 uint32_t word0)
6656{
6657
6658 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
6659 desc->payload.els_req = word0;
6660 desc->length = cpu_to_be32(sizeof(desc->payload));
6661
6662 return sizeof(struct fc_rdp_link_service_desc);
6663}
6664
6665static uint32_t
6666lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
6667 uint8_t *page_a0, uint8_t *page_a2)
6668{
6669 uint16_t wavelength;
6670 uint16_t temperature;
6671 uint16_t rx_power;
6672 uint16_t tx_bias;
6673 uint16_t tx_power;
6674 uint16_t vcc;
6675 uint16_t flag = 0;
6676 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
6677 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
6678
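	/* page_a0/page_a2 are assumed to hold the cached SFF-8472 lower
	 * memory pages: A0h (identification and type codes) and A2h
	 * (diagnostic monitoring values and thresholds).
	 */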
6679 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
6680
6681 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
6682 &page_a0[SSF_TRANSCEIVER_CODE_B4];
6683 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
6684 &page_a0[SSF_TRANSCEIVER_CODE_B5];
6685
6686 if ((trasn_code_byte4->fc_sw_laser) ||
6687 (trasn_code_byte5->fc_sw_laser_sl) ||
6688 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */
6689 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
6690 } else if (trasn_code_byte4->fc_lw_laser) {
6691 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
6692 page_a0[SSF_WAVELENGTH_B0];
6693 if (wavelength == SFP_WAVELENGTH_LC1310)
6694 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
6695 if (wavelength == SFP_WAVELENGTH_LL1550)
6696 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
6697 }
6698 /* check if its SFP+ */
6699 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
6700 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
6701 << SFP_FLAG_CT_SHIFT;
6702
6703 /* check if its OPTICAL */
6704 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
6705 SFP_FLAG_IS_OPTICAL_PORT : 0)
6706 << SFP_FLAG_IS_OPTICAL_SHIFT;
6707
6708 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
6709 page_a2[SFF_TEMPERATURE_B0]);
6710 vcc = (page_a2[SFF_VCC_B1] << 8 |
6711 page_a2[SFF_VCC_B0]);
6712 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
6713 page_a2[SFF_TXPOWER_B0]);
6714 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
6715 page_a2[SFF_TX_BIAS_CURRENT_B0]);
6716 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
6717 page_a2[SFF_RXPOWER_B0]);
6718 desc->sfp_info.temperature = cpu_to_be16(temperature);
6719 desc->sfp_info.rx_power = cpu_to_be16(rx_power);
6720 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
6721 desc->sfp_info.tx_power = cpu_to_be16(tx_power);
6722 desc->sfp_info.vcc = cpu_to_be16(vcc);
6723
6724 desc->sfp_info.flags = cpu_to_be16(flag);
6725 desc->length = cpu_to_be32(sizeof(desc->sfp_info));
6726
6727 return sizeof(struct fc_rdp_sfp_desc);
6728}
6729
6730static uint32_t
6731lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
6732 READ_LNK_VAR *stat)
6733{
6734 uint32_t type;
6735
6736 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
6737
6738 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
6739
6740 desc->info.port_type = cpu_to_be32(type);
6741
6742 desc->info.link_status.link_failure_cnt =
6743 cpu_to_be32(stat->linkFailureCnt);
6744 desc->info.link_status.loss_of_synch_cnt =
6745 cpu_to_be32(stat->lossSyncCnt);
6746 desc->info.link_status.loss_of_signal_cnt =
6747 cpu_to_be32(stat->lossSignalCnt);
6748 desc->info.link_status.primitive_seq_proto_err =
6749 cpu_to_be32(stat->primSeqErrCnt);
6750 desc->info.link_status.invalid_trans_word =
6751 cpu_to_be32(stat->invalidXmitWord);
6752 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
6753
6754 desc->length = cpu_to_be32(sizeof(desc->info));
6755
6756 return sizeof(struct fc_rdp_link_error_status_desc);
6757}
6758
6759static uint32_t
6760lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
6761 struct lpfc_vport *vport)
6762{
6763 uint32_t bbCredit;
6764
6765 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
6766
6767 bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
6768 (vport->fc_sparam.cmn.bbCreditMsb << 8);
6769 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
6770 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
6771 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
6772 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
6773 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
6774 } else {
6775 desc->bbc_info.attached_port_bbc = 0;
6776 }
6777
6778 desc->bbc_info.rtt = 0;
6779 desc->length = cpu_to_be32(sizeof(desc->bbc_info));
6780
6781 return sizeof(struct fc_rdp_bbc_desc);
6782}
6783
6784static uint32_t
6785lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
6786 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
6787{
6788 uint32_t flags = 0;
6789
6790 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6791
6792 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
6793 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
6794 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
6795 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
6796
6797 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
6798 flags |= RDP_OET_HIGH_ALARM;
6799 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
6800 flags |= RDP_OET_LOW_ALARM;
6801 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
6802 flags |= RDP_OET_HIGH_WARNING;
6803 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
6804 flags |= RDP_OET_LOW_WARNING;
6805
6806 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
6807 desc->oed_info.function_flags = cpu_to_be32(flags);
6808 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6809 return sizeof(struct fc_rdp_oed_sfp_desc);
6810}
6811
6812static uint32_t
6813lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
6814 struct fc_rdp_oed_sfp_desc *desc,
6815 uint8_t *page_a2)
6816{
6817 uint32_t flags = 0;
6818
6819 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6820
6821 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
6822 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
6823 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
6824 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
6825
6826 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
6827 flags |= RDP_OET_HIGH_ALARM;
6828 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
6829 flags |= RDP_OET_LOW_ALARM;
6830 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
6831 flags |= RDP_OET_HIGH_WARNING;
6832 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
6833 flags |= RDP_OET_LOW_WARNING;
6834
6835 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
6836 desc->oed_info.function_flags = cpu_to_be32(flags);
6837 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6838 return sizeof(struct fc_rdp_oed_sfp_desc);
6839}
6840
6841static uint32_t
6842lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
6843 struct fc_rdp_oed_sfp_desc *desc,
6844 uint8_t *page_a2)
6845{
6846 uint32_t flags = 0;
6847
6848 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6849
6850 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
6851 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
6852 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
6853 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
6854
6855 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
6856 flags |= RDP_OET_HIGH_ALARM;
6857 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
6858 flags |= RDP_OET_LOW_ALARM;
6859 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
6860 flags |= RDP_OET_HIGH_WARNING;
6861 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
6862 flags |= RDP_OET_LOW_WARNING;
6863
6864 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
6865 desc->oed_info.function_flags = cpu_to_be32(flags);
6866 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6867 return sizeof(struct fc_rdp_oed_sfp_desc);
6868}
6869
6870static uint32_t
6871lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
6872 struct fc_rdp_oed_sfp_desc *desc,
6873 uint8_t *page_a2)
6874{
6875 uint32_t flags = 0;
6876
6877 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6878
6879 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
6880 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
6881 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
6882 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
6883
6884 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
6885 flags |= RDP_OET_HIGH_ALARM;
6886 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
6887 flags |= RDP_OET_LOW_ALARM;
6888 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
6889 flags |= RDP_OET_HIGH_WARNING;
6890 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
6891 flags |= RDP_OET_LOW_WARNING;
6892
6893 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
6894 desc->oed_info.function_flags = cpu_to_be32(flags);
6895 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6896 return sizeof(struct fc_rdp_oed_sfp_desc);
6897}
6898
6899
6900static uint32_t
6901lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
6902 struct fc_rdp_oed_sfp_desc *desc,
6903 uint8_t *page_a2)
6904{
6905 uint32_t flags = 0;
6906
6907 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6908
6909 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
6910 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
6911 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
6912 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
6913
6914 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
6915 flags |= RDP_OET_HIGH_ALARM;
6916 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
6917 flags |= RDP_OET_LOW_ALARM;
6918 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
6919 flags |= RDP_OET_HIGH_WARNING;
6920 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
6921 flags |= RDP_OET_LOW_WARNING;
6922
6923 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
6924 desc->oed_info.function_flags = cpu_to_be32(flags);
6925 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6926 return sizeof(struct fc_rdp_oed_sfp_desc);
6927}
6928
6929static uint32_t
6930lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
6931 uint8_t *page_a0, struct lpfc_vport *vport)
6932{
6933 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
6934 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
6935 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
6936 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
6937 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4);
6938 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
6939 desc->length = cpu_to_be32(sizeof(desc->opd_info));
6940 return sizeof(struct fc_rdp_opd_sfp_desc);
6941}
6942
6943static uint32_t
6944lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
6945{
6946 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
6947 return 0;
6948 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG);
6949
6950 desc->info.CorrectedBlocks =
6951 cpu_to_be32(stat->fecCorrBlkCount);
6952 desc->info.UncorrectableBlocks =
6953 cpu_to_be32(stat->fecUncorrBlkCount);
6954
6955 desc->length = cpu_to_be32(sizeof(desc->info));
6956
6957 return sizeof(struct fc_fec_rdp_desc);
6958}
6959
6960static uint32_t
6961lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
6962{
6963 uint16_t rdp_cap = 0;
6964 uint16_t rdp_speed;
6965
6966 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
6967
6968 switch (phba->fc_linkspeed) {
6969 case LPFC_LINK_SPEED_1GHZ:
6970 rdp_speed = RDP_PS_1GB;
6971 break;
6972 case LPFC_LINK_SPEED_2GHZ:
6973 rdp_speed = RDP_PS_2GB;
6974 break;
6975 case LPFC_LINK_SPEED_4GHZ:
6976 rdp_speed = RDP_PS_4GB;
6977 break;
6978 case LPFC_LINK_SPEED_8GHZ:
6979 rdp_speed = RDP_PS_8GB;
6980 break;
6981 case LPFC_LINK_SPEED_10GHZ:
6982 rdp_speed = RDP_PS_10GB;
6983 break;
6984 case LPFC_LINK_SPEED_16GHZ:
6985 rdp_speed = RDP_PS_16GB;
6986 break;
6987 case LPFC_LINK_SPEED_32GHZ:
6988 rdp_speed = RDP_PS_32GB;
6989 break;
6990 case LPFC_LINK_SPEED_64GHZ:
6991 rdp_speed = RDP_PS_64GB;
6992 break;
6993 case LPFC_LINK_SPEED_128GHZ:
6994 rdp_speed = RDP_PS_128GB;
6995 break;
6996 case LPFC_LINK_SPEED_256GHZ:
6997 rdp_speed = RDP_PS_256GB;
6998 break;
6999 default:
7000 rdp_speed = RDP_PS_UNKNOWN;
7001 break;
7002 }
7003
7004 desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
7005
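	/* Advertised speed capabilities are derived from the adapter's link
	 * module type mask (phba->lmt), one RDP_PS_* bit per supported speed.
	 */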
7006 if (phba->lmt & LMT_256Gb)
7007 rdp_cap |= RDP_PS_256GB;
7008 if (phba->lmt & LMT_128Gb)
7009 rdp_cap |= RDP_PS_128GB;
7010 if (phba->lmt & LMT_64Gb)
7011 rdp_cap |= RDP_PS_64GB;
7012 if (phba->lmt & LMT_32Gb)
7013 rdp_cap |= RDP_PS_32GB;
7014 if (phba->lmt & LMT_16Gb)
7015 rdp_cap |= RDP_PS_16GB;
7016 if (phba->lmt & LMT_10Gb)
7017 rdp_cap |= RDP_PS_10GB;
7018 if (phba->lmt & LMT_8Gb)
7019 rdp_cap |= RDP_PS_8GB;
7020 if (phba->lmt & LMT_4Gb)
7021 rdp_cap |= RDP_PS_4GB;
7022 if (phba->lmt & LMT_2Gb)
7023 rdp_cap |= RDP_PS_2GB;
7024 if (phba->lmt & LMT_1Gb)
7025 rdp_cap |= RDP_PS_1GB;
7026
7027 if (rdp_cap == 0)
7028 rdp_cap = RDP_CAP_UNKNOWN;
7029 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
7030 rdp_cap |= RDP_CAP_USER_CONFIGURED;
7031
7032 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
7033 desc->length = cpu_to_be32(sizeof(desc->info));
7034 return sizeof(struct fc_rdp_port_speed_desc);
7035}
7036
7037static uint32_t
7038lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
7039 struct lpfc_vport *vport)
7040{
7041
7042 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
7043
7044 memcpy(desc->port_names.wwnn, &vport->fc_nodename,
7045 sizeof(desc->port_names.wwnn));
7046
7047 memcpy(desc->port_names.wwpn, &vport->fc_portname,
7048 sizeof(desc->port_names.wwpn));
7049
7050 desc->length = cpu_to_be32(sizeof(desc->port_names));
7051 return sizeof(struct fc_rdp_port_name_desc);
7052}
7053
7054static uint32_t
7055lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
7056 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
7057{
7058
7059 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
7060 if (vport->fc_flag & FC_FABRIC) {
7061 memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
7062 sizeof(desc->port_names.wwnn));
7063
7064 memcpy(desc->port_names.wwpn, &vport->fabric_portname,
7065 sizeof(desc->port_names.wwpn));
7066 } else { /* Point to Point */
7067 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
7068 sizeof(desc->port_names.wwnn));
7069
7070 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
7071 sizeof(desc->port_names.wwpn));
7072 }
7073
7074 desc->length = cpu_to_be32(sizeof(desc->port_names));
7075 return sizeof(struct fc_rdp_port_name_desc);
7076}
7077
7078static void
7079lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
7080 int status)
7081{
7082 struct lpfc_nodelist *ndlp = rdp_context->ndlp;
7083 struct lpfc_vport *vport = ndlp->vport;
7084 struct lpfc_iocbq *elsiocb;
7085 struct ulp_bde64 *bpl;
7086 IOCB_t *icmd;
7087 union lpfc_wqe128 *wqe;
7088 uint8_t *pcmd;
7089 struct ls_rjt *stat;
7090 struct fc_rdp_res_frame *rdp_res;
7091 uint32_t cmdsize, len;
7092 uint16_t *flag_ptr;
7093 int rc;
7094 u32 ulp_context;
7095
7096 if (status != SUCCESS)
7097 goto error;
7098
7099 /* This will change once we know the true size of the RDP payload */
7100 cmdsize = sizeof(struct fc_rdp_res_frame);
7101
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
				     lpfc_max_els_tries, rdp_context->ndlp,
				     rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
7105 if (!elsiocb)
7106 goto free_rdp_context;
7107
	ulp_context = get_job_ulpcontext(phba, elsiocb);
7109 if (phba->sli_rev == LPFC_SLI_REV4) {
7110 wqe = &elsiocb->wqe;
7111 /* ox-id of the frame */
7112 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7113 rdp_context->ox_id);
7114 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
7115 rdp_context->rx_id);
7116 } else {
7117 icmd = &elsiocb->iocb;
7118 icmd->ulpContext = rdp_context->rx_id;
7119 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
7120 }
7121
7122 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7123 "2171 Xmit RDP response tag x%x xri x%x, "
7124 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
7125 elsiocb->iotag, ulp_context,
7126 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7127 ndlp->nlp_rpi);
7128 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt;
7129 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7130 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
7131 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7132
7133 /* Update Alarm and Warning */
7134 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
7135 phba->sfp_alarm |= *flag_ptr;
7136 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
7137 phba->sfp_warning |= *flag_ptr;
7138
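	/* Response layout sketch (sizes per the descriptor builders below):
	 *   word 0: ELS_CMD_ACC
	 *   word 1: descriptor list length, filled in later as (len - 8)
	 *   word 2..: link service, SFP, port speed, link error, port name,
	 *             attached port name, FEC, BBC and OED/OPD descriptors
	 * Descriptors therefore start 8 bytes into the payload.
	 */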
7139 /* For RDP payload */
7140 len = 8;
	len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
					 (len + pcmd), ELS_CMD_RDP);

	len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
				     rdp_context->page_a0,
				     rdp_context->page_a2);
	len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
				  phba);
	len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
				       (len + pcmd), &rdp_context->link_stat);
	len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
					    (len + pcmd), vport);
	len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
					      (len + pcmd), vport, ndlp);
	len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
				     &rdp_context->link_stat);
	len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
				     &rdp_context->link_stat, vport);
	len += lpfc_rdp_res_oed_temp_desc(phba,
					  (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
					  rdp_context->page_a2);
	len += lpfc_rdp_res_oed_voltage_desc(phba,
					     (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
					     rdp_context->page_a2);
	len += lpfc_rdp_res_oed_txbias_desc(phba,
					    (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
					    rdp_context->page_a2);
	len += lpfc_rdp_res_oed_txpower_desc(phba,
					     (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
					     rdp_context->page_a2);
	len += lpfc_rdp_res_oed_rxpower_desc(phba,
					     (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
					     rdp_context->page_a2);
	len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
				     rdp_context->page_a0, vport);
7175
7176 rdp_res->length = cpu_to_be32(len - 8);
7177 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7178
7179 /* Now that we know the true size of the payload, update the BPL */
7180 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt;
7181 bpl->tus.f.bdeSize = len;
7182 bpl->tus.f.bdeFlags = 0;
7183 bpl->tus.w = le32_to_cpu(bpl->tus.w);
7184
7185 phba->fc_stat.elsXmitACC++;
7186 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7187 if (!elsiocb->ndlp) {
7188 lpfc_els_free_iocb(phba, elsiocb);
7189 goto free_rdp_context;
7190 }
7191
7192 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7193 if (rc == IOCB_ERROR) {
7194 lpfc_els_free_iocb(phba, elsiocb);
7195 lpfc_nlp_put(ndlp);
7196 }
7197
7198 goto free_rdp_context;
7199
7200error:
7201 cmdsize = 2 * sizeof(uint32_t);
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
				     ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
7204 if (!elsiocb)
7205 goto free_rdp_context;
7206
7207 if (phba->sli_rev == LPFC_SLI_REV4) {
7208 wqe = &elsiocb->wqe;
7209 /* ox-id of the frame */
7210 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7211 rdp_context->ox_id);
7212 bf_set(wqe_ctxt_tag,
7213 &wqe->xmit_els_rsp.wqe_com,
7214 rdp_context->rx_id);
7215 } else {
7216 icmd = &elsiocb->iocb;
7217 icmd->ulpContext = rdp_context->rx_id;
7218 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
7219 }
7220
7221 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7222
7223 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
7224 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
7225 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7226
7227 phba->fc_stat.elsXmitLSRJT++;
7228 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7229 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7230 if (!elsiocb->ndlp) {
7231 lpfc_els_free_iocb(phba, elsiocb);
7232 goto free_rdp_context;
7233 }
7234
7235 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7236 if (rc == IOCB_ERROR) {
7237 lpfc_els_free_iocb(phba, elsiocb);
7238 lpfc_nlp_put(ndlp);
7239 }
7240
7241free_rdp_context:
7242 /* This reference put is for the original unsolicited RDP. If the
7243 * prep failed, there is no reference to remove.
7244 */
7245 lpfc_nlp_put(ndlp);
	kfree(rdp_context);
7247}
7248
7249static int
7250lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
7251{
7252 LPFC_MBOXQ_t *mbox = NULL;
7253 int rc;
7254
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7256 if (!mbox) {
7257 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
7258 "7105 failed to allocate mailbox memory");
7259 return 1;
7260 }
7261
7262 if (lpfc_sli4_dump_page_a0(phba, mbox))
7263 goto rdp_fail;
7264 mbox->vport = rdp_context->ndlp->vport;
7265 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
7266 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
7267 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7268 if (rc == MBX_NOT_FINISHED) {
		lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
7270 return 1;
7271 }
7272
7273 return 0;
7274
7275rdp_fail:
	mempool_free(mbox, phba->mbox_mem_pool);
7277 return 1;
7278}
7279
7280int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
7281 struct lpfc_rdp_context *rdp_context)
7282{
7283 LPFC_MBOXQ_t *mbox = NULL;
7284 int rc;
7285 struct lpfc_dmabuf *mp;
7286 struct lpfc_dmabuf *mpsave;
7287 void *virt;
7288 MAILBOX_t *mb;
7289
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7291 if (!mbox) {
7292 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
7293 "7205 failed to allocate mailbox memory");
7294 return 1;
7295 }
7296
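	/* This routine issues two synchronous DUMP_MEMORY mailbox commands,
	 * first for SFF page A0 and then, reusing the same mailbox and DMA
	 * buffer, for page A2, copying each page into the rdp_context.
	 */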
7297 if (lpfc_sli4_dump_page_a0(phba, mbox))
7298 goto sfp_fail;
7299 mp = mbox->ctx_buf;
7300 mpsave = mp;
7301 virt = mp->virt;
7302 if (phba->sli_rev < LPFC_SLI_REV4) {
7303 mb = &mbox->u.mb;
7304 mb->un.varDmp.cv = 1;
7305 mb->un.varDmp.co = 1;
7306 mb->un.varWords[2] = 0;
7307 mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4;
7308 mb->un.varWords[4] = 0;
7309 mb->un.varWords[5] = 0;
7310 mb->un.varWords[6] = 0;
7311 mb->un.varWords[7] = 0;
7312 mb->un.varWords[8] = 0;
7313 mb->un.varWords[9] = 0;
7314 mb->un.varWords[10] = 0;
7315 mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE;
7316 mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE;
7317 mbox->mbox_offset_word = 5;
7318 mbox->ctx_buf = virt;
7319 } else {
7320 bf_set(lpfc_mbx_memory_dump_type3_length,
7321 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
7322 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
7323 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
7324 }
7325 mbox->vport = phba->pport;
7326 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
7327
7328 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
7329 if (rc == MBX_NOT_FINISHED) {
7330 rc = 1;
7331 goto error;
7332 }
7333
7334 if (phba->sli_rev == LPFC_SLI_REV4)
7335 mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
7336 else
7337 mp = mpsave;
7338
7339 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
7340 rc = 1;
7341 goto error;
7342 }
7343
7344 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0,
7345 DMP_SFF_PAGE_A0_SIZE);
7346
7347 memset(mbox, 0, sizeof(*mbox));
7348 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE);
	INIT_LIST_HEAD(&mp->list);
7350
7351 /* save address for completion */
7352 mbox->ctx_buf = mp;
7353 mbox->vport = phba->pport;
7354
7355 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
7356 bf_set(lpfc_mbx_memory_dump_type3_type,
7357 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
7358 bf_set(lpfc_mbx_memory_dump_type3_link,
7359 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
7360 bf_set(lpfc_mbx_memory_dump_type3_page_no,
7361 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2);
7362 if (phba->sli_rev < LPFC_SLI_REV4) {
7363 mb = &mbox->u.mb;
7364 mb->un.varDmp.cv = 1;
7365 mb->un.varDmp.co = 1;
7366 mb->un.varWords[2] = 0;
7367 mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4;
7368 mb->un.varWords[4] = 0;
7369 mb->un.varWords[5] = 0;
7370 mb->un.varWords[6] = 0;
7371 mb->un.varWords[7] = 0;
7372 mb->un.varWords[8] = 0;
7373 mb->un.varWords[9] = 0;
7374 mb->un.varWords[10] = 0;
7375 mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
7376 mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
7377 mbox->mbox_offset_word = 5;
7378 mbox->ctx_buf = virt;
7379 } else {
7380 bf_set(lpfc_mbx_memory_dump_type3_length,
7381 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
7382 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
7383 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
7384 }
7385
7386 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
7387 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
7388 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
7389 rc = 1;
7390 goto error;
7391 }
7392 rc = 0;
7393
7394 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
7395 DMP_SFF_PAGE_A2_SIZE);
7396
7397error:
7398 mbox->ctx_buf = mpsave;
	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
7400
7401 return rc;
7402
7403sfp_fail:
	mempool_free(mbox, phba->mbox_mem_pool);
7405 return 1;
7406}
7407
/*
 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes an unsolicited RDP (Read Diagnostic Parameters)
 * IOCB. First, the payload of the unsolicited RDP is checked. Then it will
 * (1) send MBX_DUMP_MEMORY, embedded DMP_LMSD sub command TYPE-3, for page A0,
 * (2) send MBX_DUMP_MEMORY, DMP_LMSD, for page A2,
 * (3) send MBX_READ_LNK_STAT to get the link statistics, and
 * (4) call lpfc_els_rdp_cmpl to gather all of the data and send the RDP
 * response.
 *
 * Return code
 * 0 - Sent the acc response
 * 1 - Sent the reject response.
 */
7425static int
7426lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7427 struct lpfc_nodelist *ndlp)
7428{
7429 struct lpfc_hba *phba = vport->phba;
7430 struct lpfc_dmabuf *pcmd;
7431 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
7432 struct fc_rdp_req_frame *rdp_req;
7433 struct lpfc_rdp_context *rdp_context;
7434 union lpfc_wqe128 *cmd = NULL;
7435 struct ls_rjt stat;
7436
7437 if (phba->sli_rev < LPFC_SLI_REV4 ||
7438 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
7439 LPFC_SLI_INTF_IF_TYPE_2) {
7440 rjt_err = LSRJT_UNABLE_TPC;
7441 rjt_expl = LSEXP_REQ_UNSUPPORTED;
7442 goto error;
7443 }
7444
7445 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) {
7446 rjt_err = LSRJT_UNABLE_TPC;
7447 rjt_expl = LSEXP_REQ_UNSUPPORTED;
7448 goto error;
7449 }
7450
7451 pcmd = cmdiocb->cmd_dmabuf;
7452 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
7453
7454 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7455 "2422 ELS RDP Request "
7456 "dec len %d tag x%x port_id %d len %d\n",
7457 be32_to_cpu(rdp_req->rdp_des_length),
7458 be32_to_cpu(rdp_req->nport_id_desc.tag),
7459 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
7460 be32_to_cpu(rdp_req->nport_id_desc.length));
7461
7462 if (sizeof(struct fc_rdp_nport_desc) !=
7463 be32_to_cpu(rdp_req->rdp_des_length))
7464 goto rjt_logerr;
7465 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
7466 goto rjt_logerr;
7467 if (RDP_NPORT_ID_SIZE !=
7468 be32_to_cpu(rdp_req->nport_id_desc.length))
7469 goto rjt_logerr;
	rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
7471 if (!rdp_context) {
7472 rjt_err = LSRJT_UNABLE_TPC;
7473 goto error;
7474 }
7475
7476 cmd = &cmdiocb->wqe;
7477 rdp_context->ndlp = lpfc_nlp_get(ndlp);
7478 if (!rdp_context->ndlp) {
		kfree(rdp_context);
7480 rjt_err = LSRJT_UNABLE_TPC;
7481 goto error;
7482 }
7483 rdp_context->ox_id = bf_get(wqe_rcvoxid,
7484 &cmd->xmit_els_rsp.wqe_com);
7485 rdp_context->rx_id = bf_get(wqe_ctxt_tag,
7486 &cmd->xmit_els_rsp.wqe_com);
7487 rdp_context->cmpl = lpfc_els_rdp_cmpl;
7488 if (lpfc_get_rdp_info(phba, rdp_context)) {
7489 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
7490 "2423 Unable to send mailbox");
		kfree(rdp_context);
7492 rjt_err = LSRJT_UNABLE_TPC;
7493 lpfc_nlp_put(ndlp);
7494 goto error;
7495 }
7496
7497 return 0;
7498
7499rjt_logerr:
7500 rjt_err = LSRJT_LOGICAL_ERR;
7501
7502error:
7503 memset(&stat, 0, sizeof(stat));
7504 stat.un.b.lsRjtRsnCode = rjt_err;
7505 stat.un.b.lsRjtRsnCodeExp = rjt_expl;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7507 return 1;
7508}
7509
7510
7511static void
7512lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7513{
7514 MAILBOX_t *mb;
7515 IOCB_t *icmd;
7516 union lpfc_wqe128 *wqe;
7517 uint8_t *pcmd;
7518 struct lpfc_iocbq *elsiocb;
7519 struct lpfc_nodelist *ndlp;
7520 struct ls_rjt *stat;
7521 union lpfc_sli4_cfg_shdr *shdr;
7522 struct lpfc_lcb_context *lcb_context;
7523 struct fc_lcb_res_frame *lcb_res;
7524 uint32_t cmdsize, shdr_status, shdr_add_status;
7525 int rc;
7526
7527 mb = &pmb->u.mb;
7528 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp;
7529 ndlp = lcb_context->ndlp;
7530 pmb->ctx_ndlp = NULL;
7531 pmb->ctx_buf = NULL;
7532
7533 shdr = (union lpfc_sli4_cfg_shdr *)
7534 &pmb->u.mqe.un.beacon_config.header.cfg_shdr;
7535 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7536 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7537
7538 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
7539 "0194 SET_BEACON_CONFIG mailbox "
7540 "completed with status x%x add_status x%x,"
7541 " mbx status x%x\n",
7542 shdr_status, shdr_add_status, mb->mbxStatus);
7543
7544 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status ||
7545 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) ||
7546 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) {
		mempool_free(pmb, phba->mbox_mem_pool);
7548 goto error;
7549 }
7550
	mempool_free(pmb, phba->mbox_mem_pool);
	cmdsize = sizeof(struct fc_lcb_res_frame);
	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
				     lpfc_max_els_tries, ndlp,
				     ndlp->nlp_DID, ELS_CMD_ACC);
7556
7557 /* Decrement the ndlp reference count from previous mbox command */
7558 lpfc_nlp_put(ndlp);
7559
7560 if (!elsiocb)
7561 goto free_lcb_context;
7562
7563 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt;
7564
7565 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
7566
7567 if (phba->sli_rev == LPFC_SLI_REV4) {
7568 wqe = &elsiocb->wqe;
7569 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
7570 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7571 lcb_context->ox_id);
7572 } else {
7573 icmd = &elsiocb->iocb;
7574 icmd->ulpContext = lcb_context->rx_id;
7575 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
7576 }
7577
7578 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7579 *((uint32_t *)(pcmd)) = ELS_CMD_ACC;
7580 lcb_res->lcb_sub_command = lcb_context->sub_command;
7581 lcb_res->lcb_type = lcb_context->type;
7582 lcb_res->capability = lcb_context->capability;
7583 lcb_res->lcb_frequency = lcb_context->frequency;
7584 lcb_res->lcb_duration = lcb_context->duration;
7585 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7586 phba->fc_stat.elsXmitACC++;
7587
7588 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7589 if (!elsiocb->ndlp) {
7590 lpfc_els_free_iocb(phba, elsiocb);
7591 goto out;
7592 }
7593
7594 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7595 if (rc == IOCB_ERROR) {
7596 lpfc_els_free_iocb(phba, elsiocb);
7597 lpfc_nlp_put(ndlp);
7598 }
7599 out:
	kfree(lcb_context);
7601 return;
7602
7603error:
7604 cmdsize = sizeof(struct fc_lcb_res_frame);
	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
				     lpfc_max_els_tries, ndlp,
				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
7608 lpfc_nlp_put(ndlp);
7609 if (!elsiocb)
7610 goto free_lcb_context;
7611
7612 if (phba->sli_rev == LPFC_SLI_REV4) {
7613 wqe = &elsiocb->wqe;
7614 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
7615 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7616 lcb_context->ox_id);
7617 } else {
7618 icmd = &elsiocb->iocb;
7619 icmd->ulpContext = lcb_context->rx_id;
7620 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
7621 }
7622
7623 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7624
7625 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
7626 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
7627 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7628
7629 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
7630 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
7631
7632 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7633 phba->fc_stat.elsXmitLSRJT++;
7634 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7635 if (!elsiocb->ndlp) {
7636 lpfc_els_free_iocb(phba, elsiocb);
7637 goto free_lcb_context;
7638 }
7639
7640 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7641 if (rc == IOCB_ERROR) {
7642 lpfc_els_free_iocb(phba, elsiocb);
7643 lpfc_nlp_put(ndlp);
7644 }
7645free_lcb_context:
	kfree(lcb_context);
7647}
7648
7649static int
7650lpfc_sli4_set_beacon(struct lpfc_vport *vport,
7651 struct lpfc_lcb_context *lcb_context,
7652 uint32_t beacon_state)
7653{
7654 struct lpfc_hba *phba = vport->phba;
7655 union lpfc_sli4_cfg_shdr *cfg_shdr;
7656 LPFC_MBOXQ_t *mbox = NULL;
7657 uint32_t len;
7658 int rc;
7659
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7661 if (!mbox)
7662 return 1;
7663
7664 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
7665 len = sizeof(struct lpfc_mbx_set_beacon_config) -
7666 sizeof(struct lpfc_sli4_cfg_mhdr);
7667 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7668 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
7669 LPFC_SLI4_MBX_EMBED);
7670 mbox->ctx_ndlp = (void *)lcb_context;
7671 mbox->vport = phba->pport;
7672 mbox->mbox_cmpl = lpfc_els_lcb_rsp;
7673 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
7674 phba->sli4_hba.physical_port);
7675 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
7676 beacon_state);
7677 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */
7678
7679 /*
7680 * Check bv1s bit before issuing the mailbox
7681 * if bv1s == 1, LCB V1 supported
7682 * else, LCB V0 supported
7683 */
7684
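	/* Note: the V0 version of the command does not support a beacon
	 * duration, which is why a non-zero requested duration is rejected
	 * below when only V0 is available.
	 */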
7685 if (phba->sli4_hba.pc_sli4_params.bv1s) {
7686 /* COMMON_SET_BEACON_CONFIG_V1 */
7687 cfg_shdr->request.word9 = BEACON_VERSION_V1;
7688 lcb_context->capability |= LCB_CAPABILITY_DURATION;
7689 bf_set(lpfc_mbx_set_beacon_port_type,
7690 &mbox->u.mqe.un.beacon_config, 0);
7691 bf_set(lpfc_mbx_set_beacon_duration_v1,
7692 &mbox->u.mqe.un.beacon_config,
7693 be16_to_cpu(lcb_context->duration));
7694 } else {
7695 /* COMMON_SET_BEACON_CONFIG_V0 */
7696 if (be16_to_cpu(lcb_context->duration) != 0) {
			mempool_free(mbox, phba->mbox_mem_pool);
7698 return 1;
7699 }
7700 cfg_shdr->request.word9 = BEACON_VERSION_V0;
7701 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION);
7702 bf_set(lpfc_mbx_set_beacon_state,
7703 &mbox->u.mqe.un.beacon_config, beacon_state);
7704 bf_set(lpfc_mbx_set_beacon_port_type,
7705 &mbox->u.mqe.un.beacon_config, 1);
7706 bf_set(lpfc_mbx_set_beacon_duration,
7707 &mbox->u.mqe.un.beacon_config,
7708 be16_to_cpu(lcb_context->duration));
7709 }
7710
7711 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7712 if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
7714 return 1;
7715 }
7716
7717 return 0;
7718}
7719
7720
7721/**
7722 * lpfc_els_rcv_lcb - Process an unsolicited LCB
7723 * @vport: pointer to a host virtual N_Port data structure.
7724 * @cmdiocb: pointer to lpfc command iocb data structure.
7725 * @ndlp: pointer to a node-list data structure.
7726 *
 * This routine processes an unsolicited LCB (Link Cable Beacon) IOCB.
 * First, the payload of the unsolicited LCB is validated. Then, based on
 * the LCB sub-command, the port beacon is either turned on or turned off.
7730 *
7731 * Return code
7732 * 0 - Sent the acc response
7733 * 1 - Sent the reject response.
7734 **/
7735static int
7736lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7737 struct lpfc_nodelist *ndlp)
7738{
7739 struct lpfc_hba *phba = vport->phba;
7740 struct lpfc_dmabuf *pcmd;
7741 uint8_t *lp;
7742 struct fc_lcb_request_frame *beacon;
7743 struct lpfc_lcb_context *lcb_context;
7744 u8 state, rjt_err = 0;
7745 struct ls_rjt stat;
7746
7747 pcmd = cmdiocb->cmd_dmabuf;
7748 lp = (uint8_t *)pcmd->virt;
7749 beacon = (struct fc_lcb_request_frame *)pcmd->virt;
7750
7751 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7752 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
7753 "type x%x frequency %x duration x%x\n",
7754 lp[0], lp[1], lp[2],
7755 beacon->lcb_command,
7756 beacon->lcb_sub_command,
7757 beacon->lcb_type,
7758 beacon->lcb_frequency,
7759 be16_to_cpu(beacon->lcb_duration));
7760
7761 if (beacon->lcb_sub_command != LPFC_LCB_ON &&
7762 beacon->lcb_sub_command != LPFC_LCB_OFF) {
7763 rjt_err = LSRJT_CMD_UNSUPPORTED;
7764 goto rjt;
7765 }
7766
7767 if (phba->sli_rev < LPFC_SLI_REV4 ||
7768 phba->hba_flag & HBA_FCOE_MODE ||
7769 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
7770 LPFC_SLI_INTF_IF_TYPE_2)) {
7771 rjt_err = LSRJT_CMD_UNSUPPORTED;
7772 goto rjt;
7773 }
7774
	lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL);
7776 if (!lcb_context) {
7777 rjt_err = LSRJT_UNABLE_TPC;
7778 goto rjt;
7779 }
7780
7781 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
7782 lcb_context->sub_command = beacon->lcb_sub_command;
7783 lcb_context->capability = 0;
7784 lcb_context->type = beacon->lcb_type;
7785 lcb_context->frequency = beacon->lcb_frequency;
7786 lcb_context->duration = beacon->lcb_duration;
	lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb);
	lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb);
7789 lcb_context->ndlp = lpfc_nlp_get(ndlp);
7790 if (!lcb_context->ndlp) {
7791 rjt_err = LSRJT_UNABLE_TPC;
7792 goto rjt_free;
7793 }
7794
	if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
7796 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT,
7797 "0193 failed to send mail box");
7798 lpfc_nlp_put(ndlp);
7799 rjt_err = LSRJT_UNABLE_TPC;
7800 goto rjt_free;
7801 }
7802 return 0;
7803
7804rjt_free:
	kfree(lcb_context);
7806rjt:
7807 memset(&stat, 0, sizeof(stat));
7808 stat.un.b.lsRjtRsnCode = rjt_err;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7810 return 1;
7811}
7812
7813
7814/**
7815 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
7816 * @vport: pointer to a host virtual N_Port data structure.
7817 *
 * This routine cleans up any Registration State Change Notification
 * (RSCN) activity on a @vport. Note that the fc_rscn_flush flag of the
 * @vport, together with the host_lock, is used to prevent multiple threads
 * from accessing the RSCN array of the same @vport at the same time.
7822 **/
7823void
7824lpfc_els_flush_rscn(struct lpfc_vport *vport)
7825{
7826 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7827 struct lpfc_hba *phba = vport->phba;
7828 int i;
7829
	spin_lock_irq(shost->host_lock);
	if (vport->fc_rscn_flush) {
		/* Another thread is walking fc_rscn_id_list on this vport */
		spin_unlock_irq(shost->host_lock);
		return;
	}
	/* Indicate we are walking lpfc_els_flush_rscn on this vport */
	vport->fc_rscn_flush = 1;
	spin_unlock_irq(shost->host_lock);
7839
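	/* With the flush token held, the saved RSCN buffers can be freed
	 * without holding the host lock.
	 */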
7840 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
7841 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
7842 vport->fc_rscn_id_list[i] = NULL;
7843 }
	spin_lock_irq(shost->host_lock);
	vport->fc_rscn_id_cnt = 0;
	vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
	spin_unlock_irq(shost->host_lock);
7848 lpfc_can_disctmo(vport);
7849 /* Indicate we are done walking this fc_rscn_id_list */
7850 vport->fc_rscn_flush = 0;
7851}
7852
7853/**
7854 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
7855 * @vport: pointer to a host virtual N_Port data structure.
7856 * @did: remote destination port identifier.
7857 *
 * This routine checks whether there is any pending Registration State
 * Change Notification (RSCN) to a @did on @vport.
 *
 * Return code
 * Non-zero - The @did matched a pending rscn
 * 0 - Not able to match @did with a pending rscn
7864 **/
7865int
7866lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
7867{
7868 D_ID ns_did;
7869 D_ID rscn_did;
7870 uint32_t *lp;
7871 uint32_t payload_len, i;
7872 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7873
7874 ns_did.un.word = did;
7875
7876 /* Never match fabric nodes for RSCNs */
7877 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
7878 return 0;
7879
7880 /* If we are doing a FULL RSCN rediscovery, match everything */
7881 if (vport->fc_flag & FC_RSCN_DISCOVERY)
7882 return did;
7883
	spin_lock_irq(shost->host_lock);
	if (vport->fc_rscn_flush) {
		/* Another thread is walking fc_rscn_id_list on this vport */
		spin_unlock_irq(shost->host_lock);
		return 0;
	}
	/* Indicate we are walking fc_rscn_id_list on this vport */
	vport->fc_rscn_flush = 1;
	spin_unlock_irq(shost->host_lock);
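	/* Each RSCN page is a 32-bit word: an address-format qualifier in
	 * the top byte and an affected N_Port ID in the low 24 bits. For
	 * example, an AREA-format entry for 0x010200 would match DIDs
	 * 0x0102xx, and a DOMAIN-format entry for 0x010000 would match
	 * 0x01xxxx (illustrative values only).
	 */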
7893 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
7894 lp = vport->fc_rscn_id_list[i]->virt;
7895 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
7896 payload_len -= sizeof(uint32_t); /* take off word 0 */
7897 while (payload_len) {
7898 rscn_did.un.word = be32_to_cpu(*lp++);
7899 payload_len -= sizeof(uint32_t);
7900 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
7901 case RSCN_ADDRESS_FORMAT_PORT:
7902 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
7903 && (ns_did.un.b.area == rscn_did.un.b.area)
7904 && (ns_did.un.b.id == rscn_did.un.b.id))
7905 goto return_did_out;
7906 break;
7907 case RSCN_ADDRESS_FORMAT_AREA:
7908 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
7909 && (ns_did.un.b.area == rscn_did.un.b.area))
7910 goto return_did_out;
7911 break;
7912 case RSCN_ADDRESS_FORMAT_DOMAIN:
7913 if (ns_did.un.b.domain == rscn_did.un.b.domain)
7914 goto return_did_out;
7915 break;
7916 case RSCN_ADDRESS_FORMAT_FABRIC:
7917 goto return_did_out;
7918 }
7919 }
7920 }
7921 /* Indicate we are done with walking fc_rscn_id_list on this vport */
7922 vport->fc_rscn_flush = 0;
7923 return 0;
7924return_did_out:
7925 /* Indicate we are done with walking fc_rscn_id_list on this vport */
7926 vport->fc_rscn_flush = 0;
7927 return did;
7928}
7929
7930/**
7931 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
7932 * @vport: pointer to a host virtual N_Port data structure.
7933 *
 * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the
 * discovery state machine for each of the @vport's nodes that matches a
 * pending RSCN (Registration State Change Notification).
 *
 * Return code
 * 0 - Successful (currently always returns 0)
7940 **/
7941static int
7942lpfc_rscn_recovery_check(struct lpfc_vport *vport)
7943{
7944 struct lpfc_nodelist *ndlp = NULL, *n;
7945
7946 /* Move all affected nodes by pending RSCNs to NPR state. */
7947 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) {
7948 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
7950 continue;
7951
7952 /* NVME Target mode does not do RSCN Recovery. */
7953 if (vport->phba->nvmet_support)
7954 continue;
7955
7956 /* If we are in the process of doing discovery on this
7957 * NPort, let it continue on its own.
7958 */
7959 switch (ndlp->nlp_state) {
7960 case NLP_STE_PLOGI_ISSUE:
7961 case NLP_STE_ADISC_ISSUE:
7962 case NLP_STE_REG_LOGIN_ISSUE:
7963 case NLP_STE_PRLI_ISSUE:
7964 case NLP_STE_LOGO_ISSUE:
7965 continue;
7966 }
7967
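		/* Kick the node through DEVICE_RECOVERY so it is returned to
		 * NPR state and re-discovered as part of the RSCN processing.
		 */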
7968 lpfc_disc_state_machine(vport, ndlp, NULL,
7969 NLP_EVT_DEVICE_RECOVERY);
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
7971 }
7972 return 0;
7973}
7974
7975/**
7976 * lpfc_send_rscn_event - Send an RSCN event to management application
7977 * @vport: pointer to a host virtual N_Port data structure.
7978 * @cmdiocb: pointer to lpfc command iocb data structure.
7979 *
7980 * lpfc_send_rscn_event sends an RSCN netlink event to management
7981 * applications.
7982 */
7983static void
7984lpfc_send_rscn_event(struct lpfc_vport *vport,
7985 struct lpfc_iocbq *cmdiocb)
7986{
7987 struct lpfc_dmabuf *pcmd;
7988 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7989 uint32_t *payload_ptr;
7990 uint32_t payload_len;
7991 struct lpfc_rscn_event_header *rscn_event_data;
7992
7993 pcmd = cmdiocb->cmd_dmabuf;
7994 payload_ptr = (uint32_t *) pcmd->virt;
7995 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
7996
	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
7998 payload_len, GFP_KERNEL);
7999 if (!rscn_event_data) {
8000 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8001 "0147 Failed to allocate memory for RSCN event\n");
8002 return;
8003 }
8004 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
8005 rscn_event_data->payload_length = payload_len;
8006 memcpy(rscn_event_data->rscn_payload, payload_ptr,
8007 payload_len);
8008
	fc_host_post_vendor_event(shost,
				  fc_get_event_number(),
				  sizeof(struct lpfc_rscn_event_header) +
				  payload_len,
				  (char *)rscn_event_data,
				  LPFC_NL_VENDOR_ID);
8014
	kfree(rscn_event_data);
8016}
8017
8018/**
8019 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
8020 * @vport: pointer to a host virtual N_Port data structure.
8021 * @cmdiocb: pointer to lpfc command iocb data structure.
8022 * @ndlp: pointer to a node-list data structure.
8023 *
 * This routine processes an unsolicited RSCN (Registration State Change
 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
 * and fc_host_post_event() is invoked to pass each entry to the FC transport
 * layer. If the discovery state machine is about to begin discovery, the
 * RSCN is simply accepted and the discovery process will satisfy it. If this
 * RSCN only contains N_Port IDs for other vports on this HBA, the RSCN is
 * accepted and no further processing is done. If the state machine is in the
 * recovery state, the fc_rscn_id_list of this @vport is updated and the
 * lpfc_rscn_recovery_check() routine is invoked to send a recovery event to
 * all nodes that match the RSCN payload. Otherwise, the
 * lpfc_els_handle_rscn() routine is invoked to handle the RSCN event.
8035 *
8036 * Return code
8037 * 0 - Just sent the acc response
8038 * 1 - Sent the acc response and waited for name server completion
8039 **/
8040static int
8041lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8042 struct lpfc_nodelist *ndlp)
8043{
8044 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8045 struct lpfc_hba *phba = vport->phba;
8046 struct lpfc_dmabuf *pcmd;
8047 uint32_t *lp, *datap;
8048 uint32_t payload_len, length, nportid, *cmd;
8049 int rscn_cnt;
8050 int rscn_id = 0, hba_id = 0;
8051 int i, tmo;
8052
8053 pcmd = cmdiocb->cmd_dmabuf;
8054 lp = (uint32_t *) pcmd->virt;
8055
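	/* Word 0 of the RSCN combines the ELS command code with the total
	 * payload length; masking with ~ELS_CMD_MASK leaves just the length
	 * in bytes.
	 */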
8056 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
8057 payload_len -= sizeof(uint32_t); /* take off word 0 */
8058 /* RSCN received */
8059 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8060 "0214 RSCN received Data: x%x x%x x%x x%x\n",
8061 vport->fc_flag, payload_len, *lp,
8062 vport->fc_rscn_id_cnt);
8063
8064 /* Send an RSCN event to the management application */
8065 lpfc_send_rscn_event(vport, cmdiocb);
8066
8067 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_RSCN, lp[i]);
8070
8071 /* Check if RSCN is coming from a direct-connected remote NPort */
8072 if (vport->fc_flag & FC_PT2PT) {
8073 /* If so, just ACC it, no other action needed for now */
8074 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8075 "2024 pt2pt RSCN %08x Data: x%x x%x\n",
8076 *lp, vport->fc_flag, payload_len);
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8078
8079 /* Check to see if we need to NVME rescan this target
8080 * remoteport.
8081 */
8082 if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
8083 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
8084 lpfc_nvme_rescan_port(vport, ndlp);
8085 return 0;
8086 }
8087
8088 /* If we are about to begin discovery, just ACC the RSCN.
8089 * Discovery processing will satisfy it.
8090 */
8091 if (vport->port_state <= LPFC_NS_QRY) {
8092 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8093 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
8094 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
8095
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8097 return 0;
8098 }
8099
8100 /* If this RSCN just contains NPortIDs for other vports on this HBA,
8101 * just ACC and ignore it.
8102 */
8103 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
8104 !(vport->cfg_peer_port_login)) {
8105 i = payload_len;
8106 datap = lp;
8107 while (i > 0) {
8108 nportid = *datap++;
8109 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
8110 i -= sizeof(uint32_t);
8111 rscn_id++;
8112 if (lpfc_find_vport_by_did(phba, nportid))
8113 hba_id++;
8114 }
8115 if (rscn_id == hba_id) {
8116 /* ALL NPortIDs in RSCN are on HBA */
8117 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8118 "0219 Ignore RSCN "
8119 "Data: x%x x%x x%x x%x\n",
8120 vport->fc_flag, payload_len,
8121 *lp, vport->fc_rscn_id_cnt);
8122 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8123 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
8124 ndlp->nlp_DID, vport->port_state,
8125 ndlp->nlp_flag);
8126
			lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
					 ndlp, NULL);
8129 /* Restart disctmo if its already running */
8130 if (vport->fc_flag & FC_DISC_TMO) {
				tmo = ((phba->fc_ratov * 3) + 3);
				mod_timer(&vport->fc_disctmo,
					  jiffies +
					  msecs_to_jiffies(1000 * tmo));
8135 }
8136 return 0;
8137 }
8138 }
8139
	spin_lock_irq(shost->host_lock);
8141 if (vport->fc_rscn_flush) {
8142 /* Another thread is walking fc_rscn_id_list on this vport */
8143 vport->fc_flag |= FC_RSCN_DISCOVERY;
		spin_unlock_irq(shost->host_lock);
		/* Send back ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8147 return 0;
8148 }
8149 /* Indicate we are walking fc_rscn_id_list on this vport */
8150 vport->fc_rscn_flush = 1;
	spin_unlock_irq(shost->host_lock);
8152 /* Get the array count after successfully have the token */
8153 rscn_cnt = vport->fc_rscn_id_cnt;
8154 /* If we are already processing an RSCN, save the received
8155 * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later.
8156 */
8157 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
8158 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8159 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
8160 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
8161
		spin_lock_irq(shost->host_lock);
8163 vport->fc_flag |= FC_RSCN_DEFERRED;
8164
8165 /* Restart disctmo if its already running */
8166 if (vport->fc_flag & FC_DISC_TMO) {
			tmo = ((phba->fc_ratov * 3) + 3);
			mod_timer(&vport->fc_disctmo,
				  jiffies + msecs_to_jiffies(1000 * tmo));
8170 }
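		/* Coalesce: if the new payload fits in the last saved RSCN
		 * buffer (within LPFC_BPL_SIZE), append it there instead of
		 * consuming another fc_rscn_id_list slot.
		 */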
8171 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
8172 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
8173 vport->fc_flag |= FC_RSCN_MODE;
			spin_unlock_irq(shost->host_lock);
8175 if (rscn_cnt) {
8176 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
8177 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
8178 }
8179 if ((rscn_cnt) &&
8180 (payload_len + length <= LPFC_BPL_SIZE)) {
8181 *cmd &= ELS_CMD_MASK;
8182 *cmd |= cpu_to_be32(payload_len + length);
8183 memcpy(((uint8_t *)cmd) + length, lp,
8184 payload_len);
8185 } else {
8186 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
8187 vport->fc_rscn_id_cnt++;
8188 /* If we zero, cmdiocb->cmd_dmabuf, the calling
8189 * routine will not try to free it.
8190 */
8191 cmdiocb->cmd_dmabuf = NULL;
8192 }
8193 /* Deferred RSCN */
8194 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8195 "0235 Deferred RSCN "
8196 "Data: x%x x%x x%x\n",
8197 vport->fc_rscn_id_cnt, vport->fc_flag,
8198 vport->port_state);
8199 } else {
8200 vport->fc_flag |= FC_RSCN_DISCOVERY;
			spin_unlock_irq(shost->host_lock);
8202 /* ReDiscovery RSCN */
8203 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8204 "0234 ReDiscovery RSCN "
8205 "Data: x%x x%x x%x\n",
8206 vport->fc_rscn_id_cnt, vport->fc_flag,
8207 vport->port_state);
8208 }
8209 /* Indicate we are done walking fc_rscn_id_list on this vport */
8210 vport->fc_rscn_flush = 0;
8211 /* Send back ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8213 /* send RECOVERY event for ALL nodes that match RSCN payload */
8214 lpfc_rscn_recovery_check(vport);
8215 return 0;
8216 }
8217 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8218 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
8219 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
8220
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_RSCN_MODE;
	spin_unlock_irq(shost->host_lock);
8224 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
8225 /* Indicate we are done walking fc_rscn_id_list on this vport */
8226 vport->fc_rscn_flush = 0;
8227 /*
8228 * If we zero, cmdiocb->cmd_dmabuf, the calling routine will
8229 * not try to free it.
8230 */
8231 cmdiocb->cmd_dmabuf = NULL;
8232 lpfc_set_disctmo(vport);
8233 /* Send back ACC */
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8235 /* send RECOVERY event for ALL nodes that match RSCN payload */
8236 lpfc_rscn_recovery_check(vport);
8237 return lpfc_els_handle_rscn(vport);
8238}
8239
8240/**
8241 * lpfc_els_handle_rscn - Handle rscn for a vport
8242 * @vport: pointer to a host virtual N_Port data structure.
8243 *
 * This routine handles a Registration State Change Notification (RSCN)
 * for a @vport. If a login to the NameServer does not exist, a new ndlp shall
 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
 * if the ndlp for the NameServer exists, a Common Transport (CT) command to
 * the NameServer shall be issued. If the CT command to the NameServer cannot
 * be issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up
 * any RSCN activity on the @vport.
 *
 * Return code
 * 0 - Cleaned up rscn on the @vport
 * 1 - Wait for plogi to name server before proceeding
8255 **/
8256int
8257lpfc_els_handle_rscn(struct lpfc_vport *vport)
8258{
8259 struct lpfc_nodelist *ndlp;
8260 struct lpfc_hba *phba = vport->phba;
8261
8262 /* Ignore RSCN if the port is being torn down. */
8263 if (vport->load_flag & FC_UNLOADING) {
8264 lpfc_els_flush_rscn(vport);
8265 return 0;
8266 }
8267
8268 /* Start timer for RSCN processing */
8269 lpfc_set_disctmo(vport);
8270
8271 /* RSCN processed */
8272 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8273 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
8274 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
8275 vport->port_state, vport->num_disc_nodes,
8276 vport->gidft_inp);
8277
8278 /* To process RSCN, first compare RSCN data with NameServer */
8279 vport->fc_ns_retry = 0;
8280 vport->num_disc_nodes = 0;
8281
8282 ndlp = lpfc_findnode_did(vport, NameServer_DID);
8283 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
8284 /* Good ndlp, issue CT Request to NameServer. Need to
8285 * know how many gidfts were issued. If none, then just
8286 * flush the RSCN. Otherwise, the outstanding requests
8287 * need to complete.
8288 */
8289 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
8290 if (lpfc_issue_gidft(vport) > 0)
8291 return 1;
8292 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
8293 if (lpfc_issue_gidpt(vport) > 0)
8294 return 1;
8295 } else {
8296 return 1;
8297 }
8298 } else {
8299 /* Nameserver login in question. Revalidate. */
8300 if (ndlp) {
8301 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
8302 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8303 } else {
8304 ndlp = lpfc_nlp_init(vport, NameServer_DID);
8305 if (!ndlp) {
8306 lpfc_els_flush_rscn(vport);
8307 return 0;
8308 }
8309 ndlp->nlp_prev_state = ndlp->nlp_state;
8310 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8311 }
8312 ndlp->nlp_type |= NLP_FABRIC;
8313 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
8314 /* Wait for NameServer login cmpl before we can
8315 * continue
8316 */
8317 return 1;
8318 }
8319
8320 lpfc_els_flush_rscn(vport);
8321 return 0;
8322}
8323
8324/**
8325 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
8326 * @vport: pointer to a host virtual N_Port data structure.
8327 * @cmdiocb: pointer to lpfc command iocb data structure.
8328 * @ndlp: pointer to a node-list data structure.
8329 *
8330 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
8331 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
8332 * point topology. As an unsolicited FLOGI should not be received in a loop
8333 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
8334 * lpfc_check_sparm() routine is invoked to check the parameters in the
8335 * unsolicited FLOGI. If parameters validation failed, the routine
8336 * lpfc_els_rsp_reject() shall be called with reject reason code set to
8337 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
8338 * FLOGI shall be compared with the Port WWN of the @vport to determine who
8339 * will initiate PLOGI. The party with the higher lexicographical value shall have
8340 * higher priority (as the winning port) and will initiate PLOGI and
8341 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
8342 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
8343 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
8344 *
8345 * Return code
8346 * 0 - Successfully processed the unsolicited flogi
8347 * 1 - Failed to process the unsolicited flogi
8348 **/
8349static int
8350lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8351 struct lpfc_nodelist *ndlp)
8352{
8353 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8354 struct lpfc_hba *phba = vport->phba;
8355 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
8356 uint32_t *lp = (uint32_t *) pcmd->virt;
8357 union lpfc_wqe128 *wqe = &cmdiocb->wqe;
8358 struct serv_parm *sp;
8359 LPFC_MBOXQ_t *mbox;
8360 uint32_t cmd, did;
8361 int rc;
8362 uint32_t fc_flag = 0;
8363 uint32_t port_state = 0;
8364
8365 /* Clear external loopback plug detected flag */
8366 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
8367
8368 cmd = *lp++;
8369 sp = (struct serv_parm *) lp;
8370
8371 /* FLOGI received */
8372
8373 lpfc_set_disctmo(vport);
8374
8375 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8376 /* We should never receive a FLOGI in loop mode, ignore it */
8377 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
8378
8379 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
8380 Loop Mode */
8381 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8382 "0113 An FLOGI ELS command x%x was "
8383 "received from DID x%x in Loop Mode\n",
8384 cmd, did);
8385 return 1;
8386 }
8387
8388 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
8389
8390 /*
8391 * If our portname is greater than the remote portname,
8392 * then we initiate Nport login.
8393 */
8394
8395 rc = memcmp(&vport->fc_portname, &sp->portName,
8396 sizeof(struct lpfc_name));
8397
8398 if (!rc) {
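 /* Identical WWPNs mean this FLOGI is our own frame reflected back,
  * i.e. an external loopback plug is present. SLI-3 recovers by
  * re-initializing the link below; SLI-4 flags the loopback and
  * aborts the outstanding FLOGI instead.
  */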
8399 if (phba->sli_rev < LPFC_SLI_REV4) {
8400 mbox = mempool_alloc(phba->mbox_mem_pool,
8401 GFP_KERNEL);
8402 if (!mbox)
8403 return 1;
8404 lpfc_linkdown(phba);
8405 lpfc_init_link(phba, mbox,
8406 phba->cfg_topology,
8407 phba->cfg_link_speed);
8408 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
8409 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
8410 mbox->vport = vport;
8411 rc = lpfc_sli_issue_mbox(phba, mbox,
8412 MBX_NOWAIT);
8413 lpfc_set_loopback_flag(phba);
8414 if (rc == MBX_NOT_FINISHED)
8415 mempool_free(mbox, phba->mbox_mem_pool);
8416 return 1;
8417 }
8418
8419 /* External loopback plug insertion detected */
8420 phba->link_flag |= LS_EXTERNAL_LOOPBACK;
8421
8422 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC,
8423 "1119 External Loopback plug detected\n");
8424
8425 /* abort the flogi coming back to ourselves
8426 * due to external loopback on the port.
8427 */
8428 lpfc_els_abort_flogi(phba);
8429 return 0;
8430
8431 } else if (rc > 0) { /* greater than */
8432 spin_lock_irq(shost->host_lock);
8433 vport->fc_flag |= FC_PT2PT_PLOGI;
8434 spin_unlock_irq(shost->host_lock);
8435
8436 /* If we have the high WWPN we can assign our own
8437 * myDID; otherwise, we have to WAIT for a PLOGI
8438 * from the remote NPort to find out what it
8439 * will be.
8440 */
8441 vport->fc_myDID = PT2PT_LocalID;
8442 } else {
8443 vport->fc_myDID = PT2PT_RemoteID;
8444 }
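 /* The WWPN comparison decides point-to-point addressing: the winner
  * claims the local N_Port_ID and will send the PLOGI; the loser takes
  * the remote N_Port_ID and waits for the peer's PLOGI to learn the
  * final address assignment.
  */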
8445
8446 /*
8447 * The vport state should go to LPFC_FLOGI only
8448 * AFTER we issue a FLOGI, not receive one.
8449 */
8450 spin_lock_irq(shost->host_lock);
8451 fc_flag = vport->fc_flag;
8452 port_state = vport->port_state;
8453 vport->fc_flag |= FC_PT2PT;
8454 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
8455
8456 /* Acking an unsol FLOGI. Count 1 for link bounce
8457 * work-around.
8458 */
8459 vport->rcv_flogi_cnt++;
8460 spin_unlock_irq(shost->host_lock);
8461 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8462 "3311 Rcv Flogi PS x%x new PS x%x "
8463 "fc_flag x%x new fc_flag x%x\n",
8464 port_state, vport->port_state,
8465 fc_flag, vport->fc_flag);
8466
8467 /*
8468 * We temporarily set fc_myDID to make it look like we are
8469 * a Fabric. This is done just so we end up with the right
8470 * did / sid on the FLOGI ACC rsp.
8471 */
8472 did = vport->fc_myDID;
8473 vport->fc_myDID = Fabric_DID;
8474
8475 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
8476
8477 /* Defer ACC response until AFTER we issue a FLOGI */
8478 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
8479 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag,
8480 &wqe->xmit_els_rsp.wqe_com);
8481 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid,
8482 &wqe->xmit_els_rsp.wqe_com);
8483
8484 vport->fc_myDID = did;
8485
8486 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8487 "3344 Deferring FLOGI ACC: rx_id: x%x,"
8488 " ox_id: x%x, hba_flag x%x\n",
8489 phba->defer_flogi_acc_rx_id,
8490 phba->defer_flogi_acc_ox_id, phba->hba_flag);
8491
8492 phba->defer_flogi_acc_flag = true;
8493
8494 return 0;
8495 }
8496
8497 /* Send back ACC */
8498 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
8499
8500 /* Now let's put fc_myDID back to what it's supposed to be */
8501 vport->fc_myDID = did;
8502
8503 return 0;
8504}
8505
8506/**
8507 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
8508 * @vport: pointer to a host virtual N_Port data structure.
8509 * @cmdiocb: pointer to lpfc command iocb data structure.
8510 * @ndlp: pointer to a node-list data structure.
8511 *
8512 * This routine processes Request Node Identification Data (RNID) IOCB
8513 * received as an ELS unsolicited event. Only when the RNID specified format
8514 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
8515 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to
8516 * Accept (ACC) the RNID ELS command. All the other RNID formats are
8517 * rejected by invoking the lpfc_els_rsp_reject() routine.
8518 *
8519 * Return code
8520 * 0 - Successfully processed rnid iocb (currently always return 0)
8521 **/
8522static int
8523lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8524 struct lpfc_nodelist *ndlp)
8525{
8526 struct lpfc_dmabuf *pcmd;
8527 uint32_t *lp;
8528 RNID *rn;
8529 struct ls_rjt stat;
8530
8531 pcmd = cmdiocb->cmd_dmabuf;
8532 lp = (uint32_t *) pcmd->virt;
8533
8534 lp++;
8535 rn = (RNID *) lp;
8536
8537 /* RNID received */
8538
8539 switch (rn->Format) {
8540 case 0:
8541 case RNID_TOPOLOGY_DISC:
8542 /* Send back ACC */
8543 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
8544 break;
8545 default:
8546 /* Reject this request because format not supported */
8547 stat.un.b.lsRjtRsvd0 = 0;
8548 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8549 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8550 stat.un.b.vendorUnique = 0;
8551 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
8552 NULL);
8553 }
8554 return 0;
8555}
8556
8557/**
8558 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
8559 * @vport: pointer to a host virtual N_Port data structure.
8560 * @cmdiocb: pointer to lpfc command iocb data structure.
8561 * @ndlp: pointer to a node-list data structure.
8562 *
8563 * Return code
8564 * 0 - Successfully processed echo iocb (currently always return 0)
8565 **/
8566static int
8567lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8568 struct lpfc_nodelist *ndlp)
8569{
8570 uint8_t *pcmd;
8571
8572 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt;
8573
8574 /* skip over first word of echo command to find echo data */
8575 pcmd += sizeof(uint32_t);
8576
8577 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
8578 return 0;
8579}
8580
8581/**
8582 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
8583 * @vport: pointer to a host virtual N_Port data structure.
8584 * @cmdiocb: pointer to lpfc command iocb data structure.
8585 * @ndlp: pointer to a node-list data structure.
8586 *
8587 * This routine processes a Link Incident Report Registration (LIRR) IOCB
8588 * received as an ELS unsolicited event. Currently, this function just invokes
8589 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
8590 *
8591 * Return code
8592 * 0 - Successfully processed lirr iocb (currently always return 0)
8593 **/
8594static int
8595lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8596 struct lpfc_nodelist *ndlp)
8597{
8598 struct ls_rjt stat;
8599
8600 /* For now, unconditionally reject this command */
8601 stat.un.b.lsRjtRsvd0 = 0;
8602 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8603 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8604 stat.un.b.vendorUnique = 0;
8605 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8606 return 0;
8607}
8608
8609/**
8610 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
8611 * @vport: pointer to a host virtual N_Port data structure.
8612 * @cmdiocb: pointer to lpfc command iocb data structure.
8613 * @ndlp: pointer to a node-list data structure.
8614 *
8615 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
8616 * received as an ELS unsolicited event. A request to RRQ shall only
8617 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
8618 * Nx_Port N_Port_ID of the target Exchange is the same as the
8619 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
8620 * not accepted, an LS_RJT with reason code "Unable to perform
8621 * command request" and reason code explanation "Invalid Originator
8622 * S_ID" shall be returned. For now, we just unconditionally accept
8623 * RRQ from the target.
8624 **/
8625static void
8626lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8627 struct lpfc_nodelist *ndlp)
8628{
8629 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8630 if (vport->phba->sli_rev == LPFC_SLI_REV4)
8631 lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
8632}
8633
8634/**
8635 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
8636 * @phba: pointer to lpfc hba data structure.
8637 * @pmb: pointer to the driver internal queue element for mailbox command.
8638 *
8639 * This routine is the completion callback function for the MBX_READ_LNK_STAT
8640 * mailbox command. This callback function is to actually send the Accept
8641 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
8642 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
8643 * mailbox command, constructs the RLS response with the link statistics
8644 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
8645 * response to the RLS.
8646 *
8647 * Note that the ndlp reference count will be incremented by 1 for holding the
8648 * ndlp and the reference to ndlp will be stored into the ndlp field of
8649 * the IOCB for the completion callback function to the RLS Accept Response
8650 * ELS IOCB command.
8651 *
8652 **/
8653static void
8654lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
8655{
8656 int rc = 0;
8657 MAILBOX_t *mb;
8658 IOCB_t *icmd;
8659 union lpfc_wqe128 *wqe;
8660 struct RLS_RSP *rls_rsp;
8661 uint8_t *pcmd;
8662 struct lpfc_iocbq *elsiocb;
8663 struct lpfc_nodelist *ndlp;
8664 uint16_t oxid;
8665 uint16_t rxid;
8666 uint32_t cmdsize;
8667 u32 ulp_context;
8668
8669 mb = &pmb->u.mb;
8670
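 /* lpfc_els_rcv_rls() packed the exchange identifiers of the original
  * unsolicited RLS into the mailbox context pointer: OX_ID in the
  * upper 16 bits, RX_ID in the lower 16. Recover them here so the ACC
  * can be directed at the right exchange.
  */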
8671 ndlp = pmb->ctx_ndlp;
8672 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
8673 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
8674 pmb->ctx_buf = NULL;
8675 pmb->ctx_ndlp = NULL;
8676
8677 if (mb->mbxStatus) {
8678 mempool_free(pmb, phba->mbox_mem_pool);
8679 return;
8680 }
8681
8682 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
8683 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8684 lpfc_max_els_tries, ndlp,
8685 ndlp->nlp_DID, ELS_CMD_ACC);
8686
8687 /* Decrement the ndlp reference count from previous mbox command */
8688 lpfc_nlp_put(ndlp);
8689
8690 if (!elsiocb) {
8691 mempool_free(pmb, phba->mbox_mem_pool);
8692 return;
8693 }
8694
8695 ulp_context = get_job_ulpcontext(phba, elsiocb);
8696 if (phba->sli_rev == LPFC_SLI_REV4) {
8697 wqe = &elsiocb->wqe;
8698 /* Xri / rx_id */
8699 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid);
8700 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid);
8701 } else {
8702 icmd = &elsiocb->iocb;
8703 icmd->ulpContext = rxid;
8704 icmd->unsli3.rcvsli3.ox_id = oxid;
8705 }
8706
8707 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8708 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8709 pcmd += sizeof(uint32_t); /* Skip past command */
8710 rls_rsp = (struct RLS_RSP *)pcmd;
8711
8712 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
8713 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
8714 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
8715 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
8716 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
8717 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
8718 mempool_free(pmb, phba->mbox_mem_pool);
8719 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
8720 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8721 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
8722 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
8723 elsiocb->iotag, ulp_context,
8724 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8725 ndlp->nlp_rpi);
8726 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8727 phba->fc_stat.elsXmitACC++;
8728 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8729 if (!elsiocb->ndlp) {
8730 lpfc_els_free_iocb(phba, elsiocb);
8731 return;
8732 }
8733
8734 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8735 if (rc == IOCB_ERROR) {
8736 lpfc_els_free_iocb(phba, elsiocb);
8737 lpfc_nlp_put(ndlp);
8738 }
8739 return;
8740}
8741
8742/**
8743 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
8744 * @vport: pointer to a host virtual N_Port data structure.
8745 * @cmdiocb: pointer to lpfc command iocb data structure.
8746 * @ndlp: pointer to a node-list data structure.
8747 *
8748 * This routine processes Read Link Status (RLS) IOCB received as an
8749 * ELS unsolicited event. It first checks the remote port state. If the
8750 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8751 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8752 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
8753 * to read the HBA link statistics. The callback function set on the
8754 * MBX_READ_LNK_STAT mailbox command, lpfc_els_rsp_rls_acc(), then
8755 * actually sends out the RLS Accept (ACC) response.
8756 *
8757 * Return codes
8758 * 0 - Successfully processed rls iocb (currently always return 0)
8759 **/
8760static int
8761lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8762 struct lpfc_nodelist *ndlp)
8763{
8764 struct lpfc_hba *phba = vport->phba;
8765 LPFC_MBOXQ_t *mbox;
8766 struct ls_rjt stat;
8767 u32 ctx = get_job_ulpcontext(phba, cmdiocb);
8768 u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
8769
8770 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8771 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8772 /* reject the unsolicited RLS request and done with it */
8773 goto reject_out;
8774
8775 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
8776 if (mbox) {
8777 lpfc_read_lnk_stat(phba, mbox);
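 /* Stash the RX_ID/OX_ID of the unsolicited RLS in the mailbox
  * context pointer (OX_ID in the upper 16 bits); the completion
  * handler lpfc_els_rsp_rls_acc() unpacks them to build the ACC.
  */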
8778 mbox->ctx_buf = (void *)((unsigned long)
8779 (ox_id << 16 | ctx));
8780 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
8781 if (!mbox->ctx_ndlp)
8782 goto node_err;
8783 mbox->vport = vport;
8784 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
8785 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8786 != MBX_NOT_FINISHED)
8787 /* Mbox completion will send ELS Response */
8788 return 0;
8789 /* Decrement reference count used for the failed mbox
8790 * command.
8791 */
8792 lpfc_nlp_put(ndlp);
8793node_err:
8794 mempool_free(mbox, phba->mbox_mem_pool);
8795 }
8796reject_out:
8797 /* issue rejection response */
8798 stat.un.b.lsRjtRsvd0 = 0;
8799 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8800 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8801 stat.un.b.vendorUnique = 0;
8802 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8803 return 0;
8804}
8805
8806/**
8807 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
8808 * @vport: pointer to a host virtual N_Port data structure.
8809 * @cmdiocb: pointer to lpfc command iocb data structure.
8810 * @ndlp: pointer to a node-list data structure.
8811 *
8812 * This routine processes Read Timeout Value (RTV) IOCB received as an
8813 * ELS unsolicited event. It first checks the remote port state. If the
8814 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8815 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8816 * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
8817 * Value (RTV) unsolicited IOCB event.
8818 *
8819 * Note that the ndlp reference count will be incremented by 1 for holding the
8820 * ndlp and the reference to ndlp will be stored into the ndlp field of
8821 * the IOCB for the completion callback function to the RTV Accept Response
8822 * ELS IOCB command.
8823 *
8824 * Return codes
8825 * 0 - Successfully processed rtv iocb (currently always return 0)
8826 **/
8827static int
8828lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8829 struct lpfc_nodelist *ndlp)
8830{
8831 int rc = 0;
8832 IOCB_t *icmd;
8833 union lpfc_wqe128 *wqe;
8834 struct lpfc_hba *phba = vport->phba;
8835 struct ls_rjt stat;
8836 struct RTV_RSP *rtv_rsp;
8837 uint8_t *pcmd;
8838 struct lpfc_iocbq *elsiocb;
8839 uint32_t cmdsize;
8840 u32 ulp_context;
8841
8842 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8843 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8844 /* reject the unsolicited RTV request and done with it */
8845 goto reject_out;
8846
8847 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
8848 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8849 lpfc_max_els_tries, ndlp,
8850 ndlp->nlp_DID, ELS_CMD_ACC);
8851
8852 if (!elsiocb)
8853 return 1;
8854
8855 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8856 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8857 pcmd += sizeof(uint32_t); /* Skip past command */
8858
8859 ulp_context = get_job_ulpcontext(phba, elsiocb);
8860 /* use the command's xri in the response */
8861 if (phba->sli_rev == LPFC_SLI_REV4) {
8862 wqe = &elsiocb->wqe;
8863 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
8864 get_job_ulpcontext(phba, cmdiocb));
8865 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8866 get_job_rcvoxid(phba, cmdiocb));
8867 } else {
8868 icmd = &elsiocb->iocb;
8869 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
8870 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb);
8871 }
8872
8873 rtv_rsp = (struct RTV_RSP *)pcmd;
8874
8875 /* populate RTV payload */
8876 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
8877 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
8878 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
8879 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
8880 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
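 /* The qtov word is assembled with bf_set() in CPU byte order and then
  * swapped once for the wire; ratov is reported in milliseconds, and
  * the E_D_TOV resolution bit reflects fc_edtovResol as set above.
  */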
8881
8882 /* Xmit ELS RTV ACC response tag <ulpIoTag> */
8883 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8884 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
8885 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
8886 "Data: x%x x%x x%x\n",
8887 elsiocb->iotag, ulp_context,
8888 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8889 ndlp->nlp_rpi,
8890 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
8891 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8892 phba->fc_stat.elsXmitACC++;
8893 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8894 if (!elsiocb->ndlp) {
8895 lpfc_els_free_iocb(phba, elsiocb);
8896 return 0;
8897 }
8898
8899 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8900 if (rc == IOCB_ERROR) {
8901 lpfc_els_free_iocb(phba, elsiocb);
8902 lpfc_nlp_put(ndlp);
8903 }
8904 return 0;
8905
8906reject_out:
8907 /* issue rejection response */
8908 stat.un.b.lsRjtRsvd0 = 0;
8909 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8910 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8911 stat.un.b.vendorUnique = 0;
8912 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8913 return 0;
8914}
8915
8916/**
 * lpfc_issue_els_rrq - Issue an els rrq command
8917 * @vport: pointer to a host virtual N_Port data structure.
8918 * @ndlp: pointer to a node-list data structure.
8919 * @did: DID of the target.
8920 * @rrq: Pointer to the rrq struct.
8921 *
8922 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
8923 * successful, the completion handler will clear the RRQ.
8924 *
8925 * Return codes
8926 * 0 - Successfully sent rrq els iocb.
8927 * 1 - Failed to send rrq els iocb.
8928 **/
8929static int
8930lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
8931 uint32_t did, struct lpfc_node_rrq *rrq)
8932{
8933 struct lpfc_hba *phba = vport->phba;
8934 struct RRQ *els_rrq;
8935 struct lpfc_iocbq *elsiocb;
8936 uint8_t *pcmd;
8937 uint16_t cmdsize;
8938 int ret;
8939
8940 if (!ndlp)
8941 return 1;
8942
8943 /* If ndlp is not NULL, we will bump the reference count on it */
8944 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
8945 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
8946 ELS_CMD_RRQ);
8947 if (!elsiocb)
8948 return 1;
8949
8950 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8951
8952 /* For RRQ request, remainder of payload is Exchange IDs */
8953 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
8954 pcmd += sizeof(uint32_t);
8955 els_rrq = (struct RRQ *) pcmd;
8956
8957 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
8958 bf_set(rrq_rxid, els_rrq, rrq->rxid);
8959 bf_set(rrq_did, els_rrq, vport->fc_myDID);
8960 els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
8961 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
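 /* RRQ payload: our S_ID plus the OX_ID/RX_ID of the exchange whose
  * recovery qualifier is being reinstated. The driver xritag is
  * translated to the adapter XRI via xri_ids[] before it goes on the
  * wire, and both words are converted to big-endian order.
  */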
8962
8963
8964 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8965 "Issue RRQ: did:x%x",
8966 did, rrq->xritag, rrq->rxid);
8967 elsiocb->context_un.rrq = rrq;
8968 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq;
8969
8970 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8971 if (!elsiocb->ndlp)
8972 goto io_err;
8973
8974 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8975 if (ret == IOCB_ERROR) {
8976 lpfc_nlp_put(ndlp);
8977 goto io_err;
8978 }
8979 return 0;
8980
8981 io_err:
8982 lpfc_els_free_iocb(phba, elsiocb);
8983 return 1;
8984}
8985
8986/**
8987 * lpfc_send_rrq - Sends ELS RRQ if needed.
8988 * @phba: pointer to lpfc hba data structure.
8989 * @rrq: pointer to the active rrq.
8990 *
8991 * This routine will call the lpfc_issue_els_rrq if the rrq is
8992 * still active for the xri. If this function returns a failure then
8993 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
8994 *
8995 * Returns 0 Success.
8996 * 1 Failure.
8997 **/
8998int
8999lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
9000{
9001 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
9002 rrq->nlp_DID);
9003 if (!ndlp)
9004 return 1;
9005
9006 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
9007 return lpfc_issue_els_rrq(rrq->vport, ndlp,
9008 rrq->nlp_DID, rrq);
9009 else
9010 return 1;
9011}
9012
9013/**
9014 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
9015 * @vport: pointer to a host virtual N_Port data structure.
9016 * @cmdsize: size of the ELS command.
9017 * @oldiocb: pointer to the original lpfc command iocb data structure.
9018 * @ndlp: pointer to a node-list data structure.
9019 *
9020 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
9021 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
9022 *
9023 * Note that the ndlp reference count will be incremented by 1 for holding the
9024 * ndlp and the reference to ndlp will be stored into the ndlp field of
9025 * the IOCB for the completion callback function to the RPL Accept Response
9026 * ELS command.
9027 *
9028 * Return code
9029 * 0 - Successfully issued ACC RPL ELS command
9030 * 1 - Failed to issue ACC RPL ELS command
9031 **/
9032static int
9033lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
9034 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
9035{
9036 int rc = 0;
9037 struct lpfc_hba *phba = vport->phba;
9038 IOCB_t *icmd;
9039 union lpfc_wqe128 *wqe;
9040 RPL_RSP rpl_rsp;
9041 struct lpfc_iocbq *elsiocb;
9042 uint8_t *pcmd;
9043 u32 ulp_context;
9044
9045 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
9046 ndlp->nlp_DID, ELS_CMD_ACC);
9047
9048 if (!elsiocb)
9049 return 1;
9050
9051 ulp_context = get_job_ulpcontext(phba, elsiocb);
9052 if (phba->sli_rev == LPFC_SLI_REV4) {
9053 wqe = &elsiocb->wqe;
9054 /* Xri / rx_id */
9055 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
9056 get_job_ulpcontext(phba, oldiocb));
9057 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9058 get_job_rcvoxid(phba, oldiocb));
9059 } else {
9060 icmd = &elsiocb->iocb;
9061 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb);
9062 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb);
9063 }
9064
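 /* Build the ACC payload: word 0 holds the ELS_CMD_ACC command code
  * with the RPL payload length folded into its second halfword, and
  * the RPL_RSP block follows immediately after.
  */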
9065 pcmd = elsiocb->cmd_dmabuf->virt;
9066 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
9067 pcmd += sizeof(uint16_t);
9068 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
9069 pcmd += sizeof(uint16_t);
9070
9071 /* Setup the RPL ACC payload */
9072 rpl_rsp.listLen = be32_to_cpu(1);
9073 rpl_rsp.index = 0;
9074 rpl_rsp.port_num_blk.portNum = 0;
9075 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
9076 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
9077 sizeof(struct lpfc_name));
9078 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
9079 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
9080 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9081 "0120 Xmit ELS RPL ACC response tag x%x "
9082 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
9083 "rpi x%x\n",
9084 elsiocb->iotag, ulp_context,
9085 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
9086 ndlp->nlp_rpi);
9087 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
9088 phba->fc_stat.elsXmitACC++;
9089 elsiocb->ndlp = lpfc_nlp_get(ndlp);
9090 if (!elsiocb->ndlp) {
9091 lpfc_els_free_iocb(phba, elsiocb);
9092 return 1;
9093 }
9094
9095 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
9096 if (rc == IOCB_ERROR) {
9097 lpfc_els_free_iocb(phba, elsiocb);
9098 lpfc_nlp_put(ndlp);
9099 return 1;
9100 }
9101
9102 return 0;
9103}
9104
9105/**
9106 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
9107 * @vport: pointer to a host virtual N_Port data structure.
9108 * @cmdiocb: pointer to lpfc command iocb data structure.
9109 * @ndlp: pointer to a node-list data structure.
9110 *
9111 * This routine processes Read Port List (RPL) IOCB received as an ELS
9112 * unsolicited event. It first checks the remote port state. If the remote
9113 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
9114 * invokes the lpfc_els_rsp_reject() routine to send reject response.
9115 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
9116 * to accept the RPL.
9117 *
9118 * Return code
9119 * 0 - Successfully processed rpl iocb (currently always return 0)
9120 **/
9121static int
9122lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9123 struct lpfc_nodelist *ndlp)
9124{
9125 struct lpfc_dmabuf *pcmd;
9126 uint32_t *lp;
9127 uint32_t maxsize;
9128 uint16_t cmdsize;
9129 RPL *rpl;
9130 struct ls_rjt stat;
9131
9132 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
9133 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
9134 /* issue rejection response */
9135 stat.un.b.lsRjtRsvd0 = 0;
9136 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
9137 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
9138 stat.un.b.vendorUnique = 0;
9139 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
9140 NULL);
9141 /* rejected the unsolicited RPL request and done with it */
9142 return 0;
9143 }
9144
9145 pcmd = cmdiocb->cmd_dmabuf;
9146 lp = (uint32_t *) pcmd->virt;
9147 rpl = (RPL *) (lp + 1);
9148 maxsize = be32_to_cpu(rpl->maxsize);
9149
9150 /* We support only one port */
9151 if ((rpl->index == 0) &&
9152 ((maxsize == 0) ||
9153 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
9154 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
9155 } else {
9156 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
9157 }
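 /* Only the local port is reported. If the requester's maxsize is zero
  * (no limit) or large enough for a full RPL_RSP, send the whole
  * response; otherwise trim the ACC to the requester's maximum.
  */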
9158 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
9159
9160 return 0;
9161}
9162
9163/**
9164 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
9165 * @vport: pointer to a virtual N_Port data structure.
9166 * @cmdiocb: pointer to lpfc command iocb data structure.
9167 * @ndlp: pointer to a node-list data structure.
9168 *
9169 * This routine processes Fibre Channel Address Resolution Protocol
9170 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
9171 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
9172 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
9173 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
9174 * remote PortName is compared against the FC PortName stored in the @vport
9175 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
9176 * compared against the FC NodeName stored in the @vport data structure.
9177 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
9178 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
9179 * invoked to send out FARP Response to the remote node. Before sending the
9180 * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
9181 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
9182 * routine is invoked to log into the remote port first.
9183 *
9184 * Return code
9185 * 0 - Either the FARP Match Mode not supported or successfully processed
9186 **/
9187static int
9188lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9189 struct lpfc_nodelist *ndlp)
9190{
9191 struct lpfc_dmabuf *pcmd;
9192 uint32_t *lp;
9193 FARP *fp;
9194 uint32_t cnt, did;
9195
9196 did = get_job_els_rsp64_did(vport->phba, cmdiocb);
9197 pcmd = cmdiocb->cmd_dmabuf;
9198 lp = (uint32_t *) pcmd->virt;
9199
9200 lp++;
9201 fp = (FARP *) lp;
9202 /* FARP-REQ received from DID <did> */
9203 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9204 "0601 FARP-REQ received from DID x%x\n", did);
9205 /* We will only support match on WWPN or WWNN */
9206 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
9207 return 0;
9208 }
9209
9210 cnt = 0;
9211 /* If this FARP command is searching for my portname */
9212 if (fp->Mflags & FARP_MATCH_PORT) {
9213 if (memcmp(&fp->RportName, &vport->fc_portname,
9214 sizeof(struct lpfc_name)) == 0)
9215 cnt = 1;
9216 }
9217
9218 /* If this FARP command is searching for my nodename */
9219 if (fp->Mflags & FARP_MATCH_NODE) {
9220 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
9221 sizeof(struct lpfc_name)) == 0)
9222 cnt = 1;
9223 }
9224
9225 if (cnt) {
9226 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
9227 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
9228 /* Log back into the node before sending the FARP. */
9229 if (fp->Rflags & FARP_REQUEST_PLOGI) {
9230 ndlp->nlp_prev_state = ndlp->nlp_state;
9231 lpfc_nlp_set_state(vport, ndlp,
9232 NLP_STE_PLOGI_ISSUE);
9233 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
9234 }
9235
9236 /* Send a FARP response to that node */
9237 if (fp->Rflags & FARP_REQUEST_FARPR)
9238 lpfc_issue_els_farpr(vport, did, 0);
9239 }
9240 }
9241 return 0;
9242}
9243
9244/**
9245 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
9246 * @vport: pointer to a host virtual N_Port data structure.
9247 * @cmdiocb: pointer to lpfc command iocb data structure.
9248 * @ndlp: pointer to a node-list data structure.
9249 *
9250 * This routine processes Fibre Channel Address Resolution Protocol
9251 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
9252 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
9253 * the FARP response request.
9254 *
9255 * Return code
9256 * 0 - Successfully processed FARPR IOCB (currently always return 0)
9257 **/
9258static int
9259lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9260 struct lpfc_nodelist *ndlp)
9261{
9262 uint32_t did;
9263
9264 did = get_job_els_rsp64_did(vport->phba, cmdiocb);
9265
9266 /* FARP-RSP received from DID <did> */
9267 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9268 "0600 FARP-RSP received from DID x%x\n", did);
9269 /* ACCEPT the Farp resp request */
9270 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
9271
9272 return 0;
9273}
9274
9275/**
9276 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
9277 * @vport: pointer to a host virtual N_Port data structure.
9278 * @cmdiocb: pointer to lpfc command iocb data structure.
9279 * @fan_ndlp: pointer to a node-list data structure.
9280 *
9281 * This routine processes a Fabric Address Notification (FAN) IOCB
9282 * command received as an ELS unsolicited event. The FAN ELS command will
9283 * only be processed on a physical port (i.e., the @vport represents the
9284 * physical port). The fabric NodeName and PortName from the FAN IOCB are
9285 * compared against those in the phba data structure. If any of those is
9286 * different, the lpfc_initial_flogi() routine is invoked to initialize
9287 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise,
9288 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
9289 * is invoked to register login to the fabric.
9290 *
9291 * Return code
9292 * 0 - Successfully processed fan iocb (currently always return 0).
9293 **/
9294static int
9295lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9296 struct lpfc_nodelist *fan_ndlp)
9297{
9298 struct lpfc_hba *phba = vport->phba;
9299 uint32_t *lp;
9300 FAN *fp;
9301
9302 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
9303 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
9304 fp = (FAN *) ++lp;
9305 /* FAN received; Fan does not have a reply sequence */
9306 if ((vport == phba->pport) &&
9307 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
9308 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
9309 sizeof(struct lpfc_name))) ||
9310 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
9311 sizeof(struct lpfc_name)))) {
9312 /* This port has switched fabrics. FLOGI is required */
9313 lpfc_issue_init_vfi(vport);
9314 } else {
9315 /* FAN verified - skip FLOGI */
9316 vport->fc_myDID = vport->fc_prevDID;
9317 if (phba->sli_rev < LPFC_SLI_REV4)
9318 lpfc_issue_fabric_reglogin(vport);
9319 else {
9320 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9321 "3138 Need register VFI: (x%x/%x)\n",
9322 vport->fc_prevDID, vport->fc_myDID);
9323 lpfc_issue_reg_vfi(vport);
9324 }
9325 }
9326 }
9327 return 0;
9328}
9329
9330/**
9331 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
9332 * @vport: pointer to a host virtual N_Port data structure.
9333 * @cmdiocb: pointer to lpfc command iocb data structure.
9334 * @ndlp: pointer to a node-list data structure.
9335 *
9336 * Return code
9337 * 0 - Successfully processed edc iocb (currently always return 0)
9338 **/
9339static int
9340lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9341 struct lpfc_nodelist *ndlp)
9342{
9343 struct lpfc_hba *phba = vport->phba;
9344 struct fc_els_edc *edc_req;
9345 struct fc_tlv_desc *tlv;
9346 uint8_t *payload;
9347 uint32_t *ptr, dtag;
9348 const char *dtag_nm;
9349 int desc_cnt = 0, bytes_remain;
9350 struct fc_diag_lnkflt_desc *plnkflt;
9351
9352 payload = cmdiocb->cmd_dmabuf->virt;
9353
9354 edc_req = (struct fc_els_edc *)payload;
9355 bytes_remain = be32_to_cpu(edc_req->desc_len);
9356
9357 ptr = (uint32_t *)payload;
9358 lpfc_printf_vlog(vport, KERN_INFO,
9359 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9360 "3319 Rcv EDC payload len %d: x%x x%x x%x\n",
9361 bytes_remain, be32_to_cpu(*ptr),
9362 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2)));
9363
9364 /* No signal support unless there is a congestion descriptor */
9365 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
9366 phba->cgn_sig_freq = 0;
9367 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
9368
9369 if (bytes_remain <= 0)
9370 goto out;
9371
9372 tlv = edc_req->desc;
9373
9374 /*
9375 * cycle through EDC diagnostic descriptors to find the
9376 * congestion signaling capability descriptor
9377 */
9378 while (bytes_remain) {
9379 if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
9380 lpfc_printf_log(phba, KERN_WARNING,
9381 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9382 "6464 Truncated TLV hdr on "
9383 "Diagnostic descriptor[%d]\n",
9384 desc_cnt);
9385 goto out;
9386 }
9387
9388 dtag = be32_to_cpu(tlv->desc_tag);
9389 switch (dtag) {
9390 case ELS_DTAG_LNK_FAULT_CAP:
9391 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
9392 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
9393 sizeof(struct fc_diag_lnkflt_desc)) {
9394 lpfc_printf_log(phba, KERN_WARNING,
9395 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9396 "6465 Truncated Link Fault Diagnostic "
9397 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
9398 desc_cnt, bytes_remain,
9399 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
9400 sizeof(struct fc_diag_lnkflt_desc));
9401 goto out;
9402 }
9403 plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
9404 lpfc_printf_log(phba, KERN_INFO,
9405 LOG_ELS | LOG_LDS_EVENT,
9406 "4626 Link Fault Desc Data: x%08x len x%x "
9407 "da x%x dd x%x interval x%x\n",
9408 be32_to_cpu(plnkflt->desc_tag),
9409 be32_to_cpu(plnkflt->desc_len),
9410 be32_to_cpu(
9411 plnkflt->degrade_activate_threshold),
9412 be32_to_cpu(
9413 plnkflt->degrade_deactivate_threshold),
9414 be32_to_cpu(plnkflt->fec_degrade_interval));
9415 break;
9416 case ELS_DTAG_CG_SIGNAL_CAP:
9417 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
9418 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
9419 sizeof(struct fc_diag_cg_sig_desc)) {
9420 lpfc_printf_log(
9421 phba, KERN_WARNING, LOG_CGN_MGMT,
9422 "6466 Truncated cgn signal Diagnostic "
9423 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
9424 desc_cnt, bytes_remain,
9425 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
9426 sizeof(struct fc_diag_cg_sig_desc));
9427 goto out;
9428 }
9429
9430 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
9431 phba->cgn_reg_signal = phba->cgn_init_reg_signal;
9432
9433 /* We start negotiation with lpfc_fabric_cgn_frequency.
9434 * When we process the EDC, we will settle on the
9435 * higher frequency.
9436 */
9437 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
9438
9439 lpfc_least_capable_settings(
9440 phba, (struct fc_diag_cg_sig_desc *)tlv);
9441 break;
9442 default:
9443 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
9444 lpfc_printf_log(phba, KERN_WARNING,
9445 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9446 "6467 unknown Diagnostic "
9447 "Descriptor[%d]: tag x%x (%s)\n",
9448 desc_cnt, dtag, dtag_nm);
9449 }
9450 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
9451 tlv = fc_tlv_next_desc(tlv);
9452 desc_cnt++;
9453 }
9454out:
9455 /* Need to send back an ACC */
9456 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);
9457
9458 lpfc_config_cgn_signal(phba);
9459 return 0;
9460}
9461
9462/**
9463 * lpfc_els_timeout - Handler function for the els timer
9464 * @t: timer context used to obtain the vport.
9465 *
9466 * This routine is invoked by the ELS timer after timeout. It posts the ELS
9467 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
9468 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
9469 * up the worker thread. It is for the worker thread to invoke the routine
9470 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
9471 **/
9472void
9473lpfc_els_timeout(struct timer_list *t)
9474{
9475 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
9476 struct lpfc_hba *phba = vport->phba;
9477 uint32_t tmo_posted;
9478 unsigned long iflag;
9479
9480 spin_lock_irqsave(&vport->work_port_lock, iflag);
9481 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
9482 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
9483 vport->work_port_events |= WORKER_ELS_TMO;
9484 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
9485
9486 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
9487 lpfc_worker_wake_up(phba);
9488 return;
9489}
9490
9491
9492/**
9493 * lpfc_els_timeout_handler - Process an els timeout event
9494 * @vport: pointer to a virtual N_Port data structure.
9495 *
9496 * This routine is the actual handler function that processes an ELS timeout
9497 * event. It walks the ELS ring to get and abort all the IOCBs (except the
9498 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
9499 * invoking the lpfc_sli_issue_abort_iotag() routine.
9500 **/
9501void
9502lpfc_els_timeout_handler(struct lpfc_vport *vport)
9503{
9504 struct lpfc_hba *phba = vport->phba;
9505 struct lpfc_sli_ring *pring;
9506 struct lpfc_iocbq *tmp_iocb, *piocb;
9507 IOCB_t *cmd = NULL;
9508 struct lpfc_dmabuf *pcmd;
9509 uint32_t els_command = 0;
9510 uint32_t timeout;
9511 uint32_t remote_ID = 0xffffffff;
9512 LIST_HEAD(abort_list);
9513 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0;
9514
9515
9516 timeout = (uint32_t)(phba->fc_ratov << 1);
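 /* ELS commands are given twice R_A_TOV to complete; anything in the
  * txcmplq whose driver timer has already run down gets aborted below.
  */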
9517
9518 pring = lpfc_phba_elsring(phba);
9519 if (unlikely(!pring))
9520 return;
9521
9522 if (phba->pport->load_flag & FC_UNLOADING)
9523 return;
9524
9525 spin_lock_irq(&phba->hbalock);
9526 if (phba->sli_rev == LPFC_SLI_REV4)
9527 spin_lock(&pring->ring_lock);
9528
9529 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
9530 ulp_command = get_job_cmnd(phba, piocb);
9531 ulp_context = get_job_ulpcontext(phba, piocb);
9532 did = get_job_els_rsp64_did(phba, piocb);
9533
9534 if (phba->sli_rev == LPFC_SLI_REV4) {
9535 iotag = get_wqe_reqtag(piocb);
9536 } else {
9537 cmd = &piocb->iocb;
9538 iotag = cmd->ulpIoTag;
9539 }
9540
9541 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 ||
9542 ulp_command == CMD_ABORT_XRI_CX ||
9543 ulp_command == CMD_ABORT_XRI_CN ||
9544 ulp_command == CMD_CLOSE_XRI_CN)
9545 continue;
9546
9547 if (piocb->vport != vport)
9548 continue;
9549
9550 pcmd = piocb->cmd_dmabuf;
9551 if (pcmd)
9552 els_command = *(uint32_t *) (pcmd->virt);
9553
9554 if (els_command == ELS_CMD_FARP ||
9555 els_command == ELS_CMD_FARPR ||
9556 els_command == ELS_CMD_FDISC)
9557 continue;
9558
9559 if (piocb->drvrTimeout > 0) {
9560 if (piocb->drvrTimeout >= timeout)
9561 piocb->drvrTimeout -= timeout;
9562 else
9563 piocb->drvrTimeout = 0;
9564 continue;
9565 }
9566
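 /* Timer has expired for this IOCB; note the remote DID for the log
  * message and queue it on abort_list so the abort can be issued
  * after the locks are dropped.
  */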
9567 remote_ID = 0xffffffff;
9568 if (ulp_command != CMD_GEN_REQUEST64_CR) {
9569 remote_ID = did;
9570 } else {
9571 struct lpfc_nodelist *ndlp;
9572 ndlp = __lpfc_findnode_rpi(vport, ulp_context);
9573 if (ndlp)
9574 remote_ID = ndlp->nlp_DID;
9575 }
9576 list_add_tail(&piocb->dlist, &abort_list);
9577 }
9578 if (phba->sli_rev == LPFC_SLI_REV4)
9579 spin_unlock(&pring->ring_lock);
9580 spin_unlock_irq(&phba->hbalock);
9581
9582 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
9583 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9584 "0127 ELS timeout Data: x%x x%x x%x "
9585 "x%x\n", els_command,
9586 remote_ID, ulp_command, iotag);
9587
9588 spin_lock_irq(&phba->hbalock);
9589 list_del_init(&piocb->dlist);
9590 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
9591 spin_unlock_irq(&phba->hbalock);
9592 }
9593
9594 /* Make sure HBA is alive */
9595 lpfc_issue_hb_tmo(phba);
9596
9597 if (!list_empty(&pring->txcmplq))
9598 if (!(phba->pport->load_flag & FC_UNLOADING))
9599 mod_timer(&vport->els_tmofunc,
9600 jiffies + msecs_to_jiffies(1000 * timeout));
9601}
9602
9603/**
9604 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
9605 * @vport: pointer to a host virtual N_Port data structure.
9606 *
9607 * This routine is used to clean up all the outstanding ELS commands on a
9608 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
9609 * routine. After that, it walks the ELS transmit queue to remove all the
9610 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
9611 * the IOCBs with a non-NULL completion callback function, the callback
9612 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
9613 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
9614 * callback function, the IOCB will simply be released. Finally, it walks
9615 * the ELS transmit completion queue to issue an abort IOCB to any transmit
9616 * completion queue IOCB that is associated with the @vport and is not
9617 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
9618 * part of the discovery state machine) out to HBA by invoking the
9619 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
9620 * abort IOCB to any transmit completion queued IOCB; it does not guarantee
9621 * the IOCBs are aborted when this function returns.
9622 **/
9623void
9624lpfc_els_flush_cmd(struct lpfc_vport *vport)
9625{
9626 LIST_HEAD(abort_list);
9627 LIST_HEAD(cancel_list);
9628 struct lpfc_hba *phba = vport->phba;
9629 struct lpfc_sli_ring *pring;
9630 struct lpfc_iocbq *tmp_iocb, *piocb;
9631 u32 ulp_command;
9632 unsigned long iflags = 0;
9633 bool mbx_tmo_err;
9634
9635 lpfc_fabric_abort_vport(vport);
9636
9637 /*
9638 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
9639 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
9640 * ultimately grabs the ring_lock, the driver must splice the list into
9641 * a working list and release the locks before calling the abort.
9642 */
9643 spin_lock_irqsave(&phba->hbalock, iflags);
9644 pring = lpfc_phba_elsring(phba);
9645
9646 /* Bail out if we've no ELS wq, like in PCI error recovery case. */
9647 if (unlikely(!pring)) {
9648 spin_unlock_irqrestore(&phba->hbalock, iflags);
9649 return;
9650 }
9651
9652 if (phba->sli_rev == LPFC_SLI_REV4)
9653 spin_lock(&pring->ring_lock);
9654
9655 mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
9656 /* First we need to issue aborts to outstanding cmds on txcmpl */
9657 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
9658 if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err)
9659 continue;
9660
9661 if (piocb->vport != vport)
9662 continue;
9663
9664 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
9665 continue;
9666
9667 /* On the ELS ring we can have ELS_REQUESTs or
9668 * GEN_REQUESTs waiting for a response.
9669 */
9670 ulp_command = get_job_cmnd(phba, piocb);
9671 if (ulp_command == CMD_ELS_REQUEST64_CR) {
9672 list_add_tail(&piocb->dlist, &abort_list);
9673
9674 /* If the link is down when flushing ELS commands
9675 * the firmware will not complete them till after
9676 * the link comes back up. This may confuse
9677 * discovery for the new link up, so we need to
9678 * change the compl routine to just clean up the iocb
9679 * and avoid any retry logic.
9680 */
9681 if (phba->link_state == LPFC_LINK_DOWN)
9682 piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
9683 } else if (ulp_command == CMD_GEN_REQUEST64_CR ||
9684 mbx_tmo_err)
9685 list_add_tail(&piocb->dlist, &abort_list);
9686 }
9687
9688 if (phba->sli_rev == LPFC_SLI_REV4)
9689 spin_unlock(&pring->ring_lock);
9690 spin_unlock_irqrestore(&phba->hbalock, iflags);
9691
9692 /* Abort each txcmpl iocb on aborted list and remove the dlist links. */
9693 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
9694 spin_lock_irqsave(&phba->hbalock, iflags);
9695 list_del_init(&piocb->dlist);
9696 if (mbx_tmo_err)
9697 list_move_tail(&piocb->list, &cancel_list);
9698 else
9699 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
9700
9701 spin_unlock_irqrestore(&phba->hbalock, iflags);
9702 }
9703 if (!list_empty(&cancel_list))
9704 lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT,
9705 IOERR_SLI_ABORTED);
9706 else
9707 /* Make sure HBA is alive */
9708 lpfc_issue_hb_tmo(phba);
9709
9710 if (!list_empty(&abort_list))
9711 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9712 "3387 abort list for txq not empty\n");
9713 INIT_LIST_HEAD(&abort_list);
9714
9715 spin_lock_irqsave(&phba->hbalock, iflags);
9716 if (phba->sli_rev == LPFC_SLI_REV4)
9717 spin_lock(&pring->ring_lock);
9718
9719 /* No need to abort the txq list,
9720 * just queue them up for lpfc_sli_cancel_iocbs
9721 */
9722 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
9723 ulp_command = get_job_cmnd(phba, piocb);
9724
9725 if (piocb->cmd_flag & LPFC_IO_LIBDFC)
9726 continue;
9727
9728 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
9729 if (ulp_command == CMD_QUE_RING_BUF_CN ||
9730 ulp_command == CMD_QUE_RING_BUF64_CN ||
9731 ulp_command == CMD_CLOSE_XRI_CN ||
9732 ulp_command == CMD_ABORT_XRI_CN ||
9733 ulp_command == CMD_ABORT_XRI_CX)
9734 continue;
9735
9736 if (piocb->vport != vport)
9737 continue;
9738
9739 list_del_init(&piocb->list);
9740 list_add_tail(&piocb->list, &abort_list);
9741 }
9742
9743 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */
9744 if (vport == phba->pport) {
9745 list_for_each_entry_safe(piocb, tmp_iocb,
9746 &phba->fabric_iocb_list, list) {
9747 list_del_init(&piocb->list);
9748 list_add_tail(&piocb->list, &abort_list);
9749 }
9750 }
9751
9752 if (phba->sli_rev == LPFC_SLI_REV4)
9753 spin_unlock(&pring->ring_lock);
9754 spin_unlock_irqrestore(&phba->hbalock, iflags);
9755
9756 /* Cancel all the IOCBs from the completions list */
9757 lpfc_sli_cancel_iocbs(phba, &abort_list,
9758 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
9759
9760 return;
9761}
9762
9763/**
9764 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
9765 * @phba: pointer to lpfc hba data structure.
9766 *
9767 * This routine is used to clean up all the outstanding ELS commands on a
9768 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
9769 * routine. After that, it walks the ELS transmit queue to remove all the
9770 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
9771 * the IOCBs with the completion callback function associated, the callback
9772 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
9773 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
9774 * callback function associated, the IOCB will simply be released. Finally,
9775 * it walks the ELS transmit completion queue to issue an abort IOCB to any
9776 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
9777 * management plane IOCBs that are not part of the discovery state machine)
9778 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
9779 **/
9780void
9781lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
9782{
9783 struct lpfc_vport *vport;
9784
9785 spin_lock_irq(&phba->port_list_lock);
9786 list_for_each_entry(vport, &phba->port_list, listentry)
9787 lpfc_els_flush_cmd(vport);
9788 spin_unlock_irq(&phba->port_list_lock);
9789
9790 return;
9791}
9792
9793/**
9794 * lpfc_send_els_failure_event - Posts an ELS command failure event
9795 * @phba: Pointer to hba context object.
9796 * @cmdiocbp: Pointer to command iocb which reported error.
9797 * @rspiocbp: Pointer to response iocb which reported error.
9798 *
9799 * This function sends an event when there is an ELS command
9800 * failure.
9801 **/
9802void
9803lpfc_send_els_failure_event(struct lpfc_hba *phba,
9804 struct lpfc_iocbq *cmdiocbp,
9805 struct lpfc_iocbq *rspiocbp)
9806{
9807 struct lpfc_vport *vport = cmdiocbp->vport;
9808 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9809 struct lpfc_lsrjt_event lsrjt_event;
9810 struct lpfc_fabric_event_header fabric_event;
9811 struct ls_rjt stat;
9812 struct lpfc_nodelist *ndlp;
9813 uint32_t *pcmd;
9814 u32 ulp_status, ulp_word4;
9815
9816 ndlp = cmdiocbp->ndlp;
9817 if (!ndlp)
9818 return;
9819
9820 ulp_status = get_job_ulpstatus(phba, rspiocbp);
9821 ulp_word4 = get_job_word4(phba, rspiocbp);
9822
9823 if (ulp_status == IOSTAT_LS_RJT) {
9824 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
9825 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
9826 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
9827 sizeof(struct lpfc_name));
9828 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
9829 sizeof(struct lpfc_name));
9830 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt;
9831 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
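 /* Word4 of an LS_RJT response carries the reject reason code and
  * explanation; overlay it on the ls_rjt union to pick out the fields
  * for the netlink event.
  */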
9832 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
9833 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
9834 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
9835 fc_host_post_vendor_event(shost,
9836 fc_get_event_number(),
9837 sizeof(lsrjt_event),
9838 (char *)&lsrjt_event,
9839 LPFC_NL_VENDOR_ID);
9840 return;
9841 }
9842 if (ulp_status == IOSTAT_NPORT_BSY ||
9843 ulp_status == IOSTAT_FABRIC_BSY) {
9844 fabric_event.event_type = FC_REG_FABRIC_EVENT;
9845 if (ulp_status == IOSTAT_NPORT_BSY)
9846 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
9847 else
9848 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
9849 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
9850 sizeof(struct lpfc_name));
9851 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
9852 sizeof(struct lpfc_name));
9853 fc_host_post_vendor_event(shost,
9854 fc_get_event_number(),
9855 sizeof(fabric_event),
9856 (char *)&fabric_event,
9857 LPFC_NL_VENDOR_ID);
9858 return;
9859 }
9860
9861}
9862
9863/**
9864 * lpfc_send_els_event - Posts unsolicited els event
9865 * @vport: Pointer to vport object.
9866 * @ndlp: Pointer FC node object.
9867 * @payload: ELS command code type.
9868 *
9869 * This function posts an event when there is an incoming
9870 * unsolicited ELS command.
9871 **/
9872static void
9873lpfc_send_els_event(struct lpfc_vport *vport,
9874 struct lpfc_nodelist *ndlp,
9875 uint32_t *payload)
9876{
9877 struct lpfc_els_event_header *els_data = NULL;
9878 struct lpfc_logo_event *logo_data = NULL;
9879 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9880
9881 if (*payload == ELS_CMD_LOGO) {
9882 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
9883 if (!logo_data) {
9884 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9885 "0148 Failed to allocate memory "
9886 "for LOGO event\n");
9887 return;
9888 }
9889 els_data = &logo_data->header;
9890 } else {
9891 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
9892 GFP_KERNEL);
9893 if (!els_data) {
9894 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9895 "0149 Failed to allocate memory "
9896 "for ELS event\n");
9897 return;
9898 }
9899 }
9900 els_data->event_type = FC_REG_ELS_EVENT;
9901 switch (*payload) {
9902 case ELS_CMD_PLOGI:
9903 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
9904 break;
9905 case ELS_CMD_PRLO:
9906 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
9907 break;
9908 case ELS_CMD_ADISC:
9909 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
9910 break;
9911 case ELS_CMD_LOGO:
9912 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
9913 /* Copy the WWPN in the LOGO payload */
9914 memcpy(logo_data->logo_wwpn, &payload[2],
9915 sizeof(struct lpfc_name));
9916 break;
9917 default:
9918 kfree(els_data);
9919 return;
9920 }
9921 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
9922 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
9923 if (*payload == ELS_CMD_LOGO) {
9924 fc_host_post_vendor_event(shost,
9925 fc_get_event_number(),
9926 sizeof(struct lpfc_logo_event),
9927 (char *)logo_data,
9928 LPFC_NL_VENDOR_ID);
9929 kfree(logo_data);
9930 } else {
9931 fc_host_post_vendor_event(shost,
9932 fc_get_event_number(),
9933 sizeof(struct lpfc_els_event_header),
9934 (char *)els_data,
9935 LPFC_NL_VENDOR_ID);
9936 kfree(els_data);
9937 }
9938
9939 return;
9940}
9941
9942
9943DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types,
9944 FC_FPIN_LI_EVT_TYPES_INIT);
9945
9946DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types,
9947 FC_FPIN_DELI_EVT_TYPES_INIT);
9948
9949DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types,
9950 FC_FPIN_CONGN_EVT_TYPES_INIT);
9951
9952DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm,
9953 fc_fpin_congn_severity_types,
9954 FC_FPIN_CONGN_SEVERITY_INIT);
9955
9956
9957/**
9958 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port
9959 * @phba: Pointer to phba object.
9960 * @wwnlist: Pointer to list of WWPNs in FPIN payload
9961 * @cnt: count of WWPNs in FPIN payload
9962 *
9963 * This routine is called when processing LI and PC descriptors.
9964 * The display is limited to 6 log messages, with up to 6 WWPNs per message.
9965 */
9966static void
9967lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt)
9968{
9969 char buf[LPFC_FPIN_WWPN_LINE_SZ];
9970 __be64 wwn;
9971 u64 wwpn;
9972 int i, len;
9973 int line = 0;
9974 int wcnt = 0;
9975 bool endit = false;
9976
9977 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:");
9978 for (i = 0; i < cnt; i++) {
9979 /* Are we on the last WWPN */
9980 if (i == (cnt - 1))
9981 endit = true;
9982
9983 /* Extract the next WWPN from the payload */
9984 wwn = *wwnlist++;
9985 wwpn = be64_to_cpu(wwn);
9986 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len,
9987 " %016llx", wwpn);
9988
9989 /* Log a message if we are on the last WWPN
9990 * or if we hit the max allowed per message.
9991 */
9992 wcnt++;
9993 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) {
9994 buf[len] = 0;
9995 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9996 "4686 %s\n", buf);
9997
9998 /* Check if we reached the last WWPN */
9999 if (endit)
10000 return;
10001
10002 /* Limit the number of log message displayed per FPIN */
10003 line++;
10004 if (line == LPFC_FPIN_WWPN_NUM_LINE) {
10005 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10006 "4687 %d WWPNs Truncated\n",
10007 cnt - i - 1);
10008 return;
10009 }
10010
10011 /* Start over with next log message */
10012 wcnt = 0;
10013 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ,
10014 "Additional WWPNs:");
10015 }
10016 }
10017}
10018
10019/**
10020 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event.
10021 * @phba: Pointer to phba object.
10022 * @tlv: Pointer to the Link Integrity Notification Descriptor.
10023 *
10024 * This function processes a Link Integrity FPIN event by logging a message.
10025 **/
10026static void
10027lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10028{
10029 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv;
10030 const char *li_evt_str;
10031 u32 li_evt, cnt;
10032
10033 li_evt = be16_to_cpu(li->event_type);
10034 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt);
10035 cnt = be32_to_cpu(li->pname_count);
10036
10037 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10038 "4680 FPIN Link Integrity %s (x%x) "
10039 "Detecting PN x%016llx Attached PN x%016llx "
10040 "Duration %d mSecs Count %d Port Cnt %d\n",
10041 li_evt_str, li_evt,
10042 be64_to_cpu(li->detecting_wwpn),
10043 be64_to_cpu(li->attached_wwpn),
10044 be32_to_cpu(li->event_threshold),
10045 be32_to_cpu(li->event_count), cnt);
10046
10047 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt);
10048}
10049
10050/**
10051 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event.
10052 * @phba: Pointer to hba object.
10053 * @tlv: Pointer to the Delivery Notification Descriptor TLV
10054 *
10055 * This function processes a Delivery FPIN event by logging a message.
10056 **/
10057static void
10058lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10059{
10060 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv;
10061 const char *del_rsn_str;
10062 u32 del_rsn;
10063 __be32 *frame;
10064
10065 del_rsn = be16_to_cpu(del->deli_reason_code);
10066 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn);
10067
10068 /* Skip over desc_tag/desc_len header to payload */
10069 frame = (__be32 *)(del + 1);
10070
10071 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10072 "4681 FPIN Delivery %s (x%x) "
10073 "Detecting PN x%016llx Attached PN x%016llx "
10074 "DiscHdr0 x%08x "
10075 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x "
10076 "DiscHdr4 x%08x DiscHdr5 x%08x\n",
10077 del_rsn_str, del_rsn,
10078 be64_to_cpu(del->detecting_wwpn),
10079 be64_to_cpu(del->attached_wwpn),
10080 be32_to_cpu(frame[0]),
10081 be32_to_cpu(frame[1]),
10082 be32_to_cpu(frame[2]),
10083 be32_to_cpu(frame[3]),
10084 be32_to_cpu(frame[4]),
10085 be32_to_cpu(frame[5]));
10086}
10087
10088/**
10089 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event.
10090 * @phba: Pointer to hba object.
10091 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV
10092 *
10093 * This function processes a Peer Congestion FPIN event by logging a message.
10094 **/
10095static void
10096lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10097{
10098 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv;
10099 const char *pc_evt_str;
10100 u32 pc_evt, cnt;
10101
10102 pc_evt = be16_to_cpu(pc->event_type);
10103 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt);
10104 cnt = be32_to_cpu(pc->pname_count);
10105
10106 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS,
10107 "4684 FPIN Peer Congestion %s (x%x) "
10108 "Duration %d mSecs "
10109 "Detecting PN x%016llx Attached PN x%016llx "
10110 "Impacted Port Cnt %d\n",
10111 pc_evt_str, pc_evt,
10112 be32_to_cpu(pc->event_period),
10113 be64_to_cpu(pc->detecting_wwpn),
10114 be64_to_cpu(pc->attached_wwpn),
10115 cnt);
10116
10117 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt);
10118}
10119
10120/**
10121 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification
10122 * @phba: Pointer to hba object.
10123 * @tlv: Pointer to the Congestion Notification Descriptor TLV
10124 *
10125 * This function processes an FPIN Congestion Notification. The notification
10126 * could be an Alarm or a Warning. This routine feeds that data into the
10127 * driver's running congestion algorithm. It also processes the FPIN by
10128 * logging a message. It returns 1 if the message should be delivered to the
10129 * upper layer, or 0 if it should not.
10130 **/
10131static int
10132lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10133{
10134 struct lpfc_cgn_info *cp;
10135 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv;
10136 const char *cgn_evt_str;
10137 u32 cgn_evt;
10138 const char *cgn_sev_str;
10139 u32 cgn_sev;
10140 uint16_t value;
10141 u32 crc;
10142 bool nm_log = false;
10143 int rc = 1;
10144
10145 cgn_evt = be16_to_cpu(cgn->event_type);
10146 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt);
10147 cgn_sev = cgn->severity;
10148 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev);
10149
10150 /* The driver only takes action on a Credit Stall or Oversubscription
10151 * event type to engage the IO algorithm. The driver prints an
10152 * unmaskable message only for Lost Credit and Credit Stall.
10153 * TODO: Still need to have definition of host action on clear,
10154 * lost credit and device specific event types.
10155 */
10156 switch (cgn_evt) {
10157 case FPIN_CONGN_LOST_CREDIT:
10158 nm_log = true;
10159 break;
10160 case FPIN_CONGN_CREDIT_STALL:
10161 nm_log = true;
10162 fallthrough;
10163 case FPIN_CONGN_OVERSUBSCRIPTION:
10164 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION)
10165 nm_log = false;
10166 switch (cgn_sev) {
10167 case FPIN_CONGN_SEVERITY_ERROR:
10168 /* Take action here for an Alarm event */
10169 if (phba->cmf_active_mode != LPFC_CFG_OFF) {
10170 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) {
10171 /* Track the alarm cnt for SYNC_WQE */
10172 atomic_inc(&phba->cgn_sync_alarm_cnt);
10173 }
10174 /* Track alarm cnt for cgn_info regardless
10175 * of whether CMF is configured for Signals
10176 * or FPINs.
10177 */
10178 atomic_inc(&phba->cgn_fabric_alarm_cnt);
10179 goto cleanup;
10180 }
10181 break;
10182 case FPIN_CONGN_SEVERITY_WARNING:
10183 /* Take action here for a Warning event */
10184 if (phba->cmf_active_mode != LPFC_CFG_OFF) {
10185 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) {
10186 /* Track the warning cnt for SYNC_WQE */
10187 atomic_inc(&phba->cgn_sync_warn_cnt);
10188 }
10189 /* Track warning cnt and freq for cgn_info
10190 * regardless of whether CMF is configured for
10191 * Signals or FPINs.
10192 */
10193 atomic_inc(&phba->cgn_fabric_warn_cnt);
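 /* The alarm path above jumps here so that both severities share
  * the frequency update and cgn_info CRC refresh below.
  */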
10194cleanup:
10195 /* Save frequency in ms */
10196 phba->cgn_fpin_frequency =
10197 be32_to_cpu(cgn->event_period);
10198 value = phba->cgn_fpin_frequency;
10199 if (phba->cgn_i) {
10200 cp = (struct lpfc_cgn_info *)
10201 phba->cgn_i->virt;
10202 cp->cgn_alarm_freq =
10203 cpu_to_le16(value);
10204 cp->cgn_warn_freq =
10205 cpu_to_le16(value);
10206 crc = lpfc_cgn_calc_crc32
10207 (cp,
10208 LPFC_CGN_INFO_SZ,
10209 LPFC_CGN_CRC32_SEED);
10210 cp->cgn_info_crc = cpu_to_le32(crc);
10211 }
10212
10213 /* Don't deliver to upper layer since
10214 * driver took action on this tlv.
10215 */
10216 rc = 0;
10217 }
10218 break;
10219 }
10220 break;
10221 }
10222
10223 /* Change the log level to unmaskable for the following event types. */
10224 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO),
10225 LOG_CGN_MGMT | LOG_ELS,
10226 "4683 FPIN CONGESTION %s type %s (x%x) Event "
10227 "Duration %d mSecs\n",
10228 cgn_sev_str, cgn_evt_str, cgn_evt,
10229 be32_to_cpu(cgn->event_period));
10230 return rc;
10231}
10232
10233void
10234lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length)
10235{
10236 struct lpfc_hba *phba = vport->phba;
10237 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p;
10238 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv;
10239 const char *dtag_nm;
10240 int desc_cnt = 0, bytes_remain, cnt;
10241 u32 dtag, deliver = 0;
10242 int len;
10243
10244 /* FPINs handled only if we are in the right discovery state */
10245 if (vport->port_state < LPFC_DISC_AUTH)
10246 return;
10247
10248 /* make sure there is the full fpin header */
10249 if (fpin_length < sizeof(struct fc_els_fpin))
10250 return;
10251
10252 /* Sanity check descriptor length. The desc_len value does not
10253 * include space for the ELS command and the desc_len fields.
10254 */
10255 len = be32_to_cpu(fpin->desc_len);
10256 if (fpin_length < len + sizeof(struct fc_els_fpin)) {
10257 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
10258 "4671 Bad ELS FPIN length %d: %d\n",
10259 len, fpin_length);
10260 return;
10261 }
10262
10263 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
10264 first_tlv = tlv;
10265 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc);
10266 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
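 /* Do not walk past the descriptor length advertised in the FPIN header */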
10267
10268 /* process each descriptor separately */
10269 while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
10270 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
10271 dtag = be32_to_cpu(tlv->desc_tag);
10272 switch (dtag) {
10273 case ELS_DTAG_LNK_INTEGRITY:
10274 lpfc_els_rcv_fpin_li(phba, tlv);
10275 deliver = 1;
10276 break;
10277 case ELS_DTAG_DELIVERY:
10278 lpfc_els_rcv_fpin_del(phba, tlv);
10279 deliver = 1;
10280 break;
10281 case ELS_DTAG_PEER_CONGEST:
10282 lpfc_els_rcv_fpin_peer_cgn(phba, tlv);
10283 deliver = 1;
10284 break;
10285 case ELS_DTAG_CONGESTION:
10286 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv);
10287 break;
10288 default:
10289 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
10290 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
10291 "4678 unknown FPIN descriptor[%d]: "
10292 "tag x%x (%s)\n",
10293 desc_cnt, dtag, dtag_nm);
10294
10295 /* If descriptor is bad, drop the rest of the data */
10296 return;
10297 }
10298 lpfc_cgn_update_stat(phba, dtag);
10299 cnt = be32_to_cpu(tlv->desc_len);
10300
10301 /* Sanity check descriptor length. The desc_len value does not
10302 * include space for the desc_tag and the desc_len fields.
10303 */
10304 len -= (cnt + sizeof(struct fc_tlv_desc));
10305 if (len < 0) {
10306 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
10307 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
10308 "4672 Bad FPIN descriptor TLV length "
10309 "%d: %d %d %s\n",
10310 cnt, len, fpin_length, dtag_nm);
10311 return;
10312 }
10313
10314 current_tlv = tlv;
10315 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
10316 tlv = fc_tlv_next_desc(tlv);
10317
10318 /* Format payload such that the FPIN delivered to the
10319 * upper layer is a single descriptor FPIN.
10320 */
10321 if (desc_cnt)
10322 memcpy(first_tlv, current_tlv,
10323 (cnt + sizeof(struct fc_els_fpin)));
10324
10325 /* Adjust the length so that it only reflects a
10326 * single descriptor FPIN.
10327 */
10328 fpin_length = cnt + sizeof(struct fc_els_fpin);
10329 fpin->desc_len = cpu_to_be32(fpin_length);
10330 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */
10331
10332 /* Send every descriptor individually to the upper layer */
10333 if (deliver)
10334 fc_host_fpin_rcv(lpfc_shost_from_vport(vport),
10335 fpin_length, (char *)fpin, 0);
10336 desc_cnt++;
10337 }
10338}
10339
10340/**
10341 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
10342 * @phba: pointer to lpfc hba data structure.
10343 * @pring: pointer to a SLI ring.
10344 * @vport: pointer to a host virtual N_Port data structure.
10345 * @elsiocb: pointer to lpfc els command iocb data structure.
10346 *
10347 * This routine is used for processing the IOCB associated with an unsolicited
10348 * event. It first determines whether there is an existing ndlp that matches
10349 * the DID from the unsolicited IOCB. If not, it will create a new one with
10350 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
10351 * IOCB is then used to invoke the proper routine and to set up proper state
10352 * of the discovery state machine.
10353 **/
10354static void
10355lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10356 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
10357{
10358 struct lpfc_nodelist *ndlp;
10359 struct ls_rjt stat;
10360 u32 *payload, payload_len;
10361 u32 cmd = 0, did = 0, newnode, status = 0;
10362 uint8_t rjt_exp, rjt_err = 0, init_link = 0;
10363 struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
10364 LPFC_MBOXQ_t *mbox;
10365
10366 if (!vport || !elsiocb->cmd_dmabuf)
10367 goto dropit;
10368
10369 newnode = 0;
10370 wcqe_cmpl = &elsiocb->wcqe_cmpl;
10371 payload = elsiocb->cmd_dmabuf->virt;
10372 if (phba->sli_rev == LPFC_SLI_REV4)
10373 payload_len = wcqe_cmpl->total_data_placed;
10374 else
10375 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
10376 status = get_job_ulpstatus(phba, elsiocb);
10377 cmd = *payload;
10378 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
10379 lpfc_sli3_post_buffer(phba, pring, 1);
10380
10381 did = get_job_els_rsp64_did(phba, elsiocb);
10382 if (status) {
10383 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10384 "RCV Unsol ELS: status:x%x/x%x did:x%x",
10385 status, get_job_word4(phba, elsiocb), did);
10386 goto dropit;
10387 }
10388
10389 /* Check to see if link went down during discovery */
10390 if (lpfc_els_chk_latt(vport))
10391 goto dropit;
10392
10393 /* Ignore traffic received during vport shutdown. */
10394 if (vport->load_flag & FC_UNLOADING)
10395 goto dropit;
10396
10397 /* If NPort discovery is delayed drop incoming ELS */
10398 if ((vport->fc_flag & FC_DISC_DELAYED) &&
10399 (cmd != ELS_CMD_PLOGI))
10400 goto dropit;
10401
10402 ndlp = lpfc_findnode_did(vport, did);
10403 if (!ndlp) {
10404 /* Cannot find existing Fabric ndlp, so allocate a new one */
10405 ndlp = lpfc_nlp_init(vport, did);
10406 if (!ndlp)
10407 goto dropit;
10408 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
10409 newnode = 1;
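 /* DIDs in the well-known fabric address range identify fabric services */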
10410 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
10411 ndlp->nlp_type |= NLP_FABRIC;
10412 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
10413 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
10414 newnode = 1;
10415 }
10416
10417 phba->fc_stat.elsRcvFrame++;
10418
10419 /*
10420 * Do not process any unsolicited ELS commands
10421 * if the ndlp is in DEV_LOSS
10422 */
10423 spin_lock_irq(&ndlp->lock);
10424 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
10425 spin_unlock_irq(&ndlp->lock);
10426 if (newnode)
10427 lpfc_nlp_put(ndlp);
10428 goto dropit;
10429 }
10430 spin_unlock_irq(&ndlp->lock);
10431
10432 elsiocb->ndlp = lpfc_nlp_get(ndlp);
10433 if (!elsiocb->ndlp)
10434 goto dropit;
10435 elsiocb->vport = vport;
10436
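 /* Word 0 of an RSCN also carries length fields; mask them off so the
  * command compares correctly in the switch below.
  */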
10437 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
10438 cmd &= ELS_CMD_MASK;
10439 }
10440 /* ELS command <elsCmd> received from NPORT <did> */
10441 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
10442 "0112 ELS command x%x received from NPORT x%x "
10443 "refcnt %d Data: x%x x%x x%x x%x\n",
10444 cmd, did, kref_read(&ndlp->kref), vport->port_state,
10445 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID);
10446
10447 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */
10448 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
10449 (cmd != ELS_CMD_FLOGI) &&
10450 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) {
10451 rjt_err = LSRJT_LOGICAL_BSY;
10452 rjt_exp = LSEXP_NOTHING_MORE;
10453 goto lsrjt;
10454 }
10455
10456 switch (cmd) {
10457 case ELS_CMD_PLOGI:
10458 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10459 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
10460 did, vport->port_state, ndlp->nlp_flag);
10461
10462 phba->fc_stat.elsRcvPLOGI++;
10463 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
10464 if (phba->sli_rev == LPFC_SLI_REV4 &&
10465 (phba->pport->fc_flag & FC_PT2PT)) {
10466 vport->fc_prevDID = vport->fc_myDID;
10467 /* Our DID needs to be updated before registering
10468 * the vfi. This is done in lpfc_rcv_plogi but
10469 * that is called after the reg_vfi.
10470 */
10471 vport->fc_myDID =
10472 bf_get(els_rsp64_sid,
10473 &elsiocb->wqe.xmit_els_rsp);
10474 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
10475 "3312 Remote port assigned DID x%x "
10476 "%x\n", vport->fc_myDID,
10477 vport->fc_prevDID);
10478 }
10479
10480 lpfc_send_els_event(vport, ndlp, payload);
10481
10482 /* If Nport discovery is delayed, reject PLOGIs */
10483 if (vport->fc_flag & FC_DISC_DELAYED) {
10484 rjt_err = LSRJT_UNABLE_TPC;
10485 rjt_exp = LSEXP_NOTHING_MORE;
10486 break;
10487 }
10488
10489 if (vport->port_state < LPFC_DISC_AUTH) {
10490 if (!(phba->pport->fc_flag & FC_PT2PT) ||
10491 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
10492 rjt_err = LSRJT_UNABLE_TPC;
10493 rjt_exp = LSEXP_NOTHING_MORE;
10494 break;
10495 }
10496 }
10497
10498 spin_lock_irq(&ndlp->lock);
10499 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
10500 spin_unlock_irq(&ndlp->lock);
10501
10502 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10503 NLP_EVT_RCV_PLOGI);
10504
10505 break;
10506 case ELS_CMD_FLOGI:
10507 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10508 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
10509 did, vport->port_state, ndlp->nlp_flag);
10510
10511 phba->fc_stat.elsRcvFLOGI++;
10512
10513 /* If the driver believes fabric discovery is done and is ready,
10514 * bounce the link. There is some discrepancy.
10515 */
10516 if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
10517 vport->fc_flag & FC_PT2PT &&
10518 vport->rcv_flogi_cnt >= 1) {
10519 rjt_err = LSRJT_LOGICAL_BSY;
10520 rjt_exp = LSEXP_NOTHING_MORE;
10521 init_link++;
10522 goto lsrjt;
10523 }
10524
10525 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
10526 /* retain node if our response is deferred */
10527 if (phba->defer_flogi_acc_flag)
10528 break;
10529 if (newnode)
10530 lpfc_disc_state_machine(vport, ndlp, NULL,
10531 NLP_EVT_DEVICE_RM);
10532 break;
10533 case ELS_CMD_LOGO:
10534 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10535 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
10536 did, vport->port_state, ndlp->nlp_flag);
10537
10538 phba->fc_stat.elsRcvLOGO++;
10539 lpfc_send_els_event(vport, ndlp, payload);
10540 if (vport->port_state < LPFC_DISC_AUTH) {
10541 rjt_err = LSRJT_UNABLE_TPC;
10542 rjt_exp = LSEXP_NOTHING_MORE;
10543 break;
10544 }
10545 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
10546 if (newnode)
10547 lpfc_disc_state_machine(vport, ndlp, NULL,
10548 NLP_EVT_DEVICE_RM);
10549 break;
10550 case ELS_CMD_PRLO:
10551 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10552 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
10553 did, vport->port_state, ndlp->nlp_flag);
10554
10555 phba->fc_stat.elsRcvPRLO++;
10556 lpfc_send_els_event(vport, ndlp, payload);
10557 if (vport->port_state < LPFC_DISC_AUTH) {
10558 rjt_err = LSRJT_UNABLE_TPC;
10559 rjt_exp = LSEXP_NOTHING_MORE;
10560 break;
10561 }
10562 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
10563 break;
10564 case ELS_CMD_LCB:
10565 phba->fc_stat.elsRcvLCB++;
10566 lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
10567 break;
10568 case ELS_CMD_RDP:
10569 phba->fc_stat.elsRcvRDP++;
10570 lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
10571 break;
10572 case ELS_CMD_RSCN:
10573 phba->fc_stat.elsRcvRSCN++;
10574 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
10575 if (newnode)
10576 lpfc_disc_state_machine(vport, ndlp, NULL,
10577 NLP_EVT_DEVICE_RM);
10578 break;
10579 case ELS_CMD_ADISC:
10580 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10581 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
10582 did, vport->port_state, ndlp->nlp_flag);
10583
10584 lpfc_send_els_event(vport, ndlp, payload);
10585 phba->fc_stat.elsRcvADISC++;
10586 if (vport->port_state < LPFC_DISC_AUTH) {
10587 rjt_err = LSRJT_UNABLE_TPC;
10588 rjt_exp = LSEXP_NOTHING_MORE;
10589 break;
10590 }
10591 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10592 NLP_EVT_RCV_ADISC);
10593 break;
10594 case ELS_CMD_PDISC:
10595 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10596 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
10597 did, vport->port_state, ndlp->nlp_flag);
10598
10599 phba->fc_stat.elsRcvPDISC++;
10600 if (vport->port_state < LPFC_DISC_AUTH) {
10601 rjt_err = LSRJT_UNABLE_TPC;
10602 rjt_exp = LSEXP_NOTHING_MORE;
10603 break;
10604 }
10605 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10606 NLP_EVT_RCV_PDISC);
10607 break;
10608 case ELS_CMD_FARPR:
10609 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10610 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
10611 did, vport->port_state, ndlp->nlp_flag);
10612
10613 phba->fc_stat.elsRcvFARPR++;
10614 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
10615 break;
10616 case ELS_CMD_FARP:
10617 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10618 "RCV FARP: did:x%x/ste:x%x flg:x%x",
10619 did, vport->port_state, ndlp->nlp_flag);
10620
10621 phba->fc_stat.elsRcvFARP++;
10622 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
10623 break;
10624 case ELS_CMD_FAN:
10625 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10626 "RCV FAN: did:x%x/ste:x%x flg:x%x",
10627 did, vport->port_state, ndlp->nlp_flag);
10628
10629 phba->fc_stat.elsRcvFAN++;
10630 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
10631 break;
10632 case ELS_CMD_PRLI:
10633 case ELS_CMD_NVMEPRLI:
10634 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10635 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
10636 did, vport->port_state, ndlp->nlp_flag);
10637
10638 phba->fc_stat.elsRcvPRLI++;
10639 if ((vport->port_state < LPFC_DISC_AUTH) &&
10640 (vport->fc_flag & FC_FABRIC)) {
10641 rjt_err = LSRJT_UNABLE_TPC;
10642 rjt_exp = LSEXP_NOTHING_MORE;
10643 break;
10644 }
10645 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
10646 break;
10647 case ELS_CMD_LIRR:
10648 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10649 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
10650 did, vport->port_state, ndlp->nlp_flag);
10651
10652 phba->fc_stat.elsRcvLIRR++;
10653 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
10654 if (newnode)
10655 lpfc_disc_state_machine(vport, ndlp, NULL,
10656 NLP_EVT_DEVICE_RM);
10657 break;
10658 case ELS_CMD_RLS:
10659 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10660 "RCV RLS: did:x%x/ste:x%x flg:x%x",
10661 did, vport->port_state, ndlp->nlp_flag);
10662
10663 phba->fc_stat.elsRcvRLS++;
10664 lpfc_els_rcv_rls(vport, elsiocb, ndlp);
10665 if (newnode)
10666 lpfc_disc_state_machine(vport, ndlp, NULL,
10667 NLP_EVT_DEVICE_RM);
10668 break;
10669 case ELS_CMD_RPL:
10670 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10671 "RCV RPL: did:x%x/ste:x%x flg:x%x",
10672 did, vport->port_state, ndlp->nlp_flag);
10673
10674 phba->fc_stat.elsRcvRPL++;
10675 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
10676 if (newnode)
10677 lpfc_disc_state_machine(vport, ndlp, NULL,
10678 NLP_EVT_DEVICE_RM);
10679 break;
10680 case ELS_CMD_RNID:
10681 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10682 "RCV RNID: did:x%x/ste:x%x flg:x%x",
10683 did, vport->port_state, ndlp->nlp_flag);
10684
10685 phba->fc_stat.elsRcvRNID++;
10686 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
10687 if (newnode)
10688 lpfc_disc_state_machine(vport, ndlp, NULL,
10689 NLP_EVT_DEVICE_RM);
10690 break;
10691 case ELS_CMD_RTV:
10692 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10693 "RCV RTV: did:x%x/ste:x%x flg:x%x",
10694 did, vport->port_state, ndlp->nlp_flag);
10695 phba->fc_stat.elsRcvRTV++;
10696 lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
10697 if (newnode)
10698 lpfc_disc_state_machine(vport, ndlp, NULL,
10699 NLP_EVT_DEVICE_RM);
10700 break;
10701 case ELS_CMD_RRQ:
10702 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10703 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
10704 did, vport->port_state, ndlp->nlp_flag);
10705
10706 phba->fc_stat.elsRcvRRQ++;
10707 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
10708 if (newnode)
10709 lpfc_disc_state_machine(vport, ndlp, NULL,
10710 NLP_EVT_DEVICE_RM);
10711 break;
10712 case ELS_CMD_ECHO:
10713 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10714 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
10715 did, vport->port_state, ndlp->nlp_flag);
10716
10717 phba->fc_stat.elsRcvECHO++;
10718 lpfc_els_rcv_echo(vport, elsiocb, ndlp);
10719 if (newnode)
10720 lpfc_disc_state_machine(vport, ndlp, NULL,
10721 NLP_EVT_DEVICE_RM);
10722 break;
10723 case ELS_CMD_REC:
10724 /* receive this due to exchange closed */
10725 rjt_err = LSRJT_UNABLE_TPC;
10726 rjt_exp = LSEXP_INVALID_OX_RX;
10727 break;
10728 case ELS_CMD_FPIN:
10729 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10730 "RCV FPIN: did:x%x/ste:x%x flg:x%x",
10731 did, vport->port_state, ndlp->nlp_flag);
10732
10733 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload,
10734 payload_len);
10735
10736 /* There are no replies, so no rjt codes */
10737 break;
10738 case ELS_CMD_EDC:
10739 lpfc_els_rcv_edc(vport, cmdiocb: elsiocb, ndlp);
10740 break;
10741 case ELS_CMD_RDF:
10742 phba->fc_stat.elsRcvRDF++;
10743 /* Accept RDF only from fabric controller */
10744 if (did != Fabric_Cntl_DID) {
10745 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
10746 "1115 Received RDF from invalid DID "
10747 "x%x\n", did);
10748 rjt_err = LSRJT_PROTOCOL_ERR;
10749 rjt_exp = LSEXP_NOTHING_MORE;
10750 goto lsrjt;
10751 }
10752
10753 lpfc_els_rcv_rdf(vport, elsiocb, ndlp);
10754 break;
10755 default:
10756 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10757 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
10758 cmd, did, vport->port_state);
10759
10760 /* Unsupported ELS command, reject */
10761 rjt_err = LSRJT_CMD_UNSUPPORTED;
10762 rjt_exp = LSEXP_NOTHING_MORE;
10763
10764 /* Unknown ELS command <elsCmd> received from NPORT <did> */
10765 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10766 "0115 Unknown ELS command x%x "
10767 "received from NPORT x%x\n", cmd, did);
10768 if (newnode)
10769 lpfc_disc_state_machine(vport, ndlp, NULL,
10770 NLP_EVT_DEVICE_RM);
10771 break;
10772 }
10773
10774lsrjt:
10775 /* check whether we need to LS_RJT the received ELS cmd */
10776 if (rjt_err) {
10777 memset(&stat, 0, sizeof(stat));
10778 stat.un.b.lsRjtRsnCode = rjt_err;
10779 stat.un.b.lsRjtRsnCodeExp = rjt_exp;
10780 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
10781 NULL);
10782 /* Remove the reference from above for new nodes. */
10783 if (newnode)
10784 lpfc_disc_state_machine(vport, ndlp, NULL,
10785 NLP_EVT_DEVICE_RM);
10786 }
10787
10788 /* Release the reference on this elsiocb, not the ndlp. */
10789 lpfc_nlp_put(elsiocb->ndlp);
10790 elsiocb->ndlp = NULL;
10791
10792 /* Special case. Driver received an unsolicited command that
10793 * is unsupportable given the driver's current state. Reset the
10794 * link and start over.
10795 */
10796 if (init_link) {
10797 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10798 if (!mbox)
10799 return;
10800 lpfc_linkdown(phba);
10801 lpfc_init_link(phba, mbox,
10802 phba->cfg_topology,
10803 phba->cfg_link_speed);
10804 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
10805 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10806 mbox->vport = vport;
10807 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
10808 MBX_NOT_FINISHED)
10809 mempool_free(mbox, phba->mbox_mem_pool);
10810 }
10811
10812 return;
10813
10814dropit:
10815 if (vport && !(vport->load_flag & FC_UNLOADING))
10816 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10817 "0111 Dropping received ELS cmd "
10818 "Data: x%x x%x x%x x%x\n",
10819 cmd, status, get_job_word4(phba, elsiocb), did);
10820
10821 phba->fc_stat.elsRcvDrop++;
10822}
10823
10824/**
10825 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
10826 * @phba: pointer to lpfc hba data structure.
10827 * @pring: pointer to a SLI ring.
10828 * @elsiocb: pointer to lpfc els iocb data structure.
10829 *
10830 * This routine is used to process an unsolicited event received from a SLI
10831 * (Service Level Interface) ring. The actual processing of the data buffer
10832 * associated with the unsolicited event is done by invoking the routine
10833 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
10834 * SLI ring on which the unsolicited event was received.
10835 **/
10836void
10837lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10838 struct lpfc_iocbq *elsiocb)
10839{
10840 struct lpfc_vport *vport = elsiocb->vport;
10841 u32 ulp_command, status, parameter, bde_count = 0;
10842 IOCB_t *icmd;
10843 struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
10844 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf;
10845 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf;
10846 dma_addr_t paddr;
10847
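 /* Detach the receive buffers from the iocb; they are reattached
  * below according to how unsolicited buffers are posted.
  */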
10848 elsiocb->cmd_dmabuf = NULL;
10849 elsiocb->rsp_dmabuf = NULL;
10850 elsiocb->bpl_dmabuf = NULL;
10851
10852 wcqe_cmpl = &elsiocb->wcqe_cmpl;
10853 ulp_command = get_job_cmnd(phba, elsiocb);
10854 status = get_job_ulpstatus(phba, elsiocb);
10855 parameter = get_job_word4(phba, elsiocb);
10856 if (phba->sli_rev == LPFC_SLI_REV4)
10857 bde_count = wcqe_cmpl->word3;
10858 else
10859 bde_count = elsiocb->iocb.ulpBdeCount;
10860
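 /* Firmware is asking for more receive buffers; replenish the ELS HBQ */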
10861 if (status == IOSTAT_NEED_BUFFER) {
10862 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
10863 } else if (status == IOSTAT_LOCAL_REJECT &&
10864 (parameter & IOERR_PARAM_MASK) ==
10865 IOERR_RCV_BUFFER_WAITING) {
10866 phba->fc_stat.NoRcvBuf++;
10867 /* Not enough posted buffers; Try posting more buffers */
10868 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
10869 lpfc_sli3_post_buffer(phba, pring, 0);
10870 return;
10871 }
10872
10873 if (phba->sli_rev == LPFC_SLI_REV3) {
10874 icmd = &elsiocb->iocb;
10875 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
10876 (ulp_command == CMD_IOCB_RCV_ELS64_CX ||
10877 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) {
10878 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
10879 vport = phba->pport;
10880 else
10881 vport = lpfc_find_vport_by_vpid(phba,
10882 icmd->unsli3.rcvsli3.vpi);
10883 }
10884 }
10885
10886 /* If there are no BDEs associated
10887 * with this IOCB, there is nothing to do.
10888 */
10889 if (bde_count == 0)
10890 return;
10891
10892 /* Account for SLI2 or SLI3 and later unsolicited buffering */
10893 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
10894 elsiocb->cmd_dmabuf = bdeBuf1;
10895 if (bde_count == 2)
10896 elsiocb->bpl_dmabuf = bdeBuf2;
10897 } else {
10898 icmd = &elsiocb->iocb;
10899 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
10900 icmd->un.cont64[0].addrLow);
10901 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
10902 paddr);
10903 if (bde_count == 2) {
10904 paddr = getPaddr(icmd->un.cont64[1].addrHigh,
10905 icmd->un.cont64[1].addrLow);
10906 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
10907 pring,
10908 paddr);
10909 }
10910 }
10911
10912 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
10913 /*
10914 * The different unsolicited event handlers would tell us
10915 * if they are done with "mp" by setting cmd_dmabuf to NULL.
10916 */
10917 if (elsiocb->cmd_dmabuf) {
10918 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
10919 elsiocb->cmd_dmabuf = NULL;
10920 }
10921
10922 if (elsiocb->bpl_dmabuf) {
10923 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf);
10924 elsiocb->bpl_dmabuf = NULL;
10925 }
10926
10927}
10928
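/**
 * lpfc_start_fdmi - Start FDMI discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine starts Fabric-Device Management Interface (FDMI) discovery
 * by issuing a PLOGI to the FDMI well-known address, allocating an ndlp
 * for FDMI_DID first if one does not already exist.
 **/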
10929static void
10930lpfc_start_fdmi(struct lpfc_vport *vport)
10931{
10932 struct lpfc_nodelist *ndlp;
10933
10934 /* If this is the first time, allocate an ndlp and initialize
10935 * it. Otherwise, make sure the node is enabled and then do the
10936 * login.
10937 */
10938 ndlp = lpfc_findnode_did(vport, FDMI_DID);
10939 if (!ndlp) {
10940 ndlp = lpfc_nlp_init(vport, FDMI_DID);
10941 if (ndlp) {
10942 ndlp->nlp_type |= NLP_FABRIC;
10943 } else {
10944 return;
10945 }
10946 }
10947
10948 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
10949 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
10950}
10951
10952/**
10953 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
10954 * @phba: pointer to lpfc hba data structure.
10955 * @vport: pointer to a virtual N_Port data structure.
10956 *
10957 * This routine issues a Port Login (PLOGI) to the Name Server with
10958 * State Change Request (SCR) for a @vport. This routine will create an
10959 * ndlp for the Name Server associated to the @vport if such node does
10960 * not already exist. The PLOGI to Name Server is issued by invoking the
10961 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
10962 * (FDMI) is configured for the @vport, an FDMI node will be created and
10963 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
10964 **/
10965void
10966lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
10967{
10968 struct lpfc_nodelist *ndlp;
10969 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
10970
10971 /*
10972 * If the lpfc_delay_discovery parameter is set, the clean address
10973 * bit is cleared, and the fc fabric parameters have changed, delay FC NPort
10974 * discovery.
10975 */
10976 spin_lock_irq(shost->host_lock);
10977 if (vport->fc_flag & FC_DISC_DELAYED) {
10978 spin_unlock_irq(shost->host_lock);
10979 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10980 "3334 Delay fc port discovery for %d secs\n",
10981 phba->fc_ratov);
10982 mod_timer(&vport->delayed_disc_tmo,
10983 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
10984 return;
10985 }
10986 spin_unlock_irq(shost->host_lock);
10987
10988 ndlp = lpfc_findnode_did(vport, NameServer_DID);
10989 if (!ndlp) {
10990 ndlp = lpfc_nlp_init(vport, NameServer_DID);
10991 if (!ndlp) {
10992 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
10993 lpfc_disc_start(vport);
10994 return;
10995 }
10996 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
10997 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10998 "0251 NameServer login: no memory\n");
10999 return;
11000 }
11001 }
11002
11003 ndlp->nlp_type |= NLP_FABRIC;
11004
11005 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
11006
11007 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
11008 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11009 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11010 "0252 Cannot issue NameServer login\n");
11011 return;
11012 }
11013
11014 if ((phba->cfg_enable_SmartSAN ||
11015 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
11016 (vport->load_flag & FC_ALLOW_FDMI))
11017 lpfc_start_fdmi(vport);
11018}
11019
11020/**
11021 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
11022 * @phba: pointer to lpfc hba data structure.
11023 * @pmb: pointer to the driver internal queue element for mailbox command.
11024 *
11025 * This routine is the completion callback function to register new vport
11026 * mailbox command. If the new vport mailbox command completes successfully,
11027 * the fabric registration login shall be performed on physical port (the
11028 * new vport created is actually a physical port, with VPI 0) or the port
11029 * login to Name Server for State Change Request (SCR) will be performed
11030 * on virtual port (real virtual port, with VPI greater than 0).
11031 **/
11032static void
11033lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
11034{
11035 struct lpfc_vport *vport = pmb->vport;
11036 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11037 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
11038 MAILBOX_t *mb = &pmb->u.mb;
11039 int rc;
11040
11041 spin_lock_irq(shost->host_lock);
11042 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
11043 spin_unlock_irq(shost->host_lock);
11044
11045 if (mb->mbxStatus) {
11046 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11047 "0915 Register VPI failed : Status: x%x"
11048 " upd bit: x%x \n", mb->mbxStatus,
11049 mb->un.varRegVpi.upd);
11050 if (phba->sli_rev == LPFC_SLI_REV4 &&
11051 mb->un.varRegVpi.upd)
11052 goto mbox_err_exit;
11053
11054 switch (mb->mbxStatus) {
11055 case 0x11: /* unsupported feature */
11056 case 0x9603: /* max_vpi exceeded */
11057 case 0x9602: /* Link event since CLEAR_LA */
11058 /* giving up on vport registration */
11059 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11060 spin_lock_irq(shost->host_lock);
11061 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
11062 spin_unlock_irq(shost->host_lock);
11063 lpfc_can_disctmo(vport);
11064 break;
11065 /* If reg_vpi fail with invalid VPI status, re-init VPI */
11066 case 0x20:
11067 spin_lock_irq(shost->host_lock);
11068 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
11069 spin_unlock_irq(shost->host_lock);
11070 lpfc_init_vpi(phba, pmb, vport->vpi);
11071 pmb->vport = vport;
11072 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
11073 rc = lpfc_sli_issue_mbox(phba, pmb,
11074 MBX_NOWAIT);
11075 if (rc == MBX_NOT_FINISHED) {
11076 lpfc_printf_vlog(vport, KERN_ERR,
11077 LOG_TRACE_EVENT,
11078 "2732 Failed to issue INIT_VPI"
11079 " mailbox command\n");
11080 } else {
11081 lpfc_nlp_put(ndlp);
11082 return;
11083 }
11084 fallthrough;
11085 default:
11086 /* Try to recover from this error */
11087 if (phba->sli_rev == LPFC_SLI_REV4)
11088 lpfc_sli4_unreg_all_rpis(vport);
11089 lpfc_mbx_unreg_vpi(vport);
11090 spin_lock_irq(shost->host_lock);
11091 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
11092 spin_unlock_irq(shost->host_lock);
11093 if (mb->mbxStatus == MBX_NOT_FINISHED)
11094 break;
11095 if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
11096 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
11097 if (phba->sli_rev == LPFC_SLI_REV4)
11098 lpfc_issue_init_vfi(vport);
11099 else
11100 lpfc_initial_flogi(vport);
11101 } else {
11102 lpfc_initial_fdisc(vport);
11103 }
11104 break;
11105 }
11106 } else {
11107 spin_lock_irq(shost->host_lock);
11108 vport->vpi_state |= LPFC_VPI_REGISTERED;
11109 spin_unlock_irq(shost->host_lock);
11110 if (vport == phba->pport) {
11111 if (phba->sli_rev < LPFC_SLI_REV4)
11112 lpfc_issue_fabric_reglogin(vport);
11113 else {
11114 /*
11115 * If the physical port is instantiated using
11116 * FDISC, do not start vport discovery.
11117 */
11118 if (vport->port_state != LPFC_FDISC)
11119 lpfc_start_fdiscs(phba);
11120 lpfc_do_scr_ns_plogi(phba, vport);
11121 }
11122 } else {
11123 lpfc_do_scr_ns_plogi(phba, vport);
11124 }
11125 }
11126mbox_err_exit:
11127 /* Now, we decrement the ndlp reference count held for this
11128 * callback function
11129 */
11130 lpfc_nlp_put(ndlp);
11131
11132 mempool_free(pmb, phba->mbox_mem_pool);
11133 return;
11134}
11135
11136/**
11137 * lpfc_register_new_vport - Register a new vport with a HBA
11138 * @phba: pointer to lpfc hba data structure.
11139 * @vport: pointer to a host virtual N_Port data structure.
11140 * @ndlp: pointer to a node-list data structure.
11141 *
11142 * This routine registers the @vport as a new virtual port with a HBA.
11143 * It is done through a registering vpi mailbox command.
11144 **/
11145void
11146lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
11147 struct lpfc_nodelist *ndlp)
11148{
11149 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11150 LPFC_MBOXQ_t *mbox;
11151
11152 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11153 if (mbox) {
11154 lpfc_reg_vpi(vport, mbox);
11155 mbox->vport = vport;
11156 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
11157 if (!mbox->ctx_ndlp) {
11158 mempool_free(mbox, phba->mbox_mem_pool);
11159 goto mbox_err_exit;
11160 }
11161
11162 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
11163 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
11164 == MBX_NOT_FINISHED) {
11165 /* mailbox command was not successful; decrement the
11166 * ndlp reference count taken for this command
11167 */
11168 lpfc_nlp_put(ndlp);
11169 mempool_free(mbox, phba->mbox_mem_pool);
11170
11171 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11172 "0253 Register VPI: Can't send mbox\n");
11173 goto mbox_err_exit;
11174 }
11175 } else {
11176 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11177 "0254 Register VPI: no memory\n");
11178 goto mbox_err_exit;
11179 }
11180 return;
11181
11182mbox_err_exit:
11183 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11184 spin_lock_irq(shost->host_lock);
11185 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
11186 spin_unlock_irq(shost->host_lock);
11187 return;
11188}
11189
11190/**
11191 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
11192 * @phba: pointer to lpfc hba data structure.
11193 *
11194 * This routine cancels the retry delay timers for all the vports.
11195 **/
11196void
11197lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
11198{
11199 struct lpfc_vport **vports;
11200 struct lpfc_nodelist *ndlp;
11201 uint32_t link_state;
11202 int i;
11203
11204 /* Treat this failure as linkdown for all vports */
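 /* Save and restore link_state around lpfc_linkdown() so that only the
  * per-vport discovery state is torn down, not the recorded link state.
  */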
11205 link_state = phba->link_state;
11206 lpfc_linkdown(phba);
11207 phba->link_state = link_state;
11208
11209 vports = lpfc_create_vport_work_array(phba);
11210
11211 if (vports) {
11212 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
11213 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
11214 if (ndlp)
11215 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
11216 lpfc_els_flush_cmd(vports[i]);
11217 }
11218 lpfc_destroy_vport_work_array(phba, vports);
11219 }
11220}
11221
11222/**
11223 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
11224 * @phba: pointer to lpfc hba data structure.
11225 *
11226 * This routine aborts all pending discovery commands and
11227 * starts a timer to retry FLOGI for the physical port
11228 * discovery.
11229 **/
11230void
11231lpfc_retry_pport_discovery(struct lpfc_hba *phba)
11232{
11233 struct lpfc_nodelist *ndlp;
11234
11235 /* Cancel all vports' retry delay timers */
11236 lpfc_cancel_all_vport_retry_delay_timer(phba);
11237
11238 /* If the fabric requires FLOGI, then re-instantiate the physical login */
11239 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
11240 if (!ndlp)
11241 return;
11242
11243 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
11244 spin_lock_irq(&ndlp->lock);
11245 ndlp->nlp_flag |= NLP_DELAY_TMO;
11246 spin_unlock_irq(&ndlp->lock);
11247 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
11248 phba->pport->port_state = LPFC_FLOGI;
11249 return;
11250}
11251
11252/**
11253 * lpfc_fabric_login_reqd - Check if FLOGI required.
11254 * @phba: pointer to lpfc hba data structure.
11255 * @cmdiocb: pointer to FDISC command iocb.
11256 * @rspiocb: pointer to FDISC response iocb.
11257 *
11258 * This routine checks if a FLOGI is required for FDISC
11259 * to succeed.
11260 **/
11261static int
11262lpfc_fabric_login_reqd(struct lpfc_hba *phba,
11263 struct lpfc_iocbq *cmdiocb,
11264 struct lpfc_iocbq *rspiocb)
11265{
11266 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11267 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11268
11269 if (ulp_status != IOSTAT_FABRIC_RJT ||
11270 ulp_word4 != RJT_LOGIN_REQUIRED)
11271 return 0;
11272 else
11273 return 1;
11274}
11275
11276/**
11277 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
11278 * @phba: pointer to lpfc hba data structure.
11279 * @cmdiocb: pointer to lpfc command iocb data structure.
11280 * @rspiocb: pointer to lpfc response iocb data structure.
11281 *
11282 * This routine is the completion callback function to a Fabric Discover
11283 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
11284 * single threaded, each FDISC completion callback function will reset
11285 * the discovery timer for all vports so that the timers do not time out
11286 * unnecessarily. The function checks the FDISC IOCB status. If an error is
11287 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the
11288 * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID
11289 * assigned to the vport has been changed with the completion of the FDISC
11290 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
11291 * are unregistered from the HBA, and then the lpfc_register_new_vport()
11292 * routine is invoked to register new vport with the HBA. Otherwise, the
11293 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
11294 * Server for State Change Request (SCR).
11295 **/
11296static void
11297lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11298 struct lpfc_iocbq *rspiocb)
11299{
11300 struct lpfc_vport *vport = cmdiocb->vport;
11301 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11302 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
11303 struct lpfc_nodelist *np;
11304 struct lpfc_nodelist *next_np;
11305 struct lpfc_iocbq *piocb;
11306 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
11307 struct serv_parm *sp;
11308 uint8_t fabric_param_changed;
11309 u32 ulp_status, ulp_word4;
11310
11311 ulp_status = get_job_ulpstatus(phba, rspiocb);
11312 ulp_word4 = get_job_word4(phba, rspiocb);
11313
11314 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11315 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
11316 ulp_status, ulp_word4,
11317 vport->fc_prevDID);
11318 /* Since all FDISCs are being single threaded, we
11319 * must reset the discovery timer for ALL vports
11320 * waiting to send FDISC when one completes.
11321 */
11322 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
11323 lpfc_set_disctmo(piocb->vport);
11324 }
11325
11326 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11327 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
11328 ulp_status, ulp_word4, vport->fc_prevDID);
11329
11330 if (ulp_status) {
11331
11332 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
11333 lpfc_retry_pport_discovery(phba);
11334 goto out;
11335 }
11336
11337 /* Check for retry */
11338 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
11339 goto out;
11340 /* FDISC failed */
11341 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11342 "0126 FDISC failed. (x%x/x%x)\n",
11343 ulp_status, ulp_word4);
11344 goto fdisc_failed;
11345 }
11346
11347 lpfc_check_nlp_post_devloss(vport, ndlp);
11348
11349 spin_lock_irq(shost->host_lock);
11350 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
11351 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
11352 vport->fc_flag |= FC_FABRIC;
11353 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
11354 vport->fc_flag |= FC_PUBLIC_LOOP;
11355 spin_unlock_irq(shost->host_lock);
11356
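 /* The N_Port ID assigned by the fabric is returned in word 4 of the
  * FDISC completion.
  */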
11357 vport->fc_myDID = ulp_word4 & Mask_DID;
11358 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
11359 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
11360 if (!prsp)
11361 goto out;
11362 if (!lpfc_is_els_acc_rsp(prsp))
11363 goto out;
11364
11365 sp = prsp->virt + sizeof(uint32_t);
11366 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
11367 memcpy(&vport->fabric_portname, &sp->portName,
11368 sizeof(struct lpfc_name));
11369 memcpy(&vport->fabric_nodename, &sp->nodeName,
11370 sizeof(struct lpfc_name));
11371 if (fabric_param_changed &&
11372 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
11373 /* If our NportID changed, we need to ensure all
11374 * remaining NPORTs get unreg_login'ed so we can
11375 * issue unreg_vpi.
11376 */
11377 list_for_each_entry_safe(np, next_np,
11378 &vport->fc_nodes, nlp_listp) {
11379 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
11380 !(np->nlp_flag & NLP_NPR_ADISC))
11381 continue;
11382 spin_lock_irq(&ndlp->lock);
11383 np->nlp_flag &= ~NLP_NPR_ADISC;
11384 spin_unlock_irq(&ndlp->lock);
11385 lpfc_unreg_rpi(vport, np);
11386 }
11387 lpfc_cleanup_pending_mbox(vport);
11388
11389 if (phba->sli_rev == LPFC_SLI_REV4)
11390 lpfc_sli4_unreg_all_rpis(vport);
11391
11392 lpfc_mbx_unreg_vpi(vport);
11393 spin_lock_irq(shost->host_lock);
11394 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
11395 if (phba->sli_rev == LPFC_SLI_REV4)
11396 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
11397 else
11398 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
11399 spin_unlock_irq(shost->host_lock);
11400 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
11401 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
11402 /*
11403 * Driver needs to re-reg VPI in order for f/w
11404 * to update the MAC address.
11405 */
11406 lpfc_register_new_vport(phba, vport, ndlp);
11407 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
11408 goto out;
11409 }
11410
11411 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
11412 lpfc_issue_init_vpi(vport);
11413 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
11414 lpfc_register_new_vport(phba, vport, ndlp);
11415 else
11416 lpfc_do_scr_ns_plogi(phba, vport);
11417
11418 /* The FDISC completed successfully. Move the fabric ndlp to
11419 * UNMAPPED state and register with the transport.
11420 */
11421 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
11422 goto out;
11423
11424fdisc_failed:
11425 if (vport->fc_vport &&
11426 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
11427 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11428 /* Cancel discovery timer */
11429 lpfc_can_disctmo(vport);
11430out:
11431 lpfc_els_free_iocb(phba, cmdiocb);
11432 lpfc_nlp_put(ndlp);
11433}
11434
11435/**
11436 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
11437 * @vport: pointer to a virtual N_Port data structure.
11438 * @ndlp: pointer to a node-list data structure.
11439 * @retry: number of retries to the command IOCB.
11440 *
11441 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
11442 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
11443 * routine to issue the IOCB, which makes sure only one outstanding fabric
11444 * IOCB will be sent off HBA at any given time.
11445 *
11446 * Note that the ndlp reference count will be incremented by 1 for holding the
11447 * ndlp and the reference to ndlp will be stored into the ndlp field of
11448 * the IOCB for the completion callback function to the FDISC ELS command.
11449 *
11450 * Return code
11451 * 0 - Successfully issued fdisc iocb command
11452 * 1 - Failed to issue fdisc iocb command
11453 **/
11454static int
11455lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
11456 uint8_t retry)
11457{
11458 struct lpfc_hba *phba = vport->phba;
11459 IOCB_t *icmd;
11460 union lpfc_wqe128 *wqe = NULL;
11461 struct lpfc_iocbq *elsiocb;
11462 struct serv_parm *sp;
11463 uint8_t *pcmd;
11464 uint16_t cmdsize;
11465 int did = ndlp->nlp_DID;
11466 int rc;
11467
11468 vport->port_state = LPFC_FDISC;
11469 vport->fc_myDID = 0;
11470 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
11471 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
11472 ELS_CMD_FDISC);
11473 if (!elsiocb) {
11474 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11475 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11476 "0255 Issue FDISC: no IOCB\n");
11477 return 1;
11478 }
11479
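 /* FDISC is sent with a source ID of zero; the fabric assigns the new
  * N_Port ID in its accept.
  */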
11480 if (phba->sli_rev == LPFC_SLI_REV4) {
11481 wqe = &elsiocb->wqe;
11482 bf_set(els_req64_sid, &wqe->els_req, 0);
11483 bf_set(els_req64_sp, &wqe->els_req, 1);
11484 } else {
11485 icmd = &elsiocb->iocb;
11486 icmd->un.elsreq64.myID = 0;
11487 icmd->un.elsreq64.fl = 1;
11488 icmd->ulpCt_h = 1;
11489 icmd->ulpCt_l = 0;
11490 }
11491
11492 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
11493 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
11494 pcmd += sizeof(uint32_t); /* CSP Word 1 */
11495 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
11496 sp = (struct serv_parm *) pcmd;
11497 /* Setup CSPs accordingly for Fabric */
11498 sp->cmn.e_d_tov = 0;
11499 sp->cmn.w2.r_a_tov = 0;
11500 sp->cmn.virtual_fabric_support = 0;
11501 sp->cls1.classValid = 0;
11502 sp->cls2.seqDelivery = 1;
11503 sp->cls3.seqDelivery = 1;
11504
11505 pcmd += sizeof(uint32_t); /* CSP Word 2 */
11506 pcmd += sizeof(uint32_t); /* CSP Word 3 */
11507 pcmd += sizeof(uint32_t); /* CSP Word 4 */
11508 pcmd += sizeof(uint32_t); /* Port Name */
11509 memcpy(pcmd, &vport->fc_portname, 8);
11510 pcmd += sizeof(uint32_t); /* Node Name */
11511 pcmd += sizeof(uint32_t); /* Node Name */
11512 memcpy(pcmd, &vport->fc_nodename, 8);
11513 sp->cmn.valid_vendor_ver_level = 0;
11514 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
11515 lpfc_set_disctmo(vport);
11516
11517 phba->fc_stat.elsXmitFDISC++;
11518 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc;
11519
11520 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11521 "Issue FDISC: did:x%x",
11522 did, 0, 0);
11523
11524 elsiocb->ndlp = lpfc_nlp_get(ndlp);
11525 if (!elsiocb->ndlp)
11526 goto err_out;
11527
	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_nlp_put(ndlp);
		goto err_out;
	}

	lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
	return 0;

 err_out:
	lpfc_els_free_iocb(phba, elsiocb);
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0256 Issue FDISC: Cannot send IOCB\n");
11542 return 1;
11543}
11544
11545/**
11546 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
11547 * @phba: pointer to lpfc hba data structure.
11548 * @cmdiocb: pointer to lpfc command iocb data structure.
11549 * @rspiocb: pointer to lpfc response iocb data structure.
11550 *
 * This routine is the completion callback function for a LOGO ELS command
 * issued off a vport. It frees the command IOCB and then decrements the
 * reference count held on the ndlp for this completion function, indicating
 * that the reference to the ndlp is no longer needed. Note that the
 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
 * callback function and an additional explicit ndlp reference decrement
 * will trigger the actual release of the ndlp.
11558 **/
11559static void
lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11561 struct lpfc_iocbq *rspiocb)
11562{
11563 struct lpfc_vport *vport = cmdiocb->vport;
11564 IOCB_t *irsp;
11565 struct lpfc_nodelist *ndlp;
11566 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11567 u32 ulp_status, ulp_word4, did, tmo;
11568
11569 ndlp = cmdiocb->ndlp;
11570
	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		did = get_job_els_rsp64_did(phba, cmdiocb);
		tmo = get_wqe_tmo(cmdiocb);
	} else {
		irsp = &rspiocb->iocb;
		did = get_job_els_rsp64_did(phba, rspiocb);
		tmo = irsp->ulpTimeout;
	}
11582
11583 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11584 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
11585 ulp_status, ulp_word4, did);
11586
11587 /* NPIV LOGO completes to NPort <nlp_DID> */
11588 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11589 "2928 NPIV LOGO completes to NPort x%x "
11590 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
11591 ndlp->nlp_DID, ulp_status, ulp_word4,
11592 tmo, vport->num_disc_nodes,
11593 kref_read(&ndlp->kref), ndlp->nlp_flag,
11594 ndlp->fc4_xpt_flags);
11595
	if (ulp_status == IOSTAT_SUCCESS) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		vport->fc_flag &= ~FC_FABRIC;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}

	if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
		/* Wake up lpfc_vport_delete if waiting...*/
		if (ndlp->logo_waitq)
			wake_up(ndlp->logo_waitq);
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
		ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
		spin_unlock_irq(&ndlp->lock);
	}

	/* Safe to release resources now. */
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
11617}
11618
11619/**
11620 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
11621 * @vport: pointer to a virtual N_Port data structure.
11622 * @ndlp: pointer to a node-list data structure.
11623 *
11624 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
11625 *
11626 * Note that the ndlp reference count will be incremented by 1 for holding the
11627 * ndlp and the reference to ndlp will be stored into the ndlp field of
11628 * the IOCB for the completion callback function to the LOGO ELS command.
11629 *
11630 * Return codes
11631 * 0 - Successfully issued logo off the @vport
11632 * 1 - Failed to issue logo off the @vport
11633 **/
11634int
lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
11636{
11637 int rc = 0;
11638 struct lpfc_hba *phba = vport->phba;
11639 struct lpfc_iocbq *elsiocb;
11640 uint8_t *pcmd;
11641 uint16_t cmdsize;
11642
11643 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
				     ELS_CMD_LOGO);
11646 if (!elsiocb)
11647 return 1;
11648
11649 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
11650 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
11651 pcmd += sizeof(uint32_t);
11652
11653 /* Fill in LOGO payload */
11654 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
11655 pcmd += sizeof(uint32_t);
11656 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
11657
11658 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11659 "Issue LOGO npiv did:x%x flg:x%x",
11660 ndlp->nlp_DID, ndlp->nlp_flag, 0);
11661
11662 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo;
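	/*
	 * Mark the LOGO as outstanding on the node before handing the IOCB
	 * to the SLI layer; the err path below clears the flag if the send
	 * fails.
	 */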
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag |= NLP_LOGO_SND;
	spin_unlock_irq(&ndlp->lock);
11666 elsiocb->ndlp = lpfc_nlp_get(ndlp);
11667 if (!elsiocb->ndlp) {
11668 lpfc_els_free_iocb(phba, elsiocb);
11669 goto err;
11670 }
11671
11672 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
11673 if (rc == IOCB_ERROR) {
11674 lpfc_els_free_iocb(phba, elsiocb);
11675 lpfc_nlp_put(ndlp);
11676 goto err;
11677 }
11678 return 0;
11679
err:
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag &= ~NLP_LOGO_SND;
	spin_unlock_irq(&ndlp->lock);
11684 return 1;
11685}
11686
11687/**
11688 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
11689 * @t: timer context used to obtain the lpfc hba.
11690 *
 * This routine is invoked by the fabric iocb block timer after
 * timeout. It posts the fabric iocb block timeout event by setting the
 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
 * the lpfc_worker_wake_up() routine to wake up the worker thread. The worker
 * thread then invokes lpfc_unblock_fabric_iocbs() to handle the posted
 * WORKER_FABRIC_BLOCK_TMO event.
11697 **/
11698void
11699lpfc_fabric_block_timeout(struct timer_list *t)
11700{
11701 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer);
11702 unsigned long iflags;
11703 uint32_t tmo_posted;
11704
11705 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11706 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
11707 if (!tmo_posted)
11708 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11710
11711 if (!tmo_posted)
11712 lpfc_worker_wake_up(phba);
11713 return;
11714}
11715
11716/**
11717 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
11718 * @phba: pointer to lpfc hba data structure.
11719 *
11720 * This routine issues one fabric iocb from the driver internal list to
11721 * the HBA. It first checks whether it's ready to issue one fabric iocb to
11722 * the HBA (whether there is no outstanding fabric iocb). If so, it shall
11723 * remove one pending fabric iocb from the driver internal list and invokes
11724 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
11725 **/
11726static void
11727lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
11728{
11729 struct lpfc_iocbq *iocb;
11730 unsigned long iflags;
11731 int ret;
11732
11733repeat:
11734 iocb = NULL;
11735 spin_lock_irqsave(&phba->hbalock, iflags);
11736 /* Post any pending iocb to the SLI layer */
	if (atomic_read(&phba->fabric_iocb_count) == 0) {
		list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
				 list);
		if (iocb)
			/* Increment fabric iocb count to hold the position */
			atomic_inc(&phba->fabric_iocb_count);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
11745 if (iocb) {
11746 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
11747 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
11748 iocb->cmd_flag |= LPFC_IO_FABRIC;
11749
11750 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
11751 "Fabric sched1: ste:x%x",
11752 iocb->vport->port_state, 0, 0);
11753
11754 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
11755
11756 if (ret == IOCB_ERROR) {
11757 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
11758 iocb->fabric_cmd_cmpl = NULL;
11759 iocb->cmd_flag &= ~LPFC_IO_FABRIC;
11760 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT);
11761 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
11762 iocb->cmd_cmpl(phba, iocb, iocb);
11763
			atomic_dec(&phba->fabric_iocb_count);
11765 goto repeat;
11766 }
11767 }
11768}
11769
11770/**
11771 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
11772 * @phba: pointer to lpfc hba data structure.
11773 *
11774 * This routine unblocks the issuing fabric iocb command. The function
11775 * will clear the fabric iocb block bit and then invoke the routine
11776 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb
11777 * from the driver internal fabric iocb list.
11778 **/
11779void
11780lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
11781{
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11783
11784 lpfc_resume_fabric_iocbs(phba);
11785 return;
11786}
11787
11788/**
11789 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
11790 * @phba: pointer to lpfc hba data structure.
11791 *
 * This routine blocks the issuing of fabric iocbs for a specified amount of
 * time (currently 100 ms). This is done by setting the fabric iocb block bit
 * and setting up a timeout timer for 100 ms. While the block bit is set, no
 * more fabric iocbs will be issued out of the HBA.
11796 **/
11797static void
11798lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
11799{
11800 int blocked;
11801
	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	/* Start a timer to unblock fabric iocbs after 100ms */
	if (!blocked)
		mod_timer(&phba->fabric_block_timer,
			  jiffies + msecs_to_jiffies(100));
11807
11808 return;
11809}
11810
11811/**
11812 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
11813 * @phba: pointer to lpfc hba data structure.
11814 * @cmdiocb: pointer to lpfc command iocb data structure.
11815 * @rspiocb: pointer to lpfc response iocb data structure.
11816 *
11817 * This routine is the callback function that is put to the fabric iocb's
11818 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback
11819 * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback
11820 * function first restores and invokes the original iocb's callback function
11821 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
11822 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
11823 **/
11824static void
11825lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11826 struct lpfc_iocbq *rspiocb)
11827{
11828 struct ls_rjt stat;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
11831
11832 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
11833
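	/*
	 * Busy and resource-related rejects from the fabric temporarily block
	 * further fabric IOCBs; lpfc_block_fabric_iocbs() arms a 100 ms timer
	 * before fabric commands are resumed.
	 */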
11834 switch (ulp_status) {
11835 case IOSTAT_NPORT_RJT:
11836 case IOSTAT_FABRIC_RJT:
11837 if (ulp_word4 & RJT_UNAVAIL_TEMP)
11838 lpfc_block_fabric_iocbs(phba);
11839 break;
11840
11841 case IOSTAT_NPORT_BSY:
11842 case IOSTAT_FABRIC_BSY:
11843 lpfc_block_fabric_iocbs(phba);
11844 break;
11845
11846 case IOSTAT_LS_RJT:
11847 stat.un.ls_rjt_error_be =
11848 cpu_to_be32(ulp_word4);
11849 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
11850 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
11851 lpfc_block_fabric_iocbs(phba);
11852 break;
11853 }
11854
11855 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
11856
11857 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
11858 cmdiocb->fabric_cmd_cmpl = NULL;
11859 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
11860 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
11861
	atomic_dec(&phba->fabric_iocb_count);
11863 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
11864 /* Post any pending iocbs to HBA */
11865 lpfc_resume_fabric_iocbs(phba);
11866 }
11867}
11868
11869/**
11870 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
11871 * @phba: pointer to lpfc hba data structure.
11872 * @iocb: pointer to lpfc command iocb data structure.
11873 *
 * This routine is used as the top-level API for issuing a fabric iocb command
 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
 * function makes sure that only one fabric bound iocb will be outstanding at
 * any given time. As such, this function will first check to see whether there
 * is already an outstanding fabric iocb on the wire. If so, it will put the
 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
 * issued later. Otherwise, it will issue the iocb on the wire and update the
 * fabric iocb count to indicate that there is one fabric iocb on the wire.
 *
 * Note that this implementation can potentially send fabric IOCBs out of
 * order. The problem is that the construction of the "ready" boolean does
 * not include the condition that the internal fabric IOCB list is empty. As
 * such, it is possible for a fabric IOCB issued by this routine to "jump"
 * ahead of the fabric IOCBs already waiting on the internal list.
11888 *
11889 * Return code
11890 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
11891 * IOCB_ERROR - failed to issue fabric iocb
11892 **/
11893static int
11894lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
11895{
11896 unsigned long iflags;
11897 int ready;
11898 int ret;
11899
11900 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
11901
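	/*
	 * Issue immediately only when no fabric IOCB is outstanding and
	 * fabric commands are not blocked; otherwise queue the IOCB on the
	 * internal fabric_iocb_list for lpfc_resume_fabric_iocbs() to send
	 * later.
	 */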
	spin_lock_irqsave(&phba->hbalock, iflags);
	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);

	if (ready)
		/* Increment fabric iocb count to hold the position */
		atomic_inc(&phba->fabric_iocb_count);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
11910 if (ready) {
11911 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
11912 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
11913 iocb->cmd_flag |= LPFC_IO_FABRIC;
11914
11915 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
11916 "Fabric sched2: ste:x%x",
11917 iocb->vport->port_state, 0, 0);
11918
11919 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
11920
11921 if (ret == IOCB_ERROR) {
11922 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
11923 iocb->fabric_cmd_cmpl = NULL;
11924 iocb->cmd_flag &= ~LPFC_IO_FABRIC;
			atomic_dec(&phba->fabric_iocb_count);
11926 }
11927 } else {
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
11931 ret = IOCB_SUCCESS;
11932 }
11933 return ret;
11934}
11935
11936/**
11937 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
11938 * @vport: pointer to a virtual N_Port data structure.
11939 *
11940 * This routine aborts all the IOCBs associated with a @vport from the
11941 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @vport from the list, sets the
 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
 * associated with the IOCB.
11946 **/
11947static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
11948{
11949 LIST_HEAD(completions);
11950 struct lpfc_hba *phba = vport->phba;
11951 struct lpfc_iocbq *tmp_iocb, *piocb;
11952
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {

		if (piocb->vport != vport)
			continue;

		list_move_tail(&piocb->list, &completions);
	}
	spin_unlock_irq(&phba->hbalock);
11963
11964 /* Cancel all the IOCBs from the completions list */
11965 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11966 IOERR_SLI_ABORTED);
11967}
11968
11969/**
11970 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
11971 * @ndlp: pointer to a node-list data structure.
11972 *
11973 * This routine aborts all the IOCBs associated with an @ndlp from the
11974 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @ndlp from the list, sets the
 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
 * associated with the IOCB.
11979 **/
11980void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
11981{
11982 LIST_HEAD(completions);
11983 struct lpfc_hba *phba = ndlp->phba;
11984 struct lpfc_iocbq *tmp_iocb, *piocb;
11985 struct lpfc_sli_ring *pring;
11986
11987 pring = lpfc_phba_elsring(phba);
11988
11989 if (unlikely(!pring))
11990 return;
11991
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {
		if (lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))
			list_move_tail(&piocb->list, &completions);
	}
	spin_unlock_irq(&phba->hbalock);
12001
12002 /* Cancel all the IOCBs from the completions list */
12003 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12004 IOERR_SLI_ABORTED);
12005}
12006
12007/**
12008 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
12009 * @phba: pointer to lpfc hba data structure.
12010 *
12011 * This routine aborts all the IOCBs currently on the driver internal
12012 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
 * list, removes each IOCB from the list, sets the status field to
 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
 * the IOCB.
12017 **/
12018void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
12019{
12020 LIST_HEAD(completions);
12021
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->fabric_iocb_list, &completions);
	spin_unlock_irq(&phba->hbalock);
12025
12026 /* Cancel all the IOCBs from the completions list */
12027 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12028 IOERR_SLI_ABORTED);
12029}
12030
12031/**
12032 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
12033 * @vport: pointer to lpfc vport data structure.
12034 *
12035 * This routine is invoked by the vport cleanup for deletions and the cleanup
12036 * for an ndlp on removal.
12037 **/
12038void
12039lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
12040{
12041 struct lpfc_hba *phba = vport->phba;
12042 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
12043 struct lpfc_nodelist *ndlp = NULL;
12044 unsigned long iflag = 0;
12045
12046 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
12047 list_for_each_entry_safe(sglq_entry, sglq_next,
12048 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
12049 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
12050 lpfc_nlp_put(sglq_entry->ndlp);
12051 ndlp = sglq_entry->ndlp;
12052 sglq_entry->ndlp = NULL;
12053
12054 /* If the xri on the abts_els_sgl list is for the Fport
12055 * node and the vport is unloading, the xri aborted wcqe
12056 * likely isn't coming back. Just release the sgl.
12057 */
12058 if ((vport->load_flag & FC_UNLOADING) &&
12059 ndlp->nlp_DID == Fabric_DID) {
				list_del(&sglq_entry->list);
				sglq_entry->state = SGL_FREED;
				list_add_tail(&sglq_entry->list,
					      &phba->sli4_hba.lpfc_els_sgl_list);
			}
		}
	}
	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
12068 return;
12069}
12070
12071/**
12072 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
12073 * @phba: pointer to lpfc hba data structure.
12074 * @axri: pointer to the els xri abort wcqe structure.
12075 *
12076 * This routine is invoked by the worker thread to process a SLI4 slow-path
12077 * ELS aborted xri.
12078 **/
12079void
12080lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
12081 struct sli4_wcqe_xri_aborted *axri)
12082{
12083 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
12084 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
12085 uint16_t lxri = 0;
12086
12087 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
12088 unsigned long iflag = 0;
12089 struct lpfc_nodelist *ndlp;
12090 struct lpfc_sli_ring *pring;
12091
12092 pring = lpfc_phba_elsring(phba);
12093
12094 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
12095 list_for_each_entry_safe(sglq_entry, sglq_next,
12096 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
12097 if (sglq_entry->sli4_xritag == xri) {
			list_del(&sglq_entry->list);
			ndlp = sglq_entry->ndlp;
			sglq_entry->ndlp = NULL;
			list_add_tail(&sglq_entry->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			sglq_entry->state = SGL_FREED;
			spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
					       iflag);
12106
12107 if (ndlp) {
12108 lpfc_set_rrq_active(phba, ndlp,
12109 sglq_entry->sli4_lxritag,
12110 rxid, 1);
12111 lpfc_nlp_put(ndlp);
12112 }
12113
12114 /* Check if TXQ queue needs to be serviced */
			if (pring && !list_empty(&pring->txq))
12116 lpfc_worker_wake_up(phba);
12117 return;
12118 }
12119 }
	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
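	/*
	 * The aborted XRI was not found on the abts_els_sgl list; if it maps
	 * to an active sglq, just mark that sglq as XRI aborted.
	 */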
12121 lxri = lpfc_sli4_xri_inrange(phba, xri);
12122 if (lxri == NO_XRI)
12123 return;
12124
12125 spin_lock_irqsave(&phba->hbalock, iflag);
12126 sglq_entry = __lpfc_get_active_sglq(phba, lxri);
	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	sglq_entry->state = SGL_XRI_ABORTED;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
12133 return;
12134}
12135
12136/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
12137 * @vport: pointer to virtual port object.
12138 * @ndlp: nodelist pointer for the impacted node.
12139 *
12140 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
12141 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
12142 * the driver is required to send a LOGO to the remote node before it
12143 * attempts to recover its login to the remote node.
12144 */
12145void
12146lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
12147 struct lpfc_nodelist *ndlp)
12148{
12149 struct Scsi_Host *shost;
12150 struct lpfc_hba *phba;
12151 unsigned long flags = 0;
12152
12153 shost = lpfc_shost_from_vport(vport);
12154 phba = vport->phba;
12155 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
12156 lpfc_printf_log(phba, KERN_INFO,
12157 LOG_SLI, "3093 No rport recovery needed. "
12158 "rport in state 0x%x\n", ndlp->nlp_state);
12159 return;
12160 }
12161 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12162 "3094 Start rport recovery on shost id 0x%x "
12163 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
12164 "flags 0x%x\n",
12165 shost->host_no, ndlp->nlp_DID,
12166 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
12167 ndlp->nlp_flag);
12168 /*
12169 * The rport is not responding. Remove the FCP-2 flag to prevent
12170 * an ADISC in the follow-up recovery code.
12171 */
12172 spin_lock_irqsave(&ndlp->lock, flags);
12173 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
12174 ndlp->nlp_flag |= NLP_ISSUE_LOGO;
	spin_unlock_irqrestore(&ndlp->lock, flags);
12176 lpfc_unreg_rpi(vport, ndlp);
12177}
12178
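/*
 * CS_CTL priority bitmap helpers for VMID: lpfc_init_cs_ctl_bitmap() clears
 * the per-vport bitmap, lpfc_vmid_set_cs_ctl_range() marks the priority
 * values granted by the fabric as available, lpfc_vmid_get_cs_ctl() hands out
 * the lowest available value, and lpfc_vmid_put_cs_ctl() returns one.
 */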
static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport)
{
	bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE);
}

static void
lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max)
{
	u32 i;

	if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE))
		return;

	for (i = min; i <= max; i++)
		set_bit(i, vport->vmid_priority_range);
}

static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid)
{
	set_bit(ctcl_vmid, vport->vmid_priority_range);
}

u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport)
{
	u32 i;

	i = find_first_bit(vport->vmid_priority_range,
			   LPFC_VMID_MAX_PRIORITY_RANGE);

	if (i == LPFC_VMID_MAX_PRIORITY_RANGE)
		return 0;

	clear_bit(i, vport->vmid_priority_range);
	return i;
}
12214
12215#define MAX_PRIORITY_DESC 255
12216
12217static void
12218lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12219 struct lpfc_iocbq *rspiocb)
12220{
12221 struct lpfc_vport *vport = cmdiocb->vport;
12222 struct priority_range_desc *desc;
12223 struct lpfc_dmabuf *prsp = NULL;
12224 struct lpfc_vmid_priority_range *vmid_range = NULL;
12225 u32 *data;
12226 struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
12229 u8 *pcmd, max_desc;
12230 u32 len, i;
12231 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12232
12233 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
12234 if (!prsp)
12235 goto out;
12236
12237 pcmd = prsp->virt;
12238 data = (u32 *)pcmd;
12239 if (data[0] == ELS_CMD_LS_RJT) {
12240 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
12241 "3277 QFPA LS_RJT x%x x%x\n",
12242 data[0], data[1]);
12243 goto out;
12244 }
12245 if (ulp_status) {
12246 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
12247 "6529 QFPA failed with status x%x x%x\n",
12248 ulp_status, ulp_word4);
12249 goto out;
12250 }
12251
12252 if (!vport->qfpa_res) {
12253 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
		vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
					  GFP_KERNEL);
12256 if (!vport->qfpa_res)
12257 goto out;
12258 }
12259
12260 len = *((u32 *)(pcmd + 4));
12261 len = be32_to_cpu(len);
12262 memcpy(vport->qfpa_res, pcmd, len + 8);
12263 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;
12264
12265 desc = (struct priority_range_desc *)(pcmd + 8);
12266 vmid_range = vport->vmid_priority.vmid_range;
12267 if (!vmid_range) {
		vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
				     GFP_KERNEL);
		if (!vmid_range) {
			kfree(vport->qfpa_res);
12272 goto out;
12273 }
12274 vport->vmid_priority.vmid_range = vmid_range;
12275 }
12276 vport->vmid_priority.num_descriptors = len;
12277
12278 for (i = 0; i < len; i++, vmid_range++, desc++) {
12279 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
12280 "6539 vmid values low=%d, high=%d, qos=%d, "
12281 "local ve id=%d\n", desc->lo_range,
12282 desc->hi_range, desc->qos_priority,
12283 desc->local_ve_id);
12284
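		/*
		 * Expand the descriptor bounds into CS_CTL values: each bound
		 * is shifted left by one, then adjusted for the odd/even
		 * indication in local_ve_id (QFPA_ODD_ONLY raises the low
		 * bound; QFPA_ODD_ONLY and QFPA_EVEN_ODD raise the high
		 * bound).
		 */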
12285 vmid_range->low = desc->lo_range << 1;
12286 if (desc->local_ve_id == QFPA_ODD_ONLY)
12287 vmid_range->low++;
12288 if (desc->qos_priority)
12289 vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
12290 vmid_range->qos = desc->qos_priority;
12291
12292 vmid_range->high = desc->hi_range << 1;
12293 if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
12294 (desc->local_ve_id == QFPA_EVEN_ODD))
12295 vmid_range->high++;
12296 }
12297 lpfc_init_cs_ctl_bitmap(vport);
12298 for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
		lpfc_vmid_set_cs_ctl_range(vport,
				vport->vmid_priority.vmid_range[i].low,
				vport->vmid_priority.vmid_range[i].high);
12302 }
12303
12304 vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
12305 out:
	lpfc_els_free_iocb(phba, cmdiocb);
12307 lpfc_nlp_put(ndlp);
12308}
12309
12310int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
12311{
12312 struct lpfc_hba *phba = vport->phba;
12313 struct lpfc_nodelist *ndlp;
12314 struct lpfc_iocbq *elsiocb;
12315 u8 *pcmd;
12316 int ret;
12317
12318 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
12319 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
12320 return -ENXIO;
12321
	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
				     ndlp->nlp_DID, ELS_CMD_QFPA);
12324 if (!elsiocb)
12325 return -ENOMEM;
12326
12327 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
12328
12329 *((u32 *)(pcmd)) = ELS_CMD_QFPA;
12330 pcmd += 4;
12331
12332 elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;
12333
12334 elsiocb->ndlp = lpfc_nlp_get(ndlp);
12335 if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
12337 return -ENXIO;
12338 }
12339
12340 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
12341 if (ret != IOCB_SUCCESS) {
12342 lpfc_els_free_iocb(phba, elsiocb);
12343 lpfc_nlp_put(ndlp);
12344 return -EIO;
12345 }
12346 vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
12347 return 0;
12348}
12349
12350int
12351lpfc_vmid_uvem(struct lpfc_vport *vport,
12352 struct lpfc_vmid *vmid, bool instantiated)
12353{
12354 struct lpfc_vem_id_desc *vem_id_desc;
12355 struct lpfc_nodelist *ndlp;
12356 struct lpfc_iocbq *elsiocb;
12357 struct instantiated_ve_desc *inst_desc;
12358 struct lpfc_vmid_context *vmid_context;
12359 u8 *pcmd;
12360 u32 *len;
12361 int ret = 0;
12362
12363 ndlp = lpfc_findnode_did(vport, Fabric_DID);
12364 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
12365 return -ENXIO;
12366
	vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
12368 if (!vmid_context)
12369 return -ENOMEM;
	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
				     ndlp, Fabric_DID, ELS_CMD_UVEM);
12372 if (!elsiocb)
12373 goto out;
12374
12375 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
12376 "3427 Host vmid %s %d\n",
12377 vmid->host_vmid, instantiated);
12378 vmid_context->vmp = vmid;
12379 vmid_context->nlp = ndlp;
12380 vmid_context->instantiated = instantiated;
12381 elsiocb->vmid_tag.vmid_context = vmid_context;
12382 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
12383
	if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
			sizeof(vport->lpfc_vmid_host_uuid)))
12386 memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
12387 sizeof(vport->lpfc_vmid_host_uuid));
12388
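	/*
	 * UVEM payload as built here: command word, big-endian payload
	 * length, VEM Identification descriptor at offset 8, then the
	 * Instantiated (or Deinstantiated) VE descriptor at offset 32.
	 */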
12389 *((u32 *)(pcmd)) = ELS_CMD_UVEM;
12390 len = (u32 *)(pcmd + 4);
12391 *len = cpu_to_be32(LPFC_UVEM_SIZE - 8);
12392
12393 vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
12394 vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
12395 vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
12396 memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
12397 sizeof(vem_id_desc->vem_id));
12398
12399 inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
12400 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
12401 inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
12402 memcpy(inst_desc->global_vem_id, vmid->host_vmid,
12403 sizeof(inst_desc->global_vem_id));
12404
12405 bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
12406 bf_set(lpfc_instantiated_local_id, inst_desc,
12407 vmid->un.cs_ctl_vmid);
12408 if (instantiated) {
12409 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
12410 } else {
12411 inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
		lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
12413 }
12414 inst_desc->word6 = cpu_to_be32(inst_desc->word6);
12415
12416 elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;
12417
12418 elsiocb->ndlp = lpfc_nlp_get(ndlp);
12419 if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
12421 goto out;
12422 }
12423
	ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		lpfc_nlp_put(ndlp);
		goto out;
	}

	return 0;
 out:
	kfree(vmid_context);
12434 return -EIO;
12435}
12436
12437static void
12438lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
12439 struct lpfc_iocbq *rspiocb)
12440{
12441 struct lpfc_vport *vport = icmdiocb->vport;
12442 struct lpfc_dmabuf *prsp = NULL;
12443 struct lpfc_vmid_context *vmid_context =
12444 icmdiocb->vmid_tag.vmid_context;
12445 struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
12446 u8 *pcmd;
12447 u32 *data;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
12450 struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
12451 struct lpfc_vmid *vmid;
12452
12453 vmid = vmid_context->vmp;
12454 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
12455 ndlp = NULL;
12456
12457 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
12458 if (!prsp)
12459 goto out;
12460 pcmd = prsp->virt;
12461 data = (u32 *)pcmd;
12462 if (data[0] == ELS_CMD_LS_RJT) {
12463 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
12464 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
12465 goto out;
12466 }
12467 if (ulp_status) {
12468 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
12469 "4533 UVEM error status %x: %x\n",
12470 ulp_status, ulp_word4);
12471 goto out;
12472 }
	spin_lock(&phba->hbalock);
	/* Set IN USE flag */
	vport->vmid_flag |= LPFC_VMID_IN_USE;
	phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
	spin_unlock(&phba->hbalock);
12478
12479 if (vmid_context->instantiated) {
12480 write_lock(&vport->vmid_lock);
12481 vmid->flag |= LPFC_VMID_REGISTERED;
12482 vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
12483 write_unlock(&vport->vmid_lock);
12484 }
12485
 out:
	kfree(vmid_context);
	lpfc_els_free_iocb(phba, icmdiocb);
12489 lpfc_nlp_put(ndlp);
12490}
12491
