/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
#include <linux/utsname.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
static void lpfc_check_inactive_vmid(struct lpfc_hba *phba);
static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba);

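/* Return 1 when the ndlp has a registered FC4 type or is a fabric
 * node, i.e. it is valid for SCSI/NVME transport registration.
 */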
static int
lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
{
	if (ndlp->nlp_fc4_type ||
	    ndlp->nlp_type & NLP_FABRIC)
		return 1;
	return 0;
}

/* The source of a terminate rport I/O is either a dev_loss_tmo
 * event or a call to fc_remove_host. While the rport should be
 * valid during these downcalls, the transport can call twice
 * in a single event. This routine provides some protection
 * as the NDLP isn't really free, just released to the pool.
 */
static int
lpfc_rport_invalid(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;

	if (!rport) {
		pr_err("**** %s: NULL rport, exit.\n", __func__);
		return -EINVAL;
	}

	rdata = rport->dd_data;
	if (!rdata) {
		pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
		       __func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	ndlp = rdata->pnode;
	if (!rdata->pnode) {
		pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
			__func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	if (!ndlp->vport) {
		pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
		       "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
		       rport->scsi_target_id);
		return -EINVAL;
	}
	return 0;
}

void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;

	if (lpfc_rport_invalid(rport))
		return;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	vport = ndlp->vport;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport terminate: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID)
		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
146 | |
147 | /* |
148 | * This function will be called when dev_loss_tmo fire. |
149 | */ |
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	unsigned long iflags;

	ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
	if (!ndlp)
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosscb: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
			 "load_flag x%x refcnt %u state %d xpt x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
			 vport->load_flag, kref_read(&ndlp->kref),
			 ndlp->nlp_state, ndlp->fc4_xpt_flags);

	/* Don't schedule a worker thread event if the vport is going down. */
	if (vport->load_flag & FC_UNLOADING) {
		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->rport = NULL;

		/* The scsi_transport is done with the rport so lpfc cannot
		 * call to unregister. Remove the scsi transport reference
		 * and clean up the SCSI transport node details.
		 */
		if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) {
			ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;

			/* NVME transport-registered rports need the
			 * NLP_XPT_REGD flag to complete an unregister.
			 */
			if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
				ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
			lpfc_nlp_put(ndlp);
			spin_lock_irqsave(&ndlp->lock, iflags);
		}

		/* Only 1 thread can drop the initial node reference. If
		 * another thread has set NLP_DROPPED, this thread is done.
		 */
		if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) &&
		    !(ndlp->nlp_flag & NLP_DROPPED)) {
			ndlp->nlp_flag |= NLP_DROPPED;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
			lpfc_nlp_put(ndlp);
			return;
		}

		spin_unlock_irqrestore(&ndlp->lock, iflags);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6789 rport name %llx != node port name %llx",
				 rport->port_name,
				 wwn_to_u64(ndlp->nlp_portname.u.wwn));

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6790 rport name %llx dev_loss_evt pending\n",
				 rport->port_name);
		return;
	}

	spin_lock_irqsave(&ndlp->lock, iflags);
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;

	/* If there is a PLOGI in progress, and we are in a
	 * NLP_NPR_2B_DISC state, don't turn off the flag.
	 */
	if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;

	/*
	 * The backend does not expect any more calls associated with this
	 * rport. Remove the association between rport and ndlp.
	 */
	ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
	((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
	ndlp->rport = NULL;
	spin_unlock_irqrestore(&ndlp->lock, iflags);

	if (phba->worker_thread) {
		/* We need to hold the node by incrementing the reference
		 * count until this queued work is done
		 */
		evtp->evt_arg1 = lpfc_nlp_get(ndlp);

		spin_lock_irqsave(&phba->hbalock, iflags);
		if (evtp->evt_arg1) {
			evtp->evt = LPFC_EVT_DEV_LOSS;
			list_add_tail(&evtp->evt_listp, &phba->work_list);
			lpfc_worker_wake_up(phba);
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	} else {
		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
				 "3188 worker thread is stopped %s x%06x, "
				 " rport x%px flg x%x load_flag x%x refcnt "
				 "%d\n", __func__, ndlp->nlp_DID,
				 ndlp->rport, ndlp->nlp_flag,
				 vport->load_flag, kref_read(&ndlp->kref));
		if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
			spin_lock_irqsave(&ndlp->lock, iflags);
			/* Node is in dev loss. No further transaction. */
			ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RM);
		}
	}
}

/**
 * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport
 * @vport: Pointer to vport context object.
 *
 * This function checks for idle VMID entries related to a particular vport.
 * Any entries found unused/idle are freed accordingly.
 **/
static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport)
{
	u16 keep;
	u32 difftime = 0, r, bucket;
	u64 *lta;
	int cpu;
	struct lpfc_vmid *vmp;

	write_lock(&vport->vmid_lock);

	if (!vport->cur_vmid_cnt)
		goto out;

	/* iterate through the table */
	hash_for_each(vport->hash_table, bucket, vmp, hnode) {
		keep = 0;
		if (vmp->flag & LPFC_VMID_REGISTERED) {
			/* check if the particular VMID is in use */
			/* for all available per cpu variable */
			for_each_possible_cpu(cpu) {
				/* if last access time is less than timeout */
				lta = per_cpu_ptr(vmp->last_io_time, cpu);
				if (!lta)
					continue;
				difftime = (jiffies) - (*lta);
				if ((vport->vmid_inactivity_timeout *
				     JIFFIES_PER_HR) > difftime) {
					keep = 1;
					break;
				}
			}

			/* if none of the cpus have been used by the vm, */
			/* remove the entry if already registered */
			if (!keep) {
				/* mark the entry for deregistration */
				vmp->flag = LPFC_VMID_DE_REGISTER;
				write_unlock(&vport->vmid_lock);
				if (vport->vmid_priority_tagging)
					r = lpfc_vmid_uvem(vport, vmp, false);
				else
					r = lpfc_vmid_cmd(vport,
							  SLI_CTAS_DAPP_IDENT,
							  vmp);

				/* decrement number of active vms and mark */
				/* entry in slot as free */
				write_lock(&vport->vmid_lock);
				if (!r) {
					struct lpfc_vmid *ht = vmp;

					vport->cur_vmid_cnt--;
					ht->flag = LPFC_VMID_SLOT_FREE;
					free_percpu(ht->last_io_time);
					ht->last_io_time = NULL;
					hash_del(&ht->hnode);
				}
			}
		}
	}
out:
	write_unlock(&vport->vmid_lock);
}

/**
 * lpfc_check_inactive_vmid - VMID inactivity checker
 * @phba: Pointer to hba context object.
 *
 * This function is called from the worker thread to determine if an entry in
 * the VMID table can be released because no I/O activity was seen from that
 * particular VM for the specified time. When this happens, the entry in the
 * table is released and the resources on the switch are cleared.
 **/
static void lpfc_check_inactive_vmid(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports)
		return;

	for (i = 0; i <= phba->max_vports; i++) {
		if ((!vports[i]) && (i == 0))
			vport = phba->pport;
		else
			vport = vports[i];
		if (!vport)
			break;

		lpfc_check_inactive_vmid_one(vport);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to remote node object.
 *
 * If NLP_IN_RECOV_POST_DEV_LOSS flag was set due to outstanding recovery of
 * node during dev_loss_tmo processing, then this function restores the nlp_put
 * kref decrement from lpfc_dev_loss_tmo_handler.
 **/
void
lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	spin_lock_irqsave(&ndlp->lock, iflags);
	if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
		ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		lpfc_nlp_get(ndlp);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
				 "8438 Devloss timeout reversed on DID x%x "
				 "refcnt %d ndlp %p flag x%x "
				 "port_state = x%x\n",
				 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
				 ndlp->nlp_flag, vport->port_state);
		spin_lock_irqsave(&ndlp->lock, iflags);
	}
	spin_unlock_irqrestore(&ndlp->lock, iflags);
}

/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still in use of the FCF; otherwise,
 * it returns 0 when no remote node is still in use of the FCF when the
 * devloss timeout happened to this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int warn_on = 0;
	int fcf_inuse = 0;
	bool recovering = false;
	struct fc_vport *fc_vport = NULL;
	unsigned long iflags;

	vport = ndlp->vport;
	name = (uint8_t *)&ndlp->nlp_portname;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosstmo:did:x%x type:x%x id:x%x",
			      ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
			 __func__, ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));

	/* If the driver is recovering the rport, ignore devloss. */
	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);

		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		return fcf_inuse;
	}

	/* Fabric nodes are done. */
	if (ndlp->nlp_type & NLP_FABRIC) {
		spin_lock_irqsave(&ndlp->lock, iflags);

		/* The driver has to account for a race between any fabric
		 * node that's in recovery when dev_loss_tmo expires. When this
		 * happens, the driver has to allow node recovery.
		 */
		switch (ndlp->nlp_DID) {
		case Fabric_DID:
			fc_vport = vport->fc_vport;
			if (fc_vport) {
				/* NPIV path. */
				if (fc_vport->vport_state ==
				    FC_VPORT_INITIALIZING)
					recovering = true;
			} else {
				/* Physical port path. */
				if (phba->hba_flag & HBA_FLOGI_OUTSTANDING)
					recovering = true;
			}
			break;
		case Fabric_Cntl_DID:
			if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
				recovering = true;
			break;
		case FDMI_DID:
			fallthrough;
		case NameServer_DID:
			if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
			    ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
				recovering = true;
			break;
		default:
			/* Ensure the nlp_DID at least has the correct prefix.
			 * The fabric domain controller's last three nibbles
			 * vary so we handle it in the default case.
			 */
			if (ndlp->nlp_DID & Fabric_DID_MASK) {
				if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
				    ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
					recovering = true;
			}
			break;
		}
		spin_unlock_irqrestore(&ndlp->lock, iflags);

		/* Mark an NLP_IN_RECOV_POST_DEV_LOSS flag to know if reversing
		 * the following lpfc_nlp_put is necessary after fabric node is
		 * recovered.
		 */
		if (recovering) {
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_DISCOVERY | LOG_NODE,
					 "8436 Devloss timeout marked on "
					 "DID x%x refcnt %d ndlp %p "
					 "flag x%x port_state = x%x\n",
					 ndlp->nlp_DID, kref_read(&ndlp->kref),
					 ndlp, ndlp->nlp_flag,
					 vport->port_state);
			spin_lock_irqsave(&ndlp->lock, iflags);
			ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
		} else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
			/* Fabric node fully recovered before this dev_loss_tmo
			 * queue work is processed. Thus, ignore the
			 * dev_loss_tmo event.
			 */
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_DISCOVERY | LOG_NODE,
					 "8437 Devloss timeout ignored on "
					 "DID x%x refcnt %d ndlp %p "
					 "flag x%x port_state = x%x\n",
					 ndlp->nlp_DID, kref_read(&ndlp->kref),
					 ndlp, ndlp->nlp_flag,
					 vport->port_state);
			return fcf_inuse;
		}

		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		lpfc_nlp_put(ndlp);
		return fcf_inuse;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi,
				 kref_read(&ndlp->kref));
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}
	spin_lock_irqsave(&ndlp->lock, iflags);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(&ndlp->lock, iflags);

	/* If we are devloss, but we are in the process of rediscovering the
	 * ndlp, don't issue a NLP_EVT_DEVICE_RM event.
	 */
	if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
	    ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
		return fcf_inuse;
	}

	if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

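/* Walk the active vports and issue a QFPA ELS command on any vport
 * that has LPFC_VMID_ISSUE_QFPA set; the flag is cleared once the
 * command is issued successfully.
 */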
static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports)
		return;

	for (i = 0; i <= phba->max_vports; i++) {
		if ((!vports[i]) && (i == 0))
			vport = phba->pport;
		else
			vport = vports[i];
		if (!vport)
			break;

		if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) {
			if (!lpfc_issue_els_qfpa(vport))
				vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for the SLI4 host. For the devloss
 * timeout of the last remote node which had been in use of the FCF, when
 * this routine is invoked, it is guaranteed that none of the remote nodes
 * are still in use of the FCF. On devloss timeout of the last remote node
 * using the FCF, if the FIP engine is neither in the FCF table scan process
 * nor the roundrobin failover process, the in-use FCF is unregistered. If
 * the FIP engine is in the FCF discovery process, the devloss timeout state
 * is set for either the FCF table scan process or the roundrobin failover
 * process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from functions which need to post events
 * from interrupt context. It allocates the data structure required
 * for posting an event. It also keeps track of the number of pending
 * events and prevents an event storm when there are too many of them.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are a lot of fast events, do not exhaust memory on them */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
		      GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		   struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. It posts the event to the fc
 * transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		       struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
				     work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
		fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
					       read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			   (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
					       check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
					       queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_vendor_event(shost,
					  fc_get_event_number(),
					  evt_data_size,
					  evt_data,
					  LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
}

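/* Drain the HBA work list: dequeue each queued lpfc_work_evt under
 * the hbalock, dispatch it by event type, and free the event structure
 * when the handler does not own it.
 */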
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;
	bool hba_pci_err;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			if (!hba_pci_err) {
				lpfc_els_retry_delay_handler(ndlp);
				free_evt = 0; /* evt is part of ndlp */
			}
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_RECOVER_PORT:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			if (!hba_pci_err) {
				lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
				free_evt = 0;
			}
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}

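/* Top-level worker routine: snapshot and clear the host attention
 * bits, service HBA-wide events, then walk the vports and run any
 * timer and discovery handlers flagged in work_port_events.
 */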
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;
	bool hba_pci_err;

	hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	if (hba_pci_err)
		ha_copy = 0;

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT) {
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

		if (phba->fw_dump_cmpl) {
			complete(phba->fw_dump_cmpl);
			phba->fw_dump_cmpl = NULL;
		}
	}

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Handle VMID Events */
	if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
		if (phba->pport->work_port_events &
		    WORKER_CHECK_VMID_ISSUE_QFPA) {
			lpfc_check_vmid_qfpa_issue(phba);
			phba->pport->work_port_events &=
				~WORKER_CHECK_VMID_ISSUE_QFPA;
		}
		if (phba->pport->work_port_events &
		    WORKER_CHECK_INACTIVE_VMID) {
			lpfc_check_inactive_vmid(phba);
			phba->pport->work_port_events &=
				~WORKER_CHECK_INACTIVE_VMID;
		}
	}

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (hba_pci_err)
				continue;
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = lpfc_phba_elsring(phba);
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if (pring && (status & HA_RXMASK ||
		      pring->flag & LPFC_DEFERRED_RING_EVENT ||
		      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Preserve legacy behavior. */
			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
				set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			/* Driver could have abort request completed in queue
			 * when link goes down. Allow for this transition.
			 */
			if (phba->link_state >= LPFC_LINK_DOWN ||
			    phba->link_flag & LS_MDS_LOOPBACK) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}

int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					      (test_and_clear_bit(LPFC_DATA_READY,
								  &phba->data_flags)
					       || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events
	 * will be queued to the worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}

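/* Unregister RPIs and walk the vport's node list, moving each node
 * through DEVICE_RM or DEVICE_RECOVERY depending on @remove; fabric
 * nodes are left alone on link down for pre-SLI4 adapters.
 */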
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     ((ndlp->nlp_DID == NameServer_DID) ||
		      (ndlp->nlp_DID == FDMI_DID) ||
		      (ndlp->nlp_DID == Fabric_Cntl_DID))))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;

		/* Notify transport of connectivity loss to trigger cleanup. */
		if (phba->nvmet_support &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_nvmet_invalidate_host(phba, ndlp);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					remove
					? NLP_EVT_DEVICE_RM
					: NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}

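/* Per-vport link-failure cleanup: flush receive buffers, RSCN state,
 * and outstanding ELS commands, then recover the RPIs and stop the
 * discovery timer.
 */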
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}

void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Link Down: state:x%x rtry:x%x flg:x%x",
			      vport->port_state, vport->fc_ns_retry,
			      vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);

	if (phba->sli_rev == LPFC_SLI_REV4 &&
	    vport->port_type == LPFC_PHYSICAL_PORT &&
	    phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
		/* Assume success on link up */
		phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
	}
}

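/* HBA-wide link-down handling: block SCSI I/O, clear FCF state,
 * issue a link-down event to every vport, and clean up any SLI3
 * firmware default RPIs and pt2pt state.
 */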
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;
	int offline;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);
	offline = pci_channel_offline(phba->pcidev);

	phba->defer_flogi_acc_flag = false;

	/* Clear external loopback plug detected flag */
	phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		if (phba->sli4_hba.conf_trunk) {
			phba->trunk_link.link0.state = 0;
			phba->trunk_link.link1.state = 0;
			phba->trunk_link.link2.state = 0;
			phba->trunk_link.link3.state = 0;
			phba->trunk_link.phy_lnk_speed =
				LPFC_LINK_SPEED_UNKNOWN;
			phba->sli4_hba.link_state.logical_speed =
				LPFC_LINK_SPEED_UNKNOWN;
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);

			vports[i]->fc_myDID = 0;

			if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
				if (phba->nvmet_support)
					lpfc_nvmet_update_targetport(phba);
				else
					lpfc_nvme_update_localport(vports[i]);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clean up any SLI3 firmware default rpi's */
	if (phba->sli_rev > LPFC_SLI_REV3 || offline)
		goto skip_unreg_did;

	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

skip_unreg_did:
	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		phba->pport->rcv_flogi_cnt = 0;
		spin_unlock_irq(shost->host_lock);
	}
	return 0;
}

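/* On link up, reset each node's FC4 type and unregister RPIs for
 * fabric connections and for any node that must PLOGI again.
 */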
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);

		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup it's safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

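/* Per-vport link-up handling: post the FC_HOST linkup event, reset
 * the discovery flags, and prepare this vport's nodes for rediscovery.
 */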
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Link Up: top:x%x speed:x%x flg:x%x",
			      phba->fc_topology, phba->fc_linkspeed,
			      phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	if (phba->defer_flogi_acc_flag)
		vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_RSCN_MODE |
				    FC_NLP_MORE | FC_RSCN_DISCOVERY);
	else
		vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI |
				    FC_ABORT_DISCOVERY | FC_RSCN_MODE |
				    FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);
	lpfc_setup_fdmi_mask(vport);

	lpfc_linkup_cleanup_nodes(vport);
}

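/* HBA-wide link-up handling: unblock fabric iocbs, run the per-vport
 * link-up processing, and reinitialize the initial HBA flags.
 */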
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clear the pport flogi counter in case the link down was
	 * absorbed without an ACQE. No lock here - in worker thread
	 * and discovery is synchronized.
	 */
	spin_lock_irq(shost->host_lock);
	phba->pport->rcv_flogi_cnt = 0;
	spin_unlock_irq(shost->host_lock);

	/* reinitialize initial HBA flag */
	phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL);

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI3 only.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
}

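/* Completion handler for the CONFIG_LINK mailbox command: on success
 * it either waits for FAN on a public loop or starts discovery by
 * issuing a FLOGI, updating CSPs first when bb-credit recovery is
 * enabled; on failure it forces a link down and issues CLEAR_LA.
 */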
void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	LPFC_MBOXQ_t *sparam_mb;
	u16 status = pmb->u.mb.mbxStatus;
	int rc;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (status)
		goto out;

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl.
	 */
	if (vport->port_state != LPFC_FLOGI) {
		/* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
		 * bb-credit recovery is in place.
		 */
		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
		    !(phba->link_flag & LS_LOOPBACK_MODE)) {
			sparam_mb = mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL);
			if (!sparam_mb)
				goto sparam_out;

			rc = lpfc_read_sparam(phba, sparam_mb, 0);
			if (rc) {
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}
			sparam_mb->vport = vport;
			sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
			rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
						       MBOX_THD_UNLOCKED);
				goto sparam_out;
			}

			phba->hba_flag |= HBA_DEFER_FLOGI;
		} else {
			lpfc_initial_flogi(vport);
		}
	} else {
		if (vport->fc_flag & FC_PT2PT)
			lpfc_disc_start(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
			 status, vport->port_state);

sparam_out:
	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
}
1583 | |
1584 | /** |
* lpfc_sli4_clear_fcf_rr_bmask - Reset roundrobin bmask and clear pri list
* @phba: pointer to the struct lpfc_hba for this port.
*
* This function resets the round robin bit mask and clears the
* fcf priority list. The list deletions are done while holding the
* hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
* from the lpfc_fcf_pri record.
1591 | **/ |
1592 | void |
1593 | lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba) |
1594 | { |
1595 | struct lpfc_fcf_pri *fcf_pri; |
1596 | struct lpfc_fcf_pri *next_fcf_pri; |
1597 | memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); |
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(fcf_pri, next_fcf_pri,
&phba->fcf.fcf_pri_list, list) {
list_del_init(&fcf_pri->list);
fcf_pri->fcf_rec.flag = 0;
}
spin_unlock_irq(&phba->hbalock);
}

static void
1607 | lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) |
1608 | { |
1609 | struct lpfc_vport *vport = mboxq->vport; |
1610 | |
1611 | if (mboxq->u.mb.mbxStatus) { |
1612 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
1613 | "2017 REG_FCFI mbxStatus error x%x " |
1614 | "HBA state x%x\n" , mboxq->u.mb.mbxStatus, |
1615 | vport->port_state); |
1616 | goto fail_out; |
1617 | } |
1618 | |
1619 | /* Start FCoE discovery by sending a FLOGI. */ |
1620 | phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); |
1621 | /* Set the FCFI registered flag */ |
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_REGISTERED;
spin_unlock_irq(&phba->hbalock);
1625 | |
1626 | /* If there is a pending FCoE event, restart FCF table scan. */ |
1627 | if ((!(phba->hba_flag & FCF_RR_INPROG)) && |
1628 | lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) |
1629 | goto fail_out; |
1630 | |
1631 | /* Mark successful completion of FCF table scan */ |
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
phba->hba_flag &= ~FCF_TS_INPROG;
if (vport->port_state != LPFC_FLOGI) {
phba->hba_flag |= FCF_RR_INPROG;
spin_unlock_irq(&phba->hbalock);
lpfc_issue_init_vfi(vport);
goto out;
}
spin_unlock_irq(&phba->hbalock);
goto out;

fail_out:
spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~FCF_RR_INPROG;
spin_unlock_irq(&phba->hbalock);
out:
mempool_free(mboxq, phba->mbox_mem_pool);
1650 | } |
1651 | |
1652 | /** |
* lpfc_fab_name_match - Check if the fcf fabric name matches.
* @fab_name: pointer to fabric name.
* @new_fcf_record: pointer to fcf record.
*
* This routine compares the fcf record's fabric name with the provided
* fabric name. If the fabric names are identical this function
* returns 1, else it returns 0.
1660 | **/ |
1661 | static uint32_t |
1662 | lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) |
1663 | { |
1664 | if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) |
1665 | return 0; |
1666 | if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) |
1667 | return 0; |
1668 | if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) |
1669 | return 0; |
1670 | if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) |
1671 | return 0; |
1672 | if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) |
1673 | return 0; |
1674 | if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) |
1675 | return 0; |
1676 | if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) |
1677 | return 0; |
1678 | if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)) |
1679 | return 0; |
1680 | return 1; |
1681 | } |
1682 | |
1683 | /** |
* lpfc_sw_name_match - Check if the fcf switch name matches.
* @sw_name: pointer to switch name.
* @new_fcf_record: pointer to fcf record.
*
* This routine compares the fcf record's switch name with the provided
* switch name. If the switch names are identical this function
* returns 1, else it returns 0.
1691 | **/ |
1692 | static uint32_t |
1693 | lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) |
1694 | { |
1695 | if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) |
1696 | return 0; |
1697 | if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) |
1698 | return 0; |
1699 | if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) |
1700 | return 0; |
1701 | if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) |
1702 | return 0; |
1703 | if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) |
1704 | return 0; |
1705 | if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) |
1706 | return 0; |
1707 | if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) |
1708 | return 0; |
1709 | if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)) |
1710 | return 0; |
1711 | return 1; |
1712 | } |
1713 | |
1714 | /** |
* lpfc_mac_addr_match - Check if the fcf mac address matches.
* @mac_addr: pointer to mac address.
* @new_fcf_record: pointer to fcf record.
*
* This routine compares the fcf record's mac address with the HBA's
* FCF mac address. If the mac addresses are identical this function
* returns 1, else it returns 0.
1722 | **/ |
1723 | static uint32_t |
1724 | lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record) |
1725 | { |
1726 | if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) |
1727 | return 0; |
1728 | if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) |
1729 | return 0; |
1730 | if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) |
1731 | return 0; |
1732 | if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) |
1733 | return 0; |
1734 | if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) |
1735 | return 0; |
1736 | if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record)) |
1737 | return 0; |
1738 | return 1; |
1739 | } |
1740 | |
1741 | static bool |
1742 | lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id) |
1743 | { |
1744 | return (curr_vlan_id == new_vlan_id); |
1745 | } |
1746 | |
1747 | /** |
1748 | * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record. |
1749 | * @phba: pointer to lpfc hba data structure. |
1750 | * @fcf_index: Index for the lpfc_fcf_record. |
1751 | * @new_fcf_record: pointer to hba fcf record. |
1752 | * |
1753 | * This routine updates the driver FCF priority record from the new HBA FCF |
1754 | * record. The hbalock is asserted held in the code path calling this |
1755 | * routine. |
1756 | **/ |
1757 | static void |
1758 | __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index, |
struct fcf_record *new_fcf_record)
1761 | { |
1762 | struct lpfc_fcf_pri *fcf_pri; |
1763 | |
1764 | fcf_pri = &phba->fcf.fcf_pri[fcf_index]; |
1765 | fcf_pri->fcf_rec.fcf_index = fcf_index; |
1766 | /* FCF record priority */ |
1767 | fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; |
}
1770 | |
1771 | /** |
1772 | * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. |
1773 | * @fcf_rec: pointer to driver fcf record. |
1774 | * @new_fcf_record: pointer to fcf record. |
1775 | * |
1776 | * This routine copies the FCF information from the FCF |
1777 | * record to lpfc_hba data structure. |
1778 | **/ |
1779 | static void |
1780 | lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec, |
1781 | struct fcf_record *new_fcf_record) |
1782 | { |
1783 | /* Fabric name */ |
1784 | fcf_rec->fabric_name[0] = |
1785 | bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); |
1786 | fcf_rec->fabric_name[1] = |
1787 | bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); |
1788 | fcf_rec->fabric_name[2] = |
1789 | bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); |
1790 | fcf_rec->fabric_name[3] = |
1791 | bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); |
1792 | fcf_rec->fabric_name[4] = |
1793 | bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); |
1794 | fcf_rec->fabric_name[5] = |
1795 | bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); |
1796 | fcf_rec->fabric_name[6] = |
1797 | bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); |
1798 | fcf_rec->fabric_name[7] = |
1799 | bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); |
1800 | /* Mac address */ |
1801 | fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record); |
1802 | fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record); |
1803 | fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record); |
1804 | fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record); |
1805 | fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record); |
1806 | fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record); |
1807 | /* FCF record index */ |
1808 | fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); |
1809 | /* FCF record priority */ |
1810 | fcf_rec->priority = new_fcf_record->fip_priority; |
1811 | /* Switch name */ |
1812 | fcf_rec->switch_name[0] = |
1813 | bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record); |
1814 | fcf_rec->switch_name[1] = |
1815 | bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record); |
1816 | fcf_rec->switch_name[2] = |
1817 | bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record); |
1818 | fcf_rec->switch_name[3] = |
1819 | bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record); |
1820 | fcf_rec->switch_name[4] = |
1821 | bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record); |
1822 | fcf_rec->switch_name[5] = |
1823 | bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record); |
1824 | fcf_rec->switch_name[6] = |
1825 | bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record); |
1826 | fcf_rec->switch_name[7] = |
1827 | bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record); |
1828 | } |
1829 | |
1830 | /** |
1831 | * __lpfc_update_fcf_record - Update driver fcf record |
1832 | * @phba: pointer to lpfc hba data structure. |
1833 | * @fcf_rec: pointer to driver fcf record. |
1834 | * @new_fcf_record: pointer to hba fcf record. |
1835 | * @addr_mode: address mode to be set to the driver fcf record. |
1836 | * @vlan_id: vlan tag to be set to the driver fcf record. |
1837 | * @flag: flag bits to be set to the driver fcf record. |
1838 | * |
* This routine updates the driver FCF record from the new HBA FCF record
* together with the address mode, vlan_id, and other information. This
* routine is called with the hbalock held.
1842 | **/ |
1843 | static void |
1844 | __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, |
1845 | struct fcf_record *new_fcf_record, uint32_t addr_mode, |
1846 | uint16_t vlan_id, uint32_t flag) |
1847 | { |
1848 | lockdep_assert_held(&phba->hbalock); |
1849 | |
1850 | /* Copy the fields from the HBA's FCF record */ |
1851 | lpfc_copy_fcf_record(fcf_rec, new_fcf_record); |
1852 | /* Update other fields of driver FCF record */ |
1853 | fcf_rec->addr_mode = addr_mode; |
1854 | fcf_rec->vlan_id = vlan_id; |
1855 | fcf_rec->flag |= (flag | RECORD_VALID); |
1856 | __lpfc_update_fcf_record_pri(phba, |
1857 | bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), |
1858 | new_fcf_record); |
1859 | } |
1860 | |
1861 | /** |
1862 | * lpfc_register_fcf - Register the FCF with hba. |
1863 | * @phba: pointer to lpfc hba data structure. |
1864 | * |
1865 | * This routine issues a register fcfi mailbox command to register |
1866 | * the fcf with HBA. |
1867 | **/ |
1868 | static void |
1869 | lpfc_register_fcf(struct lpfc_hba *phba) |
1870 | { |
1871 | LPFC_MBOXQ_t *fcf_mbxq; |
1872 | int rc; |
1873 | |
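/*
* FCF_TS_INPROG and FCF_RR_INPROG serialize the FCF table scan
* and the roundrobin failover paths; both are cleared on every
* failure path below so a later FCF event can restart discovery.
*/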
spin_lock_irq(&phba->hbalock);
/* If the FCF is not available do nothing. */
if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
spin_unlock_irq(&phba->hbalock);
1879 | return; |
1880 | } |
1881 | |
1882 | /* The FCF is already registered, start discovery */ |
1883 | if (phba->fcf.fcf_flag & FCF_REGISTERED) { |
1884 | phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); |
1885 | phba->hba_flag &= ~FCF_TS_INPROG; |
1886 | if (phba->pport->port_state != LPFC_FLOGI && |
1887 | phba->pport->fc_flag & FC_FABRIC) { |
1888 | phba->hba_flag |= FCF_RR_INPROG; |
spin_unlock_irq(&phba->hbalock);
lpfc_initial_flogi(phba->pport);
return;
}
spin_unlock_irq(&phba->hbalock);
return;
}
spin_unlock_irq(&phba->hbalock);
1897 | |
fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!fcf_mbxq) {
spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
spin_unlock_irq(&phba->hbalock);
1903 | return; |
1904 | } |
1905 | |
1906 | lpfc_reg_fcfi(phba, fcf_mbxq); |
1907 | fcf_mbxq->vport = phba->pport; |
1908 | fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; |
1909 | rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); |
1910 | if (rc == MBX_NOT_FINISHED) { |
spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
spin_unlock_irq(&phba->hbalock);
mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1915 | } |
1916 | |
1917 | return; |
1918 | } |
1919 | |
1920 | /** |
1921 | * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery. |
1922 | * @phba: pointer to lpfc hba data structure. |
1923 | * @new_fcf_record: pointer to fcf record. |
* @boot_flag: Indicates if this record is used by the boot bios.
* @addr_mode: The address mode to be used by this FCF
* @vlan_id: The vlan id to be used as vlan tagging by this FCF.
*
* This routine compares the fcf record with the connect list obtained from
* the config region to decide if this FCF can be used for SAN discovery. It
* returns 1 if this record can be used for SAN discovery, else it returns
* zero. If this FCF record can be used for SAN discovery, the boot_flag will
* indicate if this FCF is used by the boot bios and addr_mode will indicate
* the addressing mode to be used for this FCF when the function returns.
* If the FCF record needs to be used with a particular vlan id, the vlan is
* set in vlan_id on return of the function. If no VLAN tagging needs to
* be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
1937 | **/ |
1938 | static int |
1939 | lpfc_match_fcf_conn_list(struct lpfc_hba *phba, |
1940 | struct fcf_record *new_fcf_record, |
1941 | uint32_t *boot_flag, uint32_t *addr_mode, |
1942 | uint16_t *vlan_id) |
1943 | { |
1944 | struct lpfc_fcf_conn_entry *conn_entry; |
1945 | int i, j, fcf_vlan_id = 0; |
1946 | |
1947 | /* Find the lowest VLAN id in the FCF record */ |
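/*
* vlan_bitmap is a byte array: bit j of byte i corresponds to
* VLAN id (i * 8 + j), so the first set bit found below yields
* the numerically lowest VLAN id advertised by this FCF.
*/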
1948 | for (i = 0; i < 512; i++) { |
1949 | if (new_fcf_record->vlan_bitmap[i]) { |
1950 | fcf_vlan_id = i * 8; |
1951 | j = 0; |
1952 | while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) { |
1953 | j++; |
1954 | fcf_vlan_id++; |
1955 | } |
1956 | break; |
1957 | } |
1958 | } |
1959 | |
1960 | /* FCF not valid/available or solicitation in progress */ |
1961 | if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || |
1962 | !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) || |
1963 | bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record)) |
1964 | return 0; |
1965 | |
1966 | if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { |
1967 | *boot_flag = 0; |
1968 | *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, |
1969 | new_fcf_record); |
1970 | if (phba->valid_vlan) |
1971 | *vlan_id = phba->vlan_id; |
1972 | else |
1973 | *vlan_id = LPFC_FCOE_NULL_VID; |
1974 | return 1; |
1975 | } |
1976 | |
1977 | /* |
* If there are no FCF connection table entries, the driver connects
* to all FCFs.
1980 | */ |
if (list_empty(&phba->fcf_conn_rec_list)) {
1982 | *boot_flag = 0; |
1983 | *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, |
1984 | new_fcf_record); |
1985 | |
1986 | /* |
1987 | * When there are no FCF connect entries, use driver's default |
1988 | * addressing mode - FPMA. |
1989 | */ |
1990 | if (*addr_mode & LPFC_FCF_FPMA) |
1991 | *addr_mode = LPFC_FCF_FPMA; |
1992 | |
1993 | /* If FCF record report a vlan id use that vlan id */ |
1994 | if (fcf_vlan_id) |
1995 | *vlan_id = fcf_vlan_id; |
1996 | else |
1997 | *vlan_id = LPFC_FCOE_NULL_VID; |
1998 | return 1; |
1999 | } |
2000 | |
2001 | list_for_each_entry(conn_entry, |
2002 | &phba->fcf_conn_rec_list, list) { |
2003 | if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) |
2004 | continue; |
2005 | |
2006 | if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && |
!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
new_fcf_record))
continue;
if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
2012 | new_fcf_record)) |
2013 | continue; |
2014 | if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { |
2015 | /* |
2016 | * If the vlan bit map does not have the bit set for the |
2017 | * vlan id to be used, then it is not a match. |
2018 | */ |
2019 | if (!(new_fcf_record->vlan_bitmap |
2020 | [conn_entry->conn_rec.vlan_tag / 8] & |
2021 | (1 << (conn_entry->conn_rec.vlan_tag % 8)))) |
2022 | continue; |
2023 | } |
2024 | |
2025 | /* |
2026 | * If connection record does not support any addressing mode, |
2027 | * skip the FCF record. |
2028 | */ |
2029 | if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record) |
2030 | & (LPFC_FCF_FPMA | LPFC_FCF_SPMA))) |
2031 | continue; |
2032 | |
2033 | /* |
2034 | * Check if the connection record specifies a required |
2035 | * addressing mode. |
2036 | */ |
2037 | if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && |
2038 | !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) { |
2039 | |
2040 | /* |
* If SPMA is required but the FCF does not support it, continue.
2042 | */ |
2043 | if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && |
2044 | !(bf_get(lpfc_fcf_record_mac_addr_prov, |
2045 | new_fcf_record) & LPFC_FCF_SPMA)) |
2046 | continue; |
2047 | |
2048 | /* |
* If FPMA is required but the FCF does not support it, continue.
2050 | */ |
2051 | if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && |
2052 | !(bf_get(lpfc_fcf_record_mac_addr_prov, |
2053 | new_fcf_record) & LPFC_FCF_FPMA)) |
2054 | continue; |
2055 | } |
2056 | |
2057 | /* |
* This FCF record matches the filtering criteria.
2059 | */ |
2060 | if (conn_entry->conn_rec.flags & FCFCNCT_BOOT) |
2061 | *boot_flag = 1; |
2062 | else |
2063 | *boot_flag = 0; |
2064 | |
2065 | /* |
2066 | * If user did not specify any addressing mode, or if the |
2067 | * preferred addressing mode specified by user is not supported |
2068 | * by FCF, allow fabric to pick the addressing mode. |
2069 | */ |
2070 | *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, |
2071 | new_fcf_record); |
2072 | /* |
2073 | * If the user specified a required address mode, assign that |
2074 | * address mode |
2075 | */ |
2076 | if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && |
2077 | (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED))) |
2078 | *addr_mode = (conn_entry->conn_rec.flags & |
2079 | FCFCNCT_AM_SPMA) ? |
2080 | LPFC_FCF_SPMA : LPFC_FCF_FPMA; |
2081 | /* |
2082 | * If the user specified a preferred address mode, use the |
2083 | * addr mode only if FCF support the addr_mode. |
2084 | */ |
2085 | else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && |
2086 | (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && |
2087 | (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && |
2088 | (*addr_mode & LPFC_FCF_SPMA)) |
2089 | *addr_mode = LPFC_FCF_SPMA; |
2090 | else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && |
2091 | (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && |
2092 | !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && |
2093 | (*addr_mode & LPFC_FCF_FPMA)) |
2094 | *addr_mode = LPFC_FCF_FPMA; |
2095 | |
2096 | /* If matching connect list has a vlan id, use it */ |
2097 | if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) |
2098 | *vlan_id = conn_entry->conn_rec.vlan_tag; |
2099 | /* |
2100 | * If no vlan id is specified in connect list, use the vlan id |
2101 | * in the FCF record |
2102 | */ |
2103 | else if (fcf_vlan_id) |
2104 | *vlan_id = fcf_vlan_id; |
2105 | else |
2106 | *vlan_id = LPFC_FCOE_NULL_VID; |
2107 | |
2108 | return 1; |
2109 | } |
2110 | |
2111 | return 0; |
2112 | } |
2113 | |
2114 | /** |
2115 | * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event. |
2116 | * @phba: pointer to lpfc hba data structure. |
* @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
*
* This function checks if there is any fcoe event pending while the driver
* scans FCF entries. If there is any pending event, it will restart the
* FCF scanning and return 1, else return 0.
2122 | */ |
2123 | int |
2124 | lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) |
2125 | { |
2126 | /* |
2127 | * If the Link is up and no FCoE events while in the |
2128 | * FCF discovery, no need to restart FCF discovery. |
2129 | */ |
2130 | if ((phba->link_state >= LPFC_LINK_UP) && |
2131 | (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) |
2132 | return 0; |
2133 | |
2134 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2135 | "2768 Pending link or FCF event during current " |
2136 | "handling of the previous event: link_state:x%x, " |
2137 | "evt_tag_at_scan:x%x, evt_tag_current:x%x\n" , |
2138 | phba->link_state, phba->fcoe_eventtag_at_fcf_scan, |
2139 | phba->fcoe_eventtag); |
2140 | |
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
spin_unlock_irq(&phba->hbalock);
2144 | |
2145 | if (phba->link_state >= LPFC_LINK_UP) { |
2146 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, |
2147 | "2780 Restart FCF table scan due to " |
2148 | "pending FCF event:evt_tag_at_scan:x%x, " |
2149 | "evt_tag_current:x%x\n" , |
2150 | phba->fcoe_eventtag_at_fcf_scan, |
2151 | phba->fcoe_eventtag); |
2152 | lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); |
2153 | } else { |
2154 | /* |
2155 | * Do not continue FCF discovery and clear FCF_TS_INPROG |
2156 | * flag |
2157 | */ |
2158 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, |
2159 | "2833 Stop FCF discovery process due to link " |
2160 | "state change (x%x)\n" , phba->link_state); |
2161 | spin_lock_irq(lock: &phba->hbalock); |
2162 | phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); |
2163 | phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); |
2164 | spin_unlock_irq(lock: &phba->hbalock); |
2165 | } |
2166 | |
2167 | /* Unregister the currently registered FCF if required */ |
2168 | if (unreg_fcf) { |
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_REGISTERED;
spin_unlock_irq(&phba->hbalock);
2172 | lpfc_sli4_unregister_fcf(phba); |
2173 | } |
2174 | return 1; |
2175 | } |
2176 | |
2177 | /** |
2178 | * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record |
2179 | * @phba: pointer to lpfc hba data structure. |
* @fcf_cnt: number of eligible fcf records seen so far.
*
* This function makes a running random selection decision on the FCF record
* to use through a sequence of @fcf_cnt eligible FCF records with equal
* probability. To perform integer manipulation of random numbers with
* size uint32_t, a 16-bit random number returned from get_random_u16() is
* taken as the random number generated.
*
* Returns true when the outcome is that the newly read FCF record should
* be chosen; otherwise, returns false and the previously chosen FCF
* record is kept.
2191 | **/ |
2192 | static bool |
2193 | lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt) |
2194 | { |
2195 | uint32_t rand_num; |
2196 | |
2197 | /* Get 16-bit uniform random number */ |
2198 | rand_num = get_random_u16(); |
2199 | |
2200 | /* Decision with probability 1/fcf_cnt */ |
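/*
* This is reservoir sampling with a reservoir of one: the n-th
* eligible record replaces the current pick with probability
* ~1/n, leaving each of the n records seen so far equally
* likely to be the final choice.
*/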
2201 | if ((fcf_cnt * rand_num) < 0xFFFF) |
2202 | return true; |
2203 | else |
2204 | return false; |
2205 | } |
2206 | |
2207 | /** |
2208 | * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command. |
2209 | * @phba: pointer to lpfc hba data structure. |
2210 | * @mboxq: pointer to mailbox object. |
2211 | * @next_fcf_index: pointer to holder of next fcf index. |
2212 | * |
* This routine parses the non-embedded fcf mailbox command by performing the
* necessary error checking, non-embedded read FCF record mailbox command
* SGE parsing, and endianness swapping.
*
* Returns the pointer to the new FCF record in the non-embedded mailbox
* command DMA memory if successful, otherwise NULL.
2219 | */ |
2220 | static struct fcf_record * |
2221 | lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, |
2222 | uint16_t *next_fcf_index) |
2223 | { |
2224 | void *virt_addr; |
2225 | struct lpfc_mbx_sge sge; |
2226 | struct lpfc_mbx_read_fcf_tbl *read_fcf; |
2227 | uint32_t shdr_status, shdr_add_status, if_type; |
2228 | union lpfc_sli4_cfg_shdr *shdr; |
2229 | struct fcf_record *new_fcf_record; |
2230 | |
2231 | /* Get the first SGE entry from the non-embedded DMA memory. This |
2232 | * routine only uses a single SGE. |
2233 | */ |
2234 | lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); |
2235 | if (unlikely(!mboxq->sge_array)) { |
2236 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2237 | "2524 Failed to get the non-embedded SGE " |
2238 | "virtual address\n" ); |
2239 | return NULL; |
2240 | } |
2241 | virt_addr = mboxq->sge_array->addr[0]; |
2242 | |
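/*
* lpfc_sli_pcimem_bcopy() with identical source and destination
* performs an in-place big-endian to CPU-endian conversion of the
* response header before its status fields are examined.
*/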
2243 | shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; |
2244 | lpfc_sli_pcimem_bcopy(shdr, shdr, |
2245 | sizeof(union lpfc_sli4_cfg_shdr)); |
2246 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
2247 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
2248 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
2249 | if (shdr_status || shdr_add_status) { |
2250 | if (shdr_status == STATUS_FCF_TABLE_EMPTY || |
2251 | if_type == LPFC_SLI_INTF_IF_TYPE_2) |
2252 | lpfc_printf_log(phba, KERN_ERR, |
2253 | LOG_TRACE_EVENT, |
2254 | "2726 READ_FCF_RECORD Indicates empty " |
2255 | "FCF table.\n" ); |
2256 | else |
2257 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2258 | "2521 READ_FCF_RECORD mailbox failed " |
2259 | "with status x%x add_status x%x, " |
2260 | "mbx\n" , shdr_status, shdr_add_status); |
2261 | return NULL; |
2262 | } |
2263 | |
2264 | /* Interpreting the returned information of the FCF record */ |
2265 | read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; |
2266 | lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, |
2267 | sizeof(struct lpfc_mbx_read_fcf_tbl)); |
2268 | *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); |
2269 | new_fcf_record = (struct fcf_record *)(virt_addr + |
2270 | sizeof(struct lpfc_mbx_read_fcf_tbl)); |
2271 | lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, |
2272 | offsetof(struct fcf_record, vlan_bitmap)); |
2273 | new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137); |
2274 | new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138); |
2275 | |
2276 | return new_fcf_record; |
2277 | } |
2278 | |
2279 | /** |
2280 | * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record |
2281 | * @phba: pointer to lpfc hba data structure. |
2282 | * @fcf_record: pointer to the fcf record. |
2283 | * @vlan_id: the lowest vlan identifier associated to this fcf record. |
2284 | * @next_fcf_index: the index to the next fcf record in hba's fcf table. |
2285 | * |
* This routine logs the detailed FCF record if LOG_FIP logging is
* enabled.
2288 | **/ |
2289 | static void |
2290 | lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, |
2291 | struct fcf_record *fcf_record, |
2292 | uint16_t vlan_id, |
2293 | uint16_t next_fcf_index) |
2294 | { |
2295 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2296 | "2764 READ_FCF_RECORD:\n" |
2297 | "\tFCF_Index : x%x\n" |
2298 | "\tFCF_Avail : x%x\n" |
2299 | "\tFCF_Valid : x%x\n" |
2300 | "\tFCF_SOL : x%x\n" |
2301 | "\tFIP_Priority : x%x\n" |
2302 | "\tMAC_Provider : x%x\n" |
2303 | "\tLowest VLANID : x%x\n" |
2304 | "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n" |
2305 | "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" |
2306 | "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" |
2307 | "\tNext_FCF_Index: x%x\n" , |
2308 | bf_get(lpfc_fcf_record_fcf_index, fcf_record), |
2309 | bf_get(lpfc_fcf_record_fcf_avail, fcf_record), |
2310 | bf_get(lpfc_fcf_record_fcf_valid, fcf_record), |
2311 | bf_get(lpfc_fcf_record_fcf_sol, fcf_record), |
2312 | fcf_record->fip_priority, |
2313 | bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), |
2314 | vlan_id, |
2315 | bf_get(lpfc_fcf_record_mac_0, fcf_record), |
2316 | bf_get(lpfc_fcf_record_mac_1, fcf_record), |
2317 | bf_get(lpfc_fcf_record_mac_2, fcf_record), |
2318 | bf_get(lpfc_fcf_record_mac_3, fcf_record), |
2319 | bf_get(lpfc_fcf_record_mac_4, fcf_record), |
2320 | bf_get(lpfc_fcf_record_mac_5, fcf_record), |
2321 | bf_get(lpfc_fcf_record_fab_name_0, fcf_record), |
2322 | bf_get(lpfc_fcf_record_fab_name_1, fcf_record), |
2323 | bf_get(lpfc_fcf_record_fab_name_2, fcf_record), |
2324 | bf_get(lpfc_fcf_record_fab_name_3, fcf_record), |
2325 | bf_get(lpfc_fcf_record_fab_name_4, fcf_record), |
2326 | bf_get(lpfc_fcf_record_fab_name_5, fcf_record), |
2327 | bf_get(lpfc_fcf_record_fab_name_6, fcf_record), |
2328 | bf_get(lpfc_fcf_record_fab_name_7, fcf_record), |
2329 | bf_get(lpfc_fcf_record_switch_name_0, fcf_record), |
2330 | bf_get(lpfc_fcf_record_switch_name_1, fcf_record), |
2331 | bf_get(lpfc_fcf_record_switch_name_2, fcf_record), |
2332 | bf_get(lpfc_fcf_record_switch_name_3, fcf_record), |
2333 | bf_get(lpfc_fcf_record_switch_name_4, fcf_record), |
2334 | bf_get(lpfc_fcf_record_switch_name_5, fcf_record), |
2335 | bf_get(lpfc_fcf_record_switch_name_6, fcf_record), |
2336 | bf_get(lpfc_fcf_record_switch_name_7, fcf_record), |
2337 | next_fcf_index); |
2338 | } |
2339 | |
2340 | /** |
2341 | * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF |
2342 | * @phba: pointer to lpfc hba data structure. |
2343 | * @fcf_rec: pointer to an existing FCF record. |
2344 | * @new_fcf_record: pointer to a new FCF record. |
2345 | * @new_vlan_id: vlan id from the new FCF record. |
2346 | * |
2347 | * This function performs matching test of a new FCF record against an existing |
2348 | * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id |
2349 | * will not be used as part of the FCF record matching criteria. |
2350 | * |
* Returns true if all the fields match, otherwise returns false.
2352 | */ |
2353 | static bool |
2354 | lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, |
2355 | struct lpfc_fcf_rec *fcf_rec, |
2356 | struct fcf_record *new_fcf_record, |
2357 | uint16_t new_vlan_id) |
2358 | { |
2359 | if (new_vlan_id != LPFC_FCOE_IGNORE_VID) |
if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
return false;
if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
return false;
if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
return false;
if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
2367 | return false; |
2368 | if (fcf_rec->priority != new_fcf_record->fip_priority) |
2369 | return false; |
2370 | return true; |
2371 | } |
2372 | |
2373 | /** |
2374 | * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf |
2375 | * @vport: Pointer to vport object. |
2376 | * @fcf_index: index to next fcf. |
2377 | * |
* This function processes the roundrobin fcf failover to the next fcf index.
* When this function is invoked, there will be a current fcf registered
* for flogi.
* Return: 0 to continue retrying flogi on the currently registered fcf;
* 1 to stop flogi on the currently registered fcf.
2383 | */ |
2384 | int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) |
2385 | { |
2386 | struct lpfc_hba *phba = vport->phba; |
2387 | int rc; |
2388 | |
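/*
* LPFC_FCOE_FCF_NEXT_NONE indicates the roundrobin bmask has been
* exhausted: no eligible FCF index remains for this failover round.
*/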
2389 | if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { |
spin_lock_irq(&phba->hbalock);
if (phba->hba_flag & HBA_DEVLOSS_TMO) {
spin_unlock_irq(&phba->hbalock);
2393 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2394 | "2872 Devloss tmo with no eligible " |
2395 | "FCF, unregister in-use FCF (x%x) " |
2396 | "and rescan FCF table\n" , |
2397 | phba->fcf.current_rec.fcf_indx); |
2398 | lpfc_unregister_fcf_rescan(phba); |
2399 | goto stop_flogi_current_fcf; |
2400 | } |
2401 | /* Mark the end to FLOGI roundrobin failover */ |
2402 | phba->hba_flag &= ~FCF_RR_INPROG; |
2403 | /* Allow action to new fcf asynchronous event */ |
2404 | phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); |
spin_unlock_irq(&phba->hbalock);
2406 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2407 | "2865 No FCF available, stop roundrobin FCF " |
2408 | "failover and change port state:x%x/x%x\n" , |
2409 | phba->pport->port_state, LPFC_VPORT_UNKNOWN); |
2410 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; |
2411 | |
2412 | if (!phba->fcf.fcf_redisc_attempted) { |
2413 | lpfc_unregister_fcf(phba); |
2414 | |
2415 | rc = lpfc_sli4_redisc_fcf_table(phba); |
2416 | if (!rc) { |
2417 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2418 | "3195 Rediscover FCF table\n" ); |
2419 | phba->fcf.fcf_redisc_attempted = 1; |
2420 | lpfc_sli4_clear_fcf_rr_bmask(phba); |
2421 | } else { |
2422 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2423 | "3196 Rediscover FCF table " |
2424 | "failed. Status:x%x\n" , rc); |
2425 | } |
2426 | } else { |
2427 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2428 | "3197 Already rediscover FCF table " |
2429 | "attempted. No more retry\n" ); |
2430 | } |
2431 | goto stop_flogi_current_fcf; |
2432 | } else { |
2433 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, |
2434 | "2794 Try FLOGI roundrobin FCF failover to " |
2435 | "(x%x)\n" , fcf_index); |
2436 | rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); |
2437 | if (rc) |
2438 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, |
2439 | "2761 FLOGI roundrobin FCF failover " |
2440 | "failed (rc:x%x) to read FCF (x%x)\n" , |
2441 | rc, phba->fcf.current_rec.fcf_indx); |
2442 | else |
2443 | goto stop_flogi_current_fcf; |
2444 | } |
2445 | return 0; |
2446 | |
2447 | stop_flogi_current_fcf: |
2448 | lpfc_can_disctmo(vport); |
2449 | return 1; |
2450 | } |
2451 | |
2452 | /** |
* lpfc_sli4_fcf_pri_list_del - Remove an FCF index from the priority list
* @phba: pointer to lpfc hba data structure.
* @fcf_index: the index of the fcf record to delete
*
* This routine checks the on-list flag of the fcf_index to be deleted.
* If it is on the list then it is removed from the list, and the flag
* is cleared. This routine grabs the hbalock before removing the fcf
* record from the list.
2460 | **/ |
2461 | static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba, |
2462 | uint16_t fcf_index) |
2463 | { |
2464 | struct lpfc_fcf_pri *new_fcf_pri; |
2465 | |
2466 | new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; |
2467 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2468 | "3058 deleting idx x%x pri x%x flg x%x\n" , |
2469 | fcf_index, new_fcf_pri->fcf_rec.priority, |
2470 | new_fcf_pri->fcf_rec.flag); |
2471 | spin_lock_irq(lock: &phba->hbalock); |
2472 | if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) { |
2473 | if (phba->fcf.current_rec.priority == |
2474 | new_fcf_pri->fcf_rec.priority) |
2475 | phba->fcf.eligible_fcf_cnt--; |
2476 | list_del_init(entry: &new_fcf_pri->list); |
2477 | new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST; |
2478 | } |
2479 | spin_unlock_irq(lock: &phba->hbalock); |
2480 | } |
2481 | |
2482 | /** |
* lpfc_sli4_set_fcf_flogi_fail - Mark an FCF index as having failed FLOGI
* @phba: pointer to lpfc hba data structure.
* @fcf_index: the index of the fcf record to update
*
* This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
* flag so the round robin selection for the particular priority level
* will try a different fcf record that does not have this bit set.
* If the fcf record is re-read for any reason this flag is cleared before
* adding it to the priority list.
2491 | **/ |
2492 | void |
2493 | lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index) |
2494 | { |
struct lpfc_fcf_pri *new_fcf_pri;

new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
spin_lock_irq(&phba->hbalock);
new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
spin_unlock_irq(&phba->hbalock);
2500 | } |
2501 | |
2502 | /** |
* lpfc_sli4_fcf_pri_list_add - Add an FCF index to the sorted priority list
* @phba: pointer to lpfc hba data structure.
* @fcf_index: the index of the fcf record to add
* @new_fcf_record: pointer to a new FCF record.
*
* This routine checks the priority of the fcf_index to be added.
2508 | * If it is a lower priority than the current head of the fcf_pri list |
2509 | * then it is added to the list in the right order. |
2510 | * If it is the same priority as the current head of the list then it |
2511 | * is added to the head of the list and its bit in the rr_bmask is set. |
2512 | * If the fcf_index to be added is of a higher priority than the current |
2513 | * head of the list then the rr_bmask is cleared, its bit is set in the |
2514 | * rr_bmask and it is added to the head of the list. |
2515 | * returns: |
2516 | * 0=success 1=failure |
2517 | **/ |
2518 | static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, |
2519 | uint16_t fcf_index, |
2520 | struct fcf_record *new_fcf_record) |
2521 | { |
2522 | uint16_t current_fcf_pri; |
2523 | uint16_t last_index; |
2524 | struct lpfc_fcf_pri *fcf_pri; |
2525 | struct lpfc_fcf_pri *next_fcf_pri; |
2526 | struct lpfc_fcf_pri *new_fcf_pri; |
2527 | int ret; |
2528 | |
2529 | new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; |
2530 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2531 | "3059 adding idx x%x pri x%x flg x%x\n" , |
2532 | fcf_index, new_fcf_record->fip_priority, |
2533 | new_fcf_pri->fcf_rec.flag); |
2534 | spin_lock_irq(lock: &phba->hbalock); |
2535 | if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) |
2536 | list_del_init(entry: &new_fcf_pri->list); |
2537 | new_fcf_pri->fcf_rec.fcf_index = fcf_index; |
2538 | new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; |
2539 | if (list_empty(head: &phba->fcf.fcf_pri_list)) { |
2540 | list_add(new: &new_fcf_pri->list, head: &phba->fcf.fcf_pri_list); |
2541 | ret = lpfc_sli4_fcf_rr_index_set(phba, |
2542 | new_fcf_pri->fcf_rec.fcf_index); |
2543 | goto out; |
2544 | } |
2545 | |
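/*
* fcf_rr_bmask only holds indices at the current best (lowest)
* priority value; any set bit therefore identifies the priority
* level that the new entry must be compared against.
*/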
last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2547 | LPFC_SLI4_FCF_TBL_INDX_MAX); |
2548 | if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { |
2549 | ret = 0; /* Empty rr list */ |
2550 | goto out; |
2551 | } |
2552 | current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority; |
2553 | if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) { |
list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2555 | if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) { |
2556 | memset(phba->fcf.fcf_rr_bmask, 0, |
2557 | sizeof(*phba->fcf.fcf_rr_bmask)); |
2558 | /* fcfs_at_this_priority_level = 1; */ |
2559 | phba->fcf.eligible_fcf_cnt = 1; |
2560 | } else |
2561 | /* fcfs_at_this_priority_level++; */ |
2562 | phba->fcf.eligible_fcf_cnt++; |
2563 | ret = lpfc_sli4_fcf_rr_index_set(phba, |
2564 | new_fcf_pri->fcf_rec.fcf_index); |
2565 | goto out; |
2566 | } |
2567 | |
2568 | list_for_each_entry_safe(fcf_pri, next_fcf_pri, |
2569 | &phba->fcf.fcf_pri_list, list) { |
2570 | if (new_fcf_pri->fcf_rec.priority <= |
2571 | fcf_pri->fcf_rec.priority) { |
2572 | if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list) |
list_add(&new_fcf_pri->list,
&phba->fcf.fcf_pri_list);
else
list_add(&new_fcf_pri->list,
&((struct lpfc_fcf_pri *)
fcf_pri->list.prev)->list);
2579 | ret = 0; |
2580 | goto out; |
2581 | } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list |
2582 | || new_fcf_pri->fcf_rec.priority < |
2583 | next_fcf_pri->fcf_rec.priority) { |
list_add(&new_fcf_pri->list, &fcf_pri->list);
2585 | ret = 0; |
2586 | goto out; |
2587 | } |
2588 | if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority) |
2589 | continue; |
}
2592 | ret = 1; |
2593 | out: |
2594 | /* we use = instead of |= to clear the FLOGI_FAILED flag. */ |
2595 | new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST; |
spin_unlock_irq(&phba->hbalock);
2597 | return ret; |
2598 | } |
2599 | |
2600 | /** |
2601 | * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. |
2602 | * @phba: pointer to lpfc hba data structure. |
2603 | * @mboxq: pointer to mailbox object. |
2604 | * |
* This function iterates through all the fcf records available in the
* HBA and chooses the optimal FCF record for discovery. After finding
* the FCF for discovery it registers the FCF record and kick-starts
* discovery.
2609 | * If FCF_IN_USE flag is set in currently used FCF, the routine tries to |
2610 | * use an FCF record which matches fabric name and mac address of the |
2611 | * currently used FCF record. |
2612 | * If the driver supports only one FCF, it will try to use the FCF record |
2613 | * used by BOOT_BIOS. |
2614 | */ |
2615 | void |
2616 | lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) |
2617 | { |
2618 | struct fcf_record *new_fcf_record; |
2619 | uint32_t boot_flag, addr_mode; |
2620 | uint16_t fcf_index, next_fcf_index; |
2621 | struct lpfc_fcf_rec *fcf_rec = NULL; |
2622 | uint16_t vlan_id = LPFC_FCOE_NULL_VID; |
2623 | bool select_new_fcf; |
2624 | int rc; |
2625 | |
2626 | /* If there is pending FCoE event restart FCF table scan */ |
2627 | if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { |
2628 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
2629 | return; |
2630 | } |
2631 | |
2632 | /* Parse the FCF record from the non-embedded mailbox command */ |
new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
&next_fcf_index);
2635 | if (!new_fcf_record) { |
2636 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2637 | "2765 Mailbox command READ_FCF_RECORD " |
2638 | "failed to retrieve a FCF record.\n" ); |
2639 | /* Let next new FCF event trigger fast failover */ |
2640 | spin_lock_irq(lock: &phba->hbalock); |
2641 | phba->hba_flag &= ~FCF_TS_INPROG; |
2642 | spin_unlock_irq(lock: &phba->hbalock); |
2643 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
2644 | return; |
2645 | } |
2646 | |
2647 | /* Check the FCF record against the connection list */ |
rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
&addr_mode, &vlan_id);
2650 | |
2651 | /* Log the FCF record information if turned on */ |
lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2653 | next_fcf_index); |
2654 | |
2655 | /* |
2656 | * If the fcf record does not match with connect list entries |
2657 | * read the next entry; otherwise, this is an eligible FCF |
2658 | * record for roundrobin FCF failover. |
2659 | */ |
2660 | if (!rc) { |
2661 | lpfc_sli4_fcf_pri_list_del(phba, |
2662 | bf_get(lpfc_fcf_record_fcf_index, |
2663 | new_fcf_record)); |
2664 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2665 | "2781 FCF (x%x) failed connection " |
2666 | "list check: (x%x/x%x/%x)\n" , |
2667 | bf_get(lpfc_fcf_record_fcf_index, |
2668 | new_fcf_record), |
2669 | bf_get(lpfc_fcf_record_fcf_avail, |
2670 | new_fcf_record), |
2671 | bf_get(lpfc_fcf_record_fcf_valid, |
2672 | new_fcf_record), |
2673 | bf_get(lpfc_fcf_record_fcf_sol, |
2674 | new_fcf_record)); |
2675 | if ((phba->fcf.fcf_flag & FCF_IN_USE) && |
lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2677 | new_fcf_record, LPFC_FCOE_IGNORE_VID)) { |
2678 | if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != |
2679 | phba->fcf.current_rec.fcf_indx) { |
2680 | lpfc_printf_log(phba, KERN_ERR, |
2681 | LOG_TRACE_EVENT, |
2682 | "2862 FCF (x%x) matches property " |
2683 | "of in-use FCF (x%x)\n" , |
2684 | bf_get(lpfc_fcf_record_fcf_index, |
2685 | new_fcf_record), |
2686 | phba->fcf.current_rec.fcf_indx); |
2687 | goto read_next_fcf; |
2688 | } |
2689 | /* |
2690 | * In case the current in-use FCF record becomes |
2691 | * invalid/unavailable during FCF discovery that |
2692 | * was not triggered by fast FCF failover process, |
2693 | * treat it as fast FCF failover. |
2694 | */ |
2695 | if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) && |
2696 | !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { |
2697 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2698 | "2835 Invalid in-use FCF " |
2699 | "(x%x), enter FCF failover " |
2700 | "table scan.\n" , |
2701 | phba->fcf.current_rec.fcf_indx); |
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_REDISC_FOV;
spin_unlock_irq(&phba->hbalock);
2705 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
2706 | lpfc_sli4_fcf_scan_read_fcf_rec(phba, |
2707 | LPFC_FCOE_FCF_GET_FIRST); |
2708 | return; |
2709 | } |
2710 | } |
2711 | goto read_next_fcf; |
2712 | } else { |
2713 | fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); |
2714 | rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, |
2715 | new_fcf_record); |
2716 | if (rc) |
2717 | goto read_next_fcf; |
2718 | } |
2719 | |
2720 | /* |
2721 | * If this is not the first FCF discovery of the HBA, use last |
2722 | * FCF record for the discovery. The condition that a rescan |
2723 | * matches the in-use FCF record: fabric name, switch name, mac |
2724 | * address, and vlan_id. |
2725 | */ |
spin_lock_irq(&phba->hbalock);
2727 | if (phba->fcf.fcf_flag & FCF_IN_USE) { |
2728 | if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && |
lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
new_fcf_record, vlan_id)) {
2731 | if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == |
2732 | phba->fcf.current_rec.fcf_indx) { |
2733 | phba->fcf.fcf_flag |= FCF_AVAILABLE; |
2734 | if (phba->fcf.fcf_flag & FCF_REDISC_PEND) |
2735 | /* Stop FCF redisc wait timer */ |
2736 | __lpfc_sli4_stop_fcf_redisc_wait_timer( |
2737 | phba); |
2738 | else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) |
2739 | /* Fast failover, mark completed */ |
2740 | phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; |
spin_unlock_irq(&phba->hbalock);
2742 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2743 | "2836 New FCF matches in-use " |
2744 | "FCF (x%x), port_state:x%x, " |
2745 | "fc_flag:x%x\n" , |
2746 | phba->fcf.current_rec.fcf_indx, |
2747 | phba->pport->port_state, |
2748 | phba->pport->fc_flag); |
2749 | goto out; |
2750 | } else |
2751 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2752 | "2863 New FCF (x%x) matches " |
2753 | "property of in-use FCF (x%x)\n" , |
2754 | bf_get(lpfc_fcf_record_fcf_index, |
2755 | new_fcf_record), |
2756 | phba->fcf.current_rec.fcf_indx); |
2757 | } |
2758 | /* |
2759 | * Read next FCF record from HBA searching for the matching |
2760 | * with in-use record only if not during the fast failover |
2761 | * period. In case of fast failover period, it shall try to |
2762 | * determine whether the FCF record just read should be the |
2763 | * next candidate. |
2764 | */ |
2765 | if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { |
spin_unlock_irq(&phba->hbalock);
2767 | goto read_next_fcf; |
2768 | } |
2769 | } |
2770 | /* |
2771 | * Update on failover FCF record only if it's in FCF fast-failover |
2772 | * period; otherwise, update on current FCF record. |
2773 | */ |
2774 | if (phba->fcf.fcf_flag & FCF_REDISC_FOV) |
2775 | fcf_rec = &phba->fcf.failover_rec; |
2776 | else |
2777 | fcf_rec = &phba->fcf.current_rec; |
2778 | |
2779 | if (phba->fcf.fcf_flag & FCF_AVAILABLE) { |
2780 | /* |
2781 | * If the driver FCF record does not have boot flag |
2782 | * set and new hba fcf record has boot flag set, use |
2783 | * the new hba fcf record. |
2784 | */ |
2785 | if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) { |
2786 | /* Choose this FCF record */ |
2787 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2788 | "2837 Update current FCF record " |
2789 | "(x%x) with new FCF record (x%x)\n" , |
2790 | fcf_rec->fcf_indx, |
2791 | bf_get(lpfc_fcf_record_fcf_index, |
2792 | new_fcf_record)); |
2793 | __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, |
2794 | addr_mode, vlan_id, BOOT_ENABLE); |
spin_unlock_irq(&phba->hbalock);
2796 | goto read_next_fcf; |
2797 | } |
2798 | /* |
2799 | * If the driver FCF record has boot flag set and the |
2800 | * new hba FCF record does not have boot flag, read |
2801 | * the next FCF record. |
2802 | */ |
2803 | if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { |
spin_unlock_irq(&phba->hbalock);
2805 | goto read_next_fcf; |
2806 | } |
2807 | /* |
2808 | * If the new hba FCF record has lower priority value |
2809 | * than the driver FCF record, use the new record. |
2810 | */ |
2811 | if (new_fcf_record->fip_priority < fcf_rec->priority) { |
2812 | /* Choose the new FCF record with lower priority */ |
2813 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2814 | "2838 Update current FCF record " |
2815 | "(x%x) with new FCF record (x%x)\n" , |
2816 | fcf_rec->fcf_indx, |
2817 | bf_get(lpfc_fcf_record_fcf_index, |
2818 | new_fcf_record)); |
__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
addr_mode, vlan_id, 0);
2821 | /* Reset running random FCF selection count */ |
2822 | phba->fcf.eligible_fcf_cnt = 1; |
2823 | } else if (new_fcf_record->fip_priority == fcf_rec->priority) { |
2824 | /* Update running random FCF selection count */ |
2825 | phba->fcf.eligible_fcf_cnt++; |
select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
phba->fcf.eligible_fcf_cnt);
2828 | if (select_new_fcf) { |
2829 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2830 | "2839 Update current FCF record " |
2831 | "(x%x) with new FCF record (x%x)\n" , |
2832 | fcf_rec->fcf_indx, |
2833 | bf_get(lpfc_fcf_record_fcf_index, |
2834 | new_fcf_record)); |
2835 | /* Choose the new FCF by random selection */ |
2836 | __lpfc_update_fcf_record(phba, fcf_rec, |
2837 | new_fcf_record, |
addr_mode, vlan_id, 0);
2839 | } |
2840 | } |
spin_unlock_irq(&phba->hbalock);
2842 | goto read_next_fcf; |
2843 | } |
2844 | /* |
2845 | * This is the first suitable FCF record, choose this record for |
2846 | * initial best-fit FCF. |
2847 | */ |
2848 | if (fcf_rec) { |
2849 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2850 | "2840 Update initial FCF candidate " |
2851 | "with FCF (x%x)\n" , |
2852 | bf_get(lpfc_fcf_record_fcf_index, |
2853 | new_fcf_record)); |
2854 | __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, |
addr_mode, vlan_id, (boot_flag ?
2856 | BOOT_ENABLE : 0)); |
2857 | phba->fcf.fcf_flag |= FCF_AVAILABLE; |
2858 | /* Setup initial running random FCF selection count */ |
2859 | phba->fcf.eligible_fcf_cnt = 1; |
2860 | } |
spin_unlock_irq(&phba->hbalock);
2862 | goto read_next_fcf; |
2863 | |
2864 | read_next_fcf: |
2865 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
2866 | if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) { |
2867 | if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { |
2868 | /* |
2869 | * Case of FCF fast failover scan |
2870 | */ |
2871 | |
2872 | /* |
* No suitable FCF record has been found; cancel the
* in-progress FCF scan and do nothing.
2875 | */ |
2876 | if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { |
2877 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2878 | "2782 No suitable FCF found: " |
2879 | "(x%x/x%x)\n" , |
2880 | phba->fcoe_eventtag_at_fcf_scan, |
2881 | bf_get(lpfc_fcf_record_fcf_index, |
2882 | new_fcf_record)); |
spin_lock_irq(&phba->hbalock);
if (phba->hba_flag & HBA_DEVLOSS_TMO) {
phba->hba_flag &= ~FCF_TS_INPROG;
spin_unlock_irq(&phba->hbalock);
2887 | /* Unregister in-use FCF and rescan */ |
2888 | lpfc_printf_log(phba, KERN_INFO, |
2889 | LOG_FIP, |
2890 | "2864 On devloss tmo " |
2891 | "unreg in-use FCF and " |
2892 | "rescan FCF table\n" ); |
2893 | lpfc_unregister_fcf_rescan(phba); |
2894 | return; |
2895 | } |
2896 | /* |
2897 | * Let next new FCF event trigger fast failover |
2898 | */ |
2899 | phba->hba_flag &= ~FCF_TS_INPROG; |
spin_unlock_irq(&phba->hbalock);
2901 | return; |
2902 | } |
2903 | /* |
2904 | * It has found a suitable FCF record that is not |
2905 | * the same as in-use FCF record, unregister the |
2906 | * in-use FCF record, replace the in-use FCF record |
2907 | * with the new FCF record, mark FCF fast failover |
2908 | * completed, and then start register the new FCF |
2909 | * record. |
2910 | */ |
2911 | |
2912 | /* Unregister the current in-use FCF record */ |
2913 | lpfc_unregister_fcf(phba); |
2914 | |
2915 | /* Replace in-use record with the new record */ |
2916 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2917 | "2842 Replace in-use FCF (x%x) " |
2918 | "with failover FCF (x%x)\n" , |
2919 | phba->fcf.current_rec.fcf_indx, |
2920 | phba->fcf.failover_rec.fcf_indx); |
2921 | memcpy(&phba->fcf.current_rec, |
2922 | &phba->fcf.failover_rec, |
2923 | sizeof(struct lpfc_fcf_rec)); |
2924 | /* |
2925 | * Mark the fast FCF failover rediscovery completed |
2926 | * and the start of the first round of the roundrobin |
2927 | * FCF failover. |
2928 | */ |
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
spin_unlock_irq(&phba->hbalock);
2932 | /* Register to the new FCF record */ |
2933 | lpfc_register_fcf(phba); |
2934 | } else { |
2935 | /* |
* During the transition period to fast FCF failover,
* do nothing when the search reaches the end of the FCF table.
2938 | */ |
2939 | if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) || |
2940 | (phba->fcf.fcf_flag & FCF_REDISC_PEND)) |
2941 | return; |
2942 | |
2943 | if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && |
2944 | phba->fcf.fcf_flag & FCF_IN_USE) { |
2945 | /* |
* In case the current in-use FCF record no
* longer exists during FCF discovery that
2948 | * was not triggered by fast FCF failover |
2949 | * process, treat it as fast FCF failover. |
2950 | */ |
2951 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2952 | "2841 In-use FCF record (x%x) " |
2953 | "not reported, entering fast " |
2954 | "FCF failover mode scanning.\n" , |
2955 | phba->fcf.current_rec.fcf_indx); |
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_REDISC_FOV;
spin_unlock_irq(&phba->hbalock);
2959 | lpfc_sli4_fcf_scan_read_fcf_rec(phba, |
2960 | LPFC_FCOE_FCF_GET_FIRST); |
2961 | return; |
2962 | } |
2963 | /* Register to the new FCF record */ |
2964 | lpfc_register_fcf(phba); |
2965 | } |
	} else
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}

/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the completion handler for the read FCF record mailbox command
 * issued from the eligible FCF record bmask to perform FLOGI failure
 * roundrobin FCF failover. If the FCF read back is not valid/available, it
 * falls through to retrying FLOGI to the currently registered FCF.
 * Otherwise, if the FCF read back is valid and available, it sets the newly
 * read FCF record as the failover FCF record, unregisters the currently
 * registered FCF record, copies the failover FCF record to the current FCF
 * record, and then registers the current FCF record before trying FLOGI on
 * the new failover FCF.
 */
void
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t next_fcf_index, fcf_index;
	uint16_t current_fcf_index;
	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
	int rc;

	/* If link state is not up, stop the roundrobin failover process */
	if (phba->link_state < LPFC_LINK_UP) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		phba->hba_flag &= ~FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		goto out;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record. "
				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
				phba->fcf.fcf_flag);
		lpfc_unregister_fcf_rescan(phba);
		goto out;
	}

	/* Get the needed parameters from FCF record */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
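	/* A zero return from the connection list check marks this FCF
	 * as ineligible for roundrobin failover.
	 */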
	if (!rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2848 Remove ineligible FCF (x%x) from "
				"roundrobin bmask\n", fcf_index);
		/* Clear roundrobin bmask bit for ineligible FCF */
		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
		/* Perform next round of roundrobin FCF failover */
		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
		if (rc)
			goto out;
		goto error_out;
	}

	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2760 Perform FLOGI roundrobin FCF failover: "
				"FCF (x%x) back to FCF (x%x)\n",
				phba->fcf.current_rec.fcf_indx, fcf_index);
		/* Wait 500 ms before retrying FLOGI to current FCF */
		msleep(500);
		lpfc_issue_init_vfi(phba->pport);
		goto out;
	}

	/* Upload new FCF record to the failover FCF record */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
			phba->fcf.failover_rec.fcf_indx, fcf_index);
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);

	current_fcf_index = phba->fcf.current_rec.fcf_indx;

	/* Unregister the current in-use FCF record */
	lpfc_unregister_fcf(phba);

	/* Replace in-use record with the new record */
	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
	       sizeof(struct lpfc_fcf_rec));

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2783 Perform FLOGI roundrobin FCF failover: FCF "
			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);

error_out:
	lpfc_register_fcf(phba);
out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the completion handler for the read FCF record mailbox command
 * used to update the eligible FCF bmask for FLOGI failure roundrobin FCF
 * failover when a new FCF event happens. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask of eligible FCF records for roundrobin failover.
 */
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);

	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of the init vfi mailbox command.
 */
static void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	/*
	 * VFI is not supported on interface type 0, so just do the FLOGI.
	 * Also continue if the VFI is in use - just use the same one.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	     LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2891 Init VFI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	lpfc_initial_flogi(vport);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vfi mailbox command to initialize the VFI
 * and VPI for the physical port.
 */
void
lpfc_issue_init_vfi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT, "2892 Failed to allocate "
				 "init_vfi mailbox\n");
		return;
	}
	lpfc_init_vfi(mboxq, vport);
	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2893 Failed to issue init_vfi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}

/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of the init vpi mailbox command.
 */
void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2609 Init VPI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "2731 Cannot find fabric "
					 "controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize
 * the VPI for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc, vpi;

	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
		vpi = lpfc_alloc_vpi(vport->phba);
		if (!vpi) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "3303 Failed to obtain vport vpi\n");
			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
			return;
		}
		vport->vpi = vpi;
	}

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT, "2607 Failed to allocate "
				 "init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}

/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There is no VPI for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_TRACE_EVENT,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/*
	 * VFI not supported for interface type 0, so ignore any mailbox
	 * error (except VFI in use) and continue with the discovery.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	     LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2018 REG_VFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto out_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out_free_mem;
	}

	/* If the VFI is already registered, there is nothing else to do,
	 * unless this was a VFI update and we are in PT2PT mode; then
	 * we should drop through to set the port state to ready.
	 */
	if (vport->fc_flag & FC_VFI_REGISTERED)
		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
		      vport->fc_flag & FC_PT2PT))
			goto out_free_mem;

	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* In case of SLI4 FC loopback test, we are ready */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->link_flag & LS_LOOPBACK_MODE)) {
		phba->link_state = LPFC_HBA_READY;
		goto out_free_mem;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
			 "alpacnt:%d LinkState:%x topology:%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_myDID,
			 vport->phba->alpa_map[0],
			 phba->link_state, phba->fc_topology);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/*
		 * For private loop or for NPort pt2pt,
		 * just start discovery and we are done.
		 */
		if ((vport->fc_flag & FC_PT2PT) ||
		    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
		     !(vport->fc_flag & FC_PUBLIC_LOOP))) {

			/* Use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			if (vport->fc_flag & FC_PT2PT)
				vport->port_state = LPFC_VPORT_READY;
			else
				lpfc_disc_start(vport);
		} else {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		}
	}

out_free_mem:
	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
}

static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct serv_parm *sp = &vport->fc_sparam;
	uint32_t ed_tov;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *)&vport->fc_sparam, (uint8_t *)mp->virt,
	       sizeof(struct serv_parm));

	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
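	/* When edtovResolution is set, E_D_TOV is carried in nanoseconds;
	 * round up to whole milliseconds (e.g. 2,000,000,000 ns -> 2000 ms).
	 */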
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		ed_tov = (ed_tov + 999999) / 1000000;
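	/* fc_edtov is kept in milliseconds while fc_ratov is kept in
	 * seconds, following the convention R_A_TOV = 2 x E_D_TOV.
	 */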

	phba->fc_edtov = ed_tov;
	phba->fc_ratov = (2 * ed_tov) / 1000;
	if (phba->fc_ratov < FF_DEF_RATOV) {
		/* R_A_TOV should be at least 10 sec for the initial FLOGI */
		phba->fc_ratov = FF_DEF_RATOV;
	}

	lpfc_update_vport_wwn(vport);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwpn));
	}

	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

	/* Check if sending the FLOGI is being deferred to after we get
	 * up to date CSPs from MBX_READ_SPARAM.
	 */
	if (phba->hba_flag & HBA_DEFER_FLOGI) {
		lpfc_initial_flogi(vport);
		phba->hba_flag &= ~HBA_DEFER_FLOGI;
	}
	return;

out:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
	lpfc_issue_clear_la(phba, vport);
}

static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	struct Scsi_Host *shost;
	int i;
	int rc;
	struct fcf_record *fcf_record;
	uint32_t fc_flags = 0;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
		case LPFC_LINK_SPEED_1GHZ:
		case LPFC_LINK_SPEED_2GHZ:
		case LPFC_LINK_SPEED_4GHZ:
		case LPFC_LINK_SPEED_8GHZ:
		case LPFC_LINK_SPEED_10GHZ:
		case LPFC_LINK_SPEED_16GHZ:
		case LPFC_LINK_SPEED_32GHZ:
		case LPFC_LINK_SPEED_64GHZ:
		case LPFC_LINK_SPEED_128GHZ:
		case LPFC_LINK_SPEED_256GHZ:
			break;
		default:
			phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
			break;
		}
	}

	if (phba->fc_topology &&
	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3314 Topology changed was 0x%x is 0x%x\n",
				phba->fc_topology,
				bf_get(lpfc_mbx_read_top_topology, la));
		phba->fc_topology_changed = 1;
	}

	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
	phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);

	shost = lpfc_shost_from_vport(vport);
	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* if npiv is enabled and this adapter supports npiv log
		 * a message that npiv is not supported in this topology
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1309 Link Up Event npiv not supported in loop "
					"topology\n");
		/* Get Loop Map information */
		if (bf_get(lpfc_mbx_read_top_il, la))
			fc_flags |= FC_LBIT;

		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
		i = la->lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
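				/* Overlay the 16-byte ALPA chunk so it can
				 * be logged as four 32-bit words.
				 */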
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
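				/* alpa_map[0] holds the ALPA count from the
				 * loop init (LILP) map; the ALPAs themselves
				 * start at alpa_map[1].
				 */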
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			    (phba->sli_rev >= LPFC_SLI_REV3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		fc_flags |= FC_LBIT;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	if (fc_flags) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag |= fc_flags;
		spin_unlock_irqrestore(shost->host_lock, iflags);
	}

	lpfc_linkup(phba);

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED);
		goto out;
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now.
		 * This is a phase 1 implementation that supports FCF index 0
		 * and driver defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					     GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2554 Could not allocate memory for "
						"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
							LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2013 Could not manually add FCF "
						"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irqsave(&phba->hbalock, iflags);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		/* Reset FCF roundrobin bmask for new discovery */
		lpfc_sli4_clear_fcf_rr_bmask(phba);
	}

	/* Prepare for LINK up registrations */
	memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
	scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
		  init_utsname()->nodename);
	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}

static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
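	/* Only SLI-3 and earlier expose the Host Control register;
	 * SLI-4 needs no register update here.
	 */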
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}

/*
 * This routine handles processing a READ_TOPOLOGY mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI4 only.
 */
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_mbx_read_top *la;
	struct lpfc_sli_ring *pring;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	uint8_t attn_type;
	unsigned long iflags;

	/* Unblock ELS traffic */
	pring = lpfc_phba_elsring(phba);
	if (pring)
		pring->flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
	}

	la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
	attn_type = bf_get(lpfc_mbx_read_top_att_type, la);

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irqsave(shost->host_lock, iflags);
	if (bf_get(lpfc_mbx_read_top_pb, la))
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irqrestore(shost->host_lock, iflags);
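	/* Count successive link events; a link up seen while an earlier
	 * event is still recorded (fc_eventTag != 0) is processed by
	 * transitioning through link down first.
	 */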
	if (phba->fc_eventTag <= la->eventTag) {
		phba->fc_stat.LinkMultiEvent++;
		if (attn_type == LPFC_ATT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	phba->link_events++;
	if (attn_type == LPFC_ATT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0],
					bf_get(lpfc_mbx_read_top_fa, la));
		}
		lpfc_mbx_process_link_up(phba, la);

		if (phba->cmf_active_mode != LPFC_CFG_OFF)
			lpfc_cmf_signal_init(phba);

		if (phba->lmt & LMT_64Gb)
			lpfc_read_lds_params(phba);

	} else if (attn_type == LPFC_ATT_LINK_DOWN ||
		   attn_type == LPFC_ATT_UNEXP_WWPN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1308 Link Down Event in loop back mode "
					"x%x received "
					"Data: x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag);
		else if (attn_type == LPFC_ATT_UNEXP_WWPN)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1313 Link Down Unexpected FA WWPN Event x%x "
					"received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag,
					bf_get(lpfc_mbx_read_top_fa, la));
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1305 Link Down Event x%x received "
					"Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag,
					bf_get(lpfc_mbx_read_top_fa, la));
	}

	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    bf_get(lpfc_mbx_read_top_fa, la))
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n",
				bf_get(lpfc_mbx_read_top_fa, la));

lpfc_mbx_cmpl_read_topology_free_mbuf:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

	/* The driver calls the state machine with the pmb pointer
	 * but wants to make sure a stale ctx_buf isn't acted on.
	 * The ctx_buf is restored later and cleaned up.
	 */
	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
			 "0002 rpi:%x DID:%x flg:%x %d x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp);
	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We received an RSCN after issuing this
		 * mbox reg login; we may have cycled
		 * back through the state and be
		 * back at reg login state, so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(&ndlp->lock);

		/*
		 * We cannot leave the RPI registered because
		 * if we go thru discovery again for this ndlp
		 * a subsequent REG_RPI will fail.
		 */
		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
		lpfc_unreg_rpi(vport, ndlp);
	}

	/* Call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	pmb->ctx_buf = mp;
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

	/* Decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}

static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
				 vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
					      LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_cleanup_vports_rrqs(vport, NULL);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}

int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		return rc;
	}
	return 0;
}

static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
			if (phba->nvmet_support)
				lpfc_nvmet_update_targetport(phba);
			else
				lpfc_nvme_update_localport(vport);
		}
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int mbx_wait_rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *)vport_info;
	do {
		/* While loop iteration forces a free dma buffer from
		 * the previous loop because the mbox is reused and
		 * the dump routine is a single-use construct.
		 */
		if (pmb->ctx_buf) {
			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			pmb->ctx_buf = NULL;
		}
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
						       LPFC_MBOX_TMO);

		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0544 lpfc_create_static_vport failed to"
					" issue dump mailbox command ret 0x%x "
					"status 0x%x\n",
					mbx_wait_rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
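			/* SLI-4 returns the DMA'd byte count in mailbox
			 * word 5; clamp it to the remaining buffer space.
			 */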
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
			    sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
					      vport_buff + offset,
					      byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		 offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
	    ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
	     != VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0545 lpfc_create_static_vport bad"
				" information header 0x%x 0x%x\n",
				le32_to_cpu(vport_info->signature),
				le32_to_cpu(vport_info->rev) &
				VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
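		/* Unused slots in the static vport table carry zero WWNs;
		 * skip them.
		 */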
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0546 lpfc_create_static_vport failed to"
					" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	if (mbx_wait_rc != MBX_TIMEOUT)
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	struct Scsi_Host *shost;

	pmb->ctx_ndlp = NULL;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the ndlp reference count after all
			 * references to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the ndlp reference count after all references
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* When the physical port receives a LOGO, do not start
		 * vport discovery.
		 */
		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
			lpfc_start_fdiscs(phba);
		else {
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
			spin_unlock_irq(shost->host_lock);
		}
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

	/* Drop the reference count from the mbox at the end after
	 * all the current references to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}

/*
 * This routine will issue a GID_FT for each FC4 Type supported
 * by the driver. ALL GID_FTs must complete before discovery is started.
 */
int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
	/* Good status, issue CT Request to NameServer */
	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
			/* Cannot issue NameServer FCP Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0604 %s FC TYPE %x %s\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_FCP,
					 "Finishing discovery.");
			return 0;
		}
		vport->gidft_inp++;
	}

	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
			/* Cannot issue NameServer NVME Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0605 %s FC_TYPE %x %s %d\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_NVME,
					 "Finishing discovery: gidftinp ",
					 vport->gidft_inp);
			if (vport->gidft_inp == 0)
				return 0;
		} else
			vport->gidft_inp++;
	}
	return vport->gidft_inp;
}

/**
 * lpfc_issue_gidpt - issue a GID_PT for all N_Ports
 * @vport: The virtual port for which this call is being executed.
 *
 * This routine will issue a GID_PT to get a list of all N_Ports
 *
 * Return value :
 *   0 - Failure to issue a GID_PT
 *   1 - GID_PT issued
 **/
int
lpfc_issue_gidpt(struct lpfc_vport *vport)
{
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
		/* Cannot issue NameServer GID_PT query, so finish up
		 * discovery
		 */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0606 %s Port TYPE %x %s\n",
				 "Failed to issue GID_PT to ",
				 GID_PT_N_PORT,
				 "Finishing discovery.");
		return 0;
	}
	vport->gidft_inp++;
	return 1;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	struct lpfc_vport *vport = pmb->vport;
	int rc;

	pmb->ctx_ndlp = NULL;
	vport->gidft_inp = 0;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);

out:
		/* Decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

		/* If the node is not registered with the scsi or nvme
		 * transport, remove the fabric node. The failed reg_login
		 * is terminal and forces the removal of the last node
		 * reference.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(&ndlp->lock);
			lpfc_nlp_put(ndlp);
		}

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
			 "0003 rpi:%x DID:%x flg:%x %d x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
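		/* RNN_ID/RSNN_NN register the node name and its symbolic
		 * name, RSPN_ID registers the symbolic port name, and
		 * RFT_ID registers the supported FC-4 types.
		 */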
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
				    FC_TYPE_NVME);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, 0);

		/* Link was bounced or a Fabric LOGO occurred. Start EDC
		 * with initial FW values provided the congestion mode is
		 * not off. Note that signals may or may not be supported
		 * by the adapter, but FPIN is provided by default when
		 * support for one or both signals is missing.
		 */
		if (phba->cmf_active_mode != LPFC_CFG_OFF) {
			phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
			phba->cgn_reg_signal = phba->cgn_init_reg_signal;
			rc = lpfc_issue_els_edc(vport, 0);
			lpfc_printf_log(phba, KERN_INFO,
					LOG_INIT | LOG_ELS | LOG_DISCOVERY,
					"4220 Issue EDC status x%x Data x%x\n",
					rc, phba->cgn_init_reg_signal);
		} else if (phba->lmt & LMT_64Gb) {
			/* may send link fault capability descriptor */
			lpfc_issue_els_edc(vport, 0);
		} else {
			lpfc_issue_els_rdf(vport, 0);
		}
	}

	vport->fc_ns_retry = 0;
	if (lpfc_issue_gidft(vport) == 0)
		goto out;

	/*
	 * At this point in time we may need to wait for multiple
	 * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
	 *
	 * Decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
	return;
}

/*
 * This routine handles processing a Fabric Controller REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

	pmb->ctx_ndlp = NULL;
	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0933 %s: Register FC login error: 0x%x\n",
				 __func__, mb->mbxStatus);
		goto out;
	}

	lpfc_check_nlp_post_devloss(vport, ndlp);

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n",
			 __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
			 ndlp->nlp_state);

	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

out:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

	/* Drop the reference count from the mbox at the end after
	 * all the current references to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
}
4497 | |
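/*
 * lpfc_register_remote_port - Register a remote port with the FC transport
 *
 * Builds the fc_rport_identifiers from the ndlp and adds the rport via
 * fc_remote_port_add(). Skipped for NVME-only configurations and while
 * the driver is unloading.
 */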
4498 | static void |
4499 | lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4500 | { |
4501 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
4502 | struct fc_rport *rport; |
4503 | struct lpfc_rport_data *rdata; |
4504 | struct fc_rport_identifiers rport_ids; |
4505 | struct lpfc_hba *phba = vport->phba; |
4506 | unsigned long flags; |
4507 | |
4508 | if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) |
4509 | return; |
4510 | |
4511 | /* Remote port has reappeared. Re-register w/ FC transport */ |
rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
4514 | rport_ids.port_id = ndlp->nlp_DID; |
4515 | rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; |
4516 | |
4518 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, |
4519 | "rport add: did:x%x flg:x%x type x%x" , |
4520 | ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); |
4521 | |
4522 | /* Don't add the remote port if unloading. */ |
4523 | if (vport->load_flag & FC_UNLOADING) |
4524 | return; |
4525 | |
ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
4527 | if (!rport) { |
4528 | dev_printk(KERN_WARNING, &phba->pcidev->dev, |
4529 | "Warning: fc_remote_port_add failed\n" ); |
4530 | return; |
4531 | } |
4532 | |
4533 | /* Successful port add. Complete initializing node data */ |
4534 | rport->maxframe_size = ndlp->nlp_maxframe; |
4535 | rport->supported_classes = ndlp->nlp_class_sup; |
4536 | rdata = rport->dd_data; |
4537 | rdata->pnode = lpfc_nlp_get(ndlp); |
4538 | if (!rdata->pnode) { |
4539 | dev_warn(&phba->pcidev->dev, |
4540 | "Warning - node ref failed. Unreg rport\n" ); |
4541 | fc_remote_port_delete(rport); |
4542 | ndlp->rport = NULL; |
4543 | return; |
4544 | } |
4545 | |
4546 | spin_lock_irqsave(&ndlp->lock, flags); |
4547 | ndlp->fc4_xpt_flags |= SCSI_XPT_REGD; |
spin_unlock_irqrestore(&ndlp->lock, flags);
4549 | |
4550 | if (ndlp->nlp_type & NLP_FCP_TARGET) |
4551 | rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; |
4552 | if (ndlp->nlp_type & NLP_FCP_INITIATOR) |
4553 | rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; |
4554 | if (ndlp->nlp_type & NLP_NVME_INITIATOR) |
4555 | rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR; |
4556 | if (ndlp->nlp_type & NLP_NVME_TARGET) |
4557 | rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET; |
4558 | if (ndlp->nlp_type & NLP_NVME_DISCOVERY) |
4559 | rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY; |
4560 | |
4561 | if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) |
fc_remote_port_rolechg(rport, rport_ids.roles);
4563 | |
4564 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, |
4565 | "3183 %s rport x%px DID x%x, role x%x refcnt %d\n" , |
4566 | __func__, rport, rport->port_id, rport->roles, |
4567 | kref_read(&ndlp->kref)); |
4568 | |
4569 | if ((rport->scsi_target_id != -1) && |
4570 | (rport->scsi_target_id < LPFC_MAX_TARGET)) { |
4571 | ndlp->nlp_sid = rport->scsi_target_id; |
4572 | } |
4573 | |
4574 | return; |
4575 | } |
4576 | |
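/*
 * lpfc_unregister_remote_port - Remove a remote port from the FC transport
 *
 * Deletes the rport and drops the node reference taken when the rport's
 * dd_data was set up at registration time.
 */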
4577 | static void |
4578 | lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) |
4579 | { |
4580 | struct fc_rport *rport = ndlp->rport; |
4581 | struct lpfc_vport *vport = ndlp->vport; |
4582 | |
4583 | if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) |
4584 | return; |
4585 | |
4586 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, |
4587 | "rport delete: did:x%x flg:x%x type x%x" , |
4588 | ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); |
4589 | |
4590 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
4591 | "3184 rport unregister x%06x, rport x%px " |
4592 | "xptflg x%x refcnt %d\n" , |
4593 | ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags, |
4594 | kref_read(&ndlp->kref)); |
4595 | |
4596 | fc_remote_port_delete(rport); |
4597 | lpfc_nlp_put(ndlp); |
4598 | } |
4599 | |
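/*
 * Adjust the per-vport discovery state counters under the host lock.
 * @count is +1 when a node enters @state and -1 when it leaves it.
 */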
4600 | static void |
4601 | lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) |
4602 | { |
4603 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
4604 | unsigned long iflags; |
4605 | |
4606 | spin_lock_irqsave(shost->host_lock, iflags); |
4607 | switch (state) { |
4608 | case NLP_STE_UNUSED_NODE: |
4609 | vport->fc_unused_cnt += count; |
4610 | break; |
4611 | case NLP_STE_PLOGI_ISSUE: |
4612 | vport->fc_plogi_cnt += count; |
4613 | break; |
4614 | case NLP_STE_ADISC_ISSUE: |
4615 | vport->fc_adisc_cnt += count; |
4616 | break; |
4617 | case NLP_STE_REG_LOGIN_ISSUE: |
4618 | vport->fc_reglogin_cnt += count; |
4619 | break; |
4620 | case NLP_STE_PRLI_ISSUE: |
4621 | vport->fc_prli_cnt += count; |
4622 | break; |
4623 | case NLP_STE_UNMAPPED_NODE: |
4624 | vport->fc_unmap_cnt += count; |
4625 | break; |
4626 | case NLP_STE_MAPPED_NODE: |
4627 | vport->fc_map_cnt += count; |
4628 | break; |
4629 | case NLP_STE_NPR_NODE: |
4630 | if (vport->fc_npr_cnt == 0 && count == -1) |
4631 | vport->fc_npr_cnt = 0; |
4632 | else |
4633 | vport->fc_npr_cnt += count; |
4634 | break; |
4635 | } |
spin_unlock_irqrestore(shost->host_lock, iflags);
4637 | } |
4638 | |
4639 | /* Register a node with backend if not already done */ |
4640 | void |
4641 | lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4642 | { |
4643 | unsigned long iflags; |
4644 | |
4645 | lpfc_check_nlp_post_devloss(vport, ndlp); |
4646 | |
4647 | spin_lock_irqsave(&ndlp->lock, iflags); |
4648 | if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { |
4649 | /* Already registered with backend, trigger rescan */ |
spin_unlock_irqrestore(&ndlp->lock, iflags);
4651 | |
4652 | if (ndlp->fc4_xpt_flags & NVME_XPT_REGD && |
4653 | ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) { |
4654 | lpfc_nvme_rescan_port(vport, ndlp); |
4655 | } |
4656 | return; |
4657 | } |
4658 | |
4659 | ndlp->fc4_xpt_flags |= NLP_XPT_REGD; |
spin_unlock_irqrestore(&ndlp->lock, iflags);
4661 | |
4662 | if (lpfc_valid_xpt_node(ndlp)) { |
4663 | vport->phba->nport_event_cnt++; |
4664 | /* |
4665 | * Tell the fc transport about the port, if we haven't |
4666 | * already. If we have, and it's a scsi entity, be |
4667 | */ |
4668 | lpfc_register_remote_port(vport, ndlp); |
4669 | } |
4670 | |
4671 | /* We are done if we do not have any NVME remote node */ |
4672 | if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME)) |
4673 | return; |
4674 | |
4675 | /* Notify the NVME transport of this new rport. */ |
4676 | if (vport->phba->sli_rev >= LPFC_SLI_REV4 && |
4677 | ndlp->nlp_fc4_type & NLP_FC4_NVME) { |
4678 | if (vport->phba->nvmet_support == 0) { |
4679 | /* Register this rport with the transport. |
4680 | * Only NVME Target Rports are registered with |
4681 | * the transport. |
4682 | */ |
4683 | if (ndlp->nlp_type & NLP_NVME_TARGET) { |
4684 | vport->phba->nport_event_cnt++; |
4685 | lpfc_nvme_register_port(vport, ndlp); |
4686 | } |
4687 | } else { |
4688 | /* Just take an NDLP ref count since the |
4689 | * target does not register rports. |
4690 | */ |
4691 | lpfc_nlp_get(ndlp); |
4692 | } |
4693 | } |
4694 | } |
4695 | |
4696 | /* Unregister a node with backend if not already done */ |
4697 | void |
4698 | lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4699 | { |
4700 | unsigned long iflags; |
4701 | |
4702 | spin_lock_irqsave(&ndlp->lock, iflags); |
4703 | if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) { |
spin_unlock_irqrestore(&ndlp->lock, iflags);
4705 | lpfc_printf_vlog(vport, KERN_INFO, |
4706 | LOG_ELS | LOG_NODE | LOG_DISCOVERY, |
4707 | "0999 %s Not regd: ndlp x%px rport x%px DID " |
4708 | "x%x FLG x%x XPT x%x\n" , |
4709 | __func__, ndlp, ndlp->rport, ndlp->nlp_DID, |
4710 | ndlp->nlp_flag, ndlp->fc4_xpt_flags); |
4711 | return; |
4712 | } |
4713 | |
4714 | ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; |
spin_unlock_irqrestore(&ndlp->lock, iflags);
4716 | |
4717 | if (ndlp->rport && |
4718 | ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { |
4719 | vport->phba->nport_event_cnt++; |
4720 | lpfc_unregister_remote_port(ndlp); |
4721 | } else if (!ndlp->rport) { |
4722 | lpfc_printf_vlog(vport, KERN_INFO, |
4723 | LOG_ELS | LOG_NODE | LOG_DISCOVERY, |
4724 | "1999 %s NDLP in devloss x%px DID x%x FLG x%x" |
4725 | " XPT x%x refcnt %u\n" , |
4726 | __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, |
4727 | ndlp->fc4_xpt_flags, |
4728 | kref_read(&ndlp->kref)); |
4729 | } |
4730 | |
4731 | if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) { |
4732 | vport->phba->nport_event_cnt++; |
4733 | if (vport->phba->nvmet_support == 0) { |
4734 | /* Start devloss if target. */ |
4735 | if (ndlp->nlp_type & NLP_NVME_TARGET) |
4736 | lpfc_nvme_unregister_port(vport, ndlp); |
4737 | } else { |
4738 | /* NVMET has no upcall. */ |
4739 | lpfc_nlp_put(ndlp); |
4740 | } |
4741 | } |
4742 | |
4743 | } |
4744 | |
4745 | /* |
4746 | * Adisc state change handling |
4747 | */ |
4748 | static void |
4749 | lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
4750 | int new_state) |
4751 | { |
4752 | switch (new_state) { |
4753 | /* |
4754 | * Any state to ADISC_ISSUE |
4755 | * Do nothing, adisc cmpl handling will trigger state changes |
4756 | */ |
4757 | case NLP_STE_ADISC_ISSUE: |
4758 | break; |
4759 | |
4760 | /* |
4761 | * ADISC_ISSUE to mapped states |
4762 | * Trigger a registration with backend, it will be nop if |
4763 | * already registered |
4764 | */ |
4765 | case NLP_STE_UNMAPPED_NODE: |
4766 | ndlp->nlp_type |= NLP_FC_NODE; |
4767 | fallthrough; |
4768 | case NLP_STE_MAPPED_NODE: |
4769 | ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; |
4770 | lpfc_nlp_reg_node(vport, ndlp); |
4771 | break; |
4772 | |
4773 | /* |
4774 | * ADISC_ISSUE to non-mapped states |
4775 | * We are moving from ADISC_ISSUE to a non-mapped state because |
4776 | * ADISC failed, we would have skipped unregistering with |
4777 | * backend, attempt it now |
4778 | */ |
4779 | case NLP_STE_NPR_NODE: |
4780 | ndlp->nlp_flag &= ~NLP_RCV_PLOGI; |
4781 | fallthrough; |
4782 | default: |
4783 | lpfc_nlp_unreg_node(vport, ndlp); |
4784 | break; |
4785 | } |
4786 | |
4787 | } |
4788 | |
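/*
 * Apply the transport registration/unregistration side effects of a
 * node state transition. ADISC transitions are delegated to
 * lpfc_handle_adisc_state().
 */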
4789 | static void |
4790 | lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
4791 | int old_state, int new_state) |
4792 | { |
4793 | /* Trap ADISC changes here */ |
4794 | if (new_state == NLP_STE_ADISC_ISSUE || |
4795 | old_state == NLP_STE_ADISC_ISSUE) { |
4796 | lpfc_handle_adisc_state(vport, ndlp, new_state); |
4797 | return; |
4798 | } |
4799 | |
4800 | if (new_state == NLP_STE_UNMAPPED_NODE) { |
4801 | ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; |
4802 | ndlp->nlp_type |= NLP_FC_NODE; |
4803 | } |
4804 | if (new_state == NLP_STE_MAPPED_NODE) |
4805 | ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; |
4806 | if (new_state == NLP_STE_NPR_NODE) |
4807 | ndlp->nlp_flag &= ~NLP_RCV_PLOGI; |
4808 | |
4809 | /* Reg/Unreg for FCP and NVME Transport interface */ |
4810 | if ((old_state == NLP_STE_MAPPED_NODE || |
4811 | old_state == NLP_STE_UNMAPPED_NODE)) { |
4812 | /* For nodes marked for ADISC, Handle unreg in ADISC cmpl |
4813 | * if linkup. In linkdown do unreg_node |
4814 | */ |
4815 | if (!(ndlp->nlp_flag & NLP_NPR_ADISC) || |
!lpfc_is_link_up(vport->phba))
4817 | lpfc_nlp_unreg_node(vport, ndlp); |
4818 | } |
4819 | |
4820 | if (new_state == NLP_STE_MAPPED_NODE || |
4821 | new_state == NLP_STE_UNMAPPED_NODE) |
4822 | lpfc_nlp_reg_node(vport, ndlp); |
4823 | |
4824 | /* |
4825 | * If the node just added to Mapped list was an FCP target, |
4826 | * but the remote port registration failed or assigned a target |
4827 | * id outside the presentable range - move the node to the |
4828 | * Unmapped List. |
4829 | */ |
4830 | if ((new_state == NLP_STE_MAPPED_NODE) && |
4831 | (ndlp->nlp_type & NLP_FCP_TARGET) && |
4832 | (!ndlp->rport || |
4833 | ndlp->rport->scsi_target_id == -1 || |
4834 | ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { |
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
spin_unlock_irq(&ndlp->lock);
4838 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
4839 | } |
4840 | } |
4841 | |
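/* Return a printable name for a node state, e.g. "MAPPED" or "NPR" */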
4842 | static char * |
4843 | lpfc_nlp_state_name(char *buffer, size_t size, int state) |
4844 | { |
4845 | static char *states[] = { |
4846 | [NLP_STE_UNUSED_NODE] = "UNUSED" , |
4847 | [NLP_STE_PLOGI_ISSUE] = "PLOGI" , |
4848 | [NLP_STE_ADISC_ISSUE] = "ADISC" , |
4849 | [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN" , |
4850 | [NLP_STE_PRLI_ISSUE] = "PRLI" , |
4851 | [NLP_STE_LOGO_ISSUE] = "LOGO" , |
4852 | [NLP_STE_UNMAPPED_NODE] = "UNMAPPED" , |
4853 | [NLP_STE_MAPPED_NODE] = "MAPPED" , |
4854 | [NLP_STE_NPR_NODE] = "NPR" , |
4855 | }; |
4856 | |
4857 | if (state < NLP_STE_MAX_STATE && states[state]) |
strscpy(buffer, states[state], size);
else
snprintf(buffer, size, "unknown (%d)", state);
4861 | return buffer; |
4862 | } |
4863 | |
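/*
 * Move a node to a new discovery state: log the transition, keep the
 * vport state counters balanced, and let lpfc_nlp_state_cleanup()
 * perform any required transport registration changes.
 */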
4864 | void |
4865 | lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
4866 | int state) |
4867 | { |
4868 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
4869 | int old_state = ndlp->nlp_state; |
4870 | int node_dropped = ndlp->nlp_flag & NLP_DROPPED; |
4871 | char name1[16], name2[16]; |
4872 | |
4873 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
4874 | "0904 NPort state transition x%06x, %s -> %s\n" , |
4875 | ndlp->nlp_DID, |
4876 | lpfc_nlp_state_name(name1, sizeof(name1), old_state), |
4877 | lpfc_nlp_state_name(name2, sizeof(name2), state)); |
4878 | |
4879 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, |
4880 | "node statechg did:x%x old:%d ste:%d" , |
4881 | ndlp->nlp_DID, old_state, state); |
4882 | |
4883 | if (node_dropped && old_state == NLP_STE_UNUSED_NODE && |
4884 | state != NLP_STE_UNUSED_NODE) { |
4885 | ndlp->nlp_flag &= ~NLP_DROPPED; |
4886 | lpfc_nlp_get(ndlp); |
4887 | } |
4888 | |
4889 | if (old_state == NLP_STE_NPR_NODE && |
4890 | state != NLP_STE_NPR_NODE) |
4891 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
4892 | if (old_state == NLP_STE_UNMAPPED_NODE) { |
4893 | ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; |
4894 | ndlp->nlp_type &= ~NLP_FC_NODE; |
4895 | } |
4896 | |
if (list_empty(&ndlp->nlp_listp)) {
spin_lock_irq(shost->host_lock);
list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
spin_unlock_irq(shost->host_lock);
} else if (old_state)
lpfc_nlp_counters(vport, old_state, -1);
4903 | |
4904 | ndlp->nlp_state = state; |
lpfc_nlp_counters(vport, state, 1);
lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4907 | } |
4908 | |
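/* Add the node to the vport's fc_nodes list if it is not already on it */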
4909 | void |
4910 | lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4911 | { |
4912 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
4913 | |
if (list_empty(&ndlp->nlp_listp)) {
spin_lock_irq(shost->host_lock);
list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
spin_unlock_irq(shost->host_lock);
4918 | } |
4919 | } |
4920 | |
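/*
 * Remove the node from the vport's fc_nodes list, decrement its state
 * counter, and run the NLP_STE_UNUSED_NODE cleanup path to unregister
 * it from the transports.
 */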
4921 | void |
4922 | lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4923 | { |
4924 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
4925 | |
4926 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
spin_lock_irq(shost->host_lock);
list_del_init(&ndlp->nlp_listp);
spin_unlock_irq(shost->host_lock);
lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4933 | NLP_STE_UNUSED_NODE); |
4934 | } |
4935 | |
4936 | /** |
4937 | * lpfc_initialize_node - Initialize all fields of node object |
4938 | * @vport: Pointer to Virtual Port object. |
4939 | * @ndlp: Pointer to FC node object. |
4940 | * @did: FC_ID of the node. |
4941 | * |
* This function is always called when a node object needs to be initialized.
* It initializes all the fields of the node object. Although the reference
* to phba from @ndlp can be obtained indirectly through its reference to
* @vport, a direct reference to phba is taken here by @ndlp because the
* life-span of @ndlp might go beyond the existence of @vport; the final
* release of ndlp is determined by its reference count, and operations on
* @ndlp need the reference to phba.
4949 | **/ |
4950 | static inline void |
4951 | lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
4952 | uint32_t did) |
4953 | { |
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
4958 | |
4959 | ndlp->nlp_DID = did; |
4960 | ndlp->vport = vport; |
4961 | ndlp->phba = vport->phba; |
4962 | ndlp->nlp_sid = NLP_NO_SID; |
4963 | ndlp->nlp_fc4_type = NLP_FC4_NONE; |
kref_init(&ndlp->kref);
atomic_set(&ndlp->cmd_pending, 0);
4966 | ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; |
4967 | ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; |
4968 | } |
4969 | |
4970 | void |
4971 | lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4972 | { |
4973 | /* |
4974 | * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should |
4975 | * be used when lpfc wants to remove the "last" lpfc_nlp_put() to |
4976 | * release the ndlp from the vport when conditions are correct. |
4977 | */ |
4978 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
4979 | return; |
4980 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); |
4981 | if (vport->phba->sli_rev == LPFC_SLI_REV4) { |
4982 | lpfc_cleanup_vports_rrqs(vport, ndlp); |
4983 | lpfc_unreg_rpi(vport, ndlp); |
4984 | } |
4985 | |
4986 | /* NLP_DROPPED means another thread already removed the initial |
4987 | * reference from lpfc_nlp_init. If set, don't drop it again and |
4988 | * introduce an imbalance. |
4989 | */ |
spin_lock_irq(&ndlp->lock);
if (!(ndlp->nlp_flag & NLP_DROPPED)) {
ndlp->nlp_flag |= NLP_DROPPED;
spin_unlock_irq(&ndlp->lock);
lpfc_nlp_put(ndlp);
return;
}
spin_unlock_irq(&ndlp->lock);
4998 | } |
4999 | |
5000 | /* |
5001 | * Start / ReStart rescue timer for Discovery / RSCN handling |
5002 | */ |
5003 | void |
5004 | lpfc_set_disctmo(struct lpfc_vport *vport) |
5005 | { |
5006 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
5007 | struct lpfc_hba *phba = vport->phba; |
5008 | uint32_t tmo; |
5009 | |
5010 | if (vport->port_state == LPFC_LOCAL_CFG_LINK) { |
5011 | /* For FAN, timeout should be greater than edtov */ |
5012 | tmo = (((phba->fc_edtov + 999) / 1000) + 1); |
5013 | } else { |
5014 | /* Normal discovery timeout should be > than ELS/CT timeout |
5015 | * FC spec states we need 3 * ratov for CT requests |
5016 | */ |
5017 | tmo = ((phba->fc_ratov * 3) + 3); |
5018 | } |
5019 | |
5020 | |
if (!timer_pending(&vport->fc_disctmo)) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"set disc timer: tmo:x%x state:x%x flg:x%x",
tmo, vport->port_state, vport->fc_flag);
}

mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_DISC_TMO;
spin_unlock_irq(shost->host_lock);
5031 | |
5032 | /* Start Discovery Timer state <hba_state> */ |
5033 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5034 | "0247 Start Discovery Timer state x%x " |
5035 | "Data: x%x x%lx x%x x%x\n" , |
5036 | vport->port_state, tmo, |
5037 | (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt, |
5038 | vport->fc_adisc_cnt); |
5039 | |
5040 | return; |
5041 | } |
5042 | |
5043 | /* |
5044 | * Cancel rescue timer for Discovery / RSCN handling |
5045 | */ |
5046 | int |
5047 | lpfc_can_disctmo(struct lpfc_vport *vport) |
5048 | { |
5049 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
5050 | unsigned long iflags; |
5051 | |
5052 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
5053 | "can disc timer: state:x%x rtry:x%x flg:x%x" , |
5054 | vport->port_state, vport->fc_ns_retry, vport->fc_flag); |
5055 | |
5056 | /* Turn off discovery timer if its running */ |
if (vport->fc_flag & FC_DISC_TMO ||
timer_pending(&vport->fc_disctmo)) {
spin_lock_irqsave(shost->host_lock, iflags);
vport->fc_flag &= ~FC_DISC_TMO;
spin_unlock_irqrestore(shost->host_lock, iflags);
del_timer_sync(&vport->fc_disctmo);
spin_lock_irqsave(&vport->work_port_lock, iflags);
vport->work_port_events &= ~WORKER_DISC_TMO;
spin_unlock_irqrestore(&vport->work_port_lock, iflags);
5066 | } |
5067 | |
5068 | /* Cancel Discovery Timer state <hba_state> */ |
5069 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5070 | "0248 Cancel Discovery Timer state x%x " |
5071 | "Data: x%x x%x x%x\n" , |
5072 | vport->port_state, vport->fc_flag, |
5073 | vport->fc_plogi_cnt, vport->fc_adisc_cnt); |
5074 | return 0; |
5075 | } |
5076 | |
5077 | /* |
5078 | * Check specified ring for outstanding IOCB on the SLI queue |
5079 | * Return true if iocb matches the specified nport |
5080 | */ |
5081 | int |
5082 | lpfc_check_sli_ndlp(struct lpfc_hba *phba, |
5083 | struct lpfc_sli_ring *pring, |
5084 | struct lpfc_iocbq *iocb, |
5085 | struct lpfc_nodelist *ndlp) |
5086 | { |
5087 | struct lpfc_vport *vport = ndlp->vport; |
5088 | u8 ulp_command; |
5089 | u16 ulp_context; |
5090 | u32 remote_id; |
5091 | |
5092 | if (iocb->vport != vport) |
5093 | return 0; |
5094 | |
ulp_command = get_job_cmnd(phba, iocb);
ulp_context = get_job_ulpcontext(phba, iocb);
remote_id = get_job_els_rsp64_did(phba, iocb);
5098 | |
5099 | if (pring->ringno == LPFC_ELS_RING) { |
5100 | switch (ulp_command) { |
5101 | case CMD_GEN_REQUEST64_CR: |
5102 | if (iocb->ndlp == ndlp) |
5103 | return 1; |
5104 | fallthrough; |
5105 | case CMD_ELS_REQUEST64_CR: |
5106 | if (remote_id == ndlp->nlp_DID) |
5107 | return 1; |
5108 | fallthrough; |
5109 | case CMD_XMIT_ELS_RSP64_CX: |
5110 | if (iocb->ndlp == ndlp) |
5111 | return 1; |
5112 | } |
5113 | } else if (pring->ringno == LPFC_FCP_RING) { |
5114 | /* Skip match check if waiting to relogin to FCP target */ |
5115 | if ((ndlp->nlp_type & NLP_FCP_TARGET) && |
5116 | (ndlp->nlp_flag & NLP_DELAY_TMO)) { |
5117 | return 0; |
5118 | } |
5119 | if (ulp_context == ndlp->nlp_rpi) |
5120 | return 1; |
5121 | } |
5122 | return 0; |
5123 | } |
5124 | |
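/* Move all txq IOCBs on @pring that match @ndlp onto @dequeue_list */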
5125 | static void |
5126 | __lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba, |
5127 | struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring, |
5128 | struct list_head *dequeue_list) |
5129 | { |
5130 | struct lpfc_iocbq *iocb, *next_iocb; |
5131 | |
5132 | list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { |
5133 | /* Check to see if iocb matches the nport */ |
5134 | if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) |
5135 | /* match, dequeue */ |
list_move_tail(&iocb->list, dequeue_list);
5137 | } |
5138 | } |
5139 | |
5140 | static void |
5141 | lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba, |
5142 | struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) |
5143 | { |
5144 | struct lpfc_sli *psli = &phba->sli; |
5145 | uint32_t i; |
5146 | |
spin_lock_irq(&phba->hbalock);
for (i = 0; i < psli->num_rings; i++)
__lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
dequeue_list);
spin_unlock_irq(&phba->hbalock);
5152 | } |
5153 | |
5154 | static void |
5155 | lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba, |
5156 | struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) |
5157 | { |
5158 | struct lpfc_sli_ring *pring; |
5159 | struct lpfc_queue *qp = NULL; |
5160 | |
spin_lock_irq(&phba->hbalock);
list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
pring = qp->pring;
if (!pring)
continue;
spin_lock(&pring->ring_lock);
__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
spin_unlock(&pring->ring_lock);
}
spin_unlock_irq(&phba->hbalock);
5171 | } |
5172 | |
5173 | /* |
5174 | * Free resources / clean up outstanding I/Os |
5175 | * associated with nlp_rpi in the LPFC_NODELIST entry. |
5176 | */ |
5177 | static int |
5178 | lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) |
5179 | { |
5180 | LIST_HEAD(completions); |
5181 | |
5182 | lpfc_fabric_abort_nport(ndlp); |
5183 | |
5184 | /* |
5185 | * Everything that matches on txcmplq will be returned |
5186 | * by firmware with a no rpi error. |
5187 | */ |
5188 | if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { |
5189 | if (phba->sli_rev != LPFC_SLI_REV4) |
lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
else
lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
5193 | } |
5194 | |
5195 | /* Cancel all the IOCBs from the completions list */ |
5196 | lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, |
5197 | IOERR_SLI_ABORTED); |
5198 | |
5199 | return 0; |
5200 | } |
5201 | |
5202 | /** |
5203 | * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO |
5204 | * @phba: Pointer to HBA context object. |
5205 | * @pmb: Pointer to mailbox object. |
5206 | * |
5207 | * This function will issue an ELS LOGO command after completing |
5208 | * the UNREG_RPI. |
5209 | **/ |
5210 | static void |
5211 | lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
5212 | { |
5213 | struct lpfc_vport *vport = pmb->vport; |
5214 | struct lpfc_nodelist *ndlp; |
5215 | |
5216 | ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp); |
5217 | if (!ndlp) |
5218 | return; |
5219 | lpfc_issue_els_logo(vport, ndlp, 0); |
5220 | |
5221 | /* Check to see if there are any deferred events to process */ |
5222 | if ((ndlp->nlp_flag & NLP_UNREG_INP) && |
5223 | (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { |
5224 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5225 | "1434 UNREG cmpl deferred logo x%x " |
5226 | "on NPort x%x Data: x%x x%px\n" , |
5227 | ndlp->nlp_rpi, ndlp->nlp_DID, |
5228 | ndlp->nlp_defer_did, ndlp); |
5229 | |
5230 | ndlp->nlp_flag &= ~NLP_UNREG_INP; |
5231 | ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; |
5232 | lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); |
5233 | } else { |
5234 | /* NLP_RELEASE_RPI is only set for SLI4 ports. */ |
5235 | if (ndlp->nlp_flag & NLP_RELEASE_RPI) { |
5236 | lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); |
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
spin_unlock_irq(&ndlp->lock);
}
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_UNREG_INP;
spin_unlock_irq(&ndlp->lock);
5245 | } |
5246 | |
5247 | /* The node has an outstanding reference for the unreg. Now |
5248 | * that the LOGO action and cleanup are finished, release |
5249 | * resources. |
5250 | */ |
5251 | lpfc_nlp_put(ndlp); |
mempool_free(pmb, phba->mbox_mem_pool);
5253 | } |
5254 | |
5255 | /* |
5256 | * Sets the mailbox completion handler to be used for the |
5257 | * unreg_rpi command. The handler varies based on the state of |
5258 | * the port and what will be happening to the rpi next. |
5259 | */ |
5260 | static void |
5261 | lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, |
5262 | struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) |
5263 | { |
5264 | unsigned long iflags; |
5265 | |
5266 | /* Driver always gets a reference on the mailbox job |
5267 | * in support of async jobs. |
5268 | */ |
5269 | mbox->ctx_ndlp = lpfc_nlp_get(ndlp); |
5270 | if (!mbox->ctx_ndlp) |
5271 | return; |
5272 | |
5273 | if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { |
5274 | mbox->mbox_cmpl = lpfc_nlp_logo_unreg; |
5275 | |
5276 | } else if (phba->sli_rev == LPFC_SLI_REV4 && |
5277 | (!(vport->load_flag & FC_UNLOADING)) && |
5278 | (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= |
5279 | LPFC_SLI_INTF_IF_TYPE_2) && |
(kref_read(&ndlp->kref) > 0)) {
5281 | mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; |
5282 | } else { |
5283 | if (vport->load_flag & FC_UNLOADING) { |
5284 | if (phba->sli_rev == LPFC_SLI_REV4) { |
5285 | spin_lock_irqsave(&ndlp->lock, iflags); |
5286 | ndlp->nlp_flag |= NLP_RELEASE_RPI; |
spin_unlock_irqrestore(&ndlp->lock, iflags);
5288 | } |
5289 | } |
5290 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
5291 | } |
5292 | } |
5293 | |
5294 | /* |
5295 | * Free rpi associated with LPFC_NODELIST entry. |
5296 | * This routine is called from lpfc_freenode(), when we are removing |
5297 | * a LPFC_NODELIST entry. It is also called if the driver initiates a |
5298 | * LOGO that completes successfully, and we are waiting to PLOGI back |
5299 | * to the remote NPort. In addition, it is called after we receive |
* an unsolicited ELS cmd, send back a rsp, the rsp completes and
5301 | * we are waiting to PLOGI back to the remote NPort. |
5302 | */ |
5303 | int |
5304 | lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
5305 | { |
5306 | struct lpfc_hba *phba = vport->phba; |
5307 | LPFC_MBOXQ_t *mbox; |
5308 | int rc, acc_plogi = 1; |
5309 | uint16_t rpi; |
5310 | |
5311 | if (ndlp->nlp_flag & NLP_RPI_REGISTERED || |
5312 | ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { |
5313 | if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) |
5314 | lpfc_printf_vlog(vport, KERN_INFO, |
5315 | LOG_NODE | LOG_DISCOVERY, |
5316 | "3366 RPI x%x needs to be " |
5317 | "unregistered nlp_flag x%x " |
5318 | "did x%x\n" , |
5319 | ndlp->nlp_rpi, ndlp->nlp_flag, |
5320 | ndlp->nlp_DID); |
5321 | |
5322 | /* If there is already an UNREG in progress for this ndlp, |
5323 | * no need to queue up another one. |
5324 | */ |
5325 | if (ndlp->nlp_flag & NLP_UNREG_INP) { |
5326 | lpfc_printf_vlog(vport, KERN_INFO, |
5327 | LOG_NODE | LOG_DISCOVERY, |
5328 | "1436 unreg_rpi SKIP UNREG x%x on " |
5329 | "NPort x%x deferred x%x flg x%x " |
5330 | "Data: x%px\n" , |
5331 | ndlp->nlp_rpi, ndlp->nlp_DID, |
5332 | ndlp->nlp_defer_did, |
5333 | ndlp->nlp_flag, ndlp); |
5334 | goto out; |
5335 | } |
5336 | |
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5338 | if (mbox) { |
5339 | /* SLI4 ports require the physical rpi value. */ |
5340 | rpi = ndlp->nlp_rpi; |
5341 | if (phba->sli_rev == LPFC_SLI_REV4) |
5342 | rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; |
5343 | |
5344 | lpfc_unreg_login(phba, vport->vpi, rpi, mbox); |
5345 | mbox->vport = vport; |
5346 | lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox); |
5347 | if (!mbox->ctx_ndlp) { |
mempool_free(mbox, phba->mbox_mem_pool);
5349 | return 1; |
5350 | } |
5351 | |
5352 | if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) |
5353 | /* |
5354 | * accept PLOGIs after unreg_rpi_cmpl |
5355 | */ |
5356 | acc_plogi = 0; |
5357 | if (((ndlp->nlp_DID & Fabric_DID_MASK) != |
5358 | Fabric_DID_MASK) && |
5359 | (!(vport->fc_flag & FC_OFFLINE_MODE))) |
5360 | ndlp->nlp_flag |= NLP_UNREG_INP; |
5361 | |
5362 | lpfc_printf_vlog(vport, KERN_INFO, |
5363 | LOG_NODE | LOG_DISCOVERY, |
5364 | "1433 unreg_rpi UNREG x%x on " |
5365 | "NPort x%x deferred flg x%x " |
5366 | "Data:x%px\n" , |
5367 | ndlp->nlp_rpi, ndlp->nlp_DID, |
5368 | ndlp->nlp_flag, ndlp); |
5369 | |
5370 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
5371 | if (rc == MBX_NOT_FINISHED) { |
5372 | ndlp->nlp_flag &= ~NLP_UNREG_INP; |
mempool_free(mbox, phba->mbox_mem_pool);
5374 | acc_plogi = 1; |
5375 | lpfc_nlp_put(ndlp); |
5376 | } |
5377 | } else { |
5378 | lpfc_printf_vlog(vport, KERN_INFO, |
5379 | LOG_NODE | LOG_DISCOVERY, |
5380 | "1444 Failed to allocate mempool " |
5381 | "unreg_rpi UNREG x%x, " |
5382 | "DID x%x, flag x%x, " |
5383 | "ndlp x%px\n" , |
5384 | ndlp->nlp_rpi, ndlp->nlp_DID, |
5385 | ndlp->nlp_flag, ndlp); |
5386 | |
5387 | /* Because mempool_alloc failed, we |
5388 | * will issue a LOGO here and keep the rpi alive if |
5389 | * not unloading. |
5390 | */ |
5391 | if (!(vport->load_flag & FC_UNLOADING)) { |
5392 | ndlp->nlp_flag &= ~NLP_UNREG_INP; |
5393 | lpfc_issue_els_logo(vport, ndlp, 0); |
5394 | ndlp->nlp_prev_state = ndlp->nlp_state; |
5395 | lpfc_nlp_set_state(vport, ndlp, |
5396 | NLP_STE_NPR_NODE); |
5397 | } |
5398 | |
5399 | return 1; |
5400 | } |
5401 | lpfc_no_rpi(phba, ndlp); |
5402 | out: |
5403 | if (phba->sli_rev != LPFC_SLI_REV4) |
5404 | ndlp->nlp_rpi = 0; |
5405 | ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; |
5406 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; |
5407 | if (acc_plogi) |
5408 | ndlp->nlp_flag &= ~NLP_LOGO_ACC; |
5409 | return 1; |
5410 | } |
5411 | ndlp->nlp_flag &= ~NLP_LOGO_ACC; |
5412 | return 0; |
5413 | } |
5414 | |
5415 | /** |
5416 | * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba. |
5417 | * @phba: pointer to lpfc hba data structure. |
5418 | * |
5419 | * This routine is invoked to unregister all the currently registered RPIs |
5420 | * to the HBA. |
5421 | **/ |
5422 | void |
5423 | lpfc_unreg_hba_rpis(struct lpfc_hba *phba) |
5424 | { |
5425 | struct lpfc_vport **vports; |
5426 | struct lpfc_nodelist *ndlp; |
5427 | struct Scsi_Host *shost; |
5428 | int i; |
5429 | |
5430 | vports = lpfc_create_vport_work_array(phba); |
5431 | if (!vports) { |
5432 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
5433 | "2884 Vport array allocation failed \n" ); |
5434 | return; |
5435 | } |
5436 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
/* The mempool_alloc might sleep */
spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vports[i], ndlp);
spin_lock_irq(shost->host_lock);
5445 | } |
5446 | } |
spin_unlock_irq(shost->host_lock);
5448 | } |
5449 | lpfc_destroy_vport_work_array(phba, vports); |
5450 | } |
5451 | |
5452 | void |
5453 | lpfc_unreg_all_rpis(struct lpfc_vport *vport) |
5454 | { |
5455 | struct lpfc_hba *phba = vport->phba; |
5456 | LPFC_MBOXQ_t *mbox; |
5457 | int rc; |
5458 | |
5459 | if (phba->sli_rev == LPFC_SLI_REV4) { |
5460 | lpfc_sli4_unreg_all_rpis(vport); |
5461 | return; |
5462 | } |
5463 | |
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5465 | if (mbox) { |
5466 | lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT, |
5467 | mbox); |
5468 | mbox->vport = vport; |
5469 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
5470 | mbox->ctx_ndlp = NULL; |
5471 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); |
5472 | if (rc != MBX_TIMEOUT) |
mempool_free(mbox, phba->mbox_mem_pool);
5474 | |
5475 | if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) |
5476 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
5477 | "1836 Could not issue " |
5478 | "unreg_login(all_rpis) status %d\n" , |
5479 | rc); |
5480 | } |
5481 | } |
5482 | |
5483 | void |
5484 | lpfc_unreg_default_rpis(struct lpfc_vport *vport) |
5485 | { |
5486 | struct lpfc_hba *phba = vport->phba; |
5487 | LPFC_MBOXQ_t *mbox; |
5488 | int rc; |
5489 | |
5490 | /* Unreg DID is an SLI3 operation. */ |
5491 | if (phba->sli_rev > LPFC_SLI_REV3) |
5492 | return; |
5493 | |
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5495 | if (mbox) { |
5496 | lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS, |
5497 | mbox); |
5498 | mbox->vport = vport; |
5499 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
5500 | mbox->ctx_ndlp = NULL; |
5501 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); |
5502 | if (rc != MBX_TIMEOUT) |
mempool_free(mbox, phba->mbox_mem_pool);
5504 | |
5505 | if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) |
5506 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
5507 | "1815 Could not issue " |
5508 | "unreg_did (default rpis) status %d\n" , |
5509 | rc); |
5510 | } |
5511 | } |
5512 | |
5513 | /* |
5514 | * Free resources associated with LPFC_NODELIST entry |
5515 | * so it can be freed. |
5516 | */ |
5517 | static int |
5518 | lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
5519 | { |
5520 | struct lpfc_hba *phba = vport->phba; |
5521 | LPFC_MBOXQ_t *mb, *nextmb; |
5522 | |
5523 | /* Cleanup node for NPort <nlp_DID> */ |
5524 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
5525 | "0900 Cleanup node for NPort x%x " |
5526 | "Data: x%x x%x x%x\n" , |
5527 | ndlp->nlp_DID, ndlp->nlp_flag, |
5528 | ndlp->nlp_state, ndlp->nlp_rpi); |
5529 | lpfc_dequeue_node(vport, ndlp); |
5530 | |
5531 | /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */ |
5532 | |
5533 | /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ |
5534 | if ((mb = phba->sli.mbox_active)) { |
5535 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && |
5536 | !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && |
5537 | (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { |
5538 | mb->ctx_ndlp = NULL; |
5539 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
5540 | } |
5541 | } |
5542 | |
spin_lock_irq(&phba->hbalock);
5544 | /* Cleanup REG_LOGIN completions which are not yet processed */ |
5545 | list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { |
5546 | if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) || |
5547 | (mb->mbox_flag & LPFC_MBX_IMED_UNREG) || |
5548 | (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp)) |
5549 | continue; |
5550 | |
5551 | mb->ctx_ndlp = NULL; |
5552 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
5553 | } |
5554 | |
5555 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { |
5556 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && |
5557 | !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && |
5558 | (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { |
list_del(&mb->list);
lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
5561 | |
5562 | /* Don't invoke lpfc_nlp_put. The driver is in |
5563 | * lpfc_nlp_release context. |
5564 | */ |
5565 | } |
5566 | } |
spin_unlock_irq(&phba->hbalock);
5568 | |
5569 | lpfc_els_abort(phba, ndlp); |
5570 | |
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
spin_unlock_irq(&ndlp->lock);

ndlp->nlp_last_elscmd = 0;
del_timer_sync(&ndlp->nlp_delayfunc);

list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
list_del_init(&ndlp->recovery_evt.evt_listp);
5581 | lpfc_cleanup_vports_rrqs(vport, ndlp); |
5582 | |
5583 | if (phba->sli_rev == LPFC_SLI_REV4) |
5584 | ndlp->nlp_flag |= NLP_RELEASE_RPI; |
5585 | |
5586 | return 0; |
5587 | } |
5588 | |
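/*
 * Match a node against a DID. Beyond the direct compare, this handles
 * the private loop case where a device behind an FL_Port reports an
 * ALPA-only address (domain and area both zero).
 */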
5589 | static int |
5590 | lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
5591 | uint32_t did) |
5592 | { |
5593 | D_ID mydid, ndlpdid, matchdid; |
5594 | |
5595 | if (did == Bcast_DID) |
5596 | return 0; |
5597 | |
5598 | /* First check for Direct match */ |
5599 | if (ndlp->nlp_DID == did) |
5600 | return 1; |
5601 | |
5602 | /* Next check for area/domain identically equals 0 match */ |
5603 | mydid.un.word = vport->fc_myDID; |
5604 | if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { |
5605 | return 0; |
5606 | } |
5607 | |
5608 | matchdid.un.word = did; |
5609 | ndlpdid.un.word = ndlp->nlp_DID; |
5610 | if (matchdid.un.b.id == ndlpdid.un.b.id) { |
5611 | if ((mydid.un.b.domain == matchdid.un.b.domain) && |
5612 | (mydid.un.b.area == matchdid.un.b.area)) { |
5613 | /* This code is supposed to match the ID |
5614 | * for a private loop device that is |
5615 | * connect to fl_port. But we need to |
5616 | * check that the port did not just go |
5617 | * from pt2pt to fabric or we could end |
5618 | * up matching ndlp->nlp_DID 000001 to |
5619 | * fabric DID 0x20101 |
5620 | */ |
5621 | if ((ndlpdid.un.b.domain == 0) && |
5622 | (ndlpdid.un.b.area == 0)) { |
5623 | if (ndlpdid.un.b.id && |
5624 | vport->phba->fc_topology == |
5625 | LPFC_TOPOLOGY_LOOP) |
5626 | return 1; |
5627 | } |
5628 | return 0; |
5629 | } |
5630 | |
5631 | matchdid.un.word = ndlp->nlp_DID; |
5632 | if ((mydid.un.b.domain == ndlpdid.un.b.domain) && |
5633 | (mydid.un.b.area == ndlpdid.un.b.area)) { |
5634 | if ((matchdid.un.b.domain == 0) && |
5635 | (matchdid.un.b.area == 0)) { |
5636 | if (matchdid.un.b.id) |
5637 | return 1; |
5638 | } |
5639 | } |
5640 | } |
5641 | return 0; |
5642 | } |
5643 | |
5644 | /* Search for a nodelist entry */ |
5645 | static struct lpfc_nodelist * |
5646 | __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) |
5647 | { |
5648 | struct lpfc_nodelist *ndlp; |
5649 | uint32_t data1; |
5650 | |
5651 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
5652 | if (lpfc_matchdid(vport, ndlp, did)) { |
5653 | data1 = (((uint32_t)ndlp->nlp_state << 24) | |
5654 | ((uint32_t)ndlp->nlp_xri << 16) | |
5655 | ((uint32_t)ndlp->nlp_type << 8) |
5656 | ); |
5657 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, |
5658 | "0929 FIND node DID " |
5659 | "Data: x%px x%x x%x x%x x%x x%px\n" , |
5660 | ndlp, ndlp->nlp_DID, |
5661 | ndlp->nlp_flag, data1, ndlp->nlp_rpi, |
5662 | ndlp->active_rrqs_xri_bitmap); |
5663 | return ndlp; |
5664 | } |
5665 | } |
5666 | |
5667 | /* FIND node did <did> NOT FOUND */ |
5668 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
5669 | "0932 FIND node did x%x NOT FOUND.\n" , did); |
5670 | return NULL; |
5671 | } |
5672 | |
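/* Locked wrapper around __lpfc_findnode_did() */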
5673 | struct lpfc_nodelist * |
5674 | lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) |
5675 | { |
5676 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
5677 | struct lpfc_nodelist *ndlp; |
5678 | unsigned long iflags; |
5679 | |
5680 | spin_lock_irqsave(shost->host_lock, iflags); |
5681 | ndlp = __lpfc_findnode_did(vport, did); |
spin_unlock_irqrestore(shost->host_lock, iflags);
5683 | return ndlp; |
5684 | } |
5685 | |
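/* Return the first node found in the UNMAPPED or MAPPED state, if any */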
5686 | struct lpfc_nodelist * |
5687 | lpfc_findnode_mapped(struct lpfc_vport *vport) |
5688 | { |
5689 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
5690 | struct lpfc_nodelist *ndlp; |
5691 | uint32_t data1; |
5692 | unsigned long iflags; |
5693 | |
5694 | spin_lock_irqsave(shost->host_lock, iflags); |
5695 | |
5696 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
5697 | if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || |
5698 | ndlp->nlp_state == NLP_STE_MAPPED_NODE) { |
5699 | data1 = (((uint32_t)ndlp->nlp_state << 24) | |
5700 | ((uint32_t)ndlp->nlp_xri << 16) | |
5701 | ((uint32_t)ndlp->nlp_type << 8) | |
5702 | ((uint32_t)ndlp->nlp_rpi & 0xff)); |
spin_unlock_irqrestore(shost->host_lock, iflags);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
"2025 FIND node DID MAPPED "
"Data: x%px x%x x%x x%x x%px\n",
5707 | ndlp, ndlp->nlp_DID, |
5708 | ndlp->nlp_flag, data1, |
5709 | ndlp->active_rrqs_xri_bitmap); |
5710 | return ndlp; |
5711 | } |
5712 | } |
spin_unlock_irqrestore(shost->host_lock, iflags);
5714 | |
5715 | /* FIND node did <did> NOT FOUND */ |
5716 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
5717 | "2026 FIND mapped did NOT FOUND.\n" ); |
5718 | return NULL; |
5719 | } |
5720 | |
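/*
 * Find or create the node for @did and mark it for discovery
 * (NLP_NPR_2B_DISC). Returns NULL when the node should not be
 * discovered, e.g. a DID outside the current RSCN payload or a node
 * already being discovered.
 */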
5721 | struct lpfc_nodelist * |
5722 | lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) |
5723 | { |
5724 | struct lpfc_nodelist *ndlp; |
5725 | |
5726 | ndlp = lpfc_findnode_did(vport, did); |
5727 | if (!ndlp) { |
5728 | if (vport->phba->nvmet_support) |
5729 | return NULL; |
5730 | if ((vport->fc_flag & FC_RSCN_MODE) != 0 && |
5731 | lpfc_rscn_payload_check(vport, did) == 0) |
5732 | return NULL; |
5733 | ndlp = lpfc_nlp_init(vport, did); |
5734 | if (!ndlp) |
5735 | return NULL; |
5736 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
5737 | |
5738 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5739 | "6453 Setup New Node 2B_DISC x%x " |
5740 | "Data:x%x x%x x%x\n" , |
5741 | ndlp->nlp_DID, ndlp->nlp_flag, |
5742 | ndlp->nlp_state, vport->fc_flag); |
5743 | |
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
5747 | return ndlp; |
5748 | } |
5749 | |
5750 | /* The NVME Target does not want to actively manage an rport. |
5751 | * The goal is to allow the target to reset its state and clear |
5752 | * pending IO in preparation for the initiator to recover. |
5753 | */ |
5754 | if ((vport->fc_flag & FC_RSCN_MODE) && |
5755 | !(vport->fc_flag & FC_NDISC_ACTIVE)) { |
5756 | if (lpfc_rscn_payload_check(vport, did)) { |
5757 | |
5758 | /* Since this node is marked for discovery, |
5759 | * delay timeout is not needed. |
5760 | */ |
5761 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
5762 | |
5763 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5764 | "6455 Setup RSCN Node 2B_DISC x%x " |
5765 | "Data:x%x x%x x%x\n" , |
5766 | ndlp->nlp_DID, ndlp->nlp_flag, |
5767 | ndlp->nlp_state, vport->fc_flag); |
5768 | |
5769 | /* NVME Target mode waits until rport is known to be |
5770 | * impacted by the RSCN before it transitions. No |
5771 | * active management - just go to NPR provided the |
5772 | * node had a valid login. |
5773 | */ |
5774 | if (vport->phba->nvmet_support) |
5775 | return ndlp; |
5776 | |
5777 | /* If we've already received a PLOGI from this NPort |
5778 | * we don't need to try to discover it again. |
5779 | */ |
5780 | if (ndlp->nlp_flag & NLP_RCV_PLOGI && |
5781 | !(ndlp->nlp_type & |
5782 | (NLP_FCP_TARGET | NLP_NVME_TARGET))) |
5783 | return NULL; |
5784 | |
5785 | if (ndlp->nlp_state > NLP_STE_UNUSED_NODE && |
5786 | ndlp->nlp_state < NLP_STE_PRLI_ISSUE) { |
5787 | lpfc_disc_state_machine(vport, ndlp, NULL, |
5788 | NLP_EVT_DEVICE_RECOVERY); |
5789 | } |
5790 | |
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
5794 | } else { |
5795 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5796 | "6456 Skip Setup RSCN Node x%x " |
5797 | "Data:x%x x%x x%x\n" , |
5798 | ndlp->nlp_DID, ndlp->nlp_flag, |
5799 | ndlp->nlp_state, vport->fc_flag); |
5800 | ndlp = NULL; |
5801 | } |
5802 | } else { |
5803 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5804 | "6457 Setup Active Node 2B_DISC x%x " |
5805 | "Data:x%x x%x x%x\n" , |
5806 | ndlp->nlp_DID, ndlp->nlp_flag, |
5807 | ndlp->nlp_state, vport->fc_flag); |
5808 | |
5809 | /* If the initiator received a PLOGI from this NPort or if the |
5810 | * initiator is already in the process of discovery on it, |
5811 | * there's no need to try to discover it again. |
5812 | */ |
5813 | if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || |
5814 | ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || |
5815 | (!vport->phba->nvmet_support && |
5816 | ndlp->nlp_flag & NLP_RCV_PLOGI)) |
5817 | return NULL; |
5818 | |
5819 | if (vport->phba->nvmet_support) |
5820 | return ndlp; |
5821 | |
5822 | /* Moving to NPR state clears unsolicited flags and |
5823 | * allows for rediscovery |
5824 | */ |
5825 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
5826 | |
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
5830 | } |
5831 | return ndlp; |
5832 | } |
5833 | |
5834 | /* Build a list of nodes to discover based on the loopmap */ |
5835 | void |
5836 | lpfc_disc_list_loopmap(struct lpfc_vport *vport) |
5837 | { |
5838 | struct lpfc_hba *phba = vport->phba; |
5839 | int j; |
5840 | uint32_t alpa, index; |
5841 | |
5842 | if (!lpfc_is_link_up(phba)) |
5843 | return; |
5844 | |
5845 | if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) |
5846 | return; |
5847 | |
5848 | /* Check for loop map present or not */ |
5849 | if (phba->alpa_map[0]) { |
5850 | for (j = 1; j <= phba->alpa_map[0]; j++) { |
5851 | alpa = phba->alpa_map[j]; |
5852 | if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0)) |
5853 | continue; |
lpfc_setup_disc_node(vport, alpa);
5855 | } |
5856 | } else { |
5857 | /* No alpamap, so try all alpa's */ |
5858 | for (j = 0; j < FC_MAXLOOP; j++) { |
5859 | /* If cfg_scan_down is set, start from highest |
5860 | * ALPA (0xef) to lowest (0x1). |
5861 | */ |
5862 | if (vport->cfg_scan_down) |
5863 | index = j; |
5864 | else |
5865 | index = FC_MAXLOOP - j - 1; |
5866 | alpa = lpfcAlpaArray[index]; |
5867 | if ((vport->fc_myDID & 0xff) == alpa) |
5868 | continue; |
lpfc_setup_disc_node(vport, alpa);
5870 | } |
5871 | } |
5872 | return; |
5873 | } |
5874 | |
5875 | /* SLI3 only */ |
5876 | void |
5877 | lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) |
5878 | { |
5879 | LPFC_MBOXQ_t *mbox; |
5880 | struct lpfc_sli *psli = &phba->sli; |
struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
5882 | struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING]; |
5883 | int rc; |
5884 | |
5885 | /* |
5886 | * if it's not a physical port or if we already send |
5887 | * clear_la then don't send it. |
5888 | */ |
5889 | if ((phba->link_state >= LPFC_CLEAR_LA) || |
5890 | (vport->port_type != LPFC_PHYSICAL_PORT) || |
5891 | (phba->sli_rev == LPFC_SLI_REV4)) |
5892 | return; |
5893 | |
5894 | /* Link up discovery */ |
if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
5896 | phba->link_state = LPFC_CLEAR_LA; |
5897 | lpfc_clear_la(phba, mbox); |
5898 | mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; |
5899 | mbox->vport = vport; |
5900 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
5901 | if (rc == MBX_NOT_FINISHED) { |
mempool_free(mbox, phba->mbox_mem_pool);
5903 | lpfc_disc_flush_list(vport); |
5904 | extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; |
5905 | fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; |
5906 | phba->link_state = LPFC_HBA_ERROR; |
5907 | } |
5908 | } |
5909 | } |
5910 | |
5911 | /* Reg_vpi to tell firmware to resume normal operations */ |
5912 | void |
5913 | lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) |
5914 | { |
5915 | LPFC_MBOXQ_t *regvpimbox; |
5916 | |
regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5918 | if (regvpimbox) { |
5919 | lpfc_reg_vpi(vport, regvpimbox); |
5920 | regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; |
5921 | regvpimbox->vport = vport; |
5922 | if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) |
5923 | == MBX_NOT_FINISHED) { |
mempool_free(regvpimbox, phba->mbox_mem_pool);
5925 | } |
5926 | } |
5927 | } |
5928 | |
5929 | /* Start Link up / RSCN discovery on NPR nodes */ |
5930 | void |
5931 | lpfc_disc_start(struct lpfc_vport *vport) |
5932 | { |
5933 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
5934 | struct lpfc_hba *phba = vport->phba; |
5935 | uint32_t num_sent; |
5936 | uint32_t clear_la_pending; |
5937 | |
5938 | if (!lpfc_is_link_up(phba)) { |
5939 | lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, |
5940 | "3315 Link is not up %x\n" , |
5941 | phba->link_state); |
5942 | return; |
5943 | } |
5944 | |
5945 | if (phba->link_state == LPFC_CLEAR_LA) |
5946 | clear_la_pending = 1; |
5947 | else |
5948 | clear_la_pending = 0; |
5949 | |
5950 | if (vport->port_state < LPFC_VPORT_READY) |
5951 | vport->port_state = LPFC_DISC_AUTH; |
5952 | |
5953 | lpfc_set_disctmo(vport); |
5954 | |
5955 | vport->fc_prevDID = vport->fc_myDID; |
5956 | vport->num_disc_nodes = 0; |
5957 | |
5958 | /* Start Discovery state <hba_state> */ |
5959 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
5960 | "0202 Start Discovery port state x%x " |
5961 | "flg x%x Data: x%x x%x x%x\n" , |
5962 | vport->port_state, vport->fc_flag, vport->fc_plogi_cnt, |
5963 | vport->fc_adisc_cnt, vport->fc_npr_cnt); |
5964 | |
5965 | /* First do ADISCs - if any */ |
5966 | num_sent = lpfc_els_disc_adisc(vport); |
5967 | |
5968 | if (num_sent) |
5969 | return; |
5970 | |
5971 | /* Register the VPI for SLI3, NPIV only. */ |
5972 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && |
5973 | !(vport->fc_flag & FC_PT2PT) && |
5974 | !(vport->fc_flag & FC_RSCN_MODE) && |
5975 | (phba->sli_rev < LPFC_SLI_REV4)) { |
5976 | lpfc_issue_clear_la(phba, vport); |
5977 | lpfc_issue_reg_vpi(phba, vport); |
5978 | return; |
5979 | } |
5980 | |
5981 | /* |
5982 | * For SLI2, we need to set port_state to READY and continue |
5983 | * discovery. |
5984 | */ |
5985 | if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) { |
5986 | /* If we get here, there is nothing to ADISC */ |
5987 | lpfc_issue_clear_la(phba, vport); |
5988 | |
5989 | if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { |
5990 | vport->num_disc_nodes = 0; |
5991 | /* go thru NPR nodes and issue ELS PLOGIs */ |
5992 | if (vport->fc_npr_cnt) |
5993 | lpfc_els_disc_plogi(vport); |
5994 | |
5995 | if (!vport->num_disc_nodes) { |
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NDISC_ACTIVE;
spin_unlock_irq(shost->host_lock);
5999 | lpfc_can_disctmo(vport); |
6000 | } |
6001 | } |
6002 | vport->port_state = LPFC_VPORT_READY; |
6003 | } else { |
6004 | /* Next do PLOGIs - if any */ |
6005 | num_sent = lpfc_els_disc_plogi(vport); |
6006 | |
6007 | if (num_sent) |
6008 | return; |
6009 | |
6010 | if (vport->fc_flag & FC_RSCN_MODE) { |
6011 | /* Check to see if more RSCNs came in while we |
6012 | * were processing this one. |
6013 | */ |
6014 | if ((vport->fc_rscn_id_cnt == 0) && |
6015 | (!(vport->fc_flag & FC_RSCN_DISCOVERY))) { |
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_RSCN_MODE;
spin_unlock_irq(shost->host_lock);
6019 | lpfc_can_disctmo(vport); |
6020 | } else |
6021 | lpfc_els_handle_rscn(vport); |
6022 | } |
6023 | } |
6024 | return; |
6025 | } |
6026 | |
6027 | /* |
* Ignore completion for all IOCBs on tx and txcmpl queue for ELS
* ring that match the specified nodelist.
6030 | */ |
6031 | static void |
6032 | lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) |
6033 | { |
6034 | LIST_HEAD(completions); |
6035 | struct lpfc_iocbq *iocb, *next_iocb; |
6036 | struct lpfc_sli_ring *pring; |
6037 | u32 ulp_command; |
6038 | |
6039 | pring = lpfc_phba_elsring(phba); |
6040 | if (unlikely(!pring)) |
6041 | return; |
6042 | |
6043 | /* Error matching iocb on txq or txcmplq |
6044 | * First check the txq. |
6045 | */ |
spin_lock_irq(&phba->hbalock);
6047 | list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { |
6048 | if (iocb->ndlp != ndlp) |
6049 | continue; |
6050 | |
ulp_command = get_job_cmnd(phba, iocb);
6052 | |
6053 | if (ulp_command == CMD_ELS_REQUEST64_CR || |
6054 | ulp_command == CMD_XMIT_ELS_RSP64_CX) { |
6055 | |
list_move_tail(&iocb->list, &completions);
6057 | } |
6058 | } |
6059 | |
6060 | /* Next check the txcmplq */ |
6061 | list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { |
6062 | if (iocb->ndlp != ndlp) |
6063 | continue; |
6064 | |
ulp_command = get_job_cmnd(phba, iocb);
6066 | |
6067 | if (ulp_command == CMD_ELS_REQUEST64_CR || |
6068 | ulp_command == CMD_XMIT_ELS_RSP64_CX) { |
6069 | lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); |
6070 | } |
6071 | } |
spin_unlock_irq(&phba->hbalock);
6073 | |
6074 | /* Make sure HBA is alive */ |
6075 | lpfc_issue_hb_tmo(phba); |
6076 | |
6077 | /* Cancel all the IOCBs from the completions list */ |
6078 | lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, |
6079 | IOERR_SLI_ABORTED); |
6080 | } |
6081 | |
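/*
 * Flush outstanding ELS I/O for any nodes still in the PLOGI or ADISC
 * state when discovery is torn down.
 */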
6082 | static void |
6083 | lpfc_disc_flush_list(struct lpfc_vport *vport) |
6084 | { |
6085 | struct lpfc_nodelist *ndlp, *next_ndlp; |
6086 | struct lpfc_hba *phba = vport->phba; |
6087 | |
6088 | if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { |
6089 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
6090 | nlp_listp) { |
6091 | if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || |
6092 | ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { |
6093 | lpfc_free_tx(phba, ndlp); |
6094 | } |
6095 | } |
6096 | } |
6097 | } |
6098 | |
6099 | /* |
6100 | * lpfc_notify_xport_npr - notifies xport of node disappearance |
6101 | * @vport: Pointer to Virtual Port object. |
6102 | * |
6103 | * Transitions all ndlps to NPR state. When lpfc_nlp_set_state |
6104 | * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered |
6105 | * and transport notified that the node is gone. |
6106 | * Return Code: |
6107 | * none |
6108 | */ |
6109 | static void |
6110 | lpfc_notify_xport_npr(struct lpfc_vport *vport) |
6111 | { |
6112 | struct lpfc_nodelist *ndlp, *next_ndlp; |
6113 | |
6114 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
6115 | nlp_listp) { |
6116 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
6117 | } |
6118 | } |

void
6120 | lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) |
6121 | { |
6122 | lpfc_els_flush_rscn(vport); |
6123 | lpfc_els_flush_cmd(vport); |
6124 | lpfc_disc_flush_list(vport); |
	if (pci_channel_offline(vport->phba->pcidev))
6126 | lpfc_notify_xport_npr(vport); |
6127 | } |
6128 | |
6129 | /*****************************************************************************/ |
6130 | /* |
6131 | * NAME: lpfc_disc_timeout |
6132 | * |
6133 | * FUNCTION: Fibre Channel driver discovery timeout routine. |
6134 | * |
6135 | * EXECUTION ENVIRONMENT: interrupt only |
6136 | * |
6137 | * CALLED FROM: |
6138 | * Timer function |
6139 | * |
6140 | * RETURNS: |
6141 | * none |
6142 | */ |
6143 | /*****************************************************************************/ |
6144 | void |
6145 | lpfc_disc_timeout(struct timer_list *t) |
6146 | { |
6147 | struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo); |
6148 | struct lpfc_hba *phba = vport->phba; |
6149 | uint32_t tmo_posted; |
6150 | unsigned long flags = 0; |
6151 | |
6152 | if (unlikely(!phba)) |
6153 | return; |
6154 | |
6155 | spin_lock_irqsave(&vport->work_port_lock, flags); |
6156 | tmo_posted = vport->work_port_events & WORKER_DISC_TMO; |
6157 | if (!tmo_posted) |
6158 | vport->work_port_events |= WORKER_DISC_TMO; |
	spin_unlock_irqrestore(&vport->work_port_lock, flags);
6160 | |
6161 | if (!tmo_posted) |
6162 | lpfc_worker_wake_up(phba); |
6163 | return; |
6164 | } |
6165 | |
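/*
 * lpfc_disc_timeout_handler - Worker-thread handler for a discovery timeout.
 *
 * Clears FC_DISC_TMO and drives recovery based on the vport's port_state,
 * then checks the hba link_state for a CLEAR LA timeout condition.
 */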
6166 | static void |
6167 | lpfc_disc_timeout_handler(struct lpfc_vport *vport) |
6168 | { |
6169 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
6170 | struct lpfc_hba *phba = vport->phba; |
6171 | struct lpfc_sli *psli = &phba->sli; |
6172 | struct lpfc_nodelist *ndlp, *next_ndlp; |
6173 | LPFC_MBOXQ_t *initlinkmbox; |
6174 | int rc, clrlaerr = 0; |
6175 | |
6176 | if (!(vport->fc_flag & FC_DISC_TMO)) |
6177 | return; |
6178 | |
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);
6182 | |
6183 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
6184 | "disc timeout: state:x%x rtry:x%x flg:x%x" , |
6185 | vport->port_state, vport->fc_ns_retry, vport->fc_flag); |
6186 | |
6187 | switch (vport->port_state) { |
6188 | |
6189 | case LPFC_LOCAL_CFG_LINK: |
6190 | /* |
6191 | * port_state is identically LPFC_LOCAL_CFG_LINK while |
6192 | * waiting for FAN timeout |
6193 | */ |
6194 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, |
6195 | "0221 FAN timeout\n" ); |
6196 | |
6197 | /* Start discovery by sending FLOGI, clean up old rpis */ |
6198 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
6199 | nlp_listp) { |
6200 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) |
6201 | continue; |
6202 | if (ndlp->nlp_type & NLP_FABRIC) { |
6203 | /* Clean up the ndlp on Fabric connections */ |
6204 | lpfc_drop_node(vport, ndlp); |
6205 | |
6206 | } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { |
6207 | /* Fail outstanding IO now since device |
6208 | * is marked for PLOGI. |
6209 | */ |
6210 | lpfc_unreg_rpi(vport, ndlp); |
6211 | } |
6212 | } |
6213 | if (vport->port_state != LPFC_FLOGI) { |
6214 | if (phba->sli_rev <= LPFC_SLI_REV3) |
6215 | lpfc_initial_flogi(vport); |
6216 | else |
6217 | lpfc_issue_init_vfi(vport); |
6218 | return; |
6219 | } |
6220 | break; |
6221 | |
6222 | case LPFC_FDISC: |
6223 | case LPFC_FLOGI: |
6224 | /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ |
6225 | /* Initial FLOGI timeout */ |
6226 | lpfc_printf_vlog(vport, KERN_ERR, |
6227 | LOG_TRACE_EVENT, |
6228 | "0222 Initial %s timeout\n" , |
6229 | vport->vpi ? "FDISC" : "FLOGI" ); |
6230 | |
6231 | /* Assume no Fabric and go on with discovery. |
6232 | * Check for outstanding ELS FLOGI to abort. |
6233 | */ |
6234 | |
6235 | /* FLOGI failed, so just use loop map to make discovery list */ |
6236 | lpfc_disc_list_loopmap(vport); |
6237 | |
6238 | /* Start discovery */ |
6239 | lpfc_disc_start(vport); |
6240 | break; |
6241 | |
6242 | case LPFC_FABRIC_CFG_LINK: |
		/* port_state is identically LPFC_FABRIC_CFG_LINK while waiting for
		   NameServer login */
6245 | lpfc_printf_vlog(vport, KERN_ERR, |
6246 | LOG_TRACE_EVENT, |
6247 | "0223 Timeout while waiting for " |
6248 | "NameServer login\n" ); |
6249 | /* Next look for NameServer ndlp */ |
6250 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
6251 | if (ndlp) |
6252 | lpfc_els_abort(phba, ndlp); |
6253 | |
6254 | /* ReStart discovery */ |
6255 | goto restart_disc; |
6256 | |
6257 | case LPFC_NS_QRY: |
6258 | /* Check for wait for NameServer Rsp timeout */ |
6259 | lpfc_printf_vlog(vport, KERN_ERR, |
6260 | LOG_TRACE_EVENT, |
6261 | "0224 NameServer Query timeout " |
6262 | "Data: x%x x%x\n" , |
6263 | vport->fc_ns_retry, LPFC_MAX_NS_RETRY); |
6264 | |
6265 | if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { |
6266 | /* Try it one more time */ |
6267 | vport->fc_ns_retry++; |
6268 | vport->gidft_inp = 0; |
6269 | rc = lpfc_issue_gidft(vport); |
6270 | if (rc == 0) |
6271 | break; |
6272 | } |
6273 | vport->fc_ns_retry = 0; |
6274 | |
6275 | restart_disc: |
6276 | /* |
6277 | * Discovery is over. |
6278 | * set port_state to PORT_READY if SLI2. |
6279 | * cmpl_reg_vpi will set port_state to READY for SLI3. |
6280 | */ |
6281 | if (phba->sli_rev < LPFC_SLI_REV4) { |
6282 | if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) |
6283 | lpfc_issue_reg_vpi(phba, vport); |
6284 | else { |
6285 | lpfc_issue_clear_la(phba, vport); |
6286 | vport->port_state = LPFC_VPORT_READY; |
6287 | } |
6288 | } |
6289 | |
6290 | /* Setup and issue mailbox INITIALIZE LINK command */ |
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6292 | if (!initlinkmbox) { |
6293 | lpfc_printf_vlog(vport, KERN_ERR, |
6294 | LOG_TRACE_EVENT, |
6295 | "0206 Device Discovery " |
6296 | "completion error\n" ); |
6297 | phba->link_state = LPFC_HBA_ERROR; |
6298 | break; |
6299 | } |
6300 | |
6301 | lpfc_linkdown(phba); |
6302 | lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, |
6303 | phba->cfg_link_speed); |
6304 | initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; |
6305 | initlinkmbox->vport = vport; |
6306 | initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
6307 | rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); |
6308 | lpfc_set_loopback_flag(phba); |
6309 | if (rc == MBX_NOT_FINISHED) |
			mempool_free(initlinkmbox, phba->mbox_mem_pool);
6311 | |
6312 | break; |
6313 | |
6314 | case LPFC_DISC_AUTH: |
6315 | /* Node Authentication timeout */ |
6316 | lpfc_printf_vlog(vport, KERN_ERR, |
6317 | LOG_TRACE_EVENT, |
6318 | "0227 Node Authentication timeout\n" ); |
6319 | lpfc_disc_flush_list(vport); |
6320 | |
6321 | /* |
6322 | * set port_state to PORT_READY if SLI2. |
6323 | * cmpl_reg_vpi will set port_state to READY for SLI3. |
6324 | */ |
6325 | if (phba->sli_rev < LPFC_SLI_REV4) { |
6326 | if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) |
6327 | lpfc_issue_reg_vpi(phba, vport); |
6328 | else { /* NPIV Not enabled */ |
6329 | lpfc_issue_clear_la(phba, vport); |
6330 | vport->port_state = LPFC_VPORT_READY; |
6331 | } |
6332 | } |
6333 | break; |
6334 | |
6335 | case LPFC_VPORT_READY: |
6336 | if (vport->fc_flag & FC_RSCN_MODE) { |
6337 | lpfc_printf_vlog(vport, KERN_ERR, |
6338 | LOG_TRACE_EVENT, |
6339 | "0231 RSCN timeout Data: x%x " |
6340 | "x%x x%x x%x\n" , |
6341 | vport->fc_ns_retry, LPFC_MAX_NS_RETRY, |
6342 | vport->port_state, vport->gidft_inp); |
6343 | |
6344 | /* Cleanup any outstanding ELS commands */ |
6345 | lpfc_els_flush_cmd(vport); |
6346 | |
6347 | lpfc_els_flush_rscn(vport); |
6348 | lpfc_disc_flush_list(vport); |
6349 | } |
6350 | break; |
6351 | |
6352 | default: |
6353 | lpfc_printf_vlog(vport, KERN_ERR, |
6354 | LOG_TRACE_EVENT, |
6355 | "0273 Unexpected discovery timeout, " |
6356 | "vport State x%x\n" , vport->port_state); |
6357 | break; |
6358 | } |
6359 | |
6360 | switch (phba->link_state) { |
6361 | case LPFC_CLEAR_LA: |
6362 | /* CLEAR LA timeout */ |
6363 | lpfc_printf_vlog(vport, KERN_ERR, |
6364 | LOG_TRACE_EVENT, |
6365 | "0228 CLEAR LA timeout\n" ); |
6366 | clrlaerr = 1; |
6367 | break; |
6368 | |
6369 | case LPFC_LINK_UP: |
6370 | lpfc_issue_clear_la(phba, vport); |
6371 | fallthrough; |
6372 | case LPFC_LINK_UNKNOWN: |
6373 | case LPFC_WARM_START: |
6374 | case LPFC_INIT_START: |
6375 | case LPFC_INIT_MBX_CMDS: |
6376 | case LPFC_LINK_DOWN: |
6377 | case LPFC_HBA_ERROR: |
6378 | lpfc_printf_vlog(vport, KERN_ERR, |
6379 | LOG_TRACE_EVENT, |
6380 | "0230 Unexpected timeout, hba link " |
6381 | "state x%x\n" , phba->link_state); |
6382 | clrlaerr = 1; |
6383 | break; |
6384 | |
6385 | case LPFC_HBA_READY: |
6386 | break; |
6387 | } |
6388 | |
6389 | if (clrlaerr) { |
6390 | lpfc_disc_flush_list(vport); |
6391 | if (phba->sli_rev != LPFC_SLI_REV4) { |
6392 | psli->sli3_ring[(LPFC_EXTRA_RING)].flag &= |
6393 | ~LPFC_STOP_IOCB_EVENT; |
6394 | psli->sli3_ring[LPFC_FCP_RING].flag &= |
6395 | ~LPFC_STOP_IOCB_EVENT; |
6396 | } |
6397 | vport->port_state = LPFC_VPORT_READY; |
6398 | } |
6399 | return; |
6400 | } |
6401 | |
6402 | /* |
6403 | * This routine handles processing a NameServer REG_LOGIN mailbox |
6404 | * command upon completion. It is setup in the LPFC_MBOXQ |
6405 | * as the completion routine when the command is |
6406 | * handed off to the SLI layer. |
6407 | */ |
6408 | void |
6409 | lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
6410 | { |
6411 | MAILBOX_t *mb = &pmb->u.mb; |
6412 | struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; |
6413 | struct lpfc_vport *vport = pmb->vport; |
6414 | |
6415 | pmb->ctx_ndlp = NULL; |
6416 | |
6417 | if (phba->sli_rev < LPFC_SLI_REV4) |
6418 | ndlp->nlp_rpi = mb->un.varWords[0]; |
6419 | ndlp->nlp_flag |= NLP_RPI_REGISTERED; |
6420 | ndlp->nlp_type |= NLP_FABRIC; |
6421 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
6422 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, |
6423 | "0004 rpi:%x DID:%x flg:%x %d x%px\n" , |
6424 | ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, |
6425 | kref_read(&ndlp->kref), |
6426 | ndlp); |
6427 | /* |
6428 | * Start issuing Fabric-Device Management Interface (FDMI) command to |
6429 | * 0xfffffa (FDMI well known port). |
6430 | * DHBA -> DPRT -> RHBA -> RPA (physical port) |
6431 | * DPRT -> RPRT (vports) |
6432 | */ |
6433 | if (vport->port_type == LPFC_PHYSICAL_PORT) { |
6434 | phba->link_flag &= ~LS_CT_VEN_RPA; /* For extra Vendor RPA */ |
6435 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); |
6436 | } else { |
6437 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); |
6438 | } |
6439 | |
6440 | |
6441 | /* decrement the node reference count held for this callback |
6442 | * function. |
6443 | */ |
6444 | lpfc_nlp_put(ndlp); |
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
6446 | return; |
6447 | } |
6448 | |
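/* node_filter callback: match an ndlp by its RPI */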
6449 | static int |
6450 | lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param) |
6451 | { |
6452 | uint16_t *rpi = param; |
6453 | |
6454 | return ndlp->nlp_rpi == *rpi; |
6455 | } |
6456 | |
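/* node_filter callback: match an ndlp by its WWPN */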
6457 | static int |
6458 | lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param) |
6459 | { |
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
6462 | } |
6463 | |
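/*
 * __lpfc_find_node - Return the first ndlp on the vport's node list that the
 * filter callback accepts, or NULL if no node matches. The leading
 * underscores indicate the caller is expected to hold the host lock, as in
 * lpfc_findnode_wwpn and lpfc_findnode_rpi below.
 */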
6464 | static struct lpfc_nodelist * |
6465 | __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) |
6466 | { |
6467 | struct lpfc_nodelist *ndlp; |
6468 | |
6469 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
6470 | if (filter(ndlp, param)) { |
6471 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, |
6472 | "3185 FIND node filter %ps DID " |
6473 | "ndlp x%px did x%x flg x%x st x%x " |
6474 | "xri x%x type x%x rpi x%x\n" , |
6475 | filter, ndlp, ndlp->nlp_DID, |
6476 | ndlp->nlp_flag, ndlp->nlp_state, |
6477 | ndlp->nlp_xri, ndlp->nlp_type, |
6478 | ndlp->nlp_rpi); |
6479 | return ndlp; |
6480 | } |
6481 | } |
6482 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
6483 | "3186 FIND node filter %ps NOT FOUND.\n" , filter); |
6484 | return NULL; |
6485 | } |
6486 | |
6487 | /* |
6488 | * This routine looks up the ndlp lists for the given RPI. If rpi found it |
6489 | * returns the node list element pointer else return NULL. |
6490 | */ |
6491 | struct lpfc_nodelist * |
6492 | __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) |
6493 | { |
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
6495 | } |
6496 | |
6497 | /* |
6498 | * This routine looks up the ndlp lists for the given WWPN. If WWPN found it |
6499 | * returns the node element list pointer else return NULL. |
6500 | */ |
6501 | struct lpfc_nodelist * |
6502 | lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn) |
6503 | { |
6504 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
6505 | struct lpfc_nodelist *ndlp; |
6506 | |
	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
6510 | return ndlp; |
6511 | } |
6512 | |
6513 | /* |
6514 | * This routine looks up the ndlp lists for the given RPI. If the rpi |
6515 | * is found, the routine returns the node element list pointer else |
6516 | * return NULL. |
6517 | */ |
6518 | struct lpfc_nodelist * |
6519 | lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) |
6520 | { |
6521 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
6522 | struct lpfc_nodelist *ndlp; |
6523 | unsigned long flags; |
6524 | |
6525 | spin_lock_irqsave(shost->host_lock, flags); |
6526 | ndlp = __lpfc_findnode_rpi(vport, rpi); |
	spin_unlock_irqrestore(shost->host_lock, flags);
6528 | return ndlp; |
6529 | } |
6530 | |
6531 | /** |
6532 | * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier |
6533 | * @phba: pointer to lpfc hba data structure. |
6534 | * @vpi: the physical host virtual N_Port identifier. |
6535 | * |
6536 | * This routine finds a vport on a HBA (referred by @phba) through a |
6537 | * @vpi. The function walks the HBA's vport list and returns the address |
6538 | * of the vport with the matching @vpi. |
6539 | * |
6540 | * Return code |
6541 | * NULL - No vport with the matching @vpi found |
6542 | * Otherwise - Address to the vport with the matching @vpi. |
6543 | **/ |
6544 | struct lpfc_vport * |
6545 | lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) |
6546 | { |
6547 | struct lpfc_vport *vport; |
6548 | unsigned long flags; |
6549 | int i = 0; |
6550 | |
6551 | /* The physical ports are always vpi 0 - translate is unnecessary. */ |
6552 | if (vpi > 0) { |
6553 | /* |
6554 | * Translate the physical vpi to the logical vpi. The |
6555 | * vport stores the logical vpi. |
6556 | */ |
6557 | for (i = 0; i <= phba->max_vpi; i++) { |
6558 | if (vpi == phba->vpi_ids[i]) |
6559 | break; |
6560 | } |
6561 | |
6562 | if (i > phba->max_vpi) { |
6563 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
6564 | "2936 Could not find Vport mapped " |
6565 | "to vpi %d\n" , vpi); |
6566 | return NULL; |
6567 | } |
6568 | } |
6569 | |
6570 | spin_lock_irqsave(&phba->port_list_lock, flags); |
6571 | list_for_each_entry(vport, &phba->port_list, listentry) { |
6572 | if (vport->vpi == i) { |
			spin_unlock_irqrestore(&phba->port_list_lock, flags);
6574 | return vport; |
6575 | } |
6576 | } |
	spin_unlock_irqrestore(&phba->port_list_lock, flags);
6578 | return NULL; |
6579 | } |
6580 | |
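/*
 * lpfc_nlp_init - Allocate and initialize a nodelist entry for the given DID.
 *
 * For SLI4 ports an RPI is allocated first and is freed again if the ndlp
 * mempool allocation fails. Returns the new ndlp, or NULL on any failure.
 */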
6581 | struct lpfc_nodelist * |
6582 | lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) |
6583 | { |
6584 | struct lpfc_nodelist *ndlp; |
6585 | int rpi = LPFC_RPI_ALLOC_ERROR; |
6586 | |
6587 | if (vport->phba->sli_rev == LPFC_SLI_REV4) { |
6588 | rpi = lpfc_sli4_alloc_rpi(vport->phba); |
6589 | if (rpi == LPFC_RPI_ALLOC_ERROR) |
6590 | return NULL; |
6591 | } |
6592 | |
	ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
6594 | if (!ndlp) { |
6595 | if (vport->phba->sli_rev == LPFC_SLI_REV4) |
6596 | lpfc_sli4_free_rpi(vport->phba, rpi); |
6597 | return NULL; |
6598 | } |
6599 | |
6600 | memset(ndlp, 0, sizeof (struct lpfc_nodelist)); |
6601 | |
6602 | spin_lock_init(&ndlp->lock); |
6603 | |
6604 | lpfc_initialize_node(vport, ndlp, did); |
	INIT_LIST_HEAD(&ndlp->nlp_listp);
6606 | if (vport->phba->sli_rev == LPFC_SLI_REV4) { |
6607 | ndlp->nlp_rpi = rpi; |
6608 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, |
6609 | "0007 Init New ndlp x%px, rpi:x%x DID:%x " |
6610 | "flg:x%x refcnt:%d\n" , |
6611 | ndlp, ndlp->nlp_rpi, ndlp->nlp_DID, |
6612 | ndlp->nlp_flag, kref_read(&ndlp->kref)); |
6613 | |
6614 | ndlp->active_rrqs_xri_bitmap = |
			mempool_alloc(vport->phba->active_rrq_pool,
6616 | GFP_KERNEL); |
6617 | if (ndlp->active_rrqs_xri_bitmap) |
6618 | memset(ndlp->active_rrqs_xri_bitmap, 0, |
6619 | ndlp->phba->cfg_rrq_xri_bitmap_sz); |
6620 | } |
6621 | |
6622 | |
6623 | |
6624 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, |
6625 | "node init: did:x%x" , |
6626 | ndlp->nlp_DID, 0, 0); |
6627 | |
6628 | return ndlp; |
6629 | } |
6630 | |
/* This routine releases all resources associated with a specific NPort's ndlp
6632 | * and mempool_free's the nodelist. |
6633 | */ |
6634 | static void |
6635 | lpfc_nlp_release(struct kref *kref) |
6636 | { |
6637 | struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, |
6638 | kref); |
6639 | struct lpfc_vport *vport = ndlp->vport; |
6640 | |
6641 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, |
6642 | "node release: did:x%x flg:x%x type:x%x" , |
6643 | ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); |
6644 | |
6645 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
6646 | "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n" , |
6647 | __func__, ndlp, ndlp->nlp_DID, |
6648 | kref_read(&ndlp->kref), ndlp->nlp_rpi); |
6649 | |
6650 | /* remove ndlp from action. */ |
6651 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
6652 | lpfc_cleanup_node(vport, ndlp); |
6653 | |
6654 | /* Not all ELS transactions have registered the RPI with the port. |
6655 | * In these cases the rpi usage is temporary and the node is |
6656 | * released when the WQE is completed. Catch this case to free the |
6657 | * RPI to the pool. Because this node is in the release path, a lock |
6658 | * is unnecessary. All references are gone and the node has been |
6659 | * dequeued. |
6660 | */ |
6661 | if (ndlp->nlp_flag & NLP_RELEASE_RPI) { |
6662 | if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR && |
6663 | !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) { |
6664 | lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); |
6665 | ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; |
6666 | } |
6667 | } |
6668 | |
6669 | /* The node is not freed back to memory, it is released to a pool so |
6670 | * the node fields need to be cleaned up. |
6671 | */ |
6672 | ndlp->vport = NULL; |
6673 | ndlp->nlp_state = NLP_STE_FREED_NODE; |
6674 | ndlp->nlp_flag = 0; |
6675 | ndlp->fc4_xpt_flags = 0; |
6676 | |
6677 | /* free ndlp memory for final ndlp release */ |
6678 | if (ndlp->phba->sli_rev == LPFC_SLI_REV4) |
		mempool_free(ndlp->active_rrqs_xri_bitmap,
			     ndlp->phba->active_rrq_pool);
	mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
6682 | } |
6683 | |
6684 | /* This routine bumps the reference count for a ndlp structure to ensure |
6685 | * that one discovery thread won't free a ndlp while another discovery thread |
6686 | * is using it. |
6687 | */ |
6688 | struct lpfc_nodelist * |
6689 | lpfc_nlp_get(struct lpfc_nodelist *ndlp) |
6690 | { |
6691 | unsigned long flags; |
6692 | |
6693 | if (ndlp) { |
6694 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, |
6695 | "node get: did:x%x flg:x%x refcnt:x%x" , |
6696 | ndlp->nlp_DID, ndlp->nlp_flag, |
6697 | kref_read(kref: &ndlp->kref)); |
6698 | |
6699 | /* The check of ndlp usage to prevent incrementing the |
6700 | * ndlp reference count that is in the process of being |
6701 | * released. |
6702 | */ |
6703 | spin_lock_irqsave(&ndlp->lock, flags); |
		if (!kref_get_unless_zero(&ndlp->kref)) {
			spin_unlock_irqrestore(&ndlp->lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
					 "0276 %s: ndlp:x%px refcnt:%d\n",
					 __func__, (void *)ndlp, kref_read(&ndlp->kref));
			return NULL;
		}
		spin_unlock_irqrestore(&ndlp->lock, flags);
6712 | } else { |
6713 | WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!" , __func__); |
6714 | } |
6715 | |
6716 | return ndlp; |
6717 | } |
6718 | |
6719 | /* This routine decrements the reference count for a ndlp structure. If the |
6720 | * count goes to 0, this indicates the associated nodelist should be freed. |
6721 | */ |
6722 | int |
6723 | lpfc_nlp_put(struct lpfc_nodelist *ndlp) |
6724 | { |
6725 | if (ndlp) { |
6726 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, |
6727 | "node put: did:x%x flg:x%x refcnt:x%x" , |
6728 | ndlp->nlp_DID, ndlp->nlp_flag, |
6729 | kref_read(kref: &ndlp->kref)); |
6730 | } else { |
6731 | WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!" , __func__); |
6732 | } |
6733 | |
	return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
6735 | } |
6736 | |
6737 | /** |
6738 | * lpfc_fcf_inuse - Check if FCF can be unregistered. |
6739 | * @phba: Pointer to hba context object. |
6740 | * |
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
6743 | * fc_rports associated with it. If there is an fc_rport |
6744 | * associated with the node, then the node is either in |
6745 | * discovered state or its devloss_timer is pending. |
6746 | */ |
6747 | static int |
6748 | lpfc_fcf_inuse(struct lpfc_hba *phba) |
6749 | { |
6750 | struct lpfc_vport **vports; |
6751 | int i, ret = 0; |
6752 | struct lpfc_nodelist *ndlp; |
6753 | struct Scsi_Host *shost; |
6754 | |
6755 | vports = lpfc_create_vport_work_array(phba); |
6756 | |
6757 | /* If driver cannot allocate memory, indicate fcf is in use */ |
6758 | if (!vports) |
6759 | return 1; |
6760 | |
6761 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
6764 | /* |
6765 | * IF the CVL_RCVD bit is not set then we have sent the |
6766 | * flogi. |
6767 | * If dev_loss fires while we are waiting we do not want to |
6768 | * unreg the fcf. |
6769 | */ |
6770 | if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) { |
			spin_unlock_irq(shost->host_lock);
6772 | ret = 1; |
6773 | goto out; |
6774 | } |
6775 | list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { |
6776 | if (ndlp->rport && |
6777 | (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { |
6778 | ret = 1; |
				spin_unlock_irq(shost->host_lock);
6780 | goto out; |
6781 | } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { |
6782 | ret = 1; |
6783 | lpfc_printf_log(phba, KERN_INFO, |
6784 | LOG_NODE | LOG_DISCOVERY, |
6785 | "2624 RPI %x DID %x flag %x " |
6786 | "still logged in\n" , |
6787 | ndlp->nlp_rpi, ndlp->nlp_DID, |
6788 | ndlp->nlp_flag); |
6789 | } |
6790 | } |
		spin_unlock_irq(shost->host_lock);
6792 | } |
6793 | out: |
6794 | lpfc_destroy_vport_work_array(phba, vports); |
6795 | return ret; |
6796 | } |
6797 | |
6798 | /** |
6799 | * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi. |
6800 | * @phba: Pointer to hba context object. |
6801 | * @mboxq: Pointer to mailbox object. |
6802 | * |
6803 | * This function frees memory associated with the mailbox command. |
6804 | */ |
6805 | void |
6806 | lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) |
6807 | { |
6808 | struct lpfc_vport *vport = mboxq->vport; |
6809 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
6810 | |
6811 | if (mboxq->u.mb.mbxStatus) { |
6812 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
6813 | "2555 UNREG_VFI mbxStatus error x%x " |
6814 | "HBA state x%x\n" , |
6815 | mboxq->u.mb.mbxStatus, vport->port_state); |
6816 | } |
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
6821 | return; |
6822 | } |
6823 | |
6824 | /** |
6825 | * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi. |
6826 | * @phba: Pointer to hba context object. |
6827 | * @mboxq: Pointer to mailbox object. |
6828 | * |
6829 | * This function frees memory associated with the mailbox command. |
6830 | */ |
6831 | static void |
6832 | lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) |
6833 | { |
6834 | struct lpfc_vport *vport = mboxq->vport; |
6835 | |
6836 | if (mboxq->u.mb.mbxStatus) { |
6837 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
6838 | "2550 UNREG_FCFI mbxStatus error x%x " |
6839 | "HBA state x%x\n" , |
6840 | mboxq->u.mb.mbxStatus, vport->port_state); |
6841 | } |
	mempool_free(mboxq, phba->mbox_mem_pool);
6843 | return; |
6844 | } |
6845 | |
6846 | /** |
6847 | * lpfc_unregister_fcf_prep - Unregister fcf record preparation |
6848 | * @phba: Pointer to hba context object. |
6849 | * |
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It unregisters, in order, the RPIs, VPIs, and VFIs.
6853 | */ |
6854 | int |
6855 | lpfc_unregister_fcf_prep(struct lpfc_hba *phba) |
6856 | { |
6857 | struct lpfc_vport **vports; |
6858 | struct lpfc_nodelist *ndlp; |
6859 | struct Scsi_Host *shost; |
6860 | int i = 0, rc; |
6861 | |
6862 | /* Unregister RPIs */ |
6863 | if (lpfc_fcf_inuse(phba)) |
6864 | lpfc_unreg_hba_rpis(phba); |
6865 | |
6866 | /* At this point, all discovery is aborted */ |
6867 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; |
6868 | |
6869 | /* Unregister VPIs */ |
6870 | vports = lpfc_create_vport_work_array(phba); |
6871 | if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) |
6872 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
6873 | /* Stop FLOGI/FDISC retries */ |
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6875 | if (ndlp) |
6876 | lpfc_cancel_retry_delay_tmo(vports[i], ndlp); |
6877 | lpfc_cleanup_pending_mbox(vports[i]); |
6878 | if (phba->sli_rev == LPFC_SLI_REV4) |
6879 | lpfc_sli4_unreg_all_rpis(vports[i]); |
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
6886 | } |
6887 | lpfc_destroy_vport_work_array(phba, vports); |
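	/* If no NPIV vports were processed above, apply the same VPI
	 * cleanup directly to the physical port.
	 */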
6888 | if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { |
		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6890 | if (ndlp) |
6891 | lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); |
6892 | lpfc_cleanup_pending_mbox(phba->pport); |
6893 | if (phba->sli_rev == LPFC_SLI_REV4) |
6894 | lpfc_sli4_unreg_all_rpis(phba->pport); |
		lpfc_mbx_unreg_vpi(phba->pport);
		shost = lpfc_shost_from_vport(phba->pport);
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
6901 | } |
6902 | |
6903 | /* Cleanup any outstanding ELS commands */ |
6904 | lpfc_els_flush_all_cmd(phba); |
6905 | |
6906 | /* Unregister the physical port VFI */ |
6907 | rc = lpfc_issue_unreg_vfi(phba->pport); |
6908 | return rc; |
6909 | } |
6910 | |
6911 | /** |
6912 | * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record |
6913 | * @phba: Pointer to hba context object. |
6914 | * |
6915 | * This function issues synchronous unregister FCF mailbox command to HBA to |
6916 | * unregister the currently registered FCF record. The driver does not reset |
6917 | * the driver FCF usage state flags. |
6918 | * |
 * Return 0 if successfully issued, non-zero otherwise.
6920 | */ |
6921 | int |
6922 | lpfc_sli4_unregister_fcf(struct lpfc_hba *phba) |
6923 | { |
6924 | LPFC_MBOXQ_t *mbox; |
6925 | int rc; |
6926 | |
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6928 | if (!mbox) { |
6929 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
6930 | "2551 UNREG_FCFI mbox allocation failed" |
6931 | "HBA state x%x\n" , phba->pport->port_state); |
6932 | return -ENOMEM; |
6933 | } |
6934 | lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); |
6935 | mbox->vport = phba->pport; |
6936 | mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; |
6937 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
6938 | |
6939 | if (rc == MBX_NOT_FINISHED) { |
6940 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
6941 | "2552 Unregister FCFI command failed rc x%x " |
6942 | "HBA state x%x\n" , |
6943 | rc, phba->pport->port_state); |
6944 | return -EINVAL; |
6945 | } |
6946 | return 0; |
6947 | } |
6948 | |
6949 | /** |
6950 | * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan |
6951 | * @phba: Pointer to hba context object. |
6952 | * |
 * This function unregisters the currently registered FCF. This function
 * also tries to find another FCF for discovery by rescanning the HBA FCF table.
6955 | */ |
6956 | void |
6957 | lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) |
6958 | { |
6959 | int rc; |
6960 | |
6961 | /* Preparation for unregistering fcf */ |
6962 | rc = lpfc_unregister_fcf_prep(phba); |
6963 | if (rc) { |
6964 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
6965 | "2748 Failed to prepare for unregistering " |
6966 | "HBA's FCF record: rc=%d\n" , rc); |
6967 | return; |
6968 | } |
6969 | |
6970 | /* Now, unregister FCF record and reset HBA FCF state */ |
6971 | rc = lpfc_sli4_unregister_fcf(phba); |
6972 | if (rc) |
6973 | return; |
6974 | /* Reset HBA FCF states after successful unregister FCF */ |
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag = 0;
	spin_unlock_irq(&phba->hbalock);
6978 | phba->fcf.current_rec.flag = 0; |
6979 | |
6980 | /* |
6981 | * If driver is not unloading, check if there is any other |
6982 | * FCF record that can be used for discovery. |
6983 | */ |
6984 | if ((phba->pport->load_flag & FC_UNLOADING) || |
6985 | (phba->link_state < LPFC_LINK_UP)) |
6986 | return; |
6987 | |
6988 | /* This is considered as the initial FCF discovery scan */ |
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);
6992 | |
6993 | /* Reset FCF roundrobin bmask for new discovery */ |
6994 | lpfc_sli4_clear_fcf_rr_bmask(phba); |
6995 | |
6996 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); |
6997 | |
6998 | if (rc) { |
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
7002 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
7003 | "2553 lpfc_unregister_unused_fcf failed " |
7004 | "to read FCF record HBA state x%x\n" , |
7005 | phba->pport->port_state); |
7006 | } |
7007 | } |
7008 | |
7009 | /** |
7010 | * lpfc_unregister_fcf - Unregister the currently registered fcf record |
7011 | * @phba: Pointer to hba context object. |
7012 | * |
 * This function just unregisters the currently registered FCF. It does not
7014 | * try to find another FCF for discovery. |
7015 | */ |
7016 | void |
7017 | lpfc_unregister_fcf(struct lpfc_hba *phba) |
7018 | { |
7019 | int rc; |
7020 | |
7021 | /* Preparation for unregistering fcf */ |
7022 | rc = lpfc_unregister_fcf_prep(phba); |
7023 | if (rc) { |
7024 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
7025 | "2749 Failed to prepare for unregistering " |
7026 | "HBA's FCF record: rc=%d\n" , rc); |
7027 | return; |
7028 | } |
7029 | |
7030 | /* Now, unregister FCF record and reset HBA FCF state */ |
7031 | rc = lpfc_sli4_unregister_fcf(phba); |
7032 | if (rc) |
7033 | return; |
7034 | /* Set proper HBA FCF states after successful unregister FCF */ |
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
7038 | } |
7039 | |
7040 | /** |
7041 | * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. |
7042 | * @phba: Pointer to hba context object. |
7043 | * |
 * This function checks if there are any connected remote ports for the FCF
 * and, if all the devices are disconnected, unregisters the FCFI.
7046 | * This function also tries to use another FCF for discovery. |
7047 | */ |
7048 | void |
7049 | lpfc_unregister_unused_fcf(struct lpfc_hba *phba) |
7050 | { |
7051 | /* |
7052 | * If HBA is not running in FIP mode, if HBA does not support |
7053 | * FCoE, if FCF discovery is ongoing, or if FCF has not been |
7054 | * registered, do nothing. |
7055 | */ |
	spin_lock_irq(&phba->hbalock);
7057 | if (!(phba->hba_flag & HBA_FCOE_MODE) || |
7058 | !(phba->fcf.fcf_flag & FCF_REGISTERED) || |
7059 | !(phba->hba_flag & HBA_FIP_SUPPORT) || |
7060 | (phba->fcf.fcf_flag & FCF_DISCOVERY) || |
7061 | (phba->pport->port_state == LPFC_FLOGI)) { |
		spin_unlock_irq(&phba->hbalock);
7063 | return; |
7064 | } |
	spin_unlock_irq(&phba->hbalock);
7066 | |
7067 | if (lpfc_fcf_inuse(phba)) |
7068 | return; |
7069 | |
7070 | lpfc_unregister_fcf_rescan(phba); |
7071 | } |
7072 | |
7073 | /** |
7074 | * lpfc_read_fcf_conn_tbl - Create driver FCF connection table. |
7075 | * @phba: Pointer to hba context object. |
7076 | * @buff: Buffer containing the FCF connection table as in the config |
7077 | * region. |
 * This function creates the driver data structure for the FCF connection
7079 | * record table read from config region 23. |
7080 | */ |
7081 | static void |
7082 | lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, |
7083 | uint8_t *buff) |
7084 | { |
7085 | struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; |
7086 | struct lpfc_fcf_conn_hdr *conn_hdr; |
7087 | struct lpfc_fcf_conn_rec *conn_rec; |
7088 | uint32_t record_count; |
7089 | int i; |
7090 | |
7091 | /* Free the current connect table */ |
7092 | list_for_each_entry_safe(conn_entry, next_conn_entry, |
7093 | &phba->fcf_conn_rec_list, list) { |
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
7096 | } |
7097 | |
7098 | conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; |
7099 | record_count = conn_hdr->length * sizeof(uint32_t)/ |
7100 | sizeof(struct lpfc_fcf_conn_rec); |
7101 | |
7102 | conn_rec = (struct lpfc_fcf_conn_rec *) |
7103 | (buff + sizeof(struct lpfc_fcf_conn_hdr)); |
7104 | |
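	/* Copy each valid connection record into a newly allocated list
	 * entry on fcf_conn_rec_list.
	 */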
7105 | for (i = 0; i < record_count; i++) { |
7106 | if (!(conn_rec[i].flags & FCFCNCT_VALID)) |
7107 | continue; |
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
7109 | GFP_KERNEL); |
7110 | if (!conn_entry) { |
7111 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
7112 | "2566 Failed to allocate connection" |
7113 | " table entry\n" ); |
7114 | return; |
7115 | } |
7116 | |
7117 | memcpy(&conn_entry->conn_rec, &conn_rec[i], |
7118 | sizeof(struct lpfc_fcf_conn_rec)); |
		list_add_tail(&conn_entry->list,
			      &phba->fcf_conn_rec_list);
7121 | } |
7122 | |
	if (!list_empty(&phba->fcf_conn_rec_list)) {
7124 | i = 0; |
7125 | list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, |
7126 | list) { |
7127 | conn_rec = &conn_entry->conn_rec; |
7128 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
7129 | "3345 FCF connection list rec[%02d]: " |
7130 | "flags:x%04x, vtag:x%04x, " |
7131 | "fabric_name:x%02x:%02x:%02x:%02x:" |
7132 | "%02x:%02x:%02x:%02x, " |
7133 | "switch_name:x%02x:%02x:%02x:%02x:" |
7134 | "%02x:%02x:%02x:%02x\n" , i++, |
7135 | conn_rec->flags, conn_rec->vlan_tag, |
7136 | conn_rec->fabric_name[0], |
7137 | conn_rec->fabric_name[1], |
7138 | conn_rec->fabric_name[2], |
7139 | conn_rec->fabric_name[3], |
7140 | conn_rec->fabric_name[4], |
7141 | conn_rec->fabric_name[5], |
7142 | conn_rec->fabric_name[6], |
7143 | conn_rec->fabric_name[7], |
7144 | conn_rec->switch_name[0], |
7145 | conn_rec->switch_name[1], |
7146 | conn_rec->switch_name[2], |
7147 | conn_rec->switch_name[3], |
7148 | conn_rec->switch_name[4], |
7149 | conn_rec->switch_name[5], |
7150 | conn_rec->switch_name[6], |
7151 | conn_rec->switch_name[7]); |
7152 | } |
7153 | } |
7154 | } |
7155 | |
7156 | /** |
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
7158 | * @phba: Pointer to hba context object. |
7159 | * @buff: Buffer containing the FCoE parameter data structure. |
7160 | * |
 * This function updates the driver data structure with the config
 * parameters read from config region 23.
7163 | */ |
7164 | static void |
7165 | lpfc_read_fcoe_param(struct lpfc_hba *phba, |
7166 | uint8_t *buff) |
7167 | { |
7168 | struct lpfc_fip_param_hdr *fcoe_param_hdr; |
7169 | struct lpfc_fcoe_params *fcoe_param; |
7170 | |
7171 | fcoe_param_hdr = (struct lpfc_fip_param_hdr *) |
7172 | buff; |
7173 | fcoe_param = (struct lpfc_fcoe_params *) |
7174 | (buff + sizeof(struct lpfc_fip_param_hdr)); |
7175 | |
7176 | if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || |
7177 | (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) |
7178 | return; |
7179 | |
7180 | if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { |
7181 | phba->valid_vlan = 1; |
7182 | phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & |
7183 | 0xFFF; |
7184 | } |
7185 | |
7186 | phba->fc_map[0] = fcoe_param->fc_map[0]; |
7187 | phba->fc_map[1] = fcoe_param->fc_map[1]; |
7188 | phba->fc_map[2] = fcoe_param->fc_map[2]; |
7189 | return; |
7190 | } |
7191 | |
7192 | /** |
7193 | * lpfc_get_rec_conf23 - Get a record type in config region data. |
7194 | * @buff: Buffer containing config region 23 data. |
7195 | * @size: Size of the data buffer. |
7196 | * @rec_type: Record type to be searched. |
7197 | * |
 * This function searches the config region data to find the beginning
 * of the record specified by @rec_type. If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
7201 | */ |
7202 | static uint8_t * |
7203 | lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type) |
7204 | { |
7205 | uint32_t offset = 0, rec_length; |
7206 | |
7207 | if ((buff[0] == LPFC_REGION23_LAST_REC) || |
7208 | (size < sizeof(uint32_t))) |
7209 | return NULL; |
7210 | |
7211 | rec_length = buff[offset + 1]; |
7212 | |
7213 | /* |
7214 | * One TLV record has one word header and number of data words |
7215 | * specified in the rec_length field of the record header. |
7216 | */ |
7217 | while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t)) |
7218 | <= size) { |
7219 | if (buff[offset] == rec_type) |
7220 | return &buff[offset]; |
7221 | |
7222 | if (buff[offset] == LPFC_REGION23_LAST_REC) |
7223 | return NULL; |
7224 | |
7225 | offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t); |
7226 | rec_length = buff[offset + 1]; |
7227 | } |
7228 | return NULL; |
7229 | } |
7230 | |
7231 | /** |
7232 | * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23. |
7233 | * @phba: Pointer to lpfc_hba data structure. |
7234 | * @buff: Buffer containing config region 23 data. |
7235 | * @size: Size of the data buffer. |
7236 | * |
7237 | * This function parses the FCoE config parameters in config region 23 and |
 * populates the driver data structure with the parameters.
7239 | */ |
7240 | void |
7241 | lpfc_parse_fcoe_conf(struct lpfc_hba *phba, |
7242 | uint8_t *buff, |
7243 | uint32_t size) |
7244 | { |
7245 | uint32_t offset = 0; |
7246 | uint8_t *rec_ptr; |
7247 | |
7248 | /* |
7249 | * If data size is less than 2 words signature and version cannot be |
7250 | * verified. |
7251 | */ |
7252 | if (size < 2*sizeof(uint32_t)) |
7253 | return; |
7254 | |
7255 | /* Check the region signature first */ |
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
7257 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
7258 | "2567 Config region 23 has bad signature\n" ); |
7259 | return; |
7260 | } |
7261 | |
7262 | offset += 4; |
7263 | |
7264 | /* Check the data structure version */ |
7265 | if (buff[offset] != LPFC_REGION23_VERSION) { |
7266 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
7267 | "2568 Config region 23 has bad version\n" ); |
7268 | return; |
7269 | } |
7270 | offset += 4; |
7271 | |
7272 | /* Read FCoE param record */ |
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);
7277 | |
7278 | /* Read FCF connection table */ |
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
7283 | |
7284 | } |
7285 | |
7286 | /* |
7287 | * lpfc_error_lost_link - IO failure from link event or FW reset check. |
7288 | * |
7289 | * @vport: Pointer to lpfc_vport data structure. |
7290 | * @ulp_status: IO completion status. |
7291 | * @ulp_word4: Reason code for the ulp_status. |
7292 | * |
7293 | * This function evaluates the ulp_status and ulp_word4 values |
7294 | * for specific error values that indicate an internal link fault |
7295 | * or fw reset event for the completing IO. Callers require this |
7296 | * common data to decide next steps on the IO. |
7297 | * |
7298 | * Return: |
7299 | * false - No link or reset error occurred. |
7300 | * true - A link or reset error occurred. |
7301 | */ |
7302 | bool |
7303 | lpfc_error_lost_link(struct lpfc_vport *vport, u32 ulp_status, u32 ulp_word4) |
7304 | { |
7305 | /* Mask off the extra port data to get just the reason code. */ |
7306 | u32 rsn_code = IOERR_PARAM_MASK & ulp_word4; |
7307 | |
7308 | if (ulp_status == IOSTAT_LOCAL_REJECT && |
7309 | (rsn_code == IOERR_SLI_ABORTED || |
7310 | rsn_code == IOERR_LINK_DOWN || |
7311 | rsn_code == IOERR_SLI_DOWN)) { |
7312 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI | LOG_ELS, |
7313 | "0408 Report link error true: <x%x:x%x>\n" , |
7314 | ulp_status, ulp_word4); |
7315 | return true; |
7316 | } |
7317 | |
7318 | return false; |
7319 | } |
7320 | |