// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/phylink.h>
#include <scsi/libfc.h>
#include <scsi/scsi_host.h>
#include <scsi/fc_frame.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include "qedf.h"
#include "qedf_dbg.h"
#include <uapi/linux/pci_regs.h>

const struct qed_fcoe_ops *qed_ops;

static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qedf_remove(struct pci_dev *pdev);
static void qedf_shutdown(struct pci_dev *pdev);
static void qedf_schedule_recovery_handler(void *dev);
static void qedf_recovery_handler(struct work_struct *work);
static int qedf_suspend(struct pci_dev *pdev, pm_message_t state);

/*
 * Driver module parameters.
 */
static unsigned int qedf_dev_loss_tmo = 60;
module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO);
MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
 "remote ports (default 60)");

uint qedf_debug = QEDF_LOG_INFO;
module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable the default debugging"
 " mask");

static uint qedf_fipvlan_retries = 60;
module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
 "before giving up (default 60)");

static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if the FIP VLAN request fails "
 "(default 1002).");

static int qedf_default_prio = -1;
module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE"
 " traffic (value between 0 and 7, default 3).");

uint qedf_dump_frames;
module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames "
 "(default off)");

static uint qedf_queue_depth;
module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered "
 "by the qedf driver. Default is 0 (use OS default).");

uint qedf_io_tracing;
module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions "
 "into trace buffer. (default off).");

static uint qedf_max_lun = MAX_FIBRE_LUNS;
module_param_named(max_lun, qedf_max_lun, int, S_IRUGO);
MODULE_PARM_DESC(max_lun, " Sets the maximum LUNs per target that the driver "
 "supports. (default 0xffffffff)");

uint qedf_link_down_tmo;
module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO);
MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the "
 "link is down by N seconds.");

bool qedf_retry_delay;
module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retry_delay, " Enable/disable handling of the FCP_RSP IU "
 "retry delay (default off).");

static bool qedf_dcbx_no_wait;
module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start "
 "sending FIP VLAN requests on link up (Default: off).");

static uint qedf_dp_module;
module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
MODULE_PARM_DESC(dp_module, " Bit flags controlling verbose printks, passed "
 "to the qed module during probe.");

static uint qedf_dp_level = QED_LEVEL_NOTICE;
module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
MODULE_PARM_DESC(dp_level, " printk verbosity control passed to the qed module "
 "during probe (0-3: 0 most verbose).");

static bool qedf_enable_recovery = true;
module_param_named(enable_recovery, qedf_enable_recovery,
 bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_recovery, " Enable/disable recovery on driver/firmware "
 "interface level errors. 0 = Disabled, 1 = Enabled (Default: 1).");

struct workqueue_struct *qedf_io_wq;

static struct fcoe_percpu_s qedf_global;
static DEFINE_SPINLOCK(qedf_global_lock);

static struct kmem_cache *qedf_io_work_cache;

void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
{
 int vlan_id_tmp = 0;

 vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
 qedf->vlan_id = vlan_id_tmp;
 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
   "Setting vlan_id=0x%04x prio=%d.\n",
   vlan_id_tmp, qedf->prio);
}
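
/*
 * Worked example (illustrative only): with the default priority of 3 and
 * the fallback VLAN of 1002 (0x3ea), qedf_set_vlan_id() stores
 * (3 << VLAN_PRIO_SHIFT) | 0x3ea = 0x63ea; the 802.1q PCP bits sit above
 * the 12-bit VLAN ID (VLAN_PRIO_SHIFT is 13).
 */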

/* Returns true if we have a valid vlan, false otherwise */
static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
{
 while (qedf->fipvlan_retries--) {
  /* This is to catch if link goes down during fipvlan retries */
  if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
   QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
   return false;
  }

  if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
   QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
   return false;
  }

  if (qedf->vlan_id > 0) {
   QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
     "vlan = 0x%x already set, calling ctlr_link_up.\n",
     qedf->vlan_id);
   if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
    fcoe_ctlr_link_up(&qedf->ctlr);
   return true;
  }

  QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
    "Retry %d.\n", qedf->fipvlan_retries);
  init_completion(&qedf->fipvlan_compl);
  qedf_fcoe_send_vlan_req(qedf);
  wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
 }

 return false;
}

static void qedf_handle_link_update(struct work_struct *work)
{
 struct qedf_ctx *qedf =
  container_of(work, struct qedf_ctx, link_update.work);
 int rc;

 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
   atomic_read(&qedf->link_state));

 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
  rc = qedf_initiate_fipvlan_req(qedf);
  if (rc)
   return;

  if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
   QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
     "Link is down, resetting vlan_id.\n");
   qedf->vlan_id = 0;
   return;
  }

  /*
   * If we get here then we never received a response to our
   * FIP VLAN request, so set the vlan_id to the default and
   * tell FCoE that the link is up.
   */
  QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
    "response, falling back to default VLAN %d.\n",
    qedf_fallback_vlan);
  qedf_set_vlan_id(qedf, qedf_fallback_vlan);

  /*
   * Zero out data_src_addr so we'll update it with the new
   * lport port_id.
   */
  eth_zero_addr(qedf->data_src_addr);
  fcoe_ctlr_link_up(&qedf->ctlr);
 } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
  /*
   * If we hit here and link_down_tmo_valid is still 1 it means
   * that link_down_tmo timed out, so set it to 0 to make sure
   * any other readers have accurate state.
   */
  atomic_set(&qedf->link_down_tmo_valid, 0);
  QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
    "Calling fcoe_ctlr_link_down().\n");
  fcoe_ctlr_link_down(&qedf->ctlr);
  if (qedf_wait_for_upload(qedf) == false)
   QEDF_ERR(&qedf->dbg_ctx,
     "Could not upload all sessions.\n");
  /* Reset the number of FIP VLAN retries */
  qedf->fipvlan_retries = qedf_fipvlan_retries;
 }
}

#define QEDF_FCOE_MAC_METHOD_GRANTED_MAC 1
#define QEDF_FCOE_MAC_METHOD_FCF_MAP 2
#define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC 3
static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
{
 u8 *granted_mac;
 struct fc_frame_header *fh = fc_frame_header_get(fp);
 u8 fc_map[3];
 int method = 0;

 /* Get granted MAC address from FIP FLOGI payload */
 granted_mac = fr_cb(fp)->granted_mac;

 /*
  * We set the source MAC for FCoE traffic based on the Granted MAC
  * address from the switch.
  *
  * If granted_mac is non-zero, we use that.
  * If granted_mac is zeroed out, create the FCoE MAC based on
  * the sel_fcf->fc_map and the d_id of the FLOGI frame.
  * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the
  * d_id of the FLOGI frame.
  */
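 /*
  * Worked example (illustrative): with sel_fcf->fc_map = 0x0efc00 and a
  * FLOGI d_id of 0x112233, the derived FCoE MAC is 0e:fc:00:11:22:33
  * (FC-MAP in bytes 0-2, fabric-assigned FC-ID in bytes 3-5).
  */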
 if (!is_zero_ether_addr(granted_mac)) {
  ether_addr_copy(qedf->data_src_addr, granted_mac);
  method = QEDF_FCOE_MAC_METHOD_GRANTED_MAC;
 } else if (qedf->ctlr.sel_fcf->fc_map != 0) {
  hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
  qedf->data_src_addr[0] = fc_map[0];
  qedf->data_src_addr[1] = fc_map[1];
  qedf->data_src_addr[2] = fc_map[2];
  qedf->data_src_addr[3] = fh->fh_d_id[0];
  qedf->data_src_addr[4] = fh->fh_d_id[1];
  qedf->data_src_addr[5] = fh->fh_d_id[2];
  method = QEDF_FCOE_MAC_METHOD_FCF_MAP;
 } else {
  fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
  method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC;
 }

 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
   "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method);
}

static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 void *arg)
{
 struct fc_exch *exch = fc_seq_exch(seq);
 struct fc_lport *lport = exch->lp;
 struct qedf_ctx *qedf = lport_priv(lport);

 if (!qedf) {
  QEDF_ERR(NULL, "qedf is NULL.\n");
  return;
 }

 /*
  * If ERR_PTR is set then don't try to stat anything as it will cause
  * a crash when we access fp.
  */
 if (IS_ERR(fp)) {
  QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
    "fp has IS_ERR() set.\n");
  goto skip_stat;
 }

 /* Log stats for FLOGI reject */
 if (fc_frame_payload_op(fp) == ELS_LS_RJT)
  qedf->flogi_failed++;
 else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
  /* Set the source MAC we will use for FCoE traffic */
  qedf_set_data_src_addr(qedf, fp);
  qedf->flogi_pending = 0;
 }

 /* Complete flogi_compl so we can proceed to sending ADISCs */
 complete(&qedf->flogi_compl);

skip_stat:
 /* Report response to libfc */
 fc_lport_flogi_resp(seq, fp, lport);
}

static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
 struct fc_frame *fp, unsigned int op,
 void (*resp)(struct fc_seq *,
 struct fc_frame *,
 void *),
 void *arg, u32 timeout)
{
 struct qedf_ctx *qedf = lport_priv(lport);

 /*
  * Intercept FLOGI for statistics purposes. Note we use the resp
  * callback to tell if this is really a FLOGI.
  */
 if (resp == fc_lport_flogi_resp) {
  qedf->flogi_cnt++;
  if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
   schedule_delayed_work(&qedf->stag_work, 2);
   return NULL;
  }
  qedf->flogi_pending++;
  return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
    arg, timeout);
 }

 return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}

int qedf_send_flogi(struct qedf_ctx *qedf)
{
 struct fc_lport *lport;
 struct fc_frame *fp;

 lport = qedf->lport;

 if (!lport->tt.elsct_send) {
  QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
  return -EINVAL;
 }

 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
 if (!fp) {
  QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
  return -ENOMEM;
 }

 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
   "Sending FLOGI to reestablish session with switch.\n");
 lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
   ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov);

 init_completion(&qedf->flogi_compl);

 return 0;
}

/*
 * This function is called if link_down_tmo is in use. If we get a link up and
 * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
 * sessions with targets. Otherwise, just call fcoe_ctlr_link_up().
 */
static void qedf_link_recovery(struct work_struct *work)
{
 struct qedf_ctx *qedf =
  container_of(work, struct qedf_ctx, link_recovery.work);
 struct fc_lport *lport = qedf->lport;
 struct fc_rport_priv *rdata;
 bool rc;
 int retries = 30;
 int rval, i;
 struct list_head rdata_login_list;

 INIT_LIST_HEAD(&rdata_login_list);

 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
   "Link down tmo did not expire.\n");

 /*
  * Essentially reset the fcoe_ctlr here without affecting the state
  * of the libfc structs.
  */
 qedf->ctlr.state = FIP_ST_LINK_WAIT;
 fcoe_ctlr_link_down(&qedf->ctlr);

 /*
  * Bring the link up before we send the fipvlan request so libfcoe
  * can select a new fcf in parallel.
  */
 fcoe_ctlr_link_up(&qedf->ctlr);

 /* Since the link went down and came back up, verify which VLAN we're on */
 qedf->fipvlan_retries = qedf_fipvlan_retries;
 rc = qedf_initiate_fipvlan_req(qedf);
 /* If getting the VLAN fails, set the VLAN to the fallback one */
 if (!rc)
  qedf_set_vlan_id(qedf, qedf_fallback_vlan);

 /*
  * We need to wait for an FCF to be selected after the
  * fcoe_ctlr_link_up call, otherwise the FLOGI will be rejected.
  */
 while (retries > 0) {
  if (qedf->ctlr.sel_fcf) {
   QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
     "FCF reselected, proceeding with FLOGI.\n");
   break;
  }
  msleep(500);
  retries--;
 }

 if (retries < 1) {
  QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
    "FCF selection.\n");
  return;
 }

 rval = qedf_send_flogi(qedf);
 if (rval)
  return;

 /* Wait for FLOGI completion before proceeding with sending ADISCs */
 i = wait_for_completion_timeout(&qedf->flogi_compl,
   qedf->lport->r_a_tov);
 if (i == 0) {
  QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
  return;
 }

 /*
  * Call lport->tt.rport_login which will cause libfc to send an
  * ADISC since the rport is in state ready.
  */
 mutex_lock(&lport->disc.disc_mutex);
 list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
  if (kref_get_unless_zero(&rdata->kref)) {
   fc_rport_login(rdata);
   kref_put(&rdata->kref, fc_rport_destroy);
  }
 }
 mutex_unlock(&lport->disc.disc_mutex);
}

static void qedf_update_link_speed(struct qedf_ctx *qedf,
 struct qed_link_output *link)
{
 __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
 struct fc_lport *lport = qedf->lport;

 lport->link_speed = FC_PORTSPEED_UNKNOWN;
 lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;

 /* Set fc_host link speed */
 switch (link->speed) {
 case 10000:
  lport->link_speed = FC_PORTSPEED_10GBIT;
  break;
 case 25000:
  lport->link_speed = FC_PORTSPEED_25GBIT;
  break;
 case 40000:
  lport->link_speed = FC_PORTSPEED_40GBIT;
  break;
 case 50000:
  lport->link_speed = FC_PORTSPEED_50GBIT;
  break;
 case 100000:
  lport->link_speed = FC_PORTSPEED_100GBIT;
  break;
 case 20000:
  lport->link_speed = FC_PORTSPEED_20GBIT;
  break;
 default:
  lport->link_speed = FC_PORTSPEED_UNKNOWN;
  break;
 }
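
 /*
  * Note: qed reports link->speed in Mb/s, so e.g. 25000 maps to
  * FC_PORTSPEED_25GBIT (the LINK UP log in qedf_link_update() divides
  * by 1000 to print Gb/s).
  */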

 /*
  * Set supported link speeds by querying the supported
  * capabilities of the link.
  */

 phylink_zero(sup_caps);
 phylink_set(sup_caps, 10000baseT_Full);
 phylink_set(sup_caps, 10000baseKX4_Full);
 phylink_set(sup_caps, 10000baseR_FEC);
 phylink_set(sup_caps, 10000baseCR_Full);
 phylink_set(sup_caps, 10000baseSR_Full);
 phylink_set(sup_caps, 10000baseLR_Full);
 phylink_set(sup_caps, 10000baseLRM_Full);
 phylink_set(sup_caps, 10000baseKR_Full);

 if (linkmode_intersects(link->supported_caps, sup_caps))
  lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;

 phylink_zero(sup_caps);
 phylink_set(sup_caps, 25000baseKR_Full);
 phylink_set(sup_caps, 25000baseCR_Full);
 phylink_set(sup_caps, 25000baseSR_Full);

 if (linkmode_intersects(link->supported_caps, sup_caps))
  lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;

 phylink_zero(sup_caps);
 phylink_set(sup_caps, 40000baseLR4_Full);
 phylink_set(sup_caps, 40000baseKR4_Full);
 phylink_set(sup_caps, 40000baseCR4_Full);
 phylink_set(sup_caps, 40000baseSR4_Full);

 if (linkmode_intersects(link->supported_caps, sup_caps))
  lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;

 phylink_zero(sup_caps);
 phylink_set(sup_caps, 50000baseKR2_Full);
 phylink_set(sup_caps, 50000baseCR2_Full);
 phylink_set(sup_caps, 50000baseSR2_Full);

 if (linkmode_intersects(link->supported_caps, sup_caps))
  lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;

 phylink_zero(sup_caps);
 phylink_set(sup_caps, 100000baseKR4_Full);
 phylink_set(sup_caps, 100000baseSR4_Full);
 phylink_set(sup_caps, 100000baseCR4_Full);
 phylink_set(sup_caps, 100000baseLR4_ER4_Full);

 if (linkmode_intersects(link->supported_caps, sup_caps))
  lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;

 phylink_zero(sup_caps);
 phylink_set(sup_caps, 20000baseKR2_Full);

 if (linkmode_intersects(link->supported_caps, sup_caps))
  lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;

 if (lport->host && lport->host->shost_data)
  fc_host_supported_speeds(lport->host) =
   lport->link_supported_speeds;
}

static void qedf_bw_update(void *dev)
{
 struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
 struct qed_link_output link;

 /* Get the latest status of the link */
 qed_ops->common->get_link(qedf->cdev, &link);

 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
  QEDF_ERR(&qedf->dbg_ctx,
    "Ignore link update, driver is unloading.\n");
  return;
 }

 if (link.link_up) {
  if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
   qedf_update_link_speed(qedf, &link);
  else
   QEDF_ERR(&qedf->dbg_ctx,
     "Ignore bw update, link is down.\n");
 } else {
  QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
 }
}

static void qedf_link_update(void *dev, struct qed_link_output *link)
{
 struct qedf_ctx *qedf = (struct qedf_ctx *)dev;

 /*
  * Prevent race where we're removing the module and we get a link
  * update from qed.
  */
 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
  QEDF_ERR(&qedf->dbg_ctx,
    "Ignore link update, driver is unloading.\n");
  return;
 }

 if (link->link_up) {
  if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
   QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
     "Ignoring link up event as link is already up.\n");
   return;
  }
  QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d Gb/s).\n",
    link->speed / 1000);

  /* Cancel any pending link down work */
  cancel_delayed_work(&qedf->link_update);

  atomic_set(&qedf->link_state, QEDF_LINK_UP);
  qedf_update_link_speed(qedf, link);

  if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE ||
      qedf_dcbx_no_wait) {
   QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
     "DCBX done.\n");
   if (atomic_read(&qedf->link_down_tmo_valid) > 0)
    queue_delayed_work(qedf->link_update_wq,
      &qedf->link_recovery, 0);
   else
    queue_delayed_work(qedf->link_update_wq,
      &qedf->link_update, 0);
   atomic_set(&qedf->link_down_tmo_valid, 0);
  }

 } else {
  QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");

  atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
  atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
  /*
   * Flag that we're waiting for the link to come back up before
   * informing the fcoe layer of the event.
   */
  if (qedf_link_down_tmo > 0) {
   QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
     "Starting link down tmo.\n");
   atomic_set(&qedf->link_down_tmo_valid, 1);
  }
  qedf->vlan_id = 0;
  qedf_update_link_speed(qedf, link);
  queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
    qedf_link_down_tmo * HZ);
 }
}

static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
{
 struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
 u8 tmp_prio;

 QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
   "prio=%d.\n", get->operational.valid, get->operational.enabled,
   get->operational.app_prio.fcoe);

 if (get->operational.enabled && get->operational.valid) {
  /* If DCBX was already negotiated on link up then just exit */
  if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
   QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
     "DCBX already set on link up.\n");
   return;
  }

  atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);

  /*
   * Set the 802.1q priority in the following manner:
   *
   * 1. If a modparam is set, use that
   * 2. If the DCBX app tag value is not between 0 and 7, use
   *    the default
   * 3. Otherwise use the priority we get from the DCBX app tag
   */
  tmp_prio = get->operational.app_prio.fcoe;
  if (qedf_default_prio > -1)
   qedf->prio = qedf_default_prio;
  else if (tmp_prio > 7) {
   QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
     "FIP/FCoE prio %d out of range, setting to %d.\n",
     tmp_prio, QEDF_DEFAULT_PRIO);
   qedf->prio = QEDF_DEFAULT_PRIO;
  } else
   qedf->prio = tmp_prio;

  if (atomic_read(&qedf->link_state) == QEDF_LINK_UP &&
      !qedf_dcbx_no_wait) {
   if (atomic_read(&qedf->link_down_tmo_valid) > 0)
    queue_delayed_work(qedf->link_update_wq,
      &qedf->link_recovery, 0);
   else
    queue_delayed_work(qedf->link_update_wq,
      &qedf->link_update, 0);
   atomic_set(&qedf->link_down_tmo_valid, 0);
  }
 }
}

static u32 qedf_get_login_failures(void *cookie)
{
 struct qedf_ctx *qedf;

 qedf = (struct qedf_ctx *)cookie;
 return qedf->flogi_failed;
}

static struct qed_fcoe_cb_ops qedf_cb_ops = {
 {
  .link_update = qedf_link_update,
  .bw_update = qedf_bw_update,
  .schedule_recovery_handler = qedf_schedule_recovery_handler,
  .dcbx_aen = qedf_dcbx_handler,
  .get_generic_tlv_data = qedf_get_generic_tlv_data,
  .get_protocol_tlv_data = qedf_get_protocol_tlv_data,
  .schedule_hw_err_handler = qedf_schedule_hw_err_handler,
 }
};

/*
 * Various transport templates.
 */

static struct scsi_transport_template *qedf_fc_transport_template;
static struct scsi_transport_template *qedf_fc_vport_transport_template;

/*
 * SCSI EH handlers
 */
static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
{
 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
 struct fc_lport *lport;
 struct qedf_ctx *qedf;
 struct qedf_ioreq *io_req;
 struct fc_rport_libfc_priv *rp = rport->dd_data;
 struct fc_rport_priv *rdata;
 struct qedf_rport *fcport = NULL;
 int rc = FAILED;
 int wait_count = 100;
 int refcount = 0;
 int rval;
 int got_ref = 0;

 lport = shost_priv(sc_cmd->device->host);
 qedf = (struct qedf_ctx *)lport_priv(lport);

 /* rport and tgt are allocated together, so tgt should be non-NULL */
 fcport = (struct qedf_rport *)&rp[1];
 rdata = fcport->rdata;
 if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
  QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
  rc = SUCCESS;
  goto out;
 }

 io_req = qedf_priv(sc_cmd)->io_req;
 if (!io_req) {
  QEDF_ERR(&qedf->dbg_ctx,
    "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
    sc_cmd, sc_cmd->cmnd[0],
    rdata->ids.port_id);
  rc = SUCCESS;
  goto drop_rdata_kref;
 }

 rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */
 if (rval)
  got_ref = 1;

 /* If we got a valid io_req, confirm it belongs to this sc_cmd. */
 if (!rval || io_req->sc_cmd != sc_cmd) {
  QEDF_ERR(&qedf->dbg_ctx,
    "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
    io_req->sc_cmd, sc_cmd, rdata->ids.port_id);

  goto drop_rdata_kref;
 }

 if (fc_remote_port_chkready(rport)) {
  refcount = kref_read(&io_req->refcount);
  QEDF_ERR(&qedf->dbg_ctx,
    "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
    io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
    refcount, rdata->ids.port_id);

  goto drop_rdata_kref;
 }

 rc = fc_block_rport(rport);
 if (rc)
  goto drop_rdata_kref;

 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
  QEDF_ERR(&qedf->dbg_ctx,
    "Connection uploading, xid=0x%x, port_id=%06x\n",
    io_req->xid, rdata->ids.port_id);
  while (io_req->sc_cmd && (wait_count != 0)) {
   msleep(100);
   wait_count--;
  }
  if (wait_count) {
   QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
   rc = SUCCESS;
  } else {
   QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
   rc = FAILED;
  }
  goto drop_rdata_kref;
 }

 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
  QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
  goto drop_rdata_kref;
 }

 QEDF_ERR(&qedf->dbg_ctx,
   "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
   io_req, sc_cmd, io_req->xid, io_req->fp_idx,
   rdata->ids.port_id);

 if (qedf->stop_io_on_error) {
  qedf_stop_all_io(qedf);
  rc = SUCCESS;
  goto drop_rdata_kref;
 }

 init_completion(&io_req->abts_done);
 rval = qedf_initiate_abts(io_req, true);
 if (rval) {
  QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
  /*
   * If we fail to queue the ABTS then return this command to
   * the SCSI layer as it will own and free the xid.
   */
  rc = SUCCESS;
  qedf_scsi_done(qedf, io_req, DID_ERROR);
  goto drop_rdata_kref;
 }

 wait_for_completion(&io_req->abts_done);

 if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS ||
     io_req->event == QEDF_IOREQ_EV_ABORT_FAILED ||
     io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) {
  /*
   * If we get a response to the abort this is success from
   * the perspective that all references to the command have
   * been removed from the driver and firmware.
   */
  rc = SUCCESS;
 } else {
  /* If the abort and cleanup failed then return a failure */
  rc = FAILED;
 }

 if (rc == SUCCESS)
  QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
    io_req->xid);
 else
  QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
    io_req->xid);

drop_rdata_kref:
 kref_put(&rdata->kref, fc_rport_destroy);
out:
 if (got_ref)
  kref_put(&io_req->refcount, qedf_release_cmd);
 return rc;
}

static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
 struct scsi_target *starget = scsi_target(sc_cmd->device);
 struct fc_rport *rport = starget_to_rport(starget);

 QEDF_ERR(NULL, "TARGET RESET Issued...\n");
 return qedf_initiate_tmf(rport, 0, FCP_TMF_TGT_RESET);
}

static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));

 QEDF_ERR(NULL, "LUN RESET Issued...\n");
 return qedf_initiate_tmf(rport, sc_cmd->device->lun, FCP_TMF_LUN_RESET);
}

bool qedf_wait_for_upload(struct qedf_ctx *qedf)
{
 struct qedf_rport *fcport;
 int wait_cnt = 120;
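
 /* 120 polls of 500 ms below = up to 60 seconds for uploads to finish */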

 while (wait_cnt--) {
  if (atomic_read(&qedf->num_offloads))
   QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
     "Waiting for all uploads to complete num_offloads = 0x%x.\n",
     atomic_read(&qedf->num_offloads));
  else
   return true;
  msleep(500);
 }

 rcu_read_lock();
 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
  if (test_bit(QEDF_RPORT_SESSION_READY,
      &fcport->flags)) {
   if (fcport->rdata)
    QEDF_ERR(&qedf->dbg_ctx,
      "Waiting for fcport %p portid=%06x.\n",
      fcport, fcport->rdata->ids.port_id);
  } else {
   QEDF_ERR(&qedf->dbg_ctx,
     "Waiting for fcport %p.\n", fcport);
  }
 }

 rcu_read_unlock();
 return false;
}

/* Performs soft reset of qedf_ctx by simulating a link down/up */
void qedf_ctx_soft_reset(struct fc_lport *lport)
{
 struct qedf_ctx *qedf;
 struct qed_link_output if_link;

 if (lport->vport) {
  printk_ratelimited("Cannot issue host reset on NPIV port.\n");
  return;
 }

 qedf = lport_priv(lport);

 qedf->flogi_pending = 0;
 /* For host reset, essentially do a soft link up/down */
 atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
   "Queuing link down work.\n");
 queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
   0);

 if (qedf_wait_for_upload(qedf) == false) {
  QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
  WARN_ON(atomic_read(&qedf->num_offloads));
 }

 /* Before setting link up query physical link state */
 qed_ops->common->get_link(qedf->cdev, &if_link);
 /* Bail if the physical link is not up */
 if (!if_link.link_up) {
  QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
    "Physical link is not up.\n");
  return;
 }
 /* Flush and wait to make sure link down is processed */
 flush_delayed_work(&qedf->link_update);
 msleep(500);

 atomic_set(&qedf->link_state, QEDF_LINK_UP);
 qedf->vlan_id = 0;
 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
   "Queue link up work.\n");
 queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
   0);
}

/* Reset the host by gracefully logging out and then logging back in */
static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
 struct fc_lport *lport;
 struct qedf_ctx *qedf;

 lport = shost_priv(sc_cmd->device->host);
 qedf = lport_priv(lport);

 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
     test_bit(QEDF_UNLOADING, &qedf->flags))
  return FAILED;

 QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");

 qedf_ctx_soft_reset(lport);

 return SUCCESS;
}

static int qedf_slave_configure(struct scsi_device *sdev)
{
 if (qedf_queue_depth)
  scsi_change_queue_depth(sdev, qedf_queue_depth);

 return 0;
}

static const struct scsi_host_template qedf_host_template = {
 .module = THIS_MODULE,
 .name = QEDF_MODULE_NAME,
 .this_id = -1,
 .cmd_per_lun = 32,
 .max_sectors = 0xffff,
 .queuecommand = qedf_queuecommand,
 .shost_groups = qedf_host_groups,
 .eh_abort_handler = qedf_eh_abort,
 .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
 .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
 .eh_host_reset_handler = qedf_eh_host_reset,
 .slave_configure = qedf_slave_configure,
 .dma_boundary = QED_HW_DMA_BOUNDARY,
 .sg_tablesize = QEDF_MAX_BDS_PER_CMD,
 .can_queue = FCOE_PARAMS_NUM_TASKS,
 .change_queue_depth = scsi_change_queue_depth,
 .cmd_size = sizeof(struct qedf_cmd_priv),
};

static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
 int rc;

 spin_lock(&qedf_global_lock);
 rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global);
 spin_unlock(&qedf_global_lock);

 return rc;
}

static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
{
 struct qedf_rport *fcport;
 struct fc_rport_priv *rdata;

 rcu_read_lock();
 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
  rdata = fcport->rdata;
  if (rdata == NULL)
   continue;
  if (rdata->ids.port_id == port_id) {
   rcu_read_unlock();
   return fcport;
  }
 }
 rcu_read_unlock();

 /* Return NULL to caller to let them know fcport was not found */
 return NULL;
}

/* Transmits an ELS frame over an offloaded session */
static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
{
 struct fc_frame_header *fh;
 int rc = 0;

 fh = fc_frame_header_get(fp);
 if ((fh->fh_type == FC_TYPE_ELS) &&
     (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  switch (fc_frame_payload_op(fp)) {
  case ELS_ADISC:
   qedf_send_adisc(fcport, fp);
   rc = 1;
   break;
  }
 }

 return rc;
}

/*
 * qedf_xmit - qedf FCoE frame transmit function
 */
static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
 struct fc_lport *base_lport;
 struct qedf_ctx *qedf;
 struct ethhdr *eh;
 struct fcoe_crc_eof *cp;
 struct sk_buff *skb;
 struct fc_frame_header *fh;
 struct fcoe_hdr *hp;
 u8 sof, eof;
 u32 crc;
 unsigned int hlen, tlen, elen;
 int wlen;
 struct fc_lport *tmp_lport;
 struct fc_lport *vn_port = NULL;
 struct qedf_rport *fcport;
 int rc;
 u16 vlan_tci = 0;

 qedf = (struct qedf_ctx *)lport_priv(lport);

 fh = fc_frame_header_get(fp);
 skb = fp_skb(fp);

 /* Filter out traffic to other NPIV ports on the same host */
 if (lport->vport)
  base_lport = shost_priv(vport_to_shost(lport->vport));
 else
  base_lport = lport;

 /* Flag if the destination is the base port */
 if (base_lport->port_id == ntoh24(fh->fh_d_id)) {
  vn_port = base_lport;
 } else {
  /* Go through the list of vports attached to the base_lport
   * and see if we have a match with the destination address.
   */
  list_for_each_entry(tmp_lport, &base_lport->vports, list) {
   if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) {
    vn_port = tmp_lport;
    break;
   }
  }
 }
 if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) {
  struct fc_rport_priv *rdata = NULL;

  QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
    "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
  kfree_skb(skb);
  rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
  if (rdata) {
   rdata->retries = lport->max_rport_retry_count;
   kref_put(&rdata->kref, fc_rport_destroy);
  }
  return -EINVAL;
 }
 /* End NPIV filtering */

 if (!qedf->ctlr.sel_fcf) {
  kfree_skb(skb);
  return 0;
 }

 if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
  QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
  kfree_skb(skb);
  return 0;
 }

 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
  QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
  kfree_skb(skb);
  return 0;
 }

 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
   return 0;
 }

 /* Check to see if this needs to be sent on an offloaded session */
 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));

 if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
  rc = qedf_xmit_l2_frame(fcport, fp);
  /*
   * If the frame was successfully sent over the middle path
   * then do not try to also send it over the LL2 path.
   */
  if (rc)
   return 0;
 }

 sof = fr_sof(fp);
 eof = fr_eof(fp);

 elen = sizeof(struct ethhdr);
 hlen = sizeof(struct fcoe_hdr);
 tlen = sizeof(struct fcoe_crc_eof);
 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
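
 /*
  * Example (illustrative, assuming the usual 8-byte fcoe_crc_eof
  * trailer): a 64-byte skb at this point yields
  * wlen = (64 - 8 + 4) / 4 = 15 FC words (FCOE_WORD_TO_BYTE is 4).
  */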

 skb->ip_summed = CHECKSUM_NONE;
 crc = fcoe_fc_crc(fp);

 /* copy port crc and eof to the skb buff */
 if (skb_is_nonlinear(skb)) {
  skb_frag_t *frag;

  if (qedf_get_paged_crc_eof(skb, tlen)) {
   kfree_skb(skb);
   return -ENOMEM;
  }
  frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
  cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
 } else {
  cp = skb_put(skb, tlen);
 }

 memset(cp, 0, sizeof(*cp));
 cp->fcoe_eof = eof;
 cp->fcoe_crc32 = cpu_to_le32(~crc);
 if (skb_is_nonlinear(skb)) {
  kunmap_atomic(cp);
  cp = NULL;
 }

 /* adjust skb network/transport offsets to match mac/fcoe/port */
 skb_push(skb, elen + hlen);
 skb_reset_mac_header(skb);
 skb_reset_network_header(skb);
 skb->mac_len = elen;
 skb->protocol = htons(ETH_P_FCOE);

 /*
  * Add VLAN tag to non-offload FCoE frame based on current stored VLAN
  * for FIP/FCoE traffic.
  */
 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);

 /* fill up mac and fcoe headers */
 eh = eth_hdr(skb);
 eh->h_proto = htons(ETH_P_FCOE);
 if (qedf->ctlr.map_dest)
  fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
 else
  /* insert GW address */
  ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);

 /* Set the source MAC address */
 ether_addr_copy(eh->h_source, qedf->data_src_addr);

 hp = (struct fcoe_hdr *)(eh + 1);
 memset(hp, 0, sizeof(*hp));
 if (FC_FCOE_VER)
  FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
 hp->fcoe_sof = sof;
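
 /*
  * At this point the frame is laid out as
  * [ethhdr][fcoe_hdr (version + SOF)][FC frame][fcoe_crc_eof (CRC + EOF)],
  * with the VLAN tag carried out of band in the skb for insertion on
  * transmit.
  */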

 /* update tx stats */
 this_cpu_inc(lport->stats->TxFrames);
 this_cpu_add(lport->stats->TxWords, wlen);

 /* Get VLAN ID from skb for printing purposes */
 __vlan_hwaccel_get_tag(skb, &vlan_tci);

 /* send down to lld */
 fr_dev(fp) = lport;
 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
   "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n",
   ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type,
   vlan_tci);
 if (qedf_dump_frames)
  print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
    1, skb->data, skb->len, false);
 rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
 if (rc) {
  QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
  kfree_skb(skb);
  return rc;
 }

 return 0;
}

static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
{
 int rval = 0;
 u32 *pbl;
 dma_addr_t page;
 int num_pages;

 /* Calculate appropriate queue and PBL sizes */
 fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
 fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE);
 fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) *
  sizeof(void *);
 fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;

 fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
   &fcport->sq_dma, GFP_KERNEL);
 if (!fcport->sq) {
  QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
  rval = 1;
  goto out;
 }

 fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
   fcport->sq_pbl_size,
   &fcport->sq_pbl_dma, GFP_KERNEL);
 if (!fcport->sq_pbl) {
  QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
  rval = 1;
  goto out_free_sq;
 }

 /* Create PBL */
 num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
 page = fcport->sq_dma;
 pbl = (u32 *)fcport->sq_pbl;

 while (num_pages--) {
  *pbl = U64_LO(page);
  pbl++;
  *pbl = U64_HI(page);
  pbl++;
  page += QEDF_PAGE_SIZE;
 }
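
 /*
  * Illustrative PBL layout: each page address is stored as two 32-bit
  * words, low word first. E.g. with a 4 KiB QEDF_PAGE_SIZE and
  * sq_dma = 0x100000000, the PBL begins
  * { 0x00000000, 0x00000001, 0x00001000, 0x00000001, ... }.
  */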

 return rval;

out_free_sq:
 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
   fcport->sq_dma);
out:
 return rval;
}

static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
{
 if (fcport->sq_pbl)
  dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
    fcport->sq_pbl, fcport->sq_pbl_dma);
 if (fcport->sq)
  dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
    fcport->sq, fcport->sq_dma);
}

static int qedf_offload_connection(struct qedf_ctx *qedf,
 struct qedf_rport *fcport)
{
 struct qed_fcoe_params_offload conn_info;
 u32 port_id;
 int rval;
 uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));

 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
   "portid=%06x.\n", fcport->rdata->ids.port_id);
 rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
   &fcport->fw_cid, &fcport->p_doorbell);
 if (rval) {
  QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
    "for portid=%06x.\n", fcport->rdata->ids.port_id);
  rval = 1; /* For some reason qed returns 0 on failure here */
  goto out;
 }

 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
   "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id,
   fcport->fw_cid, fcport->handle);

 memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload));

 /* Fill in the offload connection info */
 conn_info.sq_pbl_addr = fcport->sq_pbl_dma;

 conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl);
 conn_info.sq_next_page_addr =
  (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));

 /* Need to use our FCoE MAC for the offload session */
 ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);

 ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);

 conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
 conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
 conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
 conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;

 /* Set VLAN data */
 conn_info.vlan_tag = qedf->vlan_id <<
  FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
 conn_info.vlan_tag |=
  qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
 conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
   FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);

 /* Set host port source id */
 port_id = fc_host_port_id(qedf->lport->host);
 fcport->sid = port_id;
 conn_info.s_id.addr_hi = (port_id & 0x000000FF);
 conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8;
 conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16;
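
 /*
  * Example (illustrative): a 24-bit FC-ID of 0x112233 is split as
  * addr_hi = 0x33, addr_mid = 0x22, addr_lo = 0x11.
  */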

 conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq;

 /* Set remote port destination id */
 port_id = fcport->rdata->rport->port_id;
 conn_info.d_id.addr_hi = (port_id & 0x000000FF);
 conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8;
 conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16;

 conn_info.def_q_idx = 0; /* Default index for send queue? */

 /* Set FC-TAPE specific flags if needed */
 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
  QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
    "Enable CONF, REC for portid=%06x.\n",
    fcport->rdata->ids.port_id);
  conn_info.flags |= 1 <<
   FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT;
  conn_info.flags |=
   ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
   FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT;
 }

 rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
 if (rval) {
  QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
    "for portid=%06x.\n", fcport->rdata->ids.port_id);
  goto out_free_conn;
 } else
  QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
    "succeeded portid=%06x total_sqe=%d.\n",
    fcport->rdata->ids.port_id, total_sqe);

 spin_lock_init(&fcport->rport_lock);
 atomic_set(&fcport->free_sqes, total_sqe);
 return 0;
out_free_conn:
 qed_ops->release_conn(qedf->cdev, fcport->handle);
out:
 return rval;
}

#define QEDF_TERM_BUFF_SIZE 10
static void qedf_upload_connection(struct qedf_ctx *qedf,
 struct qedf_rport *fcport)
{
 void *term_params;
 dma_addr_t term_params_dma;

 /* Term params needs to be a DMA coherent buffer as qed shares the
  * physical DMA address with the firmware. The buffer may be used in
  * the receive path so we may eventually have to move this.
  */
 term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
   &term_params_dma, GFP_KERNEL);
 if (!term_params)
  return;

 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
   "port_id=%06x.\n", fcport->rdata->ids.port_id);

 qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
 qed_ops->release_conn(qedf->cdev, fcport->handle);

 dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
   term_params_dma);
}

static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
 struct qedf_rport *fcport)
{
 struct fc_rport_priv *rdata = fcport->rdata;

 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
   fcport->rdata->ids.port_id);

 /* Flush any remaining i/o's before we upload the connection */
 qedf_flush_active_ios(fcport, -1);

 if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))
  qedf_upload_connection(qedf, fcport);
 qedf_free_sq(qedf, fcport);
 fcport->rdata = NULL;
 fcport->qedf = NULL;
 kref_put(&rdata->kref, fc_rport_destroy);
}

/*
 * This event_callback is called after successful completion of libfc
 * initiated target login. qedf can proceed with initiating the session
 * establishment.
 */
static void qedf_rport_event_handler(struct fc_lport *lport,
 struct fc_rport_priv *rdata,
 enum fc_rport_event event)
{
 struct qedf_ctx *qedf = lport_priv(lport);
 struct fc_rport *rport = rdata->rport;
 struct fc_rport_libfc_priv *rp;
 struct qedf_rport *fcport;
 u32 port_id;
 int rval;
 unsigned long flags;

 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
   "port_id = 0x%x\n", event, rdata->ids.port_id);

 switch (event) {
 case RPORT_EV_READY:
  if (!rport) {
   QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
   break;
  }

  rp = rport->dd_data;
  fcport = (struct qedf_rport *)&rp[1];
  fcport->qedf = qedf;

  if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
   QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
     "portid=0x%x as max number of offloaded sessions "
     "reached.\n", rdata->ids.port_id);
   return;
  }

  /*
   * Don't try to offload the session again. Can happen when we
   * get an ADISC.
   */
  if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
   QEDF_WARN(&(qedf->dbg_ctx), "Session already "
     "offloaded, portid=0x%x.\n",
     rdata->ids.port_id);
   return;
  }

  if (rport->port_id == FC_FID_DIR_SERV) {
   /*
    * A qedf_rport structure doesn't exist for the
    * directory server.
    * We should not come here, as lport will
    * take care of fabric login.
    */
   QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
     "exist for dir server port_id=%x\n",
     rdata->ids.port_id);
   break;
  }

  if (rdata->spp_type != FC_TYPE_FCP) {
   QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
     "Not offloading since spp type isn't FCP\n");
   break;
  }
  if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
   QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
     "Not FCP target so not offloading\n");
   break;
  }

  /* Initial reference held on entry, so this can't fail */
  kref_get(&rdata->kref);
  fcport->rdata = rdata;
  fcport->rport = rport;

  rval = qedf_alloc_sq(qedf, fcport);
  if (rval) {
   qedf_cleanup_fcport(qedf, fcport);
   break;
  }

  /* Set device type */
  if (rdata->flags & FC_RP_FLAGS_RETRY &&
      rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
      !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
   fcport->dev_type = QEDF_RPORT_TYPE_TAPE;
   QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
     "portid=%06x is a TAPE device.\n",
     rdata->ids.port_id);
  } else {
   fcport->dev_type = QEDF_RPORT_TYPE_DISK;
  }

  rval = qedf_offload_connection(qedf, fcport);
  if (rval) {
   qedf_cleanup_fcport(qedf, fcport);
   break;
  }

  /* Add fcport to the qedf_ctx list of offloaded ports */
  spin_lock_irqsave(&qedf->hba_lock, flags);
  list_add_rcu(&fcport->peers, &qedf->fcports);
  spin_unlock_irqrestore(&qedf->hba_lock, flags);

  /*
   * Set the session ready bit to let everyone know that this
   * connection is ready for I/O.
   */
  set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
  atomic_inc(&qedf->num_offloads);

  break;
 case RPORT_EV_LOGO:
 case RPORT_EV_FAILED:
 case RPORT_EV_STOP:
  port_id = rdata->ids.port_id;
  if (port_id == FC_FID_DIR_SERV)
   break;

  if (rdata->spp_type != FC_TYPE_FCP) {
   QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
     "No action since spp type isn't FCP\n");
   break;
  }
  if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
   QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
     "Not FCP target so no action\n");
   break;
  }

  if (!rport) {
   QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
     "port_id=%x - rport not created yet!\n", port_id);
   break;
  }
  rp = rport->dd_data;
  /*
   * Perform session upload. Note that rdata->peers is already
   * removed from disc->rports list before we get this event.
   */
  fcport = (struct qedf_rport *)&rp[1];

  spin_lock_irqsave(&fcport->rport_lock, flags);
  /* Only free this fcport if it is offloaded already */
  if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
      !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
      &fcport->flags)) {
   set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
    &fcport->flags);
   spin_unlock_irqrestore(&fcport->rport_lock, flags);
   qedf_cleanup_fcport(qedf, fcport);
   /*
    * Remove fcport from the qedf_ctx list of offloaded
    * ports.
    */
   spin_lock_irqsave(&qedf->hba_lock, flags);
   list_del_rcu(&fcport->peers);
   spin_unlock_irqrestore(&qedf->hba_lock, flags);

   clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
    &fcport->flags);
   atomic_dec(&qedf->num_offloads);
  } else {
   spin_unlock_irqrestore(&fcport->rport_lock, flags);
  }
  break;

 case RPORT_EV_NONE:
  break;
 }
}

static void qedf_abort_io(struct fc_lport *lport)
{
 /* NO-OP but need to fill in the template */
}

static void qedf_fcp_cleanup(struct fc_lport *lport)
{
 /*
  * NO-OP but need to fill in template to prevent a NULL
  * function pointer dereference during link down. I/Os
  * will be flushed when the port is uploaded.
  */
}

static struct libfc_function_template qedf_lport_template = {
 .frame_send = qedf_xmit,
 .fcp_abort_io = qedf_abort_io,
 .fcp_cleanup = qedf_fcp_cleanup,
 .rport_event_callback = qedf_rport_event_handler,
 .elsct_send = qedf_elsct_send,
};

static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
{
 fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);

 qedf->ctlr.send = qedf_fip_send;
 qedf->ctlr.get_src_addr = qedf_get_src_mac;
 ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
}

static void qedf_setup_fdmi(struct qedf_ctx *qedf)
{
 struct fc_lport *lport = qedf->lport;
 u8 buf[8];
 int pos;
 uint32_t i;

 /*
  * fdmi_enabled needs to be set for libfc
  * to execute FDMI registration.
  */
 lport->fdmi_enabled = 1;

 /*
  * Set up the fc_host attributes that will be used to fill in the
  * FDMI information.
  */

 /* Get the PCIe Device Serial Number Capability */
 pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
 if (pos) {
  pos += 4;
  for (i = 0; i < 8; i++)
   pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);

  snprintf(fc_host_serial_number(lport->host),
    FC_SERIAL_NUMBER_SIZE,
    "%02X%02X%02X%02X%02X%02X%02X%02X",
    buf[7], buf[6], buf[5], buf[4],
    buf[3], buf[2], buf[1], buf[0]);
 } else
  snprintf(fc_host_serial_number(lport->host),
    FC_SERIAL_NUMBER_SIZE, "Unknown");
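
 /*
  * Example (illustrative): DSN capability bytes 01 02 03 04 05 06 07 08
  * (in config-space order) produce the serial number string
  * "0807060504030201", i.e. highest-offset byte first.
  */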
1689
1690 snprintf(fc_host_manufacturer(lport->host),
1691 FC_SERIAL_NUMBER_SIZE, fmt: "%s", "Marvell Semiconductor Inc.");
1692
1693 if (qedf->pdev->device == QL45xxx) {
1694 snprintf(fc_host_model(lport->host),
1695 FC_SYMBOLIC_NAME_SIZE, fmt: "%s", "QL45xxx");
1696
1697 snprintf(fc_host_model_description(lport->host),
1698 FC_SYMBOLIC_NAME_SIZE, fmt: "%s",
1699 "Marvell FastLinQ QL45xxx FCoE Adapter");
1700 }
1701
1702 if (qedf->pdev->device == QL41xxx) {
1703 snprintf(fc_host_model(lport->host),
1704 FC_SYMBOLIC_NAME_SIZE, fmt: "%s", "QL41xxx");
1705
1706 snprintf(fc_host_model_description(lport->host),
1707 FC_SYMBOLIC_NAME_SIZE, fmt: "%s",
1708 "Marvell FastLinQ QL41xxx FCoE Adapter");
1709 }
1710
1711 snprintf(fc_host_hardware_version(lport->host),
1712 FC_VERSION_STRING_SIZE, fmt: "Rev %d", qedf->pdev->revision);
1713
1714 snprintf(fc_host_driver_version(lport->host),
1715 FC_VERSION_STRING_SIZE, fmt: "%s", QEDF_VERSION);
1716
1717 snprintf(fc_host_firmware_version(lport->host),
1718 FC_VERSION_STRING_SIZE, fmt: "%d.%d.%d.%d",
1719 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
1720 FW_ENGINEERING_VERSION);
1721
1722 snprintf(fc_host_vendor_identifier(lport->host),
1723 FC_VENDOR_IDENTIFIER, fmt: "%s", "Marvell");
1724
1725}
1726
1727static int qedf_lport_setup(struct qedf_ctx *qedf)
1728{
1729 struct fc_lport *lport = qedf->lport;
1730
1731 lport->link_up = 0;
1732 lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
1733 lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
1734 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1735 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1736 lport->boot_time = jiffies;
1737 lport->e_d_tov = 2 * 1000;
1738 lport->r_a_tov = 10 * 1000;
1739
1740 /* Set NPIV support */
1741 lport->does_npiv = 1;
1742 fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV;
1743
1744 fc_set_wwnn(lport, wwnn: qedf->wwnn);
1745 fc_set_wwpn(lport, wwpn: qedf->wwpn);
1746
1747 if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, init_fcp: 0)) {
1748 QEDF_ERR(&qedf->dbg_ctx,
1749 "fcoe_libfc_config failed.\n");
1750 return -ENOMEM;
1751 }
1752
1753 /* Allocate the exchange manager */
1754 fc_exch_mgr_alloc(lport, class: FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
1755 max_xid: 0xfffe, NULL);
1756
1757 if (fc_lport_init_stats(lport))
1758 return -ENOMEM;
1759
1760 /* Finish lport config */
1761 fc_lport_config(lport);
1762
1763 /* Set max frame size */
1764 fc_set_mfs(lport, QEDF_MFS);
1765 fc_host_maxframe_size(lport->host) = lport->mfs;
1766
1767 /* Set default dev_loss_tmo based on module parameter */
1768 fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
1769
1770 /* Set symbolic node name */
1771 if (qedf->pdev->device == QL45xxx)
1772 snprintf(fc_host_symbolic_name(lport->host), size: 256,
1773 fmt: "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
1774
1775 if (qedf->pdev->device == QL41xxx)
1776 snprintf(fc_host_symbolic_name(lport->host), size: 256,
1777 fmt: "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
1778
1779 qedf_setup_fdmi(qedf);
1780
1781 return 0;
1782}

/*
 * NPIV functions
 */

static int qedf_vport_libfc_config(struct fc_vport *vport,
	struct fc_lport *lport)
{
	lport->link_up = 0;
	lport->qfull = 0;
	lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
	lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
	    FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
	lport->boot_time = jiffies;
	lport->e_d_tov = 2 * 1000;
	lport->r_a_tov = 10 * 1000;
	lport->does_npiv = 1; /* Temporary until we add NPIV support */

	/* Allocate stats for vport */
	if (fc_lport_init_stats(lport))
		return -ENOMEM;

	/* Finish lport config */
	fc_lport_config(lport);

	/* offload related configuration */
	lport->crc_offload = 0;
	lport->seq_offload = 0;
	lport->lro_enabled = 0;
	lport->lro_xid = 0;
	lport->lso_max = 0;

	return 0;
}

static int qedf_vport_create(struct fc_vport *vport, bool disabled)
{
	struct Scsi_Host *shost = vport_to_shost(vport);
	struct fc_lport *n_port = shost_priv(shost);
	struct fc_lport *vn_port;
	struct qedf_ctx *base_qedf = lport_priv(n_port);
	struct qedf_ctx *vport_qedf;

	char buf[32];
	int rc = 0;

	rc = fcoe_validate_vport_create(vport);
	if (rc) {
		fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
		QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
			   "WWPN (0x%s) already exists.\n", buf);
		return rc;
	}

	if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
		QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
			   "because link is not up.\n");
		return -EIO;
	}

	vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
	if (!vn_port) {
		QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
			   "for vport.\n");
		return -ENOMEM;
	}

	fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
	QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n",
	    buf);

	/* Copy some fields from base_qedf */
	vport_qedf = lport_priv(vn_port);
	memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx));
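	/*
	 * The byte copy above shares everything with the base port; the
	 * fields that must be per-vport are overridden below.
	 */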

	/* Set qedf data specific to this vport */
	vport_qedf->lport = vn_port;
	/* Use same hba_lock as base_qedf */
	vport_qedf->hba_lock = base_qedf->hba_lock;
	vport_qedf->pdev = base_qedf->pdev;
	vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
	init_completion(&vport_qedf->flogi_compl);
	INIT_LIST_HEAD(&vport_qedf->fcports);
	INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);

	rc = qedf_vport_libfc_config(vport, vn_port);
	if (rc) {
		QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
		    "for lport stats.\n");
		goto err;
	}

	fc_set_wwnn(vn_port, vport->node_name);
	fc_set_wwpn(vn_port, vport->port_name);
	vport_qedf->wwnn = vn_port->wwnn;
	vport_qedf->wwpn = vn_port->wwpn;

	vn_port->host->transportt = qedf_fc_vport_transport_template;
	vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
	vn_port->host->max_lun = qedf_max_lun;
	vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
	vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
	vn_port->host->max_id = QEDF_MAX_SESSIONS;

	rc = scsi_add_host(vn_port->host, &vport->dev);
	if (rc) {
		QEDF_WARN(&base_qedf->dbg_ctx,
			  "Error adding Scsi_Host rc=0x%x.\n", rc);
		goto err;
	}

	/* Set default dev_loss_tmo based on module parameter */
	fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo;

	/* Init libfc structures */
	memcpy(&vn_port->tt, &qedf_lport_template,
		sizeof(qedf_lport_template));
	fc_exch_init(vn_port);
	fc_elsct_init(vn_port);
	fc_lport_init(vn_port);
	fc_disc_init(vn_port);
	fc_disc_config(vn_port, vn_port);

	/* Allocate the exchange manager */
	shost = vport_to_shost(vport);
	n_port = shost_priv(shost);
	fc_exch_mgr_list_clone(n_port, vn_port);

	/* Set max frame size */
	fc_set_mfs(vn_port, QEDF_MFS);

	fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN;

	if (disabled) {
		fc_vport_set_state(vport, FC_VPORT_DISABLED);
	} else {
		vn_port->boot_time = jiffies;
		fc_fabric_login(vn_port);
		fc_vport_setlink(vn_port);
	}

	/* Set symbolic node name */
	if (base_qedf->pdev->device == QL45xxx)
		snprintf(fc_host_symbolic_name(vn_port->host), 256,
			 "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);

	if (base_qedf->pdev->device == QL41xxx)
		snprintf(fc_host_symbolic_name(vn_port->host), 256,
			 "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);

	/* Set supported speed */
	fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;

	/* Set speed */
	vn_port->link_speed = n_port->link_speed;

	/* Set port type */
	fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;

	/* Set maxframe size */
	fc_host_maxframe_size(vn_port->host) = n_port->mfs;

	QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
		   vn_port);

	/* Set up debug context for vport */
	vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
	vport_qedf->dbg_ctx.pdev = base_qedf->pdev;

	return 0;

err:
	scsi_host_put(vn_port->host);
	return rc;
}

static int qedf_vport_destroy(struct fc_vport *vport)
{
	struct Scsi_Host *shost = vport_to_shost(vport);
	struct fc_lport *n_port = shost_priv(shost);
	struct fc_lport *vn_port = vport->dd_data;
	struct qedf_ctx *qedf = lport_priv(vn_port);

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		goto out;
	}

	/* Set unloading bit on vport qedf_ctx to prevent more I/O */
	set_bit(QEDF_UNLOADING, &qedf->flags);

	mutex_lock(&n_port->lp_mutex);
	list_del(&vn_port->list);
	mutex_unlock(&n_port->lp_mutex);

	fc_fabric_logoff(vn_port);
	fc_lport_destroy(vn_port);

	/* Detach from scsi-ml */
	fc_remove_host(vn_port->host);
	scsi_remove_host(vn_port->host);

	/*
	 * Only try to release the exchange manager if the vn_port
	 * configuration is complete.
	 */
	if (vn_port->state == LPORT_ST_READY)
		fc_exch_mgr_free(vn_port);

	/* Free memory used by statistical counters */
	fc_lport_free_stats(vn_port);

	/* Release Scsi_Host */
	scsi_host_put(vn_port->host);

out:
	return 0;
}

static int qedf_vport_disable(struct fc_vport *vport, bool disable)
{
	struct fc_lport *lport = vport->dd_data;

	if (disable) {
		fc_vport_set_state(vport, FC_VPORT_DISABLED);
		fc_fabric_logoff(lport);
	} else {
		lport->boot_time = jiffies;
		fc_fabric_login(lport);
		fc_vport_setlink(lport);
	}
	return 0;
}

/*
 * During removal we need to wait for all the vports associated with a port
 * to be destroyed so we avoid a race condition where libfc is still trying
 * to reap vports while the driver remove function has already reaped the
 * driver contexts associated with the physical port.
 */
static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
{
	struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
	    "Entered.\n");
	while (fc_host->npiv_vports_inuse > 0) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
		    "Waiting for all vports to be reaped.\n");
		msleep(1000);
	}
}

/**
 * qedf_fcoe_reset - Resets the fcoe
 *
 * @shost: shost the reset is from
 *
 * Returns: always 0
 */
static int qedf_fcoe_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	qedf_ctx_soft_reset(lport);
	return 0;
}

static void qedf_get_host_port_id(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	fc_host_port_id(shost) = lport->port_id;
}

static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
	*shost)
{
	struct fc_host_statistics *qedf_stats;
	struct fc_lport *lport = shost_priv(shost);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct qed_fcoe_stats *fw_fcoe_stats;

	qedf_stats = fc_get_host_stats(shost);

	/* We don't collect offload stats for specific NPIV ports */
	if (lport->vport)
		goto out;

	fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
	if (!fw_fcoe_stats) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
		    "fw_fcoe_stats.\n");
		goto out;
	}

	mutex_lock(&qedf->stats_mutex);

	/* Query firmware for offload stats */
	qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);

	/*
	 * The expectation is that we add our offload stats to the stats
	 * being maintained by libfc each time the get_fc_host_stats
	 * callback is invoked. The additions are not carried over between
	 * calls to the callback.
	 */
	qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt +
	    fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt +
	    fw_fcoe_stats->fcoe_tx_other_pkt_cnt;
	qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt +
	    fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt +
	    fw_fcoe_stats->fcoe_rx_other_pkt_cnt;
	/*
	 * do_div() divides the byte counters in place (leaving the
	 * quotient) and returns the remainder, so take the word counts
	 * first and then use the quotients for the megabyte counters.
	 */
	qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4;
	qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4;
	do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000);
	do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000);
	qedf_stats->fcp_input_megabytes += fw_fcoe_stats->fcoe_rx_byte_cnt;
	qedf_stats->fcp_output_megabytes += fw_fcoe_stats->fcoe_tx_byte_cnt;
	qedf_stats->invalid_crc_count +=
	    fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt;
	qedf_stats->dumped_frames =
	    fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
	qedf_stats->error_frames +=
	    fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
	qedf_stats->fcp_input_requests += qedf->input_requests;
	qedf_stats->fcp_output_requests += qedf->output_requests;
	qedf_stats->fcp_control_requests += qedf->control_requests;
	qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
	qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;

	mutex_unlock(&qedf->stats_mutex);
	kfree(fw_fcoe_stats);
out:
	return qedf_stats;
}

static struct fc_function_template qedf_fc_transport_fn = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,

	.get_host_port_id = qedf_get_host_port_id,
	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fc_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,

	/*
	 * Tell FC transport to allocate enough space to store the backpointer
	 * for the associated qedf_rport struct.
	 */
	.dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
				sizeof(struct qedf_rport)),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.get_fc_host_stats = qedf_fc_get_host_stats,
	.issue_fc_host_lip = qedf_fcoe_reset,
	.vport_create = qedf_vport_create,
	.vport_delete = qedf_vport_destroy,
	.vport_disable = qedf_vport_disable,
	.bsg_request = fc_lport_bsg_request,
};

static struct fc_function_template qedf_fc_vport_transport_fn = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,
	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fc_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,
	.dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
				sizeof(struct qedf_rport)),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.get_fc_host_stats = fc_get_host_stats,
	.issue_fc_host_lip = qedf_fcoe_reset,
	.bsg_request = fc_lport_bsg_request,
};

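/*
 * Returns true when the firmware's producer index for this status block
 * has advanced past the driver's recorded CQ producer index, i.e. there
 * are completions on the global queue that have not been processed yet.
 */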
static bool qedf_fp_has_work(struct qedf_fastpath *fp)
{
	struct qedf_ctx *qedf = fp->qedf;
	struct global_queue *que;
	struct qed_sb_info *sb_info = fp->sb_info;
	struct status_block *sb = sb_info->sb_virt;
	u16 prod_idx;

	/* Get the pointer to the global CQ this completion is on */
	que = qedf->global_queues[fp->sb_id];

	/* Be sure all responses have been written to PI */
	rmb();

	/* Get the current firmware producer index */
	prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];

	return (que->cq_prod_idx != prod_idx);
}

/*
 * Interrupt handler code.
 */

/* Process completion queue and copy CQE contents for deferred processing
 *
 * Return true if we should wake the I/O thread, false if not.
 */
static bool qedf_process_completions(struct qedf_fastpath *fp)
{
	struct qedf_ctx *qedf = fp->qedf;
	struct qed_sb_info *sb_info = fp->sb_info;
	struct status_block *sb = sb_info->sb_virt;
	struct global_queue *que;
	u16 prod_idx;
	struct fcoe_cqe *cqe;
	struct qedf_io_work *io_work;
	unsigned int cpu;
	struct qedf_ioreq *io_req = NULL;
	u16 xid;
	u16 new_cqes;
	u32 comp_type;

	/* Get the current firmware producer index */
	prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];

	/* Get the pointer to the global CQ this completion is on */
	que = qedf->global_queues[fp->sb_id];

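	/*
	 * The producer/consumer indices are u16, so when the firmware's
	 * producer has wrapped the new-element count is taken modulo
	 * 0x10000.
	 */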
	/* Calculate the amount of new elements since last processing */
	new_cqes = (prod_idx >= que->cq_prod_idx) ?
	    (prod_idx - que->cq_prod_idx) :
	    0x10000 - que->cq_prod_idx + prod_idx;

	/* Save producer index */
	que->cq_prod_idx = prod_idx;

	while (new_cqes) {
		fp->completions++;
		cqe = &que->cq[que->cq_cons_idx];

		comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
		    FCOE_CQE_CQE_TYPE_MASK;

		/*
		 * Process unsolicited CQEs directly in the interrupt handler
		 * since we need the fastpath ID
		 */
		if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
			    "Unsolicited CQE.\n");
			qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
			/*
			 * Don't add a work list item. Increment the
			 * consumer index and move on.
			 */
			goto inc_idx;
		}

		xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
		io_req = &qedf->cmd_mgr->cmds[xid];

		/*
		 * Figure out which percpu thread we should queue this I/O
		 * on.
		 */
		if (!io_req)
			/* If there is no io_req associated with this CQE
			 * just queue it on CPU 0
			 */
			cpu = 0;
		else {
			cpu = io_req->cpu;
			io_req->int_cpu = smp_processor_id();
		}

		io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
		if (!io_work) {
			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
				   "work for I/O completion.\n");
			continue;
		}
		memset(io_work, 0, sizeof(struct qedf_io_work));

		INIT_WORK(&io_work->work, qedf_fp_io_handler);

		/* Copy contents of CQE for deferred processing */
		memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

		io_work->qedf = fp->qedf;
		io_work->fp = NULL; /* Only used for unsolicited frames */

		queue_work_on(cpu, qedf_io_wq, &io_work->work);

inc_idx:
		que->cq_cons_idx++;
		if (que->cq_cons_idx == fp->cq_num_entries)
			que->cq_cons_idx = 0;
		new_cqes--;
	}

	return true;
}


/* MSI-X fastpath handler code */
static irqreturn_t qedf_msix_handler(int irq, void *dev_id)
{
	struct qedf_fastpath *fp = dev_id;

	if (!fp) {
		QEDF_ERR(NULL, "fp is null.\n");
		return IRQ_HANDLED;
	}
	if (!fp->sb_info) {
		QEDF_ERR(NULL, "fp->sb_info is null.\n");
		return IRQ_HANDLED;
	}

	/*
	 * Disable interrupts for this status block while we process new
	 * completions
	 */
	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);

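	/*
	 * Process completions until the status block is quiescent: with
	 * the IGU interrupt disabled, drain the queue, update the SB
	 * index, and re-check for work before re-enabling the interrupt
	 * so that a completion arriving in between is not missed.
	 */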
	while (1) {
		qedf_process_completions(fp);

		if (qedf_fp_has_work(fp) == 0) {
			/* Update the sb information */
			qed_sb_update_sb_idx(fp->sb_info);

			/* Check for more work */
			rmb();

			if (qedf_fp_has_work(fp) == 0) {
				/* Re-enable interrupts */
				qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
				return IRQ_HANDLED;
			}
		}
	}

	/* Do we ever want to break out of above loop? */
	return IRQ_HANDLED;
}

/* simd handler for MSI/INTa */
static void qedf_simd_int_handler(void *cookie)
{
	/* Cookie is qedf_ctx struct */
	struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;

	QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
}

#define QEDF_SIMD_HANDLER_NUM 0
static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
{
	int i;
	u16 vector_idx = 0;
	u32 vector;

	if (qedf->int_info.msix_cnt) {
		for (i = 0; i < qedf->int_info.used_cnt; i++) {
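			/*
			 * MSI-X vectors are laid out per hardware function;
			 * index to the vector owned by the affinitized hwfn
			 * for this queue.
			 */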
			vector_idx = i * qedf->dev_info.common.num_hwfns +
				qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
				  "Freeing IRQ #%d vector_idx=%d.\n",
				  i, vector_idx);
			vector = qedf->int_info.msix[vector_idx].vector;
			synchronize_irq(vector);
			irq_set_affinity_hint(vector, NULL);
			irq_set_affinity_notifier(vector, NULL);
			free_irq(vector, &qedf->fp_array[i]);
		}
	} else
		qed_ops->common->simd_handler_clean(qedf->cdev,
		    QEDF_SIMD_HANDLER_NUM);

	qedf->int_info.used_cnt = 0;
	qed_ops->common->set_fp_int(qedf->cdev, 0);
}

static int qedf_request_msix_irq(struct qedf_ctx *qedf)
{
	int i, rc, cpu;
	u16 vector_idx = 0;
	u32 vector;

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < qedf->num_queues; i++) {
		vector_idx = i * qedf->dev_info.common.num_hwfns +
			qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
			  "Requesting IRQ #%d vector_idx=%d.\n",
			  i, vector_idx);
		vector = qedf->int_info.msix[vector_idx].vector;
		rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
				 &qedf->fp_array[i]);

		if (rc) {
			QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
			qedf_sync_free_irqs(qedf);
			return rc;
		}

		qedf->int_info.used_cnt++;
		rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	return 0;
}

static int qedf_setup_int(struct qedf_ctx *qedf)
{
	int rc = 0;

	/*
	 * Learn interrupt configuration
	 */
	rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
	if (rc <= 0)
		return -EINVAL;

	rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
	if (rc)
		return -EINVAL;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = "
		   "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt,
		   num_online_cpus());

	if (qedf->int_info.msix_cnt)
		return qedf_request_msix_irq(qedf);

	qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
	    QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
	qedf->int_info.used_cnt = 1;

	QEDF_ERR(&qedf->dbg_ctx,
		 "Cannot load driver due to a lack of MSI-X vectors.\n");
	return -EINVAL;
}

/* Main function for libfc frame reception */
static void qedf_recv_frame(struct qedf_ctx *qedf,
	struct sk_buff *skb)
{
	u32 fr_len;
	struct fc_lport *lport;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof crc_eof;
	struct fc_frame *fp;
	u8 *mac = NULL;
	u8 *dest_mac = NULL;
	struct fcoe_hdr *hp;
	struct qedf_rport *fcport;
	struct fc_lport *vn_port;
	u32 f_ctl;

	lport = qedf->lport;
	if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
		QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n");
		kfree_skb(skb);
		return;
	}

	if (skb_is_nonlinear(skb))
		skb_linearize(skb);
	mac = eth_hdr(skb)->h_source;
	dest_mac = eth_hdr(skb)->h_dest;

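	/*
	 * The frame arrives as: FCoE header (carrying the SOF), the FC
	 * frame itself, then a CRC/EOF trailer. Strip the header here and
	 * trim the trailer below so libfc sees only the FC frame.
	 */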
	/* Pull the header */
	hp = (struct fcoe_hdr *)skb->data;
	fh = (struct fc_frame_header *) skb_transport_header(skb);
	skb_pull(skb, sizeof(struct fcoe_hdr));
	fr_len = skb->len - sizeof(struct fcoe_crc_eof);

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = hp->fcoe_sof;
	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
		QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
		kfree_skb(skb);
		return;
	}
	fr_eof(fp) = crc_eof.fcoe_eof;
	fr_crc(fp) = crc_eof.fcoe_crc32;
	if (pskb_trim(skb, fr_len)) {
		QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
		kfree_skb(skb);
		return;
	}

	fh = fc_frame_header_get(fp);

	/*
	 * Invalid frame filters.
	 */

	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
	    fh->fh_type == FC_TYPE_FCP) {
		/* Drop FCP data. We don't handle it in the L2 path */
		kfree_skb(skb);
		return;
	}
	if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
	    fh->fh_type == FC_TYPE_ELS) {
		switch (fc_frame_payload_op(fp)) {
		case ELS_LOGO:
			if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
				/* drop non-FIP LOGO */
				kfree_skb(skb);
				return;
			}
			break;
		}
	}

	if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
		/* Drop incoming ABTS */
		kfree_skb(skb);
		return;
	}

	if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
		    "FC frame d_id mismatch with MAC %pM.\n", dest_mac);
		kfree_skb(skb);
		return;
	}

	if (qedf->ctlr.state) {
		if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
			    "Wrong source address: mac:%pM dest_addr:%pM.\n",
			    mac, qedf->ctlr.dest_addr);
			kfree_skb(skb);
			return;
		}
	}

	vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));

	/*
	 * If the destination ID from the frame header does not match what we
	 * have on record for lport and the search for a NPIV port came up
	 * empty then this is not addressed to our port so simply drop it.
	 */
	if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
			  "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
			  lport->port_id, ntoh24(fh->fh_d_id));
		kfree_skb(skb);
		return;
	}

	f_ctl = ntoh24(fh->fh_f_ctl);
	if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
	    (f_ctl & FC_FC_EX_CTX)) {
		/* Drop incoming ABTS response that has both SEQ/EX CTX set */
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
			  "Dropping ABTS response as both SEQ/EX CTX set.\n");
		kfree_skb(skb);
		return;
	}

	/*
	 * If a connection is uploading, drop incoming FCoE frames as there
	 * is a small window where we could try to return a frame while libfc
	 * is trying to clean things up.
	 */

	/* Get fcport associated with d_id if it exists */
	fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));

	if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
	    &fcport->flags)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
		    "Connection uploading, dropping fp=%p.\n", fp);
		kfree_skb(skb);
		return;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
	    "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp,
	    ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
	    fh->fh_type);
	if (qedf_dump_frames)
		print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
		    1, skb->data, skb->len, false);
	fc_exch_recv(lport, fp);
}

static void qedf_ll2_process_skb(struct work_struct *work)
{
	struct qedf_skb_work *skb_work =
	    container_of(work, struct qedf_skb_work, work);
	struct qedf_ctx *qedf = skb_work->qedf;
	struct sk_buff *skb = skb_work->skb;
	struct ethhdr *eh;

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL\n");
		goto err_out;
	}

	eh = (struct ethhdr *)skb->data;

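	/*
	 * memmove() below slides the destination/source MAC addresses
	 * (ETH_ALEN * 2 bytes) forward over the 4-byte 802.1Q tag before
	 * the header pointer is advanced past the tag.
	 */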
	/* Undo VLAN encapsulation */
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}

	/*
	 * Process either a FIP frame or FCoE frame based on the
	 * protocol value. If it's not either just drop the
	 * frame.
	 */
	if (eh->h_proto == htons(ETH_P_FIP)) {
		qedf_fip_recv(qedf, skb);
		goto out;
	} else if (eh->h_proto == htons(ETH_P_FCOE)) {
		__skb_pull(skb, ETH_HLEN);
		qedf_recv_frame(qedf, skb);
		goto out;
	} else
		goto err_out;

err_out:
	kfree_skb(skb);
out:
	kfree(skb_work);
	return;
}

static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
	u32 arg1, u32 arg2)
{
	struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
	struct qedf_skb_work *skb_work;

	if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
			  "Dropping frame as link state is down.\n");
		kfree_skb(skb);
		return 0;
	}

	skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
	if (!skb_work) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
			   "dropping frame.\n");
		kfree_skb(skb);
		return 0;
	}

	INIT_WORK(&skb_work->work, qedf_ll2_process_skb);
	skb_work->skb = skb;
	skb_work->qedf = qedf;
	queue_work(qedf->ll2_recv_wq, &skb_work->work);

	return 0;
}

static struct qed_ll2_cb_ops qedf_ll2_cb_ops = {
	.rx_cb = qedf_ll2_rx,
	.tx_cb = NULL,
};

/* Main thread to process I/O completions */
void qedf_fp_io_handler(struct work_struct *work)
{
	struct qedf_io_work *io_work =
	    container_of(work, struct qedf_io_work, work);
	u32 comp_type;

	/*
	 * Deferred part of unsolicited CQE sends
	 * frame to libfc.
	 */
	comp_type = (io_work->cqe.cqe_data >>
	    FCOE_CQE_CQE_TYPE_SHIFT) &
	    FCOE_CQE_CQE_TYPE_MASK;
	if (comp_type == FCOE_UNSOLIC_CQE_TYPE &&
	    io_work->fp)
		fc_exch_recv(io_work->qedf->lport, io_work->fp);
	else
		qedf_process_cqe(io_work->qedf, &io_work->cqe);

	kfree(io_work);
}

static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
	struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int ret;

	sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
	    sizeof(struct status_block), &sb_phys, GFP_KERNEL);

	if (!sb_virt) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Status block allocation failed for id = %d.\n",
			 sb_id);
		return -ENOMEM;
	}

	ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
	    sb_id, QED_SB_TYPE_STORAGE);

	if (ret) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Status block initialization failed (0x%x) for id = %d.\n",
			 ret, sb_id);
		return ret;
	}

	return 0;
}

static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
{
	if (sb_info->sb_virt)
		dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
		    (void *)sb_info->sb_virt, sb_info->sb_phys);
}

static void qedf_destroy_sb(struct qedf_ctx *qedf)
{
	int id;
	struct qedf_fastpath *fp = NULL;

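	/* Stop at the first entry whose status block was never set up. */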
	for (id = 0; id < qedf->num_queues; id++) {
		fp = &(qedf->fp_array[id]);
		if (fp->sb_id == QEDF_SB_ID_NULL)
			break;
		qedf_free_sb(qedf, fp->sb_info);
		kfree(fp->sb_info);
	}
	kfree(qedf->fp_array);
}

static int qedf_prepare_sb(struct qedf_ctx *qedf)
{
	int id;
	struct qedf_fastpath *fp;
	int ret;

	qedf->fp_array =
	    kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
		GFP_KERNEL);

	if (!qedf->fp_array) {
		QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
			  "failed.\n");
		return -ENOMEM;
	}

	for (id = 0; id < qedf->num_queues; id++) {
		fp = &(qedf->fp_array[id]);
		fp->sb_id = QEDF_SB_ID_NULL;
		fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
				  "allocation failed.\n");
			return -ENOMEM;
		}
		ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
		if (ret) {
			QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
				  "initialization failed.\n");
			return ret;
		}
		fp->sb_id = id;
		fp->qedf = qedf;
		fp->cq_num_entries =
		    qedf->global_queues[id]->cq_mem_size /
		    sizeof(struct fcoe_cqe);
	}

	return 0;
}

void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
{
	u16 xid;
	struct qedf_ioreq *io_req;
	struct qedf_rport *fcport;
	u32 comp_type;
	u8 io_comp_type;
	unsigned long flags;

	comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
	    FCOE_CQE_CQE_TYPE_MASK;

	xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
	io_req = &qedf->cmd_mgr->cmds[xid];

	/* Completion not for a valid I/O anymore so just return */
	if (!io_req) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req is NULL for xid=0x%x.\n", xid);
		return;
	}

	fcport = io_req->fcport;

	if (fcport == NULL) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "fcport is NULL for xid=0x%x io_req=%p.\n",
			 xid, io_req);
		return;
	}

	/*
	 * Check that fcport is offloaded. If it isn't then the spinlock
	 * isn't valid and shouldn't be taken. We should just return.
	 */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Session not offloaded yet, fcport = %p.\n", fcport);
		return;
	}

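	/*
	 * Sample the command type under the rport lock so a concurrent
	 * abort or cleanup cannot retype the request between here and the
	 * dispatch below.
	 */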
	spin_lock_irqsave(&fcport->rport_lock, flags);
	io_comp_type = io_req->cmd_type;
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	switch (comp_type) {
	case FCOE_GOOD_COMPLETION_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		switch (io_comp_type) {
		case QEDF_SCSI_CMD:
			qedf_scsi_completion(qedf, cqe, io_req);
			break;
		case QEDF_ELS:
			qedf_process_els_compl(qedf, cqe, io_req);
			break;
		case QEDF_TASK_MGMT_CMD:
			qedf_process_tmf_compl(qedf, cqe, io_req);
			break;
		case QEDF_SEQ_CLEANUP:
			qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
			break;
		}
		break;
	case FCOE_ERROR_DETECTION_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Error detect CQE.\n");
		qedf_process_error_detect(qedf, cqe, io_req);
		break;
	case FCOE_EXCH_CLEANUP_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Cleanup CQE.\n");
		qedf_process_cleanup_compl(qedf, cqe, io_req);
		break;
	case FCOE_ABTS_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Abort CQE.\n");
		qedf_process_abts_compl(qedf, cqe, io_req);
		break;
	case FCOE_DUMMY_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Dummy CQE.\n");
		break;
	case FCOE_LOCAL_COMP_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Local completion CQE.\n");
		break;
	case FCOE_WARNING_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Warning CQE.\n");
		qedf_process_warning_compl(qedf, cqe, io_req);
		break;
	case MAX_FCOE_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Max FCoE CQE.\n");
		break;
	default:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Default CQE.\n");
		break;
	}
}

static void qedf_free_bdq(struct qedf_ctx *qedf)
{
	int i;

	if (qedf->bdq_pbl_list)
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);

	if (qedf->bdq_pbl)
		dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
		    qedf->bdq_pbl, qedf->bdq_pbl_dma);

	for (i = 0; i < QEDF_BDQ_SIZE; i++) {
		if (qedf->bdq[i].buf_addr) {
			dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
			    qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
		}
	}
}

static void qedf_free_global_queues(struct qedf_ctx *qedf)
{
	int i;
	struct global_queue **gl = qedf->global_queues;

	for (i = 0; i < qedf->num_queues; i++) {
		if (!gl[i])
			continue;

		if (gl[i]->cq)
			dma_free_coherent(&qedf->pdev->dev,
			    gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
		if (gl[i]->cq_pbl)
			dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
			    gl[i]->cq_pbl, gl[i]->cq_pbl_dma);

		kfree(gl[i]);
	}

	qedf_free_bdq(qedf);
}

static int qedf_alloc_bdq(struct qedf_ctx *qedf)
{
	int i;
	struct scsi_bd *pbl;
	u64 *list;

	/* Alloc dma memory for BDQ buffers */
	for (i = 0; i < QEDF_BDQ_SIZE; i++) {
		qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
		if (!qedf->bdq[i].buf_addr) {
			QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
			    "buffer %d.\n", i);
			return -ENOMEM;
		}
	}

	/* Alloc dma memory for BDQ page buffer list */
	qedf->bdq_pbl_mem_size =
	    QEDF_BDQ_SIZE * sizeof(struct scsi_bd);
	qedf->bdq_pbl_mem_size =
	    ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);

	qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
	    qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
	if (!qedf->bdq_pbl) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
		return -ENOMEM;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "BDQ PBL addr=0x%p dma=%pad\n",
		  qedf->bdq_pbl, &qedf->bdq_pbl_dma);

	/*
	 * Populate BDQ PBL with physical and virtual address of individual
	 * BDQ buffers
	 */
	pbl = (struct scsi_bd *)qedf->bdq_pbl;
	for (i = 0; i < QEDF_BDQ_SIZE; i++) {
		pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
		pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
		pbl->opaque.fcoe_opaque.hi = 0;
		/* Opaque lo data is an index into the BDQ array */
		pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
		pbl++;
	}

	/* Allocate list of PBL pages */
	qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
						QEDF_PAGE_SIZE,
						&qedf->bdq_pbl_list_dma,
						GFP_KERNEL);
	if (!qedf->bdq_pbl_list) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
		return -ENOMEM;
	}

	/*
	 * Now populate PBL list with pages that contain pointers to the
	 * individual buffers.
	 */
	qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
	    QEDF_PAGE_SIZE;
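	/*
	 * Note: every entry below is written with the same PBL base
	 * address. This is only safe while the BDQ PBL allocated above
	 * fits in a single page (as it does with the default
	 * QEDF_BDQ_SIZE), leaving exactly one entry.
	 */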
	list = (u64 *)qedf->bdq_pbl_list;
	for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
		*list = qedf->bdq_pbl_dma;
		list++;
	}

	return 0;
}

static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
{
	u32 *list;
	int i;
	int status;
	u32 *pbl;
	dma_addr_t page;
	int num_pages;

	/* Allocate and map CQs, RQs */
	/*
	 * Number of global queues (CQ / RQ). This should
	 * be <= number of available MSIX vectors for the PF
	 */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
		return -ENOMEM;
	}

	/*
	 * Make sure we allocated the PBL that will contain the physical
	 * addresses of our queues
	 */
	if (!qedf->p_cpuq) {
		QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
		return -EINVAL;
	}

	qedf->global_queues = kzalloc((sizeof(struct global_queue *)
	    * qedf->num_queues), GFP_KERNEL);
	if (!qedf->global_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
			  "queues array ptr memory\n");
		return -ENOMEM;
	}
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		   "qedf->global_queues=%p.\n", qedf->global_queues);

	/* Allocate DMA coherent buffers for BDQ */
	status = qedf_alloc_bdq(qedf);
	if (status) {
		QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
		goto mem_alloc_failure;
	}

	/* Allocate a CQ and an associated PBL for each MSI-X vector */
	for (i = 0; i < qedf->num_queues; i++) {
		qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
		    GFP_KERNEL);
		if (!qedf->global_queues[i]) {
			QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
				   "global queue %d.\n", i);
			status = -ENOMEM;
			goto mem_alloc_failure;
		}

		qedf->global_queues[i]->cq_mem_size =
		    FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
		qedf->global_queues[i]->cq_mem_size =
		    ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);

		qedf->global_queues[i]->cq_pbl_size =
		    (qedf->global_queues[i]->cq_mem_size /
		    PAGE_SIZE) * sizeof(void *);
		qedf->global_queues[i]->cq_pbl_size =
		    ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);

		qedf->global_queues[i]->cq =
		    dma_alloc_coherent(&qedf->pdev->dev,
			qedf->global_queues[i]->cq_mem_size,
			&qedf->global_queues[i]->cq_dma,
			GFP_KERNEL);

		if (!qedf->global_queues[i]->cq) {
			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
			status = -ENOMEM;
			goto mem_alloc_failure;
		}

		qedf->global_queues[i]->cq_pbl =
		    dma_alloc_coherent(&qedf->pdev->dev,
			qedf->global_queues[i]->cq_pbl_size,
			&qedf->global_queues[i]->cq_pbl_dma,
			GFP_KERNEL);

		if (!qedf->global_queues[i]->cq_pbl) {
			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
			status = -ENOMEM;
			goto mem_alloc_failure;
		}

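		/*
		 * The PBL is an array of {lo, hi} 32-bit pairs, one pair
		 * per CQ page, in the order the firmware walks the queue.
		 */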
		/* Create PBL */
		num_pages = qedf->global_queues[i]->cq_mem_size /
		    QEDF_PAGE_SIZE;
		page = qedf->global_queues[i]->cq_dma;
		pbl = (u32 *)qedf->global_queues[i]->cq_pbl;

		while (num_pages--) {
			*pbl = U64_LO(page);
			pbl++;
			*pbl = U64_HI(page);
			pbl++;
			page += QEDF_PAGE_SIZE;
		}
		/* Set the initial consumer index for cq */
		qedf->global_queues[i]->cq_cons_idx = 0;
	}

	list = (u32 *)qedf->p_cpuq;

	/*
	 * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
	 * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
	 * to the physical address which contains an array of pointers to
	 * the physical addresses of the specific queue pages.
	 */
	for (i = 0; i < qedf->num_queues; i++) {
		*list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
		list++;
		*list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
		list++;
		*list = U64_LO(0);
		list++;
		*list = U64_HI(0);
		list++;
	}

	return 0;

mem_alloc_failure:
	qedf_free_global_queues(qedf);
	return status;
}

static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
{
	u8 sq_num_pbl_pages;
	u32 sq_mem_size;
	u32 cq_mem_size;
	u32 cq_num_entries;
	int rval;

	/*
	 * The number of completion queues/fastpath interrupts/status blocks
	 * we allocate is the minimum of:
	 *
	 * Number of CPUs
	 * Number allocated by qed for our PCI function
	 */
	qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
		   qedf->num_queues);

	qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
	    qedf->num_queues * sizeof(struct qedf_glbl_q_params),
	    &qedf->hw_p_cpuq, GFP_KERNEL);

	if (!qedf->p_cpuq) {
		QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
		return 1;
	}

	rval = qedf_alloc_global_queues(qedf);
	if (rval) {
		QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation "
			  "failed.\n");
		return 1;
	}

	/* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */
	sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
	sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE);
	sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE);

	/* Calculate CQ num entries */
	cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
	cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
	cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);

	memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));

	/* Setup the value for fcoe PF */
	qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
	qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
	qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
	    (u64)qedf->hw_p_cpuq;
	qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;

	qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;

	qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
	qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;

	/* log_page_size: 12 for 4KB pages */
	qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);

	qedf->pf_params.fcoe_pf_params.mtu = 9000;
	qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
	qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;

	/* BDQ address and size */
	qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
	    qedf->bdq_pbl_list_dma;
	qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
	    qedf->bdq_pbl_list_num_entries;
	qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n",
		  qedf->bdq_pbl_list,
		  qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
		  qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "cq_num_entries=%d.\n",
		  qedf->pf_params.fcoe_pf_params.cq_num_entries);

	return 0;
}

/* Free DMA coherent memory for array of queue pointers we pass to qed */
static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
{
	size_t size = 0;

	if (qedf->p_cpuq) {
		size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
		dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
		    qedf->hw_p_cpuq);
	}

	qedf_free_global_queues(qedf);

	kfree(qedf->global_queues);
}

/*
 * PCI driver functions
 */

static const struct pci_device_id qedf_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) },
	{0}
};
MODULE_DEVICE_TABLE(pci, qedf_pci_tbl);

static struct pci_driver qedf_pci_driver = {
	.name = QEDF_MODULE_NAME,
	.id_table = qedf_pci_tbl,
	.probe = qedf_probe,
	.remove = qedf_remove,
	.shutdown = qedf_shutdown,
	.suspend = qedf_suspend,
};

static int __qedf_probe(struct pci_dev *pdev, int mode)
{
	int rc = -EINVAL;
	struct fc_lport *lport;
	struct qedf_ctx *qedf = NULL;
	struct Scsi_Host *host;
	bool is_vf = false;
	struct qed_ll2_params params;
	char host_buf[20];
	struct qed_link_params link_params;
	int status;
	void *task_start, *task_end;
	struct qed_slowpath_params slowpath_params;
	struct qed_probe_params qed_params;
	u16 retry_cnt = 10;

	/*
	 * When doing error recovery we didn't reap the lport so don't try
	 * to reallocate it.
	 */
retry_probe:
	if (mode == QEDF_MODE_RECOVERY)
		msleep(2000);

	if (mode != QEDF_MODE_RECOVERY) {
		lport = libfc_host_alloc(&qedf_host_template,
		    sizeof(struct qedf_ctx));

		if (!lport) {
			QEDF_ERR(NULL, "Could not allocate lport.\n");
			rc = -ENOMEM;
			goto err0;
		}

		fc_disc_init(lport);

		/* Initialize qedf_ctx */
		qedf = lport_priv(lport);
		set_bit(QEDF_PROBING, &qedf->flags);
		qedf->lport = lport;
		qedf->ctlr.lp = lport;
		qedf->pdev = pdev;
		qedf->dbg_ctx.pdev = pdev;
		qedf->dbg_ctx.host_no = lport->host->host_no;
		spin_lock_init(&qedf->hba_lock);
		INIT_LIST_HEAD(&qedf->fcports);
		qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
		atomic_set(&qedf->num_offloads, 0);
		qedf->stop_io_on_error = false;
		pci_set_drvdata(pdev, qedf);
		init_completion(&qedf->fipvlan_compl);
		mutex_init(&qedf->stats_mutex);
		mutex_init(&qedf->flush_mutex);
		qedf->flogi_pending = 0;

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
		   "QLogic FastLinQ FCoE Module qedf %s, "
		   "FW %d.%d.%d.%d\n", QEDF_VERSION,
		   FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
		   FW_ENGINEERING_VERSION);
	} else {
		/* Init pointers during recovery */
		qedf = pci_get_drvdata(pdev);
		set_bit(QEDF_PROBING, &qedf->flags);
		lport = qedf->lport;
	}

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");

	host = lport->host;

	/* Allocate mempool for qedf_io_work structs */
	qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
	    qedf_io_work_cache);
	if (qedf->io_mempool == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
		goto err1;
	}
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
	    qedf->io_mempool);

	sprintf(host_buf, "qedf_%u_link",
	    qedf->lport->host->host_no);
	qedf->link_update_wq = create_workqueue(host_buf);
	INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
	INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
	INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
	INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
	qedf->fipvlan_retries = qedf_fipvlan_retries;
	/* Set a default prio in case DCBX doesn't converge */
	if (qedf_default_prio > -1) {
		/*
		 * This is the case where we pass a modparam in so we want to
		 * honor it even if dcbx doesn't converge.
		 */
		qedf->prio = qedf_default_prio;
	} else
		qedf->prio = QEDF_DEFAULT_PRIO;

	/*
	 * Common probe. Takes care of basic hardware init and pci_*
	 * functions.
	 */
	memset(&qed_params, 0, sizeof(qed_params));
	qed_params.protocol = QED_PROTOCOL_FCOE;
	qed_params.dp_module = qedf_dp_module;
	qed_params.dp_level = qedf_dp_level;
	qed_params.is_vf = is_vf;
	qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
	if (!qedf->cdev) {
		if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
			QEDF_ERR(&qedf->dbg_ctx,
				 "Retry %d initialize hardware\n", retry_cnt);
			retry_cnt--;
			goto retry_probe;
		}
		QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
		rc = -ENODEV;
		goto err1;
	}

	/* Learn information crucial for qedf to progress */
	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
		goto err1;
	}

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
		  "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
		  qedf->dev_info.common.num_hwfns,
		  qed_ops->common->get_affin_hwfn_idx(qedf->cdev));

	/* queue allocation code should come here
	 * order should be
	 * slowpath_start
	 * status block allocation
	 * interrupt registration (to get min number of queues)
	 * set_fcoe_pf_param
	 * qed_sp_fcoe_func_start
	 */
	rc = qedf_set_fcoe_pf_param(qedf);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
		goto err2;
	}
	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);

	/* Learn information crucial for qedf to progress */
	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
	if (rc) {
		QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
		goto err2;
	}

	if (mode != QEDF_MODE_RECOVERY) {
		qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
		if (IS_ERR(qedf->devlink)) {
			QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
			rc = PTR_ERR(qedf->devlink);
			qedf->devlink = NULL;
			goto err2;
		}
	}

	/* Record BDQ producer doorbell addresses */
	qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
	qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "BDQ primary_prod=%p secondary_prod=%p.\n",
		  qedf->bdq_primary_prod, qedf->bdq_secondary_prod);

	qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);

	rc = qedf_prepare_sb(qedf);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot prepare status blocks.\n");
		goto err2;
	}

	/* Start the Slowpath-process */
	slowpath_params.int_mode = QED_INT_MODE_MSIX;
	slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
	slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
	slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
	slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
	strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
		goto err2;
	}

	/*
	 * update_pf_params needs to be called before and after slowpath
	 * start
	 */
	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);

	/* Setup interrupts */
	rc = qedf_setup_int(qedf);
	if (rc) {
		QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
		goto err3;
	}

	rc = qed_ops->start(qedf->cdev, &qedf->tasks);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
		goto err4;
	}
	task_start = qedf_get_task_mem(&qedf->tasks, 0);
	task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
		   "end=%p block_size=%u.\n", task_start, task_end,
		   qedf->tasks.size);

	/*
	 * We need to write the number of BDs in the BDQ we've preallocated so
	 * the f/w will do a prefetch and we'll get an unsolicited CQE when a
	 * packet arrives.
	 */
	qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "Writing %d to primary and secondary BDQ doorbell registers.\n",
		  qedf->bdq_prod_idx);
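	/* The reads below flush the posted doorbell writes to the adapter. */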
3511 writew(val: qedf->bdq_prod_idx, addr: qedf->bdq_primary_prod);
3512 readw(addr: qedf->bdq_primary_prod);
3513 writew(val: qedf->bdq_prod_idx, addr: qedf->bdq_secondary_prod);
3514 readw(addr: qedf->bdq_secondary_prod);
3515
3516 qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3517
3518 /* Now that the dev_info struct has been filled in set the MAC
3519 * address
3520 */
3521 ether_addr_copy(dst: qedf->mac, src: qedf->dev_info.common.hw_mac);
3522 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
3523 qedf->mac);
3524
3525 /*
3526 * Set the WWNN and WWPN in the following way:
3527 *
3528 * If the info we get from qed is non-zero then use that to set the
3529 * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based
3530 * on the MAC address.
3531 */
3532 if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
3533 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3534 "Setting WWPN and WWNN from qed dev_info.\n");
3535 qedf->wwnn = qedf->dev_info.wwnn;
3536 qedf->wwpn = qedf->dev_info.wwpn;
3537 } else {
3538 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3539 "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n");
3540 qedf->wwnn = fcoe_wwn_from_mac(mac: qedf->mac, scheme: 1, port: 0);
3541 qedf->wwpn = fcoe_wwn_from_mac(mac: qedf->mac, scheme: 2, port: 0);
3542 }
3543 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx "
3544 "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
3545
3546 sprintf(buf: host_buf, fmt: "host_%d", host->host_no);
3547 qed_ops->common->set_name(qedf->cdev, host_buf);
3548
3549 /* Allocate cmd mgr */
3550 qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
3551 if (!qedf->cmd_mgr) {
3552 QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
3553 rc = -ENOMEM;
3554 goto err5;
3555 }
3556
3557 if (mode != QEDF_MODE_RECOVERY) {
3558 host->transportt = qedf_fc_transport_template;
3559 host->max_lun = qedf_max_lun;
3560 host->max_cmd_len = QEDF_MAX_CDB_LEN;
3561 host->max_id = QEDF_MAX_SESSIONS;
3562 host->can_queue = FCOE_PARAMS_NUM_TASKS;
3563 rc = scsi_add_host(host, dev: &pdev->dev);
3564 if (rc) {
3565 QEDF_WARN(&qedf->dbg_ctx,
3566 "Error adding Scsi_Host rc=0x%x.\n", rc);
3567 goto err6;
3568 }
3569 }
3570
	memset(&params, 0, sizeof(params));
	params.mtu = QEDF_LL2_BUF_SIZE;
	ether_addr_copy(params.ll2_mac_address, qedf->mac);

	/* Start LL2 processing thread */
	snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
	qedf->ll2_recv_wq = create_workqueue(host_buf);
	if (!qedf->ll2_recv_wq) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to create LL2 workqueue.\n");
		rc = -ENOMEM;
		goto err7;
	}

#ifdef CONFIG_DEBUG_FS
	qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops,
	    qedf_dbg_fops);
#endif

	/* Start LL2 */
	qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
	rc = qed_ops->ll2->start(qedf->cdev, &params);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
		goto err7;
	}
	set_bit(QEDF_LL2_STARTED, &qedf->flags);

	/* Set initial FIP/FCoE VLAN to 0 (unset) */
	qedf->vlan_id = 0;

	/*
	 * No need to set up fcoe_ctlr or fc_lport objects during recovery
	 * since they were not reaped during the unload process.
	 */
	if (mode != QEDF_MODE_RECOVERY) {
		/* Set up the embedded fcoe controller */
		qedf_fcoe_ctlr_setup(qedf);

		/* Set up the lport */
		rc = qedf_lport_setup(qedf);
		if (rc) {
			QEDF_ERR(&(qedf->dbg_ctx),
			    "qedf_lport_setup failed.\n");
			goto err7;
		}
	}

	sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
	qedf->timer_work_queue = create_workqueue(host_buf);
	if (!qedf->timer_work_queue) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "Failed to start timer workqueue.\n");
		rc = -ENOMEM;
		goto err7;
	}

	/* DPC workqueue is not reaped during recovery unload */
	if (mode != QEDF_MODE_RECOVERY) {
		sprintf(host_buf, "qedf_%u_dpc",
		    qedf->lport->host->host_no);
		qedf->dpc_wq = create_workqueue(host_buf);
		if (!qedf->dpc_wq) {
			QEDF_ERR(&(qedf->dbg_ctx),
			    "Failed to create DPC workqueue.\n");
			rc = -ENOMEM;
			goto err7;
		}
	}
	INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);

	/*
	 * GRC dump and sysfs parameters are not reaped during the recovery
	 * unload process.
	 */
	if (mode != QEDF_MODE_RECOVERY) {
		qedf->grcdump_size =
		    qed_ops->common->dbg_all_data_size(qedf->cdev);
		if (qedf->grcdump_size) {
			rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
			    qedf->grcdump_size);
			if (rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "GRC Dump buffer alloc failed.\n");
				qedf->grcdump = NULL;
			}

			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
			    "grcdump: addr=%p, size=%u.\n",
			    qedf->grcdump, qedf->grcdump_size);
		}
		qedf_create_sysfs_ctx_attr(qedf);

		/* Initialize I/O tracing for this adapter */
		spin_lock_init(&qedf->io_trace_lock);
		qedf->io_trace_idx = 0;
	}

	init_completion(&qedf->flogi_compl);

	status = qed_ops->common->update_drv_state(qedf->cdev, true);
	if (status)
		QEDF_ERR(&(qedf->dbg_ctx),
		    "Failed to send drv state to MFW.\n");

	memset(&link_params, 0, sizeof(struct qed_link_params));
	link_params.link_up = true;
	status = qed_ops->common->set_link(qedf->cdev, &link_params);
	if (status)
		QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");

	/* Start/restart discovery */
	if (mode == QEDF_MODE_RECOVERY)
		fcoe_ctlr_link_up(&qedf->ctlr);
	else
		fc_fabric_login(lport);

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");

	clear_bit(QEDF_PROBING, &qedf->flags);

	/* All good */
	return 0;

err7:
	if (qedf->ll2_recv_wq)
		destroy_workqueue(qedf->ll2_recv_wq);
	fc_remove_host(qedf->lport->host);
	scsi_remove_host(qedf->lport->host);
#ifdef CONFIG_DEBUG_FS
	qedf_dbg_host_exit(&(qedf->dbg_ctx));
#endif
err6:
	qedf_cmd_mgr_free(qedf->cmd_mgr);
err5:
	qed_ops->stop(qedf->cdev);
err4:
	qedf_free_fcoe_pf_param(qedf);
	qedf_sync_free_irqs(qedf);
err3:
	qed_ops->common->slowpath_stop(qedf->cdev);
err2:
	qed_ops->common->remove(qedf->cdev);
err1:
	scsi_host_put(lport->host);
err0:
	return rc;
}

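/* PCI probe entry point; always performs a full (non-recovery) bring-up. */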
static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return __qedf_probe(pdev, QEDF_MODE_NORMAL);
}

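/*
 * Tear down a PCI function. In QEDF_MODE_RECOVERY only the hardware
 * resources are released; the Scsi_Host, lport and fcoe_ctlr objects are
 * preserved so that __qedf_probe() can later reattach to them.
 */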
static void __qedf_remove(struct pci_dev *pdev, int mode)
{
	struct qedf_ctx *qedf;
	int rc;

	if (!pdev) {
		QEDF_ERR(NULL, "pdev is NULL.\n");
		return;
	}

	qedf = pci_get_drvdata(pdev);

	/*
	 * Prevent a race where we are in the board disable work and then
	 * try to rmmod the module.
	 */
	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
		return;
	}

	if (mode != QEDF_MODE_RECOVERY)
		set_bit(QEDF_UNLOADING, &qedf->flags);

	/* Logoff the fabric to upload all connections */
	if (mode == QEDF_MODE_RECOVERY)
		fcoe_ctlr_link_down(&qedf->ctlr);
	else
		fc_fabric_logoff(qedf->lport);

	if (!qedf_wait_for_upload(qedf))
		QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");

#ifdef CONFIG_DEBUG_FS
	qedf_dbg_host_exit(&(qedf->dbg_ctx));
#endif

	/* Stop any link update handling */
	cancel_delayed_work_sync(&qedf->link_update);
	destroy_workqueue(qedf->link_update_wq);
	qedf->link_update_wq = NULL;

	if (qedf->timer_work_queue)
		destroy_workqueue(qedf->timer_work_queue);

	/* Stop Light L2 */
	clear_bit(QEDF_LL2_STARTED, &qedf->flags);
	qed_ops->ll2->stop(qedf->cdev);
	if (qedf->ll2_recv_wq)
		destroy_workqueue(qedf->ll2_recv_wq);

	/* Stop fastpath */
	qedf_sync_free_irqs(qedf);
	qedf_destroy_sb(qedf);

	/*
	 * During recovery don't destroy OS constructs that represent the
	 * physical port.
	 */
	if (mode != QEDF_MODE_RECOVERY) {
		qedf_free_grc_dump_buf(&qedf->grcdump);
		qedf_remove_sysfs_ctx_attr(qedf);

		/* Remove all SCSI/libfc/libfcoe structures */
		fcoe_ctlr_destroy(&qedf->ctlr);
		fc_lport_destroy(qedf->lport);
		fc_remove_host(qedf->lport->host);
		scsi_remove_host(qedf->lport->host);
	}

	qedf_cmd_mgr_free(qedf->cmd_mgr);

	if (mode != QEDF_MODE_RECOVERY) {
		fc_exch_mgr_free(qedf->lport);
		fc_lport_free_stats(qedf->lport);

		/* Wait for all vports to be reaped */
		qedf_wait_for_vport_destroy(qedf);
	}

	/*
	 * Now that all connections have been uploaded we can stop the
	 * rest of the qed operations.
	 */
	qed_ops->stop(qedf->cdev);

	if (mode != QEDF_MODE_RECOVERY) {
		if (qedf->dpc_wq) {
			/* Stop general DPC handling */
			destroy_workqueue(qedf->dpc_wq);
			qedf->dpc_wq = NULL;
		}
	}

	/* Final shutdown for the board */
	qedf_free_fcoe_pf_param(qedf);
	if (mode != QEDF_MODE_RECOVERY) {
		qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
		pci_set_drvdata(pdev, NULL);
	}

	rc = qed_ops->common->update_drv_state(qedf->cdev, false);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx),
		    "Failed to send drv state to MFW.\n");

	if (mode != QEDF_MODE_RECOVERY && qedf->devlink) {
		qed_ops->common->devlink_unregister(qedf->devlink);
		qedf->devlink = NULL;
	}

	qed_ops->common->slowpath_stop(qedf->cdev);
	qed_ops->common->remove(qedf->cdev);

	mempool_destroy(qedf->io_mempool);

	/* Only reap the Scsi_Host on a real removal */
	if (mode != QEDF_MODE_RECOVERY)
		scsi_host_put(qedf->lport->host);
}

static void qedf_remove(struct pci_dev *pdev)
{
	/* Check to make sure this function wasn't already disabled */
	if (!atomic_read(&pdev->enable_cnt))
		return;

	__qedf_remove(pdev, QEDF_MODE_NORMAL);
}

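/* Deferred work function that captures a GRC dump for later retrieval. */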
void qedf_wq_grcdump(struct work_struct *work)
{
	struct qedf_ctx *qedf =
	    container_of(work, struct qedf_ctx, grcdump_work.work);

	QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n");
	qedf_capture_grc_dump(qedf);
}

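/*
 * Called by qed when the device reports a hardware error. A fan failure
 * schedules the board disable work; other error types mask further HW
 * attentions, and a ramrod failure may additionally trigger devlink-based
 * recovery if it is enabled.
 */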
void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
{
	struct qedf_ctx *qedf = dev;

	QEDF_ERR(&(qedf->dbg_ctx),
	    "Hardware error handler scheduled, event=%d.\n", err_type);

	if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "Already in recovery, not scheduling board disable work.\n");
		return;
	}

	switch (err_type) {
	case QED_HW_ERR_FAN_FAIL:
		schedule_delayed_work(&qedf->board_disable_work, 0);
		break;
	case QED_HW_ERR_MFW_RESP_FAIL:
	case QED_HW_ERR_HW_ATTN:
	case QED_HW_ERR_DMAE_FAIL:
	case QED_HW_ERR_FW_ASSERT:
		/* Prevent HW attentions from being reasserted */
		qed_ops->common->attn_clr_enable(qedf->cdev, true);
		break;
	case QED_HW_ERR_RAMROD_FAIL:
		/* Prevent HW attentions from being reasserted */
		qed_ops->common->attn_clr_enable(qedf->cdev, true);

		if (qedf_enable_recovery && qedf->devlink)
			qed_ops->common->report_fatal_error(qedf->devlink,
			    err_type);

		break;
	default:
		break;
	}
}

/*
 * Protocol TLV handler: fill in the FCoE TLV block that the management
 * firmware requests through the qed core module.
 */
void qedf_get_protocol_tlv_data(void *dev, void *data)
{
	struct qedf_ctx *qedf = dev;
	struct qed_mfw_tlv_fcoe *fcoe = data;
	struct fc_lport *lport;
	struct Scsi_Host *host;
	struct fc_host_attrs *fc_host;
	struct fc_host_statistics *hst;

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is null.\n");
		return;
	}

	if (test_bit(QEDF_PROBING, &qedf->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
		return;
	}

	lport = qedf->lport;
	host = lport->host;
	fc_host = shost_to_fc_host(host);

	/* Force a refresh of the fc_host stats, including offload stats */
	hst = qedf_fc_get_host_stats(host);

	fcoe->qos_pri_set = true;
	fcoe->qos_pri = 3; /* Hard coded to 3 in the driver */

	fcoe->ra_tov_set = true;
	fcoe->ra_tov = lport->r_a_tov;

	fcoe->ed_tov_set = true;
	fcoe->ed_tov = lport->e_d_tov;

	fcoe->npiv_state_set = true;
	fcoe->npiv_state = 1; /* NPIV always enabled */

	fcoe->num_npiv_ids_set = true;
	fcoe->num_npiv_ids = fc_host->npiv_vports_inuse;

	/* Certain attributes are only set once an FCF has been selected */
	if (qedf->ctlr.sel_fcf) {
		fcoe->switch_name_set = true;
		u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
	}

	fcoe->port_state_set = true;
	/* For qedf we're either link down or fabric attached */
	if (lport->link_up)
		fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC;
	else
		fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE;

	fcoe->link_failures_set = true;
	fcoe->link_failures = (u16)hst->link_failure_count;

	fcoe->fcoe_txq_depth_set = true;
	fcoe->fcoe_rxq_depth_set = true;
	fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS;
	fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS;

	fcoe->fcoe_rx_frames_set = true;
	fcoe->fcoe_rx_frames = hst->rx_frames;

	fcoe->fcoe_tx_frames_set = true;
	fcoe->fcoe_tx_frames = hst->tx_frames;

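	/* hst counts FCP traffic in megabytes; the TLV fields want bytes */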
	fcoe->fcoe_rx_bytes_set = true;
	fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000;

	fcoe->fcoe_tx_bytes_set = true;
	fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000;

	fcoe->crc_count_set = true;
	fcoe->crc_count = hst->invalid_crc_count;

	fcoe->tx_abts_set = true;
	fcoe->tx_abts = hst->fcp_packet_aborts;

	fcoe->tx_lun_rst_set = true;
	fcoe->tx_lun_rst = qedf->lun_resets;

	fcoe->abort_task_sets_set = true;
	fcoe->abort_task_sets = qedf->packet_aborts;

	fcoe->scsi_busy_set = true;
	fcoe->scsi_busy = qedf->busy;

	fcoe->scsi_tsk_full_set = true;
	fcoe->scsi_tsk_full = qedf->task_set_fulls;
}

/* Deferred work function to perform a soft context reset on STAG change */
void qedf_stag_change_work(struct work_struct *work)
{
	struct qedf_ctx *qedf =
	    container_of(work, struct qedf_ctx, stag_work.work);

	printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.\n",
	    dev_name(&qedf->pdev->dev), __func__, __LINE__,
	    qedf->dbg_ctx.host_no);
	qedf_ctx_soft_reset(qedf->lport);
}

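/* PCI shutdown callback; does a full removal to quiesce the device. */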
static void qedf_shutdown(struct pci_dev *pdev)
{
	__qedf_remove(pdev, QEDF_MODE_NORMAL);
}

static int qedf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct qedf_ctx *qedf;

	if (!pdev) {
		QEDF_ERR(NULL, "pdev is NULL.\n");
		return -ENODEV;
	}

	qedf = pci_get_drvdata(pdev);

	QEDF_ERR(&qedf->dbg_ctx,
	    "%s: Device does not support suspend operation\n", __func__);

	return -EPERM;
}

/*
 * Recovery handler code
 */
static void qedf_schedule_recovery_handler(void *dev)
{
	struct qedf_ctx *qedf = dev;

	QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
	schedule_delayed_work(&qedf->recovery_work, 0);
}

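/*
 * Process-context recovery worker: removes and re-probes the function in
 * QEDF_MODE_RECOVERY so the OS-visible port objects survive the reset.
 */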
static void qedf_recovery_handler(struct work_struct *work)
{
	struct qedf_ctx *qedf =
	    container_of(work, struct qedf_ctx, recovery_work.work);

	if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
		return;

	/*
	 * Call common_ops->recovery_prolog to allow the MFW to quiesce
	 * any PCI transactions.
	 */
	qed_ops->common->recovery_prolog(qedf->cdev);

	QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
	__qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
	/*
	 * Reset the link and dcbx to the down state since we will not get a
	 * link down event from the MFW; calling __qedf_remove is effectively
	 * a link down event.
	 */
	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
	atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
	__qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
	clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
	QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
}

/* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct qedf_ctx *qedf;

	if (!dev) {
		QEDF_INFO(NULL, QEDF_LOG_EVT,
		    "dev is NULL so ignoring get_generic_tlv_data request.\n");
		return;
	}
	qedf = (struct qedf_ctx *)dev;

	memset(data, 0, sizeof(struct qed_generic_tlvs));
	ether_addr_copy(data->mac[0], qedf->mac);
}

/*
 * Module Init/Remove
 */

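/*
 * Module load: create the io_work cache, bind to the qed core module,
 * attach the FC transport templates and the I/O workqueue, and only then
 * register the PCI driver so that probe can rely on all of the above.
 * Failures unwind in reverse order via the err labels.
 */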
static int __init qedf_init(void)
{
	int ret;

	/* If debug=1 was passed, set the default log mask */
	if (qedf_debug == QEDF_LOG_DEFAULT)
		qedf_debug = QEDF_DEFAULT_LOG_MASK;

	/*
	 * Check that the default priority for FIP/FCoE traffic is in the
	 * 0..7 range; -1 means no override was requested.
	 */
	if (qedf_default_prio > 7) {
		qedf_default_prio = QEDF_DEFAULT_PRIO;
		QEDF_ERR(NULL, "FCoE/FIP priority out of range, resetting to %d.\n",
		    QEDF_DEFAULT_PRIO);
	}

	/* Print driver banner */
	QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
	    QEDF_VERSION);

	/* Create kmem_cache for qedf_io_work structs */
	qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache",
	    sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL);
	if (qedf_io_work_cache == NULL) {
		QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n");
		goto err1;
	}
	QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n",
	    qedf_io_work_cache);

	qed_ops = qed_get_fcoe_ops();
	if (!qed_ops) {
		QEDF_ERR(NULL, "Failed to get qed fcoe operations\n");
		goto err1;
	}

#ifdef CONFIG_DEBUG_FS
	qedf_dbg_init("qedf");
#endif

	qedf_fc_transport_template =
	    fc_attach_transport(&qedf_fc_transport_fn);
	if (!qedf_fc_transport_template) {
		QEDF_ERR(NULL, "Could not register with FC transport\n");
		goto err2;
	}

	qedf_fc_vport_transport_template =
	    fc_attach_transport(&qedf_fc_vport_transport_fn);
	if (!qedf_fc_vport_transport_template) {
		QEDF_ERR(NULL,
		    "Could not register vport template with FC transport\n");
		goto err3;
	}

	qedf_io_wq = create_workqueue("qedf_io_wq");
	if (!qedf_io_wq) {
		QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
		goto err4;
	}

	qedf_cb_ops.get_login_failures = qedf_get_login_failures;

	ret = pci_register_driver(&qedf_pci_driver);
	if (ret) {
		QEDF_ERR(NULL, "Failed to register driver\n");
		goto err5;
	}

	return 0;

err5:
	destroy_workqueue(qedf_io_wq);
err4:
	fc_release_transport(qedf_fc_vport_transport_template);
err3:
	fc_release_transport(qedf_fc_transport_template);
err2:
#ifdef CONFIG_DEBUG_FS
	qedf_dbg_exit();
#endif
	qed_put_fcoe_ops();
err1:
	return -EINVAL;
}

static void __exit qedf_cleanup(void)
{
	pci_unregister_driver(&qedf_pci_driver);

	destroy_workqueue(qedf_io_wq);

	fc_release_transport(qedf_fc_vport_transport_template);
	fc_release_transport(qedf_fc_transport_template);
#ifdef CONFIG_DEBUG_FS
	qedf_dbg_exit();
#endif
	qed_put_fcoe_ops();

	kmem_cache_destroy(qedf_io_work_cache);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
MODULE_AUTHOR("QLogic Corporation");
MODULE_VERSION(QEDF_VERSION);
module_init(qedf_init);
module_exit(qedf_cleanup);
