/* cnic.c: QLogic CNIC core network driver.
 *
 * Copyright (c) 2006-2014 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
 * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#define BCM_CNIC 1
#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"

#define CNIC_MODULE_NAME	"cnic"

static char version[] =
	"QLogic " CNIC_MODULE_NAME " Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("QLogic cnic Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

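/* UIO "open" handler: allow a single CAP_NET_ADMIN user at a time, and
 * reset the L2 rings under rtnl so userspace starts from a clean state.
 */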
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

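/* Look up the cnic device bound to a netdev; returns with a reference
 * held via cnic_hold() on success, NULL if no match is found.
 */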
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

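/* The helpers below proxy context-memory writes, indirect register
 * accesses and L2 ring control through the ethernet driver's drv_ctl()
 * callback.
 */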
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	info.drv_state = state;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

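/* Send an iSCSI netlink event to the userspace uio daemon.  A PATH_REQ
 * (csk != NULL) carries the destination IP, VLAN and path MTU and is
 * retried up to three times; anything else is sent once as IF_DOWN.
 */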
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

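/* Register a ULP driver (iSCSI, FCoE or L4) in the global table and call
 * its cnic_init() on every cnic device already present.
 */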
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
		cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
	else
		cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

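/* Simple bitmap-based ID allocator used for iSCSI/FCoE CIDs.  Note that
 * the wrap-around mask in cnic_alloc_new_id() assumes the table size is
 * a power of two.
 */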
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

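/* Allocate an array of coherent DMA pages and, optionally, a page table
 * the chip uses to locate them (formatted by cp->setup_pgtbl).
 */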
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    CNIC_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
			  ~(CNIC_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
		if (udev->uio_dev == -1)
			__cnic_free_uio_rings(udev);
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = CNIC_PAGE_SIZE;
		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   CNIC_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

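/* KCQ index helpers: bnx2 uses a simple linear index, while bnx2x skips
 * the last entry of each page, which holds the next-page pointer set up
 * in cnic_alloc_kcq() below.
 */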
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

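/* Allocate the L2 ring and receive-buffer memory shared with userspace
 * through UIO mappings; reused across open/close if already allocated.
 */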
static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			if (__cnic_alloc_uio_rings(udev, pages)) {
				udev->dev = NULL;
				return -ENOMEM;
			}
			cp->udev = udev;
			return 0;
		}
	}

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;

	if (__cnic_alloc_uio_rings(udev, pages))
		goto err_udev;

	list_add(&udev->list, &cnic_udev_list);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

 err_udev:
	kfree(udev);
	return -ENOMEM;
}

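/* Describe the register BAR, status block, L2 ring and buffer memory as
 * UIO mappings and register the UIO device on first use.
 */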
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					CNIC_PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			CNIC_PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!CHIP_IS_E1(bp))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

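/* Allocate all bnx2x resources: the context table covering iSCSI and
 * (on E2+) FCoE CIDs, the 16-byte KWQE data area, the KCQs, the global
 * buffer, context memory and the UIO rings.
 */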
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kcalloc(MAX_ISCSI_TBL_SZ, sizeof(struct cnic_iscsi),
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kcalloc(cp->max_cid_space, sizeof(struct cnic_context),
			      GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		goto error;

	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (CNIC_SUPPORTS_FCOE(bp)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return 0;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

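/* Number of free entries in the bnx2 kernel work queue. */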
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

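/* bnx2x KWQEs are submitted one at a time as 16-byte slow-path entries;
 * cnic_get_kwqe_16_data() returns the per-connection data buffer whose
 * DMA address is passed through the l5_data pair.
 */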
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
				u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(bp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return ret;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
				       int en_tcp_dack)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (time_stamps) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}
	if (en_tcp_dack)
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
}

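/* Handle the iSCSI INIT1 KWQE: size the task array, R2T queue and HQ
 * from the request, then program the per-function iSCSI parameters into
 * the four storm RAMs.
 */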
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = bp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	cnic_bnx2x_set_tcp_options(dev,
			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

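/* Per-connection resources: an iSCSI connection needs a CID plus task
 * array, R2T queue and HQ DMA areas; an FCoE connection only needs a CID.
 */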
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

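/* Return the virtual address (and DMA address via ctx_addr) of the
 * context memory for a CID, honoring the chip's alignment requirement.
 */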
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

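/* Fill in the xstorm/tstorm/ustorm/cstorm sections of the iSCSI
 * connection context from the offload1/2/3 KWQEs.
 */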
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(bp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = BP_PORT(bp);

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
		ETH_P_8021Q;
	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {

		port = 0;
	}
	ictx->xstorm_st_context.common.flags =
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
	ictx->xstorm_st_context.common.flags =
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}

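/* Handle the iSCSI OFFLOAD_CONN KWQE train (offload1, offload2 and the
 * additional WQEs it announces); replies with a single KCQE carrying
 * the completion status.
 */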
1872static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1873 u32 num, int *work)
1874{
1875 struct iscsi_kwqe_conn_offload1 *req1;
1876 struct iscsi_kwqe_conn_offload2 *req2;
1877 struct cnic_local *cp = dev->cnic_priv;
1878 struct bnx2x *bp = netdev_priv(dev->netdev);
1879 struct cnic_context *ctx;
1880 struct iscsi_kcqe kcqe;
1881 struct kcqe *cqes[1];
1882 u32 l5_cid;
1883 int ret = 0;
1884
1885 if (num < 2) {
1886 *work = num;
1887 return -EINVAL;
1888 }
1889
1890 req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1891 req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1892 if ((num - 2) < req2->num_additional_wqes) {
1893 *work = num;
1894 return -EINVAL;
1895 }
1896 *work = 2 + req2->num_additional_wqes;
1897
1898 l5_cid = req1->iscsi_conn_id;
1899 if (l5_cid >= MAX_ISCSI_TBL_SZ)
1900 return -EINVAL;
1901
1902 memset(&kcqe, 0, sizeof(kcqe));
1903 kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1904 kcqe.iscsi_conn_id = l5_cid;
1905 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1906
1907 ctx = &cp->ctx_tbl[l5_cid];
1908 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1909 kcqe.completion_status =
1910 ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1911 goto done;
1912 }
1913
1914 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1915 atomic_dec(&cp->iscsi_conn);
1916 goto done;
1917 }
1918 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1919 if (ret) {
1920 atomic_dec(&cp->iscsi_conn);
1921 ret = 0;
1922 goto done;
1923 }
1924 ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1925 if (ret < 0) {
1926 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1927 atomic_dec(&cp->iscsi_conn);
1928 goto done;
1929 }
1930
1931 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1932 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
1933
1934done:
1935 cqes[0] = (struct kcqe *) &kcqe;
1936 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1937 return 0;
1938}
1939
1940
1941static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1942{
1943 struct cnic_local *cp = dev->cnic_priv;
1944 struct iscsi_kwqe_conn_update *req =
1945 (struct iscsi_kwqe_conn_update *) kwqe;
1946 void *data;
1947 union l5cm_specific_data l5_data;
1948 u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1949 int ret;
1950
1951 if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1952 return -EINVAL;
1953
1954 data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1955 if (!data)
1956 return -ENOMEM;
1957
1958 memcpy(data, kwqe, sizeof(struct kwqe));
1959
1960 ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1961 req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1962 return ret;
1963}
1964
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(bp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return 0;
}

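/* Handle an iSCSI DESTROY_CONN request.  If the connection was
 * offloaded less than 2 * HZ ago, the CFC delete is deferred to the
 * delete_task worker; otherwise the delete ramrod runs here.  A
 * DESTROY_CONN KCQE reply is sent in all cases.
 */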
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

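/* Fill the xstorm/tstorm connection buffers for a TCP connect
 * request: context address, receive buffer and MSS parameters, the
 * pseudo-header checksum seed (computed IPv6-style; for IPv4 the
 * unused address words are zero), and optional keepalive settings
 * taken from the third connect KWQE.
 */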
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->max_rt_time = 0xffffffff;
}

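/* Program the local MAC address into the xstorm and tstorm internal
 * memories.  Note the tstorm copy is written in byte-reversed order
 * (LSB/MID/MSB offsets).
 */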
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[0]);
}

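/* Handle an L4 CONNECT1 request.  Depending on the IPv6 flag this
 * consumes two or three KWQEs, builds the connection address and
 * storm buffers in the KWQE-16 data area, writes the VLAN id to the
 * iSCSI xstorm area, and submits a TCP_CONNECT ramrod.
 */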
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
		kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

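/* OFFLOAD_PG and UPDATE_PG need no firmware work on bnx2x; both are
 * completed immediately by echoing a KCQE back to the L4 ULP.
 */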
static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

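/* Handle the FCoE INIT1 KWQE.  INIT1/INIT2/INIT3 must arrive as a
 * group of three; they are copied into a single ramrod buffer along
 * with the KCQ2 event-queue page table before the INIT_FUNC ramrod
 * is submitted.
 */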
static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	*work = 3;
	return ret;
}

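/* Handle the FCoE OFFLOAD_CONN1 group of four KWQEs: allocate
 * connection resources, seed the CDU fields of the hardware context,
 * and submit an OFFLOAD_CONN ramrod.  On any failure a KCQE with
 * CTX_ALLOC_FAILURE status is returned to the ULP.
 */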
static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret = 0;
	u32 cid = -1, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct fcoe_kwqe_conn_offload1 *req1;
	struct fcoe_kwqe_conn_offload2 *req2;
	struct fcoe_kwqe_conn_offload3 *req3;
	struct fcoe_kwqe_conn_offload4 *req4;
	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
	struct cnic_context *ctx;
	struct fcoe_context *fctx;
	struct regpair ctx_addr;
	union l5cm_specific_data l5_data;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	if (num < 4) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];

	*work = 4;

	l5_cid = req1->fcoe_conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		goto err_reply;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto err_reply;

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		ret = 0;
		goto err_reply;
	}
	cid = ctx->cid;

	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
	if (fctx) {
		u32 hw_cid = BNX2X_HW_CID(bp, cid);
		u32 val;

		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->xstorm_ag_context.cdu_reserved = val;
		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->ustorm_ag_context.cdu_usage = val;
	}
	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_offload size too big\n");
		goto err_reply;
	}
	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_offload)
		goto err_reply;

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));

	cid = BNX2X_HW_CID(bp, cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;

err_reply:
	if (cid != -1)
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_enable size too big\n");
		return -ENOMEM;
	}
	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_enable)
		return -ENOMEM;

	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_disable size too big\n");
		return -ENOMEM;
	}
	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_disable)
		return -ENOMEM;

	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

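/* Handle an FCoE DESTROY_CONN request: submit a TERMINATE_CONN
 * ramrod, wait (with a timeout) for its completion, schedule the
 * deferred CFC delete via delete_task, and reply with a DESTROY_CONN
 * KCQE whose status reflects whether the terminate completed.
 */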
static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_destroy *req;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (ctx->wait_cond)
			kcqe.completion_status = 0;
	}

	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));

	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
	kcqe.fcoe_conn_id = req->conn_id;
	kcqe.fcoe_conn_context_id = cid;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

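/* Wait for all contexts from start_cid onward to finish any pending
 * delete work and drop out of the offloaded state, warning about any
 * CID still marked as offloaded after the polling window expires.
 */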
static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 i;

	for (i = start_cid; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];
		int j;

		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			msleep(10);

		for (j = 0; j < 5; j++) {
			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
				break;
			msleep(20);
		}

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			netdev_warn(dev->netdev, "CID %x not deleted\n",
				    ctx->cid);
	}
}

static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;
	u32 cid;

	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);

	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

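/* Synthesize an error KCQE for a KWQE that the hardware could not
 * accept (typically after a parity error).  The reply is built with
 * the matching completion opcode and a PARITY_ERROR status so the
 * ULP can unwind quickly.
 */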
static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kcqe kcqe;
	struct kcqe *cqes[1];
	u32 cid;
	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
	u32 kcqe_op;
	int ulp_type;

	cid = kwqe->kwqe_info0;
	memset(&kcqe, 0, sizeof(kcqe));

	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
		u32 l5_cid = 0;

		ulp_type = CNIC_ULP_FCOE;
		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
			struct fcoe_kwqe_conn_enable_disable *req;

			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
			cid = req->context_id;
			l5_cid = req->conn_id;
		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
		} else {
			return;
		}
		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
		kcqe.kcqe_info2 = cid;
		kcqe.kcqe_info0 = l5_cid;

	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
		ulp_type = CNIC_ULP_ISCSI;
		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
			cid = kwqe->kwqe_info1;

		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
		kcqe.kcqe_info2 = cid;
		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);

	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;

		ulp_type = CNIC_ULP_L4;
		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		else
			return;

		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
				    KCQE_FLAGS_LAYER_MASK_L4;
		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
		l4kcqe->cid = cid;
		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
	} else {
		return;
	}

	cqes[0] = &kcqe;
	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
}

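/* Dispatch a batch of iSCSI and L4 KWQEs to their handlers.  Each
 * handler reports how many WQEs it consumed via *work; on -EIO or
 * -EAGAIN an error completion is generated so ULP cleanup is not
 * stalled by a dead chip.
 */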
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
					 struct kwqe *wqes[], u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0) {
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			/* Possibly bnx2x parity error, send completion
			 * to ulp drivers with error code to speed up
			 * cleanup and reset recovery.
			 */
			if (ret == -EIO || ret == -EAGAIN)
				cnic_bnx2x_kwqe_err(dev, kwqe);
		}
		i += work;
	}
	return 0;
}

static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
					struct kwqe *wqes[], u32 num_wqes)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
		return -EINVAL;

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case FCOE_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_ENABLE_CONN:
			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DISABLE_CONN:
			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY:
			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_STAT:
			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0) {
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			/* Possibly bnx2x parity error, send completion
			 * to ulp drivers with error code to speed up
			 * cleanup and reset recovery.
			 */
			if (ret == -EIO || ret == -EAGAIN)
				cnic_bnx2x_kwqe_err(dev, kwqe);
		}
		i += work;
	}
	return 0;
}

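/* Top-level bnx2x KWQE entry point: route the batch to the iSCSI/L4
 * or FCoE submit path based on the layer code of the first KWQE.
 */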
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int ret = -EINVAL;
	u32 layer_code;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!num_wqes)
		return 0;

	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
	switch (layer_code) {
	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
	case KWQE_FLAGS_LAYER_MASK_L4:
	case KWQE_FLAGS_LAYER_MASK_L2:
		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
		break;

	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
		break;
	}
	return ret;
}

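/* Map a KCQE's op flags to its layer mask; FCoE TERMINATE_CONN
 * completions are treated as L4 for grouping purposes.
 */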
static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
{
	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
		return KCQE_FLAGS_LAYER_MASK_L4;

	return opflag & KCQE_FLAGS_LAYER_MASK;
}

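/* Deliver a run of completed KCQEs to the ULP drivers.  Consecutive
 * KCQEs sharing a layer mask are batched into one indicate_kcqes()
 * call, ramrod completions are counted, and any consumed SPQ credits
 * are returned to the bnx2x driver at the end.
 */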
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
			ulp_type = CNIC_ULP_FCOE;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						  cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}

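/* Gather newly-produced entries from the hardware KCQ into
 * cp->completed_kcq[], walking the ring from the software producer
 * index toward the hardware producer index, up to
 * MAX_COMPLETED_KCQE entries at a time.
 */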
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = info->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(