1/* cnic.c: QLogic CNIC core network driver.
2 *
3 * Copyright (c) 2006-2014 Broadcom Corporation
4 * Copyright (c) 2014-2015 QLogic Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
11 * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
12 * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/module.h>
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/list.h>
22#include <linux/slab.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/uio_driver.h>
27#include <linux/in.h>
28#include <linux/dma-mapping.h>
29#include <linux/delay.h>
30#include <linux/ethtool.h>
31#include <linux/if_vlan.h>
32#include <linux/prefetch.h>
33#include <linux/random.h>
34#if IS_ENABLED(CONFIG_VLAN_8021Q)
35#define BCM_VLAN 1
36#endif
37#include <net/ip.h>
38#include <net/tcp.h>
39#include <net/route.h>
40#include <net/ipv6.h>
41#include <net/ip6_route.h>
42#include <net/ip6_checksum.h>
43#include <scsi/iscsi_if.h>
44
45#define BCM_CNIC	1
46#include "cnic_if.h"
47#include "bnx2.h"
48#include "bnx2x/bnx2x.h"
49#include "bnx2x/bnx2x_reg.h"
50#include "bnx2x/bnx2x_fw_defs.h"
51#include "bnx2x/bnx2x_hsi.h"
52#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
53#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
54#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
55#include "cnic.h"
56#include "cnic_defs.h"
57
58#define CNIC_MODULE_NAME	"cnic"
59
60static char version[] =
61	"QLogic " CNIC_MODULE_NAME "Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
62
63MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
64	      "Chen (zongxi@broadcom.com");
65MODULE_DESCRIPTION("QLogic cnic Driver");
66MODULE_LICENSE("GPL");
67MODULE_VERSION(CNIC_MODULE_VERSION);
68
69/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
70static LIST_HEAD(cnic_dev_list);
71static LIST_HEAD(cnic_udev_list);
72static DEFINE_RWLOCK(cnic_dev_lock);
73static DEFINE_MUTEX(cnic_lock);
74
75static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
76
77/* helper function, assuming cnic_lock is held */
78static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
79{
80	return rcu_dereference_protected(cnic_ulp_tbl[type],
81					 lockdep_is_held(&cnic_lock));
82}
83
84static int cnic_service_bnx2(void *, void *);
85static int cnic_service_bnx2x(void *, void *);
86static int cnic_ctl(void *, struct cnic_ctl_info *);
87
88static struct cnic_ops cnic_bnx2_ops = {
89	.cnic_owner	= THIS_MODULE,
90	.cnic_handler	= cnic_service_bnx2,
91	.cnic_ctl	= cnic_ctl,
92};
93
94static struct cnic_ops cnic_bnx2x_ops = {
95	.cnic_owner	= THIS_MODULE,
96	.cnic_handler	= cnic_service_bnx2x,
97	.cnic_ctl	= cnic_ctl,
98};
99
100static struct workqueue_struct *cnic_wq;
101
102static void cnic_shutdown_rings(struct cnic_dev *);
103static void cnic_init_rings(struct cnic_dev *);
104static int cnic_cm_set_pg(struct cnic_sock *);
105
106static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
107{
108	struct cnic_uio_dev *udev = uinfo->priv;
109	struct cnic_dev *dev;
110
111	if (!capable(CAP_NET_ADMIN))
112		return -EPERM;
113
114	if (udev->uio_dev != -1)
115		return -EBUSY;
116
117	rtnl_lock();
118	dev = udev->dev;
119
120	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
121		rtnl_unlock();
122		return -ENODEV;
123	}
124
125	udev->uio_dev = iminor(inode);
126
127	cnic_shutdown_rings(dev);
128	cnic_init_rings(dev);
129	rtnl_unlock();
130
131	return 0;
132}
133
134static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
135{
136	struct cnic_uio_dev *udev = uinfo->priv;
137
138	udev->uio_dev = -1;
139	return 0;
140}
141
142static inline void cnic_hold(struct cnic_dev *dev)
143{
144	atomic_inc(&dev->ref_count);
145}
146
147static inline void cnic_put(struct cnic_dev *dev)
148{
149	atomic_dec(&dev->ref_count);
150}
151
152static inline void csk_hold(struct cnic_sock *csk)
153{
154	atomic_inc(&csk->ref_count);
155}
156
157static inline void csk_put(struct cnic_sock *csk)
158{
159	atomic_dec(&csk->ref_count);
160}
161
162static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
163{
164	struct cnic_dev *cdev;
165
166	read_lock(&cnic_dev_lock);
167	list_for_each_entry(cdev, &cnic_dev_list, list) {
168		if (netdev == cdev->netdev) {
169			cnic_hold(cdev);
170			read_unlock(&cnic_dev_lock);
171			return cdev;
172		}
173	}
174	read_unlock(&cnic_dev_lock);
175	return NULL;
176}
177
178static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
179{
180	atomic_inc(&ulp_ops->ref_count);
181}
182
183static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
184{
185	atomic_dec(&ulp_ops->ref_count);
186}
187
188static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
189{
190	struct cnic_local *cp = dev->cnic_priv;
191	struct cnic_eth_dev *ethdev = cp->ethdev;
192	struct drv_ctl_info info;
193	struct drv_ctl_io *io = &info.data.io;
194
195	memset(&info, 0, sizeof(struct drv_ctl_info));
196	info.cmd = DRV_CTL_CTX_WR_CMD;
197	io->cid_addr = cid_addr;
198	io->offset = off;
199	io->data = val;
200	ethdev->drv_ctl(dev->netdev, &info);
201}
202
203static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
204{
205	struct cnic_local *cp = dev->cnic_priv;
206	struct cnic_eth_dev *ethdev = cp->ethdev;
207	struct drv_ctl_info info;
208	struct drv_ctl_io *io = &info.data.io;
209
210	memset(&info, 0, sizeof(struct drv_ctl_info));
211	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
212	io->offset = off;
213	io->dma_addr = addr;
214	ethdev->drv_ctl(dev->netdev, &info);
215}
216
217static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
218{
219	struct cnic_local *cp = dev->cnic_priv;
220	struct cnic_eth_dev *ethdev = cp->ethdev;
221	struct drv_ctl_info info;
222	struct drv_ctl_l2_ring *ring = &info.data.ring;
223
224	memset(&info, 0, sizeof(struct drv_ctl_info));
225	if (start)
226		info.cmd = DRV_CTL_START_L2_CMD;
227	else
228		info.cmd = DRV_CTL_STOP_L2_CMD;
229
230	ring->cid = cid;
231	ring->client_id = cl_id;
232	ethdev->drv_ctl(dev->netdev, &info);
233}
234
235static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
236{
237	struct cnic_local *cp = dev->cnic_priv;
238	struct cnic_eth_dev *ethdev = cp->ethdev;
239	struct drv_ctl_info info;
240	struct drv_ctl_io *io = &info.data.io;
241
242	memset(&info, 0, sizeof(struct drv_ctl_info));
243	info.cmd = DRV_CTL_IO_WR_CMD;
244	io->offset = off;
245	io->data = val;
246	ethdev->drv_ctl(dev->netdev, &info);
247}
248
249static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
250{
251	struct cnic_local *cp = dev->cnic_priv;
252	struct cnic_eth_dev *ethdev = cp->ethdev;
253	struct drv_ctl_info info;
254	struct drv_ctl_io *io = &info.data.io;
255
256	memset(&info, 0, sizeof(struct drv_ctl_info));
257	info.cmd = DRV_CTL_IO_RD_CMD;
258	io->offset = off;
259	ethdev->drv_ctl(dev->netdev, &info);
260	return io->data;
261}
262
263static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
264{
265	struct cnic_local *cp = dev->cnic_priv;
266	struct cnic_eth_dev *ethdev = cp->ethdev;
267	struct drv_ctl_info info;
268	struct fcoe_capabilities *fcoe_cap =
269		&info.data.register_data.fcoe_features;
270
271	memset(&info, 0, sizeof(struct drv_ctl_info));
272	if (reg) {
273		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
274		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
275			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
276	} else {
277		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
278	}
279
280	info.data.ulp_type = ulp_type;
281	info.drv_state = state;
282	ethdev->drv_ctl(dev->netdev, &info);
283}
284
285static int cnic_in_use(struct cnic_sock *csk)
286{
287	return test_bit(SK_F_INUSE, &csk->flags);
288}
289
290static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
291{
292	struct cnic_local *cp = dev->cnic_priv;
293	struct cnic_eth_dev *ethdev = cp->ethdev;
294	struct drv_ctl_info info;
295
296	memset(&info, 0, sizeof(struct drv_ctl_info));
297	info.cmd = cmd;
298	info.data.credit.credit_count = count;
299	ethdev->drv_ctl(dev->netdev, &info);
300}
301
302static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
303{
304	u32 i;
305
306	if (!cp->ctx_tbl)
307		return -EINVAL;
308
309	for (i = 0; i < cp->max_cid_space; i++) {
310		if (cp->ctx_tbl[i].cid == cid) {
311			*l5_cid = i;
312			return 0;
313		}
314	}
315	return -EINVAL;
316}
317
318static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
319			   struct cnic_sock *csk)
320{
321	struct iscsi_path path_req;
322	char *buf = NULL;
323	u16 len = 0;
324	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
325	struct cnic_ulp_ops *ulp_ops;
326	struct cnic_uio_dev *udev = cp->udev;
327	int rc = 0, retry = 0;
328
329	if (!udev || udev->uio_dev == -1)
330		return -ENODEV;
331
332	if (csk) {
333		len = sizeof(path_req);
334		buf = (char *) &path_req;
335		memset(&path_req, 0, len);
336
337		msg_type = ISCSI_KEVENT_PATH_REQ;
338		path_req.handle = (u64) csk->l5_cid;
339		if (test_bit(SK_F_IPV6, &csk->flags)) {
340			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
341			       sizeof(struct in6_addr));
342			path_req.ip_addr_len = 16;
343		} else {
344			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
345			       sizeof(struct in_addr));
346			path_req.ip_addr_len = 4;
347		}
348		path_req.vlan_id = csk->vlan_id;
349		path_req.pmtu = csk->mtu;
350	}
351
352	while (retry < 3) {
353		rc = 0;
354		rcu_read_lock();
355		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
356		if (ulp_ops)
357			rc = ulp_ops->iscsi_nl_send_msg(
358				cp->ulp_handle[CNIC_ULP_ISCSI],
359				msg_type, buf, len);
360		rcu_read_unlock();
361		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
362			break;
363
364		msleep(100);
365		retry++;
366	}
367	return rc;
368}
369
370static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
371
372static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
373				  char *buf, u16 len)
374{
375	int rc = -EINVAL;
376
377	switch (msg_type) {
378	case ISCSI_UEVENT_PATH_UPDATE: {
379		struct cnic_local *cp;
380		u32 l5_cid;
381		struct cnic_sock *csk;
382		struct iscsi_path *path_resp;
383
384		if (len < sizeof(*path_resp))
385			break;
386
387		path_resp = (struct iscsi_path *) buf;
388		cp = dev->cnic_priv;
389		l5_cid = (u32) path_resp->handle;
390		if (l5_cid >= MAX_CM_SK_TBL_SZ)
391			break;
392
393		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
394			rc = -ENODEV;
395			break;
396		}
397		csk = &cp->csk_tbl[l5_cid];
398		csk_hold(csk);
399		if (cnic_in_use(csk) &&
400		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
401
402			csk->vlan_id = path_resp->vlan_id;
403
404			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
405			if (test_bit(SK_F_IPV6, &csk->flags))
406				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
407				       sizeof(struct in6_addr));
408			else
409				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
410				       sizeof(struct in_addr));
411
412			if (is_valid_ether_addr(csk->ha)) {
413				cnic_cm_set_pg(csk);
414			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
415				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
416
417				cnic_cm_upcall(cp, csk,
418					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
419				clear_bit(SK_F_CONNECT_START, &csk->flags);
420			}
421		}
422		csk_put(csk);
423		rc = 0;
424	}
425	}
426
427	return rc;
428}
429
430static int cnic_offld_prep(struct cnic_sock *csk)
431{
432	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
433		return 0;
434
435	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
436		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
437		return 0;
438	}
439
440	return 1;
441}
442
443static int cnic_close_prep(struct cnic_sock *csk)
444{
445	clear_bit(SK_F_CONNECT_START, &csk->flags);
446	smp_mb__after_atomic();
447
448	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
449		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
450			msleep(1);
451
452		return 1;
453	}
454	return 0;
455}
456
457static int cnic_abort_prep(struct cnic_sock *csk)
458{
459	clear_bit(SK_F_CONNECT_START, &csk->flags);
460	smp_mb__after_atomic();
461
462	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
463		msleep(1);
464
465	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
466		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
467		return 1;
468	}
469
470	return 0;
471}
472
473int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
474{
475	struct cnic_dev *dev;
476
477	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
478		pr_err("%s: Bad type %d\n", __func__, ulp_type);
479		return -EINVAL;
480	}
481	mutex_lock(&cnic_lock);
482	if (cnic_ulp_tbl_prot(ulp_type)) {
483		pr_err("%s: Type %d has already been registered\n",
484		       __func__, ulp_type);
485		mutex_unlock(&cnic_lock);
486		return -EBUSY;
487	}
488
489	read_lock(&cnic_dev_lock);
490	list_for_each_entry(dev, &cnic_dev_list, list) {
491		struct cnic_local *cp = dev->cnic_priv;
492
493		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
494	}
495	read_unlock(&cnic_dev_lock);
496
497	atomic_set(&ulp_ops->ref_count, 0);
498	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
499	mutex_unlock(&cnic_lock);
500
501	/* Prevent race conditions with netdev_event */
502	rtnl_lock();
503	list_for_each_entry(dev, &cnic_dev_list, list) {
504		struct cnic_local *cp = dev->cnic_priv;
505
506		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
507			ulp_ops->cnic_init(dev);
508	}
509	rtnl_unlock();
510
511	return 0;
512}
513
514int cnic_unregister_driver(int ulp_type)
515{
516	struct cnic_dev *dev;
517	struct cnic_ulp_ops *ulp_ops;
518	int i = 0;
519
520	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
521		pr_err("%s: Bad type %d\n", __func__, ulp_type);
522		return -EINVAL;
523	}
524	mutex_lock(&cnic_lock);
525	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
526	if (!ulp_ops) {
527		pr_err("%s: Type %d has not been registered\n",
528		       __func__, ulp_type);
529		goto out_unlock;
530	}
531	read_lock(&cnic_dev_lock);
532	list_for_each_entry(dev, &cnic_dev_list, list) {
533		struct cnic_local *cp = dev->cnic_priv;
534
535		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
536			pr_err("%s: Type %d still has devices registered\n",
537			       __func__, ulp_type);
538			read_unlock(&cnic_dev_lock);
539			goto out_unlock;
540		}
541	}
542	read_unlock(&cnic_dev_lock);
543
544	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
545
546	mutex_unlock(&cnic_lock);
547	synchronize_rcu();
548	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
549		msleep(100);
550		i++;
551	}
552
553	if (atomic_read(&ulp_ops->ref_count) != 0)
554		pr_warn("%s: Failed waiting for ref count to go to zero\n",
555			__func__);
556	return 0;
557
558out_unlock:
559	mutex_unlock(&cnic_lock);
560	return -EINVAL;
561}
562
563static int cnic_start_hw(struct cnic_dev *);
564static void cnic_stop_hw(struct cnic_dev *);
565
566static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
567				void *ulp_ctx)
568{
569	struct cnic_local *cp = dev->cnic_priv;
570	struct cnic_ulp_ops *ulp_ops;
571
572	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
573		pr_err("%s: Bad type %d\n", __func__, ulp_type);
574		return -EINVAL;
575	}
576	mutex_lock(&cnic_lock);
577	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
578		pr_err("%s: Driver with type %d has not been registered\n",
579		       __func__, ulp_type);
580		mutex_unlock(&cnic_lock);
581		return -EAGAIN;
582	}
583	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
584		pr_err("%s: Type %d has already been registered to this device\n",
585		       __func__, ulp_type);
586		mutex_unlock(&cnic_lock);
587		return -EBUSY;
588	}
589
590	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
591	cp->ulp_handle[ulp_type] = ulp_ctx;
592	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
593	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
594	cnic_hold(dev);
595
596	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
597		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
598			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
599
600	mutex_unlock(&cnic_lock);
601
602	cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
603
604	return 0;
605
606}
607EXPORT_SYMBOL(cnic_register_driver);
608
609static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
610{
611	struct cnic_local *cp = dev->cnic_priv;
612	int i = 0;
613
614	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
615		pr_err("%s: Bad type %d\n", __func__, ulp_type);
616		return -EINVAL;
617	}
618
619	if (ulp_type == CNIC_ULP_ISCSI)
620		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
621
622	mutex_lock(&cnic_lock);
623	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
624		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
625		cnic_put(dev);
626	} else {
627		pr_err("%s: device not registered to this ulp type %d\n",
628		       __func__, ulp_type);
629		mutex_unlock(&cnic_lock);
630		return -EINVAL;
631	}
632	mutex_unlock(&cnic_lock);
633
634	if (ulp_type == CNIC_ULP_FCOE)
635		dev->fcoe_cap = NULL;
636
637	synchronize_rcu();
638
639	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
640	       i < 20) {
641		msleep(100);
642		i++;
643	}
644	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
645		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
646
647	if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
648		cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
649	else
650		cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
651
652	return 0;
653}
654EXPORT_SYMBOL(cnic_unregister_driver);
655
656static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
657			    u32 next)
658{
659	id_tbl->start = start_id;
660	id_tbl->max = size;
661	id_tbl->next = next;
662	spin_lock_init(&id_tbl->lock);
663	id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
664	if (!id_tbl->table)
665		return -ENOMEM;
666
667	return 0;
668}
669
670static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
671{
672	bitmap_free(id_tbl->table);
673	id_tbl->table = NULL;
674}
675
676static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
677{
678	int ret = -1;
679
680	id -= id_tbl->start;
681	if (id >= id_tbl->max)
682		return ret;
683
684	spin_lock(&id_tbl->lock);
685	if (!test_bit(id, id_tbl->table)) {
686		set_bit(id, id_tbl->table);
687		ret = 0;
688	}
689	spin_unlock(&id_tbl->lock);
690	return ret;
691}
692
693/* Returns -1 if not successful */
694static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
695{
696	u32 id;
697
698	spin_lock(&id_tbl->lock);
699	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
700	if (id >= id_tbl->max) {
701		id = -1;
702		if (id_tbl->next != 0) {
703			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
704			if (id >= id_tbl->next)
705				id = -1;
706		}
707	}
708
709	if (id < id_tbl->max) {
710		set_bit(id, id_tbl->table);
711		id_tbl->next = (id + 1) & (id_tbl->max - 1);
712		id += id_tbl->start;
713	}
714
715	spin_unlock(&id_tbl->lock);
716
717	return id;
718}
719
720static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
721{
722	if (id == -1)
723		return;
724
725	id -= id_tbl->start;
726	if (id >= id_tbl->max)
727		return;
728
729	clear_bit(id, id_tbl->table);
730}
731
732static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
733{
734	int i;
735
736	if (!dma->pg_arr)
737		return;
738
739	for (i = 0; i < dma->num_pages; i++) {
740		if (dma->pg_arr[i]) {
741			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
742					  dma->pg_arr[i], dma->pg_map_arr[i]);
743			dma->pg_arr[i] = NULL;
744		}
745	}
746	if (dma->pgtbl) {
747		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
748				  dma->pgtbl, dma->pgtbl_map);
749		dma->pgtbl = NULL;
750	}
751	kfree(dma->pg_arr);
752	dma->pg_arr = NULL;
753	dma->num_pages = 0;
754}
755
756static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
757{
758	int i;
759	__le32 *page_table = (__le32 *) dma->pgtbl;
760
761	for (i = 0; i < dma->num_pages; i++) {
762		/* Each entry needs to be in big endian format. */
763		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
764		page_table++;
765		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
766		page_table++;
767	}
768}
769
770static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
771{
772	int i;
773	__le32 *page_table = (__le32 *) dma->pgtbl;
774
775	for (i = 0; i < dma->num_pages; i++) {
776		/* Each entry needs to be in little endian format. */
777		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
778		page_table++;
779		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
780		page_table++;
781	}
782}
783
784static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
785			  int pages, int use_pg_tbl)
786{
787	int i, size;
788	struct cnic_local *cp = dev->cnic_priv;
789
790	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
791	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
792	if (dma->pg_arr == NULL)
793		return -ENOMEM;
794
795	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
796	dma->num_pages = pages;
797
798	for (i = 0; i < pages; i++) {
799		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
800						    CNIC_PAGE_SIZE,
801						    &dma->pg_map_arr[i],
802						    GFP_ATOMIC);
803		if (dma->pg_arr[i] == NULL)
804			goto error;
805	}
806	if (!use_pg_tbl)
807		return 0;
808
809	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
810			  ~(CNIC_PAGE_SIZE - 1);
811	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
812					&dma->pgtbl_map, GFP_ATOMIC);
813	if (dma->pgtbl == NULL)
814		goto error;
815
816	cp->setup_pgtbl(dev, dma);
817
818	return 0;
819
820error:
821	cnic_free_dma(dev, dma);
822	return -ENOMEM;
823}
824
825static void cnic_free_context(struct cnic_dev *dev)
826{
827	struct cnic_local *cp = dev->cnic_priv;
828	int i;
829
830	for (i = 0; i < cp->ctx_blks; i++) {
831		if (cp->ctx_arr[i].ctx) {
832			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
833					  cp->ctx_arr[i].ctx,
834					  cp->ctx_arr[i].mapping);
835			cp->ctx_arr[i].ctx = NULL;
836		}
837	}
838}
839
840static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
841{
842	if (udev->l2_buf) {
843		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
844				  udev->l2_buf, udev->l2_buf_map);
845		udev->l2_buf = NULL;
846	}
847
848	if (udev->l2_ring) {
849		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
850				  udev->l2_ring, udev->l2_ring_map);
851		udev->l2_ring = NULL;
852	}
853
854}
855
856static void __cnic_free_uio(struct cnic_uio_dev *udev)
857{
858	uio_unregister_device(&udev->cnic_uinfo);
859
860	__cnic_free_uio_rings(udev);
861
862	pci_dev_put(udev->pdev);
863	kfree(udev);
864}
865
866static void cnic_free_uio(struct cnic_uio_dev *udev)
867{
868	if (!udev)
869		return;
870
871	write_lock(&cnic_dev_lock);
872	list_del_init(&udev->list);
873	write_unlock(&cnic_dev_lock);
874	__cnic_free_uio(udev);
875}
876
877static void cnic_free_resc(struct cnic_dev *dev)
878{
879	struct cnic_local *cp = dev->cnic_priv;
880	struct cnic_uio_dev *udev = cp->udev;
881
882	if (udev) {
883		udev->dev = NULL;
884		cp->udev = NULL;
885		if (udev->uio_dev == -1)
886			__cnic_free_uio_rings(udev);
887	}
888
889	cnic_free_context(dev);
890	kfree(cp->ctx_arr);
891	cp->ctx_arr = NULL;
892	cp->ctx_blks = 0;
893
894	cnic_free_dma(dev, &cp->gbl_buf_info);
895	cnic_free_dma(dev, &cp->kwq_info);
896	cnic_free_dma(dev, &cp->kwq_16_data_info);
897	cnic_free_dma(dev, &cp->kcq2.dma);
898	cnic_free_dma(dev, &cp->kcq1.dma);
899	kfree(cp->iscsi_tbl);
900	cp->iscsi_tbl = NULL;
901	kfree(cp->ctx_tbl);
902	cp->ctx_tbl = NULL;
903
904	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
905	cnic_free_id_tbl(&cp->cid_tbl);
906}
907
908static int cnic_alloc_context(struct cnic_dev *dev)
909{
910	struct cnic_local *cp = dev->cnic_priv;
911
912	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
913		int i, k, arr_size;
914
915		cp->ctx_blk_size = CNIC_PAGE_SIZE;
916		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
917		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
918			   sizeof(struct cnic_ctx);
919		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
920		if (cp->ctx_arr == NULL)
921			return -ENOMEM;
922
923		k = 0;
924		for (i = 0; i < 2; i++) {
925			u32 j, reg, off, lo, hi;
926
927			if (i == 0)
928				off = BNX2_PG_CTX_MAP;
929			else
930				off = BNX2_ISCSI_CTX_MAP;
931
932			reg = cnic_reg_rd_ind(dev, off);
933			lo = reg >> 16;
934			hi = reg & 0xffff;
935			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
936				cp->ctx_arr[k].cid = j;
937		}
938
939		cp->ctx_blks = k;
940		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
941			cp->ctx_blks = 0;
942			return -ENOMEM;
943		}
944
945		for (i = 0; i < cp->ctx_blks; i++) {
946			cp->ctx_arr[i].ctx =
947				dma_alloc_coherent(&dev->pcidev->dev,
948						   CNIC_PAGE_SIZE,
949						   &cp->ctx_arr[i].mapping,
950						   GFP_KERNEL);
951			if (cp->ctx_arr[i].ctx == NULL)
952				return -ENOMEM;
953		}
954	}
955	return 0;
956}
957
958static u16 cnic_bnx2_next_idx(u16 idx)
959{
960	return idx + 1;
961}
962
963static u16 cnic_bnx2_hw_idx(u16 idx)
964{
965	return idx;
966}
967
968static u16 cnic_bnx2x_next_idx(u16 idx)
969{
970	idx++;
971	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
972		idx++;
973
974	return idx;
975}
976
977static u16 cnic_bnx2x_hw_idx(u16 idx)
978{
979	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
980		idx++;
981	return idx;
982}
983
984static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
985			  bool use_pg_tbl)
986{
987	int err, i, use_page_tbl = 0;
988	struct kcqe **kcq;
989
990	if (use_pg_tbl)
991		use_page_tbl = 1;
992
993	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
994	if (err)
995		return err;
996
997	kcq = (struct kcqe **) info->dma.pg_arr;
998	info->kcq = kcq;
999
1000	info->next_idx = cnic_bnx2_next_idx;
1001	info->hw_idx = cnic_bnx2_hw_idx;
1002	if (use_pg_tbl)
1003		return 0;
1004
1005	info->next_idx = cnic_bnx2x_next_idx;
1006	info->hw_idx = cnic_bnx2x_hw_idx;
1007
1008	for (i = 0; i < KCQ_PAGE_CNT; i++) {
1009		struct bnx2x_bd_chain_next *next =
1010			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
1011		int j = i + 1;
1012
1013		if (j >= KCQ_PAGE_CNT)
1014			j = 0;
1015		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
1016		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
1017	}
1018	return 0;
1019}
1020
1021static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
1022{
1023	struct cnic_local *cp = udev->dev->cnic_priv;
1024
1025	if (udev->l2_ring)
1026		return 0;
1027
1028	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
1029	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1030					   &udev->l2_ring_map, GFP_KERNEL);
1031	if (!udev->l2_ring)
1032		return -ENOMEM;
1033
1034	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
1035	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
1036	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1037					  &udev->l2_buf_map, GFP_KERNEL);
1038	if (!udev->l2_buf) {
1039		__cnic_free_uio_rings(udev);
1040		return -ENOMEM;
1041	}
1042
1043	return 0;
1044
1045}
1046
1047static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1048{
1049	struct cnic_local *cp = dev->cnic_priv;
1050	struct cnic_uio_dev *udev;
1051
1052	list_for_each_entry(udev, &cnic_udev_list, list) {
1053		if (udev->pdev == dev->pcidev) {
1054			udev->dev = dev;
1055			if (__cnic_alloc_uio_rings(udev, pages)) {
1056				udev->dev = NULL;
1057				return -ENOMEM;
1058			}
1059			cp->udev = udev;
1060			return 0;
1061		}
1062	}
1063
1064	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1065	if (!udev)
1066		return -ENOMEM;
1067
1068	udev->uio_dev = -1;
1069
1070	udev->dev = dev;
1071	udev->pdev = dev->pcidev;
1072
1073	if (__cnic_alloc_uio_rings(udev, pages))
1074		goto err_udev;
1075
1076	list_add(&udev->list, &cnic_udev_list);
1077
1078	pci_dev_get(udev->pdev);
1079
1080	cp->udev = udev;
1081
1082	return 0;
1083
1084 err_udev:
1085	kfree(udev);
1086	return -ENOMEM;
1087}
1088
1089static int cnic_init_uio(struct cnic_dev *dev)
1090{
1091	struct cnic_local *cp = dev->cnic_priv;
1092	struct cnic_uio_dev *udev = cp->udev;
1093	struct uio_info *uinfo;
1094	int ret = 0;
1095
1096	if (!udev)
1097		return -ENOMEM;
1098
1099	uinfo = &udev->cnic_uinfo;
1100
1101	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
1102	uinfo->mem[0].internal_addr = dev->regview;
1103	uinfo->mem[0].memtype = UIO_MEM_PHYS;
1104
1105	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1106		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
1107						     TX_MAX_TSS_RINGS + 1);
1108		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
1109					CNIC_PAGE_MASK;
1110		uinfo->mem[1].dma_addr = cp->status_blk_map;
1111		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
1112			uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE * 9);
1113		else
1114			uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE);
1115
1116		uinfo->name = "bnx2_cnic";
1117	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1118		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
1119
1120		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
1121			CNIC_PAGE_MASK;
1122		uinfo->mem[1].dma_addr = cp->status_blk_map;
1123		uinfo->mem[1].size = PAGE_ALIGN(sizeof(*cp->bnx2x_def_status_blk));
1124
1125		uinfo->name = "bnx2x_cnic";
1126	}
1127
1128	uinfo->mem[1].dma_device = &dev->pcidev->dev;
1129	uinfo->mem[1].memtype = UIO_MEM_DMA_COHERENT;
1130
1131	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
1132	uinfo->mem[2].dma_addr = udev->l2_ring_map;
1133	uinfo->mem[2].size = PAGE_ALIGN(udev->l2_ring_size);
1134	uinfo->mem[2].dma_device = &dev->pcidev->dev;
1135	uinfo->mem[2].memtype = UIO_MEM_DMA_COHERENT;
1136
1137	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
1138	uinfo->mem[3].dma_addr = udev->l2_buf_map;
1139	uinfo->mem[3].size = PAGE_ALIGN(udev->l2_buf_size);
1140	uinfo->mem[3].dma_device = &dev->pcidev->dev;
1141	uinfo->mem[3].memtype = UIO_MEM_DMA_COHERENT;
1142
1143	uinfo->version = CNIC_MODULE_VERSION;
1144	uinfo->irq = UIO_IRQ_CUSTOM;
1145
1146	uinfo->open = cnic_uio_open;
1147	uinfo->release = cnic_uio_close;
1148
1149	if (udev->uio_dev == -1) {
1150		if (!uinfo->priv) {
1151			uinfo->priv = udev;
1152
1153			ret = uio_register_device(&udev->pdev->dev, uinfo);
1154		}
1155	} else {
1156		cnic_init_rings(dev);
1157	}
1158
1159	return ret;
1160}
1161
1162static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
1163{
1164	struct cnic_local *cp = dev->cnic_priv;
1165	int ret;
1166
1167	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
1168	if (ret)
1169		goto error;
1170	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
1171
1172	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
1173	if (ret)
1174		goto error;
1175
1176	ret = cnic_alloc_context(dev);
1177	if (ret)
1178		goto error;
1179
1180	ret = cnic_alloc_uio_rings(dev, 2);
1181	if (ret)
1182		goto error;
1183
1184	ret = cnic_init_uio(dev);
1185	if (ret)
1186		goto error;
1187
1188	return 0;
1189
1190error:
1191	cnic_free_resc(dev);
1192	return ret;
1193}
1194
1195static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1196{
1197	struct cnic_local *cp = dev->cnic_priv;
1198	struct bnx2x *bp = netdev_priv(dev->netdev);
1199	int ctx_blk_size = cp->ethdev->ctx_blk_size;
1200	int total_mem, blks, i;
1201
1202	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
1203	blks = total_mem / ctx_blk_size;
1204	if (total_mem % ctx_blk_size)
1205		blks++;
1206
1207	if (blks > cp->ethdev->ctx_tbl_len)
1208		return -ENOMEM;
1209
1210	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
1211	if (cp->ctx_arr == NULL)
1212		return -ENOMEM;
1213
1214	cp->ctx_blks = blks;
1215	cp->ctx_blk_size = ctx_blk_size;
1216	if (!CHIP_IS_E1(bp))
1217		cp->ctx_align = 0;
1218	else
1219		cp->ctx_align = ctx_blk_size;
1220
1221	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
1222
1223	for (i = 0; i < blks; i++) {
1224		cp->ctx_arr[i].ctx =
1225			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
1226					   &cp->ctx_arr[i].mapping,
1227					   GFP_KERNEL);
1228		if (cp->ctx_arr[i].ctx == NULL)
1229			return -ENOMEM;
1230
1231		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
1232			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
1233				cnic_free_context(dev);
1234				cp->ctx_blk_size += cp->ctx_align;
1235				i = -1;
1236				continue;
1237			}
1238		}
1239	}
1240	return 0;
1241}
1242
1243static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1244{
1245	struct cnic_local *cp = dev->cnic_priv;
1246	struct bnx2x *bp = netdev_priv(dev->netdev);
1247	struct cnic_eth_dev *ethdev = cp->ethdev;
1248	u32 start_cid = ethdev->starting_cid;
1249	int i, j, n, ret, pages;
1250	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1251
1252	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1253	cp->iscsi_start_cid = start_cid;
1254	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
1255
1256	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
1257		cp->max_cid_space += dev->max_fcoe_conn;
1258		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
1259		if (!cp->fcoe_init_cid)
1260			cp->fcoe_init_cid = 0x10;
1261	}
1262
1263	cp->iscsi_tbl = kcalloc(MAX_ISCSI_TBL_SZ, sizeof(struct cnic_iscsi),
1264				GFP_KERNEL);
1265	if (!cp->iscsi_tbl)
1266		goto error;
1267
1268	cp->ctx_tbl = kcalloc(cp->max_cid_space, sizeof(struct cnic_context),
1269			      GFP_KERNEL);
1270	if (!cp->ctx_tbl)
1271		goto error;
1272
1273	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1274		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
1275		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1276	}
1277
1278	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
1279		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
1280
1281	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1282		CNIC_PAGE_SIZE;
1283
1284	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1285	if (ret)
1286		goto error;
1287
1288	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1289	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
1290		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1291
1292		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
1293		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
1294						   off;
1295
1296		if ((i % n) == (n - 1))
1297			j++;
1298	}
1299
1300	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
1301	if (ret)
1302		goto error;
1303
1304	if (CNIC_SUPPORTS_FCOE(bp)) {
1305		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1306		if (ret)
1307			goto error;
1308	}
1309
1310	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
1311	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1312	if (ret)
1313		goto error;
1314
1315	ret = cnic_alloc_bnx2x_context(dev);
1316	if (ret)
1317		goto error;
1318
1319	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
1320		return 0;
1321
1322	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1323	cp->status_blk_map = cp->ethdev->irq_arr[1].status_blk_map;
1324
1325	cp->l2_rx_ring_size = 15;
1326
1327	ret = cnic_alloc_uio_rings(dev, 4);
1328	if (ret)
1329		goto error;
1330
1331	ret = cnic_init_uio(dev);
1332	if (ret)
1333		goto error;
1334
1335	return 0;
1336
1337error:
1338	cnic_free_resc(dev);
1339	return -ENOMEM;
1340}
1341
1342static inline u32 cnic_kwq_avail(struct cnic_local *cp)
1343{
1344	return cp->max_kwq_idx -
1345		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
1346}
1347
1348static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1349				  u32 num_wqes)
1350{
1351	struct cnic_local *cp = dev->cnic_priv;
1352	struct kwqe *prod_qe;
1353	u16 prod, sw_prod, i;
1354
1355	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1356		return -EAGAIN;		/* bnx2 is down */
1357
1358	spin_lock_bh(&cp->cnic_ulp_lock);
1359	if (num_wqes > cnic_kwq_avail(cp) &&
1360	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
1361		spin_unlock_bh(&cp->cnic_ulp_lock);
1362		return -EAGAIN;
1363	}
1364
1365	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
1366
1367	prod = cp->kwq_prod_idx;
1368	sw_prod = prod & MAX_KWQ_IDX;
1369	for (i = 0; i < num_wqes; i++) {
1370		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
1371		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
1372		prod++;
1373		sw_prod = prod & MAX_KWQ_IDX;
1374	}
1375	cp->kwq_prod_idx = prod;
1376
1377	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
1378
1379	spin_unlock_bh(&cp->cnic_ulp_lock);
1380	return 0;
1381}
1382
1383static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
1384				   union l5cm_specific_data *l5_data)
1385{
1386	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1387	dma_addr_t map;
1388
1389	map = ctx->kwqe_data_mapping;
1390	l5_data->phy_address.lo = (u64) map & 0xffffffff;
1391	l5_data->phy_address.hi = (u64) map >> 32;
1392	return ctx->kwqe_data;
1393}
1394
1395static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1396				u32 type, union l5cm_specific_data *l5_data)
1397{
1398	struct cnic_local *cp = dev->cnic_priv;
1399	struct bnx2x *bp = netdev_priv(dev->netdev);
1400	struct l5cm_spe kwqe;
1401	struct kwqe_16 *kwq[1];
1402	u16 type_16;
1403	int ret;
1404
1405	kwqe.hdr.conn_and_cmd_data =
1406		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1407			     BNX2X_HW_CID(bp, cid)));
1408
1409	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
1410	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1411		   SPE_HDR_FUNCTION_ID;
1412
1413	kwqe.hdr.type = cpu_to_le16(type_16);
1414	kwqe.hdr.reserved1 = 0;
1415	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1416	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1417
1418	kwq[0] = (struct kwqe_16 *) &kwqe;
1419
1420	spin_lock_bh(&cp->cnic_ulp_lock);
1421	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1422	spin_unlock_bh(&cp->cnic_ulp_lock);
1423
1424	if (ret == 1)
1425		return 0;
1426
1427	return ret;
1428}
1429
1430static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1431				   struct kcqe *cqes[], u32 num_cqes)
1432{
1433	struct cnic_local *cp = dev->cnic_priv;
1434	struct cnic_ulp_ops *ulp_ops;
1435
1436	rcu_read_lock();
1437	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1438	if (likely(ulp_ops)) {
1439		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
1440					  cqes, num_cqes);
1441	}
1442	rcu_read_unlock();
1443}
1444
1445static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
1446				       int en_tcp_dack)
1447{
1448	struct bnx2x *bp = netdev_priv(dev->netdev);
1449	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
1450	u16 tstorm_flags = 0;
1451
1452	if (time_stamps) {
1453		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1454		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1455	}
1456	if (en_tcp_dack)
1457		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
1458
1459	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1460		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
1461
1462	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1463		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
1464}
1465
1466static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1467{
1468	struct cnic_local *cp = dev->cnic_priv;
1469	struct bnx2x *bp = netdev_priv(dev->netdev);
1470	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1471	int hq_bds, pages;
1472	u32 pfid = bp->pfid;
1473
1474	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1475	cp->num_ccells = req1->num_ccells_per_conn;
1476	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
1477			      cp->num_iscsi_tasks;
1478	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1479			BNX2X_ISCSI_R2TQE_SIZE;
1480	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1481	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1482	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1483	cp->num_cqs = req1->num_cqs;
1484
1485	if (!dev->max_iscsi_conn)
1486		return 0;
1487
1488	/* init Tstorm RAM */
1489	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1490		  req1->rq_num_wqes);
1491	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1492		  CNIC_PAGE_SIZE);
1493	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1494		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1495	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1496		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1497		  req1->num_tasks_per_conn);
1498
1499	/* init Ustorm RAM */
1500	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1501		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
1502		  req1->rq_buffer_size);
1503	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1504		  CNIC_PAGE_SIZE);
1505	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1506		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1507	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1508		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1509		  req1->num_tasks_per_conn);
1510	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1511		  req1->rq_num_wqes);
1512	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1513		  req1->cq_num_wqes);
1514	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1515		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1516
1517	/* init Xstorm RAM */
1518	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1519		  CNIC_PAGE_SIZE);
1520	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1521		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1522	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1523		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1524		  req1->num_tasks_per_conn);
1525	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1526		  hq_bds);
1527	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1528		  req1->num_tasks_per_conn);
1529	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1530		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1531
1532	/* init Cstorm RAM */
1533	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1534		  CNIC_PAGE_SIZE);
1535	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1536		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1537	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1538		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1539		  req1->num_tasks_per_conn);
1540	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1541		  req1->cq_num_wqes);
1542	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1543		  hq_bds);
1544
1545	cnic_bnx2x_set_tcp_options(dev,
1546			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
1547			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
1548
1549	return 0;
1550}
1551
1552static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1553{
1554	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1555	struct bnx2x *bp = netdev_priv(dev->netdev);
1556	u32 pfid = bp->pfid;
1557	struct iscsi_kcqe kcqe;
1558	struct kcqe *cqes[1];
1559
1560	memset(&kcqe, 0, sizeof(kcqe));
1561	if (!dev->max_iscsi_conn) {
1562		kcqe.completion_status =
1563			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1564		goto done;
1565	}
1566
1567	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1568		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1569	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1570		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1571		req2->error_bit_map[1]);
1572
1573	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1574		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1575	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1576		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1577	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1578		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1579		req2->error_bit_map[1]);
1580
1581	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1582		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1583
1584	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1585
1586done:
1587	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1588	cqes[0] = (struct kcqe *) &kcqe;
1589	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1590
1591	return 0;
1592}
1593
1594static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1595{
1596	struct cnic_local *cp = dev->cnic_priv;
1597	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1598
1599	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1600		struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1601
1602		cnic_free_dma(dev, &iscsi->hq_info);
1603		cnic_free_dma(dev, &iscsi->r2tq_info);
1604		cnic_free_dma(dev, &iscsi->task_array_info);
1605		cnic_free_id(&cp->cid_tbl, ctx->cid);
1606	} else {
1607		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
1608	}
1609
1610	ctx->cid = 0;
1611}
1612
1613static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1614{
1615	u32 cid;
1616	int ret, pages;
1617	struct cnic_local *cp = dev->cnic_priv;
1618	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1619	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1620
1621	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1622		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1623		if (cid == -1) {
1624			ret = -ENOMEM;
1625			goto error;
1626		}
1627		ctx->cid = cid;
1628		return 0;
1629	}
1630
1631	cid = cnic_alloc_new_id(&cp->cid_tbl);
1632	if (cid == -1) {
1633		ret = -ENOMEM;
1634		goto error;
1635	}
1636
1637	ctx->cid = cid;
1638	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
1639
1640	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1641	if (ret)
1642		goto error;
1643
1644	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
1645	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1646	if (ret)
1647		goto error;
1648
1649	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1650	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1651	if (ret)
1652		goto error;
1653
1654	return 0;
1655
1656error:
1657	cnic_free_bnx2x_conn_resc(dev, l5_cid);
1658	return ret;
1659}
1660
1661static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1662				struct regpair *ctx_addr)
1663{
1664	struct cnic_local *cp = dev->cnic_priv;
1665	struct cnic_eth_dev *ethdev = cp->ethdev;
1666	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1667	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1668	unsigned long align_off = 0;
1669	dma_addr_t ctx_map;
1670	void *ctx;
1671
1672	if (cp->ctx_align) {
1673		unsigned long mask = cp->ctx_align - 1;
1674
1675		if (cp->ctx_arr[blk].mapping & mask)
1676			align_off = cp->ctx_align -
1677				    (cp->ctx_arr[blk].mapping & mask);
1678	}
1679	ctx_map = cp->ctx_arr[blk].mapping + align_off +
1680		(off * BNX2X_CONTEXT_MEM_SIZE);
1681	ctx = cp->ctx_arr[blk].ctx + align_off +
1682	      (off * BNX2X_CONTEXT_MEM_SIZE);
1683	if (init)
1684		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1685
1686	ctx_addr->lo = ctx_map & 0xffffffff;
1687	ctx_addr->hi = (u64) ctx_map >> 32;
1688	return ctx;
1689}
1690
1691static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1692				u32 num)
1693{
1694	struct cnic_local *cp = dev->cnic_priv;
1695	struct bnx2x *bp = netdev_priv(dev->netdev);
1696	struct iscsi_kwqe_conn_offload1 *req1 =
1697			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
1698	struct iscsi_kwqe_conn_offload2 *req2 =
1699			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
1700	struct iscsi_kwqe_conn_offload3 *req3;
1701	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1702	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1703	u32 cid = ctx->cid;
1704	u32 hw_cid = BNX2X_HW_CID(bp, cid);
1705	struct iscsi_context *ictx;
1706	struct regpair context_addr;
1707	int i, j, n = 2, n_max;
1708	u8 port = BP_PORT(bp);
1709
1710	ctx->ctx_flags = 0;
1711	if (!req2->num_additional_wqes)
1712		return -EINVAL;
1713
1714	n_max = req2->num_additional_wqes + 2;
1715
1716	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1717	if (ictx == NULL)
1718		return -ENOMEM;
1719
1720	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1721
1722	ictx->xstorm_ag_context.hq_prod = 1;
1723
1724	ictx->xstorm_st_context.iscsi.first_burst_length =
1725		ISCSI_DEF_FIRST_BURST_LEN;
1726	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1727		ISCSI_DEF_MAX_RECV_SEG_LEN;
1728	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1729		req1->sq_page_table_addr_lo;
1730	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1731		req1->sq_page_table_addr_hi;
1732	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1733	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1734	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1735		iscsi->hq_info.pgtbl_map & 0xffffffff;
1736	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1737		(u64) iscsi->hq_info.pgtbl_map >> 32;
1738	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1739		iscsi->hq_info.pgtbl[0];
1740	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1741		iscsi->hq_info.pgtbl[1];
1742	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1743		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1744	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1745		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1746	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1747		iscsi->r2tq_info.pgtbl[0];
1748	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1749		iscsi->r2tq_info.pgtbl[1];
1750	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1751		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1752	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1753		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1754	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1755		BNX2X_ISCSI_PBL_NOT_CACHED;
1756	ictx->xstorm_st_context.iscsi.flags.flags |=
1757		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1758	ictx->xstorm_st_context.iscsi.flags.flags |=
1759		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
1760	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1761		ETH_P_8021Q;
1762	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
1763	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
1764
1765		port = 0;
1766	}
1767	ictx->xstorm_st_context.common.flags =
1768		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
1769	ictx->xstorm_st_context.common.flags =
1770		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
1771
1772	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1773	/* TSTORM requires the base address of RQ DB & not PTE */
1774	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1775		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
1776	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1777		req2->rq_page_table_addr_hi;
1778	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1779	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1780	ictx->tstorm_st_context.tcp.flags2 |=
1781		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1782	ictx->tstorm_st_context.tcp.ooo_support_mode =
1783		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
1784
1785	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1786
1787	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1788		req2->rq_page_table_addr_lo;
1789	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
1790		req2->rq_page_table_addr_hi;
1791	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1792	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1793	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1794		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1795	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1796		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1797	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1798		iscsi->r2tq_info.pgtbl[0];
1799	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1800		iscsi->r2tq_info.pgtbl[1];
1801	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1802		req1->cq_page_table_addr_lo;
1803	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1804		req1->cq_page_table_addr_hi;
1805	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1806	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1807	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1808	ictx->ustorm_st_context.task_pbe_cache_index =
1809		BNX2X_ISCSI_PBL_NOT_CACHED;
1810	ictx->ustorm_st_context.task_pdu_cache_index =
1811		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1812
1813	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1814		if (j == 3) {
1815			if (n >= n_max)
1816				break;
1817			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1818			j = 0;
1819		}
1820		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1821		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1822			req3->qp_first_pte[j].hi;
1823		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1824			req3->qp_first_pte[j].lo;
1825	}
1826
1827	ictx->ustorm_st_context.task_pbl_base.lo =
1828		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1829	ictx->ustorm_st_context.task_pbl_base.hi =
1830		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1831	ictx->ustorm_st_context.tce_phy_addr.lo =
1832		iscsi->task_array_info.pgtbl[0];
1833	ictx->ustorm_st_context.tce_phy_addr.hi =
1834		iscsi->task_array_info.pgtbl[1];
1835	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1836	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1837	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1838	ictx->ustorm_st_context.negotiated_rx_and_flags |=
1839		ISCSI_DEF_MAX_BURST_LEN;
1840	ictx->ustorm_st_context.negotiated_rx |=
1841		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1842		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1843
1844	ictx->cstorm_st_context.hq_pbl_base.lo =
1845		iscsi->hq_info.pgtbl_map & 0xffffffff;
1846	ictx->cstorm_st_context.hq_pbl_base.hi =
1847		(u64) iscsi->hq_info.pgtbl_map >> 32;
1848	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1849	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1850	ictx->cstorm_st_context.task_pbl_base.lo =
1851		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1852	ictx->cstorm_st_context.task_pbl_base.hi =
1853		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1854	/* CSTORM and USTORM initialization is different, CSTORM requires
1855	 * CQ DB base & not PTE addr */
1856	ictx->cstorm_st_context.cq_db_base.lo =
1857		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
1858	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1859	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1860	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1861	for (i = 0; i < cp->num_cqs; i++) {
1862		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1863			ISCSI_INITIAL_SN;
1864		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1865			ISCSI_INITIAL_SN;
1866	}
1867
1868	ictx->xstorm_ag_context.cdu_reserved =
1869		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1870				       ISCSI_CONNECTION_TYPE);
1871	ictx->ustorm_ag_context.cdu_usage =
1872		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1873				       ISCSI_CONNECTION_TYPE);
1874	return 0;
1875
1876}
1877
1878static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1879				   u32 num, int *work)
1880{
1881	struct iscsi_kwqe_conn_offload1 *req1;
1882	struct iscsi_kwqe_conn_offload2 *req2;
1883	struct cnic_local *cp = dev->cnic_priv;
1884	struct bnx2x *bp = netdev_priv(dev->netdev);
1885	struct cnic_context *ctx;
1886	struct iscsi_kcqe kcqe;
1887	struct kcqe *cqes[1];
1888	u32 l5_cid;
1889	int ret = 0;
1890
1891	if (num < 2) {
1892		*work = num;
1893		return -EINVAL;
1894	}
1895
1896	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1897	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1898	if ((num - 2) < req2->num_additional_wqes) {
1899		*work = num;
1900		return -EINVAL;
1901	}
1902	*work = 2 + req2->num_additional_wqes;
1903
1904	l5_cid = req1->iscsi_conn_id;
1905	if (l5_cid >= MAX_ISCSI_TBL_SZ)
1906		return -EINVAL;
1907
1908	memset(&kcqe, 0, sizeof(kcqe));
1909	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1910	kcqe.iscsi_conn_id = l5_cid;
1911	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1912
1913	ctx = &cp->ctx_tbl[l5_cid];
1914	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1915		kcqe.completion_status =
1916			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1917		goto done;
1918	}
1919
1920	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1921		atomic_dec(&cp->iscsi_conn);
1922		goto done;
1923	}
1924	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1925	if (ret) {
1926		atomic_dec(&cp->iscsi_conn);
1927		goto done;
1928	}
1929	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1930	if (ret < 0) {
1931		cnic_free_bnx2x_conn_resc(dev, l5_cid);
1932		atomic_dec(&cp->iscsi_conn);
1933		goto done;
1934	}
1935
1936	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1937	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
1938
1939done:
1940	cqes[0] = (struct kcqe *) &kcqe;
1941	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1942	return 0;
1943}
1944
1945
1946static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1947{
1948	struct cnic_local *cp = dev->cnic_priv;
1949	struct iscsi_kwqe_conn_update *req =
1950		(struct iscsi_kwqe_conn_update *) kwqe;
1951	void *data;
1952	union l5cm_specific_data l5_data;
1953	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1954	int ret;
1955
1956	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1957		return -EINVAL;
1958
1959	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1960	if (!data)
1961		return -ENOMEM;
1962
1963	memcpy(data, kwqe, sizeof(struct kwqe));
1964
1965	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1966			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1967	return ret;
1968}
1969
1970static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1971{
1972	struct cnic_local *cp = dev->cnic_priv;
1973	struct bnx2x *bp = netdev_priv(dev->netdev);
1974	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1975	union l5cm_specific_data l5_data;
1976	int ret;
1977	u32 hw_cid;
1978
1979	init_waitqueue_head(&ctx->waitq);
1980	ctx->wait_cond = 0;
1981	memset(&l5_data, 0, sizeof(l5_data));
1982	hw_cid = BNX2X_HW_CID(bp, ctx->cid);
1983
1984	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1985				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
1986
1987	if (ret == 0) {
1988		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
1989		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
1990			return -EBUSY;
1991	}
1992
1993	return 0;
1994}
1995
1996static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1997{
1998	struct cnic_local *cp = dev->cnic_priv;
1999	struct iscsi_kwqe_conn_destroy *req =
2000		(struct iscsi_kwqe_conn_destroy *) kwqe;
2001	u32 l5_cid = req->reserved0;
2002	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2003	int ret = 0;
2004	struct iscsi_kcqe kcqe;
2005	struct kcqe *cqes[1];
2006
2007	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2008		goto skip_cfc_delete;
2009
2010	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
2011		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
2012
2013		if (delta > (2 * HZ))
2014			delta = 0;
2015
2016		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2017		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
2018		goto destroy_reply;
2019	}
2020
2021	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
2022
2023skip_cfc_delete:
2024	cnic_free_bnx2x_conn_resc(dev, l5_cid);
2025
2026	if (!ret) {
2027		atomic_dec(&cp->iscsi_conn);
2028		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2029	}
2030
2031destroy_reply:
2032	memset(&kcqe, 0, sizeof(kcqe));
2033	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
2034	kcqe.iscsi_conn_id = l5_cid;
2035	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
2036	kcqe.iscsi_conn_context_id = req->context_id;
2037
2038	cqes[0] = (struct kcqe *) &kcqe;
2039	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
2040
2041	return 0;
2042}
2043
2044static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2045				      struct l4_kwq_connect_req1 *kwqe1,
2046				      struct l4_kwq_connect_req3 *kwqe3,
2047				      struct l5cm_active_conn_buffer *conn_buf)
2048{
2049	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
2050	struct l5cm_xstorm_conn_buffer *xstorm_buf =
2051		&conn_buf->xstorm_conn_buffer;
2052	struct l5cm_tstorm_conn_buffer *tstorm_buf =
2053		&conn_buf->tstorm_conn_buffer;
2054	struct regpair context_addr;
2055	u32 cid = BNX2X_SW_CID(kwqe1->cid);
2056	struct in6_addr src_ip, dst_ip;
2057	int i;
2058	u32 *addrp;
2059
2060	addrp = (u32 *) &conn_addr->local_ip_addr;
2061	for (i = 0; i < 4; i++, addrp++)
2062		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2063
2064	addrp = (u32 *) &conn_addr->remote_ip_addr;
2065	for (i = 0; i < 4; i++, addrp++)
2066		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2067
2068	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
2069
2070	xstorm_buf->context_addr.hi = context_addr.hi;
2071	xstorm_buf->context_addr.lo = context_addr.lo;
2072	xstorm_buf->mss = 0xffff;
2073	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
2074	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
2075		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
2076	xstorm_buf->pseudo_header_checksum =
2077		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
2078
2079	if (kwqe3->ka_timeout) {
2080		tstorm_buf->ka_enable = 1;
2081		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
2082		tstorm_buf->ka_interval = kwqe3->ka_interval;
2083		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
2084	}
2085	tstorm_buf->max_rt_time = 0xffffffff;
2086}
2087
2088static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2089{
2090	struct bnx2x *bp = netdev_priv(dev->netdev);
2091	u32 pfid = bp->pfid;
2092	u8 *mac = dev->mac_addr;
2093
2094	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2095		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
2096	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2097		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
2098	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2099		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
2100	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2101		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
2102	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2103		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
2104	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2105		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
2106
2107	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2108		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
2109	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2110		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2111		 mac[4]);
2112	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2113		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
2114	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2115		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2116		 mac[2]);
2117	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2118		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
2119	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2120		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2121		 mac[0]);
2122}
2123
2124static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2125			      u32 num, int *work)
2126{
2127	struct cnic_local *cp = dev->cnic_priv;
2128	struct bnx2x *bp = netdev_priv(dev->netdev);
2129	struct l4_kwq_connect_req1 *kwqe1 =
2130		(struct l4_kwq_connect_req1 *) wqes[0];
2131	struct l4_kwq_connect_req3 *kwqe3;
2132	struct l5cm_active_conn_buffer *conn_buf;
2133	struct l5cm_conn_addr_params *conn_addr;
2134	union l5cm_specific_data l5_data;
2135	u32 l5_cid = kwqe1->pg_cid;
2136	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
2137	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2138	int ret;
2139
2140	if (num < 2) {
2141		*work = num;
2142		return -EINVAL;
2143	}
2144
2145	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
2146		*work = 3;
2147	else
2148		*work = 2;
2149
2150	if (num < *work) {
2151		*work = num;
2152		return -EINVAL;
2153	}
2154
2155	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
2156		netdev_err(dev->netdev, "conn_buf size too big\n");
2157		return -ENOMEM;
2158	}
2159	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2160	if (!conn_buf)
2161		return -ENOMEM;
2162
2163	memset(conn_buf, 0, sizeof(*conn_buf));
2164
2165	conn_addr = &conn_buf->conn_addr_buf;
2166	conn_addr->remote_addr_0 = csk->ha[0];
2167	conn_addr->remote_addr_1 = csk->ha[1];
2168	conn_addr->remote_addr_2 = csk->ha[2];
2169	conn_addr->remote_addr_3 = csk->ha[3];
2170	conn_addr->remote_addr_4 = csk->ha[4];
2171	conn_addr->remote_addr_5 = csk->ha[5];
2172
2173	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
2174		struct l4_kwq_connect_req2 *kwqe2 =
2175			(struct l4_kwq_connect_req2 *) wqes[1];
2176
2177		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
2178		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
2179		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
2180
2181		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
2182		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
2183		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
2184		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
2185	}
2186	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
2187
2188	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
2189	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
2190	conn_addr->local_tcp_port = kwqe1->src_port;
2191	conn_addr->remote_tcp_port = kwqe1->dst_port;
2192
2193	conn_addr->pmtu = kwqe3->pmtu;
2194	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2195
2196	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
2197		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
2198
2199	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2200			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2201	if (!ret)
2202		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2203
2204	return ret;
2205}
2206
2207static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2208{
2209	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2210	union l5cm_specific_data l5_data;
2211	int ret;
2212
2213	memset(&l5_data, 0, sizeof(l5_data));
2214	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2215			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2216	return ret;
2217}
2218
2219static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2220{
2221	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2222	union l5cm_specific_data l5_data;
2223	int ret;
2224
2225	memset(&l5_data, 0, sizeof(l5_data));
2226	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2227			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2228	return ret;
2229}
2230static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2231{
2232	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2233	struct l4_kcq kcqe;
2234	struct kcqe *cqes[1];
2235
2236	memset(&kcqe, 0, sizeof(kcqe));
2237	kcqe.pg_host_opaque = req->host_opaque;
2238	kcqe.pg_cid = req->host_opaque;
2239	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2240	cqes[0] = (struct kcqe *) &kcqe;
2241	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2242	return 0;
2243}
2244
2245static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2246{
2247	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2248	struct l4_kcq kcqe;
2249	struct kcqe *cqes[1];
2250
2251	memset(&kcqe, 0, sizeof(kcqe));
2252	kcqe.pg_host_opaque = req->pg_host_opaque;
2253	kcqe.pg_cid = req->pg_cid;
2254	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2255	cqes[0] = (struct kcqe *) &kcqe;
2256	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2257	return 0;
2258}
2259
2260static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2261{
2262	struct fcoe_kwqe_stat *req;
2263	struct fcoe_stat_ramrod_params *fcoe_stat;
2264	union l5cm_specific_data l5_data;
2265	struct cnic_local *cp = dev->cnic_priv;
2266	struct bnx2x *bp = netdev_priv(dev->netdev);
2267	int ret;
2268	u32 cid;
2269
2270	req = (struct fcoe_kwqe_stat *) kwqe;
2271	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2272
2273	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2274	if (!fcoe_stat)
2275		return -ENOMEM;
2276
2277	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2278	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2279
2280	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
2281				  FCOE_CONNECTION_TYPE, &l5_data);
2282	return ret;
2283}
2284
2285static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2286				 u32 num, int *work)
2287{
2288	int ret;
2289	struct cnic_local *cp = dev->cnic_priv;
2290	struct bnx2x *bp = netdev_priv(dev->netdev);
2291	u32 cid;
2292	struct fcoe_init_ramrod_params *fcoe_init;
2293	struct fcoe_kwqe_init1 *req1;
2294	struct fcoe_kwqe_init2 *req2;
2295	struct fcoe_kwqe_init3 *req3;
2296	union l5cm_specific_data l5_data;
2297
2298	if (num < 3) {
2299		*work = num;
2300		return -EINVAL;
2301	}
2302	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2303	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2304	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2305	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2306		*work = 1;
2307		return -EINVAL;
2308	}
2309	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2310		*work = 2;
2311		return -EINVAL;
2312	}
2313
2314	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2315		netdev_err(dev->netdev, "fcoe_init size too big\n");
2316		return -ENOMEM;
2317	}
2318	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2319	if (!fcoe_init)
2320		return -ENOMEM;
2321
2322	memset(fcoe_init, 0, sizeof(*fcoe_init));
2323	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2324	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2325	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
2326	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2327	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2328	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
2329
2330	fcoe_init->sb_num = cp->status_blk_num;
2331	fcoe_init->eq_prod = MAX_KCQ_IDX;
2332	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2333	cp->kcq2.sw_prod_idx = 0;
2334
2335	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2336	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2337				  FCOE_CONNECTION_TYPE, &l5_data);
2338	*work = 3;
2339	return ret;
2340}
2341
2342static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2343				 u32 num, int *work)
2344{
2345	int ret = 0;
2346	u32 cid = -1, l5_cid;
2347	struct cnic_local *cp = dev->cnic_priv;
2348	struct bnx2x *bp = netdev_priv(dev->netdev);
2349	struct fcoe_kwqe_conn_offload1 *req1;
2350	struct fcoe_kwqe_conn_offload2 *req2;
2351	struct fcoe_kwqe_conn_offload3 *req3;
2352	struct fcoe_kwqe_conn_offload4 *req4;
2353	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2354	struct cnic_context *ctx;
2355	struct fcoe_context *fctx;
2356	struct regpair ctx_addr;
2357	union l5cm_specific_data l5_data;
2358	struct fcoe_kcqe kcqe;
2359	struct kcqe *cqes[1];
2360
2361	if (num < 4) {
2362		*work = num;
2363		return -EINVAL;
2364	}
2365	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2366	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2367	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2368	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2369
2370	*work = 4;
2371
2372	l5_cid = req1->fcoe_conn_id;
2373	if (l5_cid >= dev->max_fcoe_conn)
2374		goto err_reply;
2375
2376	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2377
2378	ctx = &cp->ctx_tbl[l5_cid];
2379	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2380		goto err_reply;
2381
2382	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2383	if (ret) {
2384		ret = 0;
2385		goto err_reply;
2386	}
2387	cid = ctx->cid;
2388
2389	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2390	if (fctx) {
2391		u32 hw_cid = BNX2X_HW_CID(bp, cid);
2392		u32 val;
2393
2394		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2395					     FCOE_CONNECTION_TYPE);
2396		fctx->xstorm_ag_context.cdu_reserved = val;
2397		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2398					     FCOE_CONNECTION_TYPE);
2399		fctx->ustorm_ag_context.cdu_usage = val;
2400	}
2401	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2402		netdev_err(dev->netdev, "fcoe_offload size too big\n");
2403		goto err_reply;
2404	}
2405	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2406	if (!fcoe_offload)
2407		goto err_reply;
2408
2409	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2410	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2411	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2412	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2413	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2414
2415	cid = BNX2X_HW_CID(bp, cid);
2416	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2417				  FCOE_CONNECTION_TYPE, &l5_data);
2418	if (!ret)
2419		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2420
2421	return ret;
2422
2423err_reply:
2424	if (cid != -1)
2425		cnic_free_bnx2x_conn_resc(dev, l5_cid);
2426
2427	memset(&kcqe, 0, sizeof(kcqe));
2428	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2429	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2430	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2431
2432	cqes[0] = (struct kcqe *) &kcqe;
2433	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2434	return ret;
2435}
2436
2437static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2438{
2439	struct fcoe_kwqe_conn_enable_disable *req;
2440	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2441	union l5cm_specific_data l5_data;
2442	int ret;
2443	u32 cid, l5_cid;
2444	struct cnic_local *cp = dev->cnic_priv;
2445
2446	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2447	cid = req->context_id;
2448	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2449
2450	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2451		netdev_err(dev->netdev, "fcoe_enable size too big\n");
2452		return -ENOMEM;
2453	}
2454	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2455	if (!fcoe_enable)
2456		return -ENOMEM;
2457
2458	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2459	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2460	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2461				  FCOE_CONNECTION_TYPE, &l5_data);
2462	return ret;
2463}
2464
2465static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2466{
2467	struct fcoe_kwqe_conn_enable_disable *req;
2468	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2469	union l5cm_specific_data l5_data;
2470	int ret;
2471	u32 cid, l5_cid;
2472	struct cnic_local *cp = dev->cnic_priv;
2473
2474	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2475	cid = req->context_id;
2476	l5_cid = req->conn_id;
2477	if (l5_cid >= dev->max_fcoe_conn)
2478		return -EINVAL;
2479
2480	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2481
2482	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2483		netdev_err(dev->netdev, "fcoe_disable size too big\n");
2484		return -ENOMEM;
2485	}
2486	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2487	if (!fcoe_disable)
2488		return -ENOMEM;
2489
2490	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2491	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2492	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2493				  FCOE_CONNECTION_TYPE, &l5_data);
2494	return ret;
2495}
2496
2497static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2498{
2499	struct fcoe_kwqe_conn_destroy *req;
2500	union l5cm_specific_data l5_data;
2501	int ret;
2502	u32 cid, l5_cid;
2503	struct cnic_local *cp = dev->cnic_priv;
2504	struct cnic_context *ctx;
2505	struct fcoe_kcqe kcqe;
2506	struct kcqe *cqes[1];
2507
2508	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2509	cid = req->context_id;
2510	l5_cid = req->conn_id;
2511	if (l5_cid >= dev->max_fcoe_conn)
2512		return -EINVAL;
2513
2514	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2515
2516	ctx = &cp->ctx_tbl[l5_cid];
2517
2518	init_waitqueue_head(&ctx->waitq);
2519	ctx->wait_cond = 0;
2520
2521	memset(&kcqe, 0, sizeof(kcqe));
2522	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2523	memset(&l5_data, 0, sizeof(l5_data));
2524	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2525				  FCOE_CONNECTION_TYPE, &l5_data);
2526	if (ret == 0) {
2527		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2528		if (ctx->wait_cond)
2529			kcqe.completion_status = 0;
2530	}
2531
2532	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2533	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2534
2535	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2536	kcqe.fcoe_conn_id = req->conn_id;
2537	kcqe.fcoe_conn_context_id = cid;
2538
2539	cqes[0] = (struct kcqe *) &kcqe;
2540	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2541	return ret;
2542}
2543
2544static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2545{
2546	struct cnic_local *cp = dev->cnic_priv;
2547	u32 i;
2548
2549	for (i = start_cid; i < cp->max_cid_space; i++) {
2550		struct cnic_context *ctx = &cp->ctx_tbl[i];
2551		int j;
2552
2553		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2554			msleep(10);
2555
2556		for (j = 0; j < 5; j++) {
2557			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2558				break;
2559			msleep(20);
2560		}
2561
2562		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2563			netdev_warn(dev->netdev, "CID %x not deleted\n",
2564				   ctx->cid);
2565	}
2566}
2567
2568static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2569{
2570	union l5cm_specific_data l5_data;
2571	struct cnic_local *cp = dev->cnic_priv;
2572	struct bnx2x *bp = netdev_priv(dev->netdev);
2573	int ret;
2574	u32 cid;
2575
2576	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2577
2578	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2579
2580	memset(&l5_data, 0, sizeof(l5_data));
2581	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2582				  FCOE_CONNECTION_TYPE, &l5_data);
2583	return ret;
2584}
2585
2586static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2587{
2588	struct cnic_local *cp = dev->cnic_priv;
2589	struct kcqe kcqe;
2590	struct kcqe *cqes[1];
2591	u32 cid;
2592	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2593	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
2594	u32 kcqe_op;
2595	int ulp_type;
2596
2597	cid = kwqe->kwqe_info0;
2598	memset(&kcqe, 0, sizeof(kcqe));
2599
2600	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2601		u32 l5_cid = 0;
2602
2603		ulp_type = CNIC_ULP_FCOE;
2604		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2605			struct fcoe_kwqe_conn_enable_disable *req;
2606
2607			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2608			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2609			cid = req->context_id;
2610			l5_cid = req->conn_id;
2611		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2612			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2613		} else {
2614			return;
2615		}
2616		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2617		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
2618		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2619		kcqe.kcqe_info2 = cid;
2620		kcqe.kcqe_info0 = l5_cid;
2621
2622	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
2623		ulp_type = CNIC_ULP_ISCSI;
2624		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2625			cid = kwqe->kwqe_info1;
2626
2627		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2628		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
2629		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
2630		kcqe.kcqe_info2 = cid;
2631		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2632
2633	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2634		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
2635
2636		ulp_type = CNIC_ULP_L4;
2637		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2638			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2639		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2640			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2641		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2642			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2643		else
2644			return;
2645
2646		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2647				    KCQE_FLAGS_LAYER_MASK_L4;
2648		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2649		l4kcqe->cid = cid;
2650		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2651	} else {
2652		return;
2653	}
2654
2655	cqes[0] = &kcqe;
2656	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2657}
2658
2659static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2660					 struct kwqe *wqes[], u32 num_wqes)
2661{
2662	int i, work, ret;
2663	u32 opcode;
2664	struct kwqe *kwqe;
2665
2666	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2667		return -EAGAIN;		/* bnx2 is down */
2668
2669	for (i = 0; i < num_wqes; ) {
2670		kwqe = wqes[i];
2671		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2672		work = 1;
2673
2674		switch (opcode) {
2675		case ISCSI_KWQE_OPCODE_INIT1:
2676			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2677			break;
2678		case ISCSI_KWQE_OPCODE_INIT2:
2679			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2680			break;
2681		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2682			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2683						     num_wqes - i, &work);
2684			break;
2685		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2686			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2687			break;
2688		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2689			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2690			break;
2691		case L4_KWQE_OPCODE_VALUE_CONNECT1:
2692			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2693						 &work);
2694			break;
2695		case L4_KWQE_OPCODE_VALUE_CLOSE:
2696			ret = cnic_bnx2x_close(dev, kwqe);
2697			break;
2698		case L4_KWQE_OPCODE_VALUE_RESET:
2699			ret = cnic_bnx2x_reset(dev, kwqe);
2700			break;
2701		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2702			ret = cnic_bnx2x_offload_pg(dev, kwqe);
2703			break;
2704		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2705			ret = cnic_bnx2x_update_pg(dev, kwqe);
2706			break;
2707		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2708			ret = 0;
2709			break;
2710		default:
2711			ret = 0;
2712			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2713				   opcode);
2714			break;
2715		}
2716		if (ret < 0) {
2717			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2718				   opcode);
2719
2720			/* Possibly bnx2x parity error, send completion
2721			 * to ulp drivers with error code to speed up
2722			 * cleanup and reset recovery.
2723			 */
2724			if (ret == -EIO || ret == -EAGAIN)
2725				cnic_bnx2x_kwqe_err(dev, kwqe);
2726		}
2727		i += work;
2728	}
2729	return 0;
2730}
2731
2732static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2733					struct kwqe *wqes[], u32 num_wqes)
2734{
2735	struct bnx2x *bp = netdev_priv(dev->netdev);
2736	int i, work, ret;
2737	u32 opcode;
2738	struct kwqe *kwqe;
2739
2740	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2741		return -EAGAIN;		/* bnx2 is down */
2742
2743	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
2744		return -EINVAL;
2745
2746	for (i = 0; i < num_wqes; ) {
2747		kwqe = wqes[i];
2748		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2749		work = 1;
2750
2751		switch (opcode) {
2752		case FCOE_KWQE_OPCODE_INIT1:
2753			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2754						    num_wqes - i, &work);
2755			break;
2756		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2757			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2758						    num_wqes - i, &work);
2759			break;
2760		case FCOE_KWQE_OPCODE_ENABLE_CONN:
2761			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2762			break;
2763		case FCOE_KWQE_OPCODE_DISABLE_CONN:
2764			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2765			break;
2766		case FCOE_KWQE_OPCODE_DESTROY_CONN:
2767			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2768			break;
2769		case FCOE_KWQE_OPCODE_DESTROY:
2770			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2771			break;
2772		case FCOE_KWQE_OPCODE_STAT:
2773			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2774			break;
2775		default:
2776			ret = 0;
2777			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2778				   opcode);
2779			break;
2780		}
2781		if (ret < 0) {
2782			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2783				   opcode);
2784
2785			/* Possibly bnx2x parity error, send completion
2786			 * to ulp drivers with error code to speed up
2787			 * cleanup and reset recovery.
2788			 */
2789			if (ret == -EIO || ret == -EAGAIN)
2790				cnic_bnx2x_kwqe_err(dev, kwqe);
2791		}
2792		i += work;
2793	}
2794	return 0;
2795}
2796
2797static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2798				   u32 num_wqes)
2799{
2800	int ret = -EINVAL;
2801	u32 layer_code;
2802
2803	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2804		return -EAGAIN;		/* bnx2x is down */
2805
2806	if (!num_wqes)
2807		return 0;
2808
2809	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2810	switch (layer_code) {
2811	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2812	case KWQE_FLAGS_LAYER_MASK_L4:
2813	case KWQE_FLAGS_LAYER_MASK_L2:
2814		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2815		break;
2816
2817	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2818		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2819		break;
2820	}
2821	return ret;
2822}
2823
2824static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2825{
2826	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2827		return KCQE_FLAGS_LAYER_MASK_L4;
2828
2829	return opflag & KCQE_FLAGS_LAYER_MASK;
2830}
2831
2832static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2833{
2834	struct cnic_local *cp = dev->cnic_priv;
2835	int i, j, comp = 0;
2836
2837	i = 0;
2838	j = 1;
2839	while (num_cqes) {
2840		struct cnic_ulp_ops *ulp_ops;
2841		int ulp_type;
2842		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2843		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2844
2845		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2846			comp++;
2847
2848		while (j < num_cqes) {
2849			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2850
2851			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2852				break;
2853
2854			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2855				comp++;
2856			j++;
2857		}
2858
2859		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2860			ulp_type = CNIC_ULP_RDMA;
2861		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2862			ulp_type = CNIC_ULP_ISCSI;
2863		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2864			ulp_type = CNIC_ULP_FCOE;
2865		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2866			ulp_type = CNIC_ULP_L4;
2867		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2868			goto end;
2869		else {
2870			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2871				   kcqe_op_flag);
2872			goto end;
2873		}
2874
2875		rcu_read_lock();
2876		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2877		if (likely(ulp_ops)) {
2878			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2879						  cp->completed_kcq + i, j);
2880		}
2881		rcu_read_unlock();
2882end:
2883		num_cqes -= j;
2884		i += j;
2885		j = 1;
2886	}
2887	if (unlikely(comp))
2888		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2889}
2890
2891static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2892{
2893	struct cnic_local *cp = dev->cnic_priv;
2894	u16 i, ri, hw_prod, last;
2895	struct kcqe *kcqe;
2896	int kcqe_cnt = 0, last_cnt = 0;
2897
2898	i = ri = last = info->sw_prod_idx;
2899	ri &= MAX_KCQ_IDX;
2900	hw_prod = *info->hw_prod_idx_ptr;
2901	hw_prod = info->hw_idx(hw_prod);
2902
2903	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2904		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2905		cp->completed_kcq[kcqe_cnt++] = kcqe;
2906		i = info->next_idx(i);
2907		ri = i & MAX_KCQ_IDX;
2908		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2909			last_cnt = kcqe_cnt;
2910			last = i;
2911		}
2912	}
2913
2914	info->sw_prod_idx = last;
2915	return last_cnt;
2916}
2917
2918static int cnic_l2_completion(struct cnic_local *cp)
2919{
2920	u16 hw_cons, sw_cons;
2921	struct cnic_uio_dev *udev = cp->udev;
2922	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2923					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
2924	u32 cmd;
2925	int comp = 0;
2926
2927	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2928		return 0;
2929
2930	hw_cons = *cp->rx_cons_ptr;
2931	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2932		hw_cons++;
2933
2934	sw_cons = cp->rx_cons;
2935	while (sw_cons != hw_cons) {
2936		u8 cqe_fp_flags;
2937
2938		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2939		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2940		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2941			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2942			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2943			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2944			    cmd == RAMROD_CMD_ID_ETH_HALT)
2945				comp++;
2946		}
2947		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2948	}
2949	return comp;
2950}
2951
2952static void cnic_chk_pkt_rings(struct cnic_local *cp)
2953{
2954	u16 rx_cons, tx_cons;
2955	int comp = 0;
2956
2957	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2958		return;
2959
2960	rx_cons = *cp->rx_cons_ptr;
2961	tx_cons = *cp->tx_cons_ptr;
2962	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2963		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2964			comp = cnic_l2_completion(cp);
2965
2966		cp->tx_cons = tx_cons;
2967		cp->rx_cons = rx_cons;
2968
2969		if (cp->udev)
2970			uio_event_notify(&cp->udev->cnic_uinfo);
2971	}
2972	if (comp)
2973		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2974}
2975
2976static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2977{
2978	struct cnic_local *cp = dev->cnic_priv;
2979	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2980	int kcqe_cnt;
2981
2982	/* status block index must be read before reading other fields */
2983	rmb();
2984	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2985
2986	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2987
2988		service_kcqes(dev, kcqe_cnt);
2989
2990		/* Tell compiler that status_blk fields can change. */
2991		barrier();
2992		status_idx = (u16) *cp->kcq1.status_idx_ptr;
2993		/* status block index must be read first */
2994		rmb();
2995		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2996	}
2997
2998	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2999
3000	cnic_chk_pkt_rings(cp);
3001
3002	return status_idx;
3003}
3004
3005static int cnic_service_bnx2(void *data, void *status_blk)
3006{
3007	struct cnic_dev *dev = data;
3008
3009	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3010		struct status_block *sblk = status_blk;
3011
3012		return sblk->status_idx;
3013	}
3014
3015	return cnic_service_bnx2_queues(dev);
3016}
3017
3018static void cnic_service_bnx2_msix(struct tasklet_struct *t)
3019{
3020	struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
3021	struct cnic_dev *dev = cp->dev;
3022
3023	cp->last_status_idx = cnic_service_bnx2_queues(dev);
3024
3025	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3026		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3027}
3028
3029static void cnic_doirq(struct cnic_dev *dev)
3030{
3031	struct cnic_local *cp = dev->cnic_priv;
3032
3033	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3034		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
3035
3036		prefetch(cp->status_blk.gen);
3037		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
3038
3039		tasklet_schedule(&cp->cnic_irq_task);
3040	}
3041}
3042
3043static irqreturn_t cnic_irq(int irq, void *dev_instance)
3044{
3045	struct cnic_dev *dev = dev_instance;
3046	struct cnic_local *cp = dev->cnic_priv;
3047
3048	if (cp->ack_int)
3049		cp->ack_int(dev);
3050
3051	cnic_doirq(dev);
3052
3053	return IRQ_HANDLED;
3054}
3055
3056static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3057				      u16 index, u8 op, u8 update)
3058{
3059	struct bnx2x *bp = netdev_priv(dev->netdev);
3060	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
3061		       COMMAND_REG_INT_ACK);
3062	struct igu_ack_register igu_ack;
3063
3064	igu_ack.status_block_index = index;
3065	igu_ack.sb_id_and_flags =
3066			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3067			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3068			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3069			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3070
3071	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3072}
3073
3074static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3075			    u16 index, u8 op, u8 update)
3076{
3077	struct igu_regular cmd_data;
3078	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3079
3080	cmd_data.sb_id_and_flags =
3081		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
3082		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3083		(update << IGU_REGULAR_BUPDATE_SHIFT) |
3084		(op << IGU_REGULAR_ENABLE_INT_SHIFT);
3085
3086
3087	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3088}
3089
3090static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3091{
3092	struct cnic_local *cp = dev->cnic_priv;
3093
3094	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3095			   IGU_INT_DISABLE, 0);
3096}
3097
3098static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3099{
3100	struct cnic_local *cp = dev->cnic_priv;
3101
3102	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3103			IGU_INT_DISABLE, 0);
3104}
3105
3106static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3107{
3108	struct cnic_local *cp = dev->cnic_priv;
3109
3110	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3111			   IGU_INT_ENABLE, 1);
3112}
3113
3114static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3115{
3116	struct cnic_local *cp = dev->cnic_priv;
3117
3118	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3119			IGU_INT_ENABLE, 1);
3120}
3121
3122static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3123{
3124	u32 last_status = *info->status_idx_ptr;
3125	int kcqe_cnt;
3126
3127	/* status block index must be read before reading the KCQ */
3128	rmb();
3129	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3130
3131		service_kcqes(dev, kcqe_cnt);
3132
3133		/* Tell compiler that sblk fields can change. */
3134		barrier();
3135
3136		last_status = *info->status_idx_ptr;
3137		/* status block index must be read before reading the KCQ */
3138		rmb();
3139	}
3140	return last_status;
3141}
3142
3143static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
3144{
3145	struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
3146	struct cnic_dev *dev = cp->dev;
3147	struct bnx2x *bp = netdev_priv(dev->netdev);
3148	u32 status_idx, new_status_idx;
3149
3150	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3151		return;
3152
3153	while (1) {
3154		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3155
3156		CNIC_WR16(dev, cp->kcq1.io_addr,
3157			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3158
3159		if (!CNIC_SUPPORTS_FCOE(bp)) {
3160			cp->arm_int(dev, status_idx);
3161			break;
3162		}
3163
3164		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3165
3166		if (new_status_idx != status_idx)
3167			continue;
3168
3169		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3170			  MAX_KCQ_IDX);
3171
3172		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3173				status_idx, IGU_INT_ENABLE, 1);
3174
3175		break;
3176	}
3177}
3178
3179static int cnic_service_bnx2x(void *data, void *status_blk)
3180{
3181	struct cnic_dev *dev = data;
3182	struct cnic_local *cp = dev->cnic_priv;
3183
3184	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3185		cnic_doirq(dev);
3186
3187	cnic_chk_pkt_rings(cp);
3188
3189	return 0;
3190}
3191
3192static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3193{
3194	struct cnic_ulp_ops *ulp_ops;
3195
3196	if (if_type == CNIC_ULP_ISCSI)
3197		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3198
3199	mutex_lock(&cnic_lock);
3200	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3201					    lockdep_is_held(&cnic_lock));
3202	if (!ulp_ops) {
3203		mutex_unlock(&cnic_lock);
3204		return;
3205	}
3206	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3207	mutex_unlock(&cnic_lock);
3208
3209	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3210		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3211
3212	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3213}
3214
3215static void cnic_ulp_stop(struct cnic_dev *dev)
3216{
3217	struct cnic_local *cp = dev->cnic_priv;
3218	int if_type;
3219
3220	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3221		cnic_ulp_stop_one(cp, if_type);
3222}
3223
3224static void cnic_ulp_start(struct cnic_dev *dev)
3225{
3226	struct cnic_local *cp = dev->cnic_priv;
3227	int if_type;
3228
3229	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3230		struct cnic_ulp_ops *ulp_ops;
3231
3232		mutex_lock(&cnic_lock);
3233		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3234						    lockdep_is_held(&cnic_lock));
3235		if (!ulp_ops || !ulp_ops->cnic_start) {
3236			mutex_unlock(&cnic_lock);
3237			continue;
3238		}
3239		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3240		mutex_unlock(&cnic_lock);
3241
3242		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3243			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3244
3245		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3246	}
3247}
3248
3249static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3250{
3251	struct cnic_local *cp = dev->cnic_priv;
3252	struct cnic_ulp_ops *ulp_ops;
3253	int rc;
3254
3255	mutex_lock(&cnic_lock);
3256	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
3257					    lockdep_is_held(&cnic_lock));
3258	if (ulp_ops && ulp_ops->cnic_get_stats)
3259		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3260	else
3261		rc = -ENODEV;
3262	mutex_unlock(&cnic_lock);
3263	return rc;
3264}
3265
3266static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3267{
3268	struct cnic_dev *dev = data;
3269	int ulp_type = CNIC_ULP_ISCSI;
3270
3271	switch (info->cmd) {
3272	case CNIC_CTL_STOP_CMD:
3273		cnic_hold(dev);
3274
3275		cnic_ulp_stop(dev);
3276		cnic_stop_hw(dev);
3277
3278		cnic_put(dev);
3279		break;
3280	case CNIC_CTL_START_CMD:
3281		cnic_hold(dev);
3282
3283		if (!cnic_start_hw(dev))
3284			cnic_ulp_start(dev);
3285
3286		cnic_put(dev);
3287		break;
3288	case CNIC_CTL_STOP_ISCSI_CMD: {
3289		struct cnic_local *cp = dev->cnic_priv;
3290		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3291		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3292		break;
3293	}
3294	case CNIC_CTL_COMPLETION_CMD: {
3295		struct cnic_ctl_completion *comp = &info->data.comp;
3296		u32 cid = BNX2X_SW_CID(comp->cid);
3297		u32 l5_cid;
3298		struct cnic_local *cp = dev->cnic_priv;
3299
3300		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3301			break;
3302
3303		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3304			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3305
3306			if (unlikely(comp->error)) {
3307				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3308				netdev_err(dev->netdev,
3309					   "CID %x CFC delete comp error %x\n",
3310					   cid, comp->error);
3311			}
3312
3313			ctx->wait_cond = 1;
3314			wake_up(&ctx->waitq);
3315		}
3316		break;
3317	}
3318	case CNIC_CTL_FCOE_STATS_GET_CMD:
3319		ulp_type = CNIC_ULP_FCOE;
3320		fallthrough;
3321	case CNIC_CTL_ISCSI_STATS_GET_CMD:
3322		cnic_hold(dev);
3323		cnic_copy_ulp_stats(dev, ulp_type);
3324		cnic_put(dev);
3325		break;
3326
3327	default:
3328		return -EINVAL;
3329	}
3330	return 0;
3331}
3332
3333static void cnic_ulp_init(struct cnic_dev *dev)
3334{
3335	int i;
3336	struct cnic_local *cp = dev->cnic_priv;
3337
3338	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3339		struct cnic_ulp_ops *ulp_ops;
3340
3341		mutex_lock(&cnic_lock);
3342		ulp_ops = cnic_ulp_tbl_prot(i);
3343		if (!ulp_ops || !ulp_ops->cnic_init) {
3344			mutex_unlock(&cnic_lock);
3345			continue;
3346		}
3347		ulp_get(ulp_ops);
3348		mutex_unlock(&cnic_lock);
3349
3350		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3351			ulp_ops->cnic_init(dev);
3352
3353		ulp_put(ulp_ops);
3354	}
3355}
3356
3357static void cnic_ulp_exit(struct cnic_dev *dev)
3358{
3359	int i;
3360	struct cnic_local *cp = dev->cnic_priv;
3361
3362	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3363		struct cnic_ulp_ops *ulp_ops;
3364
3365		mutex_lock(&cnic_lock);
3366		ulp_ops = cnic_ulp_tbl_prot(i);
3367		if (!ulp_ops || !ulp_ops->cnic_exit) {
3368			mutex_unlock(&cnic_lock);
3369			continue;
3370		}
3371		ulp_get(ulp_ops);
3372		mutex_unlock(&cnic_lock);
3373
3374		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3375			ulp_ops->cnic_exit(dev);
3376
3377		ulp_put(ulp_ops);
3378	}
3379}
3380
3381static int cnic_cm_offload_pg(struct cnic_sock *csk)
3382{
3383	struct cnic_dev *dev = csk->dev;
3384	struct l4_kwq_offload_pg *l4kwqe;
3385	struct kwqe *wqes[1];
3386
3387	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3388	memset(l4kwqe, 0, sizeof(*l4kwqe));
3389	wqes[0] = (struct kwqe *) l4kwqe;
3390
3391	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3392	l4kwqe->flags =
3393		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3394	l4kwqe->l2hdr_nbytes = ETH_HLEN;
3395
3396	l4kwqe->da0 = csk->ha[0];
3397	l4kwqe->da1 = csk->ha[1];
3398	l4kwqe->da2 = csk->ha[2];
3399	l4kwqe->da3 = csk->ha[3];
3400	l4kwqe->da4 = csk->ha[4];
3401	l4kwqe->da5 = csk->ha[5];
3402
3403	l4kwqe->sa0 = dev->mac_addr[0];
3404	l4kwqe->sa1 = dev->mac_addr[1];
3405	l4kwqe->sa2 = dev->mac_addr[2];
3406	l4kwqe->sa3 = dev->mac_addr[3];
3407	l4kwqe->sa4 = dev->mac_addr[4];
3408	l4kwqe->sa5 = dev->mac_addr[5];
3409
3410	l4kwqe->etype = ETH_P_IP;
3411	l4kwqe->ipid_start = DEF_IPID_START;
3412	l4kwqe->host_opaque = csk->l5_cid;
3413
3414	if (csk->vlan_id) {
3415		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3416		l4kwqe->vlan_tag = csk->vlan_id;
3417		l4kwqe->l2hdr_nbytes += 4;
3418	}
3419
3420	return dev->submit_kwqes(dev, wqes, 1);
3421}
3422
3423static int cnic_cm_update_pg(struct cnic_sock *csk)
3424{
3425	struct cnic_dev *dev = csk->dev;
3426	struct l4_kwq_update_pg *l4kwqe;
3427	struct kwqe *wqes[1];
3428
3429	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3430	memset(l4kwqe, 0, sizeof(*l4kwqe));
3431	wqes[0] = (struct kwqe *) l4kwqe;
3432
3433	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3434	l4kwqe->flags =
3435		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3436	l4kwqe->pg_cid = csk->pg_cid;
3437
3438	l4kwqe->da0 = csk->ha[0];
3439	l4kwqe->da1 = csk->ha[1];
3440	l4kwqe->da2 = csk->ha[2];
3441	l4kwqe->da3 = csk->ha[3];
3442	l4kwqe->da4 = csk->ha[4];
3443	l4kwqe->da5 = csk->ha[5];
3444
3445	l4kwqe->pg_host_opaque = csk->l5_cid;
3446	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3447
3448	return dev->submit_kwqes(dev, wqes, 1);
3449}
3450
3451static int cnic_cm_upload_pg(struct cnic_sock *csk)
3452{
3453	struct cnic_dev *dev = csk->dev;
3454	struct l4_kwq_upload *l4kwqe;
3455	struct kwqe *wqes[1];
3456
3457	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3458	memset(l4kwqe, 0, sizeof(*l4kwqe));
3459	wqes[0] = (struct kwqe *) l4kwqe;
3460
3461	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3462	l4kwqe->flags =
3463		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3464	l4kwqe->cid = csk->pg_cid;
3465
3466	return dev->submit_kwqes(dev, wqes, 1);
3467}
3468
3469static int cnic_cm_conn_req(struct cnic_sock *csk)
3470{
3471	struct cnic_dev *dev = csk->dev;
3472	struct l4_kwq_connect_req1 *l4kwqe1;
3473	struct l4_kwq_connect_req2 *l4kwqe2;
3474	struct l4_kwq_connect_req3 *l4kwqe3;
3475	struct kwqe *wqes[3];
3476	u8 tcp_flags = 0;
3477	int num_wqes = 2;
3478
3479	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3480	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3481	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3482	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3483	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3484	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3485
3486	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3487	l4kwqe3->flags =
3488		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3489	l4kwqe3->ka_timeout = csk->ka_timeout;
3490	l4kwqe3->ka_interval = csk->ka_interval;
3491	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3492	l4kwqe3->tos = csk->tos;
3493	l4kwqe3->ttl = csk->ttl;
3494	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3495	l4kwqe3->pmtu = csk->mtu;
3496	l4kwqe3->rcv_buf = csk->rcv_buf;
3497	l4kwqe3->snd_buf = csk->snd_buf;
3498	l4kwqe3->seed = csk->seed;
3499
3500	wqes[0] = (struct kwqe *) l4kwqe1;
3501	if (test_bit(SK_F_IPV6, &csk->flags)) {
3502		wqes[1] = (struct kwqe *) l4kwqe2;
3503		wqes[2] = (struct kwqe *) l4kwqe3;
3504		num_wqes = 3;
3505
3506		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3507		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3508		l4kwqe2->flags =
3509			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3510			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3511		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3512		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3513		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3514		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3515		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3516		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3517		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3518			       sizeof(struct tcphdr);
3519	} else {
3520		wqes[1] = (struct kwqe *) l4kwqe3;
3521		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3522			       sizeof(struct tcphdr);
3523	}
3524
3525	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3526	l4kwqe1->flags =
3527		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3528		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3529	l4kwqe1->cid = csk->cid;
3530	l4kwqe1->pg_cid = csk->pg_cid;
3531	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3532	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3533	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3534	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3535	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3536		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3537	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3538		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3539	if (csk->tcp_flags & SK_TCP_NAGLE)
3540		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3541	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3542		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3543	if (csk->tcp_flags & SK_TCP_SACK)
3544		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3545	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3546		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3547
3548	l4kwqe1->tcp_flags = tcp_flags;
3549
3550	return dev->submit_kwqes(dev, wqes, num_wqes);
3551}
3552
3553static int cnic_cm_close_req(struct cnic_sock *csk)
3554{
3555	struct cnic_dev *dev = csk->dev;
3556	struct l4_kwq_close_req *l4kwqe;
3557	struct kwqe *wqes[1];
3558
3559	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3560	memset(l4kwqe, 0, sizeof(*l4kwqe));
3561	wqes[0] = (struct kwqe *) l4kwqe;
3562
3563	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3564	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3565	l4kwqe->cid = csk->cid;
3566
3567	return dev->submit_kwqes(dev, wqes, 1);
3568}
3569
3570static int cnic_cm_abort_req(struct cnic_sock *csk)
3571{
3572	struct cnic_dev *dev = csk->dev;
3573	struct l4_kwq_reset_req *l4kwqe;
3574	struct kwqe *wqes[1];
3575
3576	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3577	memset(l4kwqe, 0, sizeof(*l4kwqe));
3578	wqes[0] = (struct kwqe *) l4kwqe;
3579
3580	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3581	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3582	l4kwqe->cid = csk->cid;
3583
3584	return dev->submit_kwqes(dev, wqes, 1);
3585}
3586
3587static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3588			  u32 l5_cid, struct cnic_sock **csk, void *context)
3589{
3590	struct cnic_local *cp = dev->cnic_priv;
3591	struct cnic_sock *csk1;
3592
3593	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3594		return -EINVAL;
3595
3596	if (cp->ctx_tbl) {
3597		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3598
3599		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3600			return -EAGAIN;
3601	}
3602
3603	csk1 = &cp->csk_tbl[l5_cid];
3604	if (atomic_read(&csk1->ref_count))
3605		return -EAGAIN;
3606
3607	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3608		return -EBUSY;
3609
3610	csk1->dev = dev;
3611	csk1->cid = cid;
3612	csk1->l5_cid = l5_cid;
3613	csk1->ulp_type = ulp_type;
3614	csk1->context = context;
3615
3616	csk1->ka_timeout = DEF_KA_TIMEOUT;
3617	csk1->ka_interval = DEF_KA_INTERVAL;
3618	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3619	csk1->tos = DEF_TOS;
3620	csk1->ttl = DEF_TTL;
3621	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3622	csk1->rcv_buf = DEF_RCV_BUF;
3623	csk1->snd_buf = DEF_SND_BUF;
3624	csk1->seed = DEF_SEED;
3625	csk1->tcp_flags = 0;
3626
3627	*csk = csk1;
3628	return 0;
3629}
3630
3631static void cnic_cm_cleanup(struct cnic_sock *csk)
3632{
3633	if (csk->src_port) {
3634		struct cnic_dev *dev = csk->dev;
3635		struct cnic_local *cp = dev->cnic_priv;
3636
3637		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3638		csk->src_port = 0;
3639	}
3640}
3641
3642static void cnic_close_conn(struct cnic_sock *csk)
3643{
3644	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3645		cnic_cm_upload_pg(csk);
3646		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3647	}
3648	cnic_cm_cleanup(csk);
3649}
3650
3651static int cnic_cm_destroy(struct cnic_sock *csk)
3652{
3653	if (!cnic_in_use(csk))
3654		return -EINVAL;
3655
3656	csk_hold(csk);
3657	clear_bit(SK_F_INUSE, &csk->flags);
3658	smp_mb__after_atomic();
3659	while (atomic_read(&csk->ref_count) != 1)
3660		msleep(1);
3661	cnic_cm_cleanup(csk);
3662
3663	csk->flags = 0;
3664	csk_put(csk);
3665	return 0;
3666}
3667
3668static inline u16 cnic_get_vlan(struct net_device *dev,
3669				struct net_device **vlan_dev)
3670{
3671	if (is_vlan_dev(dev)) {
3672		*vlan_dev = vlan_dev_real_dev(dev);
3673		return vlan_dev_vlan_id(dev);
3674	}
3675	*vlan_dev = dev;
3676	return 0;
3677}
3678
3679static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3680			     struct dst_entry **dst)
3681{
3682#if defined(CONFIG_INET)
3683	struct rtable *rt;
3684
3685	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3686	if (!IS_ERR(rt)) {
3687		*dst = &rt->dst;
3688		return 0;
3689	}
3690	return PTR_ERR(rt);
3691#else
3692	return -ENETUNREACH;
3693#endif
3694}
3695
3696static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3697			     struct dst_entry **dst)
3698{
3699#if IS_ENABLED(CONFIG_IPV6)
3700	struct flowi6 fl6;
3701
3702	memset(&fl6, 0, sizeof(fl6));
3703	fl6.daddr = dst_addr->sin6_addr;
3704	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3705		fl6.flowi6_oif = dst_addr->sin6_scope_id;
3706
3707	*dst = ip6_route_output(&init_net, NULL, &fl6);
3708	if ((*dst)->error) {
3709		dst_release(*dst);
3710		*dst = NULL;
3711		return -ENETUNREACH;
3712	} else
3713		return 0;
3714#endif
3715
3716	return -ENETUNREACH;
3717}
3718
3719static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3720					   int ulp_type)
3721{
3722	struct cnic_dev *dev = NULL;
3723	struct dst_entry *dst;
3724	struct net_device *netdev = NULL;
3725	int err = -ENETUNREACH;
3726
3727	if (dst_addr->sin_family == AF_INET)
3728		err = cnic_get_v4_route(dst_addr, &dst);
3729	else if (dst_addr->sin_family == AF_INET6) {
3730		struct sockaddr_in6 *dst_addr6 =
3731			(struct sockaddr_in6 *) dst_addr;
3732
3733		err = cnic_get_v6_route(dst_addr6, &dst);
3734	} else
3735		return NULL;
3736
3737	if (err)
3738		return NULL;
3739
3740	if (!dst->dev)
3741		goto done;
3742
3743	cnic_get_vlan(dst->dev, &netdev);
3744
3745	dev = cnic_from_netdev(netdev);
3746
3747done:
3748	dst_release(dst);
3749	if (dev)
3750		cnic_put(dev);
3751	return dev;
3752}
3753
3754static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3755{
3756	struct cnic_dev *dev = csk->dev;
3757	struct cnic_local *cp = dev->cnic_priv;
3758
3759	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3760}
3761
3762static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3763{
3764	struct cnic_dev *dev = csk->dev;
3765	struct cnic_local *cp = dev->cnic_priv;
3766	int is_v6, rc = 0;
3767	struct dst_entry *dst = NULL;
3768	struct net_device *realdev;
3769	__be16 local_port;
3770	u32 port_id;
3771
3772	if (saddr->local.v6.sin6_family == AF_INET6 &&
3773	    saddr->remote.v6.sin6_family == AF_INET6)
3774		is_v6 = 1;
3775	else if (saddr->local.v4.sin_family == AF_INET &&
3776		 saddr->remote.v4.sin_family == AF_INET)
3777		is_v6 = 0;
3778	else
3779		return -EINVAL;
3780
3781	clear_bit(SK_F_IPV6, &csk->flags);
3782
3783	if (is_v6) {
3784		set_bit(SK_F_IPV6, &csk->flags);
3785		cnic_get_v6_route(&saddr->remote.v6, &dst);
3786
3787		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3788		       sizeof(struct in6_addr));
3789		csk->dst_port = saddr->remote.v6.sin6_port;
3790		local_port = saddr->local.v6.sin6_port;
3791
3792	} else {
3793		cnic_get_v4_route(&saddr->remote.v4, &dst);
3794
3795		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3796		csk->dst_port = saddr->remote.v4.sin_port;
3797		local_port = saddr->local.v4.sin_port;
3798	}
3799
3800	csk->vlan_id = 0;
3801	csk->mtu = dev->netdev->mtu;
3802	if (dst && dst->dev) {
3803		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3804		if (realdev == dev->netdev) {
3805			csk->vlan_id = vlan;
3806			csk->mtu = dst_mtu(dst);
3807		}
3808	}
3809
3810	port_id = be16_to_cpu(local_port);
3811	if (port_id >= CNIC_LOCAL_PORT_MIN &&
3812	    port_id < CNIC_LOCAL_PORT_MAX) {
3813		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3814			port_id = 0;
3815	} else
3816		port_id = 0;
3817
3818	if (!port_id) {
3819		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3820		if (port_id == -1) {
3821			rc = -ENOMEM;
3822			goto err_out;
3823		}
3824		local_port = cpu_to_be16(port_id);
3825	}
3826	csk->src_port = local_port;
3827
3828err_out:
3829	dst_release(dst);
3830	return rc;
3831}
3832
3833static void cnic_init_csk_state(struct cnic_sock *csk)
3834{
3835	csk->state = 0;
3836	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3837	clear_bit(SK_F_CLOSING, &csk->flags);
3838}
3839
3840static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3841{
3842	struct cnic_local *cp = csk->dev->cnic_priv;
3843	int err = 0;
3844
3845	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3846		return -EOPNOTSUPP;
3847
3848	if (!cnic_in_use(csk))
3849		return -EINVAL;
3850
3851	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3852		return -EINVAL;
3853
3854	cnic_init_csk_state(csk);
3855
3856	err = cnic_get_route(csk, saddr);
3857	if (err)
3858		goto err_out;
3859
3860	err = cnic_resolve_addr(csk, saddr);
3861	if (!err)
3862		return 0;
3863
3864err_out:
3865	clear_bit(SK_F_CONNECT_START, &csk->flags);
3866	return err;
3867}
3868
3869static int cnic_cm_abort(struct cnic_sock *csk)
3870{
3871	struct cnic_local *cp = csk->dev->cnic_priv;
3872	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3873
3874	if (!cnic_in_use(csk))
3875		return -EINVAL;
3876
3877	if (cnic_abort_prep(csk))
3878		return cnic_cm_abort_req(csk);
3879
3880	/* Getting here means that we haven't started connect, or
3881	 * connect was not successful, or it has been reset by the target.
3882	 */
3883
3884	cp->close_conn(csk, opcode);
3885	if (csk->state != opcode) {
3886		/* Wait for remote reset sequence to complete */
3887		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3888			msleep(1);
3889
3890		return -EALREADY;
3891	}
3892
3893	return 0;
3894}
3895
3896static int cnic_cm_close(struct cnic_sock *csk)
3897{
3898	if (!cnic_in_use(csk))
3899		return -EINVAL;
3900
3901	if (cnic_close_prep(csk)) {
3902		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3903		return cnic_cm_close_req(csk);
3904	} else {
3905		/* Wait for remote reset sequence to complete */
3906		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3907			msleep(1);
3908
3909		return -EALREADY;
3910	}
3911	return 0;
3912}
3913
3914static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3915			   u8 opcode)
3916{
3917	struct cnic_ulp_ops *ulp_ops;
3918	int ulp_type = csk->ulp_type;
3919
3920	rcu_read_lock();
3921	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3922	if (ulp_ops) {
3923		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3924			ulp_ops->cm_connect_complete(csk);
3925		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3926			ulp_ops->cm_close_complete(csk);
3927		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3928			ulp_ops->cm_remote_abort(csk);
3929		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3930			ulp_ops->cm_abort_complete(csk);
3931		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3932			ulp_ops->cm_remote_close(csk);
3933	}
3934	rcu_read_unlock();
3935}
3936
3937static int cnic_cm_set_pg(struct cnic_sock *csk)
3938{
3939	if (cnic_offld_prep(csk)) {
3940		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3941			cnic_cm_update_pg(csk);
3942		else
3943			cnic_cm_offload_pg(csk);
3944	}
3945	return 0;
3946}
3947
3948static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3949{
3950	struct cnic_local *cp = dev->cnic_priv;
3951	u32 l5_cid = kcqe->pg_host_opaque;
3952	u8 opcode = kcqe->op_code;
3953	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3954
3955	csk_hold(csk);
3956	if (!cnic_in_use(csk))
3957		goto done;
3958
3959	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3960		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3961		goto done;
3962	}
3963	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3964	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3965		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3966		cnic_cm_upcall(cp, csk,
3967			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3968		goto done;
3969	}
3970
3971	csk->pg_cid = kcqe->pg_cid;
3972	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3973	cnic_cm_conn_req(csk);
3974
3975done:
3976	csk_put(csk);
3977}
3978
3979static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3980{
3981	struct cnic_local *cp = dev->cnic_priv;
3982	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3983	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3984	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3985
3986	ctx->timestamp = jiffies;
3987	ctx->wait_cond = 1;
3988	wake_up(&ctx->waitq);
3989}
3990
3991static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3992{
3993	struct cnic_local *cp = dev->cnic_priv;
3994	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3995	u8 opcode = l4kcqe->op_code;
3996	u32 l5_cid;
3997	struct cnic_sock *csk;
3998
3999	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
4000		cnic_process_fcoe_term_conn(dev, kcqe);
4001		return;
4002	}
4003	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
4004	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
4005		cnic_cm_process_offld_pg(dev, l4kcqe);
4006		return;
4007	}
4008
4009	l5_cid = l4kcqe->conn_id;
4010	if (opcode & 0x80)
4011		l5_cid = l4kcqe->cid;
4012	if (l5_cid >= MAX_CM_SK_TBL_SZ)
4013		return;
4014
4015	csk = &cp->csk_tbl[l5_cid];
4016	csk_hold(csk);
4017
4018	if (!cnic_in_use(csk)) {
4019		csk_put(csk);
4020		return;
4021	}
4022
4023	switch (opcode) {
4024	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
4025		if (l4kcqe->status != 0) {
4026			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4027			cnic_cm_upcall(cp, csk,
4028				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
4029		}
4030		break;
4031	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
4032		if (l4kcqe->status == 0)
4033			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
4034		else if (l4kcqe->status ==
4035			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4036			set_bit(SK_F_HW_ERR, &csk->flags);
4037
4038		smp_mb__before_atomic();
4039		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4040		cnic_cm_upcall(cp, csk, opcode);
4041		break;
4042
4043	case L5CM_RAMROD_CMD_ID_CLOSE: {
4044		struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
4045
4046		if (l4kcqe->status == 0 && l5kcqe->completion_status == 0)
4047			break;
4048
4049		netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4050			    l4kcqe->status, l5kcqe->completion_status);
4051		opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4052	}
4053		fallthrough;
4054	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4055	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4056	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4057	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4058	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4059		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4060			set_bit(SK_F_HW_ERR, &csk->flags);
4061
4062		cp->close_conn(csk, opcode);
4063		break;
4064
4065	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
4066		/* after we already sent CLOSE_REQ */
4067		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4068		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
4069		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
4070			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
4071		else
4072			cnic_cm_upcall(cp, csk, opcode);
4073		break;
4074	}
4075	csk_put(csk);
4076}
4077
4078static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
4079{
4080	struct cnic_dev *dev = data;
4081	int i;
4082
4083	for (i = 0; i < num; i++)
4084		cnic_cm_process_kcqe(dev, kcqe[i]);
4085}
4086
4087static struct cnic_ulp_ops cm_ulp_ops = {
4088	.indicate_kcqes		= cnic_cm_indicate_kcqe,
4089};
4090
4091static void cnic_cm_free_mem(struct cnic_dev *dev)
4092{
4093	struct cnic_local *cp = dev->cnic_priv;
4094
4095	kvfree(cp->csk_tbl);
4096	cp->csk_tbl = NULL;
4097	cnic_free_id_tbl(&cp->csk_port_tbl);
4098}
4099
4100static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4101{
4102	struct cnic_local *cp = dev->cnic_priv;
4103	u32 port_id;
4104	int i;
4105
4106	cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
4107			       GFP_KERNEL);
4108	if (!cp->csk_tbl)
4109		return -ENOMEM;
4110
4111	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
4112		atomic_set(&cp->csk_tbl[i].ref_count, 0);
4113
4114	port_id = get_random_u32_below(CNIC_LOCAL_PORT_RANGE);
4115	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
4116			     CNIC_LOCAL_PORT_MIN, port_id)) {
4117		cnic_cm_free_mem(dev);
4118		return -ENOMEM;
4119	}
4120	return 0;
4121}
4122
4123static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4124{
4125	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4126		/* Unsolicited RESET_COMP or RESET_RECEIVED */
4127		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4128		csk->state = opcode;
4129	}
4130
4131	/* 1. If event opcode matches the expected event in csk->state
4132	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4133	 *    event
4134	 * 3. If the expected event is 0, meaning the connection was never
4135	 *    never established, we accept the opcode from cm_abort.
4136	 */
4137	if (opcode == csk->state || csk->state == 0 ||
4138	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4139	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4140		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4141			if (csk->state == 0)
4142				csk->state = opcode;
4143			return 1;
4144		}
4145	}
4146	return 0;
4147}
4148
4149static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4150{
4151	struct cnic_dev *dev = csk->dev;
4152	struct cnic_local *cp = dev->cnic_priv;
4153
4154	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4155		cnic_cm_upcall(cp, csk, opcode);
4156		return;
4157	}
4158
4159	clear_bit(SK_F_CONNECT_START, &csk->flags);
4160	cnic_close_conn(csk);
4161	csk->state = opcode;
4162	cnic_cm_upcall(cp, csk, opcode);
4163}
4164
4165static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4166{
4167}
4168
4169static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4170{
4171	u32 seed;
4172
4173	seed = get_random_u32();
4174	cnic_ctx_wr(dev, 45, 0, seed);
4175	return 0;
4176}
4177
4178static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4179{
4180	struct cnic_dev *dev = csk->dev;
4181	struct cnic_local *cp = dev->cnic_priv;
4182	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4183	union l5cm_specific_data l5_data;
4184	u32 cmd = 0;
4185	int close_complete = 0;
4186
4187	switch (opcode) {
4188	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4189	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4190	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4191		if (cnic_ready_to_close(csk, opcode)) {
4192			if (test_bit(SK_F_HW_ERR, &csk->flags))
4193				close_complete = 1;
4194			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4195				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4196			else
4197				close_complete = 1;
4198		}
4199		break;
4200	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4201		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4202		break;
4203	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4204		close_complete = 1;
4205		break;
4206	}
4207	if (cmd) {
4208		memset(&l5_data, 0, sizeof(l5_data));
4209
4210		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4211				    &l5_data);
4212	} else if (close_complete) {
4213		ctx->timestamp = jiffies;
4214		cnic_close_conn(csk);
4215		cnic_cm_upcall(cp, csk, csk->state);
4216	}
4217}
4218
4219static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4220{
4221	struct cnic_local *cp = dev->cnic_priv;
4222
4223	if (!cp->ctx_tbl)
4224		return;
4225
4226	if (!netif_running(dev->netdev))
4227		return;
4228
4229	cnic_bnx2x_delete_wait(dev, 0);
4230
4231	cancel_delayed_work(&cp->delete_task);
4232	flush_workqueue(cnic_wq);
4233
4234	if (atomic_read(&cp->iscsi_conn) != 0)
4235		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4236			    atomic_read(&cp->iscsi_conn));
4237}
4238
4239static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4240{
4241	struct bnx2x *bp = netdev_priv(dev->netdev);
4242	u32 pfid = bp->pfid;
4243	u32 port = BP_PORT(bp);
4244
4245	cnic_init_bnx2x_mac(dev);
4246	cnic_bnx2x_set_tcp_options(dev, 0, 1);
4247
4248	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4249		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4250
4251	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4252		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4253	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4254		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4255		DEF_MAX_DA_COUNT);
4256
4257	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4258		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4259	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4260		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4261	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4262		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4263	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4264		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4265
4266	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4267		DEF_MAX_CWND);
4268	return 0;
4269}
4270
4271static void cnic_delete_task(struct work_struct *work)
4272{
4273	struct cnic_local *cp;
4274	struct cnic_dev *dev;
4275	u32 i;
4276	int need_resched = 0;
4277
4278	cp = container_of(work, struct cnic_local, delete_task.work);
4279	dev = cp->dev;
4280
4281	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4282		struct drv_ctl_info info;
4283
4284		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4285
4286		memset(&info, 0, sizeof(struct drv_ctl_info));
4287		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4288		cp->ethdev->drv_ctl(dev->netdev, &info);
4289	}
4290
4291	for (i = 0; i < cp->max_cid_space; i++) {
4292		struct cnic_context *ctx = &cp->ctx_tbl[i];
4293		int err;
4294
4295		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4296		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4297			continue;
4298
4299		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4300			need_resched = 1;
4301			continue;
4302		}
4303
4304		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4305			continue;
4306
4307		err = cnic_bnx2x_destroy_ramrod(dev, i);
4308
4309		cnic_free_bnx2x_conn_resc(dev, i);
4310		if (!err) {
4311			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4312				atomic_dec(&cp->iscsi_conn);
4313
4314			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4315		}
4316	}
4317
4318	if (need_resched)
4319		queue_delayed_work(cnic_wq, &cp->delete_task,
4320				   msecs_to_jiffies(10));
4321
4322}
4323
4324static int cnic_cm_open(struct cnic_dev *dev)
4325{
4326	struct cnic_local *cp = dev->cnic_priv;
4327	int err;
4328
4329	err = cnic_cm_alloc_mem(dev);
4330	if (err)
4331		return err;
4332
4333	err = cp->start_cm(dev);
4334
4335	if (err)
4336		goto err_out;
4337
4338	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4339
4340	dev->cm_create = cnic_cm_create;
4341	dev->cm_destroy = cnic_cm_destroy;
4342	dev->cm_connect = cnic_cm_connect;
4343	dev->cm_abort = cnic_cm_abort;
4344	dev->cm_close = cnic_cm_close;
4345	dev->cm_select_dev = cnic_cm_select_dev;
4346
4347	cp->ulp_handle[CNIC_ULP_L4] = dev;
4348	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4349	return 0;
4350
4351err_out:
4352	cnic_cm_free_mem(dev);
4353	return err;
4354}
4355
4356static int cnic_cm_shutdown(struct cnic_dev *dev)
4357{
4358	struct cnic_local *cp = dev->cnic_priv;
4359	int i;
4360
4361	if (!cp->csk_tbl)
4362		return 0;
4363
4364	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4365		struct cnic_sock *csk = &cp->csk_tbl[i];
4366
4367		clear_bit(SK_F_INUSE, &csk->flags);
4368		cnic_cm_cleanup(csk);
4369	}
4370	cnic_cm_free_mem(dev);
4371
4372	return 0;
4373}
4374
4375static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4376{
4377	u32 cid_addr;
4378	int i;
4379
4380	cid_addr = GET_CID_ADDR(cid);
4381
4382	for (i = 0; i < CTX_SIZE; i += 4)
4383		cnic_ctx_wr(dev, cid_addr, i, 0);
4384}
4385
4386static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4387{
4388	struct cnic_local *cp = dev->cnic_priv;
4389	int ret = 0, i;
4390	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4391
4392	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4393		return 0;
4394
4395	for (i = 0; i < cp->ctx_blks; i++) {
4396		int j;
4397		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4398		u32 val;
4399
4400		memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
4401
4402		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4403			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4404		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4405			(u64) cp->ctx_arr[i].mapping >> 32);
4406		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4407			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4408		for (j = 0; j < 10; j++) {
4409
4410			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4411			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4412				break;
4413			udelay(5);
4414		}
4415		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4416			ret = -EBUSY;
4417			break;
4418		}
4419	}
4420	return ret;
4421}
4422
4423static void cnic_free_irq(struct cnic_dev *dev)
4424{
4425	struct cnic_local *cp = dev->cnic_priv;
4426	struct cnic_eth_dev *ethdev = cp->ethdev;
4427
4428	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4429		cp->disable_int_sync(dev);
4430		tasklet_kill(&cp->cnic_irq_task);
4431		free_irq(ethdev->irq_arr[0].vector, dev);
4432	}
4433}
4434
4435static int cnic_request_irq(struct cnic_dev *dev)
4436{
4437	struct cnic_local *cp = dev->cnic_priv;
4438	struct cnic_eth_dev *ethdev = cp->ethdev;
4439	int err;
4440
4441	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4442	if (err)
4443		tasklet_disable(&cp->cnic_irq_task);
4444
4445	return err;
4446}
4447
4448static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4449{
4450	struct cnic_local *cp = dev->cnic_priv;
4451	struct cnic_eth_dev *ethdev = cp->ethdev;
4452
4453	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4454		int err, i = 0;
4455		int sblk_num = cp->status_blk_num;
4456		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4457			   BNX2_HC_SB_CONFIG_1;
4458
4459		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4460
4461		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4462		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4463		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4464
4465		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4466		tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
4467		err = cnic_request_irq(dev);
4468		if (err)
4469			return err;
4470
4471		while (cp->status_blk.bnx2->status_completion_producer_index &&
4472		       i < 10) {
4473			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4474				1 << (11 + sblk_num));
4475			udelay(10);
4476			i++;
4477			barrier();
4478		}
4479		if (cp->status_blk.bnx2->status_completion_producer_index) {
4480			cnic_free_irq(dev);
4481			goto failed;
4482		}
4483
4484	} else {
4485		struct status_block *sblk = cp->status_blk.gen;
4486		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4487		int i = 0;
4488
4489		while (sblk->status_completion_producer_index && i < 10) {
4490			CNIC_WR(dev, BNX2_HC_COMMAND,
4491				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4492			udelay(10);
4493			i++;
4494			barrier();
4495		}
4496		if (sblk->status_completion_producer_index)
4497			goto failed;
4498
4499	}
4500	return 0;
4501
4502failed:
4503	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4504	return -EBUSY;
4505}
4506
4507static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4508{
4509	struct cnic_local *cp = dev->cnic_priv;
4510	struct cnic_eth_dev *ethdev = cp->ethdev;
4511
4512	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4513		return;
4514
4515	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4516		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4517}
4518
4519static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4520{
4521	struct cnic_local *cp = dev->cnic_priv;
4522	struct cnic_eth_dev *ethdev = cp->ethdev;
4523
4524	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4525		return;
4526
4527	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4528		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4529	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4530	synchronize_irq(ethdev->irq_arr[0].vector);
4531}
4532
4533static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4534{
4535	struct cnic_local *cp = dev->cnic_priv;
4536	struct cnic_eth_dev *ethdev = cp->ethdev;
4537	struct cnic_uio_dev *udev = cp->udev;
4538	u32 cid_addr, tx_cid, sb_id;
4539	u32 val, offset0, offset1, offset2, offset3;
4540	int i;
4541	struct bnx2_tx_bd *txbd;
4542	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4543	struct status_block *s_blk = cp->status_blk.gen;
4544
4545	sb_id = cp->status_blk_num;
4546	tx_cid = 20;
4547	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4548	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4549		struct status_block_msix *sblk = cp->status_blk.bnx2;
4550
4551		tx_cid = TX_TSS_CID + sb_id - 1;
4552		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4553			(TX_TSS_CID << 7));
4554		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4555	}
4556	cp->tx_cons = *cp->tx_cons_ptr;
4557
4558	cid_addr = GET_CID_ADDR(tx_cid);
4559	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
4560		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4561
4562		for (i = 0; i < PHY_CTX_SIZE; i += 4)
4563			cnic_ctx_wr(dev, cid_addr2, i, 0);
4564
4565		offset0 = BNX2_L2CTX_TYPE_XI;
4566		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4567		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4568		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4569	} else {
4570		cnic_init_context(dev, tx_cid);
4571		cnic_init_context(dev, tx_cid + 1);
4572
4573		offset0 = BNX2_L2CTX_TYPE;
4574		offset1 = BNX2_L2CTX_CMD_TYPE;
4575		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4576		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4577	}
4578	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4579	cnic_ctx_wr(dev, cid_addr, offset0, val);
4580
4581	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4582	cnic_ctx_wr(dev, cid_addr, offset1, val);
4583
4584	txbd = udev->l2_ring;
4585
4586	buf_map = udev->l2_buf_map;
4587	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
4588		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4589		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4590	}
4591	val = (u64) ring_map >> 32;
4592	cnic_ctx_wr(dev, cid_addr, offset2, val);
4593	txbd->tx_bd_haddr_hi = val;
4594
4595	val = (u64) ring_map & 0xffffffff;
4596	cnic_ctx_wr(dev, cid_addr, offset3, val);
4597	txbd->tx_bd_haddr_lo = val;
4598}
4599
4600static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4601{
4602	struct cnic_local *cp = dev->cnic_priv;
4603	struct cnic_eth_dev *ethdev = cp->ethdev;
4604	struct cnic_uio_dev *udev = cp->udev;
4605	u32 cid_addr, sb_id, val, coal_reg, coal_val;
4606	int i;
4607	struct bnx2_rx_bd *rxbd;
4608	struct status_block *s_blk = cp->status_blk.gen;
4609	dma_addr_t ring_map = udev->l2_ring_map;
4610
4611	sb_id = cp->status_blk_num;
4612	cnic_init_context(dev, 2);
4613	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4614	coal_reg = BNX2_HC_COMMAND;
4615	coal_val = CNIC_RD(dev, coal_reg);
4616	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4617		struct status_block_msix *sblk = cp->status_blk.bnx2;
4618
4619		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4620		coal_reg = BNX2_HC_COALESCE_NOW;
4621		coal_val = 1 << (11 + sb_id);
4622	}
4623	i = 0;
4624	while (!(*cp->rx_cons_ptr != 0) && i < 10) {
4625		CNIC_WR(dev, coal_reg, coal_val);
4626		udelay(10);
4627		i++;
4628		barrier();
4629	}
4630	cp->rx_cons = *cp->rx_cons_ptr;
4631
4632	cid_addr = GET_CID_ADDR(2);
4633	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4634	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4635	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4636
4637	if (sb_id == 0)
4638		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4639	else
4640		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4641	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4642
4643	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
4644	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
4645		dma_addr_t buf_map;
4646		int n = (i % cp->l2_rx_ring_size) + 1;
4647
4648		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4649		rxbd->rx_bd_len = cp->l2_single_buf_size;
4650		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4651		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4652		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4653	}
4654	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
4655	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4656	rxbd->rx_bd_haddr_hi = val;
4657
4658	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
4659	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4660	rxbd->rx_bd_haddr_lo = val;
4661
4662	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4663	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4664}
4665
4666static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4667{
4668	struct kwqe *wqes[1], l2kwqe;
4669
4670	memset(&l2kwqe, 0, sizeof(l2kwqe));
4671	wqes[0] = &l2kwqe;
4672	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4673			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
4674			       KWQE_OPCODE_SHIFT) | 2;
4675	dev->submit_kwqes(dev, wqes, 1);
4676}
4677
4678static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4679{
4680	struct cnic_local *cp = dev->cnic_priv;
4681	u32 val;
4682
4683	val = cp->func << 2;
4684
4685	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4686
4687	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4688			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4689	dev->mac_addr[0] = (u8) (val >> 8);
4690	dev->mac_addr[1] = (u8) val;
4691
4692	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4693
4694	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4695			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4696	dev->mac_addr[2] = (u8) (val >> 24);
4697	dev->mac_addr[3] = (u8) (val >> 16);
4698	dev->mac_addr[4] = (u8) (val >> 8);
4699	dev->mac_addr[5] = (u8) val;
4700
4701	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4702
4703	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4704	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4705		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4706
4707	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4708	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4709	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4710}
4711
4712static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4713{
4714	struct cnic_local *cp = dev->cnic_priv;
4715	struct cnic_eth_dev *ethdev = cp->ethdev;
4716	struct status_block *sblk = cp->status_blk.gen;
4717	u32 val, kcq_cid_addr, kwq_cid_addr;
4718	int err;
4719
4720	cnic_set_bnx2_mac(dev);
4721
4722	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4723	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4724	if (CNIC_PAGE_BITS > 12)
4725		val |= (12 - 8)  << 4;
4726	else
4727		val |= (CNIC_PAGE_BITS - 8)  << 4;
4728
4729	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4730
4731	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4732	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4733	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4734
4735	err = cnic_setup_5709_context(dev, 1);
4736	if (err)
4737		return err;
4738
4739	cnic_init_context(dev, KWQ_CID);
4740	cnic_init_context(dev, KCQ_CID);
4741
4742	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4743	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4744
4745	cp->max_kwq_idx = MAX_KWQ_IDX;
4746	cp->kwq_prod_idx = 0;
4747	cp->kwq_con_idx = 0;
4748	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4749
4750	if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
4751		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4752	else
4753		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4754
4755	/* Initialize the kernel work queue context. */
4756	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4757	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4758	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4759
4760	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4761	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4762
4763	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4764	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4765
4766	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4767	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4768
4769	val = (u32) cp->kwq_info.pgtbl_map;
4770	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4771
4772	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4773	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4774
4775	cp->kcq1.sw_prod_idx = 0;
4776	cp->kcq1.hw_prod_idx_ptr =
4777		&sblk->status_completion_producer_index;
4778
4779	cp->kcq1.status_idx_ptr = &sblk->status_idx;
4780
4781	/* Initialize the kernel complete queue context. */
4782	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4783	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4784	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4785
4786	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4787	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4788
4789	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4790	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4791
4792	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4793	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4794
4795	val = (u32) cp->kcq1.dma.pgtbl_map;
4796	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4797
4798	cp->int_num = 0;
4799	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4800		struct status_block_msix *msblk = cp->status_blk.bnx2;
4801		u32 sb_id = cp->status_blk_num;
4802		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4803
4804		cp->kcq1.hw_prod_idx_ptr =
4805			&msblk->status_completion_producer_index;
4806		cp->kcq1.status_idx_ptr = &msblk->status_idx;
4807		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
4808		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4809		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4810		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4811	}
4812
4813	/* Enable Commnad Scheduler notification when we write to the
4814	 * host producer index of the kernel contexts. */
4815	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4816
4817	/* Enable Command Scheduler notification when we write to either
4818	 * the Send Queue or Receive Queue producer indexes of the kernel
4819	 * bypass contexts. */
4820	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4821	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4822
4823	/* Notify COM when the driver post an application buffer. */
4824	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4825
4826	/* Set the CP and COM doorbells.  These two processors polls the
4827	 * doorbell for a non zero value before running.  This must be done
4828	 * after setting up the kernel queue contexts. */
4829	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4830	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4831
4832	cnic_init_bnx2_tx_ring(dev);
4833	cnic_init_bnx2_rx_ring(dev);
4834
4835	err = cnic_init_bnx2_irq(dev);
4836	if (err) {
4837		netdev_err(dev->netdev, "cnic_init_irq failed\n");
4838		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4839		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4840		return err;
4841	}
4842
4843	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
4844
4845	return 0;
4846}
4847
4848static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4849{
4850	struct cnic_local *cp = dev->cnic_priv;
4851	struct cnic_eth_dev *ethdev = cp->ethdev;
4852	u32 start_offset = ethdev->ctx_tbl_offset;
4853	int i;
4854
4855	for (i = 0; i < cp->ctx_blks; i++) {
4856		struct cnic_ctx *ctx = &cp->ctx_arr[i];
4857		dma_addr_t map = ctx->mapping;
4858
4859		if (cp->ctx_align) {
4860			unsigned long mask = cp->ctx_align - 1;
4861
4862			map = (map + mask) & ~mask;
4863		}
4864
4865		cnic_ctx_tbl_wr(dev, start_offset + i, map);
4866	}
4867}
4868
4869static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4870{
4871	struct cnic_local *cp = dev->cnic_priv;
4872	struct cnic_eth_dev *ethdev = cp->ethdev;
4873	int err = 0;
4874
4875	tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
4876	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4877		err = cnic_request_irq(dev);
4878
4879	return err;
4880}
4881
4882static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4883						u16 sb_id, u8 sb_index,
4884						u8 disable)
4885{
4886	struct bnx2x *bp = netdev_priv(dev->netdev);
4887
4888	u32 addr = BAR_CSTRORM_INTMEM +
4889			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4890			offsetof(struct hc_status_block_data_e1x, index_data) +
4891			sizeof(struct hc_index_data)*sb_index +
4892			offsetof(struct hc_index_data, flags);
4893	u16 flags = CNIC_RD16(dev, addr);
4894	/* clear and set */
4895	flags &= ~HC_INDEX_DATA_HC_ENABLED;
4896	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4897		  HC_INDEX_DATA_HC_ENABLED);
4898	CNIC_WR16(dev, addr, flags);
4899}
4900
4901static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4902{
4903	struct cnic_local *cp = dev->cnic_priv;
4904	struct bnx2x *bp = netdev_priv(dev->netdev);
4905	u8 sb_id = cp->status_blk_num;
4906
4907	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4908			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4909			offsetof(struct hc_status_block_data_e1x, index_data) +
4910			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4911			offsetof(struct hc_index_data, timeout), 64 / 4);
4912	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4913}
4914
4915static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4916{
4917}
4918
4919static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4920				    struct client_init_ramrod_data *data)
4921{
4922	struct cnic_local *cp = dev->cnic_priv;
4923	struct bnx2x *bp = netdev_priv(dev->netdev);
4924	struct cnic_uio_dev *udev = cp->udev;
4925	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4926	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4927	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4928	int i;
4929	u32 cli = cp->ethdev->iscsi_l2_client_id;
4930	u32 val;
4931
4932	memset(txbd, 0, CNIC_PAGE_SIZE);
4933
4934	buf_map = udev->l2_buf_map;
4935	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4936		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4937		struct eth_tx_parse_bd_e1x *pbd_e1x =
4938			&((txbd + 1)->parse_bd_e1x);
4939		struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
4940		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4941
4942		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4943		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4944		reg_bd->addr_hi = start_bd->addr_hi;
4945		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4946		start_bd->nbytes = cpu_to_le16(0x10);
4947		start_bd->nbd = cpu_to_le16(3);
4948		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4949		start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
4950		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4951
4952		if (BNX2X_CHIP_IS_E2_PLUS(bp))
4953			pbd_e2->parsing_data = (UNICAST_ADDRESS <<
4954				ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
4955		else
4956			pbd_e1x->global_data = (UNICAST_ADDRESS <<
4957				ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
4958	}
4959
4960	val = (u64) ring_map >> 32;
4961	txbd->next_bd.addr_hi = cpu_to_le32(val);
4962
4963	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4964
4965	val = (u64) ring_map & 0xffffffff;
4966	txbd->next_bd.addr_lo = cpu_to_le32(val);
4967
4968	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4969
4970	/* Other ramrod params */
4971	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4972	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4973
4974	/* reset xstorm per client statistics */
4975	if (cli < MAX_STAT_COUNTER_ID) {
4976		data->general.statistics_zero_flg = 1;
4977		data->general.statistics_en_flg = 1;
4978		data->general.statistics_counter_id = cli;
4979	}
4980
4981	cp->tx_cons_ptr =
4982		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
4983}
4984
4985static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4986				    struct client_init_ramrod_data *data)
4987{
4988	struct cnic_local *cp = dev->cnic_priv;
4989	struct bnx2x *bp = netdev_priv(dev->netdev);
4990	struct cnic_uio_dev *udev = cp->udev;
4991	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4992				CNIC_PAGE_SIZE);
4993	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4994				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
4995	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4996	int i;
4997	u32 cli = cp->ethdev->iscsi_l2_client_id;
4998	int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
4999	u32 val;
5000	dma_addr_t ring_map = udev->l2_ring_map;
5001
5002	/* General data */
5003	data->general.client_id = cli;
5004	data->general.activate_flg = 1;
5005	data->general.sp_client_id = cli;
5006	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
5007	data->general.func_id = bp->pfid;
5008
5009	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
5010		dma_addr_t buf_map;
5011		int n = (i % cp->l2_rx_ring_size) + 1;
5012
5013		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
5014		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
5015		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
5016	}
5017
5018	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
5019	rxbd->addr_hi = cpu_to_le32(val);
5020	data->rx.bd_page_base.hi = cpu_to_le32(val);
5021
5022	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
5023	rxbd->addr_lo = cpu_to_le32(val);
5024	data->rx.bd_page_base.lo = cpu_to_le32(val);
5025
5026	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
5027	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
5028	rxcqe->addr_hi = cpu_to_le32(val);
5029	data->rx.cqe_page_base.hi = cpu_to_le32(val);
5030
5031	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
5032	rxcqe->addr_lo = cpu_to_le32(val);
5033	data->rx.cqe_page_base.lo = cpu_to_le32(val);
5034
5035	/* Other ramrod params */
5036	data->rx.client_qzone_id = cl_qzone_id;
5037	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
5038	data->rx.status_block_id = BNX2X_DEF_SB_ID;
5039
5040	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
5041
5042	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
5043	data->rx.outer_vlan_removal_enable_flg = 1;
5044	data->rx.silent_vlan_removal_flg = 1;
5045	data->rx.silent_vlan_value = 0;
5046	data->rx.silent_vlan_mask = 0xffff;
5047
5048	cp->rx_cons_ptr =
5049		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
5050	cp->rx_cons = *cp->rx_cons_ptr;
5051}
5052
5053static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5054{
5055	struct cnic_local *cp = dev->cnic_priv;
5056	struct bnx2x *bp = netdev_priv(dev->netdev);
5057	u32 pfid = bp->pfid;
5058
5059	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
5060			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
5061	cp->kcq1.sw_prod_idx = 0;
5062
5063	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5064		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5065
5066		cp->kcq1.hw_prod_idx_ptr =
5067			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5068		cp->kcq1.status_idx_ptr =
5069			&sb->sb.running_index[SM_RX_ID];
5070	} else {
5071		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
5072
5073		cp->kcq1.hw_prod_idx_ptr =
5074			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5075		cp->kcq1.status_idx_ptr =
5076			&sb->sb.running_index[SM_RX_ID];
5077	}
5078
5079	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5080		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5081
5082		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
5083					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
5084		cp->kcq2.sw_prod_idx = 0;
5085		cp->kcq2.hw_prod_idx_ptr =
5086			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
5087		cp->kcq2.status_idx_ptr =
5088			&sb->sb.running_index[SM_RX_ID];
5089	}
5090}
5091
5092static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5093{
5094	struct cnic_local *cp = dev->cnic_priv;
5095	struct bnx2x *bp = netdev_priv(dev->netdev);
5096	struct cnic_eth_dev *ethdev = cp->ethdev;
5097	int ret;
5098	u32 pfid;
5099
5100	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
5101	cp->func = bp->pf_num;
5102
5103	pfid = bp->pfid;
5104
5105	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
5106			       cp->iscsi_start_cid, 0);
5107
5108	if (ret)
5109		return -ENOMEM;
5110
5111	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5112		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
5113					cp->fcoe_start_cid, 0);
5114
5115		if (ret)
5116			return -ENOMEM;
5117	}
5118
5119	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
5120
5121	cnic_init_bnx2x_kcq(dev);
5122
5123	/* Only 1 EQ */
5124	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
5125	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5126		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
5127	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5128		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
5129		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
5130	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5131		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
5132		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
5133	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5134		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
5135		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
5136	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5137		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
5138		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
5139	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5140		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
5141	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
5142		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
5143	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5144		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
5145		HC_INDEX_ISCSI_EQ_CONS);
5146
5147	CNIC_WR(dev, BAR_USTRORM_INTMEM +
5148		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
5149		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
5150	CNIC_WR(dev, BAR_USTRORM_INTMEM +
5151		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
5152		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
5153
5154	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5155		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
5156
5157	cnic_setup_bnx2x_context(dev);
5158
5159	ret = cnic_init_bnx2x_irq(dev);
5160	if (ret)
5161		return ret;
5162
5163	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
5164	return 0;
5165}
5166
5167static void cnic_init_rings(struct cnic_dev *dev)
5168{
5169	struct cnic_local *cp = dev->cnic_priv;
5170	struct bnx2x *bp = netdev_priv(dev->netdev);
5171	struct cnic_uio_dev *udev = cp->udev;
5172
5173	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5174		return;
5175
5176	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5177		cnic_init_bnx2_tx_ring(dev);
5178		cnic_init_bnx2_rx_ring(dev);
5179		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5180	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5181		u32 cli = cp->ethdev->iscsi_l2_client_id;
5182		u32 cid = cp->ethdev->iscsi_l2_cid;
5183		u32 cl_qzone_id;
5184		struct client_init_ramrod_data *data;
5185		union l5cm_specific_data l5_data;
5186		struct ustorm_eth_rx_producers rx_prods = {0};
5187		u32 off, i, *cid_ptr;
5188
5189		rx_prods.bd_prod = 0;
5190		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5191		barrier();
5192
5193		cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
5194
5195		off = BAR_USTRORM_INTMEM +
5196			(BNX2X_CHIP_IS_E2_PLUS(bp) ?
5197			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
5198			 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
5199
5200		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
5201			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
5202
5203		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5204
5205		data = udev->l2_buf;
5206		cid_ptr = udev->l2_buf + 12;
5207
5208		memset(data, 0, sizeof(*data));
5209
5210		cnic_init_bnx2x_tx_ring(dev, data);
5211		cnic_init_bnx2x_rx_ring(dev, data);
5212
5213		data->general.fp_hsi_ver =  ETH_FP_HSI_VERSION;
5214
5215		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5216		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
5217
5218		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5219
5220		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5221			cid, ETH_CONNECTION_TYPE, &l5_data);
5222
5223		i = 0;
5224		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5225		       ++i < 10)
5226			msleep(1);
5227
5228		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5229			netdev_err(dev->netdev,
5230				"iSCSI CLIENT_SETUP did not complete\n");
5231		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5232		cnic_ring_ctl(dev, cid, cli, 1);
5233		*cid_ptr = cid >> 4;
5234		*(cid_ptr + 1) = cid * bp->db_size;
5235		*(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
5236	}
5237}
5238
5239static void cnic_shutdown_rings(struct cnic_dev *dev)
5240{
5241	struct cnic_local *cp = dev->cnic_priv;
5242	struct cnic_uio_dev *udev = cp->udev;
5243	void *rx_ring;
5244
5245	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5246		return;
5247
5248	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5249		cnic_shutdown_bnx2_rx_ring(dev);
5250	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5251		u32 cli = cp->ethdev->iscsi_l2_client_id;
5252		u32 cid = cp->ethdev->iscsi_l2_cid;
5253		union l5cm_specific_data l5_data;
5254		int i;
5255
5256		cnic_ring_ctl(dev, cid, cli, 0);
5257
5258		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5259
5260		l5_data.phy_address.lo = cli;
5261		l5_data.phy_address.hi = 0;
5262		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5263			cid, ETH_CONNECTION_TYPE, &l5_data);
5264		i = 0;
5265		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5266		       ++i < 10)
5267			msleep(1);
5268
5269		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5270			netdev_err(dev->netdev,
5271				"iSCSI CLIENT_HALT did not complete\n");
5272		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5273
5274		memset(&l5_data, 0, sizeof(l5_data));
5275		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5276			cid, NONE_CONNECTION_TYPE, &l5_data);
5277		msleep(10);
5278	}
5279	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5280	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
5281	memset(rx_ring, 0, CNIC_PAGE_SIZE);
5282}
5283
5284static int cnic_register_netdev(struct cnic_dev *dev)
5285{
5286	struct cnic_local *cp = dev->cnic_priv;
5287	struct cnic_eth_dev *ethdev = cp->ethdev;
5288	int err;
5289
5290	if (!ethdev)
5291		return -ENODEV;
5292
5293	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5294		return 0;
5295
5296	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5297	if (err)
5298		netdev_err(dev->netdev, "register_cnic failed\n");
5299
5300	/* Read iSCSI config again.  On some bnx2x device, iSCSI config
5301	 * can change after firmware is downloaded.
5302	 */
5303	dev->max_iscsi_conn = ethdev->max_iscsi_conn;
5304	if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
5305		dev->max_iscsi_conn = 0;
5306
5307	return err;
5308}
5309
5310static void cnic_unregister_netdev(struct cnic_dev *dev)
5311{
5312	struct cnic_local *cp = dev->cnic_priv;
5313	struct cnic_eth_dev *ethdev = cp->ethdev;
5314
5315	if (!ethdev)
5316		return;
5317
5318	ethdev->drv_unregister_cnic(dev->netdev);
5319}
5320
5321static int cnic_start_hw(struct cnic_dev *dev)
5322{
5323	struct cnic_local *cp = dev->cnic_priv;
5324	struct cnic_eth_dev *ethdev = cp->ethdev;
5325	int err;
5326
5327	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5328		return -EALREADY;
5329
5330	dev->regview = ethdev->io_base;
5331	pci_dev_get(dev->pcidev);
5332	cp->func = PCI_FUNC(dev->pcidev->devfn);
5333	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5334	cp->status_blk_map = ethdev->irq_arr[0].status_blk_map;
5335	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5336
5337	err = cp->alloc_resc(dev);
5338	if (err) {
5339		netdev_err(dev->netdev, "allocate resource failure\n");
5340		goto err1;
5341	}
5342
5343	err = cp->start_hw(dev);
5344	if (err)
5345		goto err1;
5346
5347	err = cnic_cm_open(dev);
5348	if (err)
5349		goto err1;
5350
5351	set_bit(CNIC_F_CNIC_UP, &dev->flags);
5352
5353	cp->enable_int(dev);
5354
5355	return 0;
5356
5357err1:
5358	if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
5359		cp->stop_hw(dev);
5360	else
5361		cp->free_resc(dev);
5362	pci_dev_put(dev->pcidev);
5363	return err;
5364}
5365
5366static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5367{
5368	cnic_disable_bnx2_int_sync(dev);
5369
5370	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5371	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5372
5373	cnic_init_context(dev, KWQ_CID);
5374	cnic_init_context(dev, KCQ_CID);
5375
5376	cnic_setup_5709_context(dev, 0);
5377	cnic_free_irq(dev);
5378
5379	cnic_free_resc(dev);
5380}
5381
5382
5383static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5384{
5385	struct cnic_local *cp = dev->cnic_priv;
5386	struct bnx2x *bp = netdev_priv(dev->netdev);
5387	u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
5388	u32 sb_id = cp->status_blk_num;
5389	u32 idx_off, syn_off;
5390
5391	cnic_free_irq(dev);
5392
5393	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5394		idx_off = offsetof(struct hc_status_block_e2, index_values) +
5395			  (hc_index * sizeof(u16));
5396
5397		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
5398	} else {
5399		idx_off = offsetof(struct hc_status_block_e1x, index_values) +
5400			  (hc_index * sizeof(u16));
5401
5402		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
5403	}
5404	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
5405	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
5406		  idx_off, 0);
5407
5408	*cp->kcq1.hw_prod_idx_ptr = 0;
5409	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5410		CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
5411	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5412	cnic_free_resc(dev);
5413}
5414
5415static void cnic_stop_hw(struct cnic_dev *dev)
5416{
5417	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5418		struct cnic_local *cp = dev->cnic_priv;
5419		int i = 0;
5420
5421		/* Need to wait for the ring shutdown event to complete
5422		 * before clearing the CNIC_UP flag.
5423		 */
5424		while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
5425			msleep(100);
5426			i++;
5427		}
5428		cnic_shutdown_rings(dev);
5429		cp->stop_cm(dev);
5430		cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
5431		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5432		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
5433		synchronize_rcu();
5434		cnic_cm_shutdown(dev);
5435		cp->stop_hw(dev);
5436		pci_dev_put(dev->pcidev);
5437	}
5438}
5439
5440static void cnic_free_dev(struct cnic_dev *dev)
5441{
5442	int i = 0;
5443
5444	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5445		msleep(100);
5446		i++;
5447	}
5448	if (atomic_read(&dev->ref_count) != 0)
5449		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5450
5451	netdev_info(dev->netdev, "Removed CNIC device\n");
5452	dev_put(dev->netdev);
5453	kfree(dev);
5454}
5455
5456static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
5457				struct cnic_fc_npiv_tbl *npiv_tbl)
5458{
5459	struct cnic_local *cp = dev->cnic_priv;
5460	struct bnx2x *bp = netdev_priv(dev->netdev);
5461	int ret;
5462
5463	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
5464		return -EAGAIN;     /* bnx2x is down */
5465
5466	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
5467		return -EINVAL;
5468
5469	ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
5470	return ret;
5471}
5472
5473static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5474				       struct pci_dev *pdev)
5475{
5476	struct cnic_dev *cdev;
5477	struct cnic_local *cp;
5478	int alloc_size;
5479
5480	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5481
5482	cdev = kzalloc(alloc_size, GFP_KERNEL);
5483	if (cdev == NULL)
5484		return NULL;
5485
5486	cdev->netdev = dev;
5487	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5488	cdev->register_device = cnic_register_device;
5489	cdev->unregister_device = cnic_unregister_device;
5490	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5491	cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
5492	atomic_set(&cdev->ref_count, 0);
5493
5494	cp = cdev->cnic_priv;
5495	cp->dev = cdev;
5496	cp->l2_single_buf_size = 0x400;
5497	cp->l2_rx_ring_size = 3;
5498
5499	spin_lock_init(&cp->cnic_ulp_lock);
5500
5501	netdev_info(dev, "Added CNIC device\n");
5502
5503	return cdev;
5504}
5505
5506static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5507{
5508	struct pci_dev *pdev;
5509	struct cnic_dev *cdev;
5510	struct cnic_local *cp;
5511	struct bnx2 *bp = netdev_priv(dev);
5512	struct cnic_eth_dev *ethdev = NULL;
5513
5514	if (bp->cnic_probe)
5515		ethdev = (bp->cnic_probe)(dev);
5516
5517	if (!ethdev)
5518		return NULL;
5519
5520	pdev = ethdev->pdev;
5521	if (!pdev)
5522		return NULL;
5523
5524	dev_hold(dev);
5525	pci_dev_get(pdev);
5526	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5527	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5528	    (pdev->revision < 0x10)) {
5529		pci_dev_put(pdev);
5530		goto cnic_err;
5531	}
5532	pci_dev_put(pdev);
5533
5534	cdev = cnic_alloc_dev(dev, pdev);
5535	if (cdev == NULL)
5536		goto cnic_err;
5537
5538	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5539	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5540
5541	cp = cdev->cnic_priv;
5542	cp->ethdev = ethdev;
5543	cdev->pcidev = pdev;
5544	cp->chip_id = ethdev->chip_id;
5545
5546	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5547
5548	cp->cnic_ops = &cnic_bnx2_ops;
5549	cp->start_hw = cnic_start_bnx2_hw;
5550	cp->stop_hw = cnic_stop_bnx2_hw;
5551	cp->setup_pgtbl = cnic_setup_page_tbl;
5552	cp->alloc_resc = cnic_alloc_bnx2_resc;
5553	cp->free_resc = cnic_free_resc;
5554	cp->start_cm = cnic_cm_init_bnx2_hw;
5555	cp->stop_cm = cnic_cm_stop_bnx2_hw;
5556	cp->enable_int = cnic_enable_bnx2_int;
5557	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5558	cp->close_conn = cnic_close_bnx2_conn;
5559	return cdev;
5560
5561cnic_err:
5562	dev_put(dev);
5563	return NULL;
5564}
5565
5566static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5567{
5568	struct pci_dev *pdev;
5569	struct cnic_dev *cdev;
5570	struct cnic_local *cp;
5571	struct bnx2x *bp = netdev_priv(dev);
5572	struct cnic_eth_dev *ethdev = NULL;
5573
5574	if (bp->cnic_probe)
5575		ethdev = bp->cnic_probe(dev);
5576
5577	if (!ethdev)
5578		return NULL;
5579
5580	pdev = ethdev->pdev;
5581	if (!pdev)
5582		return NULL;
5583
5584	dev_hold(dev);
5585	cdev = cnic_alloc_dev(dev, pdev);
5586	if (cdev == NULL) {
5587		dev_put(dev);
5588		return NULL;
5589	}
5590
5591	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5592	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5593
5594	cp = cdev->cnic_priv;
5595	cp->ethdev = ethdev;
5596	cdev->pcidev = pdev;
5597	cp->chip_id = ethdev->chip_id;
5598
5599	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
5600
5601	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5602		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5603	if (CNIC_SUPPORTS_FCOE(bp)) {
5604		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5605		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
5606	}
5607
5608	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
5609		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
5610
5611	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
5612
5613	cp->cnic_ops = &cnic_bnx2x_ops;
5614	cp->start_hw = cnic_start_bnx2x_hw;
5615	cp->stop_hw = cnic_stop_bnx2x_hw;
5616	cp->setup_pgtbl = cnic_setup_page_tbl_le;
5617	cp->alloc_resc = cnic_alloc_bnx2x_resc;
5618	cp->free_resc = cnic_free_resc;
5619	cp->start_cm = cnic_cm_init_bnx2x_hw;
5620	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5621	cp->enable_int = cnic_enable_bnx2x_int;
5622	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5623	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5624		cp->ack_int = cnic_ack_bnx2x_e2_msix;
5625		cp->arm_int = cnic_arm_bnx2x_e2_msix;
5626	} else {
5627		cp->ack_int = cnic_ack_bnx2x_msix;
5628		cp->arm_int = cnic_arm_bnx2x_msix;
5629	}
5630	cp->close_conn = cnic_close_bnx2x_conn;
5631	return cdev;
5632}
5633
5634static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5635{
5636	struct ethtool_drvinfo drvinfo;
5637	struct cnic_dev *cdev = NULL;
5638
5639	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5640		memset(&drvinfo, 0, sizeof(drvinfo));
5641		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5642
5643		if (!strcmp(drvinfo.driver, "bnx2"))
5644			cdev = init_bnx2_cnic(dev);
5645		if (!strcmp(drvinfo.driver, "bnx2x"))
5646			cdev = init_bnx2x_cnic(dev);
5647		if (cdev) {
5648			write_lock(&cnic_dev_lock);
5649			list_add(&cdev->list, &cnic_dev_list);
5650			write_unlock(&cnic_dev_lock);
5651		}
5652	}
5653	return cdev;
5654}
5655
5656static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5657			      u16 vlan_id)
5658{
5659	int if_type;
5660
5661	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5662		struct cnic_ulp_ops *ulp_ops;
5663		void *ctx;
5664
5665		mutex_lock(&cnic_lock);
5666		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
5667						lockdep_is_held(&cnic_lock));
5668		if (!ulp_ops || !ulp_ops->indicate_netevent) {
5669			mutex_unlock(&cnic_lock);
5670			continue;
5671		}
5672
5673		ctx = cp->ulp_handle[if_type];
5674
5675		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5676		mutex_unlock(&cnic_lock);
5677
5678		ulp_ops->indicate_netevent(ctx, event, vlan_id);
5679
5680		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5681	}
5682}
5683
5684/* netdev event handler */
5685static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5686							 void *ptr)
5687{
5688	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
5689	struct cnic_dev *dev;
5690	int new_dev = 0;
5691
5692	dev = cnic_from_netdev(netdev);
5693
5694	if (!dev && event == NETDEV_REGISTER) {
5695		/* Check for the hot-plug device */
5696		dev = is_cnic_dev(netdev);
5697		if (dev) {
5698			new_dev = 1;
5699			cnic_hold(dev);
5700		}
5701	}
5702	if (dev) {
5703		struct cnic_local *cp = dev->cnic_priv;
5704
5705		if (new_dev)
5706			cnic_ulp_init(dev);
5707		else if (event == NETDEV_UNREGISTER)
5708			cnic_ulp_exit(dev);
5709
5710		if (event == NETDEV_UP) {
5711			if (cnic_register_netdev(dev) != 0) {
5712				cnic_put(dev);
5713				goto done;
5714			}
5715			if (!cnic_start_hw(dev))
5716				cnic_ulp_start(dev);
5717		}
5718
5719		cnic_rcv_netevent(cp, event, 0);
5720
5721		if (event == NETDEV_GOING_DOWN) {
5722			cnic_ulp_stop(dev);
5723			cnic_stop_hw(dev);
5724			cnic_unregister_netdev(dev);
5725		} else if (event == NETDEV_UNREGISTER) {
5726			write_lock(&cnic_dev_lock);
5727			list_del_init(&dev->list);
5728			write_unlock(&cnic_dev_lock);
5729
5730			cnic_put(dev);
5731			cnic_free_dev(dev);
5732			goto done;
5733		}
5734		cnic_put(dev);
5735	} else {
5736		struct net_device *realdev;
5737		u16 vid;
5738
5739		vid = cnic_get_vlan(netdev, &realdev);
5740		if (realdev) {
5741			dev = cnic_from_netdev(realdev);
5742			if (dev) {
5743				vid |= VLAN_CFI_MASK;	/* make non-zero */
5744				cnic_rcv_netevent(dev->cnic_priv, event, vid);
5745				cnic_put(dev);
5746			}
5747		}
5748	}
5749done:
5750	return NOTIFY_DONE;
5751}
5752
5753static struct notifier_block cnic_netdev_notifier = {
5754	.notifier_call = cnic_netdev_event
5755};
5756
5757static void cnic_release(void)
5758{
5759	struct cnic_uio_dev *udev;
5760
5761	while (!list_empty(&cnic_udev_list)) {
5762		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5763				  list);
5764		cnic_free_uio(udev);
5765	}
5766}
5767
5768static int __init cnic_init(void)
5769{
5770	int rc = 0;
5771
5772	pr_info("%s", version);
5773
5774	rc = register_netdevice_notifier(&cnic_netdev_notifier);
5775	if (rc) {
5776		cnic_release();
5777		return rc;
5778	}
5779
5780	cnic_wq = create_singlethread_workqueue("cnic_wq");
5781	if (!cnic_wq) {
5782		cnic_release();
5783		unregister_netdevice_notifier(&cnic_netdev_notifier);
5784		return -ENOMEM;
5785	}
5786
5787	return 0;
5788}
5789
5790static void __exit cnic_exit(void)
5791{
5792	unregister_netdevice_notifier(&cnic_netdev_notifier);
5793	cnic_release();
5794	destroy_workqueue(cnic_wq);
5795}
5796
5797module_init(cnic_init);
5798module_exit(cnic_exit);
5799