qlnx_os.c revision 322851
1/*
2 * Copyright (c) 2017-2018 Cavium, Inc.
3 * All rights reserved.
4 *
5 *  Redistribution and use in source and binary forms, with or without
6 *  modification, are permitted provided that the following conditions
7 *  are met:
8 *
9 *  1. Redistributions of source code must retain the above copyright
10 *     notice, this list of conditions and the following disclaimer.
11 *  2. Redistributions in binary form must reproduce the above copyright
12 *     notice, this list of conditions and the following disclaimer in the
13 *     documentation and/or other materials provided with the distribution.
14 *
15 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 *  POSSIBILITY OF SUCH DAMAGE.
26 */
27
28
29/*
30 * File: qlnx_os.c
31 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: stable/11/sys/dev/qlnx/qlnxe/qlnx_os.c 322851 2017-08-24 18:51:55Z davidcs $");
36
37#include "qlnx_os.h"
38#include "bcm_osal.h"
39#include "reg_addr.h"
40#include "ecore_gtt_reg_addr.h"
41#include "ecore.h"
42#include "ecore_chain.h"
43#include "ecore_status.h"
44#include "ecore_hw.h"
45#include "ecore_rt_defs.h"
46#include "ecore_init_ops.h"
47#include "ecore_int.h"
48#include "ecore_cxt.h"
49#include "ecore_spq.h"
50#include "ecore_init_fw_funcs.h"
51#include "ecore_sp_commands.h"
52#include "ecore_dev_api.h"
53#include "ecore_l2_api.h"
54#include "ecore_mcp.h"
55#include "ecore_hw_defs.h"
56#include "mcp_public.h"
57#include "ecore_iro.h"
58#include "nvm_cfg.h"
59#include "ecore_dev_api.h"
60#include "ecore_dbg_fw_funcs.h"
61
62#include "qlnx_ioctl.h"
63#include "qlnx_def.h"
64#include "qlnx_ver.h"
65#include <sys/smp.h>
66
67
68/*
69 * static functions
70 */
71/*
72 * ioctl related functions
73 */
74static void qlnx_add_sysctls(qlnx_host_t *ha);
75
76/*
77 * main driver
78 */
79static void qlnx_release(qlnx_host_t *ha);
80static void qlnx_fp_isr(void *arg);
81static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
82static void qlnx_init(void *arg);
83static void qlnx_init_locked(qlnx_host_t *ha);
84static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
85static int qlnx_set_promisc(qlnx_host_t *ha);
86static int qlnx_set_allmulti(qlnx_host_t *ha);
87static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
88static int qlnx_media_change(struct ifnet *ifp);
89static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
90static void qlnx_stop(qlnx_host_t *ha);
91static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
92		struct mbuf **m_headp);
93static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
94static uint32_t qlnx_get_optics(qlnx_host_t *ha,
95			struct qlnx_link_output *if_link);
96static int qlnx_transmit(struct ifnet *ifp, struct mbuf  *mp);
97static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp,
98		struct mbuf *mp);
99static void qlnx_qflush(struct ifnet *ifp);
100
101static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
102static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
103static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
104static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
105static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
106static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);
107
108static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
109static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);
110
111static int qlnx_nic_setup(struct ecore_dev *cdev,
112		struct ecore_pf_params *func_params);
113static int qlnx_nic_start(struct ecore_dev *cdev);
114static int qlnx_slowpath_start(qlnx_host_t *ha);
115static int qlnx_slowpath_stop(qlnx_host_t *ha);
116static int qlnx_init_hw(qlnx_host_t *ha);
117static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
118		char ver_str[VER_SIZE]);
119static void qlnx_unload(qlnx_host_t *ha);
120static int qlnx_load(qlnx_host_t *ha);
121static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
122		uint32_t add_mac);
123static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
124		uint32_t len);
125static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
126static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
127static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
128		struct qlnx_rx_queue *rxq);
129static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
130static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
131		int hwfn_index);
132static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
133		int hwfn_index);
134static void qlnx_timer(void *arg);
135static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
136static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
137static void qlnx_trigger_dump(qlnx_host_t *ha);
138static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
139			struct qlnx_tx_queue *txq);
140static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
141		struct qlnx_tx_queue *txq);
142static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
143		int lro_enable);
144static void qlnx_fp_taskqueue(void *context, int pending);
145static void qlnx_sample_storm_stats(qlnx_host_t *ha);
146static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
147		struct qlnx_agg_info *tpa);
148static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);
149
150#if __FreeBSD_version >= 1100000
151static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
152#endif
153
154
155/*
156 * Hooks to the Operating Systems
157 */
158static int qlnx_pci_probe (device_t);
159static int qlnx_pci_attach (device_t);
160static int qlnx_pci_detach (device_t);
161
162static device_method_t qlnx_pci_methods[] = {
163	/* Device interface */
164	DEVMETHOD(device_probe, qlnx_pci_probe),
165	DEVMETHOD(device_attach, qlnx_pci_attach),
166	DEVMETHOD(device_detach, qlnx_pci_detach),
167	{ 0, 0 }
168};
169
170static driver_t qlnx_pci_driver = {
171	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
172};
173
174static devclass_t qlnx_devclass;
175
176MODULE_VERSION(if_qlnxe,1);
177DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);
178
179MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
180MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
181
182MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");
183
184
185char qlnx_dev_str[64];
186char qlnx_ver_str[VER_SIZE];
187char qlnx_name_str[NAME_SIZE];
188
189/*
190 * Some PCI Configuration Space Related Defines
191 */
192
193#ifndef PCI_VENDOR_QLOGIC
194#define PCI_VENDOR_QLOGIC		0x1077
195#endif
196
197/* 40G Adapter QLE45xxx*/
198#ifndef QLOGIC_PCI_DEVICE_ID_1634
199#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
200#endif
201
202/* 100G Adapter QLE45xxx*/
203#ifndef QLOGIC_PCI_DEVICE_ID_1644
204#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
205#endif
206
207/* 25G Adapter QLE45xxx*/
208#ifndef QLOGIC_PCI_DEVICE_ID_1656
209#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
210#endif
211
212/* 50G Adapter QLE45xxx*/
213#ifndef QLOGIC_PCI_DEVICE_ID_1654
214#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
215#endif
216
217/* 10G/25G/40G Adapter QLE41xxx*/
218#ifndef QLOGIC_PCI_DEVICE_ID_8070
219#define QLOGIC_PCI_DEVICE_ID_8070	0x8070
220#endif
221
222SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD, 0, "qlnxe driver parameters");
223/* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
224static int qlnxe_queue_count = QLNX_DEFAULT_RSS;
225SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
226		&qlnxe_queue_count, 0, "Multi-Queue queue count");
227
228static int
229qlnx_valid_device(device_t dev)
230{
231        uint16_t	device_id;
232
233        device_id = pci_get_device(dev);
234
235        if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
236                (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
237                (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
238                (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
239                (device_id == QLOGIC_PCI_DEVICE_ID_8070))
240                return 0;
241
242        return -1;
243}
244
245/*
246 * Name:	qlnx_pci_probe
247 * Function:	Validate the PCI device to be a QLA80XX device
248 */
249static int
250qlnx_pci_probe(device_t dev)
251{
252	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
253		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
254	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");
255
256	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
257                return (ENXIO);
258	}
259
260        switch (pci_get_device(dev)) {
261
262        case QLOGIC_PCI_DEVICE_ID_1644:
263		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
264			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
265			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
266			QLNX_VERSION_BUILD);
267                device_set_desc_copy(dev, qlnx_dev_str);
268
269                break;
270
271        case QLOGIC_PCI_DEVICE_ID_1634:
272		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
273			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
274			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
275			QLNX_VERSION_BUILD);
276                device_set_desc_copy(dev, qlnx_dev_str);
277
278                break;
279
280        case QLOGIC_PCI_DEVICE_ID_1656:
281		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
282			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
283			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
284			QLNX_VERSION_BUILD);
285                device_set_desc_copy(dev, qlnx_dev_str);
286
287                break;
288
289        case QLOGIC_PCI_DEVICE_ID_1654:
290		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
291			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
292			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
293			QLNX_VERSION_BUILD);
294                device_set_desc_copy(dev, qlnx_dev_str);
295
296                break;
297
298	case QLOGIC_PCI_DEVICE_ID_8070:
299		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
300			"Qlogic 10GbE/25GbE/40GbE PCI CNA (AH) "
301			"Adapter-Ethernet Function",
302			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
303			QLNX_VERSION_BUILD);
304		device_set_desc_copy(dev, qlnx_dev_str);
305
306		break;
307
308        default:
309                return (ENXIO);
310        }
311
312        return (BUS_PROBE_DEFAULT);
313}
314
315static uint16_t
316qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
317	struct qlnx_tx_queue *txq)
318{
319	u16 hw_bd_cons;
320	u16 ecore_cons_idx;
321	uint16_t diff;
322
323	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
324
325	ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
326	if (hw_bd_cons < ecore_cons_idx) {
327		diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
328	} else {
329		diff = hw_bd_cons - ecore_cons_idx;
330	}
331	return diff;
332}
333
334
335static void
336qlnx_sp_intr(void *arg)
337{
338	struct ecore_hwfn	*p_hwfn;
339	qlnx_host_t		*ha;
340	int			i;
341
342	p_hwfn = arg;
343
344	if (p_hwfn == NULL) {
345		printf("%s: spurious slowpath intr\n", __func__);
346		return;
347	}
348
349	ha = (qlnx_host_t *)p_hwfn->p_dev;
350
351	QL_DPRINT2(ha, "enter\n");
352
353	for (i = 0; i < ha->cdev.num_hwfns; i++) {
354		if (&ha->cdev.hwfns[i] == p_hwfn) {
355			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
356			break;
357		}
358	}
359	QL_DPRINT2(ha, "exit\n");
360
361	return;
362}
363
364static void
365qlnx_sp_taskqueue(void *context, int pending)
366{
367	struct ecore_hwfn	*p_hwfn;
368
369	p_hwfn = context;
370
371	if (p_hwfn != NULL) {
372		qlnx_sp_isr(p_hwfn);
373	}
374	return;
375}
376
377static int
378qlnx_create_sp_taskqueues(qlnx_host_t *ha)
379{
380	int	i;
381	uint8_t	tq_name[32];
382
383	for (i = 0; i < ha->cdev.num_hwfns; i++) {
384
385                struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
386
387		bzero(tq_name, sizeof (tq_name));
388		snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);
389
390		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);
391
392		ha->sp_taskqueue[i] = taskqueue_create_fast(tq_name, M_NOWAIT,
393			 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
394
395		if (ha->sp_taskqueue[i] == NULL)
396			return (-1);
397
398		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
399			tq_name);
400
401		QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
402	}
403
404	return (0);
405}
406
407static void
408qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
409{
410	int	i;
411
412	for (i = 0; i < ha->cdev.num_hwfns; i++) {
413		if (ha->sp_taskqueue[i] != NULL) {
414			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
415			taskqueue_free(ha->sp_taskqueue[i]);
416		}
417	}
418	return;
419}
420
421static void
422qlnx_fp_taskqueue(void *context, int pending)
423{
424        struct qlnx_fastpath	*fp;
425        qlnx_host_t		*ha;
426        struct ifnet		*ifp;
427
428#ifdef QLNX_RCV_IN_TASKQ
429	int			lro_enable;
430	int			rx_int = 0, total_rx_count = 0;
431	struct thread		*cthread;
432#endif /* #ifdef QLNX_RCV_IN_TASKQ */
433
434        fp = context;
435
436        if (fp == NULL)
437                return;
438
439        ha = (qlnx_host_t *)fp->edev;
440
441        ifp = ha->ifp;
442
443#ifdef QLNX_RCV_IN_TASKQ
444
445	cthread = curthread;
446
447	thread_lock(cthread);
448
449	if (!sched_is_bound(cthread))
450		sched_bind(cthread, fp->rss_id);
451
452	thread_unlock(cthread);
453
454	lro_enable = ifp->if_capenable & IFCAP_LRO;
455
456	rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, lro_enable);
457
458	if (rx_int) {
459		fp->rx_pkts += rx_int;
460		total_rx_count += rx_int;
461	}
462
463#ifdef QLNX_SOFT_LRO
464	{
465		struct lro_ctrl *lro;
466
467		lro = &fp->rxq->lro;
468
469		if (lro_enable && total_rx_count) {
470
471#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
472
473			if (ha->dbg_trace_lro_cnt) {
474				if (lro->lro_mbuf_count & ~1023)
475					fp->lro_cnt_1024++;
476				else if (lro->lro_mbuf_count & ~511)
477					fp->lro_cnt_512++;
478				else if (lro->lro_mbuf_count & ~255)
479					fp->lro_cnt_256++;
480				else if (lro->lro_mbuf_count & ~127)
481					fp->lro_cnt_128++;
482				else if (lro->lro_mbuf_count & ~63)
483					fp->lro_cnt_64++;
484			}
485			tcp_lro_flush_all(lro);
486
487#else
488			struct lro_entry *queued;
489
490			while ((!SLIST_EMPTY(&lro->lro_active))) {
491				queued = SLIST_FIRST(&lro->lro_active);
492				SLIST_REMOVE_HEAD(&lro->lro_active, next);
493				tcp_lro_flush(lro, queued);
494			}
495#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
496		}
497	}
498#endif /* #ifdef QLNX_SOFT_LRO */
499
500	ecore_sb_update_sb_idx(fp->sb_info);
501	rmb();
502
503#endif /* #ifdef QLNX_RCV_IN_TASKQ */
504
505        if(ifp->if_drv_flags & IFF_DRV_RUNNING) {
506
507                if (!drbr_empty(ifp, fp->tx_br)) {
508
509                        if(mtx_trylock(&fp->tx_mtx)) {
510
511#ifdef QLNX_TRACE_PERF_DATA
512                                tx_pkts = fp->tx_pkts_transmitted;
513                                tx_compl = fp->tx_pkts_completed;
514#endif
515
516                                qlnx_transmit_locked(ifp, fp, NULL);
517
518#ifdef QLNX_TRACE_PERF_DATA
519                                fp->tx_pkts_trans_fp +=
520					(fp->tx_pkts_transmitted - tx_pkts);
521                                fp->tx_pkts_compl_fp +=
522					(fp->tx_pkts_completed - tx_compl);
523#endif
524                                mtx_unlock(&fp->tx_mtx);
525                        }
526                }
527        }
528
529#ifdef QLNX_RCV_IN_TASKQ
530	if (rx_int) {
531		if (fp->fp_taskqueue != NULL)
532			taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
533	} else {
534		if (fp->tx_ring_full) {
535			qlnx_mdelay(__func__, 100);
536		}
537		ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
538	}
539#endif /* #ifdef QLNX_RCV_IN_TASKQ */
540
541        QL_DPRINT2(ha, "exit \n");
542        return;
543}
544
545static int
546qlnx_create_fp_taskqueues(qlnx_host_t *ha)
547{
548	int	i;
549	uint8_t	tq_name[32];
550	struct qlnx_fastpath *fp;
551
552	for (i = 0; i < ha->num_rss; i++) {
553
554                fp = &ha->fp_array[i];
555
556		bzero(tq_name, sizeof (tq_name));
557		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
558
559		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);
560
561		fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
562					taskqueue_thread_enqueue,
563					&fp->fp_taskqueue);
564
565		if (fp->fp_taskqueue == NULL)
566			return (-1);
567
568		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
569			tq_name);
570
571		QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue);
572	}
573
574	return (0);
575}
576
577static void
578qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
579{
580	int			i;
581	struct qlnx_fastpath	*fp;
582
583	for (i = 0; i < ha->num_rss; i++) {
584
585                fp = &ha->fp_array[i];
586
587		if (fp->fp_taskqueue != NULL) {
588
589			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
590			taskqueue_free(fp->fp_taskqueue);
591			fp->fp_taskqueue = NULL;
592		}
593	}
594	return;
595}
596
597static void
598qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
599{
600	int			i;
601	struct qlnx_fastpath	*fp;
602
603	for (i = 0; i < ha->num_rss; i++) {
604                fp = &ha->fp_array[i];
605
606		if (fp->fp_taskqueue != NULL) {
607			QLNX_UNLOCK(ha);
608			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
609			QLNX_LOCK(ha);
610		}
611	}
612	return;
613}
614
615static void
616qlnx_get_params(qlnx_host_t *ha)
617{
618	if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
619		device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
620			qlnxe_queue_count);
621		qlnxe_queue_count = 0;
622	}
623	return;
624}
625
626/*
627 * Name:	qlnx_pci_attach
628 * Function:	attaches the device to the operating system
629 */
630static int
631qlnx_pci_attach(device_t dev)
632{
633	qlnx_host_t	*ha = NULL;
634	uint32_t	rsrc_len_reg = 0;
635	uint32_t	rsrc_len_dbells = 0;
636	uint32_t	rsrc_len_msix = 0;
637	int		i;
638	uint32_t	mfw_ver;
639
640        if ((ha = device_get_softc(dev)) == NULL) {
641                device_printf(dev, "cannot get softc\n");
642                return (ENOMEM);
643        }
644
645        memset(ha, 0, sizeof (qlnx_host_t));
646
647        if (qlnx_valid_device(dev) != 0) {
648                device_printf(dev, "device is not valid device\n");
649                return (ENXIO);
650	}
651        ha->pci_func = pci_get_function(dev);
652
653        ha->pci_dev = dev;
654
655	mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
656
657        ha->flags.lock_init = 1;
658
659        pci_enable_busmaster(dev);
660
661	/*
662	 * map the PCI BARs
663	 */
664
665        ha->reg_rid = PCIR_BAR(0);
666        ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
667                                RF_ACTIVE);
668
669        if (ha->pci_reg == NULL) {
670                device_printf(dev, "unable to map BAR0\n");
671                goto qlnx_pci_attach_err;
672        }
673
674        rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
675                                        ha->reg_rid);
676
677        ha->dbells_rid = PCIR_BAR(2);
678        ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
679                        &ha->dbells_rid, RF_ACTIVE);
680
681        if (ha->pci_dbells == NULL) {
682                device_printf(dev, "unable to map BAR1\n");
683                goto qlnx_pci_attach_err;
684	}
685
686        rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
687                                        ha->dbells_rid);
688
689	ha->dbells_phys_addr = (uint64_t)
690		bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);;
691	ha->dbells_size = rsrc_len_dbells;
692
693        ha->msix_rid = PCIR_BAR(4);
694        ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
695                        &ha->msix_rid, RF_ACTIVE);
696
697        if (ha->msix_bar == NULL) {
698                device_printf(dev, "unable to map BAR2\n");
699                goto qlnx_pci_attach_err;
700	}
701
702        rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
703                                        ha->msix_rid);
704	/*
705	 * allocate dma tags
706	 */
707
708	if (qlnx_alloc_parent_dma_tag(ha))
709                goto qlnx_pci_attach_err;
710
711	if (qlnx_alloc_tx_dma_tag(ha))
712                goto qlnx_pci_attach_err;
713
714	if (qlnx_alloc_rx_dma_tag(ha))
715                goto qlnx_pci_attach_err;
716
717
718	if (qlnx_init_hw(ha) != 0)
719		goto qlnx_pci_attach_err;
720
721	qlnx_get_params(ha);
722
723	if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
724		(qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
725		qlnxe_queue_count = QLNX_MAX_RSS;
726	}
727
728	/*
729	 * Allocate MSI-x vectors
730	 */
731	if(qlnxe_queue_count == 0)
732		ha->num_rss = QLNX_DEFAULT_RSS;
733	 else
734		ha->num_rss = qlnxe_queue_count;
735
736	ha->num_tc = QLNX_MAX_TC;
737
738        ha->msix_count = pci_msix_count(dev);
739
740	if (ha->msix_count > (mp_ncpus + ha->cdev.num_hwfns))
741		ha->msix_count = mp_ncpus + ha->cdev.num_hwfns;
742
743        if (!ha->msix_count ||
744		(ha->msix_count < (ha->cdev.num_hwfns + 1 ))) {
745                device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
746                        ha->msix_count);
747                goto qlnx_pci_attach_err;
748        }
749
750	if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns ))
751		ha->msix_count = ha->num_rss + ha->cdev.num_hwfns;
752	else
753		ha->num_rss = ha->msix_count - ha->cdev.num_hwfns;
754
755	QL_DPRINT1(ha, "\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]"
756		"\n\t\t\tdbells [%p, 0x%08x 0x%08x]"
757		"\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]"
758		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
759		 ha->pci_reg, rsrc_len_reg,
760		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
761		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
762		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);
763        if (pci_alloc_msix(dev, &ha->msix_count)) {
764                device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
765                        ha->msix_count);
766                ha->msix_count = 0;
767                goto qlnx_pci_attach_err;
768        }
769
770	/*
771	 * Initialize slow path interrupt and task queue
772	 */
773	if (qlnx_create_sp_taskqueues(ha) != 0)
774		goto qlnx_pci_attach_err;
775
776	for (i = 0; i < ha->cdev.num_hwfns; i++) {
777
778                struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
779
780        	ha->sp_irq_rid[i] = i + 1;
781        	ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
782                                &ha->sp_irq_rid[i],
783                                (RF_ACTIVE | RF_SHAREABLE));
784        	if (ha->sp_irq[i] == NULL) {
785                	device_printf(dev,
786				"could not allocate mbx interrupt\n");
787                	goto qlnx_pci_attach_err;
788        	}
789
790        	if (bus_setup_intr(dev, ha->sp_irq[i],
791				(INTR_TYPE_NET | INTR_MPSAFE), NULL,
792				qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
793                	device_printf(dev,
794				"could not setup slow path interrupt\n");
795			goto qlnx_pci_attach_err;
796		}
797
798		QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
799			" sp_irq %p sp_handle %p\n", p_hwfn,
800			ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
801
802	}
803
804	/*
805	 * initialize fast path interrupt
806	 */
807	if (qlnx_create_fp_taskqueues(ha) != 0)
808		goto qlnx_pci_attach_err;
809
810        for (i = 0; i < ha->num_rss; i++) {
811                ha->irq_vec[i].rss_idx = i;
812                ha->irq_vec[i].ha = ha;
813                ha->irq_vec[i].irq_rid = (1 + ha->cdev.num_hwfns) + i;
814
815                ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
816                                &ha->irq_vec[i].irq_rid,
817                                (RF_ACTIVE | RF_SHAREABLE));
818
819                if (ha->irq_vec[i].irq == NULL) {
820                        device_printf(dev,
821				"could not allocate interrupt[%d]\n", i);
822                        goto qlnx_pci_attach_err;
823                }
824
825		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
826                        device_printf(dev, "could not allocate tx_br[%d]\n", i);
827                        goto qlnx_pci_attach_err;
828
829		}
830	}
831
832	callout_init(&ha->qlnx_callout, 1);
833	ha->flags.callout_init = 1;
834
835	for (i = 0; i < ha->cdev.num_hwfns; i++) {
836
837		if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
838			goto qlnx_pci_attach_err;
839		if (ha->grcdump_size[i] == 0)
840			goto qlnx_pci_attach_err;
841
842		ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
843		QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
844			i, ha->grcdump_size[i]);
845
846		ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
847		if (ha->grcdump[i] == NULL) {
848			device_printf(dev, "grcdump alloc[%d] failed\n", i);
849			goto qlnx_pci_attach_err;
850		}
851
852		if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
853			goto qlnx_pci_attach_err;
854		if (ha->idle_chk_size[i] == 0)
855			goto qlnx_pci_attach_err;
856
857		ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
858		QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
859			i, ha->idle_chk_size[i]);
860
861		ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
862
863		if (ha->idle_chk[i] == NULL) {
864			device_printf(dev, "idle_chk alloc failed\n");
865			goto qlnx_pci_attach_err;
866		}
867	}
868
869	if (qlnx_slowpath_start(ha) != 0) {
870
871		qlnx_mdelay(__func__, 1000);
872		qlnx_trigger_dump(ha);
873
874		goto qlnx_pci_attach_err0;
875	} else
876		ha->flags.slowpath_start = 1;
877
878	if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
879		qlnx_mdelay(__func__, 1000);
880		qlnx_trigger_dump(ha);
881
882		goto qlnx_pci_attach_err0;
883	}
884
885	if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
886		qlnx_mdelay(__func__, 1000);
887		qlnx_trigger_dump(ha);
888
889		goto qlnx_pci_attach_err0;
890	}
891	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
892		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
893		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
894	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
895		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
896		FW_ENGINEERING_VERSION);
897
898	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
899		 ha->stormfw_ver, ha->mfw_ver);
900
901	qlnx_init_ifnet(dev, ha);
902
903	/*
904	 * add sysctls
905	 */
906	qlnx_add_sysctls(ha);
907
908qlnx_pci_attach_err0:
909        /*
910	 * create ioctl device interface
911	 */
912        if (qlnx_make_cdev(ha)) {
913                device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
914                goto qlnx_pci_attach_err;
915        }
916
917	QL_DPRINT2(ha, "success\n");
918
919        return (0);
920
921qlnx_pci_attach_err:
922
923	qlnx_release(ha);
924
925	return (ENXIO);
926}
927
928/*
929 * Name:	qlnx_pci_detach
930 * Function:	Unhooks the device from the operating system
931 */
932static int
933qlnx_pci_detach(device_t dev)
934{
935	qlnx_host_t	*ha = NULL;
936
937        if ((ha = device_get_softc(dev)) == NULL) {
938                device_printf(dev, "cannot get softc\n");
939                return (ENOMEM);
940        }
941
942	QLNX_LOCK(ha);
943	qlnx_stop(ha);
944	QLNX_UNLOCK(ha);
945
946	qlnx_release(ha);
947
948        return (0);
949}
950
951static int
952qlnx_init_hw(qlnx_host_t *ha)
953{
954	int				rval = 0;
955	struct ecore_hw_prepare_params	params;
956
957	ecore_init_struct(&ha->cdev);
958
959	/* ha->dp_module = ECORE_MSG_PROBE |
960				ECORE_MSG_INTR |
961				ECORE_MSG_SP |
962				ECORE_MSG_LINK |
963				ECORE_MSG_SPQ |
964				ECORE_MSG_RDMA;
965	ha->dp_level = ECORE_LEVEL_VERBOSE;*/
966	ha->dp_level = ECORE_LEVEL_NOTICE;
967
968	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
969
970	ha->cdev.regview = ha->pci_reg;
971	ha->cdev.doorbells = ha->pci_dbells;
972	ha->cdev.db_phys_addr = ha->dbells_phys_addr;
973	ha->cdev.db_size = ha->dbells_size;
974
975	bzero(&params, sizeof (struct ecore_hw_prepare_params));
976
977	ha->personality = ECORE_PCI_DEFAULT;
978
979	params.personality = ha->personality;
980
981	params.drv_resc_alloc = false;
982	params.chk_reg_fifo = false;
983	params.initiate_pf_flr = true;
984	params.epoch = 0;
985
986	ecore_hw_prepare(&ha->cdev, &params);
987
988	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
989
990	return (rval);
991}
992
993static void
994qlnx_release(qlnx_host_t *ha)
995{
996        device_t	dev;
997        int		i;
998
999        dev = ha->pci_dev;
1000
1001	QL_DPRINT2(ha, "enter\n");
1002
1003	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
1004		if (ha->idle_chk[i] != NULL) {
1005			free(ha->idle_chk[i], M_QLNXBUF);
1006			ha->idle_chk[i] = NULL;
1007		}
1008
1009		if (ha->grcdump[i] != NULL) {
1010			free(ha->grcdump[i], M_QLNXBUF);
1011			ha->grcdump[i] = NULL;
1012		}
1013	}
1014
1015        if (ha->flags.callout_init)
1016                callout_drain(&ha->qlnx_callout);
1017
1018	if (ha->flags.slowpath_start) {
1019		qlnx_slowpath_stop(ha);
1020	}
1021
1022	ecore_hw_remove(&ha->cdev);
1023
1024        qlnx_del_cdev(ha);
1025
1026        if (ha->ifp != NULL)
1027                ether_ifdetach(ha->ifp);
1028
1029	qlnx_free_tx_dma_tag(ha);
1030
1031	qlnx_free_rx_dma_tag(ha);
1032
1033	qlnx_free_parent_dma_tag(ha);
1034
1035        for (i = 0; i < ha->num_rss; i++) {
1036		struct qlnx_fastpath *fp = &ha->fp_array[i];
1037
1038                if (ha->irq_vec[i].handle) {
1039                        (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
1040                                        ha->irq_vec[i].handle);
1041                }
1042
1043                if (ha->irq_vec[i].irq) {
1044                        (void)bus_release_resource(dev, SYS_RES_IRQ,
1045                                ha->irq_vec[i].irq_rid,
1046                                ha->irq_vec[i].irq);
1047                }
1048
1049		qlnx_free_tx_br(ha, fp);
1050        }
1051	qlnx_destroy_fp_taskqueues(ha);
1052
1053 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
1054        	if (ha->sp_handle[i])
1055                	(void)bus_teardown_intr(dev, ha->sp_irq[i],
1056				ha->sp_handle[i]);
1057
1058        	if (ha->sp_irq[i])
1059			(void) bus_release_resource(dev, SYS_RES_IRQ,
1060				ha->sp_irq_rid[i], ha->sp_irq[i]);
1061	}
1062
1063	qlnx_destroy_sp_taskqueues(ha);
1064
1065        if (ha->msix_count)
1066                pci_release_msi(dev);
1067
1068        if (ha->flags.lock_init) {
1069                mtx_destroy(&ha->hw_lock);
1070        }
1071
1072        if (ha->pci_reg)
1073                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
1074                                ha->pci_reg);
1075
1076        if (ha->pci_dbells)
1077                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1078                                ha->pci_dbells);
1079
1080        if (ha->msix_bar)
1081                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1082                                ha->msix_bar);
1083
1084	QL_DPRINT2(ha, "exit\n");
1085	return;
1086}
1087
1088static void
1089qlnx_trigger_dump(qlnx_host_t *ha)
1090{
1091	int	i;
1092
1093	if (ha->ifp != NULL)
1094		ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1095
1096	QL_DPRINT2(ha, "enter\n");
1097
1098	for (i = 0; i < ha->cdev.num_hwfns; i++) {
1099		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1100		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1101	}
1102
1103	QL_DPRINT2(ha, "exit\n");
1104
1105	return;
1106}
1107
1108static int
1109qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
1110{
1111        int		err, ret = 0;
1112        qlnx_host_t	*ha;
1113
1114        err = sysctl_handle_int(oidp, &ret, 0, req);
1115
1116        if (err || !req->newptr)
1117                return (err);
1118
1119        if (ret == 1) {
1120                ha = (qlnx_host_t *)arg1;
1121                qlnx_trigger_dump(ha);
1122        }
1123        return (err);
1124}
1125
1126static int
1127qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
1128{
1129        int			err, i, ret = 0, usecs = 0;
1130        qlnx_host_t		*ha;
1131	struct ecore_hwfn	*p_hwfn;
1132	struct qlnx_fastpath	*fp;
1133
1134        err = sysctl_handle_int(oidp, &usecs, 0, req);
1135
1136        if (err || !req->newptr || !usecs || (usecs > 255))
1137                return (err);
1138
1139        ha = (qlnx_host_t *)arg1;
1140
1141	for (i = 0; i < ha->num_rss; i++) {
1142
1143		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1144
1145        	fp = &ha->fp_array[i];
1146
1147		if (fp->txq[0]->handle != NULL) {
1148			ret = ecore_set_queue_coalesce(p_hwfn, 0,
1149					(uint16_t)usecs, fp->txq[0]->handle);
1150		}
1151        }
1152
1153	if (!ret)
1154		ha->tx_coalesce_usecs = (uint8_t)usecs;
1155
1156        return (err);
1157}
1158
1159static int
1160qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
1161{
1162        int			err, i, ret = 0, usecs = 0;
1163        qlnx_host_t		*ha;
1164	struct ecore_hwfn	*p_hwfn;
1165	struct qlnx_fastpath	*fp;
1166
1167        err = sysctl_handle_int(oidp, &usecs, 0, req);
1168
1169        if (err || !req->newptr || !usecs || (usecs > 255))
1170                return (err);
1171
1172        ha = (qlnx_host_t *)arg1;
1173
1174	for (i = 0; i < ha->num_rss; i++) {
1175
1176		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1177
1178        	fp = &ha->fp_array[i];
1179
1180		if (fp->rxq->handle != NULL) {
1181			ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
1182					 0, fp->rxq->handle);
1183		}
1184	}
1185
1186	if (!ret)
1187		ha->rx_coalesce_usecs = (uint8_t)usecs;
1188
1189        return (err);
1190}
1191
1192static void
1193qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
1194{
1195        struct sysctl_ctx_list	*ctx;
1196        struct sysctl_oid_list	*children;
1197	struct sysctl_oid	*ctx_oid;
1198
1199        ctx = device_get_sysctl_ctx(ha->pci_dev);
1200	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1201
1202	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
1203			CTLFLAG_RD, NULL, "spstat");
1204        children = SYSCTL_CHILDREN(ctx_oid);
1205
1206	SYSCTL_ADD_QUAD(ctx, children,
1207                OID_AUTO, "sp_interrupts",
1208                CTLFLAG_RD, &ha->sp_interrupts,
1209                "No. of slowpath interrupts");
1210
1211	return;
1212}
1213
1214static void
1215qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
1216{
1217        struct sysctl_ctx_list	*ctx;
1218        struct sysctl_oid_list	*children;
1219        struct sysctl_oid_list	*node_children;
1220	struct sysctl_oid	*ctx_oid;
1221	int			i, j;
1222	uint8_t			name_str[16];
1223
1224        ctx = device_get_sysctl_ctx(ha->pci_dev);
1225	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1226
1227	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
1228			CTLFLAG_RD, NULL, "fpstat");
1229	children = SYSCTL_CHILDREN(ctx_oid);
1230
1231	for (i = 0; i < ha->num_rss; i++) {
1232
1233		bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1234		snprintf(name_str, sizeof(name_str), "%d", i);
1235
1236		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
1237			CTLFLAG_RD, NULL, name_str);
1238		node_children = SYSCTL_CHILDREN(ctx_oid);
1239
1240		/* Tx Related */
1241
1242		SYSCTL_ADD_QUAD(ctx, node_children,
1243			OID_AUTO, "tx_pkts_processed",
1244			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
1245			"No. of packets processed for transmission");
1246
1247		SYSCTL_ADD_QUAD(ctx, node_children,
1248			OID_AUTO, "tx_pkts_freed",
1249			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
1250			"No. of freed packets");
1251
1252		SYSCTL_ADD_QUAD(ctx, node_children,
1253			OID_AUTO, "tx_pkts_transmitted",
1254			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
1255			"No. of transmitted packets");
1256
1257		SYSCTL_ADD_QUAD(ctx, node_children,
1258			OID_AUTO, "tx_pkts_completed",
1259			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
1260			"No. of transmit completions");
1261
1262                SYSCTL_ADD_QUAD(ctx, node_children,
1263                        OID_AUTO, "tx_non_tso_pkts",
1264                        CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
1265                        "No. of non LSO transmited packets");
1266
1267#ifdef QLNX_TRACE_PERF_DATA
1268
1269                SYSCTL_ADD_QUAD(ctx, node_children,
1270                        OID_AUTO, "tx_pkts_trans_ctx",
1271                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
1272                        "No. of transmitted packets in transmit context");
1273
1274                SYSCTL_ADD_QUAD(ctx, node_children,
1275                        OID_AUTO, "tx_pkts_compl_ctx",
1276                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
1277                        "No. of transmit completions in transmit context");
1278
1279                SYSCTL_ADD_QUAD(ctx, node_children,
1280                        OID_AUTO, "tx_pkts_trans_fp",
1281                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
1282                        "No. of transmitted packets in taskqueue");
1283
1284                SYSCTL_ADD_QUAD(ctx, node_children,
1285                        OID_AUTO, "tx_pkts_compl_fp",
1286                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
1287                        "No. of transmit completions in taskqueue");
1288
1289                SYSCTL_ADD_QUAD(ctx, node_children,
1290                        OID_AUTO, "tx_pkts_compl_intr",
1291                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
1292                        "No. of transmit completions in interrupt ctx");
1293#endif
1294
1295                SYSCTL_ADD_QUAD(ctx, node_children,
1296                        OID_AUTO, "tx_tso_pkts",
1297                        CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
1298                        "No. of LSO transmited packets");
1299
1300		SYSCTL_ADD_QUAD(ctx, node_children,
1301			OID_AUTO, "tx_lso_wnd_min_len",
1302			CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
1303			"tx_lso_wnd_min_len");
1304
1305		SYSCTL_ADD_QUAD(ctx, node_children,
1306			OID_AUTO, "tx_defrag",
1307			CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
1308			"tx_defrag");
1309
1310		SYSCTL_ADD_QUAD(ctx, node_children,
1311			OID_AUTO, "tx_nsegs_gt_elem_left",
1312			CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
1313			"tx_nsegs_gt_elem_left");
1314
1315		SYSCTL_ADD_UINT(ctx, node_children,
1316			OID_AUTO, "tx_tso_max_nsegs",
1317			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
1318			ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");
1319
1320		SYSCTL_ADD_UINT(ctx, node_children,
1321			OID_AUTO, "tx_tso_min_nsegs",
1322			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
1323			ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");
1324
1325		SYSCTL_ADD_UINT(ctx, node_children,
1326			OID_AUTO, "tx_tso_max_pkt_len",
1327			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
1328			ha->fp_array[i].tx_tso_max_pkt_len,
1329			"tx_tso_max_pkt_len");
1330
1331		SYSCTL_ADD_UINT(ctx, node_children,
1332			OID_AUTO, "tx_tso_min_pkt_len",
1333			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
1334			ha->fp_array[i].tx_tso_min_pkt_len,
1335			"tx_tso_min_pkt_len");
1336
1337		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
1338
1339			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1340			snprintf(name_str, sizeof(name_str),
1341				"tx_pkts_nseg_%02d", (j+1));
1342
1343			SYSCTL_ADD_QUAD(ctx, node_children,
1344				OID_AUTO, name_str, CTLFLAG_RD,
1345				&ha->fp_array[i].tx_pkts[j], name_str);
1346		}
1347
1348#ifdef QLNX_TRACE_PERF_DATA
1349                for (j = 0; j < 18; j++) {
1350
1351                        bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1352                        snprintf(name_str, sizeof(name_str),
1353                                "tx_pkts_hist_%02d", (j+1));
1354
1355                        SYSCTL_ADD_QUAD(ctx, node_children,
1356                                OID_AUTO, name_str, CTLFLAG_RD,
1357                                &ha->fp_array[i].tx_pkts_hist[j], name_str);
1358                }
1359                for (j = 0; j < 5; j++) {
1360
1361                        bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1362                        snprintf(name_str, sizeof(name_str),
1363                                "tx_comInt_%02d", (j+1));
1364
1365                        SYSCTL_ADD_QUAD(ctx, node_children,
1366                                OID_AUTO, name_str, CTLFLAG_RD,
1367                                &ha->fp_array[i].tx_comInt[j], name_str);
1368                }
1369                for (j = 0; j < 18; j++) {
1370
1371                        bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1372                        snprintf(name_str, sizeof(name_str),
1373                                "tx_pkts_q_%02d", (j+1));
1374
1375                        SYSCTL_ADD_QUAD(ctx, node_children,
1376                                OID_AUTO, name_str, CTLFLAG_RD,
1377                                &ha->fp_array[i].tx_pkts_q[j], name_str);
1378                }
1379#endif
1380
1381		SYSCTL_ADD_QUAD(ctx, node_children,
1382			OID_AUTO, "err_tx_nsegs_gt_elem_left",
1383			CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
1384			"err_tx_nsegs_gt_elem_left");
1385
1386		SYSCTL_ADD_QUAD(ctx, node_children,
1387			OID_AUTO, "err_tx_dmamap_create",
1388			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
1389			"err_tx_dmamap_create");
1390
1391		SYSCTL_ADD_QUAD(ctx, node_children,
1392			OID_AUTO, "err_tx_defrag_dmamap_load",
1393			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
1394			"err_tx_defrag_dmamap_load");
1395
1396		SYSCTL_ADD_QUAD(ctx, node_children,
1397			OID_AUTO, "err_tx_non_tso_max_seg",
1398			CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
1399			"err_tx_non_tso_max_seg");
1400
1401		SYSCTL_ADD_QUAD(ctx, node_children,
1402			OID_AUTO, "err_tx_dmamap_load",
1403			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
1404			"err_tx_dmamap_load");
1405
1406		SYSCTL_ADD_QUAD(ctx, node_children,
1407			OID_AUTO, "err_tx_defrag",
1408			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
1409			"err_tx_defrag");
1410
1411		SYSCTL_ADD_QUAD(ctx, node_children,
1412			OID_AUTO, "err_tx_free_pkt_null",
1413			CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
1414			"err_tx_free_pkt_null");
1415
1416		SYSCTL_ADD_QUAD(ctx, node_children,
1417			OID_AUTO, "err_tx_cons_idx_conflict",
1418			CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
1419			"err_tx_cons_idx_conflict");
1420
1421		SYSCTL_ADD_QUAD(ctx, node_children,
1422			OID_AUTO, "lro_cnt_64",
1423			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
1424			"lro_cnt_64");
1425
1426		SYSCTL_ADD_QUAD(ctx, node_children,
1427			OID_AUTO, "lro_cnt_128",
1428			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
1429			"lro_cnt_128");
1430
1431		SYSCTL_ADD_QUAD(ctx, node_children,
1432			OID_AUTO, "lro_cnt_256",
1433			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
1434			"lro_cnt_256");
1435
1436		SYSCTL_ADD_QUAD(ctx, node_children,
1437			OID_AUTO, "lro_cnt_512",
1438			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
1439			"lro_cnt_512");
1440
1441		SYSCTL_ADD_QUAD(ctx, node_children,
1442			OID_AUTO, "lro_cnt_1024",
1443			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
1444			"lro_cnt_1024");
1445
1446		/* Rx Related */
1447
1448		SYSCTL_ADD_QUAD(ctx, node_children,
1449			OID_AUTO, "rx_pkts",
1450			CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
1451			"No. of received packets");
1452
1453		SYSCTL_ADD_QUAD(ctx, node_children,
1454			OID_AUTO, "tpa_start",
1455			CTLFLAG_RD, &ha->fp_array[i].tpa_start,
1456			"No. of tpa_start packets");
1457
1458		SYSCTL_ADD_QUAD(ctx, node_children,
1459			OID_AUTO, "tpa_cont",
1460			CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
1461			"No. of tpa_cont packets");
1462
1463		SYSCTL_ADD_QUAD(ctx, node_children,
1464			OID_AUTO, "tpa_end",
1465			CTLFLAG_RD, &ha->fp_array[i].tpa_end,
1466			"No. of tpa_end packets");
1467
1468		SYSCTL_ADD_QUAD(ctx, node_children,
1469			OID_AUTO, "err_m_getcl",
1470			CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
1471			"err_m_getcl");
1472
1473		SYSCTL_ADD_QUAD(ctx, node_children,
1474			OID_AUTO, "err_m_getjcl",
1475			CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
1476			"err_m_getjcl");
1477
1478		SYSCTL_ADD_QUAD(ctx, node_children,
1479			OID_AUTO, "err_rx_hw_errors",
1480			CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
1481			"err_rx_hw_errors");
1482
1483		SYSCTL_ADD_QUAD(ctx, node_children,
1484			OID_AUTO, "err_rx_alloc_errors",
1485			CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
1486			"err_rx_alloc_errors");
1487	}
1488
1489	return;
1490}
1491
1492static void
1493qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
1494{
1495        struct sysctl_ctx_list	*ctx;
1496        struct sysctl_oid_list	*children;
1497	struct sysctl_oid	*ctx_oid;
1498
1499        ctx = device_get_sysctl_ctx(ha->pci_dev);
1500	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1501
1502	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
1503			CTLFLAG_RD, NULL, "hwstat");
1504        children = SYSCTL_CHILDREN(ctx_oid);
1505
1506	SYSCTL_ADD_QUAD(ctx, children,
1507                OID_AUTO, "no_buff_discards",
1508                CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
1509                "No. of packets discarded due to lack of buffer");
1510
1511	SYSCTL_ADD_QUAD(ctx, children,
1512                OID_AUTO, "packet_too_big_discard",
1513                CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
1514                "No. of packets discarded because packet was too big");
1515
1516	SYSCTL_ADD_QUAD(ctx, children,
1517                OID_AUTO, "ttl0_discard",
1518                CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
1519                "ttl0_discard");
1520
1521	SYSCTL_ADD_QUAD(ctx, children,
1522                OID_AUTO, "rx_ucast_bytes",
1523                CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
1524                "rx_ucast_bytes");
1525
1526	SYSCTL_ADD_QUAD(ctx, children,
1527                OID_AUTO, "rx_mcast_bytes",
1528                CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
1529                "rx_mcast_bytes");
1530
1531	SYSCTL_ADD_QUAD(ctx, children,
1532                OID_AUTO, "rx_bcast_bytes",
1533                CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
1534                "rx_bcast_bytes");
1535
1536	SYSCTL_ADD_QUAD(ctx, children,
1537                OID_AUTO, "rx_ucast_pkts",
1538                CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
1539                "rx_ucast_pkts");
1540
1541	SYSCTL_ADD_QUAD(ctx, children,
1542                OID_AUTO, "rx_mcast_pkts",
1543                CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
1544                "rx_mcast_pkts");
1545
1546	SYSCTL_ADD_QUAD(ctx, children,
1547                OID_AUTO, "rx_bcast_pkts",
1548                CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
1549                "rx_bcast_pkts");
1550
1551	SYSCTL_ADD_QUAD(ctx, children,
1552                OID_AUTO, "mftag_filter_discards",
1553                CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
1554                "mftag_filter_discards");
1555
1556	SYSCTL_ADD_QUAD(ctx, children,
1557                OID_AUTO, "mac_filter_discards",
1558                CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
1559                "mac_filter_discards");
1560
1561	SYSCTL_ADD_QUAD(ctx, children,
1562                OID_AUTO, "tx_ucast_bytes",
1563                CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
1564                "tx_ucast_bytes");
1565
1566	SYSCTL_ADD_QUAD(ctx, children,
1567                OID_AUTO, "tx_mcast_bytes",
1568                CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
1569                "tx_mcast_bytes");
1570
1571	SYSCTL_ADD_QUAD(ctx, children,
1572                OID_AUTO, "tx_bcast_bytes",
1573                CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
1574                "tx_bcast_bytes");
1575
1576	SYSCTL_ADD_QUAD(ctx, children,
1577                OID_AUTO, "tx_ucast_pkts",
1578                CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
1579                "tx_ucast_pkts");
1580
1581	SYSCTL_ADD_QUAD(ctx, children,
1582                OID_AUTO, "tx_mcast_pkts",
1583                CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
1584                "tx_mcast_pkts");
1585
1586	SYSCTL_ADD_QUAD(ctx, children,
1587                OID_AUTO, "tx_bcast_pkts",
1588                CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
1589                "tx_bcast_pkts");
1590
1591	SYSCTL_ADD_QUAD(ctx, children,
1592                OID_AUTO, "tx_err_drop_pkts",
1593                CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
1594                "tx_err_drop_pkts");
1595
1596	SYSCTL_ADD_QUAD(ctx, children,
1597                OID_AUTO, "tpa_coalesced_pkts",
1598                CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
1599                "tpa_coalesced_pkts");
1600
1601	SYSCTL_ADD_QUAD(ctx, children,
1602                OID_AUTO, "tpa_coalesced_events",
1603                CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
1604                "tpa_coalesced_events");
1605
1606	SYSCTL_ADD_QUAD(ctx, children,
1607                OID_AUTO, "tpa_aborts_num",
1608                CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
1609                "tpa_aborts_num");
1610
1611	SYSCTL_ADD_QUAD(ctx, children,
1612                OID_AUTO, "tpa_not_coalesced_pkts",
1613                CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
1614                "tpa_not_coalesced_pkts");
1615
1616	SYSCTL_ADD_QUAD(ctx, children,
1617                OID_AUTO, "tpa_coalesced_bytes",
1618                CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
1619                "tpa_coalesced_bytes");
1620
1621	SYSCTL_ADD_QUAD(ctx, children,
1622                OID_AUTO, "rx_64_byte_packets",
1623                CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
1624                "rx_64_byte_packets");
1625
1626	SYSCTL_ADD_QUAD(ctx, children,
1627                OID_AUTO, "rx_65_to_127_byte_packets",
1628                CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
1629                "rx_65_to_127_byte_packets");
1630
1631	SYSCTL_ADD_QUAD(ctx, children,
1632                OID_AUTO, "rx_128_to_255_byte_packets",
1633                CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
1634                "rx_128_to_255_byte_packets");
1635
1636	SYSCTL_ADD_QUAD(ctx, children,
1637                OID_AUTO, "rx_256_to_511_byte_packets",
1638                CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
1639                "rx_256_to_511_byte_packets");
1640
1641	SYSCTL_ADD_QUAD(ctx, children,
1642                OID_AUTO, "rx_512_to_1023_byte_packets",
1643                CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
1644                "rx_512_to_1023_byte_packets");
1645
1646	SYSCTL_ADD_QUAD(ctx, children,
1647                OID_AUTO, "rx_1024_to_1518_byte_packets",
1648                CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
1649                "rx_1024_to_1518_byte_packets");
1650
1651	SYSCTL_ADD_QUAD(ctx, children,
1652                OID_AUTO, "rx_1519_to_1522_byte_packets",
1653                CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
1654                "rx_1519_to_1522_byte_packets");
1655
1656	SYSCTL_ADD_QUAD(ctx, children,
1657                OID_AUTO, "rx_1523_to_2047_byte_packets",
1658                CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
1659                "rx_1523_to_2047_byte_packets");
1660
1661	SYSCTL_ADD_QUAD(ctx, children,
1662                OID_AUTO, "rx_2048_to_4095_byte_packets",
1663                CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
1664                "rx_2048_to_4095_byte_packets");
1665
1666	SYSCTL_ADD_QUAD(ctx, children,
1667                OID_AUTO, "rx_4096_to_9216_byte_packets",
1668                CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
1669                "rx_4096_to_9216_byte_packets");
1670
1671	SYSCTL_ADD_QUAD(ctx, children,
1672                OID_AUTO, "rx_9217_to_16383_byte_packets",
1673                CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
1674                "rx_9217_to_16383_byte_packets");
1675
1676	SYSCTL_ADD_QUAD(ctx, children,
1677                OID_AUTO, "rx_crc_errors",
1678                CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
1679                "rx_crc_errors");
1680
1681	SYSCTL_ADD_QUAD(ctx, children,
1682                OID_AUTO, "rx_mac_crtl_frames",
1683                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
1684                "rx_mac_crtl_frames");
1685
1686	SYSCTL_ADD_QUAD(ctx, children,
1687                OID_AUTO, "rx_pause_frames",
1688                CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
1689                "rx_pause_frames");
1690
1691	SYSCTL_ADD_QUAD(ctx, children,
1692                OID_AUTO, "rx_pfc_frames",
1693                CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
1694                "rx_pfc_frames");
1695
1696	SYSCTL_ADD_QUAD(ctx, children,
1697                OID_AUTO, "rx_align_errors",
1698                CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
1699                "rx_align_errors");
1700
1701	SYSCTL_ADD_QUAD(ctx, children,
1702                OID_AUTO, "rx_carrier_errors",
1703                CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
1704                "rx_carrier_errors");
1705
1706	SYSCTL_ADD_QUAD(ctx, children,
1707                OID_AUTO, "rx_oversize_packets",
1708                CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
1709                "rx_oversize_packets");
1710
1711	SYSCTL_ADD_QUAD(ctx, children,
1712                OID_AUTO, "rx_jabbers",
1713                CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
1714                "rx_jabbers");
1715
1716	SYSCTL_ADD_QUAD(ctx, children,
1717                OID_AUTO, "rx_undersize_packets",
1718                CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
1719                "rx_undersize_packets");
1720
1721	SYSCTL_ADD_QUAD(ctx, children,
1722                OID_AUTO, "rx_fragments",
1723                CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
1724                "rx_fragments");
1725
1726	SYSCTL_ADD_QUAD(ctx, children,
1727                OID_AUTO, "tx_64_byte_packets",
1728                CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
1729                "tx_64_byte_packets");
1730
1731	SYSCTL_ADD_QUAD(ctx, children,
1732                OID_AUTO, "tx_65_to_127_byte_packets",
1733                CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
1734                "tx_65_to_127_byte_packets");
1735
1736	SYSCTL_ADD_QUAD(ctx, children,
1737                OID_AUTO, "tx_128_to_255_byte_packets",
1738                CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
1739                "tx_128_to_255_byte_packets");
1740
1741	SYSCTL_ADD_QUAD(ctx, children,
1742                OID_AUTO, "tx_256_to_511_byte_packets",
1743                CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
1744                "tx_256_to_511_byte_packets");
1745
1746	SYSCTL_ADD_QUAD(ctx, children,
1747                OID_AUTO, "tx_512_to_1023_byte_packets",
1748                CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
1749                "tx_512_to_1023_byte_packets");
1750
1751	SYSCTL_ADD_QUAD(ctx, children,
1752                OID_AUTO, "tx_1024_to_1518_byte_packets",
1753                CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
1754                "tx_1024_to_1518_byte_packets");
1755
1756	SYSCTL_ADD_QUAD(ctx, children,
1757                OID_AUTO, "tx_1519_to_2047_byte_packets",
1758                CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
1759                "tx_1519_to_2047_byte_packets");
1760
1761	SYSCTL_ADD_QUAD(ctx, children,
1762                OID_AUTO, "tx_2048_to_4095_byte_packets",
1763                CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
1764                "tx_2048_to_4095_byte_packets");
1765
1766	SYSCTL_ADD_QUAD(ctx, children,
1767                OID_AUTO, "tx_4096_to_9216_byte_packets",
1768                CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
1769                "tx_4096_to_9216_byte_packets");
1770
1771	SYSCTL_ADD_QUAD(ctx, children,
1772                OID_AUTO, "tx_9217_to_16383_byte_packets",
1773                CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
1774                "tx_9217_to_16383_byte_packets");
1775
1776	SYSCTL_ADD_QUAD(ctx, children,
1777                OID_AUTO, "tx_pause_frames",
1778                CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
1779                "tx_pause_frames");
1780
1781	SYSCTL_ADD_QUAD(ctx, children,
1782                OID_AUTO, "tx_pfc_frames",
1783                CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
1784                "tx_pfc_frames");
1785
1786	SYSCTL_ADD_QUAD(ctx, children,
1787                OID_AUTO, "tx_lpi_entry_count",
1788                CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
1789                "tx_lpi_entry_count");
1790
1791	SYSCTL_ADD_QUAD(ctx, children,
1792                OID_AUTO, "tx_total_collisions",
1793                CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
1794                "tx_total_collisions");
1795
1796	SYSCTL_ADD_QUAD(ctx, children,
1797                OID_AUTO, "brb_truncates",
1798                CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
1799                "brb_truncates");
1800
1801	SYSCTL_ADD_QUAD(ctx, children,
1802                OID_AUTO, "brb_discards",
1803                CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
1804                "brb_discards");
1805
1806	SYSCTL_ADD_QUAD(ctx, children,
1807                OID_AUTO, "rx_mac_bytes",
1808                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
1809                "rx_mac_bytes");
1810
1811	SYSCTL_ADD_QUAD(ctx, children,
1812                OID_AUTO, "rx_mac_uc_packets",
1813                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
1814                "rx_mac_uc_packets");
1815
1816	SYSCTL_ADD_QUAD(ctx, children,
1817                OID_AUTO, "rx_mac_mc_packets",
1818                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
1819                "rx_mac_mc_packets");
1820
1821	SYSCTL_ADD_QUAD(ctx, children,
1822                OID_AUTO, "rx_mac_bc_packets",
1823                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
1824                "rx_mac_bc_packets");
1825
1826	SYSCTL_ADD_QUAD(ctx, children,
1827                OID_AUTO, "rx_mac_frames_ok",
1828                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
1829                "rx_mac_frames_ok");
1830
1831	SYSCTL_ADD_QUAD(ctx, children,
1832                OID_AUTO, "tx_mac_bytes",
1833                CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
1834                "tx_mac_bytes");
1835
1836	SYSCTL_ADD_QUAD(ctx, children,
1837                OID_AUTO, "tx_mac_uc_packets",
1838                CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
1839                "tx_mac_uc_packets");
1840
1841	SYSCTL_ADD_QUAD(ctx, children,
1842                OID_AUTO, "tx_mac_mc_packets",
1843                CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
1844                "tx_mac_mc_packets");
1845
1846	SYSCTL_ADD_QUAD(ctx, children,
1847                OID_AUTO, "tx_mac_bc_packets",
1848                CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
1849                "tx_mac_bc_packets");
1850
1851	SYSCTL_ADD_QUAD(ctx, children,
1852                OID_AUTO, "tx_mac_ctrl_frames",
1853                CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
1854                "tx_mac_ctrl_frames");
1855	return;
1856}
1857
1858static void
1859qlnx_add_sysctls(qlnx_host_t *ha)
1860{
1861        device_t		dev = ha->pci_dev;
1862	struct sysctl_ctx_list	*ctx;
1863	struct sysctl_oid_list	*children;
1864
1865	ctx = device_get_sysctl_ctx(dev);
1866	children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
1867
1868	qlnx_add_fp_stats_sysctls(ha);
1869	qlnx_add_sp_stats_sysctls(ha);
1870	qlnx_add_hw_stats_sysctls(ha);
1871
1872	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
1873		CTLFLAG_RD, qlnx_ver_str, 0,
1874		"Driver Version");
1875
1876	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
1877		CTLFLAG_RD, ha->stormfw_ver, 0,
1878		"STORM Firmware Version");
1879
1880	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
1881		CTLFLAG_RD, ha->mfw_ver, 0,
1882		"Management Firmware Version");
1883
1884        SYSCTL_ADD_UINT(ctx, children,
1885                OID_AUTO, "personality", CTLFLAG_RD,
1886                &ha->personality, ha->personality,
1887		"\tpersonality = 0 => Ethernet Only\n"
1888		"\tpersonality = 3 => Ethernet and RoCE\n"
1889		"\tpersonality = 4 => Ethernet and iWARP\n"
1890		"\tpersonality = 6 => Default in Shared Memory\n");
1891
1892        ha->dbg_level = 0;
1893        SYSCTL_ADD_UINT(ctx, children,
1894                OID_AUTO, "debug", CTLFLAG_RW,
1895                &ha->dbg_level, ha->dbg_level, "Debug Level");
1896
1897        ha->dp_level = 0x01;
1898        SYSCTL_ADD_UINT(ctx, children,
1899                OID_AUTO, "dp_level", CTLFLAG_RW,
1900                &ha->dp_level, ha->dp_level, "DP Level");
1901
1902        ha->dbg_trace_lro_cnt = 0;
1903        SYSCTL_ADD_UINT(ctx, children,
1904                OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
1905                &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
1906		"Trace LRO Counts");
1907
1908        ha->dbg_trace_tso_pkt_len = 0;
1909        SYSCTL_ADD_UINT(ctx, children,
1910                OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
1911                &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
1912		"Trace TSO packet lengths");
1913
1914        ha->dp_module = 0;
1915        SYSCTL_ADD_UINT(ctx, children,
1916                OID_AUTO, "dp_module", CTLFLAG_RW,
1917                &ha->dp_module, ha->dp_module, "DP Module");
1918
1919        ha->err_inject = 0;
1920
1921        SYSCTL_ADD_UINT(ctx, children,
1922                OID_AUTO, "err_inject", CTLFLAG_RW,
1923                &ha->err_inject, ha->err_inject, "Error Inject");
1924
1925        ha->storm_stats_enable = 0;
1926
1927        SYSCTL_ADD_UINT(ctx, children,
1928                OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
1929                &ha->storm_stats_enable, ha->storm_stats_enable,
1930		"Enable Storm Statistics Gathering");
1931
1932        ha->storm_stats_index = 0;
1933
1934        SYSCTL_ADD_UINT(ctx, children,
1935                OID_AUTO, "storm_stats_index", CTLFLAG_RD,
1936                &ha->storm_stats_index, ha->storm_stats_index,
1937		"Enable Storm Statistics Gathering Current Index");
1938
1939        ha->grcdump_taken = 0;
1940        SYSCTL_ADD_UINT(ctx, children,
1941                OID_AUTO, "grcdump_taken", CTLFLAG_RD,
1942                &ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken");
1943
1944        ha->idle_chk_taken = 0;
1945        SYSCTL_ADD_UINT(ctx, children,
1946                OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
1947                &ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken");
1948
1949        SYSCTL_ADD_UINT(ctx, children,
1950                OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
1951                &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
1952		"rx_coalesce_usecs");
1953
1954        SYSCTL_ADD_UINT(ctx, children,
1955                OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
1956                &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
1957		"tx_coalesce_usecs");
1958
1959	ha->rx_pkt_threshold = 128;
1960        SYSCTL_ADD_UINT(ctx, children,
1961                OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
1962                &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
1963		"No. of Rx Pkts to process at a time");
1964
1965	ha->rx_jumbo_buf_eq_mtu = 0;
1966        SYSCTL_ADD_UINT(ctx, children,
1967                OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
1968                &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
1969		"== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
1970		"otherwise Rx Jumbo buffers are set to >= MTU size\n");
1971
1972	SYSCTL_ADD_PROC(ctx, children,
1973		OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW,
1974		(void *)ha, 0,
1975		qlnx_trigger_dump_sysctl, "I", "trigger_dump");
1976
1977	SYSCTL_ADD_PROC(ctx, children,
1978		OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
1979		(void *)ha, 0,
1980		qlnx_set_rx_coalesce, "I",
1981		"rx interrupt coalesce period microseconds");
1982
1983	SYSCTL_ADD_PROC(ctx, children,
1984		OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
1985		(void *)ha, 0,
1986		qlnx_set_tx_coalesce, "I",
1987		"tx interrupt coalesce period microseconds");
1988
1989	SYSCTL_ADD_QUAD(ctx, children,
1990                OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
1991		&ha->err_illegal_intr, "err_illegal_intr");
1992
1993	SYSCTL_ADD_QUAD(ctx, children,
1994                OID_AUTO, "err_fp_null", CTLFLAG_RD,
1995		&ha->err_fp_null, "err_fp_null");
1996
1997	SYSCTL_ADD_QUAD(ctx, children,
1998                OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
1999		&ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2000	return;
2001}
2002
2003
2004
2005/*****************************************************************************
2006 * Operating System Network Interface Functions
2007 *****************************************************************************/
2008
2009static void
2010qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2011{
2012	uint16_t	device_id;
2013        struct ifnet	*ifp;
2014
2015        ifp = ha->ifp = if_alloc(IFT_ETHER);
2016
2017        if (ifp == NULL)
2018                panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
2019
2020        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2021
2022	device_id = pci_get_device(ha->pci_dev);
2023
2024#if __FreeBSD_version >= 1000000
2025
2026        if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2027		ifp->if_baudrate = IF_Gbps(40);
2028        else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2029			(device_id == QLOGIC_PCI_DEVICE_ID_8070))
2030		ifp->if_baudrate = IF_Gbps(25);
2031        else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2032		ifp->if_baudrate = IF_Gbps(50);
2033        else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2034		ifp->if_baudrate = IF_Gbps(100);
2035
2036        ifp->if_capabilities = IFCAP_LINKSTATE;
2037#else
2038        ifp->if_mtu = ETHERMTU;
2039	ifp->if_baudrate = (1 * 1000 * 1000 *1000);
2040
2041#endif /* #if __FreeBSD_version >= 1000000 */
2042
2043        ifp->if_init = qlnx_init;
2044        ifp->if_softc = ha;
2045        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2046        ifp->if_ioctl = qlnx_ioctl;
2047        ifp->if_transmit = qlnx_transmit;
2048        ifp->if_qflush = qlnx_qflush;
2049
2050        IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
2051        ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
2052        IFQ_SET_READY(&ifp->if_snd);
2053
2054#if __FreeBSD_version >= 1100036
2055	if_setgetcounterfn(ifp, qlnx_get_counter);
2056#endif
2057
2058        ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2059
2060        memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2061        ether_ifattach(ifp, ha->primary_mac);
2062	bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2063
2064	ifp->if_capabilities = IFCAP_HWCSUM;
2065	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2066
2067	ifp->if_capabilities |= IFCAP_VLAN_MTU;
2068	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2069	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2070	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2071	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2072	ifp->if_capabilities |= IFCAP_TSO4;
2073	ifp->if_capabilities |= IFCAP_TSO6;
2074	ifp->if_capabilities |= IFCAP_LRO;
2075
2076	ifp->if_hw_tsomax =  QLNX_MAX_TSO_FRAME_SIZE -
2077				(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2078	ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */;
2079	ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE;
2080
2081
2082        ifp->if_capenable = ifp->if_capabilities;
2083
2084	ifp->if_hwassist = CSUM_IP;
2085	ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
2086	ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
2087	ifp->if_hwassist |= CSUM_TSO;
2088
2089	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2090
2091        ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
2092		qlnx_media_status);
2093
2094        if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2095		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2096		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2097		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2098        } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2099			(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2100		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2101		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2102        } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2103		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2104		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2105        } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2106		ifmedia_add(&ha->media,
2107			(IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2108		ifmedia_add(&ha->media,
2109			(IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2110		ifmedia_add(&ha->media,
2111			(IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2112	}
2113
2114        ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2115        ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2116
2117
2118        ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2119
2120        QL_DPRINT2(ha, "exit\n");
2121
2122        return;
2123}
2124
2125static void
2126qlnx_init_locked(qlnx_host_t *ha)
2127{
2128	struct ifnet	*ifp = ha->ifp;
2129
2130	QL_DPRINT1(ha, "Driver Initialization start \n");
2131
2132	qlnx_stop(ha);
2133
2134	if (qlnx_load(ha) == 0) {
2135		ifp->if_drv_flags |= IFF_DRV_RUNNING;
2136		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2137	}
2138
2139	return;
2140}
2141
2142static void
2143qlnx_init(void *arg)
2144{
2145	qlnx_host_t	*ha;
2146
2147	ha = (qlnx_host_t *)arg;
2148
2149	QL_DPRINT2(ha, "enter\n");
2150
2151	QLNX_LOCK(ha);
2152	qlnx_init_locked(ha);
2153	QLNX_UNLOCK(ha);
2154
2155	QL_DPRINT2(ha, "exit\n");
2156
2157	return;
2158}
2159
2160static int
2161qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2162{
2163	struct ecore_filter_mcast	*mcast;
2164	struct ecore_dev		*cdev;
2165	int				rc;
2166
2167	cdev = &ha->cdev;
2168
2169	mcast = &ha->ecore_mcast;
2170	bzero(mcast, sizeof(struct ecore_filter_mcast));
2171
2172	if (add_mac)
2173		mcast->opcode = ECORE_FILTER_ADD;
2174	else
2175		mcast->opcode = ECORE_FILTER_REMOVE;
2176
2177	mcast->num_mc_addrs = 1;
2178	memcpy(mcast->mac, mac_addr, ETH_ALEN);
2179
2180	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2181
2182	return (rc);
2183}
2184
2185static int
2186qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2187{
2188        int	i;
2189
2190        for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2191
2192                if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2193                        return 0; /* its been already added */
2194        }
2195
2196        for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2197
2198                if ((ha->mcast[i].addr[0] == 0) &&
2199                        (ha->mcast[i].addr[1] == 0) &&
2200                        (ha->mcast[i].addr[2] == 0) &&
2201                        (ha->mcast[i].addr[3] == 0) &&
2202                        (ha->mcast[i].addr[4] == 0) &&
2203                        (ha->mcast[i].addr[5] == 0)) {
2204
2205                        if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2206                                return (-1);
2207
2208                        bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2209                        ha->nmcast++;
2210
2211                        return 0;
2212                }
2213        }
2214        return 0;
2215}
2216
2217static int
2218qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2219{
2220        int	i;
2221
2222        for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2223                if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2224
2225                        if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2226                                return (-1);
2227
2228                        ha->mcast[i].addr[0] = 0;
2229                        ha->mcast[i].addr[1] = 0;
2230                        ha->mcast[i].addr[2] = 0;
2231                        ha->mcast[i].addr[3] = 0;
2232                        ha->mcast[i].addr[4] = 0;
2233                        ha->mcast[i].addr[5] = 0;
2234
2235                        ha->nmcast--;
2236
2237                        return 0;
2238                }
2239        }
2240        return 0;
2241}
2242
2243/*
2244 * Name: qls_hw_set_multi
2245 * Function: Sets the Multicast Addresses provided the host O.S into the
2246 *      hardware (for the given interface)
2247 */
2248static void
2249qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2250	uint32_t add_mac)
2251{
2252        int	i;
2253
2254        for (i = 0; i < mcnt; i++) {
2255                if (add_mac) {
2256                        if (qlnx_hw_add_mcast(ha, mta))
2257                                break;
2258                } else {
2259                        if (qlnx_hw_del_mcast(ha, mta))
2260                                break;
2261                }
2262
2263                mta += ETHER_HDR_LEN;
2264        }
2265        return;
2266}
2267
2268
2269#define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN)
2270static int
2271qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2272{
2273	uint8_t			mta[QLNX_MCAST_ADDRS_SIZE];
2274	struct ifmultiaddr	*ifma;
2275	int			mcnt = 0;
2276	struct ifnet		*ifp = ha->ifp;
2277	int			ret = 0;
2278
2279	if_maddr_rlock(ifp);
2280
2281	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2282
2283		if (ifma->ifma_addr->sa_family != AF_LINK)
2284			continue;
2285
2286		if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2287			break;
2288
2289		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2290			&mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2291
2292		mcnt++;
2293	}
2294
2295	if_maddr_runlock(ifp);
2296
2297	QLNX_LOCK(ha);
2298	qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2299	QLNX_UNLOCK(ha);
2300
2301	return (ret);
2302}
2303
2304static int
2305qlnx_set_promisc(qlnx_host_t *ha)
2306{
2307	int	rc = 0;
2308	uint8_t	filter;
2309
2310	filter = ha->filter;
2311	filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2312	filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2313
2314	rc = qlnx_set_rx_accept_filter(ha, filter);
2315	return (rc);
2316}
2317
2318static int
2319qlnx_set_allmulti(qlnx_host_t *ha)
2320{
2321	int	rc = 0;
2322	uint8_t	filter;
2323
2324	filter = ha->filter;
2325	filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2326	rc = qlnx_set_rx_accept_filter(ha, filter);
2327
2328	return (rc);
2329}
2330
2331
2332static int
2333qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2334{
2335	int		ret = 0, mask;
2336	struct ifreq	*ifr = (struct ifreq *)data;
2337	struct ifaddr	*ifa = (struct ifaddr *)data;
2338	qlnx_host_t	*ha;
2339
2340	ha = (qlnx_host_t *)ifp->if_softc;
2341
2342	switch (cmd) {
2343	case SIOCSIFADDR:
2344		QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2345
2346		if (ifa->ifa_addr->sa_family == AF_INET) {
2347			ifp->if_flags |= IFF_UP;
2348			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2349				QLNX_LOCK(ha);
2350				qlnx_init_locked(ha);
2351				QLNX_UNLOCK(ha);
2352			}
2353			QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2354				   cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2355
2356			arp_ifinit(ifp, ifa);
2357		} else {
2358			ether_ioctl(ifp, cmd, data);
2359		}
2360		break;
2361
2362	case SIOCSIFMTU:
2363		QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2364
2365		if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2366			ret = EINVAL;
2367		} else {
2368			QLNX_LOCK(ha);
2369			ifp->if_mtu = ifr->ifr_mtu;
2370			ha->max_frame_size =
2371				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2372			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2373				qlnx_init_locked(ha);
2374			}
2375
2376			QLNX_UNLOCK(ha);
2377		}
2378
2379		break;
2380
2381	case SIOCSIFFLAGS:
2382		QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2383
2384		QLNX_LOCK(ha);
2385
2386		if (ifp->if_flags & IFF_UP) {
2387			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2388				if ((ifp->if_flags ^ ha->if_flags) &
2389					IFF_PROMISC) {
2390					ret = qlnx_set_promisc(ha);
2391				} else if ((ifp->if_flags ^ ha->if_flags) &
2392					IFF_ALLMULTI) {
2393					ret = qlnx_set_allmulti(ha);
2394				}
2395			} else {
2396				ha->max_frame_size = ifp->if_mtu +
2397					ETHER_HDR_LEN + ETHER_CRC_LEN;
2398				qlnx_init_locked(ha);
2399			}
2400		} else {
2401			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2402				qlnx_stop(ha);
2403			ha->if_flags = ifp->if_flags;
2404		}
2405
2406		QLNX_UNLOCK(ha);
2407		break;
2408
2409	case SIOCADDMULTI:
2410		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2411
2412		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2413			if (qlnx_set_multi(ha, 1))
2414				ret = EINVAL;
2415		}
2416		break;
2417
2418	case SIOCDELMULTI:
2419		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2420
2421		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2422			if (qlnx_set_multi(ha, 0))
2423				ret = EINVAL;
2424		}
2425		break;
2426
2427	case SIOCSIFMEDIA:
2428	case SIOCGIFMEDIA:
2429		QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2430
2431		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2432		break;
2433
2434	case SIOCSIFCAP:
2435
2436		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2437
2438		QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2439
2440		if (mask & IFCAP_HWCSUM)
2441			ifp->if_capenable ^= IFCAP_HWCSUM;
2442		if (mask & IFCAP_TSO4)
2443			ifp->if_capenable ^= IFCAP_TSO4;
2444		if (mask & IFCAP_TSO6)
2445			ifp->if_capenable ^= IFCAP_TSO6;
2446		if (mask & IFCAP_VLAN_HWTAGGING)
2447			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2448		if (mask & IFCAP_VLAN_HWTSO)
2449			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2450		if (mask & IFCAP_LRO)
2451			ifp->if_capenable ^= IFCAP_LRO;
2452
2453		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2454			qlnx_init(ha);
2455
2456		VLAN_CAPABILITIES(ifp);
2457		break;
2458
2459#if (__FreeBSD_version >= 1100101)
2460
2461	case SIOCGI2C:
2462	{
2463		struct ifi2creq i2c;
2464		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2465		struct ecore_ptt *p_ptt;
2466
2467		ret = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
2468
2469		if (ret)
2470			break;
2471
2472		if ((i2c.len > sizeof (i2c.data)) ||
2473			(i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2474			ret = EINVAL;
2475			break;
2476		}
2477
2478		p_ptt = ecore_ptt_acquire(p_hwfn);
2479
2480		if (!p_ptt) {
2481			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2482			ret = -1;
2483			break;
2484		}
2485
2486		ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2487			(ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2488			i2c.len, &i2c.data[0]);
2489
2490		ecore_ptt_release(p_hwfn, p_ptt);
2491
2492		if (ret) {
2493			ret = -1;
2494			break;
2495		}
2496
2497		ret = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
2498
2499		QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2500			 len = %d addr = 0x%02x offset = 0x%04x \
2501			 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2502			 0x%02x 0x%02x 0x%02x\n",
2503			ret, i2c.len, i2c.dev_addr, i2c.offset,
2504			i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2505			i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2506		break;
2507	}
2508#endif /* #if (__FreeBSD_version >= 1100101) */
2509
2510	default:
2511		QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2512		ret = ether_ioctl(ifp, cmd, data);
2513		break;
2514	}
2515
2516	return (ret);
2517}
2518
2519static int
2520qlnx_media_change(struct ifnet *ifp)
2521{
2522	qlnx_host_t	*ha;
2523	struct ifmedia	*ifm;
2524	int		ret = 0;
2525
2526	ha = (qlnx_host_t *)ifp->if_softc;
2527
2528	QL_DPRINT2(ha, "enter\n");
2529
2530	ifm = &ha->media;
2531
2532	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2533		ret = EINVAL;
2534
2535	QL_DPRINT2(ha, "exit\n");
2536
2537	return (ret);
2538}
2539
2540static void
2541qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2542{
2543	qlnx_host_t		*ha;
2544
2545	ha = (qlnx_host_t *)ifp->if_softc;
2546
2547	QL_DPRINT2(ha, "enter\n");
2548
2549	ifmr->ifm_status = IFM_AVALID;
2550	ifmr->ifm_active = IFM_ETHER;
2551
2552	if (ha->link_up) {
2553		ifmr->ifm_status |= IFM_ACTIVE;
2554		ifmr->ifm_active |=
2555			(IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2556
2557		if (ha->if_link.link_partner_caps &
2558			(QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2559			ifmr->ifm_active |=
2560				(IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2561	}
2562
2563	QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2564
2565	return;
2566}
2567
2568
2569static void
2570qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2571	struct qlnx_tx_queue *txq)
2572{
2573	u16			idx;
2574	struct mbuf		*mp;
2575	bus_dmamap_t		map;
2576	int			i;
2577	struct eth_tx_bd	*tx_data_bd;
2578	struct eth_tx_1st_bd	*first_bd;
2579	int			nbds = 0;
2580
2581	idx = txq->sw_tx_cons;
2582	mp = txq->sw_tx_ring[idx].mp;
2583	map = txq->sw_tx_ring[idx].map;
2584
2585	if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2586
2587		QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2588
2589		QL_DPRINT1(ha, "(mp == NULL) "
2590			" tx_idx = 0x%x"
2591			" ecore_prod_idx = 0x%x"
2592			" ecore_cons_idx = 0x%x"
2593			" hw_bd_cons = 0x%x"
2594			" txq_db_last = 0x%x"
2595			" elem_left = 0x%x\n",
2596			fp->rss_id,
2597			ecore_chain_get_prod_idx(&txq->tx_pbl),
2598			ecore_chain_get_cons_idx(&txq->tx_pbl),
2599			le16toh(*txq->hw_cons_ptr),
2600			txq->tx_db.raw,
2601			ecore_chain_get_elem_left(&txq->tx_pbl));
2602
2603		fp->err_tx_free_pkt_null++;
2604
2605		//DEBUG
2606		qlnx_trigger_dump(ha);
2607
2608		return;
2609	} else {
2610
2611		QLNX_INC_OPACKETS((ha->ifp));
2612		QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2613
2614		bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2615		bus_dmamap_unload(ha->tx_tag, map);
2616
2617		fp->tx_pkts_freed++;
2618		fp->tx_pkts_completed++;
2619
2620		m_freem(mp);
2621	}
2622
2623	first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2624	nbds = first_bd->data.nbds;
2625
2626//	BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2627
2628	for (i = 1; i < nbds; i++) {
2629		tx_data_bd = ecore_chain_consume(&txq->tx_pbl);
2630//		BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2631	}
2632	txq->sw_tx_ring[idx].flags = 0;
2633	txq->sw_tx_ring[idx].mp = NULL;
2634	txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2635
2636	return;
2637}
2638
2639static void
2640qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2641	struct qlnx_tx_queue *txq)
2642{
2643	u16 hw_bd_cons;
2644	u16 ecore_cons_idx;
2645	uint16_t diff;
2646	uint16_t idx, idx2;
2647
2648	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2649
2650	while (hw_bd_cons !=
2651		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2652
2653		if (hw_bd_cons < ecore_cons_idx) {
2654			diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
2655		} else {
2656			diff = hw_bd_cons - ecore_cons_idx;
2657		}
2658		if ((diff > TX_RING_SIZE) ||
2659			QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2660
2661			QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2662
2663			QL_DPRINT1(ha, "(diff = 0x%x) "
2664				" tx_idx = 0x%x"
2665				" ecore_prod_idx = 0x%x"
2666				" ecore_cons_idx = 0x%x"
2667				" hw_bd_cons = 0x%x"
2668				" txq_db_last = 0x%x"
2669				" elem_left = 0x%x\n",
2670				diff,
2671				fp->rss_id,
2672				ecore_chain_get_prod_idx(&txq->tx_pbl),
2673				ecore_chain_get_cons_idx(&txq->tx_pbl),
2674				le16toh(*txq->hw_cons_ptr),
2675				txq->tx_db.raw,
2676				ecore_chain_get_elem_left(&txq->tx_pbl));
2677
2678			fp->err_tx_cons_idx_conflict++;
2679
2680			//DEBUG
2681			qlnx_trigger_dump(ha);
2682		}
2683
2684		idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2685		idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
2686		prefetch(txq->sw_tx_ring[idx].mp);
2687		prefetch(txq->sw_tx_ring[idx2].mp);
2688
2689		qlnx_free_tx_pkt(ha, fp, txq);
2690
2691		txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2692	}
2693	return;
2694}
2695
2696static int
2697qlnx_transmit_locked(struct ifnet *ifp,struct qlnx_fastpath  *fp, struct mbuf  *mp)
2698{
2699        int                     ret = 0;
2700        struct qlnx_tx_queue    *txq;
2701        qlnx_host_t *           ha;
2702        uint16_t elem_left;
2703
2704        txq = fp->txq[0];
2705        ha = (qlnx_host_t *)fp->edev;
2706
2707
2708        if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) {
2709                if(mp != NULL)
2710                        ret = drbr_enqueue(ifp, fp->tx_br, mp);
2711                return (ret);
2712        }
2713
2714        if(mp != NULL)
2715                ret  = drbr_enqueue(ifp, fp->tx_br, mp);
2716
2717        mp = drbr_peek(ifp, fp->tx_br);
2718
2719        while (mp != NULL) {
2720
2721                if (qlnx_send(ha, fp, &mp)) {
2722
2723                        if (mp != NULL) {
2724                                drbr_putback(ifp, fp->tx_br, mp);
2725                        } else {
2726                                fp->tx_pkts_processed++;
2727                                drbr_advance(ifp, fp->tx_br);
2728                        }
2729                        goto qlnx_transmit_locked_exit;
2730
2731                } else {
2732                        drbr_advance(ifp, fp->tx_br);
2733                        fp->tx_pkts_transmitted++;
2734                        fp->tx_pkts_processed++;
2735                }
2736
2737                mp = drbr_peek(ifp, fp->tx_br);
2738        }
2739
2740qlnx_transmit_locked_exit:
2741        if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
2742                ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
2743                                        < QLNX_TX_ELEM_MAX_THRESH))
2744                (void)qlnx_tx_int(ha, fp, fp->txq[0]);
2745
2746        QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
2747        return ret;
2748}
2749
2750
2751static int
2752qlnx_transmit(struct ifnet *ifp, struct mbuf  *mp)
2753{
2754        qlnx_host_t		*ha = (qlnx_host_t *)ifp->if_softc;
2755        struct qlnx_fastpath	*fp;
2756        int			rss_id = 0, ret = 0;
2757
2758#ifdef QLNX_TRACEPERF_DATA
2759        uint64_t tx_pkts = 0, tx_compl = 0;
2760#endif
2761
2762        QL_DPRINT2(ha, "enter\n");
2763
2764#if __FreeBSD_version >= 1100000
2765        if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
2766#else
2767        if (mp->m_flags & M_FLOWID)
2768#endif
2769                rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
2770					ha->num_rss;
2771
2772        fp = &ha->fp_array[rss_id];
2773
2774        if (fp->tx_br == NULL) {
2775                ret = EINVAL;
2776                goto qlnx_transmit_exit;
2777        }
2778
2779        if (mtx_trylock(&fp->tx_mtx)) {
2780
2781#ifdef QLNX_TRACEPERF_DATA
2782                        tx_pkts = fp->tx_pkts_transmitted;
2783                        tx_compl = fp->tx_pkts_completed;
2784#endif
2785
2786                        ret = qlnx_transmit_locked(ifp, fp, mp);
2787
2788#ifdef QLNX_TRACEPERF_DATA
2789                        fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
2790                        fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
2791#endif
2792                        mtx_unlock(&fp->tx_mtx);
2793        } else {
2794                if (mp != NULL && (fp->fp_taskqueue != NULL)) {
2795                        ret = drbr_enqueue(ifp, fp->tx_br, mp);
2796                        taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
2797                }
2798        }
2799
2800qlnx_transmit_exit:
2801
2802        QL_DPRINT2(ha, "exit ret = %d\n", ret);
2803        return ret;
2804}
2805
2806static void
2807qlnx_qflush(struct ifnet *ifp)
2808{
2809	int			rss_id;
2810	struct qlnx_fastpath	*fp;
2811	struct mbuf		*mp;
2812	qlnx_host_t		*ha;
2813
2814	ha = (qlnx_host_t *)ifp->if_softc;
2815
2816	QL_DPRINT2(ha, "enter\n");
2817
2818	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
2819
2820		fp = &ha->fp_array[rss_id];
2821
2822		if (fp == NULL)
2823			continue;
2824
2825		if (fp->tx_br) {
2826			mtx_lock(&fp->tx_mtx);
2827
2828			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
2829				fp->tx_pkts_freed++;
2830				m_freem(mp);
2831			}
2832			mtx_unlock(&fp->tx_mtx);
2833		}
2834	}
2835	QL_DPRINT2(ha, "exit\n");
2836
2837	return;
2838}
2839
2840static void
2841qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
2842{
2843	struct ecore_dev	*cdev;
2844	uint32_t		offset;
2845
2846	cdev = &ha->cdev;
2847
2848	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)cdev->doorbells);
2849
2850	bus_write_4(ha->pci_dbells, offset, value);
2851	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_READ);
2852	bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
2853
2854	return;
2855}
2856
2857static uint32_t
2858qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
2859{
2860        struct ether_vlan_header	*eh = NULL;
2861        struct ip			*ip = NULL;
2862        struct ip6_hdr			*ip6 = NULL;
2863        struct tcphdr			*th = NULL;
2864        uint32_t			ehdrlen = 0, ip_hlen = 0, offset = 0;
2865        uint16_t			etype = 0;
2866        device_t			dev;
2867        uint8_t				buf[sizeof(struct ip6_hdr)];
2868
2869        dev = ha->pci_dev;
2870
2871        eh = mtod(mp, struct ether_vlan_header *);
2872
2873        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2874                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2875                etype = ntohs(eh->evl_proto);
2876        } else {
2877                ehdrlen = ETHER_HDR_LEN;
2878                etype = ntohs(eh->evl_encap_proto);
2879        }
2880
2881        switch (etype) {
2882
2883                case ETHERTYPE_IP:
2884                        ip = (struct ip *)(mp->m_data + ehdrlen);
2885
2886                        ip_hlen = sizeof (struct ip);
2887
2888                        if (mp->m_len < (ehdrlen + ip_hlen)) {
2889                                m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2890                                ip = (struct ip *)buf;
2891                        }
2892
2893                        th = (struct tcphdr *)(ip + 1);
2894			offset = ip_hlen + ehdrlen + (th->th_off << 2);
2895                break;
2896
2897                case ETHERTYPE_IPV6:
2898                        ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2899
2900                        ip_hlen = sizeof(struct ip6_hdr);
2901
2902                        if (mp->m_len < (ehdrlen + ip_hlen)) {
2903                                m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2904                                        buf);
2905                                ip6 = (struct ip6_hdr *)buf;
2906                        }
2907                        th = (struct tcphdr *)(ip6 + 1);
2908			offset = ip_hlen + ehdrlen + (th->th_off << 2);
2909                break;
2910
2911                default:
2912                break;
2913        }
2914
2915        return (offset);
2916}
2917
2918static __inline int
2919qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
2920	uint32_t offset)
2921{
2922	int			i;
2923	uint32_t		sum, nbds_in_hdr = 1;
2924	bus_dma_segment_t	*t_segs = segs;
2925
2926	/* count the number of segments spanned by TCP header */
2927
2928	i = 0;
2929	while ((i < nsegs) && (offset > t_segs->ds_len)) {
2930		nbds_in_hdr++;
2931		offset = offset - t_segs->ds_len;
2932		t_segs++;
2933		i++;
2934	}
2935
2936	while (nsegs >= QLNX_MAX_SEGMENTS_NON_TSO) {
2937
2938		sum = 0;
2939
2940		for (i = 0; i < (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr); i++){
2941			sum += segs->ds_len;
2942			segs++;
2943		}
2944
2945		if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
2946			fp->tx_lso_wnd_min_len++;
2947			return (-1);
2948		}
2949
2950		nsegs -= QLNX_MAX_SEGMENTS_NON_TSO;
2951	}
2952
2953	return (0);
2954}
2955
2956static int
2957qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
2958{
2959	bus_dma_segment_t	*segs;
2960	bus_dmamap_t		map = 0;
2961	uint32_t		nsegs = 0;
2962	int			ret = -1;
2963	struct mbuf		*m_head = *m_headp;
2964	uint16_t		idx = 0;
2965	uint16_t		elem_left;
2966
2967	uint8_t			nbd = 0;
2968	struct qlnx_tx_queue    *txq;
2969
2970	struct eth_tx_1st_bd    *first_bd;
2971	struct eth_tx_2nd_bd    *second_bd;
2972	struct eth_tx_3rd_bd    *third_bd;
2973	struct eth_tx_bd        *tx_data_bd;
2974
2975	int			seg_idx = 0;
2976	uint32_t		nbds_in_hdr = 0;
2977	uint32_t		offset = 0;
2978
2979#ifdef QLNX_TRACE_PERF_DATA
2980        uint16_t                bd_used;
2981#endif
2982
2983	QL_DPRINT8(ha, "enter\n");
2984
2985	if (!ha->link_up)
2986		return (-1);
2987
2988	first_bd	= NULL;
2989	second_bd	= NULL;
2990	third_bd	= NULL;
2991	tx_data_bd	= NULL;
2992
2993	txq = fp->txq[0];
2994
2995        if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
2996		QLNX_TX_ELEM_MIN_THRESH) {
2997
2998                fp->tx_nsegs_gt_elem_left++;
2999                fp->err_tx_nsegs_gt_elem_left++;
3000
3001                return (ENOBUFS);
3002        }
3003
3004	idx = txq->sw_tx_prod;
3005
3006	map = txq->sw_tx_ring[idx].map;
3007	segs = txq->segs;
3008
3009	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3010			BUS_DMA_NOWAIT);
3011
3012	if (ha->dbg_trace_tso_pkt_len) {
3013		if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3014			if (!fp->tx_tso_min_pkt_len) {
3015				fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3016				fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3017			} else {
3018				if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3019					fp->tx_tso_min_pkt_len =
3020						m_head->m_pkthdr.len;
3021				if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3022					fp->tx_tso_max_pkt_len =
3023						m_head->m_pkthdr.len;
3024			}
3025		}
3026	}
3027
3028	if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3029		offset = qlnx_tcp_offset(ha, m_head);
3030
3031	if ((ret == EFBIG) ||
3032		((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3033			(!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3034		((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3035			qlnx_tso_check(fp, segs, nsegs, offset))))) {
3036
3037		struct mbuf *m;
3038
3039		QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3040
3041		fp->tx_defrag++;
3042
3043		m = m_defrag(m_head, M_NOWAIT);
3044		if (m == NULL) {
3045			fp->err_tx_defrag++;
3046			fp->tx_pkts_freed++;
3047			m_freem(m_head);
3048			*m_headp = NULL;
3049			QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3050			return (ENOBUFS);
3051		}
3052
3053		m_head = m;
3054		*m_headp = m_head;
3055
3056		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3057				segs, &nsegs, BUS_DMA_NOWAIT))) {
3058
3059			fp->err_tx_defrag_dmamap_load++;
3060
3061			QL_DPRINT1(ha,
3062				"bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3063				ret, m_head->m_pkthdr.len);
3064
3065			fp->tx_pkts_freed++;
3066			m_freem(m_head);
3067			*m_headp = NULL;
3068
3069			return (ret);
3070		}
3071
3072		if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3073			!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3074
3075			fp->err_tx_non_tso_max_seg++;
3076
3077			QL_DPRINT1(ha,
3078				"(%d) nsegs too many for non-TSO [%d, %d]\n",
3079				ret, nsegs, m_head->m_pkthdr.len);
3080
3081			fp->tx_pkts_freed++;
3082			m_freem(m_head);
3083			*m_headp = NULL;
3084
3085			return (ret);
3086		}
3087		if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3088			offset = qlnx_tcp_offset(ha, m_head);
3089
3090	} else if (ret) {
3091
3092		fp->err_tx_dmamap_load++;
3093
3094		QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3095			   ret, m_head->m_pkthdr.len);
3096		fp->tx_pkts_freed++;
3097		m_freem(m_head);
3098		*m_headp = NULL;
3099		return (ret);
3100	}
3101
3102	QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3103
3104	if (ha->dbg_trace_tso_pkt_len) {
3105		if (nsegs < QLNX_FP_MAX_SEGS)
3106			fp->tx_pkts[(nsegs - 1)]++;
3107		else
3108			fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3109	}
3110
3111#ifdef QLNX_TRACE_PERF_DATA
3112        if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3113                if(m_head->m_pkthdr.len <= 2048)
3114                        fp->tx_pkts_hist[0]++;
3115                else if((m_head->m_pkthdr.len > 2048) &&
3116				(m_head->m_pkthdr.len <= 4096))
3117                        fp->tx_pkts_hist[1]++;
3118                else if((m_head->m_pkthdr.len > 4096) &&
3119				(m_head->m_pkthdr.len <= 8192))
3120                        fp->tx_pkts_hist[2]++;
3121                else if((m_head->m_pkthdr.len > 8192) &&
3122				(m_head->m_pkthdr.len <= 12288 ))
3123                        fp->tx_pkts_hist[3]++;
3124                else if((m_head->m_pkthdr.len > 11288) &&
3125				(m_head->m_pkthdr.len <= 16394))
3126                        fp->tx_pkts_hist[4]++;
3127                else if((m_head->m_pkthdr.len > 16384) &&
3128				(m_head->m_pkthdr.len <= 20480))
3129                        fp->tx_pkts_hist[5]++;
3130                else if((m_head->m_pkthdr.len > 20480) &&
3131				(m_head->m_pkthdr.len <= 24576))
3132                        fp->tx_pkts_hist[6]++;
3133                else if((m_head->m_pkthdr.len > 24576) &&
3134				(m_head->m_pkthdr.len <= 28672))
3135                        fp->tx_pkts_hist[7]++;
3136                else if((m_head->m_pkthdr.len > 28762) &&
3137				(m_head->m_pkthdr.len <= 32768))
3138                        fp->tx_pkts_hist[8]++;
3139                else if((m_head->m_pkthdr.len > 32768) &&
3140				(m_head->m_pkthdr.len <= 36864))
3141                        fp->tx_pkts_hist[9]++;
3142                else if((m_head->m_pkthdr.len > 36864) &&
3143				(m_head->m_pkthdr.len <= 40960))
3144                        fp->tx_pkts_hist[10]++;
3145                else if((m_head->m_pkthdr.len > 40960) &&
3146				(m_head->m_pkthdr.len <= 45056))
3147                        fp->tx_pkts_hist[11]++;
3148                else if((m_head->m_pkthdr.len > 45056) &&
3149				(m_head->m_pkthdr.len <= 49152))
3150                        fp->tx_pkts_hist[12]++;
3151                else if((m_head->m_pkthdr.len > 49512) &&
3152				m_head->m_pkthdr.len <= 53248))
3153                        fp->tx_pkts_hist[13]++;
3154                else if((m_head->m_pkthdr.len > 53248) &&
3155				(m_head->m_pkthdr.len <= 57344))
3156                        fp->tx_pkts_hist[14]++;
3157                else if((m_head->m_pkthdr.len > 53248) &&
3158				(m_head->m_pkthdr.len <= 57344))
3159                        fp->tx_pkts_hist[15]++;
3160                else if((m_head->m_pkthdr.len > 57344) &&
3161				(m_head->m_pkthdr.len <= 61440))
3162                        fp->tx_pkts_hist[16]++;
3163                else
3164                        fp->tx_pkts_hist[17]++;
3165        }
3166
3167        if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3168
3169                elem_left =  ecore_chain_get_elem_left(&txq->tx_pbl);
3170                bd_used = TX_RING_SIZE - elem_left;
3171
3172                if(bd_used <= 100)
3173                        fp->tx_pkts_q[0]++;
3174                else if((bd_used > 100) && (bd_used <= 500))
3175                        fp->tx_pkts_q[1]++;
3176                else if((bd_used > 500) && (bd_used <= 1000))
3177                        fp->tx_pkts_q[2]++;
3178                else if((bd_used > 1000) && (bd_used <= 2000))
3179                        fp->tx_pkts_q[3]++;
3180                else if((bd_used > 3000) && (bd_used <= 4000))
3181                        fp->tx_pkts_q[4]++;
3182                else if((bd_used > 4000) && (bd_used <= 5000))
3183                        fp->tx_pkts_q[5]++;
3184                else if((bd_used > 6000) && (bd_used <= 7000))
3185                        fp->tx_pkts_q[6]++;
3186                else if((bd_used > 7000) && (bd_used <= 8000))
3187                        fp->tx_pkts_q[7]++;
3188                else if((bd_used > 8000) && (bd_used <= 9000))
3189                        fp->tx_pkts_q[8]++;
3190                else if((bd_used > 9000) && (bd_used <= 10000))
3191                        fp->tx_pkts_q[9]++;
3192                else if((bd_used > 10000) && (bd_used <= 11000))
3193                        fp->tx_pkts_q[10]++;
3194                else if((bd_used > 11000) && (bd_used <= 12000))
3195                        fp->tx_pkts_q[11]++;
3196                else if((bd_used > 12000) && (bd_used <= 13000))
3197                        fp->tx_pkts_q[12]++;
3198                else if((bd_used > 13000) && (bd_used <= 14000))
3199                        fp->tx_pkts_q[13]++;
3200                else if((bd_used > 14000) && (bd_used <= 15000))
3201                        fp->tx_pkts_q[14]++;
3202               else if((bd_used > 15000) && (bd_used <= 16000))
3203                        fp->tx_pkts_q[15]++;
3204                else
3205                        fp->tx_pkts_q[16]++;
3206        }
3207
3208#endif /* end of QLNX_TRACE_PERF_DATA */
3209
3210	if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3211		(int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3212
3213		QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
3214			" in chain[%d] trying to free packets\n",
3215			nsegs, elem_left, fp->rss_id);
3216
3217		fp->tx_nsegs_gt_elem_left++;
3218
3219		(void)qlnx_tx_int(ha, fp, txq);
3220
3221		if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3222			ecore_chain_get_elem_left(&txq->tx_pbl))) {
3223
3224			QL_DPRINT1(ha,
3225				"(%d, 0x%x) insuffient BDs in chain[%d]\n",
3226				nsegs, elem_left, fp->rss_id);
3227
3228			fp->err_tx_nsegs_gt_elem_left++;
3229			fp->tx_ring_full = 1;
3230			if (ha->storm_stats_enable)
3231				ha->storm_stats_gather = 1;
3232			return (ENOBUFS);
3233		}
3234	}
3235
3236	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3237
3238	txq->sw_tx_ring[idx].mp = m_head;
3239
3240	first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3241
3242	memset(first_bd, 0, sizeof(*first_bd));
3243
3244	first_bd->data.bd_flags.bitfields =
3245		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3246
3247	BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3248
3249	nbd++;
3250
3251	if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3252		first_bd->data.bd_flags.bitfields |=
3253			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3254	}
3255
3256	if (m_head->m_pkthdr.csum_flags &
3257		(CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3258		first_bd->data.bd_flags.bitfields |=
3259			(1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3260	}
3261
3262        if (m_head->m_flags & M_VLANTAG) {
3263                first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3264		first_bd->data.bd_flags.bitfields |=
3265			(1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3266        }
3267
3268	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3269
3270                first_bd->data.bd_flags.bitfields |=
3271			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3272		first_bd->data.bd_flags.bitfields |=
3273			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3274
3275		nbds_in_hdr = 1;
3276
3277		if (offset == segs->ds_len) {
3278			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3279			segs++;
3280			seg_idx++;
3281
3282			second_bd = (struct eth_tx_2nd_bd *)
3283					ecore_chain_produce(&txq->tx_pbl);
3284			memset(second_bd, 0, sizeof(*second_bd));
3285			nbd++;
3286
3287			if (seg_idx < nsegs) {
3288				BD_SET_UNMAP_ADDR_LEN(second_bd, \
3289					(segs->ds_addr), (segs->ds_len));
3290				segs++;
3291				seg_idx++;
3292			}
3293
3294			third_bd = (struct eth_tx_3rd_bd *)
3295					ecore_chain_produce(&txq->tx_pbl);
3296			memset(third_bd, 0, sizeof(*third_bd));
3297			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3298			third_bd->data.bitfields |=
3299				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3300			nbd++;
3301
3302			if (seg_idx < nsegs) {
3303				BD_SET_UNMAP_ADDR_LEN(third_bd, \
3304					(segs->ds_addr), (segs->ds_len));
3305				segs++;
3306				seg_idx++;
3307			}
3308
3309			for (; seg_idx < nsegs; seg_idx++) {
3310				tx_data_bd = (struct eth_tx_bd *)
3311					ecore_chain_produce(&txq->tx_pbl);
3312				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3313				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3314					segs->ds_addr,\
3315					segs->ds_len);
3316				segs++;
3317				nbd++;
3318			}
3319
3320		} else if (offset < segs->ds_len) {
3321			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3322
3323			second_bd = (struct eth_tx_2nd_bd *)
3324					ecore_chain_produce(&txq->tx_pbl);
3325			memset(second_bd, 0, sizeof(*second_bd));
3326			BD_SET_UNMAP_ADDR_LEN(second_bd, \
3327				(segs->ds_addr + offset),\
3328				(segs->ds_len - offset));
3329			nbd++;
3330			segs++;
3331
3332			third_bd = (struct eth_tx_3rd_bd *)
3333					ecore_chain_produce(&txq->tx_pbl);
3334			memset(third_bd, 0, sizeof(*third_bd));
3335
3336			BD_SET_UNMAP_ADDR_LEN(third_bd, \
3337					segs->ds_addr,\
3338					segs->ds_len);
3339			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3340			third_bd->data.bitfields |=
3341				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3342			segs++;
3343			nbd++;
3344
3345			for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3346				tx_data_bd = (struct eth_tx_bd *)
3347					ecore_chain_produce(&txq->tx_pbl);
3348				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3349				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3350					segs->ds_addr,\
3351					segs->ds_len);
3352				segs++;
3353				nbd++;
3354			}
3355
3356		} else {
3357			offset = offset - segs->ds_len;
3358			segs++;
3359
3360			for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3361
3362				if (offset)
3363					nbds_in_hdr++;
3364
3365				tx_data_bd = (struct eth_tx_bd *)
3366					ecore_chain_produce(&txq->tx_pbl);
3367				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3368
3369				if (second_bd == NULL) {
3370					second_bd = (struct eth_tx_2nd_bd *)
3371								tx_data_bd;
3372				} else if (third_bd == NULL) {
3373					third_bd = (struct eth_tx_3rd_bd *)
3374								tx_data_bd;
3375				}
3376
3377				if (offset && (offset < segs->ds_len)) {
3378					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3379						segs->ds_addr, offset);
3380
3381					tx_data_bd = (struct eth_tx_bd *)
3382					ecore_chain_produce(&txq->tx_pbl);
3383
3384					memset(tx_data_bd, 0,
3385						sizeof(*tx_data_bd));
3386
3387					if (second_bd == NULL) {
3388						second_bd =
3389					(struct eth_tx_2nd_bd *)tx_data_bd;
3390					} else if (third_bd == NULL) {
3391						third_bd =
3392					(struct eth_tx_3rd_bd *)tx_data_bd;
3393					}
3394					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3395						(segs->ds_addr + offset), \
3396						(segs->ds_len - offset));
3397					nbd++;
3398					offset = 0;
3399				} else {
3400					if (offset)
3401						offset = offset - segs->ds_len;
3402					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3403						segs->ds_addr, segs->ds_len);
3404				}
3405				segs++;
3406				nbd++;
3407			}
3408
3409			if (third_bd == NULL) {
3410				third_bd = (struct eth_tx_3rd_bd *)
3411					ecore_chain_produce(&txq->tx_pbl);
3412				memset(third_bd, 0, sizeof(*third_bd));
3413			}
3414
3415			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3416			third_bd->data.bitfields |=
3417				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3418		}
3419		fp->tx_tso_pkts++;
3420	} else {
3421		segs++;
3422		for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3423			tx_data_bd = (struct eth_tx_bd *)
3424					ecore_chain_produce(&txq->tx_pbl);
3425			memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3426			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3427				segs->ds_len);
3428			segs++;
3429			nbd++;
3430		}
3431		first_bd->data.bitfields =
3432			(m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3433				 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3434		first_bd->data.bitfields =
3435			htole16(first_bd->data.bitfields);
3436		fp->tx_non_tso_pkts++;
3437	}
3438
3439
3440	first_bd->data.nbds = nbd;
3441
3442	if (ha->dbg_trace_tso_pkt_len) {
3443		if (fp->tx_tso_max_nsegs < nsegs)
3444			fp->tx_tso_max_nsegs = nsegs;
3445
3446		if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3447			fp->tx_tso_min_nsegs = nsegs;
3448	}
3449
3450	txq->sw_tx_ring[idx].nsegs = nsegs;
3451	txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3452
3453	txq->tx_db.data.bd_prod =
3454		htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3455
3456	qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3457
3458	QL_DPRINT8(ha, "exit\n");
3459	return (0);
3460}
3461
3462static void
3463qlnx_stop(qlnx_host_t *ha)
3464{
3465	struct ifnet	*ifp = ha->ifp;
3466	device_t	dev;
3467	int		i;
3468
3469	dev = ha->pci_dev;
3470
3471	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3472
3473	/*
3474	 * We simply lock and unlock each fp->tx_mtx to
3475	 * propagate the if_drv_flags
3476	 * state to each tx thread
3477	 */
3478        QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3479
3480	if (ha->state == QLNX_STATE_OPEN) {
3481        	for (i = 0; i < ha->num_rss; i++) {
3482			struct qlnx_fastpath *fp = &ha->fp_array[i];
3483
3484			mtx_lock(&fp->tx_mtx);
3485			mtx_unlock(&fp->tx_mtx);
3486
3487			if (fp->fp_taskqueue != NULL)
3488				taskqueue_enqueue(fp->fp_taskqueue,
3489					&fp->fp_task);
3490		}
3491	}
3492
3493	qlnx_unload(ha);
3494
3495	return;
3496}
3497
3498static int
3499qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3500{
3501        return(TX_RING_SIZE - 1);
3502}
3503
3504uint8_t *
3505qlnx_get_mac_addr(qlnx_host_t *ha)
3506{
3507	struct ecore_hwfn	*p_hwfn;
3508
3509	p_hwfn = &ha->cdev.hwfns[0];
3510        return (p_hwfn->hw_info.hw_mac_addr);
3511}
3512
3513static uint32_t
3514qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3515{
3516	uint32_t	ifm_type = 0;
3517
3518	switch (if_link->media_type) {
3519
3520	case MEDIA_MODULE_FIBER:
3521	case MEDIA_UNSPECIFIED:
3522		if (if_link->speed == (100 * 1000))
3523			ifm_type = QLNX_IFM_100G_SR4;
3524		else if (if_link->speed == (40 * 1000))
3525			ifm_type = IFM_40G_SR4;
3526		else if (if_link->speed == (25 * 1000))
3527			ifm_type = QLNX_IFM_25G_SR;
3528		else if (if_link->speed == (10 * 1000))
3529			ifm_type = (IFM_10G_LR | IFM_10G_SR);
3530		else if (if_link->speed == (1 * 1000))
3531			ifm_type = (IFM_1000_SX | IFM_1000_LX);
3532
3533		break;
3534
3535	case MEDIA_DA_TWINAX:
3536		if (if_link->speed == (100 * 1000))
3537			ifm_type = QLNX_IFM_100G_CR4;
3538		else if (if_link->speed == (40 * 1000))
3539			ifm_type = IFM_40G_CR4;
3540		else if (if_link->speed == (25 * 1000))
3541			ifm_type = QLNX_IFM_25G_CR;
3542		else if (if_link->speed == (10 * 1000))
3543			ifm_type = IFM_10G_TWINAX;
3544
3545		break;
3546
3547	default :
3548		ifm_type = IFM_UNKNOWN;
3549		break;
3550	}
3551	return (ifm_type);
3552}
3553
3554
3555
3556/*****************************************************************************
3557 * Interrupt Service Functions
3558 *****************************************************************************/
3559
3560static int
3561qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3562	struct mbuf *mp_head, uint16_t len)
3563{
3564	struct mbuf		*mp, *mpf, *mpl;
3565	struct sw_rx_data	*sw_rx_data;
3566	struct qlnx_rx_queue	*rxq;
3567	uint16_t 		len_in_buffer;
3568
3569	rxq = fp->rxq;
3570	mpf = mpl = mp = NULL;
3571
3572	while (len) {
3573
3574        	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3575
3576                sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3577                mp = sw_rx_data->data;
3578
3579		if (mp == NULL) {
3580                	QL_DPRINT1(ha, "mp = NULL\n");
3581			fp->err_rx_mp_null++;
3582        		rxq->sw_rx_cons  =
3583				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3584
3585			if (mpf != NULL)
3586				m_freem(mpf);
3587
3588			return (-1);
3589		}
3590		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3591			BUS_DMASYNC_POSTREAD);
3592
3593                if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3594
3595                        QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3596				" incoming packet and reusing its buffer\n");
3597
3598                        qlnx_reuse_rx_data(rxq);
3599                        fp->err_rx_alloc_errors++;
3600
3601			if (mpf != NULL)
3602				m_freem(mpf);
3603
3604			return (-1);
3605		}
3606                ecore_chain_consume(&rxq->rx_bd_ring);
3607
3608		if (len > rxq->rx_buf_size)
3609			len_in_buffer = rxq->rx_buf_size;
3610		else
3611			len_in_buffer = len;
3612
3613		len = len - len_in_buffer;
3614
3615		mp->m_flags &= ~M_PKTHDR;
3616		mp->m_next = NULL;
3617		mp->m_len = len_in_buffer;
3618
3619		if (mpf == NULL)
3620			mpf = mpl = mp;
3621		else {
3622			mpl->m_next = mp;
3623			mpl = mp;
3624		}
3625	}
3626
3627	if (mpf != NULL)
3628		mp_head->m_next = mpf;
3629
3630	return (0);
3631}
3632
3633static void
3634qlnx_tpa_start(qlnx_host_t *ha,
3635	struct qlnx_fastpath *fp,
3636	struct qlnx_rx_queue *rxq,
3637	struct eth_fast_path_rx_tpa_start_cqe *cqe)
3638{
3639	uint32_t		agg_index;
3640        struct ifnet		*ifp = ha->ifp;
3641	struct mbuf		*mp;
3642	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
3643	struct sw_rx_data	*sw_rx_data;
3644	dma_addr_t		addr;
3645	bus_dmamap_t		map;
3646	struct eth_rx_bd	*rx_bd;
3647	int			i;
3648	device_t		dev;
3649#if __FreeBSD_version >= 1100000
3650	uint8_t			hash_type;
3651#endif /* #if __FreeBSD_version >= 1100000 */
3652
3653	dev = ha->pci_dev;
3654	agg_index = cqe->tpa_agg_index;
3655
3656        QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3657                \t type = 0x%x\n \
3658                \t bitfields = 0x%x\n \
3659                \t seg_len = 0x%x\n \
3660                \t pars_flags = 0x%x\n \
3661                \t vlan_tag = 0x%x\n \
3662                \t rss_hash = 0x%x\n \
3663                \t len_on_first_bd = 0x%x\n \
3664                \t placement_offset = 0x%x\n \
3665                \t tpa_agg_index = 0x%x\n \
3666                \t header_len = 0x%x\n \
3667                \t ext_bd_len_list[0] = 0x%x\n \
3668                \t ext_bd_len_list[1] = 0x%x\n \
3669                \t ext_bd_len_list[2] = 0x%x\n \
3670                \t ext_bd_len_list[3] = 0x%x\n \
3671                \t ext_bd_len_list[4] = 0x%x\n",
3672                fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3673                cqe->pars_flags.flags, cqe->vlan_tag,
3674                cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3675                cqe->tpa_agg_index, cqe->header_len,
3676                cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3677                cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3678                cqe->ext_bd_len_list[4]);
3679
3680	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3681		fp->err_rx_tpa_invalid_agg_num++;
3682		return;
3683	}
3684
3685	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3686	bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3687	mp = sw_rx_data->data;
3688
3689	QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3690
3691	if (mp == NULL) {
3692               	QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3693		fp->err_rx_mp_null++;
3694       		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3695
3696		return;
3697	}
3698
3699	if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3700
3701		QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3702			" flags = %x, dropping incoming packet\n", fp->rss_id,
3703			rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3704
3705		fp->err_rx_hw_errors++;
3706
3707		qlnx_reuse_rx_data(rxq);
3708
3709		QLNX_INC_IERRORS(ifp);
3710
3711		return;
3712	}
3713
3714	if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3715
3716		QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3717			" dropping incoming packet and reusing its buffer\n",
3718			fp->rss_id);
3719
3720		fp->err_rx_alloc_errors++;
3721		QLNX_INC_IQDROPS(ifp);
3722
3723		/*
3724		 * Load the tpa mbuf into the rx ring and save the
3725		 * posted mbuf
3726		 */
3727
3728		map = sw_rx_data->map;
3729		addr = sw_rx_data->dma_addr;
3730
3731		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
3732
3733		sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
3734		sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
3735		sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
3736
3737		rxq->tpa_info[agg_index].rx_buf.data = mp;
3738		rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
3739		rxq->tpa_info[agg_index].rx_buf.map = map;
3740
3741		rx_bd = (struct eth_rx_bd *)
3742				ecore_chain_produce(&rxq->rx_bd_ring);
3743
3744		rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
3745		rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
3746
3747		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3748			BUS_DMASYNC_PREREAD);
3749
3750		rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
3751		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3752
3753		ecore_chain_consume(&rxq->rx_bd_ring);
3754
3755		/* Now reuse any buffers posted in ext_bd_len_list */
3756		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3757
3758			if (cqe->ext_bd_len_list[i] == 0)
3759				break;
3760
3761			qlnx_reuse_rx_data(rxq);
3762		}
3763
3764		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3765		return;
3766	}
3767
3768	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3769
3770		QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
3771			" dropping incoming packet and reusing its buffer\n",
3772			fp->rss_id);
3773
3774		QLNX_INC_IQDROPS(ifp);
3775
3776		/* if we already have mbuf head in aggregation free it */
3777		if (rxq->tpa_info[agg_index].mpf) {
3778			m_freem(rxq->tpa_info[agg_index].mpf);
3779			rxq->tpa_info[agg_index].mpl = NULL;
3780		}
3781		rxq->tpa_info[agg_index].mpf = mp;
3782		rxq->tpa_info[agg_index].mpl = NULL;
3783
3784		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3785		ecore_chain_consume(&rxq->rx_bd_ring);
3786
3787		/* Now reuse any buffers posted in ext_bd_len_list */
3788		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3789
3790			if (cqe->ext_bd_len_list[i] == 0)
3791				break;
3792
3793			qlnx_reuse_rx_data(rxq);
3794		}
3795		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3796
3797		return;
3798	}
3799
3800	/*
3801	 * first process the ext_bd_len_list
3802	 * if this fails then we simply drop the packet
3803	 */
3804	ecore_chain_consume(&rxq->rx_bd_ring);
3805	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3806
3807	for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3808
3809		QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
3810
3811		if (cqe->ext_bd_len_list[i] == 0)
3812			break;
3813
3814		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3815		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3816			BUS_DMASYNC_POSTREAD);
3817
3818		mpc = sw_rx_data->data;
3819
3820		if (mpc == NULL) {
3821			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3822			fp->err_rx_mp_null++;
3823			if (mpf != NULL)
3824				m_freem(mpf);
3825			mpf = mpl = NULL;
3826			rxq->tpa_info[agg_index].agg_state =
3827						QLNX_AGG_STATE_ERROR;
3828			ecore_chain_consume(&rxq->rx_bd_ring);
3829			rxq->sw_rx_cons =
3830				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3831			continue;
3832		}
3833
3834		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3835			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3836				" dropping incoming packet and reusing its"
3837				" buffer\n", fp->rss_id);
3838
3839			qlnx_reuse_rx_data(rxq);
3840
3841			if (mpf != NULL)
3842				m_freem(mpf);
3843			mpf = mpl = NULL;
3844
3845			rxq->tpa_info[agg_index].agg_state =
3846						QLNX_AGG_STATE_ERROR;
3847
3848			ecore_chain_consume(&rxq->rx_bd_ring);
3849			rxq->sw_rx_cons =
3850				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3851
3852			continue;
3853		}
3854
3855		mpc->m_flags &= ~M_PKTHDR;
3856		mpc->m_next = NULL;
3857		mpc->m_len = cqe->ext_bd_len_list[i];
3858
3859
3860		if (mpf == NULL) {
3861			mpf = mpl = mpc;
3862		} else {
3863			mpl->m_len = ha->rx_buf_size;
3864			mpl->m_next = mpc;
3865			mpl = mpc;
3866		}
3867
3868		ecore_chain_consume(&rxq->rx_bd_ring);
3869		rxq->sw_rx_cons =
3870			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3871	}
3872
3873	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3874
3875		QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
3876			" incoming packet and reusing its buffer\n",
3877			fp->rss_id);
3878
3879		QLNX_INC_IQDROPS(ifp);
3880
3881		rxq->tpa_info[agg_index].mpf = mp;
3882		rxq->tpa_info[agg_index].mpl = NULL;
3883
3884		return;
3885	}
3886
3887        rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
3888
3889        if (mpf != NULL) {
3890                mp->m_len = ha->rx_buf_size;
3891                mp->m_next = mpf;
3892                rxq->tpa_info[agg_index].mpf = mp;
3893                rxq->tpa_info[agg_index].mpl = mpl;
3894        } else {
3895                mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
3896                rxq->tpa_info[agg_index].mpf = mp;
3897                rxq->tpa_info[agg_index].mpl = mp;
3898                mp->m_next = NULL;
3899        }
3900
3901	mp->m_flags |= M_PKTHDR;
3902
3903	/* assign packet to this interface interface */
3904	mp->m_pkthdr.rcvif = ifp;
3905
3906	/* assume no hardware checksum has complated */
3907	mp->m_pkthdr.csum_flags = 0;
3908
3909	//mp->m_pkthdr.flowid = fp->rss_id;
3910	mp->m_pkthdr.flowid = cqe->rss_hash;
3911
3912#if __FreeBSD_version >= 1100000
3913
3914	hash_type = cqe->bitfields &
3915			(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
3916			ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
3917
3918	switch (hash_type) {
3919
3920	case RSS_HASH_TYPE_IPV4:
3921		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
3922		break;
3923
3924	case RSS_HASH_TYPE_TCP_IPV4:
3925		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
3926		break;
3927
3928	case RSS_HASH_TYPE_IPV6:
3929		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
3930		break;
3931
3932	case RSS_HASH_TYPE_TCP_IPV6:
3933		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
3934		break;
3935
3936	default:
3937		M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
3938		break;
3939	}
3940
3941#else
3942	mp->m_flags |= M_FLOWID;
3943#endif
3944
3945	mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
3946					CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3947
3948	mp->m_pkthdr.csum_data = 0xFFFF;
3949
3950	if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
3951		mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
3952		mp->m_flags |= M_VLANTAG;
3953	}
3954
3955	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
3956
3957        QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
3958		fp->rss_id, rxq->tpa_info[agg_index].agg_state,
3959                rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
3960
3961	return;
3962}
3963
3964static void
3965qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3966	struct qlnx_rx_queue *rxq,
3967	struct eth_fast_path_rx_tpa_cont_cqe *cqe)
3968{
3969	struct sw_rx_data	*sw_rx_data;
3970	int			i;
3971	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
3972	struct mbuf		*mp;
3973	uint32_t		agg_index;
3974	device_t		dev;
3975
3976	dev = ha->pci_dev;
3977
3978        QL_DPRINT7(ha, "[%d]: enter\n \
3979                \t type = 0x%x\n \
3980                \t tpa_agg_index = 0x%x\n \
3981                \t len_list[0] = 0x%x\n \
3982                \t len_list[1] = 0x%x\n \
3983                \t len_list[2] = 0x%x\n \
3984                \t len_list[3] = 0x%x\n \
3985                \t len_list[4] = 0x%x\n \
3986                \t len_list[5] = 0x%x\n",
3987                fp->rss_id, cqe->type, cqe->tpa_agg_index,
3988                cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3989                cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
3990
3991	agg_index = cqe->tpa_agg_index;
3992
3993	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3994		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
3995		fp->err_rx_tpa_invalid_agg_num++;
3996		return;
3997	}
3998
3999
4000	for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4001
4002		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4003
4004		if (cqe->len_list[i] == 0)
4005			break;
4006
4007		if (rxq->tpa_info[agg_index].agg_state !=
4008			QLNX_AGG_STATE_START) {
4009			qlnx_reuse_rx_data(rxq);
4010			continue;
4011		}
4012
4013		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4014		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4015			BUS_DMASYNC_POSTREAD);
4016
4017		mpc = sw_rx_data->data;
4018
4019		if (mpc == NULL) {
4020
4021			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4022
4023			fp->err_rx_mp_null++;
4024			if (mpf != NULL)
4025				m_freem(mpf);
4026			mpf = mpl = NULL;
4027			rxq->tpa_info[agg_index].agg_state =
4028						QLNX_AGG_STATE_ERROR;
4029			ecore_chain_consume(&rxq->rx_bd_ring);
4030			rxq->sw_rx_cons =
4031				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4032			continue;
4033		}
4034
4035		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4036
4037			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4038				" dropping incoming packet and reusing its"
4039				" buffer\n", fp->rss_id);
4040
4041			qlnx_reuse_rx_data(rxq);
4042
4043			if (mpf != NULL)
4044				m_freem(mpf);
4045			mpf = mpl = NULL;
4046
4047			rxq->tpa_info[agg_index].agg_state =
4048						QLNX_AGG_STATE_ERROR;
4049
4050			ecore_chain_consume(&rxq->rx_bd_ring);
4051			rxq->sw_rx_cons =
4052				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4053
4054			continue;
4055		}
4056
4057		mpc->m_flags &= ~M_PKTHDR;
4058		mpc->m_next = NULL;
4059		mpc->m_len = cqe->len_list[i];
4060
4061
4062		if (mpf == NULL) {
4063			mpf = mpl = mpc;
4064		} else {
4065			mpl->m_len = ha->rx_buf_size;
4066			mpl->m_next = mpc;
4067			mpl = mpc;
4068		}
4069
4070		ecore_chain_consume(&rxq->rx_bd_ring);
4071		rxq->sw_rx_cons =
4072			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4073	}
4074
4075        QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4076                  fp->rss_id, mpf, mpl);
4077
4078	if (mpf != NULL) {
4079		mp = rxq->tpa_info[agg_index].mpl;
4080		mp->m_len = ha->rx_buf_size;
4081		mp->m_next = mpf;
4082		rxq->tpa_info[agg_index].mpl = mpl;
4083	}
4084
4085	return;
4086}
4087
4088static int
4089qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4090	struct qlnx_rx_queue *rxq,
4091	struct eth_fast_path_rx_tpa_end_cqe *cqe)
4092{
4093	struct sw_rx_data	*sw_rx_data;
4094	int			i;
4095	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
4096	struct mbuf		*mp;
4097	uint32_t		agg_index;
4098	uint32_t		len = 0;
4099        struct ifnet		*ifp = ha->ifp;
4100	device_t		dev;
4101
4102	dev = ha->pci_dev;
4103
4104        QL_DPRINT7(ha, "[%d]: enter\n \
4105                \t type = 0x%x\n \
4106                \t tpa_agg_index = 0x%x\n \
4107                \t total_packet_len = 0x%x\n \
4108                \t num_of_bds = 0x%x\n \
4109                \t end_reason = 0x%x\n \
4110                \t num_of_coalesced_segs = 0x%x\n \
4111                \t ts_delta = 0x%x\n \
4112                \t len_list[0] = 0x%x\n \
4113                \t len_list[1] = 0x%x\n \
4114                \t len_list[2] = 0x%x\n \
4115                \t len_list[3] = 0x%x\n",
4116                 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4117                cqe->total_packet_len, cqe->num_of_bds,
4118                cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4119                cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4120                cqe->len_list[3]);
4121
4122	agg_index = cqe->tpa_agg_index;
4123
4124	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4125
4126		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4127
4128		fp->err_rx_tpa_invalid_agg_num++;
4129		return (0);
4130	}
4131
4132
4133	for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4134
4135		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4136
4137		if (cqe->len_list[i] == 0)
4138			break;
4139
4140		if (rxq->tpa_info[agg_index].agg_state !=
4141			QLNX_AGG_STATE_START) {
4142
4143			QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4144
4145			qlnx_reuse_rx_data(rxq);
4146			continue;
4147		}
4148
4149		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4150		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4151			BUS_DMASYNC_POSTREAD);
4152
4153		mpc = sw_rx_data->data;
4154
4155		if (mpc == NULL) {
4156
4157			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4158
4159			fp->err_rx_mp_null++;
4160			if (mpf != NULL)
4161				m_freem(mpf);
4162			mpf = mpl = NULL;
4163			rxq->tpa_info[agg_index].agg_state =
4164						QLNX_AGG_STATE_ERROR;
4165			ecore_chain_consume(&rxq->rx_bd_ring);
4166			rxq->sw_rx_cons =
4167				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4168			continue;
4169		}
4170
4171		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4172			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4173				" dropping incoming packet and reusing its"
4174				" buffer\n", fp->rss_id);
4175
4176			qlnx_reuse_rx_data(rxq);
4177
4178			if (mpf != NULL)
4179				m_freem(mpf);
4180			mpf = mpl = NULL;
4181
4182			rxq->tpa_info[agg_index].agg_state =
4183						QLNX_AGG_STATE_ERROR;
4184
4185			ecore_chain_consume(&rxq->rx_bd_ring);
4186			rxq->sw_rx_cons =
4187				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4188
4189			continue;
4190		}
4191
4192		mpc->m_flags &= ~M_PKTHDR;
4193		mpc->m_next = NULL;
4194		mpc->m_len = cqe->len_list[i];
4195
4196
4197		if (mpf == NULL) {
4198			mpf = mpl = mpc;
4199		} else {
4200			mpl->m_len = ha->rx_buf_size;
4201			mpl->m_next = mpc;
4202			mpl = mpc;
4203		}
4204
4205		ecore_chain_consume(&rxq->rx_bd_ring);
4206		rxq->sw_rx_cons =
4207			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4208	}
4209
4210	QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4211
4212	if (mpf != NULL) {
4213
4214		QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4215
4216		mp = rxq->tpa_info[agg_index].mpl;
4217		mp->m_len = ha->rx_buf_size;
4218		mp->m_next = mpf;
4219	}
4220
4221	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4222
4223		QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4224
4225		if (rxq->tpa_info[agg_index].mpf != NULL)
4226			m_freem(rxq->tpa_info[agg_index].mpf);
4227		rxq->tpa_info[agg_index].mpf = NULL;
4228		rxq->tpa_info[agg_index].mpl = NULL;
4229		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4230		return (0);
4231	}
4232
4233	mp = rxq->tpa_info[agg_index].mpf;
4234	m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4235	mp->m_pkthdr.len = cqe->total_packet_len;
4236
4237	if (mp->m_next  == NULL)
4238		mp->m_len = mp->m_pkthdr.len;
4239	else {
4240		/* compute the total packet length */
4241		mpf = mp;
4242		while (mpf != NULL) {
4243			len += mpf->m_len;
4244			mpf = mpf->m_next;
4245		}
4246
4247		if (cqe->total_packet_len > len) {
4248			mpl = rxq->tpa_info[agg_index].mpl;
4249			mpl->m_len += (cqe->total_packet_len - len);
4250		}
4251	}
4252
4253	QLNX_INC_IPACKETS(ifp);
4254	QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4255
4256        QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n \
4257		m_len = 0x%x m_pkthdr_len = 0x%x\n",
4258                fp->rss_id, mp->m_pkthdr.csum_data,
4259                mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4260
4261	(*ifp->if_input)(ifp, mp);
4262
4263	rxq->tpa_info[agg_index].mpf = NULL;
4264	rxq->tpa_info[agg_index].mpl = NULL;
4265	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4266
4267	return (cqe->num_of_coalesced_segs);
4268}
4269
4270static int
4271qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4272	int lro_enable)
4273{
4274        uint16_t		hw_comp_cons, sw_comp_cons;
4275        int			rx_pkt = 0;
4276        struct qlnx_rx_queue	*rxq = fp->rxq;
4277        struct ifnet		*ifp = ha->ifp;
4278	struct ecore_dev	*cdev = &ha->cdev;
4279	struct ecore_hwfn       *p_hwfn;
4280
4281#ifdef QLNX_SOFT_LRO
4282	struct lro_ctrl		*lro;
4283
4284	lro = &rxq->lro;
4285#endif /* #ifdef QLNX_SOFT_LRO */
4286
4287        hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4288        sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4289
4290	p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4291
4292        /* Memory barrier to prevent the CPU from doing speculative reads of CQE
4293         * / BD in the while-loop before reading hw_comp_cons. If the CQE is
4294         * read before it is written by FW, then FW writes CQE and SB, and then
4295         * the CPU reads the hw_comp_cons, it will use an old CQE.
4296         */
4297
4298        /* Loop to complete all indicated BDs */
4299        while (sw_comp_cons != hw_comp_cons) {
4300                union eth_rx_cqe		*cqe;
4301                struct eth_fast_path_rx_reg_cqe	*fp_cqe;
4302                struct sw_rx_data		*sw_rx_data;
4303		register struct mbuf		*mp;
4304                enum eth_rx_cqe_type		cqe_type;
4305                uint16_t			len, pad, len_on_first_bd;
4306                uint8_t				*data;
4307#if __FreeBSD_version >= 1100000
4308		uint8_t				hash_type;
4309#endif /* #if __FreeBSD_version >= 1100000 */
4310
4311                /* Get the CQE from the completion ring */
4312                cqe = (union eth_rx_cqe *)
4313                        ecore_chain_consume(&rxq->rx_comp_ring);
4314                cqe_type = cqe->fast_path_regular.type;
4315
4316                if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4317                        QL_DPRINT3(ha, "Got a slowath CQE\n");
4318
4319                        ecore_eth_cqe_completion(p_hwfn,
4320                                        (struct eth_slow_path_rx_cqe *)cqe);
4321                        goto next_cqe;
4322                }
4323
4324		if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4325
4326			switch (cqe_type) {
4327
4328			case ETH_RX_CQE_TYPE_TPA_START:
4329				qlnx_tpa_start(ha, fp, rxq,
4330					&cqe->fast_path_tpa_start);
4331				fp->tpa_start++;
4332				break;
4333
4334			case ETH_RX_CQE_TYPE_TPA_CONT:
4335				qlnx_tpa_cont(ha, fp, rxq,
4336					&cqe->fast_path_tpa_cont);
4337				fp->tpa_cont++;
4338				break;
4339
4340			case ETH_RX_CQE_TYPE_TPA_END:
4341				rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4342						&cqe->fast_path_tpa_end);
4343				fp->tpa_end++;
4344				break;
4345
4346			default:
4347				break;
4348			}
4349
4350                        goto next_cqe;
4351		}
4352
4353                /* Get the data from the SW ring */
4354                sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4355                mp = sw_rx_data->data;
4356
4357		if (mp == NULL) {
4358                	QL_DPRINT1(ha, "mp = NULL\n");
4359			fp->err_rx_mp_null++;
4360        		rxq->sw_rx_cons  =
4361				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4362			goto next_cqe;
4363		}
4364		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4365			BUS_DMASYNC_POSTREAD);
4366
4367                /* non GRO */
4368                fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4369                len =  le16toh(fp_cqe->pkt_len);
4370                pad = fp_cqe->placement_offset;
4371
4372		QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4373			" len %u, parsing flags = %d pad  = %d\n",
4374			cqe_type, fp_cqe->bitfields,
4375			le16toh(fp_cqe->vlan_tag),
4376			len, le16toh(fp_cqe->pars_flags.flags), pad);
4377
4378		data = mtod(mp, uint8_t *);
4379		data = data + pad;
4380
4381		if (0)
4382			qlnx_dump_buf8(ha, __func__, data, len);
4383
4384                /* For every Rx BD consumed, we allocate a new BD so the BD ring
4385                 * is always with a fixed size. If allocation fails, we take the
4386                 * consumed BD and return it to the ring in the PROD position.
4387                 * The packet that was received on that BD will be dropped (and
4388                 * not passed to the upper stack).
4389                 */
4390		/* If this is an error packet then drop it */
4391		if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4392			CQE_FLAGS_ERR) {
4393
4394			QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4395				" dropping incoming packet\n", sw_comp_cons,
4396			le16toh(cqe->fast_path_regular.pars_flags.flags));
4397			fp->err_rx_hw_errors++;
4398
4399                        qlnx_reuse_rx_data(rxq);
4400
4401			QLNX_INC_IERRORS(ifp);
4402
4403			goto next_cqe;
4404		}
4405
4406                if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4407
4408                        QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4409				" incoming packet and reusing its buffer\n");
4410                        qlnx_reuse_rx_data(rxq);
4411
4412                        fp->err_rx_alloc_errors++;
4413
4414			QLNX_INC_IQDROPS(ifp);
4415
4416                        goto next_cqe;
4417                }
4418
4419                ecore_chain_consume(&rxq->rx_bd_ring);
4420
4421		len_on_first_bd = fp_cqe->len_on_first_bd;
4422		m_adj(mp, pad);
4423		mp->m_pkthdr.len = len;
4424
4425		QL_DPRINT1(ha, "len = %d len_on_first_bd = %d\n",
4426			   len, len_on_first_bd);
4427		if ((len > 60 ) && (len > len_on_first_bd)) {
4428
4429			mp->m_len = len_on_first_bd;
4430
4431			if (qlnx_rx_jumbo_chain(ha, fp, mp,
4432				(len - len_on_first_bd)) != 0) {
4433
4434				m_freem(mp);
4435
4436				QLNX_INC_IQDROPS(ifp);
4437
4438                        	goto next_cqe;
4439			}
4440
4441		} else if (len_on_first_bd < len) {
4442			fp->err_rx_jumbo_chain_pkts++;
4443		} else {
4444			mp->m_len = len;
4445		}
4446
4447		mp->m_flags |= M_PKTHDR;
4448
4449		/* assign packet to this interface interface */
4450		mp->m_pkthdr.rcvif = ifp;
4451
4452		/* assume no hardware checksum has complated */
4453		mp->m_pkthdr.csum_flags = 0;
4454
4455		mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4456
4457#if __FreeBSD_version >= 1100000
4458
4459		hash_type = fp_cqe->bitfields &
4460				(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4461				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4462
4463		switch (hash_type) {
4464
4465		case RSS_HASH_TYPE_IPV4:
4466			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4467			break;
4468
4469		case RSS_HASH_TYPE_TCP_IPV4:
4470			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4471			break;
4472
4473		case RSS_HASH_TYPE_IPV6:
4474			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4475			break;
4476
4477		case RSS_HASH_TYPE_TCP_IPV6:
4478			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4479			break;
4480
4481		default:
4482			M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4483			break;
4484		}
4485
4486#else
4487		mp->m_flags |= M_FLOWID;
4488#endif
4489
4490		if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4491			mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4492		}
4493
4494		if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4495			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4496		}
4497
4498		if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4499			mp->m_pkthdr.csum_data = 0xFFFF;
4500			mp->m_pkthdr.csum_flags |=
4501				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4502		}
4503
4504		if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4505			mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4506			mp->m_flags |= M_VLANTAG;
4507		}
4508
4509		QLNX_INC_IPACKETS(ifp);
4510		QLNX_INC_IBYTES(ifp, len);
4511
4512#ifdef QLNX_SOFT_LRO
4513
4514		if (lro_enable) {
4515
4516#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4517
4518			tcp_lro_queue_mbuf(lro, mp);
4519
4520#else
4521
4522			if (tcp_lro_rx(lro, mp, 0))
4523				(*ifp->if_input)(ifp, mp);
4524
4525#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4526
4527		} else {
4528			(*ifp->if_input)(ifp, mp);
4529		}
4530#else
4531
4532		(*ifp->if_input)(ifp, mp);
4533
4534#endif /* #ifdef QLNX_SOFT_LRO */
4535
4536                rx_pkt++;
4537
4538        	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4539
4540next_cqe:	/* don't consume bd rx buffer */
4541                ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4542                sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4543
4544		/* CR TPA - revisit how to handle budget in TPA perhaps
4545		   increase on "end" */
4546                if (rx_pkt == budget)
4547                        break;
4548        } /* repeat while sw_comp_cons != hw_comp_cons... */
4549
4550        /* Update producers */
4551        qlnx_update_rx_prod(p_hwfn, rxq);
4552
4553        return rx_pkt;
4554}
4555
4556
4557/*
4558 * fast path interrupt
4559 */
4560
4561static void
4562qlnx_fp_isr(void *arg)
4563{
4564        qlnx_ivec_t		*ivec = arg;
4565        qlnx_host_t		*ha;
4566        struct qlnx_fastpath	*fp = NULL;
4567        int			idx;
4568
4569        ha = ivec->ha;
4570
4571        if (ha->state != QLNX_STATE_OPEN) {
4572                return;
4573        }
4574
4575        idx = ivec->rss_idx;
4576
4577        if ((idx = ivec->rss_idx) >= ha->num_rss) {
4578                QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4579                ha->err_illegal_intr++;
4580                return;
4581        }
4582        fp = &ha->fp_array[idx];
4583
4584        if (fp == NULL) {
4585                ha->err_fp_null++;
4586        } else {
4587
4588#ifdef QLNX_RCV_IN_TASKQ
4589                ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4590		if (fp->fp_taskqueue != NULL)
4591			taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
4592#else
4593		int			rx_int = 0, total_rx_count = 0;
4594		int 			lro_enable, tc;
4595		struct qlnx_tx_queue	*txq;
4596		uint16_t		elem_left;
4597
4598		lro_enable = ha->ifp->if_capenable & IFCAP_LRO;
4599
4600                ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4601
4602                do {
4603                        for (tc = 0; tc < ha->num_tc; tc++) {
4604
4605				txq = fp->txq[tc];
4606
4607				if((int)(elem_left =
4608					ecore_chain_get_elem_left(&txq->tx_pbl)) <
4609						QLNX_TX_ELEM_THRESH)  {
4610
4611                                	if (mtx_trylock(&fp->tx_mtx)) {
4612#ifdef QLNX_TRACE_PERF_DATA
4613						tx_compl = fp->tx_pkts_completed;
4614#endif
4615
4616						qlnx_tx_int(ha, fp, fp->txq[tc]);
4617#ifdef QLNX_TRACE_PERF_DATA
4618						fp->tx_pkts_compl_intr +=
4619							(fp->tx_pkts_completed - tx_compl);
4620						if ((fp->tx_pkts_completed - tx_compl) <= 32)
4621							fp->tx_comInt[0]++;
4622						else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4623							((fp->tx_pkts_completed - tx_compl) <= 64))
4624							fp->tx_comInt[1]++;
4625						else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
4626							((fp->tx_pkts_completed - tx_compl) <= 128))
4627							fp->tx_comInt[2]++;
4628						else if(((fp->tx_pkts_completed - tx_compl) > 128))
4629							fp->tx_comInt[3]++;
4630#endif
4631						mtx_unlock(&fp->tx_mtx);
4632					}
4633				}
4634                        }
4635
4636                        rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4637                                        lro_enable);
4638
4639                        if (rx_int) {
4640                                fp->rx_pkts += rx_int;
4641                                total_rx_count += rx_int;
4642                        }
4643
4644                } while (rx_int);
4645
4646#ifdef QLNX_SOFT_LRO
4647                {
4648                        struct lro_ctrl *lro;
4649
4650                        lro = &fp->rxq->lro;
4651
4652                        if (lro_enable && total_rx_count) {
4653
4654#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4655
4656#ifdef QLNX_TRACE_LRO_CNT
4657                                if (lro->lro_mbuf_count & ~1023)
4658                                        fp->lro_cnt_1024++;
4659                                else if (lro->lro_mbuf_count & ~511)
4660                                        fp->lro_cnt_512++;
4661                                else if (lro->lro_mbuf_count & ~255)
4662                                        fp->lro_cnt_256++;
4663                                else if (lro->lro_mbuf_count & ~127)
4664                                        fp->lro_cnt_128++;
4665                                else if (lro->lro_mbuf_count & ~63)
4666                                        fp->lro_cnt_64++;
4667#endif /* #ifdef QLNX_TRACE_LRO_CNT */
4668
4669                                tcp_lro_flush_all(lro);
4670
4671#else
4672                                struct lro_entry *queued;
4673
4674                                while ((!SLIST_EMPTY(&lro->lro_active))) {
4675                                        queued = SLIST_FIRST(&lro->lro_active);
4676                                        SLIST_REMOVE_HEAD(&lro->lro_active, \
4677                                                next);
4678                                        tcp_lro_flush(lro, queued);
4679                                }
4680#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4681                        }
4682                }
4683#endif /* #ifdef QLNX_SOFT_LRO */
4684
4685                ecore_sb_update_sb_idx(fp->sb_info);
4686                rmb();
4687                ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4688
4689#endif /* #ifdef QLNX_RCV_IN_TASKQ */
4690        }
4691
4692        return;
4693}
4694
4695
4696/*
4697 * slow path interrupt processing function
4698 * can be invoked in polled mode or in interrupt mode via taskqueue.
4699 */
4700void
4701qlnx_sp_isr(void *arg)
4702{
4703	struct ecore_hwfn	*p_hwfn;
4704	qlnx_host_t		*ha;
4705
4706	p_hwfn = arg;
4707
4708	ha = (qlnx_host_t *)p_hwfn->p_dev;
4709
4710	ha->sp_interrupts++;
4711
4712	QL_DPRINT2(ha, "enter\n");
4713
4714	ecore_int_sp_dpc(p_hwfn);
4715
4716	QL_DPRINT2(ha, "exit\n");
4717
4718	return;
4719}
4720
4721/*****************************************************************************
4722 * Support Functions for DMA'able Memory
4723 *****************************************************************************/
4724
4725static void
4726qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4727{
4728        *((bus_addr_t *)arg) = 0;
4729
4730        if (error) {
4731                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4732                return;
4733        }
4734
4735        *((bus_addr_t *)arg) = segs[0].ds_addr;
4736
4737        return;
4738}
4739
4740static int
4741qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4742{
4743        int             ret = 0;
4744        device_t        dev;
4745        bus_addr_t      b_addr;
4746
4747        dev = ha->pci_dev;
4748
4749        ret = bus_dma_tag_create(
4750                        ha->parent_tag,/* parent */
4751                        dma_buf->alignment,
4752                        ((bus_size_t)(1ULL << 32)),/* boundary */
4753                        BUS_SPACE_MAXADDR,      /* lowaddr */
4754                        BUS_SPACE_MAXADDR,      /* highaddr */
4755                        NULL, NULL,             /* filter, filterarg */
4756                        dma_buf->size,          /* maxsize */
4757                        1,                      /* nsegments */
4758                        dma_buf->size,          /* maxsegsize */
4759                        0,                      /* flags */
4760                        NULL, NULL,             /* lockfunc, lockarg */
4761                        &dma_buf->dma_tag);
4762
4763        if (ret) {
4764                QL_DPRINT1(ha, "could not create dma tag\n");
4765                goto qlnx_alloc_dmabuf_exit;
4766        }
4767        ret = bus_dmamem_alloc(dma_buf->dma_tag,
4768                        (void **)&dma_buf->dma_b,
4769                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4770                        &dma_buf->dma_map);
4771        if (ret) {
4772                bus_dma_tag_destroy(dma_buf->dma_tag);
4773                QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4774                goto qlnx_alloc_dmabuf_exit;
4775        }
4776
4777        ret = bus_dmamap_load(dma_buf->dma_tag,
4778                        dma_buf->dma_map,
4779                        dma_buf->dma_b,
4780                        dma_buf->size,
4781                        qlnx_dmamap_callback,
4782                        &b_addr, BUS_DMA_NOWAIT);
4783
4784        if (ret || !b_addr) {
4785                bus_dma_tag_destroy(dma_buf->dma_tag);
4786                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4787                        dma_buf->dma_map);
4788                ret = -1;
4789                goto qlnx_alloc_dmabuf_exit;
4790        }
4791
4792        dma_buf->dma_addr = b_addr;
4793
4794qlnx_alloc_dmabuf_exit:
4795
4796        return ret;
4797}
4798
4799static void
4800qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4801{
4802	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4803        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4804        bus_dma_tag_destroy(dma_buf->dma_tag);
4805	return;
4806}
4807
4808void *
4809qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
4810{
4811	qlnx_dma_t	dma_buf;
4812	qlnx_dma_t	*dma_p;
4813	qlnx_host_t	*ha;
4814	device_t        dev;
4815
4816	ha = (qlnx_host_t *)ecore_dev;
4817	dev = ha->pci_dev;
4818
4819	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4820
4821	memset(&dma_buf, 0, sizeof (qlnx_dma_t));
4822
4823	dma_buf.size = size + PAGE_SIZE;
4824	dma_buf.alignment = 8;
4825
4826	if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
4827		return (NULL);
4828	bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
4829
4830	*phys = dma_buf.dma_addr;
4831
4832	dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
4833
4834	memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
4835/*
4836	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4837		(void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
4838		dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
4839*/
4840	return (dma_buf.dma_b);
4841}
4842
4843void
4844qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
4845	uint32_t size)
4846{
4847	qlnx_dma_t dma_buf, *dma_p;
4848	qlnx_host_t	*ha;
4849	device_t        dev;
4850
4851	ha = (qlnx_host_t *)ecore_dev;
4852	dev = ha->pci_dev;
4853
4854	if (v_addr == NULL)
4855		return;
4856
4857	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4858
4859	dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
4860/*
4861	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4862		(void *)dma_p->dma_map, (void *)dma_p->dma_tag,
4863		dma_p->dma_b, (void *)dma_p->dma_addr, size);
4864*/
4865	dma_buf = *dma_p;
4866
4867	qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
4868	return;
4869}
4870
4871static int
4872qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
4873{
4874        int             ret;
4875        device_t        dev;
4876
4877        dev = ha->pci_dev;
4878
4879        /*
4880         * Allocate parent DMA Tag
4881         */
4882        ret = bus_dma_tag_create(
4883                        bus_get_dma_tag(dev),   /* parent */
4884                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
4885                        BUS_SPACE_MAXADDR,      /* lowaddr */
4886                        BUS_SPACE_MAXADDR,      /* highaddr */
4887                        NULL, NULL,             /* filter, filterarg */
4888                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
4889                        0,                      /* nsegments */
4890                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
4891                        0,                      /* flags */
4892                        NULL, NULL,             /* lockfunc, lockarg */
4893                        &ha->parent_tag);
4894
4895        if (ret) {
4896                QL_DPRINT1(ha, "could not create parent dma tag\n");
4897                return (-1);
4898        }
4899
4900        ha->flags.parent_tag = 1;
4901
4902        return (0);
4903}
4904
4905static void
4906qlnx_free_parent_dma_tag(qlnx_host_t *ha)
4907{
4908        if (ha->parent_tag != NULL) {
4909                bus_dma_tag_destroy(ha->parent_tag);
4910		ha->parent_tag = NULL;
4911        }
4912	return;
4913}
4914
4915static int
4916qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
4917{
4918        if (bus_dma_tag_create(NULL,    /* parent */
4919                1, 0,    /* alignment, bounds */
4920                BUS_SPACE_MAXADDR,       /* lowaddr */
4921                BUS_SPACE_MAXADDR,       /* highaddr */
4922                NULL, NULL,      /* filter, filterarg */
4923                QLNX_MAX_TSO_FRAME_SIZE,     /* maxsize */
4924                QLNX_MAX_SEGMENTS,        /* nsegments */
4925                QLNX_MAX_TX_MBUF_SIZE,	  /* maxsegsize */
4926                0,        /* flags */
4927                NULL,    /* lockfunc */
4928                NULL,    /* lockfuncarg */
4929                &ha->tx_tag)) {
4930
4931                QL_DPRINT1(ha, "tx_tag alloc failed\n");
4932                return (-1);
4933        }
4934
4935	return (0);
4936}
4937
4938static void
4939qlnx_free_tx_dma_tag(qlnx_host_t *ha)
4940{
4941        if (ha->tx_tag != NULL) {
4942                bus_dma_tag_destroy(ha->tx_tag);
4943		ha->tx_tag = NULL;
4944        }
4945	return;
4946}
4947
4948static int
4949qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
4950{
4951        if (bus_dma_tag_create(NULL,    /* parent */
4952                        1, 0,    /* alignment, bounds */
4953                        BUS_SPACE_MAXADDR,       /* lowaddr */
4954                        BUS_SPACE_MAXADDR,       /* highaddr */
4955                        NULL, NULL,      /* filter, filterarg */
4956                        MJUM9BYTES,     /* maxsize */
4957                        1,        /* nsegments */
4958                        MJUM9BYTES,        /* maxsegsize */
4959                        0,        /* flags */
4960                        NULL,    /* lockfunc */
4961                        NULL,    /* lockfuncarg */
4962                        &ha->rx_tag)) {
4963
4964                QL_DPRINT1(ha, " rx_tag alloc failed\n");
4965
4966                return (-1);
4967        }
4968	return (0);
4969}
4970
4971static void
4972qlnx_free_rx_dma_tag(qlnx_host_t *ha)
4973{
4974        if (ha->rx_tag != NULL) {
4975                bus_dma_tag_destroy(ha->rx_tag);
4976		ha->rx_tag = NULL;
4977        }
4978	return;
4979}
4980
4981/*********************************
4982 * Exported functions
4983 *********************************/
4984uint32_t
4985qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
4986{
4987	uint32_t bar_size;
4988
4989	bar_id = bar_id * 2;
4990
4991	bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
4992				SYS_RES_MEMORY,
4993				PCIR_BAR(bar_id));
4994
4995	return (bar_size);
4996}
4997
4998uint32_t
4999qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5000{
5001	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5002				pci_reg, 1);
5003	return 0;
5004}
5005
5006uint32_t
5007qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5008	uint16_t *reg_value)
5009{
5010	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5011				pci_reg, 2);
5012	return 0;
5013}
5014
5015uint32_t
5016qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5017	uint32_t *reg_value)
5018{
5019	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5020				pci_reg, 4);
5021	return 0;
5022}
5023
5024void
5025qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5026{
5027	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5028		pci_reg, reg_value, 1);
5029	return;
5030}
5031
5032void
5033qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5034	uint16_t reg_value)
5035{
5036	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5037		pci_reg, reg_value, 2);
5038	return;
5039}
5040
5041void
5042qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5043	uint32_t reg_value)
5044{
5045	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5046		pci_reg, reg_value, 4);
5047	return;
5048}
5049
5050
5051int
5052qlnx_pci_find_capability(void *ecore_dev, int cap)
5053{
5054	int		reg;
5055	qlnx_host_t	*ha;
5056
5057	ha = ecore_dev;
5058
5059	if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5060		return reg;
5061	else {
5062		QL_DPRINT1(ha, "failed\n");
5063		return 0;
5064	}
5065}
5066
5067uint32_t
5068qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5069{
5070	uint32_t		data32;
5071	struct ecore_dev	*cdev;
5072	struct ecore_hwfn	*p_hwfn;
5073
5074	p_hwfn = hwfn;
5075
5076	cdev = p_hwfn->p_dev;
5077
5078	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
5079			(uint8_t *)(cdev->regview)) + reg_addr;
5080
5081	data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr);
5082
5083	return (data32);
5084}
5085
5086void
5087qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5088{
5089	struct ecore_dev	*cdev;
5090	struct ecore_hwfn	*p_hwfn;
5091
5092	p_hwfn = hwfn;
5093
5094	cdev = p_hwfn->p_dev;
5095
5096	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
5097			(uint8_t *)(cdev->regview)) + reg_addr;
5098
5099	bus_write_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
5100
5101	return;
5102}
5103
5104void
5105qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5106{
5107	struct ecore_dev	*cdev;
5108	struct ecore_hwfn	*p_hwfn;
5109
5110	p_hwfn = hwfn;
5111
5112	cdev = p_hwfn->p_dev;
5113
5114	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
5115			(uint8_t *)(cdev->regview)) + reg_addr;
5116
5117	bus_write_2(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
5118
5119	return;
5120}
5121
5122void
5123qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5124{
5125	struct ecore_dev	*cdev;
5126	struct ecore_hwfn	*p_hwfn;
5127
5128	p_hwfn = hwfn;
5129
5130	cdev = p_hwfn->p_dev;
5131
5132	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->doorbells) -
5133			(uint8_t *)(cdev->doorbells)) + reg_addr;
5134
5135	bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, reg_addr, value);
5136
5137	return;
5138}
5139
5140uint32_t
5141qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5142{
5143	uint32_t		data32;
5144	uint32_t		offset;
5145	struct ecore_dev	*cdev;
5146
5147	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5148	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5149
5150	data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5151
5152	return (data32);
5153}
5154
5155void
5156qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5157{
5158	uint32_t		offset;
5159	struct ecore_dev	*cdev;
5160
5161	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5162	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5163
5164	bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5165
5166	return;
5167}
5168
5169void
5170qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5171{
5172	uint32_t		offset;
5173	struct ecore_dev	*cdev;
5174
5175	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5176	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5177
5178	bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5179	return;
5180}
5181
5182void *
5183qlnx_zalloc(uint32_t size)
5184{
5185	caddr_t	va;
5186
5187	va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT);
5188	bzero(va, size);
5189	return ((void *)va);
5190}
5191
5192void
5193qlnx_barrier(void *p_hwfn)
5194{
5195	qlnx_host_t	*ha;
5196
5197	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5198	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_WRITE);
5199}
5200
5201void
5202qlnx_link_update(void *p_hwfn)
5203{
5204	qlnx_host_t	*ha;
5205	int		prev_link_state;
5206
5207	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5208
5209	qlnx_fill_link(p_hwfn, &ha->if_link);
5210
5211	prev_link_state = ha->link_up;
5212	ha->link_up = ha->if_link.link_up;
5213
5214        if (prev_link_state !=  ha->link_up) {
5215                if (ha->link_up) {
5216                        if_link_state_change(ha->ifp, LINK_STATE_UP);
5217                } else {
5218                        if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5219                }
5220        }
5221        return;
5222}
5223
5224void
5225qlnx_fill_link(struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link)
5226{
5227	struct ecore_mcp_link_params    link_params;
5228	struct ecore_mcp_link_state     link_state;
5229
5230	memset(if_link, 0, sizeof(*if_link));
5231	memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5232	memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5233
5234	/* Prepare source inputs */
5235	/* we only deal with physical functions */
5236	memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5237		sizeof(link_params));
5238	memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5239		sizeof(link_state));
5240
5241	ecore_mcp_get_media_type(hwfn->p_dev, &if_link->media_type);
5242
5243	/* Set the link parameters to pass to protocol driver */
5244	if (link_state.link_up) {
5245		if_link->link_up = true;
5246		if_link->speed = link_state.speed;
5247	}
5248
5249	if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5250
5251	if (link_params.speed.autoneg)
5252		if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5253
5254	if (link_params.pause.autoneg ||
5255		(link_params.pause.forced_rx && link_params.pause.forced_tx))
5256		if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5257
5258	if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5259		link_params.pause.forced_tx)
5260		if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5261
5262	if (link_params.speed.advertised_speeds &
5263		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5264		if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5265                                           QLNX_LINK_CAP_1000baseT_Full;
5266
5267	if (link_params.speed.advertised_speeds &
5268		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5269		if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5270
5271	if (link_params.speed.advertised_speeds &
5272		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5273		if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5274
5275	if (link_params.speed.advertised_speeds &
5276		NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
5277		if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5278
5279	if (link_params.speed.advertised_speeds &
5280		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5281		if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5282
5283	if (link_params.speed.advertised_speeds &
5284		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5285		if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5286
5287	if_link->advertised_caps = if_link->supported_caps;
5288
5289	if_link->autoneg = link_params.speed.autoneg;
5290	if_link->duplex = QLNX_LINK_DUPLEX;
5291
5292	/* Link partner capabilities */
5293
5294	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5295		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5296
5297	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5298		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5299
5300	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5301		if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5302
5303	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5304		if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5305
5306	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5307		if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5308
5309	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5310		if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5311
5312	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5313		if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5314
5315	if (link_state.an_complete)
5316		if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5317
5318	if (link_state.partner_adv_pause)
5319		if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5320
5321	if ((link_state.partner_adv_pause ==
5322		ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5323		(link_state.partner_adv_pause ==
5324			ECORE_LINK_PARTNER_BOTH_PAUSE))
5325		if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5326
5327	return;
5328}
5329
5330static int
5331qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5332{
5333        int	rc, i;
5334
5335        for (i = 0; i < cdev->num_hwfns; i++) {
5336                struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5337                p_hwfn->pf_params = *func_params;
5338        }
5339
5340        rc = ecore_resc_alloc(cdev);
5341        if (rc)
5342                goto qlnx_nic_setup_exit;
5343
5344        ecore_resc_setup(cdev);
5345
5346qlnx_nic_setup_exit:
5347
5348        return rc;
5349}
5350
5351static int
5352qlnx_nic_start(struct ecore_dev *cdev)
5353{
5354        int				rc;
5355	struct ecore_hw_init_params	params;
5356
5357	bzero(&params, sizeof (struct ecore_hw_init_params));
5358
5359	params.p_tunn = NULL;
5360	params.b_hw_start = true;
5361	params.int_mode = cdev->int_mode;
5362	params.allow_npar_tx_switch = true;
5363	params.bin_fw_data = NULL;
5364
5365        rc = ecore_hw_init(cdev, &params);
5366        if (rc) {
5367                ecore_resc_free(cdev);
5368                return rc;
5369        }
5370
5371        return 0;
5372}
5373
5374static int
5375qlnx_slowpath_start(qlnx_host_t *ha)
5376{
5377	struct ecore_dev	*cdev;
5378	struct ecore_pf_params	pf_params;
5379	int			rc;
5380
5381	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5382	pf_params.eth_pf_params.num_cons  =
5383		(ha->num_rss) * (ha->num_tc + 1);
5384
5385	cdev = &ha->cdev;
5386
5387	rc = qlnx_nic_setup(cdev, &pf_params);
5388        if (rc)
5389                goto qlnx_slowpath_start_exit;
5390
5391        cdev->int_mode = ECORE_INT_MODE_MSIX;
5392        cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5393
5394#ifdef QLNX_MAX_COALESCE
5395	cdev->rx_coalesce_usecs = 255;
5396	cdev->tx_coalesce_usecs = 255;
5397#endif
5398
5399	rc = qlnx_nic_start(cdev);
5400
5401	ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5402	ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5403
5404qlnx_slowpath_start_exit:
5405
5406	return (rc);
5407}
5408
5409static int
5410qlnx_slowpath_stop(qlnx_host_t *ha)
5411{
5412	struct ecore_dev	*cdev;
5413	device_t		dev = ha->pci_dev;
5414	int			i;
5415
5416	cdev = &ha->cdev;
5417
5418	ecore_hw_stop(cdev);
5419
5420 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
5421
5422        	if (ha->sp_handle[i])
5423                	(void)bus_teardown_intr(dev, ha->sp_irq[i],
5424				ha->sp_handle[i]);
5425
5426		ha->sp_handle[i] = NULL;
5427
5428        	if (ha->sp_irq[i])
5429			(void) bus_release_resource(dev, SYS_RES_IRQ,
5430				ha->sp_irq_rid[i], ha->sp_irq[i]);
5431		ha->sp_irq[i] = NULL;
5432	}
5433
5434        ecore_resc_free(cdev);
5435
5436        return 0;
5437}
5438
5439static void
5440qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5441	char ver_str[VER_SIZE])
5442{
5443        int	i;
5444
5445        memcpy(cdev->name, name, NAME_SIZE);
5446
5447        for_each_hwfn(cdev, i) {
5448                snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5449        }
5450
5451        cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5452
5453	return ;
5454}
5455
5456void
5457qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5458{
5459	enum ecore_mcp_protocol_type	type;
5460	union ecore_mcp_protocol_stats	*stats;
5461	struct ecore_eth_stats		eth_stats;
5462	qlnx_host_t			*ha;
5463
5464	ha = cdev;
5465	stats = proto_stats;
5466	type = proto_type;
5467
5468        switch (type) {
5469
5470        case ECORE_MCP_LAN_STATS:
5471                ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5472                stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5473                stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5474                stats->lan_stats.fcs_err = -1;
5475                break;
5476
5477	default:
5478		ha->err_get_proto_invalid_type++;
5479
5480		QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5481		break;
5482	}
5483	return;
5484}
5485
5486static int
5487qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5488{
5489	struct ecore_hwfn	*p_hwfn;
5490	struct ecore_ptt	*p_ptt;
5491
5492	p_hwfn = &ha->cdev.hwfns[0];
5493	p_ptt = ecore_ptt_acquire(p_hwfn);
5494
5495	if (p_ptt ==  NULL) {
5496                QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5497                return (-1);
5498	}
5499	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5500
5501	ecore_ptt_release(p_hwfn, p_ptt);
5502
5503	return (0);
5504}
5505
5506static int
5507qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5508{
5509	struct ecore_hwfn	*p_hwfn;
5510	struct ecore_ptt	*p_ptt;
5511
5512	p_hwfn = &ha->cdev.hwfns[0];
5513	p_ptt = ecore_ptt_acquire(p_hwfn);
5514
5515	if (p_ptt ==  NULL) {
5516                QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
5517                return (-1);
5518	}
5519	ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5520
5521	ecore_ptt_release(p_hwfn, p_ptt);
5522
5523	return (0);
5524}
5525
5526static int
5527qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5528{
5529	struct ecore_dev	*cdev;
5530
5531	cdev = &ha->cdev;
5532
5533	bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5534	bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5535	bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5536
5537        return 0;
5538}
5539
5540static void
5541qlnx_init_fp(qlnx_host_t *ha)
5542{
5543	int rss_id, txq_array_index, tc;
5544
5545	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5546
5547		struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5548
5549		fp->rss_id = rss_id;
5550		fp->edev = ha;
5551		fp->sb_info = &ha->sb_array[rss_id];
5552		fp->rxq = &ha->rxq_array[rss_id];
5553		fp->rxq->rxq_id = rss_id;
5554
5555		for (tc = 0; tc < ha->num_tc; tc++) {
5556                        txq_array_index = tc * ha->num_rss + rss_id;
5557                        fp->txq[tc] = &ha->txq_array[txq_array_index];
5558                        fp->txq[tc]->index = txq_array_index;
5559		}
5560
5561		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5562			rss_id);
5563
5564		fp->tx_ring_full = 0;
5565
5566		/* reset all the statistics counters */
5567
5568		fp->tx_pkts_processed = 0;
5569		fp->tx_pkts_freed = 0;
5570		fp->tx_pkts_transmitted = 0;
5571		fp->tx_pkts_completed = 0;
5572
5573#ifdef QLNX_TRACE_PERF_DATA
5574		fp->tx_pkts_trans_ctx = 0;
5575		fp->tx_pkts_compl_ctx = 0;
5576		fp->tx_pkts_trans_fp = 0;
5577		fp->tx_pkts_compl_fp = 0;
5578		fp->tx_pkts_compl_intr = 0;
5579#endif
5580		fp->tx_lso_wnd_min_len = 0;
5581		fp->tx_defrag = 0;
5582		fp->tx_nsegs_gt_elem_left = 0;
5583		fp->tx_tso_max_nsegs = 0;
5584		fp->tx_tso_min_nsegs = 0;
5585		fp->err_tx_nsegs_gt_elem_left = 0;
5586		fp->err_tx_dmamap_create = 0;
5587		fp->err_tx_defrag_dmamap_load = 0;
5588		fp->err_tx_non_tso_max_seg = 0;
5589		fp->err_tx_dmamap_load = 0;
5590		fp->err_tx_defrag = 0;
5591		fp->err_tx_free_pkt_null = 0;
5592		fp->err_tx_cons_idx_conflict = 0;
5593
5594		fp->rx_pkts = 0;
5595		fp->err_m_getcl = 0;
5596		fp->err_m_getjcl = 0;
5597        }
5598	return;
5599}
5600
5601static void
5602qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5603{
5604	struct ecore_dev	*cdev;
5605
5606	cdev = &ha->cdev;
5607
5608        if (sb_info->sb_virt) {
5609                OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5610			(sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5611		sb_info->sb_virt = NULL;
5612	}
5613}
5614
5615static int
5616qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5617	void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5618{
5619        struct ecore_hwfn	*p_hwfn;
5620        int			hwfn_index, rc;
5621        u16			rel_sb_id;
5622
5623        hwfn_index = sb_id % cdev->num_hwfns;
5624        p_hwfn = &cdev->hwfns[hwfn_index];
5625        rel_sb_id = sb_id / cdev->num_hwfns;
5626
5627        QL_DPRINT2(((qlnx_host_t *)cdev),
5628                "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
5629                sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5630                hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5631                sb_virt_addr, (void *)sb_phy_addr);
5632
5633        rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5634                             sb_virt_addr, sb_phy_addr, rel_sb_id);
5635
5636        return rc;
5637}
5638
5639/* This function allocates fast-path status block memory */
5640static int
5641qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5642{
5643        struct status_block_e4	*sb_virt;
5644        bus_addr_t		sb_phys;
5645        int			rc;
5646	uint32_t		size;
5647	struct ecore_dev	*cdev;
5648
5649	cdev = &ha->cdev;
5650
5651	size = sizeof(*sb_virt);
5652	sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5653
5654        if (!sb_virt) {
5655                QL_DPRINT1(ha, "Status block allocation failed\n");
5656                return -ENOMEM;
5657        }
5658
5659        rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5660        if (rc) {
5661                OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5662        }
5663
5664	return rc;
5665}
5666
5667static void
5668qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5669{
5670        int			i;
5671	struct sw_rx_data	*rx_buf;
5672
5673        for (i = 0; i < rxq->num_rx_buffers; i++) {
5674
5675                rx_buf = &rxq->sw_rx_ring[i];
5676
5677		if (rx_buf->data != NULL) {
5678			if (rx_buf->map != NULL) {
5679				bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5680				bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5681				rx_buf->map = NULL;
5682			}
5683			m_freem(rx_buf->data);
5684			rx_buf->data = NULL;
5685		}
5686        }
5687	return;
5688}
5689
5690static void
5691qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5692{
5693	struct ecore_dev	*cdev;
5694	int			i;
5695
5696	cdev = &ha->cdev;
5697
5698	qlnx_free_rx_buffers(ha, rxq);
5699
5700	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5701		qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
5702		if (rxq->tpa_info[i].mpf != NULL)
5703			m_freem(rxq->tpa_info[i].mpf);
5704	}
5705
5706	bzero((void *)&rxq->sw_rx_ring[0],
5707		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
5708
5709        /* Free the real RQ ring used by FW */
5710	if (rxq->rx_bd_ring.p_virt_addr) {
5711                ecore_chain_free(cdev, &rxq->rx_bd_ring);
5712                rxq->rx_bd_ring.p_virt_addr = NULL;
5713        }
5714
5715        /* Free the real completion ring used by FW */
5716        if (rxq->rx_comp_ring.p_virt_addr &&
5717                        rxq->rx_comp_ring.pbl_sp.p_virt_table) {
5718                ecore_chain_free(cdev, &rxq->rx_comp_ring);
5719                rxq->rx_comp_ring.p_virt_addr = NULL;
5720                rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
5721        }
5722
5723#ifdef QLNX_SOFT_LRO
5724	{
5725		struct lro_ctrl *lro;
5726
5727		lro = &rxq->lro;
5728		tcp_lro_free(lro);
5729	}
5730#endif /* #ifdef QLNX_SOFT_LRO */
5731
5732	return;
5733}
5734
5735static int
5736qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5737{
5738        register struct mbuf	*mp;
5739        uint16_t		rx_buf_size;
5740        struct sw_rx_data	*sw_rx_data;
5741        struct eth_rx_bd	*rx_bd;
5742        dma_addr_t		dma_addr;
5743	bus_dmamap_t		map;
5744	bus_dma_segment_t       segs[1];
5745	int			nsegs;
5746	int			ret;
5747	struct ecore_dev	*cdev;
5748
5749	cdev = &ha->cdev;
5750
5751        rx_buf_size = rxq->rx_buf_size;
5752
5753	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5754
5755        if (mp == NULL) {
5756                QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5757                return -ENOMEM;
5758        }
5759
5760	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5761
5762	map = (bus_dmamap_t)0;
5763
5764	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5765			BUS_DMA_NOWAIT);
5766	dma_addr = segs[0].ds_addr;
5767
5768	if (ret || !dma_addr || (nsegs != 1)) {
5769		m_freem(mp);
5770		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5771                           ret, (long long unsigned int)dma_addr, nsegs);
5772		return -ENOMEM;
5773	}
5774
5775        sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
5776        sw_rx_data->data = mp;
5777        sw_rx_data->dma_addr = dma_addr;
5778        sw_rx_data->map = map;
5779
5780        /* Advance PROD and get BD pointer */
5781        rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
5782        rx_bd->addr.hi = htole32(U64_HI(dma_addr));
5783        rx_bd->addr.lo = htole32(U64_LO(dma_addr));
5784	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
5785
5786        rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
5787
5788        return 0;
5789}
5790
5791static int
5792qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
5793	struct qlnx_agg_info *tpa)
5794{
5795	struct mbuf		*mp;
5796        dma_addr_t		dma_addr;
5797	bus_dmamap_t		map;
5798	bus_dma_segment_t       segs[1];
5799	int			nsegs;
5800	int			ret;
5801        struct sw_rx_data	*rx_buf;
5802
5803	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5804
5805        if (mp == NULL) {
5806                QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5807                return -ENOMEM;
5808        }
5809
5810	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5811
5812	map = (bus_dmamap_t)0;
5813
5814	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5815			BUS_DMA_NOWAIT);
5816	dma_addr = segs[0].ds_addr;
5817
5818	if (ret || !dma_addr || (nsegs != 1)) {
5819		m_freem(mp);
5820		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5821			ret, (long long unsigned int)dma_addr, nsegs);
5822		return -ENOMEM;
5823	}
5824
5825        rx_buf = &tpa->rx_buf;
5826
5827	memset(rx_buf, 0, sizeof (struct sw_rx_data));
5828
5829        rx_buf->data = mp;
5830        rx_buf->dma_addr = dma_addr;
5831        rx_buf->map = map;
5832
5833	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
5834
5835	return (0);
5836}
5837
5838static void
5839qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
5840{
5841        struct sw_rx_data	*rx_buf;
5842
5843	rx_buf = &tpa->rx_buf;
5844
5845	if (rx_buf->data != NULL) {
5846		if (rx_buf->map != NULL) {
5847			bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5848			bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5849			rx_buf->map = NULL;
5850		}
5851		m_freem(rx_buf->data);
5852		rx_buf->data = NULL;
5853	}
5854	return;
5855}
5856
5857/* This function allocates all memory needed per Rx queue */
5858static int
5859qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5860{
5861        int			i, rc, num_allocated;
5862	struct ifnet		*ifp;
5863	struct ecore_dev	 *cdev;
5864
5865	cdev = &ha->cdev;
5866	ifp = ha->ifp;
5867
5868        rxq->num_rx_buffers = RX_RING_SIZE;
5869
5870	rxq->rx_buf_size = ha->rx_buf_size;
5871
5872        /* Allocate the parallel driver ring for Rx buffers */
5873	bzero((void *)&rxq->sw_rx_ring[0],
5874		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
5875
5876        /* Allocate FW Rx ring  */
5877
5878        rc = ecore_chain_alloc(cdev,
5879			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
5880			ECORE_CHAIN_MODE_NEXT_PTR,
5881			ECORE_CHAIN_CNT_TYPE_U16,
5882			RX_RING_SIZE,
5883			sizeof(struct eth_rx_bd),
5884			&rxq->rx_bd_ring, NULL);
5885
5886        if (rc)
5887                goto err;
5888
5889        /* Allocate FW completion ring */
5890        rc = ecore_chain_alloc(cdev,
5891                        ECORE_CHAIN_USE_TO_CONSUME,
5892                        ECORE_CHAIN_MODE_PBL,
5893			ECORE_CHAIN_CNT_TYPE_U16,
5894                        RX_RING_SIZE,
5895                        sizeof(union eth_rx_cqe),
5896                        &rxq->rx_comp_ring, NULL);
5897
5898        if (rc)
5899                goto err;
5900
5901        /* Allocate buffers for the Rx ring */
5902
5903	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5904		rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
5905			&rxq->tpa_info[i]);
5906                if (rc)
5907                        break;
5908
5909	}
5910
5911        for (i = 0; i < rxq->num_rx_buffers; i++) {
5912                rc = qlnx_alloc_rx_buffer(ha, rxq);
5913                if (rc)
5914                        break;
5915        }
5916        num_allocated = i;
5917        if (!num_allocated) {
5918		QL_DPRINT1(ha, "Rx buffers allocation failed\n");
5919                goto err;
5920        } else if (num_allocated < rxq->num_rx_buffers) {
5921		QL_DPRINT1(ha, "Allocated less buffers than"
5922			" desired (%d allocated)\n", num_allocated);
5923        }
5924
5925#ifdef QLNX_SOFT_LRO
5926
5927	{
5928		struct lro_ctrl *lro;
5929
5930		lro = &rxq->lro;
5931
5932#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
5933		if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
5934			QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5935				   rxq->rxq_id);
5936			goto err;
5937		}
5938#else
5939		if (tcp_lro_init(lro)) {
5940			QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5941				   rxq->rxq_id);
5942			goto err;
5943		}
5944#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
5945
5946		lro->ifp = ha->ifp;
5947	}
5948#endif /* #ifdef QLNX_SOFT_LRO */
5949        return 0;
5950
5951err:
5952        qlnx_free_mem_rxq(ha, rxq);
5953        return -ENOMEM;
5954}
5955
5956
5957static void
5958qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
5959	struct qlnx_tx_queue *txq)
5960{
5961	struct ecore_dev	*cdev;
5962
5963	cdev = &ha->cdev;
5964
5965	bzero((void *)&txq->sw_tx_ring[0],
5966		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
5967
5968        /* Free the real RQ ring used by FW */
5969        if (txq->tx_pbl.p_virt_addr) {
5970                ecore_chain_free(cdev, &txq->tx_pbl);
5971                txq->tx_pbl.p_virt_addr = NULL;
5972        }
5973	return;
5974}
5975
5976/* This function allocates all memory needed per Tx queue */
5977static int
5978qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
5979	struct qlnx_tx_queue *txq)
5980{
5981        int			ret = ECORE_SUCCESS;
5982        union eth_tx_bd_types	*p_virt;
5983	struct ecore_dev	*cdev;
5984
5985	cdev = &ha->cdev;
5986
5987	bzero((void *)&txq->sw_tx_ring[0],
5988		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
5989
5990        /* Allocate the real Tx ring to be used by FW */
5991        ret = ecore_chain_alloc(cdev,
5992                        ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
5993                        ECORE_CHAIN_MODE_PBL,
5994			ECORE_CHAIN_CNT_TYPE_U16,
5995                        TX_RING_SIZE,
5996                        sizeof(*p_virt),
5997                        &txq->tx_pbl, NULL);
5998
5999        if (ret != ECORE_SUCCESS) {
6000                goto err;
6001        }
6002
6003	txq->num_tx_buffers = TX_RING_SIZE;
6004
6005        return 0;
6006
6007err:
6008        qlnx_free_mem_txq(ha, fp, txq);
6009        return -ENOMEM;
6010}
6011
6012static void
6013qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6014{
6015	struct mbuf	*mp;
6016	struct ifnet	*ifp = ha->ifp;
6017
6018	if (mtx_initialized(&fp->tx_mtx)) {
6019
6020		if (fp->tx_br != NULL) {
6021
6022			mtx_lock(&fp->tx_mtx);
6023
6024			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6025				fp->tx_pkts_freed++;
6026				m_freem(mp);
6027			}
6028
6029			mtx_unlock(&fp->tx_mtx);
6030
6031			buf_ring_free(fp->tx_br, M_DEVBUF);
6032			fp->tx_br = NULL;
6033		}
6034		mtx_destroy(&fp->tx_mtx);
6035	}
6036	return;
6037}
6038
6039static void
6040qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6041{
6042        int	tc;
6043
6044        qlnx_free_mem_sb(ha, fp->sb_info);
6045
6046        qlnx_free_mem_rxq(ha, fp->rxq);
6047
6048        for (tc = 0; tc < ha->num_tc; tc++)
6049                qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6050
6051	return;
6052}
6053
6054static int
6055qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6056{
6057	snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6058		"qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6059
6060	mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6061
6062        fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6063                                   M_NOWAIT, &fp->tx_mtx);
6064        if (fp->tx_br == NULL) {
6065		QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6066			ha->dev_unit, fp->rss_id);
6067		return -ENOMEM;
6068        }
6069	return 0;
6070}
6071
6072static int
6073qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6074{
6075        int	rc, tc;
6076
6077        rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6078        if (rc)
6079                goto err;
6080
6081	if (ha->rx_jumbo_buf_eq_mtu) {
6082		if (ha->max_frame_size <= MCLBYTES)
6083			ha->rx_buf_size = MCLBYTES;
6084		else if (ha->max_frame_size <= MJUMPAGESIZE)
6085			ha->rx_buf_size = MJUMPAGESIZE;
6086		else if (ha->max_frame_size <= MJUM9BYTES)
6087			ha->rx_buf_size = MJUM9BYTES;
6088		else if (ha->max_frame_size <= MJUM16BYTES)
6089			ha->rx_buf_size = MJUM16BYTES;
6090	} else {
6091		if (ha->max_frame_size <= MCLBYTES)
6092			ha->rx_buf_size = MCLBYTES;
6093		else
6094			ha->rx_buf_size = MJUMPAGESIZE;
6095	}
6096
6097        rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6098        if (rc)
6099                goto err;
6100
6101        for (tc = 0; tc < ha->num_tc; tc++) {
6102                rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6103                if (rc)
6104                        goto err;
6105        }
6106
6107        return 0;
6108
6109err:
6110        qlnx_free_mem_fp(ha, fp);
6111        return -ENOMEM;
6112}
6113
6114static void
6115qlnx_free_mem_load(qlnx_host_t *ha)
6116{
6117        int			i;
6118	struct ecore_dev	*cdev;
6119
6120	cdev = &ha->cdev;
6121
6122        for (i = 0; i < ha->num_rss; i++) {
6123                struct qlnx_fastpath *fp = &ha->fp_array[i];
6124
6125                qlnx_free_mem_fp(ha, fp);
6126        }
6127	return;
6128}
6129
6130static int
6131qlnx_alloc_mem_load(qlnx_host_t *ha)
6132{
6133        int	rc = 0, rss_id;
6134
6135        for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6136                struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6137
6138                rc = qlnx_alloc_mem_fp(ha, fp);
6139                if (rc)
6140                        break;
6141        }
6142	return (rc);
6143}
6144
6145static int
6146qlnx_start_vport(struct ecore_dev *cdev,
6147                u8 vport_id,
6148                u16 mtu,
6149                u8 drop_ttl0_flg,
6150                u8 inner_vlan_removal_en_flg,
6151		u8 tx_switching,
6152		u8 hw_lro_enable)
6153{
6154        int					rc, i;
6155	struct ecore_sp_vport_start_params	vport_start_params = { 0 };
6156	qlnx_host_t				*ha;
6157
6158	ha = (qlnx_host_t *)cdev;
6159
6160	vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6161	vport_start_params.tx_switching = 0;
6162	vport_start_params.handle_ptp_pkts = 0;
6163	vport_start_params.only_untagged = 0;
6164	vport_start_params.drop_ttl0 = drop_ttl0_flg;
6165
6166	vport_start_params.tpa_mode =
6167		(hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6168	vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6169
6170	vport_start_params.vport_id = vport_id;
6171	vport_start_params.mtu = mtu;
6172
6173
6174	QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6175
6176        for_each_hwfn(cdev, i) {
6177                struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6178
6179		vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6180		vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6181
6182                rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6183
6184                if (rc) {
6185			QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
6186				" with MTU %d\n" , vport_id, mtu);
6187                        return -ENOMEM;
6188                }
6189
6190                ecore_hw_start_fastpath(p_hwfn);
6191
6192		QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6193			vport_id, mtu);
6194        }
6195        return 0;
6196}
6197
6198
6199static int
6200qlnx_update_vport(struct ecore_dev *cdev,
6201	struct qlnx_update_vport_params *params)
6202{
6203        struct ecore_sp_vport_update_params	sp_params;
6204        int					rc, i, j, fp_index;
6205	struct ecore_hwfn			*p_hwfn;
6206        struct ecore_rss_params			*rss;
6207	qlnx_host_t				*ha = (qlnx_host_t *)cdev;
6208        struct qlnx_fastpath			*fp;
6209
6210        memset(&sp_params, 0, sizeof(sp_params));
6211        /* Translate protocol params into sp params */
6212        sp_params.vport_id = params->vport_id;
6213
6214        sp_params.update_vport_active_rx_flg =
6215		params->update_vport_active_rx_flg;
6216        sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6217
6218        sp_params.update_vport_active_tx_flg =
6219		params->update_vport_active_tx_flg;
6220        sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6221
6222        sp_params.update_inner_vlan_removal_flg =
6223                params->update_inner_vlan_removal_flg;
6224        sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6225
6226	sp_params.sge_tpa_params = params->sge_tpa_params;
6227
6228        /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
6229         * We need to re-fix the rss values per engine for CMT.
6230         */
6231	if (params->rss_params->update_rss_config)
6232        sp_params.rss_params = params->rss_params;
6233	else
6234		sp_params.rss_params =  NULL;
6235
6236        for_each_hwfn(cdev, i) {
6237
6238		p_hwfn = &cdev->hwfns[i];
6239
6240		if ((cdev->num_hwfns > 1) &&
6241			params->rss_params->update_rss_config &&
6242			params->rss_params->rss_enable) {
6243
6244			rss = params->rss_params;
6245
6246			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
6247
6248				fp_index = ((cdev->num_hwfns * j) + i) %
6249						ha->num_rss;
6250
6251                		fp = &ha->fp_array[fp_index];
6252                        	rss->rss_ind_table[j] = fp->rxq->handle;
6253			}
6254
6255			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6256				QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
6257					rss->rss_ind_table[j],
6258					rss->rss_ind_table[j+1],
6259					rss->rss_ind_table[j+2],
6260					rss->rss_ind_table[j+3],
6261					rss->rss_ind_table[j+4],
6262					rss->rss_ind_table[j+5],
6263					rss->rss_ind_table[j+6],
6264					rss->rss_ind_table[j+7]);
6265					j += 8;
6266			}
6267		}
6268
6269                sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6270
6271		QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6272
6273                rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6274                                           ECORE_SPQ_MODE_EBLOCK, NULL);
6275                if (rc) {
6276			QL_DPRINT1(ha, "Failed to update VPORT\n");
6277                        return rc;
6278                }
6279
6280                QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6281			rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6282			params->vport_id, params->vport_active_tx_flg,
6283			params->vport_active_rx_flg,
6284			params->update_vport_active_tx_flg,
6285			params->update_vport_active_rx_flg);
6286        }
6287
6288        return 0;
6289}
6290
6291static void
6292qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6293{
6294        struct eth_rx_bd	*rx_bd_cons =
6295					ecore_chain_consume(&rxq->rx_bd_ring);
6296        struct eth_rx_bd	*rx_bd_prod =
6297					ecore_chain_produce(&rxq->rx_bd_ring);
6298        struct sw_rx_data	*sw_rx_data_cons =
6299					&rxq->sw_rx_ring[rxq->sw_rx_cons];
6300        struct sw_rx_data	*sw_rx_data_prod =
6301					&rxq->sw_rx_ring[rxq->sw_rx_prod];
6302
6303        sw_rx_data_prod->data = sw_rx_data_cons->data;
6304        memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6305
6306        rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6307        rxq->sw_rx_prod  = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6308
6309	return;
6310}
6311
6312static void
6313qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6314{
6315
6316        uint16_t	 	bd_prod;
6317        uint16_t		cqe_prod;
6318	union {
6319		struct eth_rx_prod_data rx_prod_data;
6320		uint32_t		data32;
6321	} rx_prods;
6322
6323        bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6324        cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6325
6326        /* Update producers */
6327        rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6328        rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6329
6330        /* Make sure that the BD and SGE data is updated before updating the
6331         * producers since FW might read the BD/SGE right after the producer
6332         * is updated.
6333         */
6334	wmb();
6335
6336        internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6337		sizeof(rx_prods), &rx_prods.data32);
6338
6339        /* mmiowb is needed to synchronize doorbell writes from more than one
6340         * processor. It guarantees that the write arrives to the device before
6341         * the napi lock is released and another qlnx_poll is called (possibly
6342         * on another CPU). Without this barrier, the next doorbell can bypass
6343         * this doorbell. This is applicable to IA64/Altix systems.
6344         */
6345        wmb();
6346
6347	return;
6348}
6349
6350static uint32_t qlnx_hash_key[] = {
6351                ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6352                ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6353                ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6354                ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6355                ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6356                ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6357                ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6358                ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6359                ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6360                ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
6361
6362static int
6363qlnx_start_queues(qlnx_host_t *ha)
6364{
6365        int				rc, tc, i, vport_id = 0,
6366					drop_ttl0_flg = 1, vlan_removal_en = 1,
6367					tx_switching = 0, hw_lro_enable = 0;
6368        struct ecore_dev		*cdev = &ha->cdev;
6369        struct ecore_rss_params		*rss_params = &ha->rss_params;
6370        struct qlnx_update_vport_params	vport_update_params;
6371        struct ifnet			*ifp;
6372        struct ecore_hwfn		*p_hwfn;
6373	struct ecore_sge_tpa_params	tpa_params;
6374	struct ecore_queue_start_common_params qparams;
6375        struct qlnx_fastpath		*fp;
6376
6377	ifp = ha->ifp;
6378
6379	QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6380
6381        if (!ha->num_rss) {
6382		QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
6383			" are no Rx queues\n");
6384                return -EINVAL;
6385        }
6386
6387#ifndef QLNX_SOFT_LRO
6388        hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
6389#endif /* #ifndef QLNX_SOFT_LRO */
6390
6391        rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
6392			vlan_removal_en, tx_switching, hw_lro_enable);
6393
6394        if (rc) {
6395                QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6396                return rc;
6397        }
6398
6399	QL_DPRINT2(ha, "Start vport ramrod passed, "
6400		"vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6401		vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
6402
6403        for_each_rss(i) {
6404		struct ecore_rxq_start_ret_params rx_ret_params;
6405		struct ecore_txq_start_ret_params tx_ret_params;
6406
6407                fp = &ha->fp_array[i];
6408        	p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6409
6410		bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6411		bzero(&rx_ret_params,
6412			sizeof (struct ecore_rxq_start_ret_params));
6413
6414		qparams.queue_id = i ;
6415		qparams.vport_id = vport_id;
6416		qparams.stats_id = vport_id;
6417		qparams.p_sb = fp->sb_info;
6418		qparams.sb_idx = RX_PI;
6419
6420
6421		rc = ecore_eth_rx_queue_start(p_hwfn,
6422			p_hwfn->hw_info.opaque_fid,
6423			&qparams,
6424			fp->rxq->rx_buf_size,	/* bd_max_bytes */
6425			/* bd_chain_phys_addr */
6426			fp->rxq->rx_bd_ring.p_phys_addr,
6427			/* cqe_pbl_addr */
6428			ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6429			/* cqe_pbl_size */
6430			ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6431			&rx_ret_params);
6432
6433                if (rc) {
6434                	QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6435                        return rc;
6436                }
6437
6438		fp->rxq->hw_rxq_prod_addr	= rx_ret_params.p_prod;
6439		fp->rxq->handle			= rx_ret_params.p_handle;
6440                fp->rxq->hw_cons_ptr		=
6441				&fp->sb_info->sb_virt->pi_array[RX_PI];
6442
6443                qlnx_update_rx_prod(p_hwfn, fp->rxq);
6444
6445                for (tc = 0; tc < ha->num_tc; tc++) {
6446                        struct qlnx_tx_queue *txq = fp->txq[tc];
6447
6448			bzero(&qparams,
6449				sizeof(struct ecore_queue_start_common_params));
6450			bzero(&tx_ret_params,
6451				sizeof (struct ecore_txq_start_ret_params));
6452
6453			qparams.queue_id = txq->index / cdev->num_hwfns ;
6454			qparams.vport_id = vport_id;
6455			qparams.stats_id = vport_id;
6456			qparams.p_sb = fp->sb_info;
6457			qparams.sb_idx = TX_PI(tc);
6458
6459			rc = ecore_eth_tx_queue_start(p_hwfn,
6460				p_hwfn->hw_info.opaque_fid,
6461				&qparams, tc,
6462				/* bd_chain_phys_addr */
6463				ecore_chain_get_pbl_phys(&txq->tx_pbl),
6464				ecore_chain_get_page_cnt(&txq->tx_pbl),
6465				&tx_ret_params);
6466
6467                        if (rc) {
6468                		QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6469					   txq->index, rc);
6470                                return rc;
6471                        }
6472
6473			txq->doorbell_addr = tx_ret_params.p_doorbell;
6474			txq->handle = tx_ret_params.p_handle;
6475
6476                        txq->hw_cons_ptr =
6477                                &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6478                        SET_FIELD(txq->tx_db.data.params,
6479                                  ETH_DB_DATA_DEST, DB_DEST_XCM);
6480                        SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6481                                  DB_AGG_CMD_SET);
6482                        SET_FIELD(txq->tx_db.data.params,
6483                                  ETH_DB_DATA_AGG_VAL_SEL,
6484                                  DQ_XCM_ETH_TX_BD_PROD_CMD);
6485
6486                        txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6487                }
6488        }
6489
6490        /* Fill struct with RSS params */
6491        if (ha->num_rss > 1) {
6492
6493                rss_params->update_rss_config = 1;
6494                rss_params->rss_enable = 1;
6495                rss_params->update_rss_capabilities = 1;
6496                rss_params->update_rss_ind_table = 1;
6497                rss_params->update_rss_key = 1;
6498                rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6499                                       ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6500                rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6501
6502                for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6503                	fp = &ha->fp_array[(i % ha->num_rss)];
6504                        rss_params->rss_ind_table[i] = fp->rxq->handle;
6505		}
6506
6507                for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6508			rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6509
6510        } else {
6511                memset(rss_params, 0, sizeof(*rss_params));
6512        }
6513
6514
6515        /* Prepare and send the vport enable */
6516        memset(&vport_update_params, 0, sizeof(vport_update_params));
6517        vport_update_params.vport_id = vport_id;
6518        vport_update_params.update_vport_active_tx_flg = 1;
6519        vport_update_params.vport_active_tx_flg = 1;
6520        vport_update_params.update_vport_active_rx_flg = 1;
6521        vport_update_params.vport_active_rx_flg = 1;
6522        vport_update_params.rss_params = rss_params;
6523        vport_update_params.update_inner_vlan_removal_flg = 1;
6524        vport_update_params.inner_vlan_removal_flg = 1;
6525
6526	if (hw_lro_enable) {
6527		memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6528
6529		tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6530
6531		tpa_params.update_tpa_en_flg = 1;
6532		tpa_params.tpa_ipv4_en_flg = 1;
6533		tpa_params.tpa_ipv6_en_flg = 1;
6534
6535		tpa_params.update_tpa_param_flg = 1;
6536		tpa_params.tpa_pkt_split_flg = 0;
6537		tpa_params.tpa_hdr_data_split_flg = 0;
6538		tpa_params.tpa_gro_consistent_flg = 0;
6539		tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6540		tpa_params.tpa_max_size = (uint16_t)(-1);
6541		tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
6542		tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
6543
6544		vport_update_params.sge_tpa_params = &tpa_params;
6545	}
6546
6547        rc = qlnx_update_vport(cdev, &vport_update_params);
6548        if (rc) {
6549		QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6550                return rc;
6551        }
6552
6553        return 0;
6554}
6555
6556static int
6557qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6558	struct qlnx_tx_queue *txq)
6559{
6560	uint16_t	hw_bd_cons;
6561	uint16_t	ecore_cons_idx;
6562
6563	QL_DPRINT2(ha, "enter\n");
6564
6565	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6566
6567	while (hw_bd_cons !=
6568		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6569
6570		mtx_lock(&fp->tx_mtx);
6571
6572		(void)qlnx_tx_int(ha, fp, txq);
6573
6574		mtx_unlock(&fp->tx_mtx);
6575
6576		qlnx_mdelay(__func__, 2);
6577
6578		hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6579	}
6580
6581	QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6582
6583        return 0;
6584}
6585
6586static int
6587qlnx_stop_queues(qlnx_host_t *ha)
6588{
6589        struct qlnx_update_vport_params	vport_update_params;
6590        struct ecore_dev		*cdev;
6591        struct qlnx_fastpath		*fp;
6592        int				rc, tc, i;
6593
6594        cdev = &ha->cdev;
6595
6596        /* Disable the vport */
6597
6598        memset(&vport_update_params, 0, sizeof(vport_update_params));
6599
6600        vport_update_params.vport_id = 0;
6601        vport_update_params.update_vport_active_tx_flg = 1;
6602        vport_update_params.vport_active_tx_flg = 0;
6603        vport_update_params.update_vport_active_rx_flg = 1;
6604        vport_update_params.vport_active_rx_flg = 0;
6605        vport_update_params.rss_params = &ha->rss_params;
6606        vport_update_params.rss_params->update_rss_config = 0;
6607        vport_update_params.rss_params->rss_enable = 0;
6608        vport_update_params.update_inner_vlan_removal_flg = 0;
6609        vport_update_params.inner_vlan_removal_flg = 0;
6610
6611	QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6612
6613        rc = qlnx_update_vport(cdev, &vport_update_params);
6614        if (rc) {
6615		QL_DPRINT1(ha, "Failed to update vport\n");
6616                return rc;
6617        }
6618
6619        /* Flush Tx queues. If needed, request drain from MCP */
6620        for_each_rss(i) {
6621                fp = &ha->fp_array[i];
6622
6623                for (tc = 0; tc < ha->num_tc; tc++) {
6624                        struct qlnx_tx_queue *txq = fp->txq[tc];
6625
6626                        rc = qlnx_drain_txq(ha, fp, txq);
6627                        if (rc)
6628                                return rc;
6629                }
6630        }
6631
6632        /* Stop all Queues in reverse order*/
6633        for (i = ha->num_rss - 1; i >= 0; i--) {
6634
6635		struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6636
6637                fp = &ha->fp_array[i];
6638
6639                /* Stop the Tx Queue(s)*/
6640                for (tc = 0; tc < ha->num_tc; tc++) {
6641			int tx_queue_id;
6642
6643			tx_queue_id = tc * ha->num_rss + i;
6644			rc = ecore_eth_tx_queue_stop(p_hwfn,
6645					fp->txq[tc]->handle);
6646
6647                        if (rc) {
6648				QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6649					   tx_queue_id);
6650                                return rc;
6651                        }
6652                }
6653
6654                /* Stop the Rx Queue*/
6655		rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6656				false);
6657                if (rc) {
6658                        QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6659                        return rc;
6660                }
6661        }
6662
6663        /* Stop the vport */
6664	for_each_hwfn(cdev, i) {
6665
6666		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6667
6668		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6669
6670		if (rc) {
6671                        QL_DPRINT1(ha, "Failed to stop VPORT\n");
6672			return rc;
6673		}
6674	}
6675
6676        return rc;
6677}
6678
6679static int
6680qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6681	enum ecore_filter_opcode opcode,
6682	unsigned char mac[ETH_ALEN])
6683{
6684	struct ecore_filter_ucast	ucast;
6685	struct ecore_dev		*cdev;
6686	int				rc;
6687
6688	cdev = &ha->cdev;
6689
6690	bzero(&ucast, sizeof(struct ecore_filter_ucast));
6691
6692        ucast.opcode = opcode;
6693        ucast.type = ECORE_FILTER_MAC;
6694        ucast.is_rx_filter = 1;
6695        ucast.vport_to_add_to = 0;
6696        memcpy(&ucast.mac[0], mac, ETH_ALEN);
6697
6698	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6699
6700        return (rc);
6701}
6702
6703static int
6704qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6705{
6706	struct ecore_filter_ucast	ucast;
6707	struct ecore_dev		*cdev;
6708	int				rc;
6709
6710	bzero(&ucast, sizeof(struct ecore_filter_ucast));
6711
6712	ucast.opcode = ECORE_FILTER_REPLACE;
6713	ucast.type = ECORE_FILTER_MAC;
6714	ucast.is_rx_filter = 1;
6715
6716	cdev = &ha->cdev;
6717
6718	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6719
6720	return (rc);
6721}
6722
6723static int
6724qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6725{
6726	struct ecore_filter_mcast	*mcast;
6727	struct ecore_dev		*cdev;
6728	int				rc, i;
6729
6730	cdev = &ha->cdev;
6731
6732	mcast = &ha->ecore_mcast;
6733	bzero(mcast, sizeof(struct ecore_filter_mcast));
6734
6735	mcast->opcode = ECORE_FILTER_REMOVE;
6736
6737	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
6738
6739		if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6740			ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6741			ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6742
6743			memcpy(&mcast->mac[i], &ha->mcast[i].addr[0], ETH_ALEN);
6744			mcast->num_mc_addrs++;
6745		}
6746	}
6747	mcast = &ha->ecore_mcast;
6748
6749	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
6750
6751	bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
6752	ha->nmcast = 0;
6753
6754	return (rc);
6755}
6756
6757static int
6758qlnx_clean_filters(qlnx_host_t *ha)
6759{
6760        int	rc = 0;
6761
6762	/* Remove all unicast macs */
6763	rc = qlnx_remove_all_ucast_mac(ha);
6764	if (rc)
6765		return rc;
6766
6767	/* Remove all multicast macs */
6768	rc = qlnx_remove_all_mcast_mac(ha);
6769	if (rc)
6770		return rc;
6771
6772        rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
6773
6774        return (rc);
6775}
6776
6777static int
6778qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
6779{
6780	struct ecore_filter_accept_flags	accept;
6781	int					rc = 0;
6782	struct ecore_dev			*cdev;
6783
6784	cdev = &ha->cdev;
6785
6786	bzero(&accept, sizeof(struct ecore_filter_accept_flags));
6787
6788	accept.update_rx_mode_config = 1;
6789	accept.rx_accept_filter = filter;
6790
6791	accept.update_tx_mode_config = 1;
6792	accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
6793		ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
6794
6795	rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
6796			ECORE_SPQ_MODE_CB, NULL);
6797
6798	return (rc);
6799}
6800
6801static int
6802qlnx_set_rx_mode(qlnx_host_t *ha)
6803{
6804	int	rc = 0;
6805	uint8_t	filter;
6806
6807	rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
6808        if (rc)
6809                return rc;
6810
6811	rc = qlnx_remove_all_mcast_mac(ha);
6812        if (rc)
6813                return rc;
6814
6815	filter = ECORE_ACCEPT_UCAST_MATCHED |
6816			ECORE_ACCEPT_MCAST_MATCHED |
6817			ECORE_ACCEPT_BCAST;
6818	ha->filter = filter;
6819
6820	rc = qlnx_set_rx_accept_filter(ha, filter);
6821
6822	return (rc);
6823}
6824
6825static int
6826qlnx_set_link(qlnx_host_t *ha, bool link_up)
6827{
6828        int			i, rc = 0;
6829	struct ecore_dev	*cdev;
6830	struct ecore_hwfn	*hwfn;
6831	struct ecore_ptt	*ptt;
6832
6833	cdev = &ha->cdev;
6834
6835        for_each_hwfn(cdev, i) {
6836
6837                hwfn = &cdev->hwfns[i];
6838
6839                ptt = ecore_ptt_acquire(hwfn);
6840       	        if (!ptt)
6841                        return -EBUSY;
6842
6843                rc = ecore_mcp_set_link(hwfn, ptt, link_up);
6844
6845                ecore_ptt_release(hwfn, ptt);
6846
6847                if (rc)
6848                        return rc;
6849        }
6850        return (rc);
6851}
6852
6853#if __FreeBSD_version >= 1100000
6854static uint64_t
6855qlnx_get_counter(if_t ifp, ift_counter cnt)
6856{
6857	qlnx_host_t *ha;
6858	uint64_t count;
6859
6860        ha = (qlnx_host_t *)if_getsoftc(ifp);
6861
6862        switch (cnt) {
6863
6864        case IFCOUNTER_IPACKETS:
6865		count = ha->hw_stats.common.rx_ucast_pkts +
6866			ha->hw_stats.common.rx_mcast_pkts +
6867			ha->hw_stats.common.rx_bcast_pkts;
6868		break;
6869
6870        case IFCOUNTER_IERRORS:
6871		count = ha->hw_stats.common.rx_crc_errors +
6872			ha->hw_stats.common.rx_align_errors +
6873			ha->hw_stats.common.rx_oversize_packets +
6874			ha->hw_stats.common.rx_undersize_packets;
6875		break;
6876
6877        case IFCOUNTER_OPACKETS:
6878		count = ha->hw_stats.common.tx_ucast_pkts +
6879			ha->hw_stats.common.tx_mcast_pkts +
6880			ha->hw_stats.common.tx_bcast_pkts;
6881		break;
6882
6883        case IFCOUNTER_OERRORS:
6884                count = ha->hw_stats.common.tx_err_drop_pkts;
6885		break;
6886
6887        case IFCOUNTER_COLLISIONS:
6888                return (0);
6889
6890        case IFCOUNTER_IBYTES:
6891		count = ha->hw_stats.common.rx_ucast_bytes +
6892			ha->hw_stats.common.rx_mcast_bytes +
6893			ha->hw_stats.common.rx_bcast_bytes;
6894		break;
6895
6896        case IFCOUNTER_OBYTES:
6897		count = ha->hw_stats.common.tx_ucast_bytes +
6898			ha->hw_stats.common.tx_mcast_bytes +
6899			ha->hw_stats.common.tx_bcast_bytes;
6900		break;
6901
6902        case IFCOUNTER_IMCASTS:
6903		count = ha->hw_stats.common.rx_mcast_bytes;
6904		break;
6905
6906        case IFCOUNTER_OMCASTS:
6907		count = ha->hw_stats.common.tx_mcast_bytes;
6908		break;
6909
6910        case IFCOUNTER_IQDROPS:
6911        case IFCOUNTER_OQDROPS:
6912        case IFCOUNTER_NOPROTO:
6913
6914        default:
6915                return (if_get_counter_default(ifp, cnt));
6916        }
6917	return (count);
6918}
6919#endif
6920
6921
6922static void
6923qlnx_timer(void *arg)
6924{
6925	qlnx_host_t	*ha;
6926
6927	ha = (qlnx_host_t *)arg;
6928
6929       	ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
6930
6931	if (ha->storm_stats_gather)
6932		qlnx_sample_storm_stats(ha);
6933
6934	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
6935
6936	return;
6937}
6938
6939static int
6940qlnx_load(qlnx_host_t *ha)
6941{
6942	int			i;
6943	int			rc = 0;
6944	struct ecore_dev	*cdev;
6945        device_t		dev;
6946
6947	cdev = &ha->cdev;
6948        dev = ha->pci_dev;
6949
6950	QL_DPRINT2(ha, "enter\n");
6951
6952        rc = qlnx_alloc_mem_arrays(ha);
6953        if (rc)
6954                goto qlnx_load_exit0;
6955
6956        qlnx_init_fp(ha);
6957
6958        rc = qlnx_alloc_mem_load(ha);
6959        if (rc)
6960                goto qlnx_load_exit1;
6961
6962        QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
6963		   ha->num_rss, ha->num_tc);
6964
6965	for (i = 0; i < ha->num_rss; i++) {
6966
6967		if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
6968                        (INTR_TYPE_NET | INTR_MPSAFE),
6969                        NULL, qlnx_fp_isr, &ha->irq_vec[i],
6970                        &ha->irq_vec[i].handle))) {
6971
6972                        QL_DPRINT1(ha, "could not setup interrupt\n");
6973                        goto qlnx_load_exit2;
6974		}
6975
6976		QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
6977			 irq %p handle %p\n", i,
6978			ha->irq_vec[i].irq_rid,
6979			ha->irq_vec[i].irq, ha->irq_vec[i].handle);
6980
6981		bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
6982	}
6983
6984        rc = qlnx_start_queues(ha);
6985        if (rc)
6986                goto qlnx_load_exit2;
6987
6988        QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
6989
6990        /* Add primary mac and set Rx filters */
6991        rc = qlnx_set_rx_mode(ha);
6992        if (rc)
6993                goto qlnx_load_exit2;
6994
6995        /* Ask for link-up using current configuration */
6996	qlnx_set_link(ha, true);
6997
6998        ha->state = QLNX_STATE_OPEN;
6999
7000	bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7001
7002	if (ha->flags.callout_init)
7003        	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7004
7005        goto qlnx_load_exit0;
7006
7007qlnx_load_exit2:
7008        qlnx_free_mem_load(ha);
7009
7010qlnx_load_exit1:
7011        ha->num_rss = 0;
7012
7013qlnx_load_exit0:
7014	QL_DPRINT2(ha, "exit [%d]\n", rc);
7015        return rc;
7016}
7017
7018static void
7019qlnx_drain_soft_lro(qlnx_host_t *ha)
7020{
7021#ifdef QLNX_SOFT_LRO
7022
7023	struct ifnet	*ifp;
7024	int		i;
7025
7026	ifp = ha->ifp;
7027
7028
7029	if (ifp->if_capenable & IFCAP_LRO) {
7030
7031	        for (i = 0; i < ha->num_rss; i++) {
7032
7033			struct qlnx_fastpath *fp = &ha->fp_array[i];
7034			struct lro_ctrl *lro;
7035
7036			lro = &fp->rxq->lro;
7037
7038#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
7039
7040			tcp_lro_flush_all(lro);
7041
7042#else
7043			struct lro_entry *queued;
7044
7045			while ((!SLIST_EMPTY(&lro->lro_active))){
7046				queued = SLIST_FIRST(&lro->lro_active);
7047				SLIST_REMOVE_HEAD(&lro->lro_active, next);
7048				tcp_lro_flush(lro, queued);
7049			}
7050
7051#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
7052
7053                }
7054	}
7055
7056#endif /* #ifdef QLNX_SOFT_LRO */
7057
7058	return;
7059}
7060
7061static void
7062qlnx_unload(qlnx_host_t *ha)
7063{
7064	struct ecore_dev	*cdev;
7065        device_t		dev;
7066	int			i;
7067
7068	cdev = &ha->cdev;
7069        dev = ha->pci_dev;
7070
7071	QL_DPRINT2(ha, "enter\n");
7072        QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);
7073
7074	if (ha->state == QLNX_STATE_OPEN) {
7075
7076		qlnx_set_link(ha, false);
7077		qlnx_clean_filters(ha);
7078		qlnx_stop_queues(ha);
7079		ecore_hw_stop_fastpath(cdev);
7080
7081		for (i = 0; i < ha->num_rss; i++) {
7082			if (ha->irq_vec[i].handle) {
7083				(void)bus_teardown_intr(dev,
7084					ha->irq_vec[i].irq,
7085					ha->irq_vec[i].handle);
7086				ha->irq_vec[i].handle = NULL;
7087			}
7088		}
7089
7090		qlnx_drain_fp_taskqueues(ha);
7091		qlnx_drain_soft_lro(ha);
7092        	qlnx_free_mem_load(ha);
7093	}
7094
7095	if (ha->flags.callout_init)
7096		callout_drain(&ha->qlnx_callout);
7097
7098	qlnx_mdelay(__func__, 1000);
7099
7100        ha->state = QLNX_STATE_CLOSED;
7101
7102	QL_DPRINT2(ha, "exit\n");
7103	return;
7104}
7105
7106static int
7107qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7108{
7109	int			rval = -1;
7110	struct ecore_hwfn	*p_hwfn;
7111	struct ecore_ptt	*p_ptt;
7112
7113	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7114
7115	p_hwfn = &ha->cdev.hwfns[hwfn_index];
7116	p_ptt = ecore_ptt_acquire(p_hwfn);
7117
7118        if (!p_ptt) {
7119		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7120                return (rval);
7121        }
7122
7123        rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7124
7125	if (rval == DBG_STATUS_OK)
7126                rval = 0;
7127        else {
7128		QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7129			"[0x%x]\n", rval);
7130	}
7131
7132        ecore_ptt_release(p_hwfn, p_ptt);
7133
7134        return (rval);
7135}
7136
7137static int
7138qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7139{
7140	int			rval = -1;
7141	struct ecore_hwfn	*p_hwfn;
7142	struct ecore_ptt	*p_ptt;
7143
7144	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7145
7146	p_hwfn = &ha->cdev.hwfns[hwfn_index];
7147	p_ptt = ecore_ptt_acquire(p_hwfn);
7148
7149        if (!p_ptt) {
7150		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7151                return (rval);
7152        }
7153
7154        rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7155
7156	if (rval == DBG_STATUS_OK)
7157                rval = 0;
7158        else {
7159		QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7160			" [0x%x]\n", rval);
7161	}
7162
7163        ecore_ptt_release(p_hwfn, p_ptt);
7164
7165        return (rval);
7166}
7167
7168
7169static void
7170qlnx_sample_storm_stats(qlnx_host_t *ha)
7171{
7172        int			i, index;
7173        struct ecore_dev	*cdev;
7174	qlnx_storm_stats_t	*s_stats;
7175	uint32_t		reg;
7176        struct ecore_ptt	*p_ptt;
7177        struct ecore_hwfn	*hwfn;
7178
7179	if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7180		ha->storm_stats_gather = 0;
7181		return;
7182	}
7183
7184        cdev = &ha->cdev;
7185
7186        for_each_hwfn(cdev, i) {
7187
7188                hwfn = &cdev->hwfns[i];
7189
7190                p_ptt = ecore_ptt_acquire(hwfn);
7191                if (!p_ptt)
7192                        return;
7193
7194		index = ha->storm_stats_index +
7195				(i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7196
7197		s_stats = &ha->storm_stats[index];
7198
7199		/* XSTORM */
7200		reg = XSEM_REG_FAST_MEMORY +
7201				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7202		s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7203
7204		reg = XSEM_REG_FAST_MEMORY +
7205				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7206		s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7207
7208		reg = XSEM_REG_FAST_MEMORY +
7209				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7210		s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7211
7212		reg = XSEM_REG_FAST_MEMORY +
7213				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7214		s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7215
7216		/* YSTORM */
7217		reg = YSEM_REG_FAST_MEMORY +
7218				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7219		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7220
7221		reg = YSEM_REG_FAST_MEMORY +
7222				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7223		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7224
7225		reg = YSEM_REG_FAST_MEMORY +
7226				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7227		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7228
7229		reg = YSEM_REG_FAST_MEMORY +
7230				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7231		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7232
7233		/* PSTORM */
7234		reg = PSEM_REG_FAST_MEMORY +
7235				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7236		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7237
7238		reg = PSEM_REG_FAST_MEMORY +
7239				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7240		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7241
7242		reg = PSEM_REG_FAST_MEMORY +
7243				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7244		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7245
7246		reg = PSEM_REG_FAST_MEMORY +
7247				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7248		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7249
7250		/* TSTORM */
7251		reg = TSEM_REG_FAST_MEMORY +
7252				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7253		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7254
7255		reg = TSEM_REG_FAST_MEMORY +
7256				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7257		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7258
7259		reg = TSEM_REG_FAST_MEMORY +
7260				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7261		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7262
7263		reg = TSEM_REG_FAST_MEMORY +
7264				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7265		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7266
7267		/* MSTORM */
7268		reg = MSEM_REG_FAST_MEMORY +
7269				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7270		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7271
7272		reg = MSEM_REG_FAST_MEMORY +
7273				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7274		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7275
7276		reg = MSEM_REG_FAST_MEMORY +
7277				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7278		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7279
7280		reg = MSEM_REG_FAST_MEMORY +
7281				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7282		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7283
7284		/* USTORM */
7285		reg = USEM_REG_FAST_MEMORY +
7286				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7287		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7288
7289		reg = USEM_REG_FAST_MEMORY +
7290				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7291		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7292
7293		reg = USEM_REG_FAST_MEMORY +
7294				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7295		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7296
7297		reg = USEM_REG_FAST_MEMORY +
7298				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7299		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7300
7301                ecore_ptt_release(hwfn, p_ptt);
7302        }
7303
7304	ha->storm_stats_index++;
7305
7306        return;
7307}
7308
7309/*
7310 * Name: qlnx_dump_buf8
7311 * Function: dumps a buffer as bytes
7312 */
7313static void
7314qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
7315{
7316        device_t	dev;
7317        uint32_t	i = 0;
7318        uint8_t		*buf;
7319
7320        dev = ha->pci_dev;
7321        buf = dbuf;
7322
7323        device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
7324
7325        while (len >= 16) {
7326                device_printf(dev,"0x%08x:"
7327                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7328                        " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7329                        buf[0], buf[1], buf[2], buf[3],
7330                        buf[4], buf[5], buf[6], buf[7],
7331                        buf[8], buf[9], buf[10], buf[11],
7332                        buf[12], buf[13], buf[14], buf[15]);
7333                i += 16;
7334                len -= 16;
7335                buf += 16;
7336        }
7337        switch (len) {
7338        case 1:
7339                device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
7340                break;
7341        case 2:
7342                device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
7343                break;
7344        case 3:
7345                device_printf(dev,"0x%08x: %02x %02x %02x\n",
7346                        i, buf[0], buf[1], buf[2]);
7347                break;
7348        case 4:
7349                device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
7350                        buf[0], buf[1], buf[2], buf[3]);
7351                break;
7352        case 5:
7353                device_printf(dev,"0x%08x:"
7354                        " %02x %02x %02x %02x %02x\n", i,
7355                        buf[0], buf[1], buf[2], buf[3], buf[4]);
7356                break;
7357        case 6:
7358                device_printf(dev,"0x%08x:"
7359                        " %02x %02x %02x %02x %02x %02x\n", i,
7360                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
7361                break;
7362        case 7:
7363                device_printf(dev,"0x%08x:"
7364                        " %02x %02x %02x %02x %02x %02x %02x\n", i,
7365                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
7366                break;
7367        case 8:
7368                device_printf(dev,"0x%08x:"
7369                        " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7370                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7371                        buf[7]);
7372                break;
7373        case 9:
7374                device_printf(dev,"0x%08x:"
7375                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7376                        " %02x\n", i,
7377                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7378                        buf[7], buf[8]);
7379                break;
7380        case 10:
7381                device_printf(dev,"0x%08x:"
7382                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7383                        " %02x %02x\n", i,
7384                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7385                        buf[7], buf[8], buf[9]);
7386                break;
7387        case 11:
7388                device_printf(dev,"0x%08x:"
7389                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7390                        " %02x %02x %02x\n", i,
7391                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7392                        buf[7], buf[8], buf[9], buf[10]);
7393                break;
7394        case 12:
7395                device_printf(dev,"0x%08x:"
7396                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7397                        " %02x %02x %02x %02x\n", i,
7398                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7399                        buf[7], buf[8], buf[9], buf[10], buf[11]);
7400                break;
7401        case 13:
7402                device_printf(dev,"0x%08x:"
7403                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7404                        " %02x %02x %02x %02x %02x\n", i,
7405                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7406                        buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
7407                break;
7408        case 14:
7409                device_printf(dev,"0x%08x:"
7410                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7411                        " %02x %02x %02x %02x %02x %02x\n", i,
7412                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7413                        buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7414                        buf[13]);
7415                break;
7416        case 15:
7417                device_printf(dev,"0x%08x:"
7418                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7419                        " %02x %02x %02x %02x %02x %02x %02x\n", i,
7420                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7421                        buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7422                        buf[13], buf[14]);
7423                break;
7424        default:
7425                break;
7426        }
7427
7428        device_printf(dev, "%s: %s dump end\n", __func__, msg);
7429
7430        return;
7431}
7432
7433