qlnx_os.c revision 322849
1/*
2 * Copyright (c) 2017-2018 Cavium, Inc.
3 * All rights reserved.
4 *
5 *  Redistribution and use in source and binary forms, with or without
6 *  modification, are permitted provided that the following conditions
7 *  are met:
8 *
9 *  1. Redistributions of source code must retain the above copyright
10 *     notice, this list of conditions and the following disclaimer.
11 *  2. Redistributions in binary form must reproduce the above copyright
12 *     notice, this list of conditions and the following disclaimer in the
13 *     documentation and/or other materials provided with the distribution.
14 *
15 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 *  POSSIBILITY OF SUCH DAMAGE.
26 */
27
28
29/*
30 * File: qlnx_os.c
31 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: stable/11/sys/dev/qlnx/qlnxe/qlnx_os.c 322849 2017-08-24 17:36:10Z davidcs $");
36
37#include "qlnx_os.h"
38#include "bcm_osal.h"
39#include "reg_addr.h"
40#include "ecore_gtt_reg_addr.h"
41#include "ecore.h"
42#include "ecore_chain.h"
43#include "ecore_status.h"
44#include "ecore_hw.h"
45#include "ecore_rt_defs.h"
46#include "ecore_init_ops.h"
47#include "ecore_int.h"
48#include "ecore_cxt.h"
49#include "ecore_spq.h"
50#include "ecore_init_fw_funcs.h"
51#include "ecore_sp_commands.h"
52#include "ecore_dev_api.h"
53#include "ecore_l2_api.h"
54#include "ecore_mcp.h"
55#include "ecore_hw_defs.h"
56#include "mcp_public.h"
57#include "ecore_iro.h"
58#include "nvm_cfg.h"
59#include "ecore_dev_api.h"
60#include "ecore_dbg_fw_funcs.h"
61
62#include "qlnx_ioctl.h"
63#include "qlnx_def.h"
64#include "qlnx_ver.h"
65#include <sys/smp.h>
66
67
68/*
69 * static functions
70 */
71/*
72 * ioctl related functions
73 */
74static void qlnx_add_sysctls(qlnx_host_t *ha);
75
76/*
77 * main driver
78 */
79static void qlnx_release(qlnx_host_t *ha);
80static void qlnx_fp_isr(void *arg);
81static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
82static void qlnx_init(void *arg);
83static void qlnx_init_locked(qlnx_host_t *ha);
84static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
85static int qlnx_set_promisc(qlnx_host_t *ha);
86static int qlnx_set_allmulti(qlnx_host_t *ha);
87static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
88static int qlnx_media_change(struct ifnet *ifp);
89static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
90static void qlnx_stop(qlnx_host_t *ha);
91static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
92		struct mbuf **m_headp);
93static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
94static uint32_t qlnx_get_optics(qlnx_host_t *ha,
95			struct qlnx_link_output *if_link);
96static int qlnx_transmit(struct ifnet *ifp, struct mbuf  *mp);
97static void qlnx_qflush(struct ifnet *ifp);
98
99static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
100static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
101static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
102static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
103static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
104static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);
105
106static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
107static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);
108
109static int qlnx_nic_setup(struct ecore_dev *cdev,
110		struct ecore_pf_params *func_params);
111static int qlnx_nic_start(struct ecore_dev *cdev);
112static int qlnx_slowpath_start(qlnx_host_t *ha);
113static int qlnx_slowpath_stop(qlnx_host_t *ha);
114static int qlnx_init_hw(qlnx_host_t *ha);
115static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
116		char ver_str[VER_SIZE]);
117static void qlnx_unload(qlnx_host_t *ha);
118static int qlnx_load(qlnx_host_t *ha);
119static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
120		uint32_t add_mac);
121static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
122		uint32_t len);
123static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
124static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
125static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
126		struct qlnx_rx_queue *rxq);
127static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
128static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
129		int hwfn_index);
130static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
131		int hwfn_index);
132static void qlnx_timer(void *arg);
133static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
134static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
135static void qlnx_trigger_dump(qlnx_host_t *ha);
136static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
137		struct qlnx_tx_queue *txq);
138static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
139		int lro_enable);
140static void qlnx_fp_taskqueue(void *context, int pending);
141static void qlnx_sample_storm_stats(qlnx_host_t *ha);
142static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
143		struct qlnx_agg_info *tpa);
144static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);
145
146#if __FreeBSD_version >= 1100000
147static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
148#endif
149
150
151/*
152 * Hooks to the Operating Systems
153 */
154static int qlnx_pci_probe (device_t);
155static int qlnx_pci_attach (device_t);
156static int qlnx_pci_detach (device_t);
157
158static device_method_t qlnx_pci_methods[] = {
159	/* Device interface */
160	DEVMETHOD(device_probe, qlnx_pci_probe),
161	DEVMETHOD(device_attach, qlnx_pci_attach),
162	DEVMETHOD(device_detach, qlnx_pci_detach),
163	{ 0, 0 }
164};
165
166static driver_t qlnx_pci_driver = {
167	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
168};
169
170static devclass_t qlnx_devclass;
171
172MODULE_VERSION(if_qlnxe,1);
173DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);
174
175MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
176MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
177
178MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");
179
180
181char qlnx_dev_str[64];
182char qlnx_ver_str[VER_SIZE];
183char qlnx_name_str[NAME_SIZE];
184
185/*
186 * Some PCI Configuration Space Related Defines
187 */
188
189#ifndef PCI_VENDOR_QLOGIC
190#define PCI_VENDOR_QLOGIC		0x1077
191#endif
192
193/* 40G Adapter QLE45xxx*/
194#ifndef QLOGIC_PCI_DEVICE_ID_1634
195#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
196#endif
197
198/* 100G Adapter QLE45xxx*/
199#ifndef QLOGIC_PCI_DEVICE_ID_1644
200#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
201#endif
202
203/* 25G Adapter QLE45xxx*/
204#ifndef QLOGIC_PCI_DEVICE_ID_1656
205#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
206#endif
207
208/* 50G Adapter QLE45xxx*/
209#ifndef QLOGIC_PCI_DEVICE_ID_1654
210#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
211#endif
212
213/* 10G/25G/40G Adapter QLE41xxx*/
214#ifndef QLOGIC_PCI_DEVICE_ID_8070
215#define QLOGIC_PCI_DEVICE_ID_8070	0x8070
216#endif
217
218static int
219qlnx_valid_device(device_t dev)
220{
221        uint16_t	device_id;
222
223        device_id = pci_get_device(dev);
224
225        if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
226                (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
227                (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
228                (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
229                (device_id == QLOGIC_PCI_DEVICE_ID_8070))
230                return 0;
231
232        return -1;
233}
234
235/*
236 * Name:	qlnx_pci_probe
237 * Function:	Validate the PCI device to be a QLA80XX device
238 */
239static int
240qlnx_pci_probe(device_t dev)
241{
242	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
243		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
244	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");
245
246	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
247                return (ENXIO);
248	}
249
250        switch (pci_get_device(dev)) {
251
252        case QLOGIC_PCI_DEVICE_ID_1644:
253		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
254			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
255			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
256			QLNX_VERSION_BUILD);
257                device_set_desc_copy(dev, qlnx_dev_str);
258
259                break;
260
261        case QLOGIC_PCI_DEVICE_ID_1634:
262		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
263			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
264			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
265			QLNX_VERSION_BUILD);
266                device_set_desc_copy(dev, qlnx_dev_str);
267
268                break;
269
270        case QLOGIC_PCI_DEVICE_ID_1656:
271		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
272			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
273			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
274			QLNX_VERSION_BUILD);
275                device_set_desc_copy(dev, qlnx_dev_str);
276
277                break;
278
279        case QLOGIC_PCI_DEVICE_ID_1654:
280		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
281			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
282			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
283			QLNX_VERSION_BUILD);
284                device_set_desc_copy(dev, qlnx_dev_str);
285
286                break;
287
288	case QLOGIC_PCI_DEVICE_ID_8070:
289		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
290			"Qlogic 10GbE/25GbE/40GbE PCI CNA (AH) "
291			"Adapter-Ethernet Function",
292			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
293			QLNX_VERSION_BUILD);
294		device_set_desc_copy(dev, qlnx_dev_str);
295
296		break;
297
298        default:
299                return (ENXIO);
300        }
301
302        return (BUS_PROBE_DEFAULT);
303}
304
305
306static void
307qlnx_sp_intr(void *arg)
308{
309	struct ecore_hwfn	*p_hwfn;
310	qlnx_host_t		*ha;
311	int			i;
312
313	p_hwfn = arg;
314
315	if (p_hwfn == NULL) {
316		printf("%s: spurious slowpath intr\n", __func__);
317		return;
318	}
319
320	ha = (qlnx_host_t *)p_hwfn->p_dev;
321
322	QL_DPRINT2(ha, "enter\n");
323
324	for (i = 0; i < ha->cdev.num_hwfns; i++) {
325		if (&ha->cdev.hwfns[i] == p_hwfn) {
326			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
327			break;
328		}
329	}
330	QL_DPRINT2(ha, "exit\n");
331
332	return;
333}
334
335static void
336qlnx_sp_taskqueue(void *context, int pending)
337{
338	struct ecore_hwfn	*p_hwfn;
339
340	p_hwfn = context;
341
342	if (p_hwfn != NULL) {
343		qlnx_sp_isr(p_hwfn);
344	}
345	return;
346}
347
348static int
349qlnx_create_sp_taskqueues(qlnx_host_t *ha)
350{
351	int	i;
352	uint8_t	tq_name[32];
353
354	for (i = 0; i < ha->cdev.num_hwfns; i++) {
355
356                struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
357
358		bzero(tq_name, sizeof (tq_name));
359		snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);
360
361		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);
362
363		ha->sp_taskqueue[i] = taskqueue_create_fast(tq_name, M_NOWAIT,
364			 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
365
366		if (ha->sp_taskqueue[i] == NULL)
367			return (-1);
368
369		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
370			tq_name);
371
372		QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
373	}
374
375	return (0);
376}
377
378static void
379qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
380{
381	int	i;
382
383	for (i = 0; i < ha->cdev.num_hwfns; i++) {
384		if (ha->sp_taskqueue[i] != NULL) {
385			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
386			taskqueue_free(ha->sp_taskqueue[i]);
387		}
388	}
389	return;
390}
391
392static void
393qlnx_fp_taskqueue(void *context, int pending)
394{
395        struct qlnx_fastpath	*fp;
396        qlnx_host_t		*ha;
397        struct ifnet		*ifp;
398        struct mbuf		*mp;
399        int			ret;
400	struct thread		*cthread;
401
402#ifdef QLNX_RCV_IN_TASKQ
403	int			lro_enable;
404	int			rx_int = 0, total_rx_count = 0;
405
406#endif /* #ifdef QLNX_RCV_IN_TASKQ */
407
408        fp = context;
409
410        if (fp == NULL)
411                return;
412
413	cthread = curthread;
414
415	thread_lock(cthread);
416
417	if (!sched_is_bound(cthread))
418		sched_bind(cthread, fp->rss_id);
419
420	thread_unlock(cthread);
421
422        ha = (qlnx_host_t *)fp->edev;
423
424        ifp = ha->ifp;
425
426#ifdef QLNX_RCV_IN_TASKQ
427	{
428		lro_enable = ifp->if_capenable & IFCAP_LRO;
429
430		rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, lro_enable);
431
432		if (rx_int) {
433			fp->rx_pkts += rx_int;
434			total_rx_count += rx_int;
435		}
436
437#ifdef QLNX_SOFT_LRO
438		{
439			struct lro_ctrl *lro;
440
441			lro = &fp->rxq->lro;
442
443			if (lro_enable && total_rx_count) {
444
445#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
446
447				if (ha->dbg_trace_lro_cnt) {
448					if (lro->lro_mbuf_count & ~1023)
449						fp->lro_cnt_1024++;
450					else if (lro->lro_mbuf_count & ~511)
451						fp->lro_cnt_512++;
452					else if (lro->lro_mbuf_count & ~255)
453						fp->lro_cnt_256++;
454					else if (lro->lro_mbuf_count & ~127)
455						fp->lro_cnt_128++;
456					else if (lro->lro_mbuf_count & ~63)
457						fp->lro_cnt_64++;
458				}
459				tcp_lro_flush_all(lro);
460
461#else
462				struct lro_entry *queued;
463
464				while ((!SLIST_EMPTY(&lro->lro_active))) {
465					queued = SLIST_FIRST(&lro->lro_active);
466					SLIST_REMOVE_HEAD(&lro->lro_active, next);
467					tcp_lro_flush(lro, queued);
468				}
469#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
470			}
471		}
472#endif /* #ifdef QLNX_SOFT_LRO */
473
474		ecore_sb_update_sb_idx(fp->sb_info);
475		rmb();
476	}
477
478#endif /* #ifdef QLNX_RCV_IN_TASKQ */
479
480        mtx_lock(&fp->tx_mtx);
481
482        if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
483                IFF_DRV_RUNNING) || (!ha->link_up)) {
484
485                mtx_unlock(&fp->tx_mtx);
486                goto qlnx_fp_taskqueue_exit;
487        }
488
489        mp = drbr_peek(ifp, fp->tx_br);
490
491        while (mp != NULL) {
492
493		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
494			ret = qlnx_send(ha, fp, &mp);
495		} else {
496			ret = -1;
497		}
498
499                if (ret) {
500
501                        if (mp != NULL) {
502                                drbr_putback(ifp, fp->tx_br, mp);
503                        } else {
504                                fp->tx_pkts_processed++;
505                                drbr_advance(ifp, fp->tx_br);
506                        }
507
508                        mtx_unlock(&fp->tx_mtx);
509
510                        goto qlnx_fp_taskqueue_exit;
511
512                } else {
513                        drbr_advance(ifp, fp->tx_br);
514                        fp->tx_pkts_transmitted++;
515                        fp->tx_pkts_processed++;
516                }
517
518		if (fp->tx_ring_full)
519			break;
520
521                mp = drbr_peek(ifp, fp->tx_br);
522        }
523
524        mtx_unlock(&fp->tx_mtx);
525
526qlnx_fp_taskqueue_exit:
527
528#ifdef QLNX_RCV_IN_TASKQ
529	if (rx_int) {
530		if (fp->fp_taskqueue != NULL)
531			taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
532	} else {
533		if (fp->tx_ring_full) {
534			qlnx_mdelay(__func__, 100);
535		}
536		ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
537	}
538#endif /* #ifdef QLNX_RCV_IN_TASKQ */
539
540        QL_DPRINT2(ha, "exit ret = %d\n", ret);
541        return;
542}
543
544static int
545qlnx_create_fp_taskqueues(qlnx_host_t *ha)
546{
547	int	i;
548	uint8_t	tq_name[32];
549	struct qlnx_fastpath *fp;
550
551	for (i = 0; i < ha->num_rss; i++) {
552
553                fp = &ha->fp_array[i];
554
555		bzero(tq_name, sizeof (tq_name));
556		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
557
558		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);
559
560		fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
561					taskqueue_thread_enqueue,
562					&fp->fp_taskqueue);
563
564		if (fp->fp_taskqueue == NULL)
565			return (-1);
566
567		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
568			tq_name);
569
570		QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue);
571	}
572
573	return (0);
574}
575
576static void
577qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
578{
579	int			i;
580	struct qlnx_fastpath	*fp;
581
582	for (i = 0; i < ha->num_rss; i++) {
583
584                fp = &ha->fp_array[i];
585
586		if (fp->fp_taskqueue != NULL) {
587
588			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
589			taskqueue_free(fp->fp_taskqueue);
590			fp->fp_taskqueue = NULL;
591		}
592	}
593	return;
594}
595
596static void
597qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
598{
599	int			i;
600	struct qlnx_fastpath	*fp;
601
602	for (i = 0; i < ha->num_rss; i++) {
603                fp = &ha->fp_array[i];
604
605		if (fp->fp_taskqueue != NULL) {
606			QLNX_UNLOCK(ha);
607			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
608			QLNX_LOCK(ha);
609		}
610	}
611	return;
612}
613
614/*
615 * Name:	qlnx_pci_attach
616 * Function:	attaches the device to the operating system
617 */
618static int
619qlnx_pci_attach(device_t dev)
620{
621	qlnx_host_t	*ha = NULL;
622	uint32_t	rsrc_len_reg = 0;
623	uint32_t	rsrc_len_dbells = 0;
624	uint32_t	rsrc_len_msix = 0;
625	int		i;
626	uint32_t	mfw_ver;
627
628        if ((ha = device_get_softc(dev)) == NULL) {
629                device_printf(dev, "cannot get softc\n");
630                return (ENOMEM);
631        }
632
633        memset(ha, 0, sizeof (qlnx_host_t));
634
635        if (qlnx_valid_device(dev) != 0) {
636                device_printf(dev, "device is not valid device\n");
637                return (ENXIO);
638	}
639        ha->pci_func = pci_get_function(dev);
640
641        ha->pci_dev = dev;
642
643	mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
644
645        ha->flags.lock_init = 1;
646
647        pci_enable_busmaster(dev);
648
649	/*
650	 * map the PCI BARs
651	 */
652
653        ha->reg_rid = PCIR_BAR(0);
654        ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
655                                RF_ACTIVE);
656
657        if (ha->pci_reg == NULL) {
658                device_printf(dev, "unable to map BAR0\n");
659                goto qlnx_pci_attach_err;
660        }
661
662        rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
663                                        ha->reg_rid);
664
665        ha->dbells_rid = PCIR_BAR(2);
666        ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
667                        &ha->dbells_rid, RF_ACTIVE);
668
669        if (ha->pci_dbells == NULL) {
670                device_printf(dev, "unable to map BAR1\n");
671                goto qlnx_pci_attach_err;
672	}
673
674        rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
675                                        ha->dbells_rid);
676
677	ha->dbells_phys_addr = (uint64_t)
678		bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);;
679	ha->dbells_size = rsrc_len_dbells;
680
681        ha->msix_rid = PCIR_BAR(4);
682        ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
683                        &ha->msix_rid, RF_ACTIVE);
684
685        if (ha->msix_bar == NULL) {
686                device_printf(dev, "unable to map BAR2\n");
687                goto qlnx_pci_attach_err;
688	}
689
690        rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
691                                        ha->msix_rid);
692	/*
693	 * allocate dma tags
694	 */
695
696	if (qlnx_alloc_parent_dma_tag(ha))
697                goto qlnx_pci_attach_err;
698
699	if (qlnx_alloc_tx_dma_tag(ha))
700                goto qlnx_pci_attach_err;
701
702	if (qlnx_alloc_rx_dma_tag(ha))
703                goto qlnx_pci_attach_err;
704
705
706	if (qlnx_init_hw(ha) != 0)
707		goto qlnx_pci_attach_err;
708
709	/*
710	 * Allocate MSI-x vectors
711	 */
712	ha->num_rss = QLNX_MAX_RSS;
713	ha->num_tc = QLNX_MAX_TC;
714
715        ha->msix_count = pci_msix_count(dev);
716
717	if (ha->msix_count > (mp_ncpus + ha->cdev.num_hwfns))
718		ha->msix_count = mp_ncpus + ha->cdev.num_hwfns;
719
720        if (!ha->msix_count ||
721		(ha->msix_count < (ha->cdev.num_hwfns + 1 ))) {
722                device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
723                        ha->msix_count);
724                goto qlnx_pci_attach_err;
725        }
726
727	if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns ))
728		ha->msix_count = ha->num_rss + ha->cdev.num_hwfns;
729	else
730		ha->num_rss = ha->msix_count - ha->cdev.num_hwfns;
731
732	QL_DPRINT1(ha, "\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]"
733		"\n\t\t\tdbells [%p, 0x%08x 0x%08x]"
734		"\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]"
735		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
736		 ha->pci_reg, rsrc_len_reg,
737		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
738		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
739		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);
740        if (pci_alloc_msix(dev, &ha->msix_count)) {
741                device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
742                        ha->msix_count);
743                ha->msix_count = 0;
744                goto qlnx_pci_attach_err;
745        }
746
747	/*
748	 * Initialize slow path interrupt and task queue
749	 */
750	if (qlnx_create_sp_taskqueues(ha) != 0)
751		goto qlnx_pci_attach_err;
752
753	for (i = 0; i < ha->cdev.num_hwfns; i++) {
754
755                struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
756
757        	ha->sp_irq_rid[i] = i + 1;
758        	ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
759                                &ha->sp_irq_rid[i],
760                                (RF_ACTIVE | RF_SHAREABLE));
761        	if (ha->sp_irq[i] == NULL) {
762                	device_printf(dev,
763				"could not allocate mbx interrupt\n");
764                	goto qlnx_pci_attach_err;
765        	}
766
767        	if (bus_setup_intr(dev, ha->sp_irq[i],
768				(INTR_TYPE_NET | INTR_MPSAFE), NULL,
769				qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
770                	device_printf(dev,
771				"could not setup slow path interrupt\n");
772			goto qlnx_pci_attach_err;
773		}
774
775		QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
776			" sp_irq %p sp_handle %p\n", p_hwfn,
777			ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
778
779	}
780
781	/*
782	 * initialize fast path interrupt
783	 */
784	if (qlnx_create_fp_taskqueues(ha) != 0)
785		goto qlnx_pci_attach_err;
786
787        for (i = 0; i < ha->num_rss; i++) {
788                ha->irq_vec[i].rss_idx = i;
789                ha->irq_vec[i].ha = ha;
790                ha->irq_vec[i].irq_rid = (1 + ha->cdev.num_hwfns) + i;
791
792                ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
793                                &ha->irq_vec[i].irq_rid,
794                                (RF_ACTIVE | RF_SHAREABLE));
795
796                if (ha->irq_vec[i].irq == NULL) {
797                        device_printf(dev,
798				"could not allocate interrupt[%d]\n", i);
799                        goto qlnx_pci_attach_err;
800                }
801
802		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
803                        device_printf(dev, "could not allocate tx_br[%d]\n", i);
804                        goto qlnx_pci_attach_err;
805
806		}
807	}
808
809	callout_init(&ha->qlnx_callout, 1);
810	ha->flags.callout_init = 1;
811
812	for (i = 0; i < ha->cdev.num_hwfns; i++) {
813
814		if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
815			goto qlnx_pci_attach_err;
816		if (ha->grcdump_size[i] == 0)
817			goto qlnx_pci_attach_err;
818
819		ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
820		QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
821			i, ha->grcdump_size[i]);
822
823		ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
824		if (ha->grcdump[i] == NULL) {
825			device_printf(dev, "grcdump alloc[%d] failed\n", i);
826			goto qlnx_pci_attach_err;
827		}
828
829		if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
830			goto qlnx_pci_attach_err;
831		if (ha->idle_chk_size[i] == 0)
832			goto qlnx_pci_attach_err;
833
834		ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
835		QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
836			i, ha->idle_chk_size[i]);
837
838		ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
839
840		if (ha->idle_chk[i] == NULL) {
841			device_printf(dev, "idle_chk alloc failed\n");
842			goto qlnx_pci_attach_err;
843		}
844	}
845
846	if (qlnx_slowpath_start(ha) != 0) {
847
848		qlnx_mdelay(__func__, 1000);
849		qlnx_trigger_dump(ha);
850
851		goto qlnx_pci_attach_err0;
852	} else
853		ha->flags.slowpath_start = 1;
854
855	if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
856		qlnx_mdelay(__func__, 1000);
857		qlnx_trigger_dump(ha);
858
859		goto qlnx_pci_attach_err0;
860	}
861
862	if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
863		qlnx_mdelay(__func__, 1000);
864		qlnx_trigger_dump(ha);
865
866		goto qlnx_pci_attach_err0;
867	}
868	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
869		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
870		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
871	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
872		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
873		FW_ENGINEERING_VERSION);
874
875	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
876		 ha->stormfw_ver, ha->mfw_ver);
877
878	qlnx_init_ifnet(dev, ha);
879
880	/*
881	 * add sysctls
882	 */
883	qlnx_add_sysctls(ha);
884
885qlnx_pci_attach_err0:
886        /*
887	 * create ioctl device interface
888	 */
889        if (qlnx_make_cdev(ha)) {
890                device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
891                goto qlnx_pci_attach_err;
892        }
893
894	QL_DPRINT2(ha, "success\n");
895
896        return (0);
897
898qlnx_pci_attach_err:
899
900	qlnx_release(ha);
901
902	return (ENXIO);
903}
904
905/*
906 * Name:	qlnx_pci_detach
907 * Function:	Unhooks the device from the operating system
908 */
909static int
910qlnx_pci_detach(device_t dev)
911{
912	qlnx_host_t	*ha = NULL;
913
914        if ((ha = device_get_softc(dev)) == NULL) {
915                device_printf(dev, "cannot get softc\n");
916                return (ENOMEM);
917        }
918
919	QLNX_LOCK(ha);
920	qlnx_stop(ha);
921	QLNX_UNLOCK(ha);
922
923	qlnx_release(ha);
924
925        return (0);
926}
927
928static int
929qlnx_init_hw(qlnx_host_t *ha)
930{
931	int				rval = 0;
932	struct ecore_hw_prepare_params	params;
933
934	ecore_init_struct(&ha->cdev);
935
936	/* ha->dp_module = ECORE_MSG_PROBE |
937				ECORE_MSG_INTR |
938				ECORE_MSG_SP |
939				ECORE_MSG_LINK |
940				ECORE_MSG_SPQ |
941				ECORE_MSG_RDMA;
942	ha->dp_level = ECORE_LEVEL_VERBOSE;*/
943	ha->dp_level = ECORE_LEVEL_NOTICE;
944
945	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
946
947	ha->cdev.regview = ha->pci_reg;
948	ha->cdev.doorbells = ha->pci_dbells;
949	ha->cdev.db_phys_addr = ha->dbells_phys_addr;
950	ha->cdev.db_size = ha->dbells_size;
951
952	bzero(&params, sizeof (struct ecore_hw_prepare_params));
953
954	ha->personality = ECORE_PCI_DEFAULT;
955
956	params.personality = ha->personality;
957
958	params.drv_resc_alloc = false;
959	params.chk_reg_fifo = false;
960	params.initiate_pf_flr = true;
961	params.epoch = 0;
962
963	ecore_hw_prepare(&ha->cdev, &params);
964
965	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
966
967	return (rval);
968}
969
970static void
971qlnx_release(qlnx_host_t *ha)
972{
973        device_t	dev;
974        int		i;
975
976        dev = ha->pci_dev;
977
978	QL_DPRINT2(ha, "enter\n");
979
980	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
981		if (ha->idle_chk[i] != NULL) {
982			free(ha->idle_chk[i], M_QLNXBUF);
983			ha->idle_chk[i] = NULL;
984		}
985
986		if (ha->grcdump[i] != NULL) {
987			free(ha->grcdump[i], M_QLNXBUF);
988			ha->grcdump[i] = NULL;
989		}
990	}
991
992        if (ha->flags.callout_init)
993                callout_drain(&ha->qlnx_callout);
994
995	if (ha->flags.slowpath_start) {
996		qlnx_slowpath_stop(ha);
997	}
998
999	ecore_hw_remove(&ha->cdev);
1000
1001        qlnx_del_cdev(ha);
1002
1003        if (ha->ifp != NULL)
1004                ether_ifdetach(ha->ifp);
1005
1006	qlnx_free_tx_dma_tag(ha);
1007
1008	qlnx_free_rx_dma_tag(ha);
1009
1010	qlnx_free_parent_dma_tag(ha);
1011
1012        for (i = 0; i < ha->num_rss; i++) {
1013		struct qlnx_fastpath *fp = &ha->fp_array[i];
1014
1015                if (ha->irq_vec[i].handle) {
1016                        (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
1017                                        ha->irq_vec[i].handle);
1018                }
1019
1020                if (ha->irq_vec[i].irq) {
1021                        (void)bus_release_resource(dev, SYS_RES_IRQ,
1022                                ha->irq_vec[i].irq_rid,
1023                                ha->irq_vec[i].irq);
1024                }
1025
1026		qlnx_free_tx_br(ha, fp);
1027        }
1028	qlnx_destroy_fp_taskqueues(ha);
1029
1030 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
1031        	if (ha->sp_handle[i])
1032                	(void)bus_teardown_intr(dev, ha->sp_irq[i],
1033				ha->sp_handle[i]);
1034
1035        	if (ha->sp_irq[i])
1036			(void) bus_release_resource(dev, SYS_RES_IRQ,
1037				ha->sp_irq_rid[i], ha->sp_irq[i]);
1038	}
1039
1040	qlnx_destroy_sp_taskqueues(ha);
1041
1042        if (ha->msix_count)
1043                pci_release_msi(dev);
1044
1045        if (ha->flags.lock_init) {
1046                mtx_destroy(&ha->hw_lock);
1047        }
1048
1049        if (ha->pci_reg)
1050                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
1051                                ha->pci_reg);
1052
1053        if (ha->pci_dbells)
1054                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1055                                ha->pci_dbells);
1056
1057        if (ha->msix_bar)
1058                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1059                                ha->msix_bar);
1060
1061	QL_DPRINT2(ha, "exit\n");
1062	return;
1063}
1064
1065static void
1066qlnx_trigger_dump(qlnx_host_t *ha)
1067{
1068	int	i;
1069
1070	if (ha->ifp != NULL)
1071		ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1072
1073	QL_DPRINT2(ha, "enter\n");
1074
1075	for (i = 0; i < ha->cdev.num_hwfns; i++) {
1076		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1077		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1078	}
1079
1080	QL_DPRINT2(ha, "exit\n");
1081
1082	return;
1083}
1084
1085static int
1086qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
1087{
1088        int		err, ret = 0;
1089        qlnx_host_t	*ha;
1090
1091        err = sysctl_handle_int(oidp, &ret, 0, req);
1092
1093        if (err || !req->newptr)
1094                return (err);
1095
1096        if (ret == 1) {
1097                ha = (qlnx_host_t *)arg1;
1098                qlnx_trigger_dump(ha);
1099        }
1100        return (err);
1101}
1102
1103static int
1104qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
1105{
1106        int			err, i, ret = 0, usecs = 0;
1107        qlnx_host_t		*ha;
1108	struct ecore_hwfn	*p_hwfn;
1109	struct qlnx_fastpath	*fp;
1110
1111        err = sysctl_handle_int(oidp, &usecs, 0, req);
1112
1113        if (err || !req->newptr || !usecs || (usecs > 255))
1114                return (err);
1115
1116        ha = (qlnx_host_t *)arg1;
1117
1118	for (i = 0; i < ha->num_rss; i++) {
1119
1120		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1121
1122        	fp = &ha->fp_array[i];
1123
1124		if (fp->txq[0]->handle != NULL) {
1125			ret = ecore_set_queue_coalesce(p_hwfn, 0,
1126					(uint16_t)usecs, fp->txq[0]->handle);
1127		}
1128        }
1129
1130	if (!ret)
1131		ha->tx_coalesce_usecs = (uint8_t)usecs;
1132
1133        return (err);
1134}
1135
1136static int
1137qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
1138{
1139        int			err, i, ret = 0, usecs = 0;
1140        qlnx_host_t		*ha;
1141	struct ecore_hwfn	*p_hwfn;
1142	struct qlnx_fastpath	*fp;
1143
1144        err = sysctl_handle_int(oidp, &usecs, 0, req);
1145
1146        if (err || !req->newptr || !usecs || (usecs > 255))
1147                return (err);
1148
1149        ha = (qlnx_host_t *)arg1;
1150
1151	for (i = 0; i < ha->num_rss; i++) {
1152
1153		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1154
1155        	fp = &ha->fp_array[i];
1156
1157		if (fp->rxq->handle != NULL) {
1158			ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
1159					 0, fp->rxq->handle);
1160		}
1161	}
1162
1163	if (!ret)
1164		ha->rx_coalesce_usecs = (uint8_t)usecs;
1165
1166        return (err);
1167}
1168
1169static void
1170qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
1171{
1172        struct sysctl_ctx_list	*ctx;
1173        struct sysctl_oid_list	*children;
1174	struct sysctl_oid	*ctx_oid;
1175
1176        ctx = device_get_sysctl_ctx(ha->pci_dev);
1177	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1178
1179	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
1180			CTLFLAG_RD, NULL, "spstat");
1181        children = SYSCTL_CHILDREN(ctx_oid);
1182
1183	SYSCTL_ADD_QUAD(ctx, children,
1184                OID_AUTO, "sp_interrupts",
1185                CTLFLAG_RD, &ha->sp_interrupts,
1186                "No. of slowpath interrupts");
1187
1188	return;
1189}
1190
1191static void
1192qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
1193{
1194        struct sysctl_ctx_list	*ctx;
1195        struct sysctl_oid_list	*children;
1196        struct sysctl_oid_list	*node_children;
1197	struct sysctl_oid	*ctx_oid;
1198	int			i, j;
1199	uint8_t			name_str[16];
1200
1201        ctx = device_get_sysctl_ctx(ha->pci_dev);
1202	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1203
1204	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
1205			CTLFLAG_RD, NULL, "fpstat");
1206	children = SYSCTL_CHILDREN(ctx_oid);
1207
1208	for (i = 0; i < ha->num_rss; i++) {
1209
1210		bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1211		snprintf(name_str, sizeof(name_str), "%d", i);
1212
1213		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
1214			CTLFLAG_RD, NULL, name_str);
1215		node_children = SYSCTL_CHILDREN(ctx_oid);
1216
1217		/* Tx Related */
1218
1219		SYSCTL_ADD_QUAD(ctx, node_children,
1220			OID_AUTO, "tx_pkts_processed",
1221			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
1222			"No. of packets processed for transmission");
1223
1224		SYSCTL_ADD_QUAD(ctx, node_children,
1225			OID_AUTO, "tx_pkts_freed",
1226			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
1227			"No. of freed packets");
1228
1229		SYSCTL_ADD_QUAD(ctx, node_children,
1230			OID_AUTO, "tx_pkts_transmitted",
1231			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
1232			"No. of transmitted packets");
1233
1234		SYSCTL_ADD_QUAD(ctx, node_children,
1235			OID_AUTO, "tx_pkts_completed",
1236			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
1237			"No. of transmit completions");
1238
1239		SYSCTL_ADD_QUAD(ctx, node_children,
1240			OID_AUTO, "tx_lso_wnd_min_len",
1241			CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
1242			"tx_lso_wnd_min_len");
1243
1244		SYSCTL_ADD_QUAD(ctx, node_children,
1245			OID_AUTO, "tx_defrag",
1246			CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
1247			"tx_defrag");
1248
1249		SYSCTL_ADD_QUAD(ctx, node_children,
1250			OID_AUTO, "tx_nsegs_gt_elem_left",
1251			CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
1252			"tx_nsegs_gt_elem_left");
1253
1254		SYSCTL_ADD_UINT(ctx, node_children,
1255			OID_AUTO, "tx_tso_max_nsegs",
1256			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
1257			ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");
1258
1259		SYSCTL_ADD_UINT(ctx, node_children,
1260			OID_AUTO, "tx_tso_min_nsegs",
1261			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
1262			ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");
1263
1264		SYSCTL_ADD_UINT(ctx, node_children,
1265			OID_AUTO, "tx_tso_max_pkt_len",
1266			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
1267			ha->fp_array[i].tx_tso_max_pkt_len,
1268			"tx_tso_max_pkt_len");
1269
1270		SYSCTL_ADD_UINT(ctx, node_children,
1271			OID_AUTO, "tx_tso_min_pkt_len",
1272			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
1273			ha->fp_array[i].tx_tso_min_pkt_len,
1274			"tx_tso_min_pkt_len");
1275
1276		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
1277
1278			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1279			snprintf(name_str, sizeof(name_str),
1280				"tx_pkts_nseg_%02d", (j+1));
1281
1282			SYSCTL_ADD_QUAD(ctx, node_children,
1283				OID_AUTO, name_str, CTLFLAG_RD,
1284				&ha->fp_array[i].tx_pkts[j], name_str);
1285		}
1286
1287		SYSCTL_ADD_QUAD(ctx, node_children,
1288			OID_AUTO, "err_tx_nsegs_gt_elem_left",
1289			CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
1290			"err_tx_nsegs_gt_elem_left");
1291
1292		SYSCTL_ADD_QUAD(ctx, node_children,
1293			OID_AUTO, "err_tx_dmamap_create",
1294			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
1295			"err_tx_dmamap_create");
1296
1297		SYSCTL_ADD_QUAD(ctx, node_children,
1298			OID_AUTO, "err_tx_defrag_dmamap_load",
1299			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
1300			"err_tx_defrag_dmamap_load");
1301
1302		SYSCTL_ADD_QUAD(ctx, node_children,
1303			OID_AUTO, "err_tx_non_tso_max_seg",
1304			CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
1305			"err_tx_non_tso_max_seg");
1306
1307		SYSCTL_ADD_QUAD(ctx, node_children,
1308			OID_AUTO, "err_tx_dmamap_load",
1309			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
1310			"err_tx_dmamap_load");
1311
1312		SYSCTL_ADD_QUAD(ctx, node_children,
1313			OID_AUTO, "err_tx_defrag",
1314			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
1315			"err_tx_defrag");
1316
1317		SYSCTL_ADD_QUAD(ctx, node_children,
1318			OID_AUTO, "err_tx_free_pkt_null",
1319			CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
1320			"err_tx_free_pkt_null");
1321
1322		SYSCTL_ADD_QUAD(ctx, node_children,
1323			OID_AUTO, "err_tx_cons_idx_conflict",
1324			CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
1325			"err_tx_cons_idx_conflict");
1326
1327		SYSCTL_ADD_QUAD(ctx, node_children,
1328			OID_AUTO, "lro_cnt_64",
1329			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
1330			"lro_cnt_64");
1331
1332		SYSCTL_ADD_QUAD(ctx, node_children,
1333			OID_AUTO, "lro_cnt_128",
1334			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
1335			"lro_cnt_128");
1336
1337		SYSCTL_ADD_QUAD(ctx, node_children,
1338			OID_AUTO, "lro_cnt_256",
1339			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
1340			"lro_cnt_256");
1341
1342		SYSCTL_ADD_QUAD(ctx, node_children,
1343			OID_AUTO, "lro_cnt_512",
1344			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
1345			"lro_cnt_512");
1346
1347		SYSCTL_ADD_QUAD(ctx, node_children,
1348			OID_AUTO, "lro_cnt_1024",
1349			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
1350			"lro_cnt_1024");
1351
1352		/* Rx Related */
1353
1354		SYSCTL_ADD_QUAD(ctx, node_children,
1355			OID_AUTO, "rx_pkts",
1356			CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
1357			"No. of received packets");
1358
1359		SYSCTL_ADD_QUAD(ctx, node_children,
1360			OID_AUTO, "tpa_start",
1361			CTLFLAG_RD, &ha->fp_array[i].tpa_start,
1362			"No. of tpa_start packets");
1363
1364		SYSCTL_ADD_QUAD(ctx, node_children,
1365			OID_AUTO, "tpa_cont",
1366			CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
1367			"No. of tpa_cont packets");
1368
1369		SYSCTL_ADD_QUAD(ctx, node_children,
1370			OID_AUTO, "tpa_end",
1371			CTLFLAG_RD, &ha->fp_array[i].tpa_end,
1372			"No. of tpa_end packets");
1373
1374		SYSCTL_ADD_QUAD(ctx, node_children,
1375			OID_AUTO, "err_m_getcl",
1376			CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
1377			"err_m_getcl");
1378
1379		SYSCTL_ADD_QUAD(ctx, node_children,
1380			OID_AUTO, "err_m_getjcl",
1381			CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
1382			"err_m_getjcl");
1383
1384		SYSCTL_ADD_QUAD(ctx, node_children,
1385			OID_AUTO, "err_rx_hw_errors",
1386			CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
1387			"err_rx_hw_errors");
1388
1389		SYSCTL_ADD_QUAD(ctx, node_children,
1390			OID_AUTO, "err_rx_alloc_errors",
1391			CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
1392			"err_rx_alloc_errors");
1393	}
1394
1395	return;
1396}
1397
1398static void
1399qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
1400{
1401        struct sysctl_ctx_list	*ctx;
1402        struct sysctl_oid_list	*children;
1403	struct sysctl_oid	*ctx_oid;
1404
1405        ctx = device_get_sysctl_ctx(ha->pci_dev);
1406	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1407
1408	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
1409			CTLFLAG_RD, NULL, "hwstat");
1410        children = SYSCTL_CHILDREN(ctx_oid);
1411
1412	SYSCTL_ADD_QUAD(ctx, children,
1413                OID_AUTO, "no_buff_discards",
1414                CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
1415                "No. of packets discarded due to lack of buffer");
1416
1417	SYSCTL_ADD_QUAD(ctx, children,
1418                OID_AUTO, "packet_too_big_discard",
1419                CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
1420                "No. of packets discarded because packet was too big");
1421
1422	SYSCTL_ADD_QUAD(ctx, children,
1423                OID_AUTO, "ttl0_discard",
1424                CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
1425                "ttl0_discard");
1426
1427	SYSCTL_ADD_QUAD(ctx, children,
1428                OID_AUTO, "rx_ucast_bytes",
1429                CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
1430                "rx_ucast_bytes");
1431
1432	SYSCTL_ADD_QUAD(ctx, children,
1433                OID_AUTO, "rx_mcast_bytes",
1434                CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
1435                "rx_mcast_bytes");
1436
1437	SYSCTL_ADD_QUAD(ctx, children,
1438                OID_AUTO, "rx_bcast_bytes",
1439                CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
1440                "rx_bcast_bytes");
1441
1442	SYSCTL_ADD_QUAD(ctx, children,
1443                OID_AUTO, "rx_ucast_pkts",
1444                CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
1445                "rx_ucast_pkts");
1446
1447	SYSCTL_ADD_QUAD(ctx, children,
1448                OID_AUTO, "rx_mcast_pkts",
1449                CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
1450                "rx_mcast_pkts");
1451
1452	SYSCTL_ADD_QUAD(ctx, children,
1453                OID_AUTO, "rx_bcast_pkts",
1454                CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
1455                "rx_bcast_pkts");
1456
1457	SYSCTL_ADD_QUAD(ctx, children,
1458                OID_AUTO, "mftag_filter_discards",
1459                CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
1460                "mftag_filter_discards");
1461
1462	SYSCTL_ADD_QUAD(ctx, children,
1463                OID_AUTO, "mac_filter_discards",
1464                CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
1465                "mac_filter_discards");
1466
1467	SYSCTL_ADD_QUAD(ctx, children,
1468                OID_AUTO, "tx_ucast_bytes",
1469                CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
1470                "tx_ucast_bytes");
1471
1472	SYSCTL_ADD_QUAD(ctx, children,
1473                OID_AUTO, "tx_mcast_bytes",
1474                CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
1475                "tx_mcast_bytes");
1476
1477	SYSCTL_ADD_QUAD(ctx, children,
1478                OID_AUTO, "tx_bcast_bytes",
1479                CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
1480                "tx_bcast_bytes");
1481
1482	SYSCTL_ADD_QUAD(ctx, children,
1483                OID_AUTO, "tx_ucast_pkts",
1484                CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
1485                "tx_ucast_pkts");
1486
1487	SYSCTL_ADD_QUAD(ctx, children,
1488                OID_AUTO, "tx_mcast_pkts",
1489                CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
1490                "tx_mcast_pkts");
1491
1492	SYSCTL_ADD_QUAD(ctx, children,
1493                OID_AUTO, "tx_bcast_pkts",
1494                CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
1495                "tx_bcast_pkts");
1496
1497	SYSCTL_ADD_QUAD(ctx, children,
1498                OID_AUTO, "tx_err_drop_pkts",
1499                CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
1500                "tx_err_drop_pkts");
1501
1502	SYSCTL_ADD_QUAD(ctx, children,
1503                OID_AUTO, "tpa_coalesced_pkts",
1504                CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
1505                "tpa_coalesced_pkts");
1506
1507	SYSCTL_ADD_QUAD(ctx, children,
1508                OID_AUTO, "tpa_coalesced_events",
1509                CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
1510                "tpa_coalesced_events");
1511
1512	SYSCTL_ADD_QUAD(ctx, children,
1513                OID_AUTO, "tpa_aborts_num",
1514                CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
1515                "tpa_aborts_num");
1516
1517	SYSCTL_ADD_QUAD(ctx, children,
1518                OID_AUTO, "tpa_not_coalesced_pkts",
1519                CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
1520                "tpa_not_coalesced_pkts");
1521
1522	SYSCTL_ADD_QUAD(ctx, children,
1523                OID_AUTO, "tpa_coalesced_bytes",
1524                CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
1525                "tpa_coalesced_bytes");
1526
1527	SYSCTL_ADD_QUAD(ctx, children,
1528                OID_AUTO, "rx_64_byte_packets",
1529                CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
1530                "rx_64_byte_packets");
1531
1532	SYSCTL_ADD_QUAD(ctx, children,
1533                OID_AUTO, "rx_65_to_127_byte_packets",
1534                CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
1535                "rx_65_to_127_byte_packets");
1536
1537	SYSCTL_ADD_QUAD(ctx, children,
1538                OID_AUTO, "rx_128_to_255_byte_packets",
1539                CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
1540                "rx_128_to_255_byte_packets");
1541
1542	SYSCTL_ADD_QUAD(ctx, children,
1543                OID_AUTO, "rx_256_to_511_byte_packets",
1544                CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
1545                "rx_256_to_511_byte_packets");
1546
1547	SYSCTL_ADD_QUAD(ctx, children,
1548                OID_AUTO, "rx_512_to_1023_byte_packets",
1549                CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
1550                "rx_512_to_1023_byte_packets");
1551
1552	SYSCTL_ADD_QUAD(ctx, children,
1553                OID_AUTO, "rx_1024_to_1518_byte_packets",
1554                CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
1555                "rx_1024_to_1518_byte_packets");
1556
1557	SYSCTL_ADD_QUAD(ctx, children,
1558                OID_AUTO, "rx_1519_to_1522_byte_packets",
1559                CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
1560                "rx_1519_to_1522_byte_packets");
1561
1562	SYSCTL_ADD_QUAD(ctx, children,
1563                OID_AUTO, "rx_1523_to_2047_byte_packets",
1564                CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
1565                "rx_1523_to_2047_byte_packets");
1566
1567	SYSCTL_ADD_QUAD(ctx, children,
1568                OID_AUTO, "rx_2048_to_4095_byte_packets",
1569                CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
1570                "rx_2048_to_4095_byte_packets");
1571
1572	SYSCTL_ADD_QUAD(ctx, children,
1573                OID_AUTO, "rx_4096_to_9216_byte_packets",
1574                CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
1575                "rx_4096_to_9216_byte_packets");
1576
1577	SYSCTL_ADD_QUAD(ctx, children,
1578                OID_AUTO, "rx_9217_to_16383_byte_packets",
1579                CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
1580                "rx_9217_to_16383_byte_packets");
1581
1582	SYSCTL_ADD_QUAD(ctx, children,
1583                OID_AUTO, "rx_crc_errors",
1584                CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
1585                "rx_crc_errors");
1586
1587	SYSCTL_ADD_QUAD(ctx, children,
1588                OID_AUTO, "rx_mac_crtl_frames",
1589                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
1590                "rx_mac_crtl_frames");
1591
1592	SYSCTL_ADD_QUAD(ctx, children,
1593                OID_AUTO, "rx_pause_frames",
1594                CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
1595                "rx_pause_frames");
1596
1597	SYSCTL_ADD_QUAD(ctx, children,
1598                OID_AUTO, "rx_pfc_frames",
1599                CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
1600                "rx_pfc_frames");
1601
1602	SYSCTL_ADD_QUAD(ctx, children,
1603                OID_AUTO, "rx_align_errors",
1604                CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
1605                "rx_align_errors");
1606
1607	SYSCTL_ADD_QUAD(ctx, children,
1608                OID_AUTO, "rx_carrier_errors",
1609                CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
1610                "rx_carrier_errors");
1611
1612	SYSCTL_ADD_QUAD(ctx, children,
1613                OID_AUTO, "rx_oversize_packets",
1614                CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
1615                "rx_oversize_packets");
1616
1617	SYSCTL_ADD_QUAD(ctx, children,
1618                OID_AUTO, "rx_jabbers",
1619                CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
1620                "rx_jabbers");
1621
1622	SYSCTL_ADD_QUAD(ctx, children,
1623                OID_AUTO, "rx_undersize_packets",
1624                CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
1625                "rx_undersize_packets");
1626
1627	SYSCTL_ADD_QUAD(ctx, children,
1628                OID_AUTO, "rx_fragments",
1629                CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
1630                "rx_fragments");
1631
1632	SYSCTL_ADD_QUAD(ctx, children,
1633                OID_AUTO, "tx_64_byte_packets",
1634                CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
1635                "tx_64_byte_packets");
1636
1637	SYSCTL_ADD_QUAD(ctx, children,
1638                OID_AUTO, "tx_65_to_127_byte_packets",
1639                CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
1640                "tx_65_to_127_byte_packets");
1641
1642	SYSCTL_ADD_QUAD(ctx, children,
1643                OID_AUTO, "tx_128_to_255_byte_packets",
1644                CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
1645                "tx_128_to_255_byte_packets");
1646
1647	SYSCTL_ADD_QUAD(ctx, children,
1648                OID_AUTO, "tx_256_to_511_byte_packets",
1649                CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
1650                "tx_256_to_511_byte_packets");
1651
1652	SYSCTL_ADD_QUAD(ctx, children,
1653                OID_AUTO, "tx_512_to_1023_byte_packets",
1654                CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
1655                "tx_512_to_1023_byte_packets");
1656
1657	SYSCTL_ADD_QUAD(ctx, children,
1658                OID_AUTO, "tx_1024_to_1518_byte_packets",
1659                CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
1660                "tx_1024_to_1518_byte_packets");
1661
1662	SYSCTL_ADD_QUAD(ctx, children,
1663                OID_AUTO, "tx_1519_to_2047_byte_packets",
1664                CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
1665                "tx_1519_to_2047_byte_packets");
1666
1667	SYSCTL_ADD_QUAD(ctx, children,
1668                OID_AUTO, "tx_2048_to_4095_byte_packets",
1669                CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
1670                "tx_2048_to_4095_byte_packets");
1671
1672	SYSCTL_ADD_QUAD(ctx, children,
1673                OID_AUTO, "tx_4096_to_9216_byte_packets",
1674                CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
1675                "tx_4096_to_9216_byte_packets");
1676
1677	SYSCTL_ADD_QUAD(ctx, children,
1678                OID_AUTO, "tx_9217_to_16383_byte_packets",
1679                CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
1680                "tx_9217_to_16383_byte_packets");
1681
1682	SYSCTL_ADD_QUAD(ctx, children,
1683                OID_AUTO, "tx_pause_frames",
1684                CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
1685                "tx_pause_frames");
1686
1687	SYSCTL_ADD_QUAD(ctx, children,
1688                OID_AUTO, "tx_pfc_frames",
1689                CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
1690                "tx_pfc_frames");
1691
1692	SYSCTL_ADD_QUAD(ctx, children,
1693                OID_AUTO, "tx_lpi_entry_count",
1694                CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
1695                "tx_lpi_entry_count");
1696
1697	SYSCTL_ADD_QUAD(ctx, children,
1698                OID_AUTO, "tx_total_collisions",
1699                CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
1700                "tx_total_collisions");
1701
1702	SYSCTL_ADD_QUAD(ctx, children,
1703                OID_AUTO, "brb_truncates",
1704                CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
1705                "brb_truncates");
1706
1707	SYSCTL_ADD_QUAD(ctx, children,
1708                OID_AUTO, "brb_discards",
1709                CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
1710                "brb_discards");
1711
1712	SYSCTL_ADD_QUAD(ctx, children,
1713                OID_AUTO, "rx_mac_bytes",
1714                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
1715                "rx_mac_bytes");
1716
1717	SYSCTL_ADD_QUAD(ctx, children,
1718                OID_AUTO, "rx_mac_uc_packets",
1719                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
1720                "rx_mac_uc_packets");
1721
1722	SYSCTL_ADD_QUAD(ctx, children,
1723                OID_AUTO, "rx_mac_mc_packets",
1724                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
1725                "rx_mac_mc_packets");
1726
1727	SYSCTL_ADD_QUAD(ctx, children,
1728                OID_AUTO, "rx_mac_bc_packets",
1729                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
1730                "rx_mac_bc_packets");
1731
1732	SYSCTL_ADD_QUAD(ctx, children,
1733                OID_AUTO, "rx_mac_frames_ok",
1734                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
1735                "rx_mac_frames_ok");
1736
1737	SYSCTL_ADD_QUAD(ctx, children,
1738                OID_AUTO, "tx_mac_bytes",
1739                CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
1740                "tx_mac_bytes");
1741
1742	SYSCTL_ADD_QUAD(ctx, children,
1743                OID_AUTO, "tx_mac_uc_packets",
1744                CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
1745                "tx_mac_uc_packets");
1746
1747	SYSCTL_ADD_QUAD(ctx, children,
1748                OID_AUTO, "tx_mac_mc_packets",
1749                CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
1750                "tx_mac_mc_packets");
1751
1752	SYSCTL_ADD_QUAD(ctx, children,
1753                OID_AUTO, "tx_mac_bc_packets",
1754                CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
1755                "tx_mac_bc_packets");
1756
1757	SYSCTL_ADD_QUAD(ctx, children,
1758                OID_AUTO, "tx_mac_ctrl_frames",
1759                CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
1760                "tx_mac_ctrl_frames");
1761	return;
1762}
1763
1764static void
1765qlnx_add_sysctls(qlnx_host_t *ha)
1766{
1767        device_t		dev = ha->pci_dev;
1768	struct sysctl_ctx_list	*ctx;
1769	struct sysctl_oid_list	*children;
1770
1771	ctx = device_get_sysctl_ctx(dev);
1772	children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
1773
1774	qlnx_add_fp_stats_sysctls(ha);
1775	qlnx_add_sp_stats_sysctls(ha);
1776	qlnx_add_hw_stats_sysctls(ha);
1777
1778	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
1779		CTLFLAG_RD, qlnx_ver_str, 0,
1780		"Driver Version");
1781
1782	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
1783		CTLFLAG_RD, ha->stormfw_ver, 0,
1784		"STORM Firmware Version");
1785
1786	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
1787		CTLFLAG_RD, ha->mfw_ver, 0,
1788		"Management Firmware Version");
1789
1790        SYSCTL_ADD_UINT(ctx, children,
1791                OID_AUTO, "personality", CTLFLAG_RD,
1792                &ha->personality, ha->personality,
1793		"\tpersonality = 0 => Ethernet Only\n"
1794		"\tpersonality = 3 => Ethernet and RoCE\n"
1795		"\tpersonality = 4 => Ethernet and iWARP\n"
1796		"\tpersonality = 6 => Default in Shared Memory\n");
1797
1798        ha->dbg_level = 0;
1799        SYSCTL_ADD_UINT(ctx, children,
1800                OID_AUTO, "debug", CTLFLAG_RW,
1801                &ha->dbg_level, ha->dbg_level, "Debug Level");
1802
1803        ha->dp_level = 0x01;
1804        SYSCTL_ADD_UINT(ctx, children,
1805                OID_AUTO, "dp_level", CTLFLAG_RW,
1806                &ha->dp_level, ha->dp_level, "DP Level");
1807
1808        ha->dbg_trace_lro_cnt = 0;
1809        SYSCTL_ADD_UINT(ctx, children,
1810                OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
1811                &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
1812		"Trace LRO Counts");
1813
1814        ha->dbg_trace_tso_pkt_len = 0;
1815        SYSCTL_ADD_UINT(ctx, children,
1816                OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
1817                &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
1818		"Trace TSO packet lengths");
1819
1820        ha->dp_module = 0;
1821        SYSCTL_ADD_UINT(ctx, children,
1822                OID_AUTO, "dp_module", CTLFLAG_RW,
1823                &ha->dp_module, ha->dp_module, "DP Module");
1824
1825        ha->err_inject = 0;
1826
1827        SYSCTL_ADD_UINT(ctx, children,
1828                OID_AUTO, "err_inject", CTLFLAG_RW,
1829                &ha->err_inject, ha->err_inject, "Error Inject");
1830
1831        ha->storm_stats_enable = 0;
1832
1833        SYSCTL_ADD_UINT(ctx, children,
1834                OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
1835                &ha->storm_stats_enable, ha->storm_stats_enable,
1836		"Enable Storm Statistics Gathering");
1837
1838        ha->storm_stats_index = 0;
1839
1840        SYSCTL_ADD_UINT(ctx, children,
1841                OID_AUTO, "storm_stats_index", CTLFLAG_RD,
1842                &ha->storm_stats_index, ha->storm_stats_index,
1843		"Storm Statistics Current Index");
1844
1845        ha->grcdump_taken = 0;
1846        SYSCTL_ADD_UINT(ctx, children,
1847                OID_AUTO, "grcdump_taken", CTLFLAG_RD,
1848                &ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken");
1849
1850        ha->idle_chk_taken = 0;
1851        SYSCTL_ADD_UINT(ctx, children,
1852                OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
1853                &ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken");
1854
1855        SYSCTL_ADD_UINT(ctx, children,
1856                OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
1857                &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
1858		"rx_coalesce_usecs");
1859
1860        SYSCTL_ADD_UINT(ctx, children,
1861                OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
1862                &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
1863		"tx_coalesce_usecs");
1864
1865	ha->rx_pkt_threshold = 128;
1866        SYSCTL_ADD_UINT(ctx, children,
1867                OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
1868                &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
1869		"No. of Rx Pkts to process at a time");
1870
1871	ha->rx_jumbo_buf_eq_mtu = 0;
1872        SYSCTL_ADD_UINT(ctx, children,
1873                OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
1874                &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
1875		"== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
1876		"otherwise Rx Jumbo buffers are set to >= MTU size\n");
1877
1878	SYSCTL_ADD_PROC(ctx, children,
1879		OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW,
1880		(void *)ha, 0,
1881		qlnx_trigger_dump_sysctl, "I", "trigger_dump");
1882
1883	SYSCTL_ADD_PROC(ctx, children,
1884		OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
1885		(void *)ha, 0,
1886		qlnx_set_rx_coalesce, "I",
1887		"rx interrupt coalesce period microseconds");
1888
1889	SYSCTL_ADD_PROC(ctx, children,
1890		OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
1891		(void *)ha, 0,
1892		qlnx_set_tx_coalesce, "I",
1893		"tx interrupt coalesce period microseconds");
1894
1895	SYSCTL_ADD_QUAD(ctx, children,
1896                OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
1897		&ha->err_illegal_intr, "err_illegal_intr");
1898
1899	SYSCTL_ADD_QUAD(ctx, children,
1900                OID_AUTO, "err_fp_null", CTLFLAG_RD,
1901		&ha->err_fp_null, "err_fp_null");
1902
1903	SYSCTL_ADD_QUAD(ctx, children,
1904                OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
1905		&ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
1906	return;
1907}
1908
1909
1910
1911/*****************************************************************************
1912 * Operating System Network Interface Functions
1913 *****************************************************************************/
1914
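/*
 * Allocate and initialize the ifnet for this port: set the baudrate from the
 * PCI device id (40G/25G/50G/100G variants), advertise checksum/TSO/LRO/VLAN
 * offload capabilities, attach the primary MAC address and register the
 * ifmedia types the device supports.
 */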
1915static void
1916qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
1917{
1918	uint16_t	device_id;
1919        struct ifnet	*ifp;
1920
1921        ifp = ha->ifp = if_alloc(IFT_ETHER);
1922
1923        if (ifp == NULL)
1924                panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
1925
1926        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1927
1928	device_id = pci_get_device(ha->pci_dev);
1929
1930#if __FreeBSD_version >= 1000000
1931
1932        if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
1933		ifp->if_baudrate = IF_Gbps(40);
1934        else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
1935			(device_id == QLOGIC_PCI_DEVICE_ID_8070))
1936		ifp->if_baudrate = IF_Gbps(25);
1937        else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
1938		ifp->if_baudrate = IF_Gbps(50);
1939        else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
1940		ifp->if_baudrate = IF_Gbps(100);
1941
1942        ifp->if_capabilities = IFCAP_LINKSTATE;
1943#else
1944        ifp->if_mtu = ETHERMTU;
1945	ifp->if_baudrate = (1 * 1000 * 1000 * 1000);
1946
1947#endif /* #if __FreeBSD_version >= 1000000 */
1948
1949        ifp->if_init = qlnx_init;
1950        ifp->if_softc = ha;
1951        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1952        ifp->if_ioctl = qlnx_ioctl;
1953        ifp->if_transmit = qlnx_transmit;
1954        ifp->if_qflush = qlnx_qflush;
1955
1956        IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
1957        ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
1958        IFQ_SET_READY(&ifp->if_snd);
1959
1960#if __FreeBSD_version >= 1100036
1961	if_setgetcounterfn(ifp, qlnx_get_counter);
1962#endif
1963
1964        ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1965
1966        memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
1967        ether_ifattach(ifp, ha->primary_mac);
1968	bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
1969
1970	ifp->if_capabilities = IFCAP_HWCSUM;
1971	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1972
1973	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1974	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1975	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1976	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1977	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1978	ifp->if_capabilities |= IFCAP_TSO4;
1979	ifp->if_capabilities |= IFCAP_TSO6;
1980	ifp->if_capabilities |= IFCAP_LRO;
1981
1982        ifp->if_capenable = ifp->if_capabilities;
1983
1984	ifp->if_hwassist = CSUM_IP;
1985	ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
1986	ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
1987	ifp->if_hwassist |= CSUM_TSO;
1988
1989	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1990
1991        ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
1992		qlnx_media_status);
1993
1994        if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
1995		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
1996		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
1997		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
1998        } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
1999			(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2000		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2001		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2002        } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2003		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2004		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2005        } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2006		ifmedia_add(&ha->media,
2007			(IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2008		ifmedia_add(&ha->media,
2009			(IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2010		ifmedia_add(&ha->media,
2011			(IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2012	}
2013
2014        ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2015        ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2016
2017
2018        ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2019
2020        QL_DPRINT2(ha, "exit\n");
2021
2022        return;
2023}
2024
2025static void
2026qlnx_init_locked(qlnx_host_t *ha)
2027{
2028	struct ifnet	*ifp = ha->ifp;
2029
2030	QL_DPRINT1(ha, "Driver Initialization start \n");
2031
2032	qlnx_stop(ha);
2033
2034	if (qlnx_load(ha) == 0) {
2035		ifp->if_drv_flags |= IFF_DRV_RUNNING;
2036		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2037	}
2038
2039	return;
2040}
2041
2042static void
2043qlnx_init(void *arg)
2044{
2045	qlnx_host_t	*ha;
2046
2047	ha = (qlnx_host_t *)arg;
2048
2049	QL_DPRINT2(ha, "enter\n");
2050
2051	QLNX_LOCK(ha);
2052	qlnx_init_locked(ha);
2053	QLNX_UNLOCK(ha);
2054
2055	QL_DPRINT2(ha, "exit\n");
2056
2057	return;
2058}
2059
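/*
 * Build a single-entry ecore_filter_mcast command (ECORE_FILTER_ADD or
 * ECORE_FILTER_REMOVE) for the given MAC address and submit it to the
 * adapter via ecore_filter_mcast_cmd() in ECORE_SPQ_MODE_CB mode.
 */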
2060static int
2061qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2062{
2063	struct ecore_filter_mcast	*mcast;
2064	struct ecore_dev		*cdev;
2065	int				rc;
2066
2067	cdev = &ha->cdev;
2068
2069	mcast = &ha->ecore_mcast;
2070	bzero(mcast, sizeof(struct ecore_filter_mcast));
2071
2072	if (add_mac)
2073		mcast->opcode = ECORE_FILTER_ADD;
2074	else
2075		mcast->opcode = ECORE_FILTER_REMOVE;
2076
2077	mcast->num_mc_addrs = 1;
2078	memcpy(mcast->mac, mac_addr, ETH_ALEN);
2079
2080	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2081
2082	return (rc);
2083}
2084
2085static int
2086qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2087{
2088        int	i;
2089
2090        for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2091
2092                if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2093                        return 0; /* it has already been added */
2094        }
2095
2096        for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2097
2098                if ((ha->mcast[i].addr[0] == 0) &&
2099                        (ha->mcast[i].addr[1] == 0) &&
2100                        (ha->mcast[i].addr[2] == 0) &&
2101                        (ha->mcast[i].addr[3] == 0) &&
2102                        (ha->mcast[i].addr[4] == 0) &&
2103                        (ha->mcast[i].addr[5] == 0)) {
2104
2105                        if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2106                                return (-1);
2107
2108                        bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2109                        ha->nmcast++;
2110
2111                        return 0;
2112                }
2113        }
2114        return 0;
2115}
2116
2117static int
2118qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2119{
2120        int	i;
2121
2122        for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2123                if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2124
2125                        if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2126                                return (-1);
2127
2128                        ha->mcast[i].addr[0] = 0;
2129                        ha->mcast[i].addr[1] = 0;
2130                        ha->mcast[i].addr[2] = 0;
2131                        ha->mcast[i].addr[3] = 0;
2132                        ha->mcast[i].addr[4] = 0;
2133                        ha->mcast[i].addr[5] = 0;
2134
2135                        ha->nmcast--;
2136
2137                        return 0;
2138                }
2139        }
2140        return 0;
2141}
2142
2143/*
2144 * Name: qlnx_hw_set_multi
2145 * Function: Sets the multicast addresses provided by the host O.S. into the
2146 *      hardware (for the given interface)
2147 */
2148static void
2149qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2150	uint32_t add_mac)
2151{
2152        int	i;
2153
2154        for (i = 0; i < mcnt; i++) {
2155                if (add_mac) {
2156                        if (qlnx_hw_add_mcast(ha, mta))
2157                                break;
2158                } else {
2159                        if (qlnx_hw_del_mcast(ha, mta))
2160                                break;
2161                }
2162
2163                mta += ETHER_HDR_LEN;
2164        }
2165        return;
2166}
2167
2168
2169#define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN)
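/*
 * Collect the link-layer multicast addresses joined on the interface (up to
 * QLNX_MAX_NUM_MULTICAST_ADDRS entries, stored ETHER_HDR_LEN bytes apart in
 * the scratch buffer) and add or remove them in hardware under the driver
 * lock.
 */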
2170static int
2171qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2172{
2173	uint8_t			mta[QLNX_MCAST_ADDRS_SIZE];
2174	struct ifmultiaddr	*ifma;
2175	int			mcnt = 0;
2176	struct ifnet		*ifp = ha->ifp;
2177	int			ret = 0;
2178
2179	if_maddr_rlock(ifp);
2180
2181	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2182
2183		if (ifma->ifma_addr->sa_family != AF_LINK)
2184			continue;
2185
2186		if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2187			break;
2188
2189		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2190			&mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2191
2192		mcnt++;
2193	}
2194
2195	if_maddr_runlock(ifp);
2196
2197	QLNX_LOCK(ha);
2198	qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2199	QLNX_UNLOCK(ha);
2200
2201	return (ret);
2202}
2203
2204static int
2205qlnx_set_promisc(qlnx_host_t *ha)
2206{
2207	int	rc = 0;
2208	uint8_t	filter;
2209
2210	filter = ha->filter;
2211	filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2212	filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2213
2214	rc = qlnx_set_rx_accept_filter(ha, filter);
2215	return (rc);
2216}
2217
2218static int
2219qlnx_set_allmulti(qlnx_host_t *ha)
2220{
2221	int	rc = 0;
2222	uint8_t	filter;
2223
2224	filter = ha->filter;
2225	filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2226	rc = qlnx_set_rx_accept_filter(ha, filter);
2227
2228	return (rc);
2229}
2230
2231
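/*
 * Interface ioctl handler: address/MTU/flag changes reinitialize the
 * interface under the driver lock, SIOCADDMULTI/SIOCDELMULTI reprogram the
 * multicast filters, SIOCSIFCAP toggles offload capabilities, and SIOCGI2C
 * reads the SFP module EEPROM through the management firmware.
 */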
2232static int
2233qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2234{
2235	int		ret = 0, mask;
2236	struct ifreq	*ifr = (struct ifreq *)data;
2237	struct ifaddr	*ifa = (struct ifaddr *)data;
2238	qlnx_host_t	*ha;
2239
2240	ha = (qlnx_host_t *)ifp->if_softc;
2241
2242	switch (cmd) {
2243	case SIOCSIFADDR:
2244		QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2245
2246		if (ifa->ifa_addr->sa_family == AF_INET) {
2247			ifp->if_flags |= IFF_UP;
2248			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2249				QLNX_LOCK(ha);
2250				qlnx_init_locked(ha);
2251				QLNX_UNLOCK(ha);
2252			}
2253			QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2254				   cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2255
2256			arp_ifinit(ifp, ifa);
2257		} else {
2258			ether_ioctl(ifp, cmd, data);
2259		}
2260		break;
2261
2262	case SIOCSIFMTU:
2263		QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2264
2265		if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2266			ret = EINVAL;
2267		} else {
2268			QLNX_LOCK(ha);
2269			ifp->if_mtu = ifr->ifr_mtu;
2270			ha->max_frame_size =
2271				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2272			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2273				qlnx_init_locked(ha);
2274			}
2275
2276			QLNX_UNLOCK(ha);
2277		}
2278
2279		break;
2280
2281	case SIOCSIFFLAGS:
2282		QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2283
2284		QLNX_LOCK(ha);
2285
2286		if (ifp->if_flags & IFF_UP) {
2287			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2288				if ((ifp->if_flags ^ ha->if_flags) &
2289					IFF_PROMISC) {
2290					ret = qlnx_set_promisc(ha);
2291				} else if ((ifp->if_flags ^ ha->if_flags) &
2292					IFF_ALLMULTI) {
2293					ret = qlnx_set_allmulti(ha);
2294				}
2295			} else {
2296				ha->max_frame_size = ifp->if_mtu +
2297					ETHER_HDR_LEN + ETHER_CRC_LEN;
2298				qlnx_init_locked(ha);
2299			}
2300		} else {
2301			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2302				qlnx_stop(ha);
2303			ha->if_flags = ifp->if_flags;
2304		}
2305
2306		QLNX_UNLOCK(ha);
2307		break;
2308
2309	case SIOCADDMULTI:
2310		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2311
2312		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2313			if (qlnx_set_multi(ha, 1))
2314				ret = EINVAL;
2315		}
2316		break;
2317
2318	case SIOCDELMULTI:
2319		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2320
2321		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2322			if (qlnx_set_multi(ha, 0))
2323				ret = EINVAL;
2324		}
2325		break;
2326
2327	case SIOCSIFMEDIA:
2328	case SIOCGIFMEDIA:
2329		QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2330
2331		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2332		break;
2333
2334	case SIOCSIFCAP:
2335
2336		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2337
2338		QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2339
2340		if (mask & IFCAP_HWCSUM)
2341			ifp->if_capenable ^= IFCAP_HWCSUM;
2342		if (mask & IFCAP_TSO4)
2343			ifp->if_capenable ^= IFCAP_TSO4;
2344		if (mask & IFCAP_TSO6)
2345			ifp->if_capenable ^= IFCAP_TSO6;
2346		if (mask & IFCAP_VLAN_HWTAGGING)
2347			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2348		if (mask & IFCAP_VLAN_HWTSO)
2349			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2350		if (mask & IFCAP_LRO)
2351			ifp->if_capenable ^= IFCAP_LRO;
2352
2353		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2354			qlnx_init(ha);
2355
2356		VLAN_CAPABILITIES(ifp);
2357		break;
2358
2359#if (__FreeBSD_version >= 1100101)
2360
2361	case SIOCGI2C:
2362	{
2363		struct ifi2creq i2c;
2364		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2365		struct ecore_ptt *p_ptt;
2366
2367		ret = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
2368
2369		if (ret)
2370			break;
2371
2372		if ((i2c.len > sizeof (i2c.data)) ||
2373			(i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2374			ret = EINVAL;
2375			break;
2376		}
2377
2378		p_ptt = ecore_ptt_acquire(p_hwfn);
2379
2380		if (!p_ptt) {
2381			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2382			ret = -1;
2383			break;
2384		}
2385
2386		ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2387			(ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2388			i2c.len, &i2c.data[0]);
2389
2390		ecore_ptt_release(p_hwfn, p_ptt);
2391
2392		if (ret) {
2393			ret = -1;
2394			break;
2395		}
2396
2397		ret = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
2398
2399		QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2400			 len = %d addr = 0x%02x offset = 0x%04x \
2401			 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2402			 0x%02x 0x%02x 0x%02x\n",
2403			ret, i2c.len, i2c.dev_addr, i2c.offset,
2404			i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2405			i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2406		break;
2407	}
2408#endif /* #if (__FreeBSD_version >= 1100101) */
2409
2410	default:
2411		QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2412		ret = ether_ioctl(ifp, cmd, data);
2413		break;
2414	}
2415
2416	return (ret);
2417}
2418
2419static int
2420qlnx_media_change(struct ifnet *ifp)
2421{
2422	qlnx_host_t	*ha;
2423	struct ifmedia	*ifm;
2424	int		ret = 0;
2425
2426	ha = (qlnx_host_t *)ifp->if_softc;
2427
2428	QL_DPRINT2(ha, "enter\n");
2429
2430	ifm = &ha->media;
2431
2432	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2433		ret = EINVAL;
2434
2435	QL_DPRINT2(ha, "exit\n");
2436
2437	return (ret);
2438}
2439
2440static void
2441qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2442{
2443	qlnx_host_t		*ha;
2444
2445	ha = (qlnx_host_t *)ifp->if_softc;
2446
2447	QL_DPRINT2(ha, "enter\n");
2448
2449	ifmr->ifm_status = IFM_AVALID;
2450	ifmr->ifm_active = IFM_ETHER;
2451
2452	if (ha->link_up) {
2453		ifmr->ifm_status |= IFM_ACTIVE;
2454		ifmr->ifm_active |=
2455			(IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2456
2457		if (ha->if_link.link_partner_caps &
2458			(QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2459			ifmr->ifm_active |=
2460				(IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2461	}
2462
2463	QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2464
2465	return;
2466}
2467
2468
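/*
 * Reclaim the transmit packet at sw_tx_cons: sync and unload its DMA map,
 * free the mbuf chain, consume the first BD and any additional data BDs
 * from the tx PBL chain and clear the software ring entry.  A NULL mbuf
 * here indicates a producer/consumer accounting problem and triggers a
 * debug dump.
 */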
2469static void
2470qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2471	struct qlnx_tx_queue *txq)
2472{
2473	u16			idx;
2474	struct mbuf		*mp;
2475	bus_dmamap_t		map;
2476	int			i;
2477	struct eth_tx_bd	*tx_data_bd;
2478	struct eth_tx_1st_bd	*first_bd;
2479	int			nbds = 0;
2480
2481	idx = txq->sw_tx_cons;
2482	mp = txq->sw_tx_ring[idx].mp;
2483	map = txq->sw_tx_ring[idx].map;
2484
2485	if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2486
2487		QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2488
2489		QL_DPRINT1(ha, "(mp == NULL) "
2490			" tx_idx = 0x%x"
2491			" ecore_prod_idx = 0x%x"
2492			" ecore_cons_idx = 0x%x"
2493			" hw_bd_cons = 0x%x"
2494			" txq_db_last = 0x%x"
2495			" elem_left = 0x%x\n",
2496			fp->rss_id,
2497			ecore_chain_get_prod_idx(&txq->tx_pbl),
2498			ecore_chain_get_cons_idx(&txq->tx_pbl),
2499			le16toh(*txq->hw_cons_ptr),
2500			txq->tx_db.raw,
2501			ecore_chain_get_elem_left(&txq->tx_pbl));
2502
2503		fp->err_tx_free_pkt_null++;
2504
2505		//DEBUG
2506		qlnx_trigger_dump(ha);
2507
2508		return;
2509	} else {
2510
2511		QLNX_INC_OPACKETS((ha->ifp));
2512		QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2513
2514		bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2515		bus_dmamap_unload(ha->tx_tag, map);
2516
2517		fp->tx_pkts_freed++;
2518		fp->tx_pkts_completed++;
2519
2520		m_freem(mp);
2521	}
2522
2523	first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2524	nbds = first_bd->data.nbds;
2525
2526//	BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2527
2528	for (i = 1; i < nbds; i++) {
2529		tx_data_bd = ecore_chain_consume(&txq->tx_pbl);
2530//		BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2531	}
2532	txq->sw_tx_ring[idx].flags = 0;
2533	txq->sw_tx_ring[idx].mp = NULL;
2534	txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2535
2536	return;
2537}
2538
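/*
 * Process transmit completions: free packets until the chain consumer index
 * catches up with the hardware BD consumer, allowing for 16-bit index
 * wraparound.  A difference larger than TX_RING_SIZE indicates a consumer
 * index conflict and triggers a debug dump.
 */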
2539static void
2540qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2541	struct qlnx_tx_queue *txq)
2542{
2543	u16 hw_bd_cons;
2544	u16 ecore_cons_idx;
2545	uint16_t diff;
2546
2547	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2548
2549	while (hw_bd_cons !=
2550		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2551
2552		if (hw_bd_cons < ecore_cons_idx) {
2553			diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
2554		} else {
2555			diff = hw_bd_cons - ecore_cons_idx;
2556		}
2557		if ((diff > TX_RING_SIZE) ||
2558			QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2559
2560			QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2561
2562			QL_DPRINT1(ha, "(diff = 0x%x) "
2563				" tx_idx = 0x%x"
2564				" ecore_prod_idx = 0x%x"
2565				" ecore_cons_idx = 0x%x"
2566				" hw_bd_cons = 0x%x"
2567				" txq_db_last = 0x%x"
2568				" elem_left = 0x%x\n",
2569				diff,
2570				fp->rss_id,
2571				ecore_chain_get_prod_idx(&txq->tx_pbl),
2572				ecore_chain_get_cons_idx(&txq->tx_pbl),
2573				le16toh(*txq->hw_cons_ptr),
2574				txq->tx_db.raw,
2575				ecore_chain_get_elem_left(&txq->tx_pbl));
2576
2577			fp->err_tx_cons_idx_conflict++;
2578
2579			//DEBUG
2580			qlnx_trigger_dump(ha);
2581		}
2582
2583		qlnx_free_tx_pkt(ha, fp, txq);
2584
2585		txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2586	}
2587	return;
2588}
2589
2590static int
2591qlnx_transmit(struct ifnet *ifp, struct mbuf  *mp)
2592{
2593        qlnx_host_t		*ha = (qlnx_host_t *)ifp->if_softc;
2594        struct qlnx_fastpath	*fp;
2595        int			rss_id = 0, ret = 0;
2596
2597        QL_DPRINT2(ha, "enter\n");
2598
2599#if __FreeBSD_version >= 1100000
2600        if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
2601#else
2602        if (mp->m_flags & M_FLOWID)
2603#endif
2604                rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
2605					ha->num_rss;
2606
2607        fp = &ha->fp_array[rss_id];
2608
2609        if (fp->tx_br == NULL) {
2610                ret = EINVAL;
2611                goto qlnx_transmit_exit;
2612        }
2613
2614        if (mp != NULL) {
2615                ret = drbr_enqueue(ifp, fp->tx_br, mp);
2616        }
2617
2618        if (fp->fp_taskqueue != NULL)
2619                taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
2620
2621        ret = 0;
2622
2623qlnx_transmit_exit:
2624
2625        QL_DPRINT2(ha, "exit ret = %d\n", ret);
2626        return ret;
2627}
2628
2629static void
2630qlnx_qflush(struct ifnet *ifp)
2631{
2632	int			rss_id;
2633	struct qlnx_fastpath	*fp;
2634	struct mbuf		*mp;
2635	qlnx_host_t		*ha;
2636
2637	ha = (qlnx_host_t *)ifp->if_softc;
2638
2639	QL_DPRINT2(ha, "enter\n");
2640
2641	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
2642
2643		fp = &ha->fp_array[rss_id];
2644
2645		if (fp == NULL)
2646			continue;
2647
2648		if (fp->tx_br) {
2649			mtx_lock(&fp->tx_mtx);
2650
2651			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
2652				fp->tx_pkts_freed++;
2653				m_freem(mp);
2654			}
2655			mtx_unlock(&fp->tx_mtx);
2656		}
2657	}
2658	QL_DPRINT2(ha, "exit\n");
2659
2660	return;
2661}
2662
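/*
 * Write the tx producer value to this queue's doorbell register in the
 * doorbell BAR, then issue read barriers so the posted write is flushed to
 * the adapter before returning.
 */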
2663static void
2664qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
2665{
2666	struct ecore_dev	*cdev;
2667	uint32_t		offset;
2668
2669	cdev = &ha->cdev;
2670
2671	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)cdev->doorbells);
2672
2673	bus_write_4(ha->pci_dbells, offset, value);
2674	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_READ);
2675	bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
2676
2677	return;
2678}
2679
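/*
 * Compute the byte offset of the TCP payload within the frame (ethernet/VLAN
 * header + IPv4 or IPv6 header + TCP header including options).  Used when
 * building LSO BDs to locate the end of the headers.
 */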
2680static uint32_t
2681qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
2682{
2683        struct ether_vlan_header	*eh = NULL;
2684        struct ip			*ip = NULL;
2685        struct ip6_hdr			*ip6 = NULL;
2686        struct tcphdr			*th = NULL;
2687        uint32_t			ehdrlen = 0, ip_hlen = 0, offset = 0;
2688        uint16_t			etype = 0;
2689        device_t			dev;
2690        uint8_t				buf[sizeof(struct ip6_hdr)];
2691
2692        dev = ha->pci_dev;
2693
2694        eh = mtod(mp, struct ether_vlan_header *);
2695
2696        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2697                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2698                etype = ntohs(eh->evl_proto);
2699        } else {
2700                ehdrlen = ETHER_HDR_LEN;
2701                etype = ntohs(eh->evl_encap_proto);
2702        }
2703
2704        switch (etype) {
2705
2706                case ETHERTYPE_IP:
2707                        ip = (struct ip *)(mp->m_data + ehdrlen);
2708
2709                        ip_hlen = sizeof (struct ip);
2710
2711                        if (mp->m_len < (ehdrlen + ip_hlen)) {
2712                                m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2713                                ip = (struct ip *)buf;
2714                        }
2715
2716                        th = (struct tcphdr *)(ip + 1);
2717			offset = ip_hlen + ehdrlen + (th->th_off << 2);
2718                break;
2719
2720                case ETHERTYPE_IPV6:
2721                        ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2722
2723                        ip_hlen = sizeof(struct ip6_hdr);
2724
2725                        if (mp->m_len < (ehdrlen + ip_hlen)) {
2726                                m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2727                                        buf);
2728                                ip6 = (struct ip6_hdr *)buf;
2729                        }
2730                        th = (struct tcphdr *)(ip6 + 1);
2731			offset = ip_hlen + ehdrlen + (th->th_off << 2);
2732                break;
2733
2734                default:
2735                break;
2736        }
2737
2738        return (offset);
2739}
2740
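/*
 * Check an LSO mbuf that maps to more than QLNX_MAX_SEGMENTS_NON_TSO DMA
 * segments: after the segments spanned by the headers, each window of
 * (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr) segments must cover at least
 * ETH_TX_LSO_WINDOW_MIN_LEN bytes; if not, -1 is returned and the caller
 * falls back to m_defrag().
 */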
2741static __inline int
2742qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
2743	uint32_t offset)
2744{
2745	int			i;
2746	uint32_t		sum, nbds_in_hdr = 1;
2747	bus_dma_segment_t	*t_segs = segs;
2748
2749	/* count the number of segments spanned by the TCP header */
2750
2751	i = 0;
2752	while ((i < nsegs) && (offset > t_segs->ds_len)) {
2753		nbds_in_hdr++;
2754		offset = offset - t_segs->ds_len;
2755		t_segs++;
2756		i++;
2757	}
2758
2759	while (nsegs >= QLNX_MAX_SEGMENTS_NON_TSO) {
2760
2761		sum = 0;
2762
2763		for (i = 0; i < (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr); i++){
2764			sum += segs->ds_len;
2765			segs++;
2766		}
2767
2768		if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
2769			fp->tx_lso_wnd_min_len++;
2770			return (-1);
2771		}
2772
2773		nsegs -= QLNX_MAX_SEGMENTS_NON_TSO;
2774	}
2775
2776	return (0);
2777}
2778
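/*
 * Map the mbuf chain for DMA and post it to the tx queue: defragment when
 * the segment count exceeds what the hardware accepts, build the 1st/2nd/3rd
 * BDs (splitting out the headers when CSUM_TSO is set) and one data BD per
 * remaining segment, then advance the software producer and ring the
 * doorbell.
 */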
2779static int
2780qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
2781{
2782	bus_dma_segment_t	*segs;
2783	bus_dmamap_t		map = 0;
2784	uint32_t		nsegs = 0;
2785	int			ret = -1;
2786	struct mbuf		*m_head = *m_headp;
2787	uint16_t		idx = 0;
2788	uint16_t		elem_left;
2789
2790	uint8_t			nbd = 0;
2791	struct qlnx_tx_queue    *txq;
2792
2793	struct eth_tx_1st_bd    *first_bd;
2794	struct eth_tx_2nd_bd    *second_bd;
2795	struct eth_tx_3rd_bd    *third_bd;
2796	struct eth_tx_bd        *tx_data_bd;
2797
2798	int			seg_idx = 0;
2799	uint32_t		nbds_in_hdr = 0;
2800	uint32_t		offset = 0;
2801
2802	QL_DPRINT8(ha, "enter\n");
2803
2804	if (!ha->link_up)
2805		return (-1);
2806
2807	first_bd	= NULL;
2808	second_bd	= NULL;
2809	third_bd	= NULL;
2810	tx_data_bd	= NULL;
2811
2812	txq = fp->txq[0];
2813
2814	if (fp->tx_ring_full) {
2815		elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
2816
2817		if (elem_left < (TX_RING_SIZE >> 4))
2818			return (-1);
2819		else
2820			fp->tx_ring_full = 0;
2821	}
2822
2823	idx = txq->sw_tx_prod;
2824
2825	map = txq->sw_tx_ring[idx].map;
2826	segs = txq->segs;
2827
2828	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
2829			BUS_DMA_NOWAIT);
2830
2831	if (ha->dbg_trace_tso_pkt_len) {
2832		if (!fp->tx_tso_min_pkt_len) {
2833			fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2834			fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
2835		} else {
2836			if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
2837				fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2838			if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
2839				fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
2840		}
2841	}
2842
2843	if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2844		offset = qlnx_tcp_offset(ha, m_head);
2845
2846	if ((ret == EFBIG) ||
2847		((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
2848			(!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
2849		((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
2850			qlnx_tso_check(fp, segs, nsegs, offset))))) {
2851
2852		struct mbuf *m;
2853
2854		QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
2855
2856		fp->tx_defrag++;
2857
2858		m = m_defrag(m_head, M_NOWAIT);
2859		if (m == NULL) {
2860			fp->err_tx_defrag++;
2861			fp->tx_pkts_freed++;
2862			m_freem(m_head);
2863			*m_headp = NULL;
2864			QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
2865			return (ENOBUFS);
2866		}
2867
2868		m_head = m;
2869		*m_headp = m_head;
2870
2871		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
2872				segs, &nsegs, BUS_DMA_NOWAIT))) {
2873
2874			fp->err_tx_defrag_dmamap_load++;
2875
2876			QL_DPRINT1(ha,
2877				"bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
2878				ret, m_head->m_pkthdr.len);
2879
2880			fp->tx_pkts_freed++;
2881			m_freem(m_head);
2882			*m_headp = NULL;
2883
2884			return (ret);
2885		}
2886
2887		if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
2888			!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
2889
2890			fp->err_tx_non_tso_max_seg++;
2891
2892			QL_DPRINT1(ha,
2893				"(%d) nsegs too many for non-TSO [%d, %d]\n",
2894				ret, nsegs, m_head->m_pkthdr.len);
2895
2896			fp->tx_pkts_freed++;
2897			m_freem(m_head);
2898			*m_headp = NULL;
2899
2900			return (ret);
2901		}
2902		if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2903			offset = qlnx_tcp_offset(ha, m_head);
2904
2905	} else if (ret) {
2906
2907		fp->err_tx_dmamap_load++;
2908
2909		QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
2910			   ret, m_head->m_pkthdr.len);
2911		fp->tx_pkts_freed++;
2912		m_freem(m_head);
2913		*m_headp = NULL;
2914		return (ret);
2915	}
2916
2917	QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
2918
2919	if (ha->dbg_trace_tso_pkt_len) {
2920		if (nsegs < QLNX_FP_MAX_SEGS)
2921			fp->tx_pkts[(nsegs - 1)]++;
2922		else
2923			fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
2924	}
2925
2926	if ((nsegs + QLNX_TX_ELEM_RESERVE) >
2927		(int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
2928
2929		QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs"
2930			" in chain[%d] trying to free packets\n",
2931			nsegs, elem_left, fp->rss_id);
2932
2933		fp->tx_nsegs_gt_elem_left++;
2934
2935		(void)qlnx_tx_int(ha, fp, txq);
2936
2937		if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
2938			ecore_chain_get_elem_left(&txq->tx_pbl))) {
2939
2940			QL_DPRINT1(ha,
2941				"(%d, 0x%x) insufficient BDs in chain[%d]\n",
2942				nsegs, elem_left, fp->rss_id);
2943
2944			fp->err_tx_nsegs_gt_elem_left++;
2945			fp->tx_ring_full = 1;
2946			ha->storm_stats_enable = 1;
2947			return (ENOBUFS);
2948		}
2949	}
2950
2951	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
2952
2953	txq->sw_tx_ring[idx].mp = m_head;
2954
2955	first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2956
2957	memset(first_bd, 0, sizeof(*first_bd));
2958
2959	first_bd->data.bd_flags.bitfields =
2960		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2961
2962	BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
2963
2964	nbd++;
2965
2966	if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
2967		first_bd->data.bd_flags.bitfields |=
2968			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
2969	}
2970
2971	if (m_head->m_pkthdr.csum_flags &
2972		(CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
2973		first_bd->data.bd_flags.bitfields |=
2974			(1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
2975	}
2976
2977        if (m_head->m_flags & M_VLANTAG) {
2978                first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
2979		first_bd->data.bd_flags.bitfields |=
2980			(1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
2981        }
2982
2983	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2984
2985                first_bd->data.bd_flags.bitfields |=
2986			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
2987		first_bd->data.bd_flags.bitfields |=
2988			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
2989
2990		nbds_in_hdr = 1;
2991
2992		if (offset == segs->ds_len) {
2993			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
2994			segs++;
2995			seg_idx++;
2996
2997			second_bd = (struct eth_tx_2nd_bd *)
2998					ecore_chain_produce(&txq->tx_pbl);
2999			memset(second_bd, 0, sizeof(*second_bd));
3000			nbd++;
3001
3002			if (seg_idx < nsegs) {
3003				BD_SET_UNMAP_ADDR_LEN(second_bd, \
3004					(segs->ds_addr), (segs->ds_len));
3005				segs++;
3006				seg_idx++;
3007			}
3008
3009			third_bd = (struct eth_tx_3rd_bd *)
3010					ecore_chain_produce(&txq->tx_pbl);
3011			memset(third_bd, 0, sizeof(*third_bd));
3012			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3013			third_bd->data.bitfields |=
3014				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3015			nbd++;
3016
3017			if (seg_idx < nsegs) {
3018				BD_SET_UNMAP_ADDR_LEN(third_bd, \
3019					(segs->ds_addr), (segs->ds_len));
3020				segs++;
3021				seg_idx++;
3022			}
3023
3024			for (; seg_idx < nsegs; seg_idx++) {
3025				tx_data_bd = (struct eth_tx_bd *)
3026					ecore_chain_produce(&txq->tx_pbl);
3027				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3028				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3029					segs->ds_addr,\
3030					segs->ds_len);
3031				segs++;
3032				nbd++;
3033			}
3034
3035		} else if (offset < segs->ds_len) {
3036			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3037
3038			second_bd = (struct eth_tx_2nd_bd *)
3039					ecore_chain_produce(&txq->tx_pbl);
3040			memset(second_bd, 0, sizeof(*second_bd));
3041			BD_SET_UNMAP_ADDR_LEN(second_bd, \
3042				(segs->ds_addr + offset),\
3043				(segs->ds_len - offset));
3044			nbd++;
3045			segs++;
3046
3047			third_bd = (struct eth_tx_3rd_bd *)
3048					ecore_chain_produce(&txq->tx_pbl);
3049			memset(third_bd, 0, sizeof(*third_bd));
3050
3051			BD_SET_UNMAP_ADDR_LEN(third_bd, \
3052					segs->ds_addr,\
3053					segs->ds_len);
3054			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3055			third_bd->data.bitfields |=
3056				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3057			segs++;
3058			nbd++;
3059
3060			for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3061				tx_data_bd = (struct eth_tx_bd *)
3062					ecore_chain_produce(&txq->tx_pbl);
3063				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3064				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3065					segs->ds_addr,\
3066					segs->ds_len);
3067				segs++;
3068				nbd++;
3069			}
3070
3071		} else {
3072			offset = offset - segs->ds_len;
3073			segs++;
3074
3075			for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3076
3077				if (offset)
3078					nbds_in_hdr++;
3079
3080				tx_data_bd = (struct eth_tx_bd *)
3081					ecore_chain_produce(&txq->tx_pbl);
3082				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3083
3084				if (second_bd == NULL) {
3085					second_bd = (struct eth_tx_2nd_bd *)
3086								tx_data_bd;
3087				} else if (third_bd == NULL) {
3088					third_bd = (struct eth_tx_3rd_bd *)
3089								tx_data_bd;
3090				}
3091
3092				if (offset && (offset < segs->ds_len)) {
3093					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3094						segs->ds_addr, offset);
3095
3096					tx_data_bd = (struct eth_tx_bd *)
3097					ecore_chain_produce(&txq->tx_pbl);
3098
3099					memset(tx_data_bd, 0,
3100						sizeof(*tx_data_bd));
3101
3102					if (second_bd == NULL) {
3103						second_bd =
3104					(struct eth_tx_2nd_bd *)tx_data_bd;
3105					} else if (third_bd == NULL) {
3106						third_bd =
3107					(struct eth_tx_3rd_bd *)tx_data_bd;
3108					}
3109					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3110						(segs->ds_addr + offset), \
3111						(segs->ds_len - offset));
3112					nbd++;
3113					offset = 0;
3114				} else {
3115					if (offset)
3116						offset = offset - segs->ds_len;
3117					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3118						segs->ds_addr, segs->ds_len);
3119				}
3120				segs++;
3121				nbd++;
3122			}
3123
3124			if (third_bd == NULL) {
3125				third_bd = (struct eth_tx_3rd_bd *)
3126					ecore_chain_produce(&txq->tx_pbl);
3127				memset(third_bd, 0, sizeof(*third_bd));
3128			}
3129
3130			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3131			third_bd->data.bitfields |=
3132				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3133		}
3134	} else {
3135		segs++;
3136		for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3137			tx_data_bd = (struct eth_tx_bd *)
3138					ecore_chain_produce(&txq->tx_pbl);
3139			memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3140			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3141				segs->ds_len);
3142			segs++;
3143			nbd++;
3144		}
3145		first_bd->data.bitfields =
3146			(m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3147				 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3148		first_bd->data.bitfields =
3149			htole16(first_bd->data.bitfields);
3150	}
3151
3152
3153	first_bd->data.nbds = nbd;
3154
3155	if (ha->dbg_trace_tso_pkt_len) {
3156		if (fp->tx_tso_max_nsegs < nsegs)
3157			fp->tx_tso_max_nsegs = nsegs;
3158
3159		if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3160			fp->tx_tso_min_nsegs = nsegs;
3161	}
3162
3163	txq->sw_tx_ring[idx].nsegs = nsegs;
3164	txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3165
3166	txq->tx_db.data.bd_prod =
3167		htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3168
3169	qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3170
3171	QL_DPRINT8(ha, "exit\n");
3172	return (0);
3173}
3174
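/*
 * Bring the interface down: clear the RUNNING/OACTIVE flags, cycle each
 * fastpath's tx mutex so the transmit threads observe the new state, kick
 * their taskqueues one last time and unload the hardware.
 */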
3175static void
3176qlnx_stop(qlnx_host_t *ha)
3177{
3178	struct ifnet	*ifp = ha->ifp;
3179	device_t	dev;
3180	int		i;
3181
3182	dev = ha->pci_dev;
3183
3184	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3185
3186	/*
3187	 * We simply lock and unlock each fp->tx_mtx to
3188	 * propagate the if_drv_flags
3189	 * state to each tx thread
3190	 */
3191        QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3192
3193	if (ha->state == QLNX_STATE_OPEN) {
3194        	for (i = 0; i < ha->num_rss; i++) {
3195			struct qlnx_fastpath *fp = &ha->fp_array[i];
3196
3197			mtx_lock(&fp->tx_mtx);
3198			mtx_unlock(&fp->tx_mtx);
3199
3200			if (fp->fp_taskqueue != NULL)
3201				taskqueue_enqueue(fp->fp_taskqueue,
3202					&fp->fp_task);
3203		}
3204	}
3205
3206	qlnx_unload(ha);
3207
3208	return;
3209}
3210
3211static int
3212qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3213{
3214        return(TX_RING_SIZE - 1);
3215}
3216
3217uint8_t *
3218qlnx_get_mac_addr(qlnx_host_t *ha)
3219{
3220	struct ecore_hwfn	*p_hwfn;
3221
3222	p_hwfn = &ha->cdev.hwfns[0];
3223        return (p_hwfn->hw_info.hw_mac_addr);
3224}
3225
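/*
 * Translate the media type and link speed reported for the port into an
 * ifmedia subtype (SR/LR/CR variants for fiber, twinax for DA cables);
 * unknown combinations are reported as IFM_UNKNOWN.
 */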
3226static uint32_t
3227qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3228{
3229	uint32_t	ifm_type = 0;
3230
3231	switch (if_link->media_type) {
3232
3233	case MEDIA_MODULE_FIBER:
3234	case MEDIA_UNSPECIFIED:
3235		if (if_link->speed == (100 * 1000))
3236			ifm_type = QLNX_IFM_100G_SR4;
3237		else if (if_link->speed == (40 * 1000))
3238			ifm_type = IFM_40G_SR4;
3239		else if (if_link->speed == (25 * 1000))
3240			ifm_type = QLNX_IFM_25G_SR;
3241		else if (if_link->speed == (10 * 1000))
3242			ifm_type = (IFM_10G_LR | IFM_10G_SR);
3243		else if (if_link->speed == (1 * 1000))
3244			ifm_type = (IFM_1000_SX | IFM_1000_LX);
3245
3246		break;
3247
3248	case MEDIA_DA_TWINAX:
3249		if (if_link->speed == (100 * 1000))
3250			ifm_type = QLNX_IFM_100G_CR4;
3251		else if (if_link->speed == (40 * 1000))
3252			ifm_type = IFM_40G_CR4;
3253		else if (if_link->speed == (25 * 1000))
3254			ifm_type = QLNX_IFM_25G_CR;
3255		else if (if_link->speed == (10 * 1000))
3256			ifm_type = IFM_10G_TWINAX;
3257
3258		break;
3259
3260	default :
3261		ifm_type = IFM_UNKNOWN;
3262		break;
3263	}
3264	return (ifm_type);
3265}
3266
3267
3268
3269/*****************************************************************************
3270 * Interrupt Service Functions
3271 *****************************************************************************/
3272
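/*
 * A received frame larger than one rx buffer arrives as a chain of buffers;
 * link the additional buffers onto mp_head, replenishing each rx ring entry
 * as it is consumed.  On a buffer allocation failure the partial chain is
 * freed and the frame is dropped.
 */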
3273static int
3274qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3275	struct mbuf *mp_head, uint16_t len)
3276{
3277	struct mbuf		*mp, *mpf, *mpl;
3278	struct sw_rx_data	*sw_rx_data;
3279	struct qlnx_rx_queue	*rxq;
3280	uint16_t 		len_in_buffer;
3281
3282	rxq = fp->rxq;
3283	mpf = mpl = mp = NULL;
3284
3285	while (len) {
3286
3287        	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3288
3289                sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3290                mp = sw_rx_data->data;
3291
3292		if (mp == NULL) {
3293                	QL_DPRINT1(ha, "mp = NULL\n");
3294			fp->err_rx_mp_null++;
3295        		rxq->sw_rx_cons  =
3296				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3297
3298			if (mpf != NULL)
3299				m_freem(mpf);
3300
3301			return (-1);
3302		}
3303		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3304			BUS_DMASYNC_POSTREAD);
3305
3306                if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3307
3308                        QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3309				" incoming packet and reusing its buffer\n");
3310
3311                        qlnx_reuse_rx_data(rxq);
3312                        fp->err_rx_alloc_errors++;
3313
3314			if (mpf != NULL)
3315				m_freem(mpf);
3316
3317			return (-1);
3318		}
3319                ecore_chain_consume(&rxq->rx_bd_ring);
3320
3321		if (len > rxq->rx_buf_size)
3322			len_in_buffer = rxq->rx_buf_size;
3323		else
3324			len_in_buffer = len;
3325
3326		len = len - len_in_buffer;
3327
3328		mp->m_flags &= ~M_PKTHDR;
3329		mp->m_next = NULL;
3330		mp->m_len = len_in_buffer;
3331
3332		if (mpf == NULL)
3333			mpf = mpl = mp;
3334		else {
3335			mpl->m_next = mp;
3336			mpl = mp;
3337		}
3338	}
3339
3340	if (mpf != NULL)
3341		mp_head->m_next = mpf;
3342
3343	return (0);
3344}
3345
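/*
 * Handle a TPA (hardware LRO) aggregation-start completion: validate the
 * aggregation index, take the first buffer off the rx ring as the head mbuf,
 * absorb any extra buffers listed in ext_bd_len_list, record the RSS hash,
 * VLAN tag and checksum state in the packet header, and mark the aggregation
 * as started in rxq->tpa_info[].
 */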
3346static void
3347qlnx_tpa_start(qlnx_host_t *ha,
3348	struct qlnx_fastpath *fp,
3349	struct qlnx_rx_queue *rxq,
3350	struct eth_fast_path_rx_tpa_start_cqe *cqe)
3351{
3352	uint32_t		agg_index;
3353        struct ifnet		*ifp = ha->ifp;
3354	struct mbuf		*mp;
3355	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
3356	struct sw_rx_data	*sw_rx_data;
3357	dma_addr_t		addr;
3358	bus_dmamap_t		map;
3359	struct eth_rx_bd	*rx_bd;
3360	int			i;
3361	device_t		dev;
3362#if __FreeBSD_version >= 1100000
3363	uint8_t			hash_type;
3364#endif /* #if __FreeBSD_version >= 1100000 */
3365
3366	dev = ha->pci_dev;
3367	agg_index = cqe->tpa_agg_index;
3368
3369        QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3370                \t type = 0x%x\n \
3371                \t bitfields = 0x%x\n \
3372                \t seg_len = 0x%x\n \
3373                \t pars_flags = 0x%x\n \
3374                \t vlan_tag = 0x%x\n \
3375                \t rss_hash = 0x%x\n \
3376                \t len_on_first_bd = 0x%x\n \
3377                \t placement_offset = 0x%x\n \
3378                \t tpa_agg_index = 0x%x\n \
3379                \t header_len = 0x%x\n \
3380                \t ext_bd_len_list[0] = 0x%x\n \
3381                \t ext_bd_len_list[1] = 0x%x\n \
3382                \t ext_bd_len_list[2] = 0x%x\n \
3383                \t ext_bd_len_list[3] = 0x%x\n \
3384                \t ext_bd_len_list[4] = 0x%x\n",
3385                fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3386                cqe->pars_flags.flags, cqe->vlan_tag,
3387                cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3388                cqe->tpa_agg_index, cqe->header_len,
3389                cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3390                cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3391                cqe->ext_bd_len_list[4]);
3392
3393	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3394		fp->err_rx_tpa_invalid_agg_num++;
3395		return;
3396	}
3397
3398	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3399	bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3400	mp = sw_rx_data->data;
3401
3402	QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3403
3404	if (mp == NULL) {
3405               	QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3406		fp->err_rx_mp_null++;
3407       		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3408
3409		return;
3410	}
3411
3412	if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3413
3414		QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3415			" flags = %x, dropping incoming packet\n", fp->rss_id,
3416			rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3417
3418		fp->err_rx_hw_errors++;
3419
3420		qlnx_reuse_rx_data(rxq);
3421
3422		QLNX_INC_IERRORS(ifp);
3423
3424		return;
3425	}
3426
3427	if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3428
3429		QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3430			" dropping incoming packet and reusing its buffer\n",
3431			fp->rss_id);
3432
3433		fp->err_rx_alloc_errors++;
3434		QLNX_INC_IQDROPS(ifp);
3435
3436		/*
3437		 * Load the tpa mbuf into the rx ring and save the
3438		 * posted mbuf
3439		 */
3440
3441		map = sw_rx_data->map;
3442		addr = sw_rx_data->dma_addr;
3443
3444		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
3445
3446		sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
3447		sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
3448		sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
3449
3450		rxq->tpa_info[agg_index].rx_buf.data = mp;
3451		rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
3452		rxq->tpa_info[agg_index].rx_buf.map = map;
3453
3454		rx_bd = (struct eth_rx_bd *)
3455				ecore_chain_produce(&rxq->rx_bd_ring);
3456
3457		rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
3458		rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
3459
3460		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3461			BUS_DMASYNC_PREREAD);
3462
3463		rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
3464		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3465
3466		ecore_chain_consume(&rxq->rx_bd_ring);
3467
3468		/* Now reuse any buffers posted in ext_bd_len_list */
3469		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3470
3471			if (cqe->ext_bd_len_list[i] == 0)
3472				break;
3473
3474			qlnx_reuse_rx_data(rxq);
3475		}
3476
3477		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3478		return;
3479	}
3480
3481	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3482
3483		QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
3484			" dropping incoming packet and reusing its buffer\n",
3485			fp->rss_id);
3486
3487		QLNX_INC_IQDROPS(ifp);
3488
3489		/* if we already have an mbuf head in the aggregation, free it */
3490		if (rxq->tpa_info[agg_index].mpf) {
3491			m_freem(rxq->tpa_info[agg_index].mpf);
3492			rxq->tpa_info[agg_index].mpl = NULL;
3493		}
3494		rxq->tpa_info[agg_index].mpf = mp;
3495		rxq->tpa_info[agg_index].mpl = NULL;
3496
3497		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3498		ecore_chain_consume(&rxq->rx_bd_ring);
3499
3500		/* Now reuse any buffers posted in ext_bd_len_list */
3501		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3502
3503			if (cqe->ext_bd_len_list[i] == 0)
3504				break;
3505
3506			qlnx_reuse_rx_data(rxq);
3507		}
3508		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3509
3510		return;
3511	}
3512
3513	/*
3514	 * First process the ext_bd_len_list;
3515	 * if this fails then we simply drop the packet.
3516	 */
3517	ecore_chain_consume(&rxq->rx_bd_ring);
3518	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3519
3520	for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3521
3522		QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
3523
3524		if (cqe->ext_bd_len_list[i] == 0)
3525			break;
3526
3527		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3528		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3529			BUS_DMASYNC_POSTREAD);
3530
3531		mpc = sw_rx_data->data;
3532
3533		if (mpc == NULL) {
3534			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3535			fp->err_rx_mp_null++;
3536			if (mpf != NULL)
3537				m_freem(mpf);
3538			mpf = mpl = NULL;
3539			rxq->tpa_info[agg_index].agg_state =
3540						QLNX_AGG_STATE_ERROR;
3541			ecore_chain_consume(&rxq->rx_bd_ring);
3542			rxq->sw_rx_cons =
3543				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3544			continue;
3545		}
3546
3547		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3548			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3549				" dropping incoming packet and reusing its"
3550				" buffer\n", fp->rss_id);
3551
3552			qlnx_reuse_rx_data(rxq);
3553
3554			if (mpf != NULL)
3555				m_freem(mpf);
3556			mpf = mpl = NULL;
3557
3558			rxq->tpa_info[agg_index].agg_state =
3559						QLNX_AGG_STATE_ERROR;
3560
3561			ecore_chain_consume(&rxq->rx_bd_ring);
3562			rxq->sw_rx_cons =
3563				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3564
3565			continue;
3566		}
3567
3568		mpc->m_flags &= ~M_PKTHDR;
3569		mpc->m_next = NULL;
3570		mpc->m_len = cqe->ext_bd_len_list[i];
3571
3572
3573		if (mpf == NULL) {
3574			mpf = mpl = mpc;
3575		} else {
3576			mpl->m_len = ha->rx_buf_size;
3577			mpl->m_next = mpc;
3578			mpl = mpc;
3579		}
3580
3581		ecore_chain_consume(&rxq->rx_bd_ring);
3582		rxq->sw_rx_cons =
3583			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3584	}
3585
3586	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3587
3588		QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
3589			" incoming packet and reusing its buffer\n",
3590			fp->rss_id);
3591
3592		QLNX_INC_IQDROPS(ifp);
3593
3594		rxq->tpa_info[agg_index].mpf = mp;
3595		rxq->tpa_info[agg_index].mpl = NULL;
3596
3597		return;
3598	}
3599
3600        rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
3601
3602        if (mpf != NULL) {
3603                mp->m_len = ha->rx_buf_size;
3604                mp->m_next = mpf;
3605                rxq->tpa_info[agg_index].mpf = mp;
3606                rxq->tpa_info[agg_index].mpl = mpl;
3607        } else {
3608                mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
3609                rxq->tpa_info[agg_index].mpf = mp;
3610                rxq->tpa_info[agg_index].mpl = mp;
3611                mp->m_next = NULL;
3612        }
3613
3614	mp->m_flags |= M_PKTHDR;
3615
3616	/* assign packet to this interface */
3617	mp->m_pkthdr.rcvif = ifp;
3618
3619	/* assume no hardware checksum has completed */
3620	mp->m_pkthdr.csum_flags = 0;
3621
3622	//mp->m_pkthdr.flowid = fp->rss_id;
3623	mp->m_pkthdr.flowid = cqe->rss_hash;
3624
3625#if __FreeBSD_version >= 1100000
3626
3627	hash_type = cqe->bitfields &
3628			(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
3629			ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
3630
3631	switch (hash_type) {
3632
3633	case RSS_HASH_TYPE_IPV4:
3634		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
3635		break;
3636
3637	case RSS_HASH_TYPE_TCP_IPV4:
3638		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
3639		break;
3640
3641	case RSS_HASH_TYPE_IPV6:
3642		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
3643		break;
3644
3645	case RSS_HASH_TYPE_TCP_IPV6:
3646		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
3647		break;
3648
3649	default:
3650		M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
3651		break;
3652	}
3653
3654#else
3655	mp->m_flags |= M_FLOWID;
3656#endif
3657
3658	mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
3659					CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3660
3661	mp->m_pkthdr.csum_data = 0xFFFF;
3662
3663	if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
3664		mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
3665		mp->m_flags |= M_VLANTAG;
3666	}
3667
3668	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
3669
3670        QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
3671		fp->rss_id, rxq->tpa_info[agg_index].agg_state,
3672                rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
3673
3674	return;
3675}
3676
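/*
 * Handle a TPA aggregation-continue completion: append the buffers listed in
 * len_list to the mbuf chain of the in-progress aggregation, marking the
 * aggregation as errored if a replacement rx buffer cannot be allocated.
 */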
3677static void
3678qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3679	struct qlnx_rx_queue *rxq,
3680	struct eth_fast_path_rx_tpa_cont_cqe *cqe)
3681{
3682	struct sw_rx_data	*sw_rx_data;
3683	int			i;
3684	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
3685	struct mbuf		*mp;
3686	uint32_t		agg_index;
3687	device_t		dev;
3688
3689	dev = ha->pci_dev;
3690
3691        QL_DPRINT7(ha, "[%d]: enter\n \
3692                \t type = 0x%x\n \
3693                \t tpa_agg_index = 0x%x\n \
3694                \t len_list[0] = 0x%x\n \
3695                \t len_list[1] = 0x%x\n \
3696                \t len_list[2] = 0x%x\n \
3697                \t len_list[3] = 0x%x\n \
3698                \t len_list[4] = 0x%x\n \
3699                \t len_list[5] = 0x%x\n",
3700                fp->rss_id, cqe->type, cqe->tpa_agg_index,
3701                cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3702                cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
3703
3704	agg_index = cqe->tpa_agg_index;
3705
3706	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3707		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
3708		fp->err_rx_tpa_invalid_agg_num++;
3709		return;
3710	}
3711
3712
3713	for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
3714
3715		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
3716
3717		if (cqe->len_list[i] == 0)
3718			break;
3719
3720		if (rxq->tpa_info[agg_index].agg_state !=
3721			QLNX_AGG_STATE_START) {
3722			qlnx_reuse_rx_data(rxq);
3723			continue;
3724		}
3725
3726		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3727		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3728			BUS_DMASYNC_POSTREAD);
3729
3730		mpc = sw_rx_data->data;
3731
3732		if (mpc == NULL) {
3733
3734			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3735
3736			fp->err_rx_mp_null++;
3737			if (mpf != NULL)
3738				m_freem(mpf);
3739			mpf = mpl = NULL;
3740			rxq->tpa_info[agg_index].agg_state =
3741						QLNX_AGG_STATE_ERROR;
3742			ecore_chain_consume(&rxq->rx_bd_ring);
3743			rxq->sw_rx_cons =
3744				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3745			continue;
3746		}
3747
3748		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3749
3750			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3751				" dropping incoming packet and reusing its"
3752				" buffer\n", fp->rss_id);
3753
3754			qlnx_reuse_rx_data(rxq);
3755
3756			if (mpf != NULL)
3757				m_freem(mpf);
3758			mpf = mpl = NULL;
3759
3760			rxq->tpa_info[agg_index].agg_state =
3761						QLNX_AGG_STATE_ERROR;
3762
3763			ecore_chain_consume(&rxq->rx_bd_ring);
3764			rxq->sw_rx_cons =
3765				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3766
3767			continue;
3768		}
3769
3770		mpc->m_flags &= ~M_PKTHDR;
3771		mpc->m_next = NULL;
3772		mpc->m_len = cqe->len_list[i];
3773
3774
3775		if (mpf == NULL) {
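		/*
		 * Append this buffer to the continuation chain. Every buffer
		 * other than the current tail is completely filled, so once a
		 * successor arrives the previous tail is bumped up to
		 * rx_buf_size; only the tail keeps its len_list[] length.
		 */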
3776			mpf = mpl = mpc;
3777		} else {
3778			mpl->m_len = ha->rx_buf_size;
3779			mpl->m_next = mpc;
3780			mpl = mpc;
3781		}
3782
3783		ecore_chain_consume(&rxq->rx_bd_ring);
3784		rxq->sw_rx_cons =
3785			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3786	}
3787
3788        QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
3789                  fp->rss_id, mpf, mpl);
3790
3791	if (mpf != NULL) {
3792		mp = rxq->tpa_info[agg_index].mpl;
3793		mp->m_len = ha->rx_buf_size;
3794		mp->m_next = mpf;
3795		rxq->tpa_info[agg_index].mpl = mpl;
3796	}
3797
3798	return;
3799}
3800
3801static int
3802qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3803	struct qlnx_rx_queue *rxq,
3804	struct eth_fast_path_rx_tpa_end_cqe *cqe)
3805{
3806	struct sw_rx_data	*sw_rx_data;
3807	int			i;
3808	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
3809	struct mbuf		*mp;
3810	uint32_t		agg_index;
3811	uint32_t		len = 0;
3812        struct ifnet		*ifp = ha->ifp;
3813	device_t		dev;
3814
3815	dev = ha->pci_dev;
3816
3817        QL_DPRINT7(ha, "[%d]: enter\n \
3818                \t type = 0x%x\n \
3819                \t tpa_agg_index = 0x%x\n \
3820                \t total_packet_len = 0x%x\n \
3821                \t num_of_bds = 0x%x\n \
3822                \t end_reason = 0x%x\n \
3823                \t num_of_coalesced_segs = 0x%x\n \
3824                \t ts_delta = 0x%x\n \
3825                \t len_list[0] = 0x%x\n \
3826                \t len_list[1] = 0x%x\n \
3827                \t len_list[2] = 0x%x\n \
3828                \t len_list[3] = 0x%x\n",
3829                 fp->rss_id, cqe->type, cqe->tpa_agg_index,
3830                cqe->total_packet_len, cqe->num_of_bds,
3831                cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
3832                cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3833                cqe->len_list[3]);
3834
3835	agg_index = cqe->tpa_agg_index;
3836
3837	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3838
3839		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
3840
3841		fp->err_rx_tpa_invalid_agg_num++;
3842		return (0);
3843	}
3844
3845
3846	for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
3847
3848		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
3849
3850		if (cqe->len_list[i] == 0)
3851			break;
3852
3853		if (rxq->tpa_info[agg_index].agg_state !=
3854			QLNX_AGG_STATE_START) {
3855
3856			QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
3857
3858			qlnx_reuse_rx_data(rxq);
3859			continue;
3860		}
3861
3862		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3863		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3864			BUS_DMASYNC_POSTREAD);
3865
3866		mpc = sw_rx_data->data;
3867
3868		if (mpc == NULL) {
3869
3870			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3871
3872			fp->err_rx_mp_null++;
3873			if (mpf != NULL)
3874				m_freem(mpf);
3875			mpf = mpl = NULL;
3876			rxq->tpa_info[agg_index].agg_state =
3877						QLNX_AGG_STATE_ERROR;
3878			ecore_chain_consume(&rxq->rx_bd_ring);
3879			rxq->sw_rx_cons =
3880				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3881			continue;
3882		}
3883
3884		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3885			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3886				" dropping incoming packet and reusing its"
3887				" buffer\n", fp->rss_id);
3888
3889			qlnx_reuse_rx_data(rxq);
3890
3891			if (mpf != NULL)
3892				m_freem(mpf);
3893			mpf = mpl = NULL;
3894
3895			rxq->tpa_info[agg_index].agg_state =
3896						QLNX_AGG_STATE_ERROR;
3897
3898			ecore_chain_consume(&rxq->rx_bd_ring);
3899			rxq->sw_rx_cons =
3900				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3901
3902			continue;
3903		}
3904
3905		mpc->m_flags &= ~M_PKTHDR;
3906		mpc->m_next = NULL;
3907		mpc->m_len = cqe->len_list[i];
3908
3909
3910		if (mpf == NULL) {
3911			mpf = mpl = mpc;
3912		} else {
3913			mpl->m_len = ha->rx_buf_size;
3914			mpl->m_next = mpc;
3915			mpl = mpc;
3916		}
3917
3918		ecore_chain_consume(&rxq->rx_bd_ring);
3919		rxq->sw_rx_cons =
3920			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3921	}
3922
3923	QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
3924
3925	if (mpf != NULL) {
3926
3927		QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
3928
3929		mp = rxq->tpa_info[agg_index].mpl;
3930		mp->m_len = ha->rx_buf_size;
3931		mp->m_next = mpf;
3932	}
3933
3934	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
3935
3936		QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
3937
3938		if (rxq->tpa_info[agg_index].mpf != NULL)
3939			m_freem(rxq->tpa_info[agg_index].mpf);
3940		rxq->tpa_info[agg_index].mpf = NULL;
3941		rxq->tpa_info[agg_index].mpl = NULL;
3942		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3943		return (0);
3944	}
3945
3946	mp = rxq->tpa_info[agg_index].mpf;
3947	m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
3948	mp->m_pkthdr.len = cqe->total_packet_len;
3949
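	/*
	 * Make the mbuf chain consistent with the total packet length
	 * reported by the firmware: a single-mbuf packet simply takes the
	 * full length, otherwise any shortfall in the summed per-segment
	 * lengths is credited to the last mbuf in the chain.
	 */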
3950	if (mp->m_next  == NULL)
3951		mp->m_len = mp->m_pkthdr.len;
3952	else {
3953		/* compute the total packet length */
3954		mpf = mp;
3955		while (mpf != NULL) {
3956			len += mpf->m_len;
3957			mpf = mpf->m_next;
3958		}
3959
3960		if (cqe->total_packet_len > len) {
3961			mpl = rxq->tpa_info[agg_index].mpl;
3962			mpl->m_len += (cqe->total_packet_len - len);
3963		}
3964	}
3965
3966	QLNX_INC_IPACKETS(ifp);
3967	QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
3968
3969        QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n \
3970		m_len = 0x%x m_pkthdr_len = 0x%x\n",
3971                fp->rss_id, mp->m_pkthdr.csum_data,
3972                mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
3973
3974	(*ifp->if_input)(ifp, mp);
3975
3976	rxq->tpa_info[agg_index].mpf = NULL;
3977	rxq->tpa_info[agg_index].mpl = NULL;
3978	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3979
3980	return (cqe->num_of_coalesced_segs);
3981}
3982
3983static int
3984qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
3985	int lro_enable)
3986{
3987        uint16_t		hw_comp_cons, sw_comp_cons;
3988        int			rx_pkt = 0;
3989        struct qlnx_rx_queue	*rxq = fp->rxq;
3990        struct ifnet		*ifp = ha->ifp;
3991	struct ecore_dev	*cdev = &ha->cdev;
3992	struct ecore_hwfn       *p_hwfn;
3993
3994#ifdef QLNX_SOFT_LRO
3995	struct lro_ctrl		*lro;
3996
3997	lro = &rxq->lro;
3998#endif /* #ifdef QLNX_SOFT_LRO */
3999
4000        hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4001        sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4002
4003	p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4004
4005        /* Memory barrier to prevent the CPU from speculatively reading the
4006         * CQE/BD in the while-loop below before hw_comp_cons is read. If the
4007         * CQE were read first, the firmware could write the CQE and the SB,
4008         * and the CPU would then pair the new hw_comp_cons with a stale CQE.
4009         */
4010
4011        /* Loop to complete all indicated BDs */
4012        while (sw_comp_cons != hw_comp_cons) {
4013                union eth_rx_cqe		*cqe;
4014                struct eth_fast_path_rx_reg_cqe	*fp_cqe;
4015                struct sw_rx_data		*sw_rx_data;
4016		register struct mbuf		*mp;
4017                enum eth_rx_cqe_type		cqe_type;
4018                uint16_t			len, pad, len_on_first_bd;
4019                uint8_t				*data;
4020#if __FreeBSD_version >= 1100000
4021		uint8_t				hash_type;
4022#endif /* #if __FreeBSD_version >= 1100000 */
4023
4024                /* Get the CQE from the completion ring */
4025                cqe = (union eth_rx_cqe *)
4026                        ecore_chain_consume(&rxq->rx_comp_ring);
4027                cqe_type = cqe->fast_path_regular.type;
4028
4029                if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4030                        QL_DPRINT3(ha, "Got a slowath CQE\n");
4031
4032                        ecore_eth_cqe_completion(p_hwfn,
4033                                        (struct eth_slow_path_rx_cqe *)cqe);
4034                        goto next_cqe;
4035                }
4036
4037		if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4038
4039			switch (cqe_type) {
4040
4041			case ETH_RX_CQE_TYPE_TPA_START:
4042				qlnx_tpa_start(ha, fp, rxq,
4043					&cqe->fast_path_tpa_start);
4044				fp->tpa_start++;
4045				break;
4046
4047			case ETH_RX_CQE_TYPE_TPA_CONT:
4048				qlnx_tpa_cont(ha, fp, rxq,
4049					&cqe->fast_path_tpa_cont);
4050				fp->tpa_cont++;
4051				break;
4052
4053			case ETH_RX_CQE_TYPE_TPA_END:
4054				rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4055						&cqe->fast_path_tpa_end);
4056				fp->tpa_end++;
4057				break;
4058
4059			default:
4060				break;
4061			}
4062
4063                        goto next_cqe;
4064		}
4065
4066                /* Get the data from the SW ring */
4067                sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4068                mp = sw_rx_data->data;
4069
4070		if (mp == NULL) {
4071                	QL_DPRINT1(ha, "mp = NULL\n");
4072			fp->err_rx_mp_null++;
4073        		rxq->sw_rx_cons  =
4074				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4075			goto next_cqe;
4076		}
4077		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4078			BUS_DMASYNC_POSTREAD);
4079
4080                /* non GRO */
4081                fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4082                len =  le16toh(fp_cqe->pkt_len);
4083                pad = fp_cqe->placement_offset;
4084
4085		QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4086			" len %u, parsing flags = %d pad  = %d\n",
4087			cqe_type, fp_cqe->bitfields,
4088			le16toh(fp_cqe->vlan_tag),
4089			len, le16toh(fp_cqe->pars_flags.flags), pad);
4090
4091		data = mtod(mp, uint8_t *);
4092		data = data + pad;
4093
4094		if (0)
4095			qlnx_dump_buf8(ha, __func__, data, len);
4096
4097                /* For every Rx BD consumed, we allocate a new BD so the BD ring
4098                 * always keeps a fixed size. If allocation fails, we take the
4099                 * consumed BD and return it to the ring in the PROD position.
4100                 * The packet that was received on that BD will be dropped (and
4101                 * not passed to the upper stack).
4102                 */
4103		/* If this is an error packet then drop it */
4104		if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4105			CQE_FLAGS_ERR) {
4106
4107			QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4108				" dropping incoming packet\n", sw_comp_cons,
4109			le16toh(cqe->fast_path_regular.pars_flags.flags));
4110			fp->err_rx_hw_errors++;
4111
4112                        qlnx_reuse_rx_data(rxq);
4113
4114			QLNX_INC_IERRORS(ifp);
4115
4116			goto next_cqe;
4117		}
4118
4119                if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4120
4121                        QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4122				" incoming packet and reusing its buffer\n");
4123                        qlnx_reuse_rx_data(rxq);
4124
4125                        fp->err_rx_alloc_errors++;
4126
4127			QLNX_INC_IQDROPS(ifp);
4128
4129                        goto next_cqe;
4130                }
4131
4132                ecore_chain_consume(&rxq->rx_bd_ring);
4133
4134		len_on_first_bd = fp_cqe->len_on_first_bd;
4135		m_adj(mp, pad);
4136		mp->m_pkthdr.len = len;
4137
4138		QL_DPRINT1(ha, "len = %d len_on_first_bd = %d\n",
4139			   len, len_on_first_bd);
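		/*
		 * If the frame spills past the first BD, gather the remaining
		 * bytes from the follow-on BDs into an mbuf chain; on failure
		 * the partially built packet is dropped.
		 */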
4140		if ((len > 60 ) && (len > len_on_first_bd)) {
4141
4142			mp->m_len = len_on_first_bd;
4143
4144			if (qlnx_rx_jumbo_chain(ha, fp, mp,
4145				(len - len_on_first_bd)) != 0) {
4146
4147				m_freem(mp);
4148
4149				QLNX_INC_IQDROPS(ifp);
4150
4151                        	goto next_cqe;
4152			}
4153
4154		} else if (len_on_first_bd < len) {
4155			fp->err_rx_jumbo_chain_pkts++;
4156		} else {
4157			mp->m_len = len;
4158		}
4159
4160		mp->m_flags |= M_PKTHDR;
4161
4162		/* assign the packet to this interface */
4163		mp->m_pkthdr.rcvif = ifp;
4164
4165		/* assume no hardware checksum has been completed */
4166		mp->m_pkthdr.csum_flags = 0;
4167
4168		mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4169
4170#if __FreeBSD_version >= 1100000
4171
4172		hash_type = fp_cqe->bitfields &
4173				(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4174				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4175
4176		switch (hash_type) {
4177
4178		case RSS_HASH_TYPE_IPV4:
4179			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4180			break;
4181
4182		case RSS_HASH_TYPE_TCP_IPV4:
4183			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4184			break;
4185
4186		case RSS_HASH_TYPE_IPV6:
4187			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4188			break;
4189
4190		case RSS_HASH_TYPE_TCP_IPV6:
4191			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4192			break;
4193
4194		default:
4195			M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4196			break;
4197		}
4198
4199#else
4200		mp->m_flags |= M_FLOWID;
4201#endif
4202
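		/*
		 * Translate the CQE parsing flags into mbuf checksum flags:
		 * CSUM_IP_CHECKED for L3 packets, CSUM_IP_VALID when no IP
		 * header error was flagged, and CSUM_DATA_VALID/CSUM_PSEUDO_HDR
		 * when the hardware verified the L4 checksum.
		 */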
4203		if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4204			mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4205		}
4206
4207		if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4208			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4209		}
4210
4211		if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4212			mp->m_pkthdr.csum_data = 0xFFFF;
4213			mp->m_pkthdr.csum_flags |=
4214				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4215		}
4216
4217		if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4218			mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4219			mp->m_flags |= M_VLANTAG;
4220		}
4221
4222		QLNX_INC_IPACKETS(ifp);
4223		QLNX_INC_IBYTES(ifp, len);
4224
4225#ifdef QLNX_SOFT_LRO
4226
4227		if (lro_enable) {
4228
4229#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4230
4231			tcp_lro_queue_mbuf(lro, mp);
4232
4233#else
4234
4235			if (tcp_lro_rx(lro, mp, 0))
4236				(*ifp->if_input)(ifp, mp);
4237
4238#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4239
4240		} else {
4241			(*ifp->if_input)(ifp, mp);
4242		}
4243#else
4244
4245		(*ifp->if_input)(ifp, mp);
4246
4247#endif /* #ifdef QLNX_SOFT_LRO */
4248
4249                rx_pkt++;
4250
4251        	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4252
4253next_cqe:	/* don't consume bd rx buffer */
4254                ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4255                sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4256
4257		/* CR TPA - revisit how to handle budget in TPA perhaps
4258		   increase on "end" */
4259                if (rx_pkt == budget)
4260                        break;
4261        } /* repeat while sw_comp_cons != hw_comp_cons... */
4262
4263        /* Update producers */
4264        qlnx_update_rx_prod(p_hwfn, rxq);
4265
4266        return rx_pkt;
4267}
4268
4269
4270/*
4271 * fast path interrupt
4272 */
4273
4274static void
4275qlnx_fp_isr(void *arg)
4276{
4277        qlnx_ivec_t		*ivec = arg;
4278        qlnx_host_t		*ha;
4279        struct qlnx_fastpath	*fp = NULL;
4280        int			idx;
4281
4282        ha = ivec->ha;
4283
4284        if (ha->state != QLNX_STATE_OPEN) {
4285                return;
4286        }
4287
4288        idx = ivec->rss_idx;
4289
4290        if (idx >= ha->num_rss) {
4291                QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4292                ha->err_illegal_intr++;
4293                return;
4294        }
4295        fp = &ha->fp_array[idx];
4296
4297        if (fp == NULL) {
4298                ha->err_fp_null++;
4299        } else {
4300
4301#ifdef QLNX_RCV_IN_TASKQ
4302                ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4303		if (fp->fp_taskqueue != NULL)
4304			taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
4305#else
4306		int	rx_int = 0, total_rx_count = 0;
4307		int 	lro_enable, tc;
4308
4309		lro_enable = ha->ifp->if_capenable & IFCAP_LRO;
4310
4311                ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4312
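                /*
                 * With the IGU interrupt masked, drain Tx completions and
                 * receive packets until the Rx ring is empty, then flush any
                 * pending soft LRO state, update the status block index and
                 * re-enable the interrupt.
                 */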
4313                do {
4314                        for (tc = 0; tc < ha->num_tc; tc++) {
4315                                if (mtx_trylock(&fp->tx_mtx)) {
4316                                        qlnx_tx_int(ha, fp, fp->txq[tc]);
4317                                        mtx_unlock(&fp->tx_mtx);
4318                                }
4319                        }
4320
4321                        rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4322                                        lro_enable);
4323
4324                        if (rx_int) {
4325                                fp->rx_pkts += rx_int;
4326                                total_rx_count += rx_int;
4327                        }
4328
4329                } while (rx_int);
4330
4331
4332#ifdef QLNX_SOFT_LRO
4333                {
4334                        struct lro_ctrl *lro;
4335
4336                        lro = &fp->rxq->lro;
4337
4338                        if (lro_enable && total_rx_count) {
4339
4340#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4341
4342#ifdef QLNX_TRACE_LRO_CNT
4343                                if (lro->lro_mbuf_count & ~1023)
4344                                        fp->lro_cnt_1024++;
4345                                else if (lro->lro_mbuf_count & ~511)
4346                                        fp->lro_cnt_512++;
4347                                else if (lro->lro_mbuf_count & ~255)
4348                                        fp->lro_cnt_256++;
4349                                else if (lro->lro_mbuf_count & ~127)
4350                                        fp->lro_cnt_128++;
4351                                else if (lro->lro_mbuf_count & ~63)
4352                                        fp->lro_cnt_64++;
4353#endif /* #ifdef QLNX_TRACE_LRO_CNT */
4354
4355                                tcp_lro_flush_all(lro);
4356
4357#else
4358                                struct lro_entry *queued;
4359
4360                                while ((!SLIST_EMPTY(&lro->lro_active))) {
4361                                        queued = SLIST_FIRST(&lro->lro_active);
4362                                        SLIST_REMOVE_HEAD(&lro->lro_active, \
4363                                                next);
4364                                        tcp_lro_flush(lro, queued);
4365                                }
4366#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4367                        }
4368                }
4369#endif /* #ifdef QLNX_SOFT_LRO */
4370
4371                ecore_sb_update_sb_idx(fp->sb_info);
4372                rmb();
4373                ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4374
4375#endif /* #ifdef QLNX_RCV_IN_TASKQ */
4376        }
4377
4378        return;
4379}
4380
4381
4382/*
4383 * slow path interrupt processing function
4384 * can be invoked in polled mode or in interrupt mode via taskqueue.
4385 */
4386void
4387qlnx_sp_isr(void *arg)
4388{
4389	struct ecore_hwfn	*p_hwfn;
4390	qlnx_host_t		*ha;
4391
4392	p_hwfn = arg;
4393
4394	ha = (qlnx_host_t *)p_hwfn->p_dev;
4395
4396	ha->sp_interrupts++;
4397
4398	QL_DPRINT2(ha, "enter\n");
4399
4400	ecore_int_sp_dpc(p_hwfn);
4401
4402	QL_DPRINT2(ha, "exit\n");
4403
4404	return;
4405}
4406
4407/*****************************************************************************
4408 * Support Functions for DMA'able Memory
4409 *****************************************************************************/
4410
4411static void
4412qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4413{
4414        *((bus_addr_t *)arg) = 0;
4415
4416        if (error) {
4417                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4418                return;
4419        }
4420
4421        *((bus_addr_t *)arg) = segs[0].ds_addr;
4422
4423        return;
4424}
4425
4426static int
4427qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4428{
4429        int             ret = 0;
4430        device_t        dev;
4431        bus_addr_t      b_addr;
4432
4433        dev = ha->pci_dev;
4434
4435        ret = bus_dma_tag_create(
4436                        ha->parent_tag,/* parent */
4437                        dma_buf->alignment,
4438                        ((bus_size_t)(1ULL << 32)),/* boundary */
4439                        BUS_SPACE_MAXADDR,      /* lowaddr */
4440                        BUS_SPACE_MAXADDR,      /* highaddr */
4441                        NULL, NULL,             /* filter, filterarg */
4442                        dma_buf->size,          /* maxsize */
4443                        1,                      /* nsegments */
4444                        dma_buf->size,          /* maxsegsize */
4445                        0,                      /* flags */
4446                        NULL, NULL,             /* lockfunc, lockarg */
4447                        &dma_buf->dma_tag);
4448
4449        if (ret) {
4450                QL_DPRINT1(ha, "could not create dma tag\n");
4451                goto qlnx_alloc_dmabuf_exit;
4452        }
4453        ret = bus_dmamem_alloc(dma_buf->dma_tag,
4454                        (void **)&dma_buf->dma_b,
4455                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4456                        &dma_buf->dma_map);
4457        if (ret) {
4458                bus_dma_tag_destroy(dma_buf->dma_tag);
4459                QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4460                goto qlnx_alloc_dmabuf_exit;
4461        }
4462
4463        ret = bus_dmamap_load(dma_buf->dma_tag,
4464                        dma_buf->dma_map,
4465                        dma_buf->dma_b,
4466                        dma_buf->size,
4467                        qlnx_dmamap_callback,
4468                        &b_addr, BUS_DMA_NOWAIT);
4469
4470        if (ret || !b_addr) {
4471                bus_dma_tag_destroy(dma_buf->dma_tag);
4472                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4473                        dma_buf->dma_map);
4474                ret = -1;
4475                goto qlnx_alloc_dmabuf_exit;
4476        }
4477
4478        dma_buf->dma_addr = b_addr;
4479
4480qlnx_alloc_dmabuf_exit:
4481
4482        return ret;
4483}
4484
4485static void
4486qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4487{
4488	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4489        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4490        bus_dma_tag_destroy(dma_buf->dma_tag);
4491	return;
4492}
4493
4494void *
4495qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
4496{
4497	qlnx_dma_t	dma_buf;
4498	qlnx_dma_t	*dma_p;
4499	qlnx_host_t	*ha;
4500	device_t        dev;
4501
4502	ha = (qlnx_host_t *)ecore_dev;
4503	dev = ha->pci_dev;
4504
4505	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4506
4507	memset(&dma_buf, 0, sizeof (qlnx_dma_t));
4508
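	/*
	 * Allocate one extra page and stash a copy of the qlnx_dma_t
	 * descriptor immediately after the requested region, so that
	 * qlnx_dma_free_coherent() can recover the DMA tag and map from
	 * just the virtual address and size.
	 */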
4509	dma_buf.size = size + PAGE_SIZE;
4510	dma_buf.alignment = 8;
4511
4512	if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
4513		return (NULL);
4514	bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
4515
4516	*phys = dma_buf.dma_addr;
4517
4518	dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
4519
4520	memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
4521/*
4522	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4523		(void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
4524		dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
4525*/
4526	return (dma_buf.dma_b);
4527}
4528
4529void
4530qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
4531	uint32_t size)
4532{
4533	qlnx_dma_t dma_buf, *dma_p;
4534	qlnx_host_t	*ha;
4535	device_t        dev;
4536
4537	ha = (qlnx_host_t *)ecore_dev;
4538	dev = ha->pci_dev;
4539
4540	if (v_addr == NULL)
4541		return;
4542
4543	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4544
4545	dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
4546/*
4547	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4548		(void *)dma_p->dma_map, (void *)dma_p->dma_tag,
4549		dma_p->dma_b, (void *)dma_p->dma_addr, size);
4550*/
4551	dma_buf = *dma_p;
4552
4553	qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
4554	return;
4555}
4556
4557static int
4558qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
4559{
4560        int             ret;
4561        device_t        dev;
4562
4563        dev = ha->pci_dev;
4564
4565        /*
4566         * Allocate parent DMA Tag
4567         */
4568        ret = bus_dma_tag_create(
4569                        bus_get_dma_tag(dev),   /* parent */
4570                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
4571                        BUS_SPACE_MAXADDR,      /* lowaddr */
4572                        BUS_SPACE_MAXADDR,      /* highaddr */
4573                        NULL, NULL,             /* filter, filterarg */
4574                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
4575                        0,                      /* nsegments */
4576                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
4577                        0,                      /* flags */
4578                        NULL, NULL,             /* lockfunc, lockarg */
4579                        &ha->parent_tag);
4580
4581        if (ret) {
4582                QL_DPRINT1(ha, "could not create parent dma tag\n");
4583                return (-1);
4584        }
4585
4586        ha->flags.parent_tag = 1;
4587
4588        return (0);
4589}
4590
4591static void
4592qlnx_free_parent_dma_tag(qlnx_host_t *ha)
4593{
4594        if (ha->parent_tag != NULL) {
4595                bus_dma_tag_destroy(ha->parent_tag);
4596		ha->parent_tag = NULL;
4597        }
4598	return;
4599}
4600
4601static int
4602qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
4603{
4604        if (bus_dma_tag_create(NULL,    /* parent */
4605                1, 0,    /* alignment, bounds */
4606                BUS_SPACE_MAXADDR,       /* lowaddr */
4607                BUS_SPACE_MAXADDR,       /* highaddr */
4608                NULL, NULL,      /* filter, filterarg */
4609                QLNX_MAX_TSO_FRAME_SIZE,     /* maxsize */
4610                QLNX_MAX_SEGMENTS,        /* nsegments */
4611                (PAGE_SIZE * 4),        /* maxsegsize */
4612                BUS_DMA_ALLOCNOW,        /* flags */
4613                NULL,    /* lockfunc */
4614                NULL,    /* lockfuncarg */
4615                &ha->tx_tag)) {
4616
4617                QL_DPRINT1(ha, "tx_tag alloc failed\n");
4618                return (-1);
4619        }
4620
4621	return (0);
4622}
4623
4624static void
4625qlnx_free_tx_dma_tag(qlnx_host_t *ha)
4626{
4627        if (ha->tx_tag != NULL) {
4628                bus_dma_tag_destroy(ha->tx_tag);
4629		ha->tx_tag = NULL;
4630        }
4631	return;
4632}
4633
4634static int
4635qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
4636{
4637        if (bus_dma_tag_create(NULL,    /* parent */
4638                        1, 0,    /* alignment, bounds */
4639                        BUS_SPACE_MAXADDR,       /* lowaddr */
4640                        BUS_SPACE_MAXADDR,       /* highaddr */
4641                        NULL, NULL,      /* filter, filterarg */
4642                        MJUM9BYTES,     /* maxsize */
4643                        1,        /* nsegments */
4644                        MJUM9BYTES,        /* maxsegsize */
4645                        BUS_DMA_ALLOCNOW,        /* flags */
4646                        NULL,    /* lockfunc */
4647                        NULL,    /* lockfuncarg */
4648                        &ha->rx_tag)) {
4649
4650                QL_DPRINT1(ha, " rx_tag alloc failed\n");
4651
4652                return (-1);
4653        }
4654	return (0);
4655}
4656
4657static void
4658qlnx_free_rx_dma_tag(qlnx_host_t *ha)
4659{
4660        if (ha->rx_tag != NULL) {
4661                bus_dma_tag_destroy(ha->rx_tag);
4662		ha->rx_tag = NULL;
4663        }
4664	return;
4665}
4666
4667/*********************************
4668 * Exported functions
4669 *********************************/
4670uint32_t
4671qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
4672{
4673	uint32_t bar_size;
4674
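	/*
	 * The ecore bar_id counts 64-bit BARs; each 64-bit BAR occupies two
	 * 32-bit BAR registers in PCI config space, hence the doubling
	 * before PCIR_BAR().
	 */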
4675	bar_id = bar_id * 2;
4676
4677	bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
4678				SYS_RES_MEMORY,
4679				PCIR_BAR(bar_id));
4680
4681	return (bar_size);
4682}
4683
4684uint32_t
4685qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
4686{
4687	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4688				pci_reg, 1);
4689	return 0;
4690}
4691
4692uint32_t
4693qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
4694	uint16_t *reg_value)
4695{
4696	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4697				pci_reg, 2);
4698	return 0;
4699}
4700
4701uint32_t
4702qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
4703	uint32_t *reg_value)
4704{
4705	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4706				pci_reg, 4);
4707	return 0;
4708}
4709
4710void
4711qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
4712{
4713	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4714		pci_reg, reg_value, 1);
4715	return;
4716}
4717
4718void
4719qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
4720	uint16_t reg_value)
4721{
4722	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4723		pci_reg, reg_value, 2);
4724	return;
4725}
4726
4727void
4728qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
4729	uint32_t reg_value)
4730{
4731	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4732		pci_reg, reg_value, 4);
4733	return;
4734}
4735
4736
4737int
4738qlnx_pci_find_capability(void *ecore_dev, int cap)
4739{
4740	int		reg;
4741	qlnx_host_t	*ha;
4742
4743	ha = ecore_dev;
4744
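	/*
	 * Note: the cap argument is currently unused; this helper always
	 * looks up the PCI Express capability.
	 */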
4745	if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
4746		return reg;
4747	else {
4748		QL_DPRINT1(ha, "failed\n");
4749		return 0;
4750	}
4751}
4752
4753uint32_t
4754qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
4755{
4756	uint32_t		data32;
4757	struct ecore_dev	*cdev;
4758	struct ecore_hwfn	*p_hwfn;
4759
4760	p_hwfn = hwfn;
4761
4762	cdev = p_hwfn->p_dev;
4763
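	/*
	 * reg_addr is relative to this hwfn's register view; rebase it onto
	 * the device-wide BAR mapping before issuing the bus-space access.
	 */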
4764	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4765			(uint8_t *)(cdev->regview)) + reg_addr;
4766
4767	data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr);
4768
4769	return (data32);
4770}
4771
4772void
4773qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
4774{
4775	struct ecore_dev	*cdev;
4776	struct ecore_hwfn	*p_hwfn;
4777
4778	p_hwfn = hwfn;
4779
4780	cdev = p_hwfn->p_dev;
4781
4782	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4783			(uint8_t *)(cdev->regview)) + reg_addr;
4784
4785	bus_write_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
4786
4787	return;
4788}
4789
4790void
4791qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
4792{
4793	struct ecore_dev	*cdev;
4794	struct ecore_hwfn	*p_hwfn;
4795
4796	p_hwfn = hwfn;
4797
4798	cdev = p_hwfn->p_dev;
4799
4800	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4801			(uint8_t *)(cdev->regview)) + reg_addr;
4802
4803	bus_write_2(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
4804
4805	return;
4806}
4807
4808void
4809qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
4810{
4811	struct ecore_dev	*cdev;
4812	struct ecore_hwfn	*p_hwfn;
4813
4814	p_hwfn = hwfn;
4815
4816	cdev = p_hwfn->p_dev;
4817
4818	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->doorbells) -
4819			(uint8_t *)(cdev->doorbells)) + reg_addr;
4820
4821	bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, reg_addr, value);
4822
4823	return;
4824}
4825
4826uint32_t
4827qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
4828{
4829	uint32_t		data32;
4830	uint32_t		offset;
4831	struct ecore_dev	*cdev;
4832
4833	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
4834	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
4835
4836	data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
4837
4838	return (data32);
4839}
4840
4841void
4842qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
4843{
4844	uint32_t		offset;
4845	struct ecore_dev	*cdev;
4846
4847	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
4848	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
4849
4850	bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
4851
4852	return;
4853}
4854
4855void
4856qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
4857{
4858	uint32_t		offset;
4859	struct ecore_dev	*cdev;
4860
4861	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
4862	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
4863
4864	bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
4865	return;
4866}
4867
4868void *
4869qlnx_zalloc(uint32_t size)
4870{
4871	caddr_t	va;
4872
4873	/* M_ZERO avoids bzero()'ing a NULL pointer if the allocation fails */
4874	va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT | M_ZERO);
4875	return ((void *)va);
4876}
4877
4878void
4879qlnx_barrier(void *p_hwfn)
4880{
4881	qlnx_host_t	*ha;
4882
4883	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
4884	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_WRITE);
4885}
4886
4887void
4888qlnx_link_update(void *p_hwfn)
4889{
4890	qlnx_host_t	*ha;
4891	int		prev_link_state;
4892
4893	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
4894
4895	qlnx_fill_link(p_hwfn, &ha->if_link);
4896
4897	prev_link_state = ha->link_up;
4898	ha->link_up = ha->if_link.link_up;
4899
4900        if (prev_link_state !=  ha->link_up) {
4901                if (ha->link_up) {
4902                        if_link_state_change(ha->ifp, LINK_STATE_UP);
4903                } else {
4904                        if_link_state_change(ha->ifp, LINK_STATE_DOWN);
4905                }
4906        }
4907        return;
4908}
4909
4910void
4911qlnx_fill_link(struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link)
4912{
4913	struct ecore_mcp_link_params    link_params;
4914	struct ecore_mcp_link_state     link_state;
4915
4916	memset(if_link, 0, sizeof(*if_link));
4917	memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
4918	memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
4919
4920	/* Prepare source inputs */
4921	/* we only deal with physical functions */
4922	memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
4923		sizeof(link_params));
4924	memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
4925		sizeof(link_state));
4926
4927	ecore_mcp_get_media_type(hwfn->p_dev, &if_link->media_type);
4928
4929	/* Set the link parameters to pass to protocol driver */
4930	if (link_state.link_up) {
4931		if_link->link_up = true;
4932		if_link->speed = link_state.speed;
4933	}
4934
4935	if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
4936
4937	if (link_params.speed.autoneg)
4938		if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
4939
4940	if (link_params.pause.autoneg ||
4941		(link_params.pause.forced_rx && link_params.pause.forced_tx))
4942		if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
4943
4944	if (link_params.pause.autoneg || link_params.pause.forced_rx ||
4945		link_params.pause.forced_tx)
4946		if_link->supported_caps |= QLNX_LINK_CAP_Pause;
4947
4948	if (link_params.speed.advertised_speeds &
4949		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
4950		if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
4951                                           QLNX_LINK_CAP_1000baseT_Full;
4952
4953	if (link_params.speed.advertised_speeds &
4954		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
4955		if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
4956
4957	if (link_params.speed.advertised_speeds &
4958		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
4959		if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
4960
4961	if (link_params.speed.advertised_speeds &
4962		NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
4963		if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
4964
4965	if (link_params.speed.advertised_speeds &
4966		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
4967		if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
4968
4969	if (link_params.speed.advertised_speeds &
4970		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
4971		if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
4972
4973	if_link->advertised_caps = if_link->supported_caps;
4974
4975	if_link->autoneg = link_params.speed.autoneg;
4976	if_link->duplex = QLNX_LINK_DUPLEX;
4977
4978	/* Link partner capabilities */
4979
4980	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
4981		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
4982
4983	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
4984		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
4985
4986	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
4987		if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
4988
4989	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
4990		if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
4991
4992	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
4993		if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
4994
4995	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
4996		if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
4997
4998	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
4999		if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5000
5001	if (link_state.an_complete)
5002		if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5003
5004	if (link_state.partner_adv_pause)
5005		if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5006
5007	if ((link_state.partner_adv_pause ==
5008		ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5009		(link_state.partner_adv_pause ==
5010			ECORE_LINK_PARTNER_BOTH_PAUSE))
5011		if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5012
5013	return;
5014}
5015
5016static int
5017qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5018{
5019        int	rc, i;
5020
5021        for (i = 0; i < cdev->num_hwfns; i++) {
5022                struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5023                p_hwfn->pf_params = *func_params;
5024        }
5025
5026        rc = ecore_resc_alloc(cdev);
5027        if (rc)
5028                goto qlnx_nic_setup_exit;
5029
5030        ecore_resc_setup(cdev);
5031
5032qlnx_nic_setup_exit:
5033
5034        return rc;
5035}
5036
5037static int
5038qlnx_nic_start(struct ecore_dev *cdev)
5039{
5040        int				rc;
5041	struct ecore_hw_init_params	params;
5042
5043	bzero(&params, sizeof (struct ecore_hw_init_params));
5044
5045	params.p_tunn = NULL;
5046	params.b_hw_start = true;
5047	params.int_mode = cdev->int_mode;
5048	params.allow_npar_tx_switch = true;
5049	params.bin_fw_data = NULL;
5050
5051        rc = ecore_hw_init(cdev, &params);
5052        if (rc) {
5053                ecore_resc_free(cdev);
5054                return rc;
5055        }
5056
5057        return 0;
5058}
5059
5060static int
5061qlnx_slowpath_start(qlnx_host_t *ha)
5062{
5063	struct ecore_dev	*cdev;
5064	struct ecore_pf_params	pf_params;
5065	int			rc;
5066
5067	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5068	pf_params.eth_pf_params.num_cons  =
5069		(ha->num_rss) * (ha->num_tc + 1);
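	/* (num_tc Tx connections + 1 Rx connection) per RSS queue */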
5070
5071	cdev = &ha->cdev;
5072
5073	rc = qlnx_nic_setup(cdev, &pf_params);
5074        if (rc)
5075                goto qlnx_slowpath_start_exit;
5076
5077        cdev->int_mode = ECORE_INT_MODE_MSIX;
5078        cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5079
5080#ifdef QLNX_MAX_COALESCE
5081	cdev->rx_coalesce_usecs = 255;
5082	cdev->tx_coalesce_usecs = 255;
5083#endif
5084
5085	rc = qlnx_nic_start(cdev);
5086
5087	ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5088	ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5089
5090qlnx_slowpath_start_exit:
5091
5092	return (rc);
5093}
5094
5095static int
5096qlnx_slowpath_stop(qlnx_host_t *ha)
5097{
5098	struct ecore_dev	*cdev;
5099	device_t		dev = ha->pci_dev;
5100	int			i;
5101
5102	cdev = &ha->cdev;
5103
5104	ecore_hw_stop(cdev);
5105
5106 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
5107
5108        	if (ha->sp_handle[i])
5109                	(void)bus_teardown_intr(dev, ha->sp_irq[i],
5110				ha->sp_handle[i]);
5111
5112		ha->sp_handle[i] = NULL;
5113
5114        	if (ha->sp_irq[i])
5115			(void) bus_release_resource(dev, SYS_RES_IRQ,
5116				ha->sp_irq_rid[i], ha->sp_irq[i]);
5117		ha->sp_irq[i] = NULL;
5118	}
5119
5120        ecore_resc_free(cdev);
5121
5122        return 0;
5123}
5124
5125static void
5126qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5127	char ver_str[VER_SIZE])
5128{
5129        int	i;
5130
5131        memcpy(cdev->name, name, NAME_SIZE);
5132
5133        for_each_hwfn(cdev, i) {
5134                snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5135        }
5136
5137        cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5138
5139	return ;
5140}
5141
5142void
5143qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5144{
5145	enum ecore_mcp_protocol_type	type;
5146	union ecore_mcp_protocol_stats	*stats;
5147	struct ecore_eth_stats		eth_stats;
5148	qlnx_host_t			*ha;
5149
5150	ha = cdev;
5151	stats = proto_stats;
5152	type = proto_type;
5153
5154        switch (type) {
5155
5156        case ECORE_MCP_LAN_STATS:
5157                ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5158                stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5159                stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5160                stats->lan_stats.fcs_err = -1;
5161                break;
5162
5163	default:
5164		ha->err_get_proto_invalid_type++;
5165
5166		QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5167		break;
5168	}
5169	return;
5170}
5171
5172static int
5173qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5174{
5175	struct ecore_hwfn	*p_hwfn;
5176	struct ecore_ptt	*p_ptt;
5177
5178	p_hwfn = &ha->cdev.hwfns[0];
5179	p_ptt = ecore_ptt_acquire(p_hwfn);
5180
5181	if (p_ptt ==  NULL) {
5182                QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5183                return (-1);
5184	}
5185	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5186
5187	ecore_ptt_release(p_hwfn, p_ptt);
5188
5189	return (0);
5190}
5191
5192static int
5193qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5194{
5195	struct ecore_hwfn	*p_hwfn;
5196	struct ecore_ptt	*p_ptt;
5197
5198	p_hwfn = &ha->cdev.hwfns[0];
5199	p_ptt = ecore_ptt_acquire(p_hwfn);
5200
5201	if (p_ptt ==  NULL) {
5202                QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
5203                return (-1);
5204	}
5205	ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5206
5207	ecore_ptt_release(p_hwfn, p_ptt);
5208
5209	return (0);
5210}
5211
5212static int
5213qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5214{
5215	struct ecore_dev	*cdev;
5216
5217	cdev = &ha->cdev;
5218
5219	bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5220	bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5221	bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5222
5223        return 0;
5224}
5225
5226static void
5227qlnx_init_fp(qlnx_host_t *ha)
5228{
5229	int rss_id, txq_array_index, tc;
5230
5231	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5232
5233		struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5234
5235		fp->rss_id = rss_id;
5236		fp->edev = ha;
5237		fp->sb_info = &ha->sb_array[rss_id];
5238		fp->rxq = &ha->rxq_array[rss_id];
5239		fp->rxq->rxq_id = rss_id;
5240
5241		for (tc = 0; tc < ha->num_tc; tc++) {
5242                        txq_array_index = tc * ha->num_rss + rss_id;
5243                        fp->txq[tc] = &ha->txq_array[txq_array_index];
5244                        fp->txq[tc]->index = txq_array_index;
5245		}
5246
5247		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5248			rss_id);
5249
5250		fp->tx_ring_full = 0;
5251
5252		/* reset all the statistics counters */
5253
5254		fp->tx_pkts_processed = 0;
5255		fp->tx_pkts_freed = 0;
5256		fp->tx_pkts_transmitted = 0;
5257		fp->tx_pkts_completed = 0;
5258		fp->tx_lso_wnd_min_len = 0;
5259		fp->tx_defrag = 0;
5260		fp->tx_nsegs_gt_elem_left = 0;
5261		fp->tx_tso_max_nsegs = 0;
5262		fp->tx_tso_min_nsegs = 0;
5263		fp->err_tx_nsegs_gt_elem_left = 0;
5264		fp->err_tx_dmamap_create = 0;
5265		fp->err_tx_defrag_dmamap_load = 0;
5266		fp->err_tx_non_tso_max_seg = 0;
5267		fp->err_tx_dmamap_load = 0;
5268		fp->err_tx_defrag = 0;
5269		fp->err_tx_free_pkt_null = 0;
5270		fp->err_tx_cons_idx_conflict = 0;
5271
5272		fp->rx_pkts = 0;
5273		fp->err_m_getcl = 0;
5274		fp->err_m_getjcl = 0;
5275        }
5276	return;
5277}
5278
5279static void
5280qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5281{
5282	struct ecore_dev	*cdev;
5283
5284	cdev = &ha->cdev;
5285
5286        if (sb_info->sb_virt) {
5287                OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5288			(sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5289		sb_info->sb_virt = NULL;
5290	}
5291}
5292
5293static int
5294qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5295	void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5296{
5297        struct ecore_hwfn	*p_hwfn;
5298        int			hwfn_index, rc;
5299        u16			rel_sb_id;
5300
5301        hwfn_index = sb_id % cdev->num_hwfns;
5302        p_hwfn = &cdev->hwfns[hwfn_index];
5303        rel_sb_id = sb_id / cdev->num_hwfns;
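        /*
         * Global status block ids are striped across the hw functions:
         * sb_id modulo num_hwfns selects the hwfn, and the quotient is the
         * status block id relative to that hwfn.
         */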
5304
5305        QL_DPRINT2(((qlnx_host_t *)cdev),
5306                "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
5307                sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5308                hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5309                sb_virt_addr, (void *)sb_phy_addr);
5310
5311        rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5312                             sb_virt_addr, sb_phy_addr, rel_sb_id);
5313
5314        return rc;
5315}
5316
5317/* This function allocates fast-path status block memory */
5318static int
5319qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5320{
5321        struct status_block_e4	*sb_virt;
5322        bus_addr_t		sb_phys;
5323        int			rc;
5324	uint32_t		size;
5325	struct ecore_dev	*cdev;
5326
5327	cdev = &ha->cdev;
5328
5329	size = sizeof(*sb_virt);
5330	sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5331
5332        if (!sb_virt) {
5333                QL_DPRINT1(ha, "Status block allocation failed\n");
5334                return -ENOMEM;
5335        }
5336
5337        rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5338        if (rc) {
5339                OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5340        }
5341
5342	return rc;
5343}
5344
5345static void
5346qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5347{
5348        int			i;
5349	struct sw_rx_data	*rx_buf;
5350
5351        for (i = 0; i < rxq->num_rx_buffers; i++) {
5352
5353                rx_buf = &rxq->sw_rx_ring[i];
5354
5355		if (rx_buf->data != NULL) {
5356			if (rx_buf->map != NULL) {
5357				bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5358				bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5359				rx_buf->map = NULL;
5360			}
5361			m_freem(rx_buf->data);
5362			rx_buf->data = NULL;
5363		}
5364        }
5365	return;
5366}
5367
5368static void
5369qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5370{
5371	struct ecore_dev	*cdev;
5372	int			i;
5373
5374	cdev = &ha->cdev;
5375
5376	qlnx_free_rx_buffers(ha, rxq);
5377
5378	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5379		qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
5380		if (rxq->tpa_info[i].mpf != NULL)
5381			m_freem(rxq->tpa_info[i].mpf);
5382	}
5383
5384	bzero((void *)&rxq->sw_rx_ring[0],
5385		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
5386
5387        /* Free the real RQ ring used by FW */
5388	if (rxq->rx_bd_ring.p_virt_addr) {
5389                ecore_chain_free(cdev, &rxq->rx_bd_ring);
5390                rxq->rx_bd_ring.p_virt_addr = NULL;
5391        }
5392
5393        /* Free the real completion ring used by FW */
5394        if (rxq->rx_comp_ring.p_virt_addr &&
5395                        rxq->rx_comp_ring.pbl_sp.p_virt_table) {
5396                ecore_chain_free(cdev, &rxq->rx_comp_ring);
5397                rxq->rx_comp_ring.p_virt_addr = NULL;
5398                rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
5399        }
5400
5401#ifdef QLNX_SOFT_LRO
5402	{
5403		struct lro_ctrl *lro;
5404
5405		lro = &rxq->lro;
5406		tcp_lro_free(lro);
5407	}
5408#endif /* #ifdef QLNX_SOFT_LRO */
5409
5410	return;
5411}
5412
5413static int
5414qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5415{
5416        register struct mbuf	*mp;
5417        uint16_t		rx_buf_size;
5418        struct sw_rx_data	*sw_rx_data;
5419        struct eth_rx_bd	*rx_bd;
5420        dma_addr_t		dma_addr;
5421	bus_dmamap_t		map;
5422	bus_dma_segment_t       segs[1];
5423	int			nsegs;
5424	int			ret;
5425	struct ecore_dev	*cdev;
5426
5427	cdev = &ha->cdev;
5428
5429        rx_buf_size = rxq->rx_buf_size;
5430
5431	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5432
5433        if (mp == NULL) {
5434                QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5435                return -ENOMEM;
5436        }
5437
5438	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5439
5440	map = (bus_dmamap_t)0;
5441
5442	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5443			BUS_DMA_NOWAIT);
5444	dma_addr = segs[0].ds_addr;
5445
5446	if (ret || !dma_addr || (nsegs != 1)) {
5447		m_freem(mp);
5448		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5449                           ret, (long long unsigned int)dma_addr, nsegs);
5450		return -ENOMEM;
5451	}
5452
5453        sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
5454        sw_rx_data->data = mp;
5455        sw_rx_data->dma_addr = dma_addr;
5456        sw_rx_data->map = map;
5457
5458        /* Advance PROD and get BD pointer */
5459        rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
5460        rx_bd->addr.hi = htole32(U64_HI(dma_addr));
5461        rx_bd->addr.lo = htole32(U64_LO(dma_addr));
5462	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
5463
5464        rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
5465
5466        return 0;
5467}
5468
5469static int
5470qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
5471	struct qlnx_agg_info *tpa)
5472{
5473	struct mbuf		*mp;
5474        dma_addr_t		dma_addr;
5475	bus_dmamap_t		map;
5476	bus_dma_segment_t       segs[1];
5477	int			nsegs;
5478	int			ret;
5479        struct sw_rx_data	*rx_buf;
5480
5481	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5482
5483        if (mp == NULL) {
5484                QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5485                return -ENOMEM;
5486        }
5487
5488	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5489
5490	map = (bus_dmamap_t)0;
5491
5492	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5493			BUS_DMA_NOWAIT);
5494	dma_addr = segs[0].ds_addr;
5495
5496	if (ret || !dma_addr || (nsegs != 1)) {
5497		m_freem(mp);
5498		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5499			ret, (long long unsigned int)dma_addr, nsegs);
5500		return -ENOMEM;
5501	}
5502
5503        rx_buf = &tpa->rx_buf;
5504
5505	memset(rx_buf, 0, sizeof (struct sw_rx_data));
5506
5507        rx_buf->data = mp;
5508        rx_buf->dma_addr = dma_addr;
5509        rx_buf->map = map;
5510
5511	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
5512
5513	return (0);
5514}
5515
5516static void
5517qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
5518{
5519        struct sw_rx_data	*rx_buf;
5520
5521	rx_buf = &tpa->rx_buf;
5522
5523	if (rx_buf->data != NULL) {
5524		if (rx_buf->map != NULL) {
5525			bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5526			bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5527			rx_buf->map = NULL;
5528		}
5529		m_freem(rx_buf->data);
5530		rx_buf->data = NULL;
5531	}
5532	return;
5533}
5534
5535/* This function allocates all memory needed per Rx queue */
5536static int
5537qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5538{
5539        int			i, rc, num_allocated;
5540	struct ifnet		*ifp;
5541	struct ecore_dev	 *cdev;
5542
5543	cdev = &ha->cdev;
5544	ifp = ha->ifp;
5545
5546        rxq->num_rx_buffers = RX_RING_SIZE;
5547
5548	rxq->rx_buf_size = ha->rx_buf_size;
5549
5550        /* Allocate the parallel driver ring for Rx buffers */
5551	bzero((void *)&rxq->sw_rx_ring[0],
5552		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
5553
5554        /* Allocate FW Rx ring  */
5555
5556        rc = ecore_chain_alloc(cdev,
5557			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
5558			ECORE_CHAIN_MODE_NEXT_PTR,
5559			ECORE_CHAIN_CNT_TYPE_U16,
5560			RX_RING_SIZE,
5561			sizeof(struct eth_rx_bd),
5562			&rxq->rx_bd_ring, NULL);
5563
5564        if (rc)
5565                goto err;
5566
5567        /* Allocate FW completion ring */
5568        rc = ecore_chain_alloc(cdev,
5569                        ECORE_CHAIN_USE_TO_CONSUME,
5570                        ECORE_CHAIN_MODE_PBL,
5571			ECORE_CHAIN_CNT_TYPE_U16,
5572                        RX_RING_SIZE,
5573                        sizeof(union eth_rx_cqe),
5574                        &rxq->rx_comp_ring, NULL);
5575
5576        if (rc)
5577                goto err;
5578
5579        /* Allocate buffers for the Rx ring */
5580
5581	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5582		rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
5583			&rxq->tpa_info[i]);
5584                if (rc)
5585                        break;
5586
5587	}
5588
5589        for (i = 0; i < rxq->num_rx_buffers; i++) {
5590                rc = qlnx_alloc_rx_buffer(ha, rxq);
5591                if (rc)
5592                        break;
5593        }
5594        num_allocated = i;
5595        if (!num_allocated) {
5596		QL_DPRINT1(ha, "Rx buffers allocation failed\n");
5597                goto err;
5598        } else if (num_allocated < rxq->num_rx_buffers) {
5599		QL_DPRINT1(ha, "Allocated less buffers than"
5600			" desired (%d allocated)\n", num_allocated);
5601        }
5602
5603#ifdef QLNX_SOFT_LRO
5604
5605	{
5606		struct lro_ctrl *lro;
5607
5608		lro = &rxq->lro;
5609
5610#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
5611		if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
5612			QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5613				   rxq->rxq_id);
5614			goto err;
5615		}
5616#else
5617		if (tcp_lro_init(lro)) {
5618			QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5619				   rxq->rxq_id);
5620			goto err;
5621		}
5622#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
5623
5624		lro->ifp = ha->ifp;
5625	}
5626#endif /* #ifdef QLNX_SOFT_LRO */
5627        return 0;
5628
5629err:
5630        qlnx_free_mem_rxq(ha, rxq);
5631        return -ENOMEM;
5632}
5633
5634
5635static void
5636qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
5637	struct qlnx_tx_queue *txq)
5638{
5639	struct ecore_dev	*cdev;
5640
5641	cdev = &ha->cdev;
5642
5643	bzero((void *)&txq->sw_tx_ring[0],
5644		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
5645
5646        /* Free the real RQ ring used by FW */
5647        if (txq->tx_pbl.p_virt_addr) {
5648                ecore_chain_free(cdev, &txq->tx_pbl);
5649                txq->tx_pbl.p_virt_addr = NULL;
5650        }
5651	return;
5652}
5653
5654/* This function allocates all memory needed per Tx queue */
5655static int
5656qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
5657	struct qlnx_tx_queue *txq)
5658{
5659        int			ret = ECORE_SUCCESS;
5660        union eth_tx_bd_types	*p_virt;
5661	struct ecore_dev	*cdev;
5662
5663	cdev = &ha->cdev;
5664
5665	bzero((void *)&txq->sw_tx_ring[0],
5666		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
5667
5668        /* Allocate the real Tx ring to be used by FW */
5669        ret = ecore_chain_alloc(cdev,
5670                        ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
5671                        ECORE_CHAIN_MODE_PBL,
5672			ECORE_CHAIN_CNT_TYPE_U16,
5673                        TX_RING_SIZE,
5674                        sizeof(*p_virt),
5675                        &txq->tx_pbl, NULL);
5676
5677        if (ret != ECORE_SUCCESS) {
5678                goto err;
5679        }
5680
5681	txq->num_tx_buffers = TX_RING_SIZE;
5682
5683        return 0;
5684
5685err:
5686        qlnx_free_mem_txq(ha, fp, txq);
5687        return -ENOMEM;
5688}
5689
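/*
 * Drain and free the per-fastpath software transmit buf_ring, freeing any
 * mbufs still queued on it, then destroy its mutex.
 */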
5690static void
5691qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5692{
5693	struct mbuf	*mp;
5694	struct ifnet	*ifp = ha->ifp;
5695
5696	if (mtx_initialized(&fp->tx_mtx)) {
5697
5698		if (fp->tx_br != NULL) {
5699
5700			mtx_lock(&fp->tx_mtx);
5701
5702			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
5703				fp->tx_pkts_freed++;
5704				m_freem(mp);
5705			}
5706
5707			mtx_unlock(&fp->tx_mtx);
5708
5709			buf_ring_free(fp->tx_br, M_DEVBUF);
5710			fp->tx_br = NULL;
5711		}
5712		mtx_destroy(&fp->tx_mtx);
5713	}
5714	return;
5715}
5716
5717static void
5718qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5719{
5720        int	tc;
5721
5722        qlnx_free_mem_sb(ha, fp->sb_info);
5723
5724        qlnx_free_mem_rxq(ha, fp->rxq);
5725
5726        for (tc = 0; tc < ha->num_tc; tc++)
5727                qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
5728
5729	return;
5730}
5731
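/*
 * Set up the per-fastpath transmit lock and a TX_RING_SIZE entry buf_ring
 * used to stage outbound mbufs for this queue.
 */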
5732static int
5733qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5734{
5735	snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
5736		"qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
5737
5738	mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
5739
5740        fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
5741                                   M_NOWAIT, &fp->tx_mtx);
5742        if (fp->tx_br == NULL) {
5743		QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
5744			ha->dev_unit, fp->rss_id);
5745		return -ENOMEM;
5746        }
5747	return 0;
5748}
5749
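/*
 * Allocate everything a fastpath needs: its status block, an Rx buffer size
 * derived from the current max frame size, the Rx queue, and one Tx queue
 * per traffic class.
 */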
5750static int
5751qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5752{
5753        int	rc, tc;
5754
5755        rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
5756        if (rc)
5757                goto err;
5758
5759	if (ha->rx_jumbo_buf_eq_mtu) {
5760		if (ha->max_frame_size <= MCLBYTES)
5761			ha->rx_buf_size = MCLBYTES;
5762		else if (ha->max_frame_size <= MJUMPAGESIZE)
5763			ha->rx_buf_size = MJUMPAGESIZE;
5764		else if (ha->max_frame_size <= MJUM9BYTES)
5765			ha->rx_buf_size = MJUM9BYTES;
5766		else if (ha->max_frame_size <= MJUM16BYTES)
5767			ha->rx_buf_size = MJUM16BYTES;
5768	} else {
5769		if (ha->max_frame_size <= MCLBYTES)
5770			ha->rx_buf_size = MCLBYTES;
5771		else
5772			ha->rx_buf_size = MJUMPAGESIZE;
5773	}
5774
5775        rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
5776        if (rc)
5777                goto err;
5778
5779        for (tc = 0; tc < ha->num_tc; tc++) {
5780                rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
5781                if (rc)
5782                        goto err;
5783        }
5784
5785        return 0;
5786
5787err:
5788        qlnx_free_mem_fp(ha, fp);
5789        return -ENOMEM;
5790}
5791
5792static void
5793qlnx_free_mem_load(qlnx_host_t *ha)
5794{
5795        int			i;
5796	struct ecore_dev	*cdev;
5797
5798	cdev = &ha->cdev;
5799
5800        for (i = 0; i < ha->num_rss; i++) {
5801                struct qlnx_fastpath *fp = &ha->fp_array[i];
5802
5803                qlnx_free_mem_fp(ha, fp);
5804        }
5805	return;
5806}
5807
5808static int
5809qlnx_alloc_mem_load(qlnx_host_t *ha)
5810{
5811        int	rc = 0, rss_id;
5812
5813        for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5814                struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5815
5816                rc = qlnx_alloc_mem_fp(ha, fp);
5817                if (rc)
5818                        break;
5819        }
5820	return (rc);
5821}
5822
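/*
 * Issue a vport-start ramrod on every hw-function with the requested MTU,
 * inner-VLAN removal, TTL0-drop and TPA (hardware LRO) settings, then start
 * the fastpath on that hw-function.
 */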
5823static int
5824qlnx_start_vport(struct ecore_dev *cdev,
5825                u8 vport_id,
5826                u16 mtu,
5827                u8 drop_ttl0_flg,
5828                u8 inner_vlan_removal_en_flg,
5829		u8 tx_switching,
5830		u8 hw_lro_enable)
5831{
5832        int					rc, i;
5833	struct ecore_sp_vport_start_params	vport_start_params = { 0 };
5834	qlnx_host_t				*ha;
5835
5836	ha = (qlnx_host_t *)cdev;
5837
5838	vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
5839	vport_start_params.tx_switching = 0;
5840	vport_start_params.handle_ptp_pkts = 0;
5841	vport_start_params.only_untagged = 0;
5842	vport_start_params.drop_ttl0 = drop_ttl0_flg;
5843
5844	vport_start_params.tpa_mode =
5845		(hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
5846	vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
5847
5848	vport_start_params.vport_id = vport_id;
5849	vport_start_params.mtu = mtu;
5850
5851
5852	QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
5853
5854        for_each_hwfn(cdev, i) {
5855                struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5856
5857		vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
5858		vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
5859
5860                rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
5861
5862                if (rc) {
5863			QL_DPRINT1(ha, "Failed to start V-PORT %d"
5864				" with MTU %d\n", vport_id, mtu);
5865                        return -ENOMEM;
5866                }
5867
5868                ecore_hw_start_fastpath(p_hwfn);
5869
5870		QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
5871			vport_id, mtu);
5872        }
5873        return 0;
5874}
5875
5876
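/*
 * Translate the driver's vport update parameters into an ecore
 * sp-vport-update request and post it on every hw-function; for
 * multi-engine (CMT) devices the RSS indirection table is re-spread
 * across the engines before each request.
 */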
5877static int
5878qlnx_update_vport(struct ecore_dev *cdev,
5879	struct qlnx_update_vport_params *params)
5880{
5881        struct ecore_sp_vport_update_params	sp_params;
5882        int					rc, i, j, fp_index;
5883	struct ecore_hwfn			*p_hwfn;
5884        struct ecore_rss_params			*rss;
5885	qlnx_host_t				*ha = (qlnx_host_t *)cdev;
5886        struct qlnx_fastpath			*fp;
5887
5888        memset(&sp_params, 0, sizeof(sp_params));
5889        /* Translate protocol params into sp params */
5890        sp_params.vport_id = params->vport_id;
5891
5892        sp_params.update_vport_active_rx_flg =
5893		params->update_vport_active_rx_flg;
5894        sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
5895
5896        sp_params.update_vport_active_tx_flg =
5897		params->update_vport_active_tx_flg;
5898        sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
5899
5900        sp_params.update_inner_vlan_removal_flg =
5901                params->update_inner_vlan_removal_flg;
5902        sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
5903
5904	sp_params.sge_tpa_params = params->sge_tpa_params;
5905
5906        /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
5907         * We need to re-fix the rss values per engine for CMT.
5908         */
5909	if (params->rss_params->update_rss_config)
5910		sp_params.rss_params = params->rss_params;
5911	else
5912		sp_params.rss_params = NULL;
5913
5914        for_each_hwfn(cdev, i) {
5915
5916		p_hwfn = &cdev->hwfns[i];
5917
5918		if ((cdev->num_hwfns > 1) &&
5919			params->rss_params->update_rss_config &&
5920			params->rss_params->rss_enable) {
5921
5922			rss = params->rss_params;
5923
5924			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
5925
5926				fp_index = ((cdev->num_hwfns * j) + i) %
5927						ha->num_rss;
5928
5929                		fp = &ha->fp_array[fp_index];
5930                        	rss->rss_ind_table[j] = fp->rxq->handle;
5931			}
5932
5933			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
5934				QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
5935					rss->rss_ind_table[j],
5936					rss->rss_ind_table[j+1],
5937					rss->rss_ind_table[j+2],
5938					rss->rss_ind_table[j+3],
5939					rss->rss_ind_table[j+4],
5940					rss->rss_ind_table[j+5],
5941					rss->rss_ind_table[j+6],
5942					rss->rss_ind_table[j+7]);
5943				j += 8;
5944			}
5945		}
5946
5947                sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
5948
5949		QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
5950
5951                rc = ecore_sp_vport_update(p_hwfn, &sp_params,
5952                                           ECORE_SPQ_MODE_EBLOCK, NULL);
5953                if (rc) {
5954			QL_DPRINT1(ha, "Failed to update VPORT\n");
5955                        return rc;
5956                }
5957
5958                QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d,"
5959			" rx_active_flag %d [tx_update %d], [rx_update %d]\n",
5960			params->vport_id, params->vport_active_tx_flg,
5961			params->vport_active_rx_flg,
5962			params->update_vport_active_tx_flg,
5963			params->update_vport_active_rx_flg);
5964        }
5965
5966        return 0;
5967}
5968
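/*
 * Recycle the current Rx buffer: repost the consumer descriptor at the
 * producer slot so the same mbuf/DMA mapping is handed back to the firmware
 * (used, e.g., when a replacement buffer cannot be allocated).
 */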
5969static void
5970qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
5971{
5972        struct eth_rx_bd	*rx_bd_cons =
5973					ecore_chain_consume(&rxq->rx_bd_ring);
5974        struct eth_rx_bd	*rx_bd_prod =
5975					ecore_chain_produce(&rxq->rx_bd_ring);
5976        struct sw_rx_data	*sw_rx_data_cons =
5977					&rxq->sw_rx_ring[rxq->sw_rx_cons];
5978        struct sw_rx_data	*sw_rx_data_prod =
5979					&rxq->sw_rx_ring[rxq->sw_rx_prod];
5980
5981        sw_rx_data_prod->data = sw_rx_data_cons->data;
5982        memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
5983
5984        rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
5985        rxq->sw_rx_prod  = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
5986
5987	return;
5988}
5989
5990static void
5991qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
5992{
5993
5994        uint16_t	 	bd_prod;
5995        uint16_t		cqe_prod;
5996	union {
5997		struct eth_rx_prod_data rx_prod_data;
5998		uint32_t		data32;
5999	} rx_prods;
6000
6001        bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6002        cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6003
6004        /* Update producers */
6005        rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6006        rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6007
6008        /* Make sure that the BD and SGE data is updated before updating the
6009         * producers since FW might read the BD/SGE right after the producer
6010         * is updated.
6011         */
6012	wmb();
6013
6014        internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6015		sizeof(rx_prods), &rx_prods.data32);
6016
6017        /* mmiowb is needed to synchronize doorbell writes from more than one
6018         * processor. It guarantees that the write arrives to the device before
6019         * the napi lock is released and another qlnx_poll is called (possibly
6020         * on another CPU). Without this barrier, the next doorbell can bypass
6021         * this doorbell. This is applicable to IA64/Altix systems.
6022         */
6023        wmb();
6024
6025	return;
6026}
6027
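/* Default 40-byte RSS (Toeplitz) hash key, packed four bytes per 32-bit word */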
6028static uint32_t qlnx_hash_key[] = {
6029                ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6030                ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6031                ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6032                ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6033                ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6034                ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6035                ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6036                ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6037                ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6038                ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
6039
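/*
 * Bring the data path up: start the vport, issue Rx/Tx queue start ramrods
 * for every RSS fastpath (one Tx queue per traffic class), program the RSS
 * indirection table and key when more than one queue is in use, and finally
 * send a vport-update to activate Rx/Tx.
 */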
6040static int
6041qlnx_start_queues(qlnx_host_t *ha)
6042{
6043        int				rc, tc, i, vport_id = 0,
6044					drop_ttl0_flg = 1, vlan_removal_en = 1,
6045					tx_switching = 0, hw_lro_enable = 0;
6046        struct ecore_dev		*cdev = &ha->cdev;
6047        struct ecore_rss_params		*rss_params = &ha->rss_params;
6048        struct qlnx_update_vport_params	vport_update_params;
6049        struct ifnet			*ifp;
6050        struct ecore_hwfn		*p_hwfn;
6051	struct ecore_sge_tpa_params	tpa_params;
6052	struct ecore_queue_start_common_params qparams;
6053        struct qlnx_fastpath		*fp;
6054
6055	ifp = ha->ifp;
6056
6057	QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6058
6059        if (!ha->num_rss) {
6060		QL_DPRINT1(ha, "Cannot update V-PORT to active as there"
6061			" are no Rx queues\n");
6062                return -EINVAL;
6063        }
6064
6065#ifndef QLNX_SOFT_LRO
6066        hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
6067#endif /* #ifndef QLNX_SOFT_LRO */
6068
6069        rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
6070			vlan_removal_en, tx_switching, hw_lro_enable);
6071
6072        if (rc) {
6073                QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6074                return rc;
6075        }
6076
6077	QL_DPRINT2(ha, "Start vport ramrod passed, "
6078		"vport_id = %d, max frame size = %d, vlan_removal_en = %d\n",
6079		vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
6080
6081        for_each_rss(i) {
6082		struct ecore_rxq_start_ret_params rx_ret_params;
6083		struct ecore_txq_start_ret_params tx_ret_params;
6084
6085                fp = &ha->fp_array[i];
6086        	p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6087
6088		bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6089		bzero(&rx_ret_params,
6090			sizeof (struct ecore_rxq_start_ret_params));
6091
6092		qparams.queue_id = i ;
6093		qparams.vport_id = vport_id;
6094		qparams.stats_id = vport_id;
6095		qparams.p_sb = fp->sb_info;
6096		qparams.sb_idx = RX_PI;
6097
6098
6099		rc = ecore_eth_rx_queue_start(p_hwfn,
6100			p_hwfn->hw_info.opaque_fid,
6101			&qparams,
6102			fp->rxq->rx_buf_size,	/* bd_max_bytes */
6103			/* bd_chain_phys_addr */
6104			fp->rxq->rx_bd_ring.p_phys_addr,
6105			/* cqe_pbl_addr */
6106			ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6107			/* cqe_pbl_size */
6108			ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6109			&rx_ret_params);
6110
6111                if (rc) {
6112                	QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6113                        return rc;
6114                }
6115
6116		fp->rxq->hw_rxq_prod_addr	= rx_ret_params.p_prod;
6117		fp->rxq->handle			= rx_ret_params.p_handle;
6118                fp->rxq->hw_cons_ptr		=
6119				&fp->sb_info->sb_virt->pi_array[RX_PI];
6120
6121                qlnx_update_rx_prod(p_hwfn, fp->rxq);
6122
6123                for (tc = 0; tc < ha->num_tc; tc++) {
6124                        struct qlnx_tx_queue *txq = fp->txq[tc];
6125
6126			bzero(&qparams,
6127				sizeof(struct ecore_queue_start_common_params));
6128			bzero(&tx_ret_params,
6129				sizeof (struct ecore_txq_start_ret_params));
6130
6131			qparams.queue_id = txq->index / cdev->num_hwfns ;
6132			qparams.vport_id = vport_id;
6133			qparams.stats_id = vport_id;
6134			qparams.p_sb = fp->sb_info;
6135			qparams.sb_idx = TX_PI(tc);
6136
6137			rc = ecore_eth_tx_queue_start(p_hwfn,
6138				p_hwfn->hw_info.opaque_fid,
6139				&qparams, tc,
6140				/* bd_chain_phys_addr */
6141				ecore_chain_get_pbl_phys(&txq->tx_pbl),
6142				ecore_chain_get_page_cnt(&txq->tx_pbl),
6143				&tx_ret_params);
6144
6145                        if (rc) {
6146                		QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6147					   txq->index, rc);
6148                                return rc;
6149                        }
6150
6151			txq->doorbell_addr = tx_ret_params.p_doorbell;
6152			txq->handle = tx_ret_params.p_handle;
6153
6154                        txq->hw_cons_ptr =
6155                                &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6156                        SET_FIELD(txq->tx_db.data.params,
6157                                  ETH_DB_DATA_DEST, DB_DEST_XCM);
6158                        SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6159                                  DB_AGG_CMD_SET);
6160                        SET_FIELD(txq->tx_db.data.params,
6161                                  ETH_DB_DATA_AGG_VAL_SEL,
6162                                  DQ_XCM_ETH_TX_BD_PROD_CMD);
6163
6164                        txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6165                }
6166        }
6167
6168        /* Fill struct with RSS params */
6169        if (ha->num_rss > 1) {
6170
6171                rss_params->update_rss_config = 1;
6172                rss_params->rss_enable = 1;
6173                rss_params->update_rss_capabilities = 1;
6174                rss_params->update_rss_ind_table = 1;
6175                rss_params->update_rss_key = 1;
6176                rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6177                                       ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6178                rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6179
6180                for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6181                	fp = &ha->fp_array[(i % ha->num_rss)];
6182                        rss_params->rss_ind_table[i] = fp->rxq->handle;
6183		}
6184
6185                for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6186			rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6187
6188        } else {
6189                memset(rss_params, 0, sizeof(*rss_params));
6190        }
6191
6192
6193        /* Prepare and send the vport enable */
6194        memset(&vport_update_params, 0, sizeof(vport_update_params));
6195        vport_update_params.vport_id = vport_id;
6196        vport_update_params.update_vport_active_tx_flg = 1;
6197        vport_update_params.vport_active_tx_flg = 1;
6198        vport_update_params.update_vport_active_rx_flg = 1;
6199        vport_update_params.vport_active_rx_flg = 1;
6200        vport_update_params.rss_params = rss_params;
6201        vport_update_params.update_inner_vlan_removal_flg = 1;
6202        vport_update_params.inner_vlan_removal_flg = 1;
6203
6204	if (hw_lro_enable) {
6205		memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6206
6207		tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6208
6209		tpa_params.update_tpa_en_flg = 1;
6210		tpa_params.tpa_ipv4_en_flg = 1;
6211		tpa_params.tpa_ipv6_en_flg = 1;
6212
6213		tpa_params.update_tpa_param_flg = 1;
6214		tpa_params.tpa_pkt_split_flg = 0;
6215		tpa_params.tpa_hdr_data_split_flg = 0;
6216		tpa_params.tpa_gro_consistent_flg = 0;
6217		tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6218		tpa_params.tpa_max_size = (uint16_t)(-1);
6219		tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
6220		tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
6221
6222		vport_update_params.sge_tpa_params = &tpa_params;
6223	}
6224
6225        rc = qlnx_update_vport(cdev, &vport_update_params);
6226        if (rc) {
6227		QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6228                return rc;
6229        }
6230
6231        return 0;
6232}
6233
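/*
 * Wait for a Tx queue to drain: keep reclaiming completed buffers via
 * qlnx_tx_int() until the hardware consumer index catches up with the
 * driver's chain consumer index.
 */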
6234static int
6235qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6236	struct qlnx_tx_queue *txq)
6237{
6238	uint16_t	hw_bd_cons;
6239	uint16_t	ecore_cons_idx;
6240
6241	QL_DPRINT2(ha, "enter\n");
6242
6243	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6244
6245	while (hw_bd_cons !=
6246		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6247
6248		mtx_lock(&fp->tx_mtx);
6249
6250		(void)qlnx_tx_int(ha, fp, txq);
6251
6252		mtx_unlock(&fp->tx_mtx);
6253
6254		qlnx_mdelay(__func__, 2);
6255
6256		hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6257	}
6258
6259	QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6260
6261        return 0;
6262}
6263
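/*
 * Quiesce the data path: deactivate the vport, drain every Tx queue, stop
 * all Tx and Rx queues in reverse order, then send the vport-stop ramrod on
 * each hw-function.
 */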
6264static int
6265qlnx_stop_queues(qlnx_host_t *ha)
6266{
6267        struct qlnx_update_vport_params	vport_update_params;
6268        struct ecore_dev		*cdev;
6269        struct qlnx_fastpath		*fp;
6270        int				rc, tc, i;
6271
6272        cdev = &ha->cdev;
6273
6274        /* Disable the vport */
6275
6276        memset(&vport_update_params, 0, sizeof(vport_update_params));
6277
6278        vport_update_params.vport_id = 0;
6279        vport_update_params.update_vport_active_tx_flg = 1;
6280        vport_update_params.vport_active_tx_flg = 0;
6281        vport_update_params.update_vport_active_rx_flg = 1;
6282        vport_update_params.vport_active_rx_flg = 0;
6283        vport_update_params.rss_params = &ha->rss_params;
6284        vport_update_params.rss_params->update_rss_config = 0;
6285        vport_update_params.rss_params->rss_enable = 0;
6286        vport_update_params.update_inner_vlan_removal_flg = 0;
6287        vport_update_params.inner_vlan_removal_flg = 0;
6288
6289	QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6290
6291        rc = qlnx_update_vport(cdev, &vport_update_params);
6292        if (rc) {
6293		QL_DPRINT1(ha, "Failed to update vport\n");
6294                return rc;
6295        }
6296
6297        /* Flush Tx queues. If needed, request drain from MCP */
6298        for_each_rss(i) {
6299                fp = &ha->fp_array[i];
6300
6301                for (tc = 0; tc < ha->num_tc; tc++) {
6302                        struct qlnx_tx_queue *txq = fp->txq[tc];
6303
6304                        rc = qlnx_drain_txq(ha, fp, txq);
6305                        if (rc)
6306                                return rc;
6307                }
6308        }
6309
6310        /* Stop all Queues in reverse order*/
6311        for (i = ha->num_rss - 1; i >= 0; i--) {
6312
6313		struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6314
6315                fp = &ha->fp_array[i];
6316
6317                /* Stop the Tx Queue(s)*/
6318                for (tc = 0; tc < ha->num_tc; tc++) {
6319			int tx_queue_id;
6320
6321			tx_queue_id = tc * ha->num_rss + i;
6322			rc = ecore_eth_tx_queue_stop(p_hwfn,
6323					fp->txq[tc]->handle);
6324
6325                        if (rc) {
6326				QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6327					   tx_queue_id);
6328                                return rc;
6329                        }
6330                }
6331
6332                /* Stop the Rx Queue*/
6333		rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6334				false);
6335                if (rc) {
6336                        QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6337                        return rc;
6338                }
6339        }
6340
6341        /* Stop the vport */
6342	for_each_hwfn(cdev, i) {
6343
6344		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6345
6346		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6347
6348		if (rc) {
6349                        QL_DPRINT1(ha, "Failed to stop VPORT\n");
6350			return rc;
6351		}
6352	}
6353
6354        return rc;
6355}
6356
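/* Post a unicast MAC filter command (add/replace/flush) for vport 0 */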
6357static int
6358qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6359	enum ecore_filter_opcode opcode,
6360	unsigned char mac[ETH_ALEN])
6361{
6362	struct ecore_filter_ucast	ucast;
6363	struct ecore_dev		*cdev;
6364	int				rc;
6365
6366	cdev = &ha->cdev;
6367
6368	bzero(&ucast, sizeof(struct ecore_filter_ucast));
6369
6370        ucast.opcode = opcode;
6371        ucast.type = ECORE_FILTER_MAC;
6372        ucast.is_rx_filter = 1;
6373        ucast.vport_to_add_to = 0;
6374        memcpy(&ucast.mac[0], mac, ETH_ALEN);
6375
6376	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6377
6378        return (rc);
6379}
6380
6381static int
6382qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6383{
6384	struct ecore_filter_ucast	ucast;
6385	struct ecore_dev		*cdev;
6386	int				rc;
6387
6388	bzero(&ucast, sizeof(struct ecore_filter_ucast));
6389
6390	ucast.opcode = ECORE_FILTER_REPLACE;
6391	ucast.type = ECORE_FILTER_MAC;
6392	ucast.is_rx_filter = 1;
6393
6394	cdev = &ha->cdev;
6395
6396	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6397
6398	return (rc);
6399}
6400
6401static int
6402qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6403{
6404	struct ecore_filter_mcast	*mcast;
6405	struct ecore_dev		*cdev;
6406	int				rc, i;
6407
6408	cdev = &ha->cdev;
6409
6410	mcast = &ha->ecore_mcast;
6411	bzero(mcast, sizeof(struct ecore_filter_mcast));
6412
6413	mcast->opcode = ECORE_FILTER_REMOVE;
6414
6415	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
6416
6417		if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6418			ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6419			ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6420
6421			memcpy(&mcast->mac[i], &ha->mcast[i].addr[0], ETH_ALEN);
6422			mcast->num_mc_addrs++;
6423		}
6424	}
6425	mcast = &ha->ecore_mcast;
6426
6427	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
6428
6429	bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
6430	ha->nmcast = 0;
6431
6432	return (rc);
6433}
6434
6435static int
6436qlnx_clean_filters(qlnx_host_t *ha)
6437{
6438        int	rc = 0;
6439
6440	/* Remove all unicast macs */
6441	rc = qlnx_remove_all_ucast_mac(ha);
6442	if (rc)
6443		return rc;
6444
6445	/* Remove all multicast macs */
6446	rc = qlnx_remove_all_mcast_mac(ha);
6447	if (rc)
6448		return rc;
6449
6450        rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
6451
6452        return (rc);
6453}
6454
6455static int
6456qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
6457{
6458	struct ecore_filter_accept_flags	accept;
6459	int					rc = 0;
6460	struct ecore_dev			*cdev;
6461
6462	cdev = &ha->cdev;
6463
6464	bzero(&accept, sizeof(struct ecore_filter_accept_flags));
6465
6466	accept.update_rx_mode_config = 1;
6467	accept.rx_accept_filter = filter;
6468
6469	accept.update_tx_mode_config = 1;
6470	accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
6471		ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
6472
6473	rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
6474			ECORE_SPQ_MODE_CB, NULL);
6475
6476	return (rc);
6477}
6478
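/*
 * Program the default Rx mode: install the primary unicast MAC, clear any
 * multicast filters, and accept matched unicast, matched multicast and
 * broadcast frames.
 */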
6479static int
6480qlnx_set_rx_mode(qlnx_host_t *ha)
6481{
6482	int	rc = 0;
6483	uint8_t	filter;
6484
6485	rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
6486        if (rc)
6487                return rc;
6488
6489	rc = qlnx_remove_all_mcast_mac(ha);
6490        if (rc)
6491                return rc;
6492
6493	filter = ECORE_ACCEPT_UCAST_MATCHED |
6494			ECORE_ACCEPT_MCAST_MATCHED |
6495			ECORE_ACCEPT_BCAST;
6496	ha->filter = filter;
6497
6498	rc = qlnx_set_rx_accept_filter(ha, filter);
6499
6500	return (rc);
6501}
6502
6503static int
6504qlnx_set_link(qlnx_host_t *ha, bool link_up)
6505{
6506        int			i, rc = 0;
6507	struct ecore_dev	*cdev;
6508	struct ecore_hwfn	*hwfn;
6509	struct ecore_ptt	*ptt;
6510
6511	cdev = &ha->cdev;
6512
6513        for_each_hwfn(cdev, i) {
6514
6515                hwfn = &cdev->hwfns[i];
6516
6517                ptt = ecore_ptt_acquire(hwfn);
6518       	        if (!ptt)
6519                        return -EBUSY;
6520
6521                rc = ecore_mcp_set_link(hwfn, ptt, link_up);
6522
6523                ecore_ptt_release(hwfn, ptt);
6524
6525                if (rc)
6526                        return rc;
6527        }
6528        return (rc);
6529}
6530
6531#if __FreeBSD_version >= 1100000
6532static uint64_t
6533qlnx_get_counter(if_t ifp, ift_counter cnt)
6534{
6535	qlnx_host_t *ha;
6536	uint64_t count;
6537
6538        ha = (qlnx_host_t *)if_getsoftc(ifp);
6539
6540        switch (cnt) {
6541
6542        case IFCOUNTER_IPACKETS:
6543		count = ha->hw_stats.common.rx_ucast_pkts +
6544			ha->hw_stats.common.rx_mcast_pkts +
6545			ha->hw_stats.common.rx_bcast_pkts;
6546		break;
6547
6548        case IFCOUNTER_IERRORS:
6549		count = ha->hw_stats.common.rx_crc_errors +
6550			ha->hw_stats.common.rx_align_errors +
6551			ha->hw_stats.common.rx_oversize_packets +
6552			ha->hw_stats.common.rx_undersize_packets;
6553		break;
6554
6555        case IFCOUNTER_OPACKETS:
6556		count = ha->hw_stats.common.tx_ucast_pkts +
6557			ha->hw_stats.common.tx_mcast_pkts +
6558			ha->hw_stats.common.tx_bcast_pkts;
6559		break;
6560
6561        case IFCOUNTER_OERRORS:
6562                count = ha->hw_stats.common.tx_err_drop_pkts;
6563		break;
6564
6565        case IFCOUNTER_COLLISIONS:
6566                return (0);
6567
6568        case IFCOUNTER_IBYTES:
6569		count = ha->hw_stats.common.rx_ucast_bytes +
6570			ha->hw_stats.common.rx_mcast_bytes +
6571			ha->hw_stats.common.rx_bcast_bytes;
6572		break;
6573
6574        case IFCOUNTER_OBYTES:
6575		count = ha->hw_stats.common.tx_ucast_bytes +
6576			ha->hw_stats.common.tx_mcast_bytes +
6577			ha->hw_stats.common.tx_bcast_bytes;
6578		break;
6579
6580        case IFCOUNTER_IMCASTS:
6581		count = ha->hw_stats.common.rx_mcast_pkts;
6582		break;
6583
6584        case IFCOUNTER_OMCASTS:
6585		count = ha->hw_stats.common.tx_mcast_pkts;
6586		break;
6587
6588        case IFCOUNTER_IQDROPS:
6589        case IFCOUNTER_OQDROPS:
6590        case IFCOUNTER_NOPROTO:
6591
6592        default:
6593                return (if_get_counter_default(ifp, cnt));
6594        }
6595	return (count);
6596}
6597#endif
6598
6599
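/*
 * Once-a-second callout: refresh the cached vport statistics and, when
 * enabled, take another sample of the storm-processor cycle counters.
 */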
6600static void
6601qlnx_timer(void *arg)
6602{
6603	qlnx_host_t	*ha;
6604
6605	ha = (qlnx_host_t *)arg;
6606
6607       	ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
6608
6609	if (ha->storm_stats_enable)
6610		qlnx_sample_storm_stats(ha);
6611
6612	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
6613
6614	return;
6615}
6616
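/*
 * Bring the interface up: allocate the fastpath arrays and per-queue memory,
 * hook up and bind the per-RSS interrupts, start the queues, program the Rx
 * filters, request link-up and arm the periodic timer.
 */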
6617static int
6618qlnx_load(qlnx_host_t *ha)
6619{
6620	int			i;
6621	int			rc = 0;
6622	struct ecore_dev	*cdev;
6623        device_t		dev;
6624
6625	cdev = &ha->cdev;
6626        dev = ha->pci_dev;
6627
6628	QL_DPRINT2(ha, "enter\n");
6629
6630        rc = qlnx_alloc_mem_arrays(ha);
6631        if (rc)
6632                goto qlnx_load_exit0;
6633
6634        qlnx_init_fp(ha);
6635
6636        rc = qlnx_alloc_mem_load(ha);
6637        if (rc)
6638                goto qlnx_load_exit1;
6639
6640        QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
6641		   ha->num_rss, ha->num_tc);
6642
6643	for (i = 0; i < ha->num_rss; i++) {
6644
6645		if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
6646                        (INTR_TYPE_NET | INTR_MPSAFE),
6647                        NULL, qlnx_fp_isr, &ha->irq_vec[i],
6648                        &ha->irq_vec[i].handle))) {
6649
6650                        QL_DPRINT1(ha, "could not setup interrupt\n");
6651                        goto qlnx_load_exit2;
6652		}
6653
6654		QL_DPRINT2(ha, "rss_id = %d irq_rid %d"
6655			" irq %p handle %p\n", i,
6656			ha->irq_vec[i].irq_rid,
6657			ha->irq_vec[i].irq, ha->irq_vec[i].handle);
6658
6659		bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
6660	}
6661
6662        rc = qlnx_start_queues(ha);
6663        if (rc)
6664                goto qlnx_load_exit2;
6665
6666        QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
6667
6668        /* Add primary mac and set Rx filters */
6669        rc = qlnx_set_rx_mode(ha);
6670        if (rc)
6671                goto qlnx_load_exit2;
6672
6673        /* Ask for link-up using current configuration */
6674	qlnx_set_link(ha, true);
6675
6676        ha->state = QLNX_STATE_OPEN;
6677
6678	bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
6679
6680	if (ha->flags.callout_init)
6681        	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
6682
6683        goto qlnx_load_exit0;
6684
6685qlnx_load_exit2:
6686        qlnx_free_mem_load(ha);
6687
6688qlnx_load_exit1:
6689        ha->num_rss = 0;
6690
6691qlnx_load_exit0:
6692	QL_DPRINT2(ha, "exit [%d]\n", rc);
6693        return rc;
6694}
6695
6696static void
6697qlnx_drain_soft_lro(qlnx_host_t *ha)
6698{
6699#ifdef QLNX_SOFT_LRO
6700
6701	struct ifnet	*ifp;
6702	int		i;
6703
6704	ifp = ha->ifp;
6705
6706
6707	if (ifp->if_capenable & IFCAP_LRO) {
6708
6709	        for (i = 0; i < ha->num_rss; i++) {
6710
6711			struct qlnx_fastpath *fp = &ha->fp_array[i];
6712			struct lro_ctrl *lro;
6713
6714			lro = &fp->rxq->lro;
6715
6716#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
6717
6718			tcp_lro_flush_all(lro);
6719
6720#else
6721			struct lro_entry *queued;
6722
6723			while ((!SLIST_EMPTY(&lro->lro_active))){
6724				queued = SLIST_FIRST(&lro->lro_active);
6725				SLIST_REMOVE_HEAD(&lro->lro_active, next);
6726				tcp_lro_flush(lro, queued);
6727			}
6728
6729#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
6730
6731                }
6732	}
6733
6734#endif /* #ifdef QLNX_SOFT_LRO */
6735
6736	return;
6737}
6738
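/*
 * Tear the interface down: request link-down, remove filters, stop the
 * queues and fastpath, tear down the per-RSS interrupts, drain the
 * taskqueues and soft LRO state, free the per-queue memory and stop the
 * periodic timer.
 */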
6739static void
6740qlnx_unload(qlnx_host_t *ha)
6741{
6742	struct ecore_dev	*cdev;
6743        device_t		dev;
6744	int			i;
6745
6746	cdev = &ha->cdev;
6747        dev = ha->pci_dev;
6748
6749	QL_DPRINT2(ha, "enter\n");
6750        QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);
6751
6752	if (ha->state == QLNX_STATE_OPEN) {
6753
6754		qlnx_set_link(ha, false);
6755		qlnx_clean_filters(ha);
6756		qlnx_stop_queues(ha);
6757		ecore_hw_stop_fastpath(cdev);
6758
6759		for (i = 0; i < ha->num_rss; i++) {
6760			if (ha->irq_vec[i].handle) {
6761				(void)bus_teardown_intr(dev,
6762					ha->irq_vec[i].irq,
6763					ha->irq_vec[i].handle);
6764				ha->irq_vec[i].handle = NULL;
6765			}
6766		}
6767
6768		qlnx_drain_fp_taskqueues(ha);
6769		qlnx_drain_soft_lro(ha);
6770        	qlnx_free_mem_load(ha);
6771	}
6772
6773	if (ha->flags.callout_init)
6774		callout_drain(&ha->qlnx_callout);
6775
6776	qlnx_mdelay(__func__, 1000);
6777
6778        ha->state = QLNX_STATE_CLOSED;
6779
6780	QL_DPRINT2(ha, "exit\n");
6781	return;
6782}
6783
6784static int
6785qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
6786{
6787	int			rval = -1;
6788	struct ecore_hwfn	*p_hwfn;
6789	struct ecore_ptt	*p_ptt;
6790
6791	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
6792
6793	p_hwfn = &ha->cdev.hwfns[hwfn_index];
6794	p_ptt = ecore_ptt_acquire(p_hwfn);
6795
6796        if (!p_ptt) {
6797		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6798                return (rval);
6799        }
6800
6801        rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
6802
6803	if (rval == DBG_STATUS_OK)
6804                rval = 0;
6805        else {
6806		QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
6807			" [0x%x]\n", rval);
6808	}
6809
6810        ecore_ptt_release(p_hwfn, p_ptt);
6811
6812        return (rval);
6813}
6814
6815static int
6816qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
6817{
6818	int			rval = -1;
6819	struct ecore_hwfn	*p_hwfn;
6820	struct ecore_ptt	*p_ptt;
6821
6822	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
6823
6824	p_hwfn = &ha->cdev.hwfns[hwfn_index];
6825	p_ptt = ecore_ptt_acquire(p_hwfn);
6826
6827        if (!p_ptt) {
6828		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6829                return (rval);
6830        }
6831
6832        rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
6833
6834	if (rval == DBG_STATUS_OK)
6835                rval = 0;
6836        else {
6837		QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
6838			" [0x%x]\n", rval);
6839	}
6840
6841        ecore_ptt_release(p_hwfn, p_ptt);
6842
6843        return (rval);
6844}
6845
6846
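/*
 * Capture one sample of the active/stall/sleeping/inactive cycle counters of
 * each storm processor (X/Y/P/T/M/U) on every hw-function, until
 * QLNX_STORM_STATS_SAMPLES_PER_HWFN samples have been collected.
 */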
6847static void
6848qlnx_sample_storm_stats(qlnx_host_t *ha)
6849{
6850        int			i, index;
6851        struct ecore_dev	*cdev;
6852	qlnx_storm_stats_t	*s_stats;
6853	uint32_t		reg;
6854        struct ecore_ptt	*p_ptt;
6855        struct ecore_hwfn	*hwfn;
6856
6857	if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
6858		ha->storm_stats_enable = 0;
6859		return;
6860	}
6861
6862        cdev = &ha->cdev;
6863
6864        for_each_hwfn(cdev, i) {
6865
6866                hwfn = &cdev->hwfns[i];
6867
6868                p_ptt = ecore_ptt_acquire(hwfn);
6869                if (!p_ptt)
6870                        return;
6871
6872		index = ha->storm_stats_index +
6873				(i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
6874
6875		s_stats = &ha->storm_stats[index];
6876
6877		/* XSTORM */
6878		reg = XSEM_REG_FAST_MEMORY +
6879				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6880		s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
6881
6882		reg = XSEM_REG_FAST_MEMORY +
6883				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6884		s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
6885
6886		reg = XSEM_REG_FAST_MEMORY +
6887				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6888		s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
6889
6890		reg = XSEM_REG_FAST_MEMORY +
6891				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6892		s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
6893
6894		/* YSTORM */
6895		reg = YSEM_REG_FAST_MEMORY +
6896				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6897		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
6898
6899		reg = YSEM_REG_FAST_MEMORY +
6900				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6901		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
6902
6903		reg = YSEM_REG_FAST_MEMORY +
6904				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6905		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
6906
6907		reg = YSEM_REG_FAST_MEMORY +
6908				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6909		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
6910
6911		/* PSTORM */
6912		reg = PSEM_REG_FAST_MEMORY +
6913				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6914		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
6915
6916		reg = PSEM_REG_FAST_MEMORY +
6917				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6918		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
6919
6920		reg = PSEM_REG_FAST_MEMORY +
6921				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6922		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
6923
6924		reg = PSEM_REG_FAST_MEMORY +
6925				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6926		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
6927
6928		/* TSTORM */
6929		reg = TSEM_REG_FAST_MEMORY +
6930				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6931		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
6932
6933		reg = TSEM_REG_FAST_MEMORY +
6934				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6935		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
6936
6937		reg = TSEM_REG_FAST_MEMORY +
6938				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6939		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
6940
6941		reg = TSEM_REG_FAST_MEMORY +
6942				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6943		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
6944
6945		/* MSTORM */
6946		reg = MSEM_REG_FAST_MEMORY +
6947				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6948		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
6949
6950		reg = MSEM_REG_FAST_MEMORY +
6951				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6952		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
6953
6954		reg = MSEM_REG_FAST_MEMORY +
6955				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6956		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
6957
6958		reg = MSEM_REG_FAST_MEMORY +
6959				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6960		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
6961
6962		/* USTORM */
6963		reg = USEM_REG_FAST_MEMORY +
6964				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6965		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
6966
6967		reg = USEM_REG_FAST_MEMORY +
6968				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6969		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
6970
6971		reg = USEM_REG_FAST_MEMORY +
6972				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6973		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
6974
6975		reg = USEM_REG_FAST_MEMORY +
6976				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6977		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
6978
6979                ecore_ptt_release(hwfn, p_ptt);
6980        }
6981
6982	ha->storm_stats_index++;
6983
6984        return;
6985}
6986
6987/*
6988 * Name: qlnx_dump_buf8
6989 * Function: dumps a buffer as bytes
6990 */
6991static void
6992qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
6993{
6994        device_t	dev;
6995        uint32_t	i = 0;
6996        uint8_t		*buf;
6997
6998        dev = ha->pci_dev;
6999        buf = dbuf;
7000
7001        device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
7002
7003        while (len >= 16) {
7004                device_printf(dev,"0x%08x:"
7005                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7006                        " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7007                        buf[0], buf[1], buf[2], buf[3],
7008                        buf[4], buf[5], buf[6], buf[7],
7009                        buf[8], buf[9], buf[10], buf[11],
7010                        buf[12], buf[13], buf[14], buf[15]);
7011                i += 16;
7012                len -= 16;
7013                buf += 16;
7014        }
7015        switch (len) {
7016        case 1:
7017                device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
7018                break;
7019        case 2:
7020                device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
7021                break;
7022        case 3:
7023                device_printf(dev,"0x%08x: %02x %02x %02x\n",
7024                        i, buf[0], buf[1], buf[2]);
7025                break;
7026        case 4:
7027                device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
7028                        buf[0], buf[1], buf[2], buf[3]);
7029                break;
7030        case 5:
7031                device_printf(dev,"0x%08x:"
7032                        " %02x %02x %02x %02x %02x\n", i,
7033                        buf[0], buf[1], buf[2], buf[3], buf[4]);
7034                break;
7035        case 6:
7036                device_printf(dev,"0x%08x:"
7037                        " %02x %02x %02x %02x %02x %02x\n", i,
7038                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
7039                break;
7040        case 7:
7041                device_printf(dev,"0x%08x:"
7042                        " %02x %02x %02x %02x %02x %02x %02x\n", i,
7043                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
7044                break;
7045        case 8:
7046                device_printf(dev,"0x%08x:"
7047                        " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7048                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7049                        buf[7]);
7050                break;
7051        case 9:
7052                device_printf(dev,"0x%08x:"
7053                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7054                        " %02x\n", i,
7055                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7056                        buf[7], buf[8]);
7057                break;
7058        case 10:
7059                device_printf(dev,"0x%08x:"
7060                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7061                        " %02x %02x\n", i,
7062                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7063                        buf[7], buf[8], buf[9]);
7064                break;
7065        case 11:
7066                device_printf(dev,"0x%08x:"
7067                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7068                        " %02x %02x %02x\n", i,
7069                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7070                        buf[7], buf[8], buf[9], buf[10]);
7071                break;
7072        case 12:
7073                device_printf(dev,"0x%08x:"
7074                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7075                        " %02x %02x %02x %02x\n", i,
7076                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7077                        buf[7], buf[8], buf[9], buf[10], buf[11]);
7078                break;
7079        case 13:
7080                device_printf(dev,"0x%08x:"
7081                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7082                        " %02x %02x %02x %02x %02x\n", i,
7083                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7084                        buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
7085                break;
7086        case 14:
7087                device_printf(dev,"0x%08x:"
7088                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7089                        " %02x %02x %02x %02x %02x %02x\n", i,
7090                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7091                        buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7092                        buf[13]);
7093                break;
7094        case 15:
7095                device_printf(dev,"0x%08x:"
7096                        " %02x %02x %02x %02x %02x %02x %02x %02x"
7097                        " %02x %02x %02x %02x %02x %02x %02x\n", i,
7098                        buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7099                        buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7100                        buf[13], buf[14]);
7101                break;
7102        default:
7103                break;
7104        }
7105
7106        device_printf(dev, "%s: %s dump end\n", __func__, msg);
7107
7108        return;
7109}
7110
7111