qlnx_def.h revision 318659
1/*
2 * Copyright (c) 2017-2018 Cavium, Inc.
3 * All rights reserved.
4 *
5 *  Redistribution and use in source and binary forms, with or without
6 *  modification, are permitted provided that the following conditions
7 *  are met:
8 *
9 *  1. Redistributions of source code must retain the above copyright
10 *     notice, this list of conditions and the following disclaimer.
11 *  2. Redistributions in binary form must reproduce the above copyright
12 *     notice, this list of conditions and the following disclaimer in the
13 *     documentation and/or other materials provided with the distribution.
14 *
15 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 *  POSSIBILITY OF SUCH DAMAGE.
26 *
27 * $FreeBSD: stable/11/sys/dev/qlnx/qlnxe/qlnx_def.h 318659 2017-05-22 19:28:38Z davidcs $
28 *
29 */
30
31
32
33/*
34 * File: qlnx_def.h
35 * Author : David C Somayajulu, Cavium Inc., San Jose, CA 95131.
36 */
37
38#ifndef _QLNX_DEF_H_
39#define _QLNX_DEF_H_
40
41#define VER_SIZE 16
42
/*
 * Per-RSS-ring interrupt vector bookkeeping: the allocated IRQ resource,
 * the installed handler cookie, and the rid used to release the IRQ.
 */
struct qlnx_ivec {
        uint32_t                rss_idx;	/* index of the RSS ring this vector serves */
        void                    *ha;		/* presumably the owning qlnx_host_t -- TODO confirm */
        struct resource         *irq;		/* allocated interrupt resource */
        void                    *handle;	/* interrupt handler cookie (bus_setup_intr) -- TODO confirm */
        int                     irq_rid;	/* resource id used to alloc/release the IRQ */
};

typedef struct qlnx_ivec qlnx_ivec_t;
52
53//#define QLNX_MAX_RSS	30
54#define QLNX_MAX_RSS	16
55#define QLNX_MAX_TC	1
56
/* Administrative state of the interface. */
enum QLNX_STATE {
        QLNX_STATE_CLOSED,	/* closed / not started */
        QLNX_STATE_OPEN,	/* open / started */
};
61
62#define HILO_U64(hi, lo)                ((((u64)(hi)) << 32) + (lo))
63
64#define MAX_NUM_TC      8
65#define MAX_NUM_PRI     8
66
67#ifndef BITS_PER_BYTE
68#define BITS_PER_BYTE	8
69#endif /* #ifndef BITS_PER_BYTE */
70
71
72/*
73 * RX ring buffer contains pointer to kmalloc() data only,
74 */
75struct sw_rx_data {
76        void		*data;
77	bus_dmamap_t	map;
78	dma_addr_t	dma_addr;
79};
80
/* State of a single TPA (hardware LRO) aggregation. */
enum qlnx_agg_state {
        QLNX_AGG_STATE_NONE  = 0,	/* no aggregation in progress */
        QLNX_AGG_STATE_START = 1,	/* TPA_START seen; aggregation open */
        QLNX_AGG_STATE_ERROR = 2	/* error seen while aggregating */
};
86
struct qlnx_agg_info {
        /*
         * rx_buf is a data buffer that can be placed/consumed from the rx bd
         * chain. It has two purposes: we preallocate the data buffer for
         * each aggregation when we open the interface and place this buffer
         * on the rx-bd-ring when we receive TPA_START. We don't want to be
         * in a state where allocation fails, as we can't reuse the consumer
         * buffer in the rx-chain since FW may still be writing to it (the
         * header needs to be modified for TPA).
         * The second purpose is to keep a pointer to the bd buffer during
         * aggregation.
         */
        struct sw_rx_data       rx_buf;
        enum qlnx_agg_state     agg_state;	/* current state of this aggregation */
	uint16_t		placement_offset; /* presumably the payload offset within rx_buf, from TPA_START -- verify */
        struct mbuf             *mpf; /* first mbuf in chain */
        struct mbuf             *mpl; /* last mbuf in chain */
};
104
105#define RX_RING_SIZE_POW        13
106#define RX_RING_SIZE            (1 << RX_RING_SIZE_POW)
107
108#define TX_RING_SIZE_POW        14
109#define TX_RING_SIZE            (1 << TX_RING_SIZE_POW)
110
/*
 * One receive queue: the software shadow ring plus the ecore BD and
 * completion chains and the producer address used to return buffers.
 */
struct qlnx_rx_queue {
        volatile __le16         *hw_cons_ptr;	/* HW consumer index -- presumably in the status block; verify */
        struct sw_rx_data       sw_rx_ring[RX_RING_SIZE]; /* SW shadow of posted buffers */
        uint16_t		sw_rx_cons;	/* SW consumer index into sw_rx_ring */
        uint16_t		sw_rx_prod;	/* SW producer index into sw_rx_ring */
        struct ecore_chain      rx_bd_ring;	/* buffer-descriptor chain */
        struct ecore_chain      rx_comp_ring;	/* completion chain */
        void __iomem            *hw_rxq_prod_addr; /* MMIO address for the RX producer update */
	void 			*handle;	/* presumably a back-pointer for deferred processing -- verify */

        /* LRO */
        struct qlnx_agg_info    tpa_info[ETH_TPA_MAX_AGGS_NUM]; /* per-aggregation TPA state */

        uint32_t		rx_buf_size;	/* size of each RX buffer */

        uint16_t		num_rx_buffers;	/* number of RX buffers */
        uint16_t		rxq_id;		/* queue id */


#ifdef QLNX_SOFT_LRO
	struct lro_ctrl		lro;		/* software LRO state */
#endif
};
134
135
/*
 * Doorbell producer value: structured form for field access, raw 32-bit
 * form for the actual register write.
 */
union db_prod {
        struct eth_db_data	data;
        uint32_t		raw;
};
140
/* Software state for one TX ring descriptor. */
struct sw_tx_bd {
        struct mbuf		*mp;	/* mbuf (chain) being transmitted */
	bus_dmamap_t		map;	/* busdma map for the mbuf */
        uint8_t			flags;	/* QLNX_TSO_SPLIT_BD */
	int			nsegs;	/* number of DMA segments mapped */

/* Set on the first BD descriptor when there is a split BD */
#define QLNX_TSO_SPLIT_BD               (1<<0)
};
150
#define QLNX_MAX_SEGMENTS		255
/*
 * One transmit queue: the software shadow ring, the ecore TX chain, and
 * the doorbell used to publish new producer values to the hardware.
 */
struct qlnx_tx_queue {

        int                     index; /* Queue index */
        volatile __le16         *hw_cons_ptr;	/* HW consumer index -- presumably in the status block; verify */
        struct sw_tx_bd         sw_tx_ring[TX_RING_SIZE]; /* SW per-descriptor state */
        uint16_t		sw_tx_cons;	/* SW consumer index */
        uint16_t		sw_tx_prod;	/* SW producer index */
        struct ecore_chain	tx_pbl;		/* TX descriptor chain */
        void __iomem            *doorbell_addr;	/* MMIO doorbell address for this queue */
	void 			*handle;	/* presumably a back-pointer for deferred processing -- verify */
        union db_prod           tx_db;		/* cached doorbell/producer value */

	bus_dma_segment_t	segs[QLNX_MAX_SEGMENTS]; /* presumably scratch for bus_dmamap_load -- verify */

        uint16_t		num_tx_buffers;
};
168
169#define BD_UNMAP_ADDR(bd)	HILO_U64(le32toh((bd)->addr.hi), \
170					le32toh((bd)->addr.lo))
171#define BD_UNMAP_LEN(bd)	(le16toh((bd)->nbytes))
172
/*
 * Store a 64-bit DMA address and a length into a buffer descriptor,
 * converting to the little-endian on-wire layout.
 * Bug fix: the do/while(0) previously ended in `while (0);` -- the extra
 * semicolon defeats the idiom and makes `if (c) BD_SET_...(...); else ...'
 * a syntax error. The trailing semicolon is removed.
 */
#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
        do { \
                (bd)->addr.hi = htole32(U64_HI(maddr)); \
                (bd)->addr.lo = htole32(U64_LO(maddr)); \
                (bd)->nbytes = htole16(len); \
        } while (0)
179
180
181#define QLNX_FP_MAX_SEGS	24
182
/*
 * Per-RSS-ring fast path context: the RX queue, the per-traffic-class TX
 * queues, the transmit buf_ring/taskqueue, and per-ring statistics.
 */
struct qlnx_fastpath {
        void			*edev;		/* presumably the owning device back-pointer -- verify */
        uint8_t			rss_id;		/* index of this fast path / RSS ring */
        struct ecore_sb_info    *sb_info;	/* status block for this ring */
        struct qlnx_rx_queue    *rxq;		/* receive queue */
        struct qlnx_tx_queue    *txq[MAX_NUM_TC]; /* one TX queue per traffic class */
	char			name[64];

	struct mtx		tx_mtx;		/* serializes the transmit path for this ring */
	char			tx_mtx_name[32];
	struct buf_ring		*tx_br;		/* staging ring of outgoing mbufs */
	uint32_t		tx_ring_full;

	struct task		fp_task;	/* deferred fast path processing */
	struct taskqueue	*fp_taskqueue;

	/* transmit statistics */
	uint64_t		tx_pkts_processed;
	uint64_t		tx_pkts_freed;
	uint64_t		tx_pkts_transmitted;
	uint64_t		tx_pkts_completed;
	uint64_t		tx_lso_wnd_min_len;
	uint64_t		tx_defrag;
	uint64_t		tx_nsegs_gt_elem_left;
	uint32_t		tx_tso_max_nsegs;
	uint32_t		tx_tso_min_nsegs;
	uint32_t		tx_tso_max_pkt_len;
	uint32_t		tx_tso_min_pkt_len;
	uint64_t		tx_pkts[QLNX_FP_MAX_SEGS]; /* presumably packet counts bucketed by segment count -- verify */
	uint64_t		err_tx_nsegs_gt_elem_left;
        uint64_t                err_tx_dmamap_create;
        uint64_t                err_tx_defrag_dmamap_load;
        uint64_t                err_tx_non_tso_max_seg;
        uint64_t                err_tx_dmamap_load;
        uint64_t                err_tx_defrag;
        uint64_t                err_tx_free_pkt_null;
        uint64_t                err_tx_cons_idx_conflict;

        /* LRO counters, presumably bucketed by aggregation size -- verify */
        uint64_t                lro_cnt_64;
        uint64_t                lro_cnt_128;
        uint64_t                lro_cnt_256;
        uint64_t                lro_cnt_512;
        uint64_t                lro_cnt_1024;

	/* receive statistics */
	uint64_t		rx_pkts;
	uint64_t		tpa_start;
	uint64_t		tpa_cont;
	uint64_t		tpa_end;
        uint64_t                err_m_getcl;
        uint64_t                err_m_getjcl;
        uint64_t		err_rx_hw_errors;
        uint64_t		err_rx_alloc_errors;
	uint64_t		err_rx_jumbo_chain_pkts;
	uint64_t		err_rx_mp_null;
	uint64_t		err_rx_tpa_invalid_agg_num;
};
240
/*
 * Parameters for a vport update; each update_*_flg presumably gates
 * whether the matching value below is applied -- verify against the
 * ecore vport-update call.
 */
struct qlnx_update_vport_params {
        uint8_t			vport_id;
        uint8_t			update_vport_active_rx_flg;
        uint8_t			vport_active_rx_flg;
        uint8_t			update_vport_active_tx_flg;
        uint8_t			vport_active_tx_flg;
        uint8_t			update_inner_vlan_removal_flg;
        uint8_t			inner_vlan_removal_flg;
        struct ecore_rss_params	*rss_params;
	struct ecore_sge_tpa_params *sge_tpa_params;
};
252
253/*
254 * link related
255 */
256struct qlnx_link_output {
257	bool		link_up;
258	uint32_t	supported_caps;
259	uint32_t	advertised_caps;
260	uint32_t	link_partner_caps;
261	uint32_t	speed; /* In Mb/s */
262	bool		autoneg;
263	uint32_t	media_type;
264	uint32_t	duplex;
265};
266typedef struct qlnx_link_output qlnx_link_output_t;
267
268#define QLNX_LINK_DUPLEX			0x0001
269
270#define QLNX_LINK_CAP_FIBRE			0x0001
271#define QLNX_LINK_CAP_Autoneg			0x0002
272#define QLNX_LINK_CAP_Pause			0x0004
273#define QLNX_LINK_CAP_Asym_Pause		0x0008
274#define QLNX_LINK_CAP_1000baseT_Half		0x0010
275#define QLNX_LINK_CAP_1000baseT_Full		0x0020
276#define QLNX_LINK_CAP_10000baseKR_Full		0x0040
277#define QLNX_LINK_CAP_25000baseKR_Full		0x0080
278#define QLNX_LINK_CAP_40000baseLR4_Full		0x0100
279#define QLNX_LINK_CAP_50000baseKR2_Full		0x0200
280#define QLNX_LINK_CAP_100000baseKR4_Full	0x0400
281
282
283/* Functions definition */
284
285#define XMIT_PLAIN              0
286#define XMIT_L4_CSUM            (1 << 0)
287#define XMIT_LSO                (1 << 1)
288
289#define CQE_FLAGS_ERR   (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<       \
290                         PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT |       \
291                         PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<     \
292                         PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT |     \
293                         PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << \
294                         PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | \
295                         PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << \
296                         PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT)
297
298#define RX_COPY_THRESH          92
299#define ETH_MAX_PACKET_SIZE     1500
300
301#define QLNX_MFW_VERSION_LENGTH 32
302#define QLNX_STORMFW_VERSION_LENGTH 32
303
304#define QLNX_TX_ELEM_RESERVE	2
305
306#define QLNX_TPA_MAX_AGG_BUFFERS             (20)
307
#define QLNX_MAX_NUM_MULTICAST_ADDRS	ECORE_MAX_MC_ADDRS
/* One multicast MAC entry; rsrvd pads the 6-byte address to 8 bytes. */
typedef struct _qlnx_mcast {
        uint16_t        rsrvd;
        uint8_t         addr[6];
} __packed qlnx_mcast_t;
313
314/*
315 * Adapter structure contains the hardware independent information of the
316 * pci function.
317 */
struct qlnx_host {

	/* interface to ecore */

	struct ecore_dev	cdev;		/* embedded ecore device state */

	uint32_t		state;		/* QLNX_STATE_* */

	/* some flags: presumably record completed init steps for teardown -- verify */
        volatile struct {
                volatile uint32_t
			callout_init		:1,
                        slowpath_start		:1,
                        parent_tag		:1,
                        lock_init		:1;
        } flags;

	/* interface to o.s */

	device_t		pci_dev;
	uint8_t			pci_func;
	uint8_t			dev_unit;

	struct ifnet		*ifp;
	int			if_flags;
	volatile int		link_up;
	struct ifmedia		media;
	uint16_t		max_frame_size;

	struct cdev		*ioctl_dev;	/* character device for ioctl access */

	/* resources */
        struct resource         *pci_reg;	/* register BAR */
        int                     reg_rid;

        struct resource         *pci_dbells;	/* doorbell BAR */
        int                     dbells_rid;
	uint64_t		dbells_phys_addr;
	uint32_t		dbells_size;

        struct resource         *msix_bar;
        int                     msix_rid;

	int			msix_count;	/* number of MSI-X vectors allocated */

	struct mtx		hw_lock;

	/* debug */

	uint32_t                dbg_level;	/* bitmask consumed by QL_DPRINT* */
	uint32_t                dbg_trace_lro_cnt;
	uint32_t                dbg_trace_tso_pkt_len;
	uint32_t                dp_level;
	uint32_t                dp_module;

	/* misc */
	uint8_t 		mfw_ver[QLNX_MFW_VERSION_LENGTH];
	uint8_t 		stormfw_ver[QLNX_STORMFW_VERSION_LENGTH];
	uint32_t		flash_size;

	/* dma related */

	bus_dma_tag_t		parent_tag;
	bus_dma_tag_t		tx_tag;
	bus_dma_tag_t		rx_tag;


        struct ecore_sb_info    sb_array[QLNX_MAX_RSS];
        struct qlnx_rx_queue    rxq_array[QLNX_MAX_RSS];
        struct qlnx_tx_queue    txq_array[(QLNX_MAX_RSS * MAX_NUM_TC)];
        struct qlnx_fastpath    fp_array[QLNX_MAX_RSS];

	/* tx related */
	struct callout		tx_callout;
	uint32_t		txr_idx;

	/* rx related */
	uint32_t		rx_pkt_threshold;
	uint32_t		rx_jumbo_buf_eq_mtu;

	/* slow path related */
        struct resource         *sp_irq[MAX_HWFNS_PER_DEVICE];
        void                    *sp_handle[MAX_HWFNS_PER_DEVICE];
        int                     sp_irq_rid[MAX_HWFNS_PER_DEVICE];
	struct task		sp_task[MAX_HWFNS_PER_DEVICE];
	struct taskqueue	*sp_taskqueue[MAX_HWFNS_PER_DEVICE];

	struct callout          qlnx_callout;

	/* fast path related */
	int			num_rss;	/* number of RSS rings in use (<= QLNX_MAX_RSS) */
	int			num_tc;		/* number of traffic classes in use (<= QLNX_MAX_TC) */

#define QLNX_MAX_TSS_CNT(ha)	((ha->num_rss) * (ha->num_tc))

	qlnx_ivec_t              irq_vec[QLNX_MAX_RSS];


	uint8_t			filter;
	uint32_t                nmcast;		/* number of valid entries in mcast[] */
	qlnx_mcast_t            mcast[QLNX_MAX_NUM_MULTICAST_ADDRS];
	struct ecore_filter_mcast ecore_mcast;
	uint8_t			primary_mac[ETH_ALEN];
	uint8_t			prio_to_tc[MAX_NUM_PRI];
	struct ecore_eth_stats	hw_stats;
	struct ecore_rss_params	rss_params;
        uint32_t		rx_buf_size;
        bool			rx_csum_offload;

	uint32_t		rx_coalesce_usecs;
	uint32_t		tx_coalesce_usecs;

	/* link related */
	qlnx_link_output_t	if_link;

	/* global counters */
	uint64_t		sp_interrupts;
	uint64_t		err_illegal_intr;
	uint64_t		err_fp_null;
	uint64_t		err_get_proto_invalid_type;

	/* grcdump related */
	uint32_t		err_inject;	/* armed QL_ERR_INJCT_* trigger; see QL_ERR_INJECT() */
	uint32_t		grcdump_taken;
	uint32_t		grcdump_dwords[QLNX_MAX_HW_FUNCS];
	uint32_t		grcdump_size[QLNX_MAX_HW_FUNCS];
	void			*grcdump[QLNX_MAX_HW_FUNCS];

	uint32_t		idle_chk_taken;
	uint32_t		idle_chk_dwords[QLNX_MAX_HW_FUNCS];
	uint32_t		idle_chk_size[QLNX_MAX_HW_FUNCS];
	void			*idle_chk[QLNX_MAX_HW_FUNCS];

	/* storm stats related */
#define QLNX_STORM_STATS_TOTAL \
		(QLNX_MAX_HW_FUNCS * QLNX_STORM_STATS_SAMPLES_PER_HWFN)
	qlnx_storm_stats_t	storm_stats[QLNX_STORM_STATS_TOTAL];
	uint32_t		storm_stats_index;	/* next free slot in storm_stats[] -- TODO confirm */
	uint32_t		storm_stats_enable;

	uint32_t		personality;
};

typedef struct qlnx_host qlnx_host_t;
462
/* note that align has to be a power of 2 */
/*
 * Round `size' up to the next multiple of `align'.
 * Bug fix: the original expansion had unparenthesized arguments, no
 * outer parentheses, and a stray trailing semicolon, so it could not be
 * used inside a larger expression (e.g. `QL_ALIGN(s, a) + 4' failed to
 * compile). QL_MIN's arguments are parenthesized for the same reason.
 */
#define QL_ALIGN(size, align)	((((size) + ((align) - 1)) & ~((align) - 1)))
#define QL_MIN(x, y)		(((x) < (y)) ? (x) : (y))
466
/*
 * True when the interface is marked RUNNING and not OACTIVE.
 * Fix: parenthesize the `ifp' argument so the macro is safe for
 * non-trivial argument expressions.
 */
#define QL_RUNNING(ifp) \
		(((ifp)->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
			IFF_DRV_RUNNING)
470
471#define QLNX_MAX_MTU			9000
472#define QLNX_MAX_SEGMENTS_NON_TSO	(ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1)
473#define QLNX_MAX_TSO_FRAME_SIZE		((64 * 1024 - 1) + 22)
474
/*
 * Compare two 6-byte MAC addresses; evaluates to 0 when equal and 1 when
 * different (memcmp-style result). The comparison reads a 32-bit word
 * plus a 16-bit word, so both pointers are assumed to be sufficiently
 * aligned -- NOTE(review): confirm all callers pass aligned buffers.
 * Fix: parenthesize `mac1'/`mac2' so expression arguments (e.g.
 * `base + off') expand correctly.
 */
#define QL_MAC_CMP(mac1, mac2)    \
        ((((*(uint32_t *)(mac1)) == (*(uint32_t *)(mac2)) && \
        (*(uint16_t *)((mac1) + 4)) == (*(uint16_t *)((mac2) + 4)))) ? 0 : 1)
478#define for_each_rss(i) for (i = 0; i < ha->num_rss; i++)
479
480/*
481 * Debug Related
482 */
483
484#ifdef QLNX_DEBUG
485
486#define QL_DPRINT1(ha, x, ...) 					\
487	do { 							\
488		if ((ha)->dbg_level & 0x0001) {			\
489			device_printf ((ha)->pci_dev,		\
490				"[%s:%d]" x,			\
491				__func__, __LINE__,		\
492				## __VA_ARGS__);		\
493		}						\
494	} while (0)
495
496#define QL_DPRINT2(ha, x, ...)					\
497	do { 							\
498		if ((ha)->dbg_level & 0x0002) {			\
499			device_printf ((ha)->pci_dev,		\
500				"[%s:%d]" x,			\
501				__func__, __LINE__,		\
502				## __VA_ARGS__);		\
503		}						\
504	} while (0)
505
506#define QL_DPRINT3(ha, x, ...)					\
507	do { 							\
508		if ((ha)->dbg_level & 0x0004) {			\
509			device_printf ((ha)->pci_dev,		\
510				"[%s:%d]" x,			\
511				__func__, __LINE__,		\
512				## __VA_ARGS__);		\
513		}						\
514	} while (0)
515
516#define QL_DPRINT4(ha, x, ...)					\
517	do { 							\
518		if ((ha)->dbg_level & 0x0008) {			\
519			device_printf ((ha)->pci_dev,		\
520				"[%s:%d]" x,			\
521				__func__, __LINE__,		\
522				## __VA_ARGS__);		\
523		}						\
524	} while (0)
525
526#define QL_DPRINT5(ha, x, ...)					\
527	do { 							\
528		if ((ha)->dbg_level & 0x0010) {			\
529			device_printf ((ha)->pci_dev,		\
530				"[%s:%d]" x,			\
531				__func__, __LINE__,		\
532				## __VA_ARGS__);		\
533		}						\
534	} while (0)
535
536#define QL_DPRINT6(ha, x, ...)					\
537	do { 							\
538		if ((ha)->dbg_level & 0x0020) {			\
539			device_printf ((ha)->pci_dev,		\
540				"[%s:%d]" x,			\
541				__func__, __LINE__,		\
542				## __VA_ARGS__);		\
543		}						\
544	} while (0)
545
546#define QL_DPRINT7(ha, x, ...)					\
547	do { 							\
548		if ((ha)->dbg_level & 0x0040) {			\
549			device_printf ((ha)->pci_dev,		\
550				"[%s:%d]" x,			\
551				__func__, __LINE__,		\
552				## __VA_ARGS__);		\
553		}						\
554	} while (0)
555
556#define QL_DPRINT8(ha, x, ...)					\
557	do { 							\
558		if ((ha)->dbg_level & 0x0080) {			\
559			device_printf ((ha)->pci_dev,		\
560				"[%s:%d]" x,			\
561				__func__, __LINE__,		\
562				## __VA_ARGS__);		\
563		}						\
564	} while (0)
565
566#define QL_DPRINT9(ha, x, ...)					\
567	do { 							\
568		if ((ha)->dbg_level & 0x0100) {			\
569			device_printf ((ha)->pci_dev,		\
570				"[%s:%d]" x,			\
571				__func__, __LINE__,		\
572				## __VA_ARGS__);		\
573		}						\
574	} while (0)
575
576#define QL_DPRINT11(ha, x, ...)					\
577	do { 							\
578		if ((ha)->dbg_level & 0x0400) {			\
579			device_printf ((ha)->pci_dev,		\
580				"[%s:%d]" x,			\
581				__func__, __LINE__,		\
582				## __VA_ARGS__);		\
583		}						\
584	} while (0)
585
586#define QL_DPRINT12(ha, x, ...)					\
587	do { 							\
588		if ((ha)->dbg_level & 0x0800) {			\
589			device_printf ((ha)->pci_dev,		\
590				"[%s:%d]" x,			\
591				__func__, __LINE__,		\
592				## __VA_ARGS__);		\
593		}						\
594	} while (0)
595
596#define QL_DPRINT13(ha, x, ...)					\
597	do { 							\
598		if ((ha)->dbg_level & 0x1000) {			\
599			device_printf ((ha)->pci_dev,		\
600				"[%s:%d]" x,			\
601				__func__, __LINE__,		\
602				## __VA_ARGS__);		\
603		}						\
604	} while (0)
605
606
607#else
608
609#define QL_DPRINT1(ha, x, ...)
610#define QL_DPRINT2(ha, x, ...)
611#define QL_DPRINT3(ha, x, ...)
612#define QL_DPRINT4(ha, x, ...)
613#define QL_DPRINT5(ha, x, ...)
614#define QL_DPRINT6(ha, x, ...)
615#define QL_DPRINT7(ha, x, ...)
616#define QL_DPRINT8(ha, x, ...)
617#define QL_DPRINT9(ha, x, ...)
618#define QL_DPRINT11(ha, x, ...)
619#define QL_DPRINT12(ha, x, ...)
620#define QL_DPRINT13(ha, x, ...)
621
622#endif /* #ifdef QLNX_DEBUG */
623
/*
 * Panic with message tuple `y' when condition `x' does not hold.
 * Bug fix: the old expansion was `if (!x) panic y' -- for a compound
 * condition like `a == b' this mis-bound as `(!a) == b' because `!'
 * binds tighter than `=='; the bare `if' was also unsafe next to an
 * `else'. Parenthesize `x' and wrap in do/while(0).
 */
#define QL_ASSERT(ha, x, y)	do { if (!(x)) panic y; } while (0)
625
/* True when error-injection trigger `val' is currently armed. */
#define QL_ERR_INJECT(ha, val)		((ha)->err_inject == (val))
/*
 * Disarm trigger `val' if armed.
 * Bug fix: the old `{ ... }' expansion was not an expression statement,
 * so `if (c) QL_RESET_ERR_INJECT(ha, v); else ...' failed to compile;
 * wrap in do/while(0) and parenthesize the arguments.
 */
#define QL_RESET_ERR_INJECT(ha, val)	\
	do { \
		if ((ha)->err_inject == (val)) \
			(ha)->err_inject = 0; \
	} while (0)
#define QL_ERR_INJCT_TX_INT_DIFF	0x0001
#define QL_ERR_INJCT_TX_INT_MBUF_NULL	0x0002
630
631
632/*
633 * exported functions
634 */
635extern int qlnx_make_cdev(qlnx_host_t *ha);
636extern void qlnx_del_cdev(qlnx_host_t *ha);
637extern int qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
638		int hwfn_index);
639extern int qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
640		int hwfn_index);
641extern uint8_t *qlnx_get_mac_addr(qlnx_host_t *ha);
642extern void qlnx_fill_link(struct ecore_hwfn *hwfn,
643                          struct qlnx_link_output *if_link);
644
645/*
646 * Some OS specific stuff
647 */
648
#if (defined IFM_100G_SR4)
#define QLNX_IFM_100G_SR4 IFM_100G_SR4
#define QLNX_IFM_100G_LR4 IFM_100G_LR4
#define QLNX_IFM_100G_CR4 IFM_100G_CR4
#else
/*
 * Bug fix: QLNX_IFM_100G_CR4 was missing from this fallback branch, so
 * any use of it failed to compile on systems whose <net/if_media.h>
 * predates the IFM_100G_* media types.
 */
#define QLNX_IFM_100G_SR4 IFM_UNKNOWN
#define QLNX_IFM_100G_LR4 IFM_UNKNOWN
#define QLNX_IFM_100G_CR4 IFM_UNKNOWN
#endif
657
658#if (defined IFM_25G_SR)
659#define QLNX_IFM_25G_SR IFM_25G_SR
660#define QLNX_IFM_25G_CR IFM_25G_CR
661#else
662#define QLNX_IFM_25G_SR IFM_UNKNOWN
663#define QLNX_IFM_25G_CR IFM_UNKNOWN
664#endif
665
666
/*
 * Interface counter helpers: direct ifnet field updates on pre-1100000
 * kernels, if_inc_counter(9) afterwards.
 */
#if __FreeBSD_version < 1100000

#define QLNX_INC_IERRORS(ifp)		(ifp)->if_ierrors++
#define QLNX_INC_IQDROPS(ifp)		(ifp)->if_iqdrops++
#define QLNX_INC_IPACKETS(ifp)		(ifp)->if_ipackets++
#define QLNX_INC_OPACKETS(ifp)		(ifp)->if_opackets++
#define QLNX_INC_OBYTES(ifp, len)	(ifp)->if_obytes += (len)
#define QLNX_INC_IBYTES(ifp, len)	(ifp)->if_ibytes += (len)

#else

#define QLNX_INC_IERRORS(ifp)	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1)
#define QLNX_INC_IQDROPS(ifp)	if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1)
#define QLNX_INC_IPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1)
#define QLNX_INC_OPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1)

#define QLNX_INC_OBYTES(ifp, len)	\
			if_inc_counter(ifp, IFCOUNTER_OBYTES, len)
/*
 * Bug fix: this macro previously expanded to if_inc_counter(ha->ifp, ...),
 * silently ignoring its `ifp' argument and relying on a variable named
 * `ha' being in scope at every call site. Use the argument, consistent
 * with QLNX_INC_OBYTES above.
 */
#define QLNX_INC_IBYTES(ifp, len)	\
			if_inc_counter(ifp, IFCOUNTER_IBYTES, len)

#endif /* #if __FreeBSD_version < 1100000 */
689
690#define CQE_L3_PACKET(flags)    \
691        ((((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3Type_ipv4) || \
692        (((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3Type_ipv6))
693
694#define CQE_IP_HDR_ERR(flags) \
695        ((flags) & (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK \
696                << PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT))
697
698#define CQE_L4_HAS_CSUM(flags) \
699        ((flags) & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK \
700                << PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT))
701
702#define CQE_HAS_VLAN(flags) \
703        ((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
704                << PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
705
706
707#endif /* #ifndef _QLNX_DEF_H_ */
708