/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/qlnx/qlnxe/qlnx_def.h 322851 2017-08-24 18:51:55Z davidcs $
 *
 */

/*
 * File: qlnx_def.h
 * Author: David C Somayajulu, Cavium Inc., San Jose, CA 95131.
 */

#ifndef _QLNX_DEF_H_
#define _QLNX_DEF_H_

#define VER_SIZE 16

struct qlnx_ivec {
	uint32_t		rss_idx;
	void			*ha;
	struct resource		*irq;
	void			*handle;
	int			irq_rid;
};

typedef struct qlnx_ivec qlnx_ivec_t;

//#define QLNX_MAX_RSS		30
#define QLNX_MAX_RSS		36
#define QLNX_DEFAULT_RSS	16
#define QLNX_MAX_TC		1

enum QLNX_STATE {
	QLNX_STATE_CLOSED,
	QLNX_STATE_OPEN,
};

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))
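
/*
 * Illustrative use of HILO_U64 (editorial note, not from the original
 * source): the firmware reports 64-bit quantities as two 32-bit halves,
 * and HILO_U64 recombines them, e.g.
 *
 *	HILO_U64(0x00000001, 0x00000002) == 0x0000000100000002ULL
 */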

#define MAX_NUM_TC      8
#define MAX_NUM_PRI     8

#ifndef BITS_PER_BYTE
#define BITS_PER_BYTE	8
#endif /* #ifndef BITS_PER_BYTE */

/*
 * RX ring buffer contains pointer to kmalloc() data only.
 */
struct sw_rx_data {
	void		*data;
	bus_dmamap_t	map;
	dma_addr_t	dma_addr;
};

enum qlnx_agg_state {
	QLNX_AGG_STATE_NONE  = 0,
	QLNX_AGG_STATE_START = 1,
	QLNX_AGG_STATE_ERROR = 2
};

struct qlnx_agg_info {
	/* rx_buf is a data buffer that can be placed/consumed from the rx bd
	 * chain. It has two purposes: we preallocate the data buffer
	 * for each aggregation when we open the interface and place this
	 * buffer on the rx-bd-ring when we receive TPA_START. We don't want
	 * to be in a state where allocation fails, as we can't reuse the
	 * consumer buffer in the rx-chain since FW may still be writing to it
	 * (since the header needs to be modified for TPA).
	 * The second purpose is to keep a pointer to the bd buffer during
	 * aggregation.
	 */
	struct sw_rx_data	rx_buf;
	enum qlnx_agg_state	agg_state;
	uint16_t		placement_offset;
	struct mbuf		*mpf; /* first mbuf in chain */
	struct mbuf		*mpl; /* last mbuf in chain */
};

#define RX_RING_SIZE_POW        13
#define RX_RING_SIZE            (1 << RX_RING_SIZE_POW)

#define TX_RING_SIZE_POW        14
#define TX_RING_SIZE            (1 << TX_RING_SIZE_POW)
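
/*
 * Editorial note: the ring sizes above work out to
 * RX_RING_SIZE == (1 << 13) == 8192 entries and
 * TX_RING_SIZE == (1 << 14) == 16384 entries. Power-of-two sizes let
 * producer/consumer indices wrap with a simple mask, e.g.
 *
 *	idx = (idx + 1) & (RX_RING_SIZE - 1);
 */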

struct qlnx_rx_queue {
	volatile __le16		*hw_cons_ptr;
	struct sw_rx_data	sw_rx_ring[RX_RING_SIZE];
	uint16_t		sw_rx_cons;
	uint16_t		sw_rx_prod;
	struct ecore_chain	rx_bd_ring;
	struct ecore_chain	rx_comp_ring;
	void __iomem		*hw_rxq_prod_addr;
	void			*handle;

	/* LRO */
	struct qlnx_agg_info	tpa_info[ETH_TPA_MAX_AGGS_NUM];

	uint32_t		rx_buf_size;

	uint16_t		num_rx_buffers;
	uint16_t		rxq_id;

#ifdef QLNX_SOFT_LRO
	struct lro_ctrl		lro;
#endif
};

union db_prod {
	struct eth_db_data	data;
	uint32_t		raw;
};
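
/*
 * Sketch (illustrative, not from the original source): db_prod lets
 * the driver compose the doorbell record field-by-field through .data
 * and then post it to the doorbell BAR as a single 32-bit word through
 * .raw, roughly:
 *
 *	union db_prod db;
 *
 *	db.data.bd_prod = htole16(bd_prod);
 *	bus_write_4(..., db.raw);	// hypothetical doorbell write
 */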

struct sw_tx_bd {
	struct mbuf		*mp;
	bus_dmamap_t		map;
	uint8_t			flags;
	int			nsegs;

/* Set on the first BD descriptor when there is a split BD */
#define QLNX_TSO_SPLIT_BD               (1<<0)
};

#define QLNX_MAX_SEGMENTS		255
struct qlnx_tx_queue {
	int			index; /* Queue index */
	volatile __le16		*hw_cons_ptr;
	struct sw_tx_bd		sw_tx_ring[TX_RING_SIZE];
	uint16_t		sw_tx_cons;
	uint16_t		sw_tx_prod;
	struct ecore_chain	tx_pbl;
	void __iomem		*doorbell_addr;
	void			*handle;
	union db_prod		tx_db;

	bus_dma_segment_t	segs[QLNX_MAX_SEGMENTS];

	uint16_t		num_tx_buffers;
};

#define BD_UNMAP_ADDR(bd)	HILO_U64(le32toh((bd)->addr.hi), \
					le32toh((bd)->addr.lo))
#define BD_UNMAP_LEN(bd)	(le16toh((bd)->nbytes))

#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
        do { \
                (bd)->addr.hi = htole32(U64_HI(maddr)); \
                (bd)->addr.lo = htole32(U64_LO(maddr)); \
                (bd)->nbytes = htole16(len); \
        } while (0)
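
/*
 * Illustrative pairing of the BD accessors above (not from the
 * original source): the transmit path stores a DMA segment into a BD,
 * and the completion path reads it back; "bd" and "seg" are
 * hypothetical locals:
 *
 *	BD_SET_UNMAP_ADDR_LEN(bd, seg.ds_addr, seg.ds_len);
 *	...
 *	bus_addr_t addr = BD_UNMAP_ADDR(bd);
 *	uint16_t   len  = BD_UNMAP_LEN(bd);
 */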

#define QLNX_FP_MAX_SEGS	24

struct qlnx_fastpath {
	void			*edev;
	uint8_t			rss_id;
	struct ecore_sb_info	*sb_info;
	struct qlnx_rx_queue	*rxq;
	struct qlnx_tx_queue	*txq[MAX_NUM_TC];
	char			name[64];

	struct mtx		tx_mtx;
	char			tx_mtx_name[32];
	struct buf_ring		*tx_br;
	uint32_t		tx_ring_full;

	struct task		fp_task;
	struct taskqueue	*fp_taskqueue;

	/* transmit statistics */
	uint64_t		tx_pkts_processed;
	uint64_t		tx_pkts_freed;
	uint64_t		tx_pkts_transmitted;
	uint64_t		tx_pkts_completed;
	uint64_t		tx_tso_pkts;
	uint64_t		tx_non_tso_pkts;

#ifdef QLNX_TRACE_PERF_DATA
	uint64_t		tx_pkts_trans_ctx;
	uint64_t		tx_pkts_compl_ctx;
	uint64_t		tx_pkts_trans_fp;
	uint64_t		tx_pkts_compl_fp;
	uint64_t		tx_pkts_compl_intr;
#endif

	uint64_t		tx_lso_wnd_min_len;
	uint64_t		tx_defrag;
	uint64_t		tx_nsegs_gt_elem_left;
	uint32_t		tx_tso_max_nsegs;
	uint32_t		tx_tso_min_nsegs;
	uint32_t		tx_tso_max_pkt_len;
	uint32_t		tx_tso_min_pkt_len;
	uint64_t		tx_pkts[QLNX_FP_MAX_SEGS];

#ifdef QLNX_TRACE_PERF_DATA
	uint64_t		tx_pkts_hist[QLNX_FP_MAX_SEGS];
	uint64_t		tx_comInt[QLNX_FP_MAX_SEGS];
	uint64_t		tx_pkts_q[QLNX_FP_MAX_SEGS];
#endif

	uint64_t		err_tx_nsegs_gt_elem_left;
	uint64_t		err_tx_dmamap_create;
	uint64_t		err_tx_defrag_dmamap_load;
	uint64_t		err_tx_non_tso_max_seg;
	uint64_t		err_tx_dmamap_load;
	uint64_t		err_tx_defrag;
	uint64_t		err_tx_free_pkt_null;
	uint64_t		err_tx_cons_idx_conflict;

	uint64_t		lro_cnt_64;
	uint64_t		lro_cnt_128;
	uint64_t		lro_cnt_256;
	uint64_t		lro_cnt_512;
	uint64_t		lro_cnt_1024;

	/* receive statistics */
	uint64_t		rx_pkts;
	uint64_t		tpa_start;
	uint64_t		tpa_cont;
	uint64_t		tpa_end;
	uint64_t		err_m_getcl;
	uint64_t		err_m_getjcl;
	uint64_t		err_rx_hw_errors;
	uint64_t		err_rx_alloc_errors;
	uint64_t		err_rx_jumbo_chain_pkts;
	uint64_t		err_rx_mp_null;
	uint64_t		err_rx_tpa_invalid_agg_num;
};

struct qlnx_update_vport_params {
	uint8_t			vport_id;
	uint8_t			update_vport_active_rx_flg;
	uint8_t			vport_active_rx_flg;
	uint8_t			update_vport_active_tx_flg;
	uint8_t			vport_active_tx_flg;
	uint8_t			update_inner_vlan_removal_flg;
	uint8_t			inner_vlan_removal_flg;
	struct ecore_rss_params	*rss_params;
	struct ecore_sge_tpa_params *sge_tpa_params;
};

/*
 * link related
 */
struct qlnx_link_output {
	bool		link_up;
	uint32_t	supported_caps;
	uint32_t	advertised_caps;
	uint32_t	link_partner_caps;
	uint32_t	speed; /* In Mb/s */
	bool		autoneg;
	uint32_t	media_type;
	uint32_t	duplex;
};
typedef struct qlnx_link_output qlnx_link_output_t;

#define QLNX_LINK_DUPLEX			0x0001

#define QLNX_LINK_CAP_FIBRE			0x0001
#define QLNX_LINK_CAP_Autoneg			0x0002
#define QLNX_LINK_CAP_Pause			0x0004
#define QLNX_LINK_CAP_Asym_Pause		0x0008
#define QLNX_LINK_CAP_1000baseT_Half		0x0010
#define QLNX_LINK_CAP_1000baseT_Full		0x0020
#define QLNX_LINK_CAP_10000baseKR_Full		0x0040
#define QLNX_LINK_CAP_25000baseKR_Full		0x0080
#define QLNX_LINK_CAP_40000baseLR4_Full		0x0100
#define QLNX_LINK_CAP_50000baseKR2_Full		0x0200
#define QLNX_LINK_CAP_100000baseKR4_Full	0x0400
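
/*
 * The QLNX_LINK_CAP_* values are single-bit masks, so capability sets
 * are tested and combined with plain bitwise operations; a hedged
 * example (variable names are illustrative):
 *
 *	if (if_link.supported_caps & QLNX_LINK_CAP_Autoneg)
 *		caps |= (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause);
 */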

/* Function definitions */

#define XMIT_PLAIN              0
#define XMIT_L4_CSUM            (1 << 0)
#define XMIT_LSO                (1 << 1)

#define CQE_FLAGS_ERR   (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<       \
                         PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT |       \
                         PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<     \
                         PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT |     \
                         PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << \
                         PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | \
                         PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << \
                         PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT)
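
/*
 * CQE_FLAGS_ERR ORs the IP and L4 header-error bits (inner and tunnel)
 * into one test mask, so a receive path can reject a completion with a
 * single check, e.g. (illustrative):
 *
 *	if (cqe->pars_flags.flags & CQE_FLAGS_ERR)
 *		fp->err_rx_hw_errors++;
 */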

#define RX_COPY_THRESH          92
#define ETH_MAX_PACKET_SIZE     1500

#define QLNX_MFW_VERSION_LENGTH 32
#define QLNX_STORMFW_VERSION_LENGTH 32

#define QLNX_TX_ELEM_RESERVE		2
#define QLNX_TX_ELEM_THRESH		128
#define QLNX_TX_ELEM_MAX_THRESH		512
#define QLNX_TX_ELEM_MIN_THRESH		32
#define QLNX_TX_COMPL_THRESH		32

#define QLNX_TPA_MAX_AGG_BUFFERS	(20)

#define QLNX_MAX_NUM_MULTICAST_ADDRS	ECORE_MAX_MC_ADDRS
typedef struct _qlnx_mcast {
	uint16_t	rsrvd;
	uint8_t		addr[6];
} __packed qlnx_mcast_t;
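
/*
 * With __packed, a qlnx_mcast_t occupies exactly 8 bytes (a 2-byte
 * reserved field followed by a 6-byte MAC address). A compile-time
 * guard along these lines (illustrative, not in the original source)
 * would catch layout drift:
 *
 *	CTASSERT(sizeof(qlnx_mcast_t) == 8);
 */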

/*
 * Adapter structure contains the hardware independent information of the
 * PCI function.
 */
struct qlnx_host {

	/* interface to ecore */

	struct ecore_dev	cdev;

	uint32_t		state;

	/* some flags */
	volatile struct {
		volatile uint32_t
			callout_init		:1,
			slowpath_start		:1,
			parent_tag		:1,
			lock_init		:1;
	} flags;

	/* interface to O.S. */

	device_t		pci_dev;
	uint8_t			pci_func;
	uint8_t			dev_unit;

	struct ifnet		*ifp;
	int			if_flags;
	volatile int		link_up;
	struct ifmedia		media;
	uint16_t		max_frame_size;

	struct cdev		*ioctl_dev;

	/* resources */
	struct resource		*pci_reg;
	int			reg_rid;

	struct resource		*pci_dbells;
	int			dbells_rid;
	uint64_t		dbells_phys_addr;
	uint32_t		dbells_size;

	struct resource		*msix_bar;
	int			msix_rid;

	int			msix_count;

	struct mtx		hw_lock;

	/* debug */

	uint32_t		dbg_level;
	uint32_t		dbg_trace_lro_cnt;
	uint32_t		dbg_trace_tso_pkt_len;
	uint32_t		dp_level;
	uint32_t		dp_module;

	/* misc */
	uint8_t			mfw_ver[QLNX_MFW_VERSION_LENGTH];
	uint8_t			stormfw_ver[QLNX_STORMFW_VERSION_LENGTH];
	uint32_t		flash_size;

	/* dma related */

	bus_dma_tag_t		parent_tag;
	bus_dma_tag_t		tx_tag;
	bus_dma_tag_t		rx_tag;

	struct ecore_sb_info	sb_array[QLNX_MAX_RSS];
	struct qlnx_rx_queue	rxq_array[QLNX_MAX_RSS];
	struct qlnx_tx_queue	txq_array[(QLNX_MAX_RSS * MAX_NUM_TC)];
	struct qlnx_fastpath	fp_array[QLNX_MAX_RSS];

	/* tx related */
	struct callout		tx_callout;
	uint32_t		txr_idx;

	/* rx related */
	uint32_t		rx_pkt_threshold;
	uint32_t		rx_jumbo_buf_eq_mtu;

	/* slow path related */
	struct resource		*sp_irq[MAX_HWFNS_PER_DEVICE];
	void			*sp_handle[MAX_HWFNS_PER_DEVICE];
	int			sp_irq_rid[MAX_HWFNS_PER_DEVICE];
	struct task		sp_task[MAX_HWFNS_PER_DEVICE];
	struct taskqueue	*sp_taskqueue[MAX_HWFNS_PER_DEVICE];

	struct callout		qlnx_callout;

	/* fast path related */
	int			num_rss;
	int			num_tc;

#define QLNX_MAX_TSS_CNT(ha)	((ha)->num_rss * (ha)->num_tc)

	qlnx_ivec_t		irq_vec[QLNX_MAX_RSS];

	uint8_t			filter;
	uint32_t		nmcast;
	qlnx_mcast_t		mcast[QLNX_MAX_NUM_MULTICAST_ADDRS];
	struct ecore_filter_mcast ecore_mcast;
	uint8_t			primary_mac[ETH_ALEN];
	uint8_t			prio_to_tc[MAX_NUM_PRI];
	struct ecore_eth_stats	hw_stats;
	struct ecore_rss_params	rss_params;
	uint32_t		rx_buf_size;
	bool			rx_csum_offload;

	uint32_t		rx_coalesce_usecs;
	uint32_t		tx_coalesce_usecs;

	/* link related */
	qlnx_link_output_t	if_link;

	/* global counters */
	uint64_t		sp_interrupts;
	uint64_t		err_illegal_intr;
	uint64_t		err_fp_null;
	uint64_t		err_get_proto_invalid_type;

	/* grcdump related */
	uint32_t		err_inject;
	uint32_t		grcdump_taken;
	uint32_t		grcdump_dwords[QLNX_MAX_HW_FUNCS];
	uint32_t		grcdump_size[QLNX_MAX_HW_FUNCS];
	void			*grcdump[QLNX_MAX_HW_FUNCS];

	uint32_t		idle_chk_taken;
	uint32_t		idle_chk_dwords[QLNX_MAX_HW_FUNCS];
	uint32_t		idle_chk_size[QLNX_MAX_HW_FUNCS];
	void			*idle_chk[QLNX_MAX_HW_FUNCS];

	/* storm stats related */
#define QLNX_STORM_STATS_TOTAL \
		(QLNX_MAX_HW_FUNCS * QLNX_STORM_STATS_SAMPLES_PER_HWFN)
	qlnx_storm_stats_t	storm_stats[QLNX_STORM_STATS_TOTAL];
	uint32_t		storm_stats_index;
	uint32_t		storm_stats_enable;
	uint32_t		storm_stats_gather;

	uint32_t		personality;
};

typedef struct qlnx_host qlnx_host_t;

/* note that align has to be a power of 2 */
#define QL_ALIGN(size, align)	(((size) + ((align) - 1)) & ~((align) - 1))
#define QL_MIN(x, y)		(((x) < (y)) ? (x) : (y))
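
/*
 * Worked examples for the helpers above (illustrative): with a
 * power-of-two alignment,
 *
 *	QL_ALIGN(100, 64) == 128	(rounds up)
 *	QL_ALIGN(128, 64) == 128	(already aligned)
 *	QL_MIN(9000, 16384) == 9000
 */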

#define QL_RUNNING(ifp) \
		(((ifp)->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
			IFF_DRV_RUNNING)

#define QLNX_MAX_MTU			9000
#define QLNX_MAX_SEGMENTS_NON_TSO	(ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1)
//#define QLNX_MAX_TSO_FRAME_SIZE		((64 * 1024 - 1) + 22)
#define QLNX_MAX_TSO_FRAME_SIZE		65536
#define QLNX_MAX_TX_MBUF_SIZE		65536	/* bytes - bd_len = 16 bits */

#define QL_MAC_CMP(mac1, mac2) \
	(((*(uint32_t *)(mac1) == *(uint32_t *)(mac2)) && \
	(*(uint16_t *)((mac1) + 4) == *(uint16_t *)((mac2) + 4))) ? 0 : 1)

#define for_each_rss(i) for (i = 0; i < ha->num_rss; i++)
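
/*
 * for_each_rss hardcodes a qlnx_host_t pointer named "ha", which must
 * be in scope at the call site; a typical (illustrative) walk over the
 * active fastpath queues:
 *
 *	int i;
 *
 *	for_each_rss(i) {
 *		struct qlnx_fastpath *fp = &ha->fp_array[i];
 *		...
 *	}
 */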

/*
 * Debug Related
 */

#ifdef QLNX_DEBUG

#define QL_DPRINT1(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0001) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT2(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0002) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT3(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0004) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT4(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0008) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT5(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0010) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT6(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0020) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT7(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0040) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT8(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0080) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT9(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0100) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT11(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0400) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT12(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0800) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT13(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x1000) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#else

#define QL_DPRINT1(ha, x, ...)
#define QL_DPRINT2(ha, x, ...)
#define QL_DPRINT3(ha, x, ...)
#define QL_DPRINT4(ha, x, ...)
#define QL_DPRINT5(ha, x, ...)
#define QL_DPRINT6(ha, x, ...)
#define QL_DPRINT7(ha, x, ...)
#define QL_DPRINT8(ha, x, ...)
#define QL_DPRINT9(ha, x, ...)
#define QL_DPRINT11(ha, x, ...)
#define QL_DPRINT12(ha, x, ...)
#define QL_DPRINT13(ha, x, ...)

#endif /* #ifdef QLNX_DEBUG */
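
/*
 * Usage sketch (illustrative): each QL_DPRINTn level is gated by one
 * bit of ha->dbg_level and prefixes the message with the function name
 * and line number, so callers pass only a format string and arguments:
 *
 *	QL_DPRINT1(ha, "mtu = %d\n", (int)ha->max_frame_size);
 */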

#define QL_ASSERT(ha, x, y)	if (!(x)) panic y

#define QL_ERR_INJECT(ha, val)		((ha)->err_inject == (val))
#define QL_RESET_ERR_INJECT(ha, val)		\
	do {					\
		if ((ha)->err_inject == (val))	\
			(ha)->err_inject = 0;	\
	} while (0)
#define QL_ERR_INJCT_TX_INT_DIFF	0x0001
#define QL_ERR_INJCT_TX_INT_MBUF_NULL	0x0002
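
/*
 * Error-injection sketch (illustrative): a code path checks whether a
 * given fault is armed, simulates it once, then disarms it:
 *
 *	if (QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)) {
 *		mp = NULL;	// simulate the fault
 *		QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
 *	}
 */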

/*
 * exported functions
 */
extern int qlnx_make_cdev(qlnx_host_t *ha);
extern void qlnx_del_cdev(qlnx_host_t *ha);
extern int qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
		int hwfn_index);
extern int qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
		int hwfn_index);
extern uint8_t *qlnx_get_mac_addr(qlnx_host_t *ha);
extern void qlnx_fill_link(struct ecore_hwfn *hwfn,
		struct qlnx_link_output *if_link);

/*
 * Some OS specific stuff
 */

#if (defined IFM_100G_SR4)
#define QLNX_IFM_100G_SR4 IFM_100G_SR4
#define QLNX_IFM_100G_LR4 IFM_100G_LR4
#define QLNX_IFM_100G_CR4 IFM_100G_CR4
#else
#define QLNX_IFM_100G_SR4 IFM_UNKNOWN
#define QLNX_IFM_100G_LR4 IFM_UNKNOWN
#define QLNX_IFM_100G_CR4 IFM_UNKNOWN
#endif

#if (defined IFM_25G_SR)
#define QLNX_IFM_25G_SR IFM_25G_SR
#define QLNX_IFM_25G_CR IFM_25G_CR
#else
#define QLNX_IFM_25G_SR IFM_UNKNOWN
#define QLNX_IFM_25G_CR IFM_UNKNOWN
#endif

694
695#if __FreeBSD_version < 1100000
696
697#define QLNX_INC_IERRORS(ifp)		ifp->if_ierrors++
698#define QLNX_INC_IQDROPS(ifp)		ifp->if_iqdrops++
699#define QLNX_INC_IPACKETS(ifp)		ifp->if_ipackets++
700#define QLNX_INC_OPACKETS(ifp)		ifp->if_opackets++
701#define QLNX_INC_OBYTES(ifp, len)	ifp->if_obytes += len
702#define QLNX_INC_IBYTES(ifp, len)	ifp->if_ibytes += len
703
704#else
705
706#define QLNX_INC_IERRORS(ifp)	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1)
707#define QLNX_INC_IQDROPS(ifp)	if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1)
708#define QLNX_INC_IPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1)
709#define QLNX_INC_OPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1)
710
711#define QLNX_INC_OBYTES(ifp, len)	\
712			if_inc_counter(ifp, IFCOUNTER_OBYTES, len)
713#define QLNX_INC_IBYTES(ifp, len)	\
714			if_inc_counter(ha->ifp, IFCOUNTER_IBYTES, len)
715
716#endif /* #if __FreeBSD_version < 1100000 */

#define CQE_L3_PACKET(flags)	\
	((((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv4) || \
	(((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv6))

#define CQE_IP_HDR_ERR(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK \
		<< PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT))

#define CQE_L4_HAS_CSUM(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK \
		<< PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT))

#define CQE_HAS_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
		<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
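
/*
 * Illustrative receive-side checksum decision built from the CQE
 * predicates above (not from the original source):
 *
 *	if (CQE_L3_PACKET(flags) && !CQE_IP_HDR_ERR(flags) &&
 *	    CQE_L4_HAS_CSUM(flags))
 *		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 */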

#if defined(__i386__) || defined(__amd64__)

static __inline
void prefetch(void *x)
{
        __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}

#else
#define prefetch(x)
#endif

#endif /* #ifndef _QLNX_DEF_H_ */