/*-
 * Copyright (c) 2013 Tsubai Masanari
 * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $FreeBSD: stable/11/sys/dev/vmware/vmxnet3/if_vmxvar.h 337856 2018-08-15 16:16:59Z loos $
 */

#ifndef _IF_VMXVAR_H
#define _IF_VMXVAR_H

struct vmxnet3_softc;

struct vmxnet3_dma_alloc {
	bus_addr_t		dma_paddr;
	caddr_t			dma_vaddr;
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
	bus_size_t		dma_size;
};

/*
 * The number of Rx/Tx queues this driver prefers.
 */
#define VMXNET3_DEF_RX_QUEUES	8
#define VMXNET3_DEF_TX_QUEUES	8

/*
 * The number of Rx rings in each Rx queue.
 */
#define VMXNET3_RXRINGS_PERQ	2

/*
 * The number of descriptors in each Rx/Tx ring.
 */
#define VMXNET3_DEF_TX_NDESC		512
#define VMXNET3_MAX_TX_NDESC		4096
#define VMXNET3_MIN_TX_NDESC		32
#define VMXNET3_MASK_TX_NDESC		0x1F
#define VMXNET3_DEF_RX_NDESC		256
#define VMXNET3_MAX_RX_NDESC		2048
#define VMXNET3_MIN_RX_NDESC		32
#define VMXNET3_MASK_RX_NDESC		0x1F
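/*
 * The _MASK_ values above are presumably used as sanity masks: a ring
 * size is expected to be a multiple of 32.  An illustrative check (the
 * variable name "ndesc" is assumed, not taken from the driver):
 *
 *	if (ndesc < VMXNET3_MIN_TX_NDESC || ndesc > VMXNET3_MAX_TX_NDESC ||
 *	    (ndesc & VMXNET3_MASK_TX_NDESC) != 0)
 *		ndesc = VMXNET3_DEF_TX_NDESC;
 */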

#define VMXNET3_MAX_TX_NCOMPDESC	VMXNET3_MAX_TX_NDESC
#define VMXNET3_MAX_RX_NCOMPDESC \
    (VMXNET3_MAX_RX_NDESC * VMXNET3_RXRINGS_PERQ)

struct vmxnet3_txbuf {
	bus_dmamap_t		 vtxb_dmamap;
	struct mbuf		*vtxb_m;
};

struct vmxnet3_txring {
	struct vmxnet3_txbuf	*vxtxr_txbuf;
	u_int			 vxtxr_head;
	u_int			 vxtxr_next;
	u_int			 vxtxr_ndesc;
	int			 vxtxr_gen;
	bus_dma_tag_t		 vxtxr_txtag;
	struct vmxnet3_txdesc	*vxtxr_txd;
	struct vmxnet3_dma_alloc vxtxr_dma;
};

static inline int
VMXNET3_TXRING_AVAIL(struct vmxnet3_txring *txr)
{
	int avail = txr->vxtxr_next - txr->vxtxr_head - 1;
	return (avail < 0 ? txr->vxtxr_ndesc + avail : avail);
}
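
/*
 * Worked example of the wrap-around arithmetic above, reading vxtxr_head
 * as the slot where the next descriptor is posted and vxtxr_next as the
 * next descriptor awaiting completion.  One slot is always held back so
 * a full ring is distinguishable from an empty one: with head == next the
 * result is ndesc - 1, not ndesc.  With vxtxr_ndesc = 512, vxtxr_head = 500
 * and vxtxr_next = 10, the raw value is 10 - 500 - 1 = -491, which wraps
 * to 512 + (-491) = 21 free slots.
 */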

struct vmxnet3_rxbuf {
	bus_dmamap_t		 vrxb_dmamap;
	struct mbuf		*vrxb_m;
};

struct vmxnet3_rxring {
	struct vmxnet3_rxbuf	*vxrxr_rxbuf;
	struct vmxnet3_rxdesc	*vxrxr_rxd;
	u_int			 vxrxr_fill;
	u_int			 vxrxr_ndesc;
	int			 vxrxr_gen;
	int			 vxrxr_rid;
	bus_dma_tag_t		 vxrxr_rxtag;
	struct vmxnet3_dma_alloc vxrxr_dma;
	bus_dmamap_t		 vxrxr_spare_dmap;
};

static inline void
vmxnet3_rxr_increment_fill(struct vmxnet3_rxring *rxr)
{

	if (++rxr->vxrxr_fill == rxr->vxrxr_ndesc) {
		rxr->vxrxr_fill = 0;
		rxr->vxrxr_gen ^= 1;
	}
}
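
/*
 * Note: each time the fill index wraps, the ring's generation bit is
 * flipped.  Descriptors are posted with the current generation value,
 * which is how freshly filled descriptors are told apart from stale
 * ones left over from the previous pass around the ring.
 */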

struct vmxnet3_comp_ring {
	union {
		struct vmxnet3_txcompdesc *txcd;
		struct vmxnet3_rxcompdesc *rxcd;
	}			 vxcr_u;
	u_int			 vxcr_next;
	u_int			 vxcr_ndesc;
	int			 vxcr_gen;
	struct vmxnet3_dma_alloc vxcr_dma;
};

struct vmxnet3_txq_stats {
	uint64_t		vmtxs_opackets;	/* if_opackets */
	uint64_t		vmtxs_obytes;	/* if_obytes */
	uint64_t		vmtxs_omcasts;	/* if_omcasts */
	uint64_t		vmtxs_csum;
	uint64_t		vmtxs_tso;
	uint64_t		vmtxs_full;
	uint64_t		vmtxs_offload_failed;
};

struct vmxnet3_txqueue {
	struct mtx			 vxtxq_mtx;
	struct vmxnet3_softc		*vxtxq_sc;
#ifndef VMXNET3_LEGACY_TX
	struct buf_ring			*vxtxq_br;
#endif
	int				 vxtxq_id;
	int				 vxtxq_intr_idx;
	int				 vxtxq_watchdog;
	struct vmxnet3_txring		 vxtxq_cmd_ring;
	struct vmxnet3_comp_ring	 vxtxq_comp_ring;
	struct vmxnet3_txq_stats	 vxtxq_stats;
	struct vmxnet3_txq_shared	*vxtxq_ts;
	struct sysctl_oid_list		*vxtxq_sysctl;
#ifndef VMXNET3_LEGACY_TX
	struct task			 vxtxq_defrtask;
#endif
	char				 vxtxq_name[16];
} __aligned(CACHE_LINE_SIZE);

#define VMXNET3_TXQ_LOCK(_txq)		mtx_lock(&(_txq)->vxtxq_mtx)
#define VMXNET3_TXQ_TRYLOCK(_txq)	mtx_trylock(&(_txq)->vxtxq_mtx)
#define VMXNET3_TXQ_UNLOCK(_txq)	mtx_unlock(&(_txq)->vxtxq_mtx)
#define VMXNET3_TXQ_LOCK_ASSERT(_txq)		\
    mtx_assert(&(_txq)->vxtxq_mtx, MA_OWNED)
#define VMXNET3_TXQ_LOCK_ASSERT_NOTOWNED(_txq)	\
    mtx_assert(&(_txq)->vxtxq_mtx, MA_NOTOWNED)
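
/*
 * Illustrative sketch, not the authoritative transmit path: the TRYLOCK
 * variant lets a transmit routine avoid blocking on a busy queue by
 * enqueueing the mbuf and deferring the work instead (the locked-start
 * helper name below is assumed), roughly:
 *
 *	if (VMXNET3_TXQ_TRYLOCK(txq)) {
 *		error = vmxnet3_txq_mq_start_locked(txq, m);
 *		VMXNET3_TXQ_UNLOCK(txq);
 *	} else {
 *		error = drbr_enqueue(ifp, txq->vxtxq_br, m);
 *		taskqueue_enqueue(sc->vmx_tq, &txq->vxtxq_defrtask);
 *	}
 */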

struct vmxnet3_rxq_stats {
	uint64_t		vmrxs_ipackets;	/* if_ipackets */
	uint64_t		vmrxs_ibytes;	/* if_ibytes */
	uint64_t		vmrxs_iqdrops;	/* if_iqdrops */
	uint64_t		vmrxs_ierrors;	/* if_ierrors */
};

struct vmxnet3_rxqueue {
	struct mtx			 vxrxq_mtx;
	struct vmxnet3_softc		*vxrxq_sc;
	int				 vxrxq_id;
	int				 vxrxq_intr_idx;
	struct mbuf			*vxrxq_mhead;
	struct mbuf			*vxrxq_mtail;
	struct vmxnet3_rxring		 vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ];
	struct vmxnet3_comp_ring	 vxrxq_comp_ring;
	struct vmxnet3_rxq_stats	 vxrxq_stats;
	struct vmxnet3_rxq_shared	*vxrxq_rs;
	struct sysctl_oid_list		*vxrxq_sysctl;
	char				 vxrxq_name[16];
} __aligned(CACHE_LINE_SIZE);

#define VMXNET3_RXQ_LOCK(_rxq)		mtx_lock(&(_rxq)->vxrxq_mtx)
#define VMXNET3_RXQ_UNLOCK(_rxq)	mtx_unlock(&(_rxq)->vxrxq_mtx)
#define VMXNET3_RXQ_LOCK_ASSERT(_rxq)		\
    mtx_assert(&(_rxq)->vxrxq_mtx, MA_OWNED)
#define VMXNET3_RXQ_LOCK_ASSERT_NOTOWNED(_rxq)	\
    mtx_assert(&(_rxq)->vxrxq_mtx, MA_NOTOWNED)

struct vmxnet3_statistics {
	uint32_t		vmst_defragged;
	uint32_t		vmst_defrag_failed;
	uint32_t		vmst_mgetcl_failed;
	uint32_t		vmst_mbuf_load_failed;
};

struct vmxnet3_interrupt {
	struct resource		*vmxi_irq;
	int			 vmxi_rid;
	void			*vmxi_handler;
};

struct vmxnet3_softc {
	device_t			 vmx_dev;
	struct ifnet			*vmx_ifp;
	struct vmxnet3_driver_shared	*vmx_ds;
	uint32_t			 vmx_flags;
#define VMXNET3_FLAG_NO_MSIX	0x0001
#define VMXNET3_FLAG_RSS	0x0002

	struct vmxnet3_rxqueue		*vmx_rxq;
	struct vmxnet3_txqueue		*vmx_txq;

	struct resource			*vmx_res0;
	bus_space_tag_t			 vmx_iot0;
	bus_space_handle_t		 vmx_ioh0;
	struct resource			*vmx_res1;
	bus_space_tag_t			 vmx_iot1;
	bus_space_handle_t		 vmx_ioh1;
	struct resource			*vmx_msix_res;

	int				 vmx_link_active;
	int				 vmx_link_speed;
	int				 vmx_if_flags;
	int				 vmx_ntxqueues;
	int				 vmx_nrxqueues;
	int				 vmx_ntxdescs;
	int				 vmx_nrxdescs;
	int				 vmx_max_rxsegs;
	int				 vmx_rx_max_chain;

	struct vmxnet3_statistics	 vmx_stats;

	int				 vmx_intr_type;
	int				 vmx_intr_mask_mode;
	int				 vmx_event_intr_idx;
	int				 vmx_nintrs;
	struct vmxnet3_interrupt	 vmx_intrs[VMXNET3_MAX_INTRS];

	struct mtx			 vmx_mtx;
#ifndef VMXNET3_LEGACY_TX
	struct taskqueue		*vmx_tq;
#endif
	uint8_t				*vmx_mcast;
	void				*vmx_qs;
	struct vmxnet3_rss_shared	*vmx_rss;
	struct callout			 vmx_tick;
	struct vmxnet3_dma_alloc	 vmx_ds_dma;
	struct vmxnet3_dma_alloc	 vmx_qs_dma;
	struct vmxnet3_dma_alloc	 vmx_mcast_dma;
	struct vmxnet3_dma_alloc	 vmx_rss_dma;
	struct ifmedia			 vmx_media;
	int				 vmx_max_ntxqueues;
	int				 vmx_max_nrxqueues;
	eventhandler_tag		 vmx_vlan_attach;
	eventhandler_tag		 vmx_vlan_detach;
	uint32_t			 vmx_vlan_filter[4096/32];
	uint8_t				 vmx_lladdr[ETHER_ADDR_LEN];
};

#define VMXNET3_CORE_LOCK_INIT(_sc, _name) \
    mtx_init(&(_sc)->vmx_mtx, _name, "VMXNET3 Lock", MTX_DEF)
#define VMXNET3_CORE_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->vmx_mtx)
#define VMXNET3_CORE_LOCK(_sc)		mtx_lock(&(_sc)->vmx_mtx)
#define VMXNET3_CORE_UNLOCK(_sc)	mtx_unlock(&(_sc)->vmx_mtx)
#define VMXNET3_CORE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->vmx_mtx, MA_OWNED)
#define VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(_sc) \
    mtx_assert(&(_sc)->vmx_mtx, MA_NOTOWNED)
/*
 * The driver version we report to the hypervisor; this value is kept
 * constant.
 */
#define VMXNET3_DRIVER_VERSION 0x00010000

/*
 * Maximum number of descriptors per Tx packet. The size of any TSO
 * packet must be limited based on the number of segments.
 */
#define VMXNET3_TX_MAXSEGS		32
#define VMXNET3_TX_MAXSIZE		(VMXNET3_TX_MAXSEGS * MCLBYTES)
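
/*
 * For reference: with the usual 2 KB MCLBYTES, VMXNET3_TX_MAXSIZE works
 * out to 32 * 2048 = 65536 bytes, i.e. a 64 KB ceiling on a single
 * (TSO) transmit.
 */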

/*
 * Maximum supported Tx segment size. The length field in the
 * Tx descriptor is 14 bits.
 */
#define VMXNET3_TX_MAXSEGSIZE		(1 << 14)

/*
 * The maximum number of Rx segments we accept. When LRO is enabled,
 * this allows us to receive the maximum sized frame with one MCLBYTES
 * cluster followed by 16 MJUMPAGESIZE clusters.
 */
#define VMXNET3_MAX_RX_SEGS		17
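
/*
 * For reference: assuming the common 2 KB MCLBYTES and a 4 KB
 * MJUMPAGESIZE, such a chain covers 2048 + 16 * 4096 = 67584 bytes,
 * comfortably above the 65535-byte maximum LRO-aggregated frame.
 */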

/*
 * Predetermined size of the multicast MAC filter table. If the
 * number of multicast addresses exceeds this size, ALL_MULTI mode
 * is used instead.
 */
#define VMXNET3_MULTICAST_MAX		32

/*
 * Our Tx watchdog timeout.
 */
#define VMXNET3_WATCHDOG_TIMEOUT	5

/*
 * Number of slots in the Tx bufrings. This value matches most other
 * multiqueue drivers.
 */
#define VMXNET3_DEF_BUFRING_SIZE	4096

/*
 * IP protocols for which we can perform Tx checksum offloading.
 */
#define VMXNET3_CSUM_OFFLOAD		(CSUM_TCP | CSUM_UDP)
#define VMXNET3_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

#define VMXNET3_CSUM_ALL_OFFLOAD	\
    (VMXNET3_CSUM_OFFLOAD | VMXNET3_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
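
/*
 * Illustrative use, not the authoritative encap path: the Tx routine can
 * test an mbuf's checksum flags against these masks to decide whether any
 * offload work has been requested, e.g.:
 *
 *	if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD)
 *		... fill in the offload fields of the Tx descriptor ...
 */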

/*
 * Compat macros to keep this driver compiling on old releases.
 */

#if !defined(SYSCTL_ADD_UQUAD)
#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
#endif

#if !defined(IFCAP_TXCSUM_IPV6)
#define IFCAP_TXCSUM_IPV6 0
#endif

#if !defined(IFCAP_RXCSUM_IPV6)
#define IFCAP_RXCSUM_IPV6 0
#endif

#if !defined(CSUM_TCP_IPV6)
#define CSUM_TCP_IPV6 0
#endif

#if !defined(CSUM_UDP_IPV6)
#define CSUM_UDP_IPV6	0
#endif

#endif /* _IF_VMXVAR_H */