/*	$OpenBSD: if_vic.c,v 1.106 2024/05/24 06:02:57 jsg Exp $	*/

/*
 * Copyright (c) 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the VMware Virtual NIC ("vmxnet")
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/timeout.h>
#include <sys/device.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define VIC_PCI_BAR		PCI_MAPREG_START /* Base Address Register */

#define VIC_LANCE_SIZE		0x20
#define VIC_MORPH_SIZE		0x04
#define  VIC_MORPH_MASK			0xffff
#define  VIC_MORPH_LANCE		0x2934
#define  VIC_MORPH_VMXNET		0x4392
#define VIC_VMXNET_SIZE		0x40
#define VIC_LANCE_MINLEN	(VIC_LANCE_SIZE + VIC_MORPH_SIZE + \
				    VIC_VMXNET_SIZE)

#define VIC_MAGIC		0xbabe864f

/* Register address offsets */
#define VIC_DATA_ADDR		0x0000		/* Shared data address */
#define VIC_DATA_LENGTH		0x0004		/* Shared data length */
#define VIC_Tx_ADDR		0x0008		/* Tx pointer address */

/* Command register */
#define VIC_CMD			0x000c		/* Command register */
#define  VIC_CMD_INTR_ACK	0x0001	/* Acknowledge interrupt */
#define  VIC_CMD_MCASTFIL	0x0002	/* Multicast address filter */
#define   VIC_CMD_MCASTFIL_LENGTH	2
#define  VIC_CMD_IFF		0x0004	/* Interface flags */
#define   VIC_CMD_IFF_PROMISC	0x0001		/* Promiscuous enabled */
#define   VIC_CMD_IFF_BROADCAST	0x0002		/* Broadcast enabled */
#define   VIC_CMD_IFF_MULTICAST	0x0004		/* Multicast enabled */
#define  VIC_CMD_INTR_DISABLE	0x0020	/* Disable interrupts */
#define  VIC_CMD_INTR_ENABLE	0x0040	/* Enable interrupts */
#define  VIC_CMD_Tx_DONE	0x0100	/* Tx done register */
#define  VIC_CMD_NUM_Rx_BUF	0x0200	/* Number of Rx buffers */
#define  VIC_CMD_NUM_Tx_BUF	0x0400	/* Number of Tx buffers */
#define  VIC_CMD_NUM_PINNED_BUF	0x0800	/* Number of pinned buffers */
#define  VIC_CMD_HWCAP		0x1000	/* Capability register */
#define   VIC_CMD_HWCAP_SG		(1<<0) /* Scatter-gather transmits */
#define   VIC_CMD_HWCAP_CSUM_IPv4	(1<<1) /* TCP/UDP cksum */
#define   VIC_CMD_HWCAP_CSUM_ALL	(1<<3) /* Hardware cksum */
#define   VIC_CMD_HWCAP_CSUM \
	(VIC_CMD_HWCAP_CSUM_IPv4 | VIC_CMD_HWCAP_CSUM_ALL)
#define   VIC_CMD_HWCAP_DMA_HIGH		(1<<4) /* High DMA mapping */
#define   VIC_CMD_HWCAP_TOE		(1<<5) /* TCP offload engine */
#define   VIC_CMD_HWCAP_TSO		(1<<6) /* TCP segmentation offload */
#define   VIC_CMD_HWCAP_TSO_SW		(1<<7) /* Software TCP segmentation */
#define   VIC_CMD_HWCAP_VPROM		(1<<8) /* Virtual PROM available */
#define   VIC_CMD_HWCAP_VLAN_Tx		(1<<9) /* Hardware VLAN MTU Tx */
#define   VIC_CMD_HWCAP_VLAN_Rx		(1<<10) /* Hardware VLAN MTU Rx */
#define   VIC_CMD_HWCAP_VLAN_SW		(1<<11)	/* Software VLAN MTU */
#define   VIC_CMD_HWCAP_VLAN \
	(VIC_CMD_HWCAP_VLAN_Tx | VIC_CMD_HWCAP_VLAN_Rx | \
	VIC_CMD_HWCAP_VLAN_SW)
#define  VIC_CMD_HWCAP_BITS \
	"\20\01SG\02CSUM4\04CSUM\05HDMA\06TOE\07TSO" \
	"\10TSOSW\11VPROM\12VLANTx\13VLANRx\14VLANSW"
#define  VIC_CMD_FEATURE	0x2000	/* Additional feature register */
#define   VIC_CMD_FEATURE_0_Tx		(1<<0)
#define   VIC_CMD_FEATURE_TSO		(1<<1)

#define VIC_LLADDR		0x0010		/* MAC address register */
#define VIC_VERSION_MINOR	0x0018		/* Minor version register */
#define VIC_VERSION_MAJOR	0x001c		/* Major version register */
#define VIC_VERSION_MAJOR_M	0xffff0000

/* Status register */
#define VIC_STATUS		0x0020
#define  VIC_STATUS_CONNECTED		(1<<0)
#define  VIC_STATUS_ENABLED		(1<<1)

#define VIC_TOE_ADDR		0x0024		/* TCP offload address */

/* Virtual PROM address */
#define VIC_VPROM		0x0028
#define VIC_VPROM_LENGTH	6

/* Shared DMA data structures */

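/*
 * Scatter-gather element: a 48-bit bus address split into low and high
 * halves, plus the length of the segment it describes.
 */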
struct vic_sg {
	u_int32_t	sg_addr_low;
	u_int16_t	sg_addr_high;
	u_int16_t	sg_length;
} __packed;

#define VIC_SG_MAX		6
#define VIC_SG_ADDR_MACH	0
#define VIC_SG_ADDR_PHYS	1
#define VIC_SG_ADDR_VIRT	3

struct vic_sgarray {
	u_int16_t	sa_addr_type;
	u_int16_t	sa_length;
	struct vic_sg	sa_sg[VIC_SG_MAX];
} __packed;

struct vic_rxdesc {
	u_int64_t	rx_physaddr;
	u_int32_t	rx_buflength;
	u_int32_t	rx_length;
	u_int16_t	rx_owner;
	u_int16_t	rx_flags;
	u_int32_t	rx_priv;
} __packed;

#define VIC_RX_FLAGS_CSUMHW_OK	0x0001

struct vic_txdesc {
	u_int16_t		tx_flags;
	u_int16_t		tx_owner;
	u_int32_t		tx_priv;
	u_int32_t		tx_tsomss;
	struct vic_sgarray	tx_sa;
} __packed;

#define VIC_TX_FLAGS_KEEP	0x0001
#define VIC_TX_FLAGS_TXURN	0x0002
#define VIC_TX_FLAGS_CSUMHW	0x0004
#define VIC_TX_FLAGS_TSO	0x0008
#define VIC_TX_FLAGS_PINNED	0x0010
#define VIC_TX_FLAGS_QRETRY	0x1000

struct vic_stats {
	u_int32_t		vs_tx_count;
	u_int32_t		vs_tx_packets;
	u_int32_t		vs_tx_0copy;
	u_int32_t		vs_tx_copy;
	u_int32_t		vs_tx_maxpending;
	u_int32_t		vs_tx_stopped;
	u_int32_t		vs_tx_overrun;
	u_int32_t		vs_intr;
	u_int32_t		vs_rx_packets;
	u_int32_t		vs_rx_underrun;
} __packed;

#define VIC_NRXRINGS		2

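/*
 * Layout of the memory area shared with the host: per-ring indexes and
 * lengths, interface flags, the multicast filter, the offsets of the
 * rings within the region, and statistics.
 */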
struct vic_data {
	u_int32_t		vd_magic;

	struct {
		u_int32_t		length;
		u_int32_t		nextidx;
	}			vd_rx[VIC_NRXRINGS];

	u_int32_t		vd_irq;
	u_int32_t		vd_iff;

	u_int32_t		vd_mcastfil[VIC_CMD_MCASTFIL_LENGTH];

	u_int32_t		vd_reserved1[1];

	u_int32_t		vd_tx_length;
	u_int32_t		vd_tx_curidx;
	u_int32_t		vd_tx_nextidx;
	u_int32_t		vd_tx_stopped;
	u_int32_t		vd_tx_triggerlvl;
	u_int32_t		vd_tx_queued;
	u_int32_t		vd_tx_minlength;

	u_int32_t		vd_reserved2[6];

	u_int32_t		vd_rx_saved_nextidx[VIC_NRXRINGS];
	u_int32_t		vd_tx_saved_nextidx;

	u_int32_t		vd_length;
	u_int32_t		vd_rx_offset[VIC_NRXRINGS];
	u_int32_t		vd_tx_offset;
	u_int32_t		vd_debug;
	u_int32_t		vd_tx_physaddr;
	u_int32_t		vd_tx_physaddr_length;
	u_int32_t		vd_tx_maxlength;

	struct vic_stats	vd_stats;
} __packed;

#define VIC_OWNER_DRIVER	0
#define VIC_OWNER_DRIVER_PEND	1
#define VIC_OWNER_NIC		2
#define VIC_OWNER_NIC_PEND	3

#define VIC_JUMBO_FRAMELEN	9018
#define VIC_JUMBO_MTU		(VIC_JUMBO_FRAMELEN - ETHER_HDR_LEN - ETHER_CRC_LEN)

#define VIC_NBUF		100
#define VIC_NBUF_MAX		128
#define VIC_MAX_SCATTER		1	/* 8? */
#define VIC_QUEUE_SIZE		VIC_NBUF_MAX
#define VIC_INC(_x, _y)		(_x) = ((_x) + 1) % (_y)
#define VIC_TX_TIMEOUT		5

#define VIC_MIN_FRAMELEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define VIC_TXURN_WARN(_sc)	((_sc)->sc_txpending >= ((_sc)->sc_ntxbuf - 5))
#define VIC_TXURN(_sc)		((_sc)->sc_txpending >= (_sc)->sc_ntxbuf)

struct vic_rxbuf {
	bus_dmamap_t		rxb_dmamap;
	struct mbuf		*rxb_m;
};

struct vic_txbuf {
	bus_dmamap_t		txb_dmamap;
	struct mbuf		*txb_m;
};

struct vic_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;

	struct timeout		sc_tick;

	struct arpcom		sc_ac;
	struct ifmedia		sc_media;

	u_int32_t		sc_nrxbuf;
	u_int32_t		sc_ntxbuf;
	u_int32_t		sc_cap;
	u_int32_t		sc_feature;
	u_int8_t		sc_lladdr[ETHER_ADDR_LEN];

	bus_dmamap_t		sc_dma_map;
	bus_dma_segment_t	sc_dma_seg;
	size_t			sc_dma_size;
	caddr_t			sc_dma_kva;
#define VIC_DMA_DVA(_sc)	((_sc)->sc_dma_map->dm_segs[0].ds_addr)
#define VIC_DMA_KVA(_sc)	((void *)(_sc)->sc_dma_kva)

	struct vic_data		*sc_data;

	struct {
		struct if_rxring	ring;
		struct vic_rxbuf	*bufs;
		struct vic_rxdesc	*slots;
		int			end;
		u_int			pktlen;
	}			sc_rxq[VIC_NRXRINGS];

	struct vic_txbuf	*sc_txbuf;
	struct vic_txdesc	*sc_txq;
	volatile u_int		sc_txpending;
};

struct cfdriver vic_cd = {
	NULL, "vic", DV_IFNET
};

int		vic_match(struct device *, void *, void *);
void		vic_attach(struct device *, struct device *, void *);

const struct cfattach vic_ca = {
	sizeof(struct vic_softc), vic_match, vic_attach
};

int		vic_intr(void *);

int		vic_query(struct vic_softc *);
int		vic_alloc_data(struct vic_softc *);
int		vic_init_data(struct vic_softc *);
int		vic_uninit_data(struct vic_softc *);

u_int32_t	vic_read(struct vic_softc *, bus_size_t);
void		vic_write(struct vic_softc *, bus_size_t, u_int32_t);

u_int32_t	vic_read_cmd(struct vic_softc *, u_int32_t);

int		vic_alloc_dmamem(struct vic_softc *);
void		vic_free_dmamem(struct vic_softc *);

void		vic_link_state(struct vic_softc *);
void		vic_rx_fill(struct vic_softc *, int);
void		vic_rx_proc(struct vic_softc *, int);
void		vic_tx_proc(struct vic_softc *);
void		vic_iff(struct vic_softc *);
void		vic_getlladdr(struct vic_softc *);
void		vic_setlladdr(struct vic_softc *);
int		vic_media_change(struct ifnet *);
void		vic_media_status(struct ifnet *, struct ifmediareq *);
void		vic_start(struct ifnet *);
int		vic_load_txb(struct vic_softc *, struct vic_txbuf *,
		    struct mbuf *);
void		vic_watchdog(struct ifnet *);
int		vic_ioctl(struct ifnet *, u_long, caddr_t);
int		vic_rxrinfo(struct vic_softc *, struct if_rxrinfo *);
void		vic_init(struct ifnet *);
void		vic_stop(struct ifnet *);
void		vic_tick(void *);

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

struct mbuf *vic_alloc_mbuf(struct vic_softc *, bus_dmamap_t, u_int);

const struct pci_matchid vic_devices[] = {
	{ PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET }
};

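/*
 * Match the native vmxnet device, or an AMD PCnet with a BAR large
 * enough to hold the morph and vmxnet register windows; the latter
 * case returns a higher priority so vic takes the device from the
 * plain lance drivers.
 */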
int
vic_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args		*pa = aux;
	pcireg_t			memtype;
	bus_size_t			pcisize;
	bus_addr_t			pciaddr;

	switch (pa->pa_id) {
	case PCI_ID_CODE(PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET):
		return (1);

	case PCI_ID_CODE(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI):
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VIC_PCI_BAR);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, VIC_PCI_BAR,
		    memtype, &pciaddr, &pcisize, NULL) != 0)
			break;

		if (pcisize > VIC_LANCE_MINLEN)
			return (2);

		break;
	}

	return (0);
}

void
vic_attach(struct device *parent, struct device *self, void *aux)
{
	struct vic_softc		*sc = (struct vic_softc *)self;
	struct pci_attach_args		*pa = aux;
	bus_space_handle_t		ioh;
	pcireg_t			r;
	pci_intr_handle_t		ih;
	struct ifnet			*ifp;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	r = pci_mapreg_type(sc->sc_pc, sc->sc_tag, VIC_PCI_BAR);
	if (pci_mapreg_map(pa, VIC_PCI_BAR, r, 0, &sc->sc_iot,
	    &ioh, NULL, &sc->sc_ios, 0) != 0) {
		printf(": unable to map system interface register\n");
		return;
	}

	switch (pa->pa_id) {
	case PCI_ID_CODE(PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET):
		if (bus_space_subregion(sc->sc_iot, ioh, 0, sc->sc_ios,
		    &sc->sc_ioh) != 0) {
			printf(": unable to map register window\n");
			goto unmap;
		}
		break;

	case PCI_ID_CODE(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI):
		if (bus_space_subregion(sc->sc_iot, ioh,
		    VIC_LANCE_SIZE + VIC_MORPH_SIZE, VIC_VMXNET_SIZE,
		    &sc->sc_ioh) != 0) {
			printf(": unable to map register window\n");
			goto unmap;
		}

		bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
		    BUS_SPACE_BARRIER_READ);
		r = bus_space_read_4(sc->sc_iot, ioh, VIC_LANCE_SIZE);

		if ((r & VIC_MORPH_MASK) == VIC_MORPH_VMXNET)
			break;
		if ((r & VIC_MORPH_MASK) != VIC_MORPH_LANCE) {
			printf(": unexpected morph value (0x%08x)\n", r);
			goto unmap;
		}

		r &= ~VIC_MORPH_MASK;
		r |= VIC_MORPH_VMXNET;

		bus_space_write_4(sc->sc_iot, ioh, VIC_LANCE_SIZE, r);
		bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
		    BUS_SPACE_BARRIER_WRITE);

		bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
		    BUS_SPACE_BARRIER_READ);
		r = bus_space_read_4(sc->sc_iot, ioh, VIC_LANCE_SIZE);

		if ((r & VIC_MORPH_MASK) != VIC_MORPH_VMXNET) {
			printf(": unable to morph vlance chip\n");
			goto unmap;
		}

		break;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": unable to map interrupt\n");
		goto unmap;
	}

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
	    vic_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt\n");
		goto unmap;
	}

	if (vic_query(sc) != 0) {
		/* error printed by vic_query */
		goto unmap;
	}

	if (vic_alloc_data(sc) != 0) {
		/* error printed by vic_alloc_data */
		goto unmap;
	}

	timeout_set(&sc->sc_tick, vic_tick, sc);

	bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vic_ioctl;
	ifp->if_start = vic_start;
	ifp->if_watchdog = vic_watchdog;
	ifp->if_hardmtu = VIC_JUMBO_MTU;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifq_init_maxlen(&ifp->if_snd, sc->sc_ntxbuf - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if 0
	/* XXX interface capabilities */
	if (sc->sc_cap & VIC_CMD_HWCAP_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	if (sc->sc_cap & VIC_CMD_HWCAP_CSUM)
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, vic_media_change, vic_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	printf(": %s, address %s\n", pci_intr_string(pa->pa_pc, ih),
	    ether_sprintf(sc->sc_lladdr));

#ifdef VIC_DEBUG
	printf("%s: feature 0x%08x, cap 0x%08x, rx/txbuf %d/%d\n", DEVNAME(sc),
	    sc->sc_feature, sc->sc_cap, sc->sc_nrxbuf, sc->sc_ntxbuf);
#endif

	return;

unmap:
	bus_space_unmap(sc->sc_iot, ioh, sc->sc_ios);
	sc->sc_ios = 0;
}

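/*
 * Check the version registers against VIC_MAGIC, then fetch the ring
 * sizes, the feature and capability words, and the MAC address.
 */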
int
vic_query(struct vic_softc *sc)
{
	u_int32_t			major, minor;

	major = vic_read(sc, VIC_VERSION_MAJOR);
	minor = vic_read(sc, VIC_VERSION_MINOR);

	/* Check for a supported version */
	if ((major & VIC_VERSION_MAJOR_M) !=
	    (VIC_MAGIC & VIC_VERSION_MAJOR_M)) {
		printf(": magic mismatch\n");
		return (1);
	}

	if (VIC_MAGIC > major || VIC_MAGIC < minor) {
		printf(": unsupported version (%X)\n",
		    major & ~VIC_VERSION_MAJOR_M);
		return (1);
	}

	sc->sc_nrxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Rx_BUF);
	sc->sc_ntxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Tx_BUF);
	sc->sc_feature = vic_read_cmd(sc, VIC_CMD_FEATURE);
	sc->sc_cap = vic_read_cmd(sc, VIC_CMD_HWCAP);

	vic_getlladdr(sc);

	if (sc->sc_nrxbuf > VIC_NBUF_MAX || sc->sc_nrxbuf == 0)
		sc->sc_nrxbuf = VIC_NBUF;
	if (sc->sc_ntxbuf > VIC_NBUF_MAX || sc->sc_ntxbuf == 0)
		sc->sc_ntxbuf = VIC_NBUF;

	return (0);
}

int
vic_alloc_data(struct vic_softc *sc)
{
	u_int8_t			*kva;
	u_int				offset;
	struct vic_rxdesc		*rxd;
	int				i, q;

	sc->sc_rxq[0].pktlen = MCLBYTES;
	sc->sc_rxq[1].pktlen = 4096;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		sc->sc_rxq[q].bufs = mallocarray(sc->sc_nrxbuf,
		    sizeof(struct vic_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->sc_rxq[q].bufs == NULL) {
			printf(": unable to allocate rxbuf for ring %d\n", q);
			goto freerx;
		}
	}

	sc->sc_txbuf = mallocarray(sc->sc_ntxbuf, sizeof(struct vic_txbuf),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_txbuf == NULL) {
		printf(": unable to allocate txbuf\n");
		goto freerx;
	}

	sc->sc_dma_size = sizeof(struct vic_data) +
	    (sc->sc_nrxbuf * VIC_NRXRINGS) * sizeof(struct vic_rxdesc) +
	    sc->sc_ntxbuf * sizeof(struct vic_txdesc);

	if (vic_alloc_dmamem(sc) != 0) {
		printf(": unable to allocate dma region\n");
		goto freetx;
	}
	kva = VIC_DMA_KVA(sc);

	/* set up basic vic data */
	sc->sc_data = VIC_DMA_KVA(sc);

	sc->sc_data->vd_magic = VIC_MAGIC;
	sc->sc_data->vd_length = sc->sc_dma_size;

	offset = sizeof(struct vic_data);

	/* set up the rx rings */

	for (q = 0; q < VIC_NRXRINGS; q++) {
		sc->sc_rxq[q].slots = (struct vic_rxdesc *)&kva[offset];
		sc->sc_data->vd_rx_offset[q] = offset;
		sc->sc_data->vd_rx[q].length = sc->sc_nrxbuf;

		for (i = 0; i < sc->sc_nrxbuf; i++) {
			rxd = &sc->sc_rxq[q].slots[i];

			rxd->rx_physaddr = 0;
			rxd->rx_buflength = 0;
			rxd->rx_length = 0;
			rxd->rx_owner = VIC_OWNER_DRIVER;

			offset += sizeof(struct vic_rxdesc);
		}
	}

	/* set up the tx ring */
	sc->sc_txq = (struct vic_txdesc *)&kva[offset];

	sc->sc_data->vd_tx_offset = offset;
	sc->sc_data->vd_tx_length = sc->sc_ntxbuf;

	return (0);
freetx:
	free(sc->sc_txbuf, M_DEVBUF, 0);
	q = VIC_NRXRINGS;
freerx:
	while (q--)
		free(sc->sc_rxq[q].bufs, M_DEVBUF, 0);

	return (1);
}

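/*
 * Refill a receive ring: allocate an mbuf for each free slot, load it
 * into the slot's DMA map, and pass ownership of the descriptor to
 * the NIC.
 */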
void
vic_rx_fill(struct vic_softc *sc, int q)
{
	struct vic_rxbuf		*rxb;
	struct vic_rxdesc		*rxd;
	u_int				slots;

	for (slots = if_rxr_get(&sc->sc_rxq[q].ring, sc->sc_nrxbuf);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxq[q].bufs[sc->sc_rxq[q].end];
		rxd = &sc->sc_rxq[q].slots[sc->sc_rxq[q].end];

		rxb->rxb_m = vic_alloc_mbuf(sc, rxb->rxb_dmamap,
		    sc->sc_rxq[q].pktlen);
		if (rxb->rxb_m == NULL)
			break;

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

		rxd->rx_physaddr = rxb->rxb_dmamap->dm_segs[0].ds_addr;
		rxd->rx_buflength = rxb->rxb_m->m_pkthdr.len;
		rxd->rx_length = 0;
		rxd->rx_owner = VIC_OWNER_NIC;

		VIC_INC(sc->sc_rxq[q].end, sc->sc_data->vd_rx[q].length);
	}
	if_rxr_put(&sc->sc_rxq[q].ring, slots);
}

int
vic_init_data(struct vic_softc *sc)
{
	struct vic_rxbuf		*rxb;
	struct vic_rxdesc		*rxd;
	struct vic_txbuf		*txb;

	int				q, i;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		for (i = 0; i < sc->sc_nrxbuf; i++) {
			rxb = &sc->sc_rxq[q].bufs[i];
			rxd = &sc->sc_rxq[q].slots[i];

			if (bus_dmamap_create(sc->sc_dmat,
			    sc->sc_rxq[q].pktlen, 1, sc->sc_rxq[q].pktlen, 0,
			    BUS_DMA_NOWAIT, &rxb->rxb_dmamap) != 0) {
				printf("%s: unable to create dmamap for "
				    "ring %d slot %d\n", DEVNAME(sc), q, i);
				goto freerxbs;
			}

			/* scrub the ring */
			rxd->rx_physaddr = 0;
			rxd->rx_buflength = 0;
			rxd->rx_length = 0;
			rxd->rx_owner = VIC_OWNER_DRIVER;
		}
		sc->sc_rxq[q].end = 0;

		if_rxr_init(&sc->sc_rxq[q].ring, 2, sc->sc_nrxbuf - 1);
		vic_rx_fill(sc, q);
	}

	for (i = 0; i < sc->sc_ntxbuf; i++) {
		txb = &sc->sc_txbuf[i];
		if (bus_dmamap_create(sc->sc_dmat, VIC_JUMBO_FRAMELEN,
		    (sc->sc_cap & VIC_CMD_HWCAP_SG) ? VIC_SG_MAX : 1,
		    VIC_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &txb->txb_dmamap) != 0) {
			printf("%s: unable to create dmamap for tx %d\n",
			    DEVNAME(sc), i);
			goto freetxbs;
		}
		txb->txb_m = NULL;
	}

	return (0);

freetxbs:
	while (i--) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
	}

	i = sc->sc_nrxbuf;
	q = VIC_NRXRINGS - 1;
freerxbs:
	while (q >= 0) {
		while (i--) {
			rxb = &sc->sc_rxq[q].bufs[i];

			if (rxb->rxb_m != NULL) {
				bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap,
				    0, rxb->rxb_m->m_pkthdr.len,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
				m_freem(rxb->rxb_m);
				rxb->rxb_m = NULL;
			}
			bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
		}
		q--;
		/* the rings before the one that failed are fully populated */
		i = sc->sc_nrxbuf;
	}

	return (1);
}

int
vic_uninit_data(struct vic_softc *sc)
{
	struct vic_rxbuf		*rxb;
	struct vic_rxdesc		*rxd;
	struct vic_txbuf		*txb;

	int				i, q;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		for (i = 0; i < sc->sc_nrxbuf; i++) {
			rxb = &sc->sc_rxq[q].bufs[i];
			rxd = &sc->sc_rxq[q].slots[i];

			if (rxb->rxb_m != NULL) {
				bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap,
				    0, rxb->rxb_m->m_pkthdr.len,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
				m_freem(rxb->rxb_m);
				rxb->rxb_m = NULL;
			}
			bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
		}
	}

	for (i = 0; i < sc->sc_ntxbuf; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
	}

	return (0);
}

void
vic_link_state(struct vic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	u_int32_t status;
	int link_state = LINK_STATE_DOWN;

	status = vic_read(sc, VIC_STATUS);
	if (status & VIC_STATUS_CONNECTED)
		link_state = LINK_STATE_FULL_DUPLEX;
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}

int
vic_intr(void *arg)
{
	struct vic_softc *sc = (struct vic_softc *)arg;
	int q;

	vic_write(sc, VIC_CMD, VIC_CMD_INTR_ACK);

	for (q = 0; q < VIC_NRXRINGS; q++)
		vic_rx_proc(sc, q);
	vic_tx_proc(sc);

	return (-1);
}

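/*
 * Harvest received frames: walk the ring from the next index, hand
 * completed mbufs to the network stack, and refill the slots we took.
 */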
void
vic_rx_proc(struct vic_softc *sc, int q)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	struct vic_rxdesc		*rxd;
	struct vic_rxbuf		*rxb;
	struct mbuf_list		 ml = MBUF_LIST_INITIALIZER();
	struct mbuf			*m;
	int				len, idx;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (if_rxr_inuse(&sc->sc_rxq[q].ring) > 0) {
		idx = sc->sc_data->vd_rx[q].nextidx;
		if (idx >= sc->sc_data->vd_rx[q].length) {
			ifp->if_ierrors++;
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: receive index error\n",
				    sc->sc_dev.dv_xname);
			break;
		}

		rxd = &sc->sc_rxq[q].slots[idx];
		if (rxd->rx_owner != VIC_OWNER_DRIVER)
			break;

		rxb = &sc->sc_rxq[q].bufs[idx];

		if (rxb->rxb_m == NULL) {
			ifp->if_ierrors++;
			printf("%s: rxb %d has no mbuf\n", DEVNAME(sc), idx);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);

		m = rxb->rxb_m;
		rxb->rxb_m = NULL;
		len = rxd->rx_length;

		if (len < VIC_MIN_FRAMELEN) {
			m_freem(m);

			ifp->if_iqdrops++;
			goto nextp;
		}

		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

 nextp:
		if_rxr_put(&sc->sc_rxq[q].ring, 1);
		VIC_INC(sc->sc_data->vd_rx[q].nextidx, sc->sc_nrxbuf);
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rxq[q].ring);

	vic_rx_fill(sc, q);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

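/*
 * Reclaim transmit descriptors the NIC has finished with and free the
 * mbufs that were attached to them.
 */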
void
vic_tx_proc(struct vic_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	struct vic_txdesc		*txd;
	struct vic_txbuf		*txb;
	int				idx;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (sc->sc_txpending > 0) {
		idx = sc->sc_data->vd_tx_curidx;
		if (idx >= sc->sc_data->vd_tx_length) {
			ifp->if_oerrors++;
			break;
		}

		txd = &sc->sc_txq[idx];
		if (txd->tx_owner != VIC_OWNER_DRIVER)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->txb_m == NULL) {
			printf("%s: tx ring is corrupt\n", DEVNAME(sc));
			ifp->if_oerrors++;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txb->txb_dmamap, 0,
		    txb->txb_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txb->txb_dmamap);

		m_freem(txb->txb_m);
		txb->txb_m = NULL;
		ifq_clr_oactive(&ifp->if_snd);

		sc->sc_txpending--;
		sc->sc_data->vd_tx_stopped = 0;

		VIC_INC(sc->sc_data->vd_tx_curidx, sc->sc_data->vd_tx_length);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	vic_start(ifp);
}

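/*
 * Program the interface flags and the 64-bit multicast hash filter.
 * Each hash is the top 6 bits of the little-endian CRC32 of a
 * multicast address.
 */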
void
vic_iff(struct vic_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t *mcastfil = (u_int16_t *)sc->sc_data->vd_mcastfil;
	u_int flags;

	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Always accept broadcast frames. */
	flags = VIC_CMD_IFF_BROADCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			flags |= VIC_CMD_IFF_PROMISC;
		else
			flags |= VIC_CMD_IFF_MULTICAST;
		memset(&sc->sc_data->vd_mcastfil, 0xff,
		    sizeof(sc->sc_data->vd_mcastfil));
	} else {
		flags |= VIC_CMD_IFF_MULTICAST;

		bzero(&sc->sc_data->vd_mcastfil,
		    sizeof(sc->sc_data->vd_mcastfil));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

			crc >>= 26;

			mcastfil[crc >> 4] |= htole16(1 << (crc & 0xf));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	vic_write(sc, VIC_CMD, VIC_CMD_MCASTFIL);
	sc->sc_data->vd_iff = flags;
	vic_write(sc, VIC_CMD, VIC_CMD_IFF);
}

void
vic_getlladdr(struct vic_softc *sc)
{
	u_int32_t reg;

	/* Get MAC address */
	reg = (sc->sc_cap & VIC_CMD_HWCAP_VPROM) ? VIC_VPROM : VIC_LLADDR;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, reg, ETHER_ADDR_LEN,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_1(sc->sc_iot, sc->sc_ioh, reg, sc->sc_lladdr,
	    ETHER_ADDR_LEN);

	/* Update the MAC address register */
	if (reg == VIC_VPROM)
		vic_setlladdr(sc);
}

void
vic_setlladdr(struct vic_softc *sc)
{
	bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, VIC_LLADDR,
	    sc->sc_lladdr, ETHER_ADDR_LEN);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, VIC_LLADDR, ETHER_ADDR_LEN,
	    BUS_SPACE_BARRIER_WRITE);
}

int
vic_media_change(struct ifnet *ifp)
{
	/* Ignore */
	return (0);
}

void
vic_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	imr->ifm_status = IFM_AVALID;

	vic_link_state(sc);

	if (LINK_STATE_IS_UP(ifp->if_link_state) &&
	    ifp->if_flags & IFF_UP)
		imr->ifm_status |= IFM_ACTIVE;
}

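/*
 * Transmit: dequeue packets, describe each one to the NIC as a
 * scatter-gather list in a free tx descriptor, and finish with a read
 * of VIC_Tx_ADDR, which kicks the NIC into scanning the ring.
 */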
void
vic_start(struct ifnet *ifp)
{
	struct vic_softc		*sc;
	struct mbuf			*m;
	struct vic_txbuf		*txb;
	struct vic_txdesc		*txd;
	struct vic_sg			*sge;
	bus_dmamap_t			dmap;
	int				i, idx;
	int				tx = 0;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	if (ifq_empty(&ifp->if_snd))
		return;

	sc = (struct vic_softc *)ifp->if_softc;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		if (VIC_TXURN(sc)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		idx = sc->sc_data->vd_tx_nextidx;
		if (idx >= sc->sc_data->vd_tx_length) {
			printf("%s: tx idx is corrupt\n", DEVNAME(sc));
			ifp->if_oerrors++;
			break;
		}

		txd = &sc->sc_txq[idx];
		txb = &sc->sc_txbuf[idx];

		if (txb->txb_m != NULL) {
			printf("%s: tx ring is corrupt\n", DEVNAME(sc));
			sc->sc_data->vd_tx_stopped = 1;
			ifp->if_oerrors++;
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		if (vic_load_txb(sc, txb, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, txb->txb_m, BPF_DIRECTION_OUT);
#endif

		dmap = txb->txb_dmamap;
		txd->tx_flags = VIC_TX_FLAGS_KEEP;
		txd->tx_owner = VIC_OWNER_NIC;
		txd->tx_sa.sa_addr_type = VIC_SG_ADDR_PHYS;
		txd->tx_sa.sa_length = dmap->dm_nsegs;
		for (i = 0; i < dmap->dm_nsegs; i++) {
			sge = &txd->tx_sa.sa_sg[i];
			sge->sg_length = dmap->dm_segs[i].ds_len;
			sge->sg_addr_low = dmap->dm_segs[i].ds_addr;
		}

		if (VIC_TXURN_WARN(sc)) {
			txd->tx_flags |= VIC_TX_FLAGS_TXURN;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		sc->sc_txpending++;

		VIC_INC(sc->sc_data->vd_tx_nextidx, sc->sc_data->vd_tx_length);

		tx = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (tx)
		vic_read(sc, VIC_Tx_ADDR);
}

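/*
 * Load an mbuf into a tx DMA map, defragmenting it once if it has
 * more segments than the map can take.
 */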
int
vic_load_txb(struct vic_softc *sc, struct vic_txbuf *txb, struct mbuf *m)
{
	bus_dmamap_t			dmap = txb->txb_dmamap;
	int				error;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		txb->txb_m = m;
		break;

	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
		    BUS_DMA_NOWAIT) == 0) {
			txb->txb_m = m;
			break;
		}

		/* FALLTHROUGH */
	default:
		return (ENOBUFS);
	}

	return (0);
}

void
vic_watchdog(struct ifnet *ifp)
{
#if 0
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;

	if (sc->sc_txpending && sc->sc_txtimeout > 0) {
		if (--sc->sc_txtimeout == 0) {
			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
			ifp->if_flags &= ~IFF_RUNNING;
			vic_init(ifp);
			ifp->if_oerrors++;
			return;
		}
	}

	if (!ifq_empty(&ifp->if_snd))
		vic_start(ifp);
#endif
}

int
vic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vic_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vic_stop(ifp);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = vic_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vic_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

int
vic_rxrinfo(struct vic_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info ifr[2];

	memset(ifr, 0, sizeof(ifr));

	ifr[0].ifr_size = MCLBYTES;
	ifr[0].ifr_info = sc->sc_rxq[0].ring;

	ifr[1].ifr_size = 4096;
	ifr[1].ifr_info = sc->sc_rxq[1].ring;

	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
}

void
vic_init(struct ifnet *ifp)
{
	struct vic_softc	*sc = (struct vic_softc *)ifp->if_softc;
	int			q;
	int			s;

	sc->sc_data->vd_tx_curidx = 0;
	sc->sc_data->vd_tx_nextidx = 0;
	sc->sc_data->vd_tx_stopped = sc->sc_data->vd_tx_queued = 0;
	sc->sc_data->vd_tx_saved_nextidx = 0;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		sc->sc_data->vd_rx[q].nextidx = 0;
		sc->sc_data->vd_rx_saved_nextidx[q] = 0;
	}

	if (vic_init_data(sc) != 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splnet();

	vic_write(sc, VIC_DATA_ADDR, VIC_DMA_DVA(sc));
	vic_write(sc, VIC_DATA_LENGTH, sc->sc_dma_size);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	vic_iff(sc);
	vic_write(sc, VIC_CMD, VIC_CMD_INTR_ENABLE);

	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

void
vic_stop(struct ifnet *ifp)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	int s;

	s = splnet();

	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* XXX wait for tx to complete */
	while (sc->sc_txpending > 0) {
		splx(s);
		delay(1000);
		s = splnet();
	}

	sc->sc_data->vd_tx_stopped = 1;

	vic_write(sc, VIC_CMD, VIC_CMD_INTR_DISABLE);

	sc->sc_data->vd_iff = 0;
	vic_write(sc, VIC_CMD, VIC_CMD_IFF);

	vic_write(sc, VIC_DATA_ADDR, 0);

	vic_uninit_data(sc);

	splx(s);
}

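/*
 * Allocate a receive mbuf of the requested size, align its payload,
 * and load it into the given DMA map.
 */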
struct mbuf *
vic_alloc_mbuf(struct vic_softc *sc, bus_dmamap_t map, u_int pktlen)
{
	struct mbuf *m = NULL;

	m = MCLGETL(NULL, M_DONTWAIT, pktlen);
	if (!m)
		return (NULL);
	m->m_data += ETHER_ALIGN;
	m->m_len = m->m_pkthdr.len = pktlen - ETHER_ALIGN;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	return (m);
}

void
vic_tick(void *arg)
{
	struct vic_softc		*sc = (struct vic_softc *)arg;

	vic_link_state(sc);

	timeout_add_sec(&sc->sc_tick, 1);
}

u_int32_t
vic_read(struct vic_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}

void
vic_write(struct vic_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

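/* Write a command to the command register and read back its result. */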
u_int32_t
vic_read_cmd(struct vic_softc *sc, u_int32_t cmd)
{
	vic_write(sc, VIC_CMD, cmd);
	return (vic_read(sc, VIC_CMD));
}

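/*
 * Allocate the single physically contiguous DMA region that backs the
 * shared data structure and the rx and tx rings.
 */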
int
vic_alloc_dmamem(struct vic_softc *sc)
{
	int nsegs;

	if (bus_dmamap_create(sc->sc_dmat, sc->sc_dma_size, 1,
	    sc->sc_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->sc_dma_map) != 0)
		goto err;

	if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_dma_size, 16, 0,
	    &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_seg, nsegs,
	    sc->sc_dma_size, &sc->sc_dma_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dma_map, sc->sc_dma_kva,
	    sc->sc_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (0);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
free:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
err:
	return (1);
}

void
vic_free_dmamem(struct vic_softc *sc)
{
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
}