if_vge.c revision 200529
1/*-
2 * Copyright (c) 2004
3 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/dev/vge/if_vge.c 200529 2009-12-14 19:08:11Z yongari $");
35
36/*
37 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
38 *
39 * Written by Bill Paul <wpaul@windriver.com>
40 * Senior Networking Software Engineer
41 * Wind River Systems
42 */
43
44/*
45 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
46 * combines a tri-speed ethernet MAC and PHY, with the following
47 * features:
48 *
49 *	o Jumbo frame support up to 16K
50 *	o Transmit and receive flow control
51 *	o IPv4 checksum offload
52 *	o VLAN tag insertion and stripping
53 *	o TCP large send
54 *	o 64-bit multicast hash table filter
55 *	o 64 entry CAM filter
56 *	o 16K RX FIFO and 48K TX FIFO memory
57 *	o Interrupt moderation
58 *
59 * The VT6122 supports up to four transmit DMA queues. The descriptors
60 * in the transmit ring can address up to 7 data fragments; frames which
61 * span more than 7 data buffers must be coalesced, but in general the
62 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
63 * long. The receive descriptors address only a single buffer.
64 *
65 * There are two peculiar design issues with the VT6122. One is that
66 * receive data buffers must be aligned on a 32-bit boundary. This is
67 * not a problem where the VT6122 is used as a LOM device in x86-based
68 * systems, but on architectures that generate unaligned access traps, we
69 * have to do some copying.
70 *
71 * The other issue has to do with the way 64-bit addresses are handled.
72 * The DMA descriptors only allow you to specify 48 bits of addressing
73 * information. The remaining 16 bits are specified using one of the
74 * I/O registers. If you only have a 32-bit system, then this isn't
75 * an issue, but if you have a 64-bit system and more than 4GB of
76 * memory, you have to make sure your network data buffers reside
77 * in the same 48-bit 'segment.'
78 *
79 * Special thanks to Ryan Fu at VIA Networking for providing documentation
80 * and sample NICs for testing.
81 */
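/*
 * In practice (see vge_dma_alloc(), vge_newbuf() and vge_encap() below),
 * VGE_ADDR_LO() supplies the low 32 bits of a bus address in a
 * descriptor's address word and VGE_ADDR_HI() supplies the bits above
 * that, which is why vge_dma_alloc() checks that the Tx and Rx rings
 * share the same upper address bits and falls back to 32-bit DMA
 * addressing when they do not.
 */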
82
83#ifdef HAVE_KERNEL_OPTION_HEADERS
84#include "opt_device_polling.h"
85#endif
86
87#include <sys/param.h>
88#include <sys/endian.h>
89#include <sys/systm.h>
90#include <sys/sockio.h>
91#include <sys/mbuf.h>
92#include <sys/malloc.h>
93#include <sys/module.h>
94#include <sys/kernel.h>
95#include <sys/socket.h>
96
97#include <net/if.h>
98#include <net/if_arp.h>
99#include <net/ethernet.h>
100#include <net/if_dl.h>
101#include <net/if_media.h>
102#include <net/if_types.h>
103#include <net/if_vlan_var.h>
104
105#include <net/bpf.h>
106
107#include <machine/bus.h>
108#include <machine/resource.h>
109#include <sys/bus.h>
110#include <sys/rman.h>
111
112#include <dev/mii/mii.h>
113#include <dev/mii/miivar.h>
114
115#include <dev/pci/pcireg.h>
116#include <dev/pci/pcivar.h>
117
118MODULE_DEPEND(vge, pci, 1, 1, 1);
119MODULE_DEPEND(vge, ether, 1, 1, 1);
120MODULE_DEPEND(vge, miibus, 1, 1, 1);
121
122/* "device miibus" required.  See GENERIC if you get errors here. */
123#include "miibus_if.h"
124
125#include <dev/vge/if_vgereg.h>
126#include <dev/vge/if_vgevar.h>
127
128#define VGE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
129
130/*
131 * Various supported device vendors/types and their names.
132 */
133static struct vge_type vge_devs[] = {
134	{ VIA_VENDORID, VIA_DEVICEID_61XX,
135		"VIA Networking Gigabit Ethernet" },
136	{ 0, 0, NULL }
137};
138
139static int vge_probe		(device_t);
140static int vge_attach		(device_t);
141static int vge_detach		(device_t);
142
143static int vge_encap		(struct vge_softc *, struct mbuf **);
144
145static void vge_dmamap_cb	(void *, bus_dma_segment_t *, int, int);
146static int vge_dma_alloc	(struct vge_softc *);
147static void vge_dma_free	(struct vge_softc *);
148static void vge_discard_rxbuf	(struct vge_softc *, int);
149static int vge_newbuf		(struct vge_softc *, int);
150static int vge_rx_list_init	(struct vge_softc *);
151static int vge_tx_list_init	(struct vge_softc *);
152static void vge_freebufs	(struct vge_softc *);
153#ifndef __NO_STRICT_ALIGNMENT
154static __inline void vge_fixup_rx
155				(struct mbuf *);
156#endif
157static int vge_rxeof		(struct vge_softc *, int);
158static void vge_txeof		(struct vge_softc *);
159static void vge_intr		(void *);
160static void vge_tick		(void *);
161static void vge_start		(struct ifnet *);
162static void vge_start_locked	(struct ifnet *);
163static int vge_ioctl		(struct ifnet *, u_long, caddr_t);
164static void vge_init		(void *);
165static void vge_init_locked	(struct vge_softc *);
166static void vge_stop		(struct vge_softc *);
167static void vge_watchdog	(void *);
168static int vge_suspend		(device_t);
169static int vge_resume		(device_t);
170static int vge_shutdown		(device_t);
171static int vge_ifmedia_upd	(struct ifnet *);
172static void vge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
173
174#ifdef VGE_EEPROM
175static void vge_eeprom_getword	(struct vge_softc *, int, u_int16_t *);
176#endif
177static void vge_read_eeprom	(struct vge_softc *, caddr_t, int, int, int);
178
179static void vge_miipoll_start	(struct vge_softc *);
180static void vge_miipoll_stop	(struct vge_softc *);
181static int vge_miibus_readreg	(device_t, int, int);
182static int vge_miibus_writereg	(device_t, int, int, int);
183static void vge_miibus_statchg	(device_t);
184
185static void vge_cam_clear	(struct vge_softc *);
186static int vge_cam_set		(struct vge_softc *, uint8_t *);
187static void vge_setmulti	(struct vge_softc *);
188static void vge_reset		(struct vge_softc *);
189
190static device_method_t vge_methods[] = {
191	/* Device interface */
192	DEVMETHOD(device_probe,		vge_probe),
193	DEVMETHOD(device_attach,	vge_attach),
194	DEVMETHOD(device_detach,	vge_detach),
195	DEVMETHOD(device_suspend,	vge_suspend),
196	DEVMETHOD(device_resume,	vge_resume),
197	DEVMETHOD(device_shutdown,	vge_shutdown),
198
199	/* bus interface */
200	DEVMETHOD(bus_print_child,	bus_generic_print_child),
201	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
202
203	/* MII interface */
204	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
205	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
206	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),
207
208	{ 0, 0 }
209};
210
211static driver_t vge_driver = {
212	"vge",
213	vge_methods,
214	sizeof(struct vge_softc)
215};
216
217static devclass_t vge_devclass;
218
219DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
220DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
221
222#ifdef VGE_EEPROM
223/*
224 * Read a word of data stored in the EEPROM at address 'addr.'
225 */
226static void
227vge_eeprom_getword(sc, addr, dest)
228	struct vge_softc	*sc;
229	int			addr;
230	u_int16_t		*dest;
231{
232	int			i;
233	u_int16_t		word = 0;
234
235	/*
236	 * Enter EEPROM embedded programming mode. In order to
237	 * access the EEPROM at all, we first have to set the
238	 * EELOAD bit in the CHIPCFG2 register.
239	 */
240	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
241	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
242
243	/* Select the address of the word we want to read */
244	CSR_WRITE_1(sc, VGE_EEADDR, addr);
245
246	/* Issue read command */
247	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
248
249	/* Wait for the done bit to be set. */
250	for (i = 0; i < VGE_TIMEOUT; i++) {
251		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
252			break;
253	}
254
255	if (i == VGE_TIMEOUT) {
256		device_printf(sc->vge_dev, "EEPROM read timed out\n");
257		*dest = 0;
258		return;
259	}
260
261	/* Read the result */
262	word = CSR_READ_2(sc, VGE_EERDDAT);
263
264	/* Turn off EEPROM access mode. */
265	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
266	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
267
268	*dest = word;
269
270	return;
271}
272#endif
273
274/*
275 * Read a sequence of words from the EEPROM.
276 */
277static void
278vge_read_eeprom(sc, dest, off, cnt, swap)
279	struct vge_softc	*sc;
280	caddr_t			dest;
281	int			off;
282	int			cnt;
283	int			swap;
284{
285	int			i;
286#ifdef VGE_EEPROM
287	u_int16_t		word = 0, *ptr;
288
289	for (i = 0; i < cnt; i++) {
290		vge_eeprom_getword(sc, off + i, &word);
291		ptr = (u_int16_t *)(dest + (i * 2));
292		if (swap)
293			*ptr = ntohs(word);
294		else
295			*ptr = word;
296	}
297#else
298	for (i = 0; i < ETHER_ADDR_LEN; i++)
299		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
300#endif
301}
302
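/*
 * Turn off MII autopolling so the MII management registers can be
 * accessed directly (see vge_miibus_readreg()/vge_miibus_writereg()
 * below), then wait for the interface to report idle.
 */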
303static void
304vge_miipoll_stop(sc)
305	struct vge_softc	*sc;
306{
307	int			i;
308
309	CSR_WRITE_1(sc, VGE_MIICMD, 0);
310
311	for (i = 0; i < VGE_TIMEOUT; i++) {
312		DELAY(1);
313		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
314			break;
315	}
316
317	if (i == VGE_TIMEOUT)
318		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
319
320	return;
321}
322
323static void
324vge_miipoll_start(sc)
325	struct vge_softc	*sc;
326{
327	int			i;
328
329	/* First, make sure we're idle. */
330
331	CSR_WRITE_1(sc, VGE_MIICMD, 0);
332	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
333
334	for (i = 0; i < VGE_TIMEOUT; i++) {
335		DELAY(1);
336		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
337			break;
338	}
339
340	if (i == VGE_TIMEOUT) {
341		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
342		return;
343	}
344
345	/* Now enable auto poll mode. */
346
347	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
348
349	/* And make sure it started. */
350
351	for (i = 0; i < VGE_TIMEOUT; i++) {
352		DELAY(1);
353		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
354			break;
355	}
356
357	if (i == VGE_TIMEOUT)
358		device_printf(sc->vge_dev, "failed to start MII autopoll\n");
359
360	return;
361}
362
363static int
364vge_miibus_readreg(dev, phy, reg)
365	device_t		dev;
366	int			phy, reg;
367{
368	struct vge_softc	*sc;
369	int			i;
370	u_int16_t		rval = 0;
371
372	sc = device_get_softc(dev);
373
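	/*
	 * Only the chip's internal PHY is accessible; its address is
	 * the low 5 bits of VGE_MIICFG, so reads of any other PHY
	 * address simply return 0.
	 */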
374	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
375		return(0);
376
377	vge_miipoll_stop(sc);
378
379	/* Specify the register we want to read. */
380	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
381
382	/* Issue read command. */
383	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
384
385	/* Wait for the read command bit to self-clear. */
386	for (i = 0; i < VGE_TIMEOUT; i++) {
387		DELAY(1);
388		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
389			break;
390	}
391
392	if (i == VGE_TIMEOUT)
393		device_printf(sc->vge_dev, "MII read timed out\n");
394	else
395		rval = CSR_READ_2(sc, VGE_MIIDATA);
396
397	vge_miipoll_start(sc);
398
399	return (rval);
400}
401
402static int
403vge_miibus_writereg(dev, phy, reg, data)
404	device_t		dev;
405	int			phy, reg, data;
406{
407	struct vge_softc	*sc;
408	int			i, rval = 0;
409
410	sc = device_get_softc(dev);
411
412	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
413		return(0);
414
415	vge_miipoll_stop(sc);
416
417	/* Specify the register we want to write. */
418	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
419
420	/* Specify the data we want to write. */
421	CSR_WRITE_2(sc, VGE_MIIDATA, data);
422
423	/* Issue write command. */
424	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
425
426	/* Wait for the write command bit to self-clear. */
427	for (i = 0; i < VGE_TIMEOUT; i++) {
428		DELAY(1);
429		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
430			break;
431	}
432
433	if (i == VGE_TIMEOUT) {
434		device_printf(sc->vge_dev, "MII write timed out\n");
435		rval = EIO;
436	}
437
438	vge_miipoll_start(sc);
439
440	return (rval);
441}
442
443static void
444vge_cam_clear(sc)
445	struct vge_softc	*sc;
446{
447	int			i;
448
449	/*
450	 * Turn off all the mask bits. This tells the chip
451	 * that none of the entries in the CAM filter are valid.
452	 * Desired entries will be enabled as we fill the filter in.
453	 */
454
455	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
456	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
457	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
458	for (i = 0; i < 8; i++)
459		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
460
461	/* Clear the VLAN filter too. */
462
463	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
464	for (i = 0; i < 8; i++)
465		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
466
467	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
468	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
469	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
470
471	sc->vge_camidx = 0;
472
473	return;
474}
475
476static int
477vge_cam_set(sc, addr)
478	struct vge_softc	*sc;
479	uint8_t			*addr;
480{
481	int			i, error = 0;
482
483	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
484		return(ENOSPC);
485
486	/* Select the CAM data page. */
487	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
488	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
489
490	/* Set the filter entry we want to update and enable writing. */
491	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
492
493	/* Write the address to the CAM registers */
494	for (i = 0; i < ETHER_ADDR_LEN; i++)
495		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
496
497	/* Issue a write command. */
498	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
499
500	/* Wait for it to clear. */
501	for (i = 0; i < VGE_TIMEOUT; i++) {
502		DELAY(1);
503		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
504			break;
505	}
506
507	if (i == VGE_TIMEOUT) {
508		device_printf(sc->vge_dev, "setting CAM filter failed\n");
509		error = EIO;
510		goto fail;
511	}
512
513	/* Select the CAM mask page. */
514	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
515	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
516
517	/* Set the mask bit that enables this filter. */
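	/*
	 * For example, CAM index 10 sets bit 2 (10 & 7) in register
	 * VGE_CAM0 + 1 (10 / 8).
	 */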
518	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
519	    1<<(sc->vge_camidx & 7));
520
521	sc->vge_camidx++;
522
523fail:
524	/* Turn off access to CAM. */
525	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
526	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
527	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
528
529	return (error);
530}
531
532/*
533 * Program the multicast filter. We use the 64-entry CAM filter
534 * for perfect filtering. If there are more than 64 multicast addresses,
535 * we use the hash filter instead.
536 */
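/*
 * In the hash fallback below, the top 6 bits of the big-endian CRC of
 * each address (ether_crc32_be() >> 26) select one of 64 hash bits:
 * values 0-31 set a bit in VGE_MAR0, values 32-63 set a bit in VGE_MAR1.
 */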
537static void
538vge_setmulti(sc)
539	struct vge_softc	*sc;
540{
541	struct ifnet		*ifp;
542	int			error = 0/*, h = 0*/;
543	struct ifmultiaddr	*ifma;
544	u_int32_t		h, hashes[2] = { 0, 0 };
545
546	VGE_LOCK_ASSERT(sc);
547
548	ifp = sc->vge_ifp;
549
550	/* First, zot all the multicast entries. */
551	vge_cam_clear(sc);
552	CSR_WRITE_4(sc, VGE_MAR0, 0);
553	CSR_WRITE_4(sc, VGE_MAR1, 0);
554
555	/*
556	 * If the user wants allmulti or promisc mode, enable reception
557	 * of all multicast frames.
558	 */
559	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
560		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
561		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
562		return;
563	}
564
565	/* Now program new ones */
566	if_maddr_rlock(ifp);
567	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
568		if (ifma->ifma_addr->sa_family != AF_LINK)
569			continue;
570		error = vge_cam_set(sc,
571		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
572		if (error)
573			break;
574	}
575
576	/* If there were too many addresses, use the hash filter. */
577	if (error) {
578		vge_cam_clear(sc);
579
580		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
581			if (ifma->ifma_addr->sa_family != AF_LINK)
582				continue;
583			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
584			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
585			if (h < 32)
586				hashes[0] |= (1 << h);
587			else
588				hashes[1] |= (1 << (h - 32));
589		}
590
591		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
592		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
593	}
594	if_maddr_runlock(ifp);
595
596	return;
597}
598
599static void
600vge_reset(sc)
601	struct vge_softc		*sc;
602{
603	int			i;
604
605	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
606
607	for (i = 0; i < VGE_TIMEOUT; i++) {
608		DELAY(5);
609		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
610			break;
611	}
612
613	if (i == VGE_TIMEOUT) {
614		device_printf(sc->vge_dev, "soft reset timed out\n");
615		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
616		DELAY(2000);
617	}
618
619	DELAY(5000);
620
621	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
622
623	for (i = 0; i < VGE_TIMEOUT; i++) {
624		DELAY(5);
625		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
626			break;
627	}
628
629	if (i == VGE_TIMEOUT) {
630		device_printf(sc->vge_dev, "EEPROM reload timed out\n");
631		return;
632	}
633
634	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
635
636	return;
637}
638
639/*
640 * Probe for a VIA gigabit chip. Check the PCI vendor and device
641 * IDs against our list and return a device name if we find a match.
642 */
643static int
644vge_probe(dev)
645	device_t		dev;
646{
647	struct vge_type		*t;
648
649	t = vge_devs;
650
651	while (t->vge_name != NULL) {
652		if ((pci_get_vendor(dev) == t->vge_vid) &&
653		    (pci_get_device(dev) == t->vge_did)) {
654			device_set_desc(dev, t->vge_name);
655			return (BUS_PROBE_DEFAULT);
656		}
657		t++;
658	}
659
660	return (ENXIO);
661}
662
663/*
664 * Map a single buffer address.
665 */
666
667struct vge_dmamap_arg {
668	bus_addr_t	vge_busaddr;
669};
670
671static void
672vge_dmamap_cb(arg, segs, nsegs, error)
673	void			*arg;
674	bus_dma_segment_t	*segs;
675	int			nsegs;
676	int			error;
677{
678	struct vge_dmamap_arg	*ctx;
679
680	if (error != 0)
681		return;
682
683	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
684
685	ctx = (struct vge_dmamap_arg *)arg;
686	ctx->vge_busaddr = segs[0].ds_addr;
687}
688
689static int
690vge_dma_alloc(sc)
691	struct vge_softc	*sc;
692{
693	struct vge_dmamap_arg	ctx;
694	struct vge_txdesc	*txd;
695	struct vge_rxdesc	*rxd;
696	bus_addr_t		lowaddr, tx_ring_end, rx_ring_end;
697	int			error, i;
698
699	lowaddr = BUS_SPACE_MAXADDR;
700
701again:
702	/* Create parent ring tag. */
703	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
704	    1, 0,			/* algnmnt, boundary */
705	    lowaddr,			/* lowaddr */
706	    BUS_SPACE_MAXADDR,		/* highaddr */
707	    NULL, NULL,			/* filter, filterarg */
708	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
709	    0,				/* nsegments */
710	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
711	    0,				/* flags */
712	    NULL, NULL,			/* lockfunc, lockarg */
713	    &sc->vge_cdata.vge_ring_tag);
714	if (error != 0) {
715		device_printf(sc->vge_dev,
716		    "could not create parent DMA tag.\n");
717		goto fail;
718	}
719
720	/* Create tag for Tx ring. */
721	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
722	    VGE_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
723	    BUS_SPACE_MAXADDR,		/* lowaddr */
724	    BUS_SPACE_MAXADDR,		/* highaddr */
725	    NULL, NULL,			/* filter, filterarg */
726	    VGE_TX_LIST_SZ,		/* maxsize */
727	    1,				/* nsegments */
728	    VGE_TX_LIST_SZ,		/* maxsegsize */
729	    0,				/* flags */
730	    NULL, NULL,			/* lockfunc, lockarg */
731	    &sc->vge_cdata.vge_tx_ring_tag);
732	if (error != 0) {
733		device_printf(sc->vge_dev,
734		    "could not allocate Tx ring DMA tag.\n");
735		goto fail;
736	}
737
738	/* Create tag for Rx ring. */
739	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
740	    VGE_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
741	    BUS_SPACE_MAXADDR,		/* lowaddr */
742	    BUS_SPACE_MAXADDR,		/* highaddr */
743	    NULL, NULL,			/* filter, filterarg */
744	    VGE_RX_LIST_SZ,		/* maxsize */
745	    1,				/* nsegments */
746	    VGE_RX_LIST_SZ,		/* maxsegsize */
747	    0,				/* flags */
748	    NULL, NULL,			/* lockfunc, lockarg */
749	    &sc->vge_cdata.vge_rx_ring_tag);
750	if (error != 0) {
751		device_printf(sc->vge_dev,
752		    "could not allocate Rx ring DMA tag.\n");
753		goto fail;
754	}
755
756	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
757	error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
758	    (void **)&sc->vge_rdata.vge_tx_ring,
759	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
760	    &sc->vge_cdata.vge_tx_ring_map);
761	if (error != 0) {
762		device_printf(sc->vge_dev,
763		    "could not allocate DMA'able memory for Tx ring.\n");
764		goto fail;
765	}
766
767	ctx.vge_busaddr = 0;
768	error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
769	    sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
770	    VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
771	if (error != 0 || ctx.vge_busaddr == 0) {
772		device_printf(sc->vge_dev,
773		    "could not load DMA'able memory for Tx ring.\n");
774		goto fail;
775	}
776	sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;
777
778	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
779	error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
780	    (void **)&sc->vge_rdata.vge_rx_ring,
781	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
782	    &sc->vge_cdata.vge_rx_ring_map);
783	if (error != 0) {
784		device_printf(sc->vge_dev,
785		    "could not allocate DMA'able memory for Rx ring.\n");
786		goto fail;
787	}
788
789	ctx.vge_busaddr = 0;
790	error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
791	    sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
792	    VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
793	if (error != 0 || ctx.vge_busaddr == 0) {
794		device_printf(sc->vge_dev,
795		    "could not load DMA'able memory for Rx ring.\n");
796		goto fail;
797	}
798	sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;
799
800	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
801	tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
802	rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
803	if ((VGE_ADDR_HI(tx_ring_end) !=
804	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
805	    (VGE_ADDR_HI(rx_ring_end) !=
806	    VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
807	    VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
808		device_printf(sc->vge_dev, "4GB boundary crossed, "
809		    "switching to 32bit DMA address mode.\n");
810		vge_dma_free(sc);
811		/* Limit DMA address space to 32bit and try again. */
812		lowaddr = BUS_SPACE_MAXADDR_32BIT;
813		goto again;
814	}
815
816	/* Create parent buffer tag. */
817	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
818	    1, 0,			/* algnmnt, boundary */
819	    VGE_BUF_DMA_MAXADDR,	/* lowaddr */
820	    BUS_SPACE_MAXADDR,		/* highaddr */
821	    NULL, NULL,			/* filter, filterarg */
822	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
823	    0,				/* nsegments */
824	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
825	    0,				/* flags */
826	    NULL, NULL,			/* lockfunc, lockarg */
827	    &sc->vge_cdata.vge_buffer_tag);
828	if (error != 0) {
829		device_printf(sc->vge_dev,
830		    "could not create parent buffer DMA tag.\n");
831		goto fail;
832	}
833
834	/* Create tag for Tx buffers. */
835	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
836	    1, 0,			/* algnmnt, boundary */
837	    BUS_SPACE_MAXADDR,		/* lowaddr */
838	    BUS_SPACE_MAXADDR,		/* highaddr */
839	    NULL, NULL,			/* filter, filterarg */
840	    MCLBYTES * VGE_MAXTXSEGS,	/* maxsize */
841	    VGE_MAXTXSEGS,		/* nsegments */
842	    MCLBYTES,			/* maxsegsize */
843	    0,				/* flags */
844	    NULL, NULL,			/* lockfunc, lockarg */
845	    &sc->vge_cdata.vge_tx_tag);
846	if (error != 0) {
847		device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");
848		goto fail;
849	}
850
851	/* Create tag for Rx buffers. */
852	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
853	    VGE_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
854	    BUS_SPACE_MAXADDR,		/* lowaddr */
855	    BUS_SPACE_MAXADDR,		/* highaddr */
856	    NULL, NULL,			/* filter, filterarg */
857	    MCLBYTES,			/* maxsize */
858	    1,				/* nsegments */
859	    MCLBYTES,			/* maxsegsize */
860	    0,				/* flags */
861	    NULL, NULL,			/* lockfunc, lockarg */
862	    &sc->vge_cdata.vge_rx_tag);
863	if (error != 0) {
864		device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");
865		goto fail;
866	}
867
868	/* Create DMA maps for Tx buffers. */
869	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
870		txd = &sc->vge_cdata.vge_txdesc[i];
871		txd->tx_m = NULL;
872		txd->tx_dmamap = NULL;
873		error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
874		    &txd->tx_dmamap);
875		if (error != 0) {
876			device_printf(sc->vge_dev,
877			    "could not create Tx dmamap.\n");
878			goto fail;
879		}
880	}
881	/* Create DMA maps for Rx buffers. */
882	if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
883	    &sc->vge_cdata.vge_rx_sparemap)) != 0) {
884		device_printf(sc->vge_dev,
885		    "could not create spare Rx dmamap.\n");
886		goto fail;
887	}
888	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
889		rxd = &sc->vge_cdata.vge_rxdesc[i];
890		rxd->rx_m = NULL;
891		rxd->rx_dmamap = NULL;
892		error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
893		    &rxd->rx_dmamap);
894		if (error != 0) {
895			device_printf(sc->vge_dev,
896			    "could not create Rx dmamap.\n");
897			goto fail;
898		}
899	}
900
901fail:
902	return (error);
903}
904
905static void
906vge_dma_free(sc)
907	struct vge_softc	*sc;
908{
909	struct vge_txdesc	*txd;
910	struct vge_rxdesc	*rxd;
911	int			i;
912
913	/* Tx ring. */
914	if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
915		if (sc->vge_cdata.vge_tx_ring_map)
916			bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
917			    sc->vge_cdata.vge_tx_ring_map);
918		if (sc->vge_cdata.vge_tx_ring_map &&
919		    sc->vge_rdata.vge_tx_ring)
920			bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
921			    sc->vge_rdata.vge_tx_ring,
922			    sc->vge_cdata.vge_tx_ring_map);
923		sc->vge_rdata.vge_tx_ring = NULL;
924		sc->vge_cdata.vge_tx_ring_map = NULL;
925		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
926		sc->vge_cdata.vge_tx_ring_tag = NULL;
927	}
928	/* Rx ring. */
929	if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
930		if (sc->vge_cdata.vge_rx_ring_map)
931			bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
932			    sc->vge_cdata.vge_rx_ring_map);
933		if (sc->vge_cdata.vge_rx_ring_map &&
934		    sc->vge_rdata.vge_rx_ring)
935			bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
936			    sc->vge_rdata.vge_rx_ring,
937			    sc->vge_cdata.vge_rx_ring_map);
938		sc->vge_rdata.vge_rx_ring = NULL;
939		sc->vge_cdata.vge_rx_ring_map = NULL;
940		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
941		sc->vge_cdata.vge_rx_ring_tag = NULL;
942	}
943	/* Tx buffers. */
944	if (sc->vge_cdata.vge_tx_tag != NULL) {
945		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
946			txd = &sc->vge_cdata.vge_txdesc[i];
947			if (txd->tx_dmamap != NULL) {
948				bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
949				    txd->tx_dmamap);
950				txd->tx_dmamap = NULL;
951			}
952		}
953		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
954		sc->vge_cdata.vge_tx_tag = NULL;
955	}
956	/* Rx buffers. */
957	if (sc->vge_cdata.vge_rx_tag != NULL) {
958		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
959			rxd = &sc->vge_cdata.vge_rxdesc[i];
960			if (rxd->rx_dmamap != NULL) {
961				bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
962				    rxd->rx_dmamap);
963				rxd->rx_dmamap = NULL;
964			}
965		}
966		if (sc->vge_cdata.vge_rx_sparemap != NULL) {
967			bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
968			    sc->vge_cdata.vge_rx_sparemap);
969			sc->vge_cdata.vge_rx_sparemap = NULL;
970		}
971		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
972		sc->vge_cdata.vge_rx_tag = NULL;
973	}
974
975	if (sc->vge_cdata.vge_buffer_tag != NULL) {
976		bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
977		sc->vge_cdata.vge_buffer_tag = NULL;
978	}
979	if (sc->vge_cdata.vge_ring_tag != NULL) {
980		bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
981		sc->vge_cdata.vge_ring_tag = NULL;
982	}
983}
984
985/*
986 * Attach the interface. Allocate softc structures, do ifmedia
987 * setup and ethernet/BPF attach.
988 */
989static int
990vge_attach(dev)
991	device_t		dev;
992{
993	u_char			eaddr[ETHER_ADDR_LEN];
994	struct vge_softc	*sc;
995	struct ifnet		*ifp;
996	int			error = 0, rid;
997
998	sc = device_get_softc(dev);
999	sc->vge_dev = dev;
1000
1001	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1002	    MTX_DEF);
1003	callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);
1004
1005	/*
1006	 * Map control/status registers.
1007	 */
1008	pci_enable_busmaster(dev);
1009
1010	rid = PCIR_BAR(1);
1011	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1012	    RF_ACTIVE);
1013
1014	if (sc->vge_res == NULL) {
1015		device_printf(dev, "couldn't map ports/memory\n");
1016		error = ENXIO;
1017		goto fail;
1018	}
1019
1020	/* Allocate interrupt */
1021	rid = 0;
1022	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1023	    RF_SHAREABLE | RF_ACTIVE);
1024
1025	if (sc->vge_irq == NULL) {
1026		device_printf(dev, "couldn't map interrupt\n");
1027		error = ENXIO;
1028		goto fail;
1029	}
1030
1031	/* Reset the adapter. */
1032	vge_reset(sc);
1033
1034	/*
1035	 * Get station address from the EEPROM.
1036	 */
1037	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
1038
1039	error = vge_dma_alloc(sc);
1040	if (error)
1041		goto fail;
1042
1043	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
1044	if (ifp == NULL) {
1045		device_printf(dev, "can not if_alloc()\n");
1046		error = ENOSPC;
1047		goto fail;
1048	}
1049
1050	/* Do MII setup */
1051	if (mii_phy_probe(dev, &sc->vge_miibus,
1052	    vge_ifmedia_upd, vge_ifmedia_sts)) {
1053		device_printf(dev, "MII without any phy!\n");
1054		error = ENXIO;
1055		goto fail;
1056	}
1057
1058	ifp->if_softc = sc;
1059	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1060	ifp->if_mtu = ETHERMTU;
1061	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1062	ifp->if_ioctl = vge_ioctl;
1063	ifp->if_capabilities = IFCAP_VLAN_MTU;
1064	ifp->if_start = vge_start;
1065	ifp->if_hwassist = VGE_CSUM_FEATURES;
1066	ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
1067	ifp->if_capenable = ifp->if_capabilities;
1068#ifdef DEVICE_POLLING
1069	ifp->if_capabilities |= IFCAP_POLLING;
1070#endif
1071	ifp->if_init = vge_init;
1072	IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN);
1073	ifp->if_snd.ifq_drv_maxlen = VGE_IFQ_MAXLEN;
1074	IFQ_SET_READY(&ifp->if_snd);
1075
1076	/*
1077	 * Call MI attach routine.
1078	 */
1079	ether_ifattach(ifp, eaddr);
1080
1081	/* Hook interrupt last to avoid having to lock softc */
1082	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
1083	    NULL, vge_intr, sc, &sc->vge_intrhand);
1084
1085	if (error) {
1086		device_printf(dev, "couldn't set up irq\n");
1087		ether_ifdetach(ifp);
1088		goto fail;
1089	}
1090
1091fail:
1092	if (error)
1093		vge_detach(dev);
1094
1095	return (error);
1096}
1097
1098/*
1099 * Shutdown hardware and free up resources. This can be called any
1100 * time after the mutex has been initialized. It is called in both
1101 * the error case in attach and the normal detach case so it needs
1102 * to be careful about only freeing resources that have actually been
1103 * allocated.
1104 */
1105static int
1106vge_detach(dev)
1107	device_t		dev;
1108{
1109	struct vge_softc		*sc;
1110	struct ifnet		*ifp;
1111
1112	sc = device_get_softc(dev);
1113	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
1114	ifp = sc->vge_ifp;
1115
1116#ifdef DEVICE_POLLING
1117	if (ifp->if_capenable & IFCAP_POLLING)
1118		ether_poll_deregister(ifp);
1119#endif
1120
1121	/* These should only be active if attach succeeded */
1122	if (device_is_attached(dev)) {
1123		ether_ifdetach(ifp);
1124		VGE_LOCK(sc);
1125		vge_stop(sc);
1126		VGE_UNLOCK(sc);
1127		callout_drain(&sc->vge_watchdog);
1128	}
1129	if (sc->vge_miibus)
1130		device_delete_child(dev, sc->vge_miibus);
1131	bus_generic_detach(dev);
1132
1133	if (sc->vge_intrhand)
1134		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
1135	if (sc->vge_irq)
1136		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq);
1137	if (sc->vge_res)
1138		bus_release_resource(dev, SYS_RES_MEMORY,
1139		    PCIR_BAR(1), sc->vge_res);
1140	if (ifp)
1141		if_free(ifp);
1142
1143	vge_dma_free(sc);
1144	mtx_destroy(&sc->vge_mtx);
1145
1146	return (0);
1147}
1148
1149static void
1150vge_discard_rxbuf(sc, prod)
1151	struct vge_softc	*sc;
1152	int			prod;
1153{
1154	struct vge_rxdesc	*rxd;
1155	int			i;
1156
1157	rxd = &sc->vge_cdata.vge_rxdesc[prod];
1158	rxd->rx_desc->vge_sts = 0;
1159	rxd->rx_desc->vge_ctl = 0;
1160
1161	/*
1162	 * Note: the manual fails to document the fact that for
1163	 * proper operation, the driver needs to replenish the RX
1164	 * DMA ring 4 descriptors at a time (rather than one at a
1165	 * time, like most chips). We can allocate the new buffers
1166	 * but we should not set the OWN bits until we're ready
1167	 * to hand back 4 of them in one shot.
1168	 */
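	/*
	 * Assuming VGE_RXCHUNK is 4: descriptors 0-2 of each chunk are
	 * prepared without the OWN bit; when descriptor 3 is reached, OWN
	 * is set on it and on its three predecessors (walking rxd_prev),
	 * and the commit count handed back to the chip is bumped by 4.
	 */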
1169	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1170		for (i = VGE_RXCHUNK; i > 0; i--) {
1171			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1172			rxd = rxd->rxd_prev;
1173		}
1174		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1175	}
1176}
1177
1178static int
1179vge_newbuf(sc, prod)
1180	struct vge_softc	*sc;
1181	int			prod;
1182{
1183	struct vge_rxdesc	*rxd;
1184	struct mbuf		*m;
1185	bus_dma_segment_t	segs[1];
1186	bus_dmamap_t		map;
1187	int			i, nsegs;
1188
1189	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1190	if (m == NULL)
1191		return (ENOBUFS);
1192	/*
1193	 * This is part of an evil trick to deal with strict-alignment
1194	 * architectures. The VIA chip requires RX buffers to be aligned
1195	 * on 32-bit boundaries, but that will hose strict-alignment
1196	 * architectures. To get around this, we leave some empty space
1197	 * at the start of each buffer, and on strict-alignment hosts we
1198	 * copy the buffer back two bytes to achieve word alignment.
1199	 * This is slightly more efficient than allocating a new buffer,
1200	 * copying the contents, and discarding the old buffer.
1201	 */
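	/*
	 * Concretely (assuming VGE_RX_BUF_ALIGN is 4 and ETHER_ALIGN is 2):
	 * m_adj() below advances m_data by 4 bytes so the chip sees a
	 * 32-bit aligned buffer; on strict-alignment hosts vge_fixup_rx()
	 * later slides the received frame back 2 bytes so the IP header
	 * behind the 14-byte Ethernet header ends up aligned.
	 */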
1202	m->m_len = m->m_pkthdr.len = MCLBYTES;
1203	m_adj(m, VGE_RX_BUF_ALIGN);
1204
1205	if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
1206	    sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1207		m_freem(m);
1208		return (ENOBUFS);
1209	}
1210	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1211
1212	rxd = &sc->vge_cdata.vge_rxdesc[prod];
1213	if (rxd->rx_m != NULL) {
1214		bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
1215		    BUS_DMASYNC_POSTREAD);
1216		bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
1217	}
1218	map = rxd->rx_dmamap;
1219	rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
1220	sc->vge_cdata.vge_rx_sparemap = map;
1221	bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
1222	    BUS_DMASYNC_PREREAD);
1223	rxd->rx_m = m;
1224
1225	rxd->rx_desc->vge_sts = 0;
1226	rxd->rx_desc->vge_ctl = 0;
1227	rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
1228	rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
1229	    (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);
1230
1231	/*
1232	 * Note: the manual fails to document the fact that for
1233	 * proper operation, the driver needs to replenish the RX
1234	 * DMA ring 4 descriptors at a time (rather than one at a
1235	 * time, like most chips). We can allocate the new buffers
1236	 * but we should not set the OWN bits until we're ready
1237	 * to hand back 4 of them in one shot.
1238	 */
1239	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1240		for (i = VGE_RXCHUNK; i > 0; i--) {
1241			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1242			rxd = rxd->rxd_prev;
1243		}
1244		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1245	}
1246
1247	return (0);
1248}
1249
1250static int
1251vge_tx_list_init(sc)
1252	struct vge_softc	*sc;
1253{
1254	struct vge_ring_data	*rd;
1255	struct vge_txdesc	*txd;
1256	int			i;
1257
1258	VGE_LOCK_ASSERT(sc);
1259
1260	sc->vge_cdata.vge_tx_prodidx = 0;
1261	sc->vge_cdata.vge_tx_considx = 0;
1262	sc->vge_cdata.vge_tx_cnt = 0;
1263
1264	rd = &sc->vge_rdata;
1265	bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
1266	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1267		txd = &sc->vge_cdata.vge_txdesc[i];
1268		txd->tx_m = NULL;
1269		txd->tx_desc = &rd->vge_tx_ring[i];
1270	}
1271
1272	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1273	    sc->vge_cdata.vge_tx_ring_map,
1274	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1275
1276	return (0);
1277}
1278
1279static int
1280vge_rx_list_init(sc)
1281	struct vge_softc	*sc;
1282{
1283	struct vge_ring_data	*rd;
1284	struct vge_rxdesc	*rxd;
1285	int			i;
1286
1287	VGE_LOCK_ASSERT(sc);
1288
1289	sc->vge_cdata.vge_rx_prodidx = 0;
1290	sc->vge_cdata.vge_head = NULL;
1291	sc->vge_cdata.vge_tail = NULL;
1292	sc->vge_cdata.vge_rx_commit = 0;
1293
1294	rd = &sc->vge_rdata;
1295	bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
1296	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1297		rxd = &sc->vge_cdata.vge_rxdesc[i];
1298		rxd->rx_m = NULL;
1299		rxd->rx_desc = &rd->vge_rx_ring[i];
1300		if (i == 0)
1301			rxd->rxd_prev =
1302			    &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1];
1303		else
1304			rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1];
1305		if (vge_newbuf(sc, i) != 0)
1306			return (ENOBUFS);
1307	}
1308
1309	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1310	    sc->vge_cdata.vge_rx_ring_map,
1311	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1312
1313	sc->vge_cdata.vge_rx_commit = 0;
1314
1315	return (0);
1316}
1317
1318static void
1319vge_freebufs(sc)
1320	struct vge_softc	*sc;
1321{
1322	struct vge_txdesc	*txd;
1323	struct vge_rxdesc	*rxd;
1324	struct ifnet		*ifp;
1325	int			i;
1326
1327	VGE_LOCK_ASSERT(sc);
1328
1329	ifp = sc->vge_ifp;
1330	/*
1331	 * Free RX and TX mbufs still in the queues.
1332	 */
1333	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1334		rxd = &sc->vge_cdata.vge_rxdesc[i];
1335		if (rxd->rx_m != NULL) {
1336			bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
1337			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
1338			bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
1339			    rxd->rx_dmamap);
1340			m_freem(rxd->rx_m);
1341			rxd->rx_m = NULL;
1342		}
1343	}
1344
1345	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1346		txd = &sc->vge_cdata.vge_txdesc[i];
1347		if (txd->tx_m != NULL) {
1348			bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
1349			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1350			bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
1351			    txd->tx_dmamap);
1352			m_freem(txd->tx_m);
1353			txd->tx_m = NULL;
1354			ifp->if_oerrors++;
1355		}
1356	}
1357}
1358
1359#ifndef	__NO_STRICT_ALIGNMENT
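/*
 * Slide a received frame back ETHER_ALIGN (2) bytes, one 16-bit word at
 * a time, so its payload headers are naturally aligned on
 * strict-alignment machines; see the buffer setup in vge_newbuf() above.
 */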
1360static __inline void
1361vge_fixup_rx(m)
1362	struct mbuf		*m;
1363{
1364	int			i;
1365	uint16_t		*src, *dst;
1366
1367	src = mtod(m, uint16_t *);
1368	dst = src - 1;
1369
1370	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1371		*dst++ = *src++;
1372
1373	m->m_data -= ETHER_ALIGN;
1374}
1375#endif
1376
1377/*
1378 * RX handler. We support the reception of jumbo frames that have
1379 * been fragmented across multiple 2K mbuf cluster buffers.
1380 */
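/*
 * For chained frames, every buffer except the last one holds
 * MCLBYTES - VGE_RX_BUF_ALIGN bytes of data, so the length of the final
 * fragment is recovered below as total_len modulo that buffer size and
 * the trailing 4-byte CRC is trimmed from the end of the chain.
 */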
1381static int
1382vge_rxeof(sc, count)
1383	struct vge_softc	*sc;
1384	int			count;
1385{
1386	struct mbuf		*m;
1387	struct ifnet		*ifp;
1388	int			prod, prog, total_len;
1389	struct vge_rxdesc	*rxd;
1390	struct vge_rx_desc	*cur_rx;
1391	uint32_t		rxstat, rxctl;
1392
1393	VGE_LOCK_ASSERT(sc);
1394
1395	ifp = sc->vge_ifp;
1396
1397	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1398	    sc->vge_cdata.vge_rx_ring_map,
1399	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1400
1401	prod = sc->vge_cdata.vge_rx_prodidx;
1402	for (prog = 0; count > 0 &&
1403	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1404	    VGE_RX_DESC_INC(prod)) {
1405		cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
1406		rxstat = le32toh(cur_rx->vge_sts);
1407		if ((rxstat & VGE_RDSTS_OWN) != 0)
1408			break;
1409		count--;
1410		prog++;
1411		rxctl = le32toh(cur_rx->vge_ctl);
1412		total_len = VGE_RXBYTES(rxstat);
1413		rxd = &sc->vge_cdata.vge_rxdesc[prod];
1414		m = rxd->rx_m;
1415
1416		/*
1417		 * If the 'start of frame' bit is set, this indicates
1418		 * either the first fragment in a multi-fragment receive,
1419		 * or an intermediate fragment. Either way, we want to
1420		 * accumulate the buffers.
1421		 */
1422		if ((rxstat & VGE_RXPKT_SOF) != 0) {
1423			if (vge_newbuf(sc, prod) != 0) {
1424				ifp->if_iqdrops++;
1425				VGE_CHAIN_RESET(sc);
1426				vge_discard_rxbuf(sc, prod);
1427				continue;
1428			}
1429			m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
1430			if (sc->vge_cdata.vge_head == NULL) {
1431				sc->vge_cdata.vge_head = m;
1432				sc->vge_cdata.vge_tail = m;
1433			} else {
1434				m->m_flags &= ~M_PKTHDR;
1435				sc->vge_cdata.vge_tail->m_next = m;
1436				sc->vge_cdata.vge_tail = m;
1437			}
1438			continue;
1439		}
1440
1441		/*
1442		 * Bad/error frames will have the RXOK bit cleared.
1443		 * However, there's one error case we want to allow:
1444		 * if a VLAN tagged frame arrives and the chip can't
1445		 * match it against the CAM filter, it considers this
1446		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1447		 * We don't want to drop the frame though: our VLAN
1448		 * filtering is done in software.
1449		 * We also want to receive frames with bad checksums
1450		 * and frames with bad lengths.
1451		 */
1452		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
1453		    (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
1454		    VGE_RDSTS_CSUMERR)) == 0) {
1455			ifp->if_ierrors++;
1456			/*
1457			 * If this is part of a multi-fragment packet,
1458			 * discard all the pieces.
1459			 */
1460			VGE_CHAIN_RESET(sc);
1461			vge_discard_rxbuf(sc, prod);
1462			continue;
1463		}
1464
1465		if (vge_newbuf(sc, prod) != 0) {
1466			ifp->if_iqdrops++;
1467			VGE_CHAIN_RESET(sc);
1468			vge_discard_rxbuf(sc, prod);
1469			continue;
1470		}
1471
1472		/* Chain received mbufs. */
1473		if (sc->vge_cdata.vge_head != NULL) {
1474			m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
1475			/*
1476			 * Special case: if there are 4 bytes or fewer
1477			 * in this buffer, the mbuf can be discarded:
1478			 * the last 4 bytes are the CRC, which we don't
1479			 * care about anyway.
1480			 */
1481			if (m->m_len <= ETHER_CRC_LEN) {
1482				sc->vge_cdata.vge_tail->m_len -=
1483				    (ETHER_CRC_LEN - m->m_len);
1484				m_freem(m);
1485			} else {
1486				m->m_len -= ETHER_CRC_LEN;
1487				m->m_flags &= ~M_PKTHDR;
1488				sc->vge_cdata.vge_tail->m_next = m;
1489			}
1490			m = sc->vge_cdata.vge_head;
1491			m->m_flags |= M_PKTHDR;
1492			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1493		} else {
1494			m->m_flags |= M_PKTHDR;
1495			m->m_pkthdr.len = m->m_len =
1496			    (total_len - ETHER_CRC_LEN);
1497		}
1498
1499#ifndef	__NO_STRICT_ALIGNMENT
1500		vge_fixup_rx(m);
1501#endif
1502		m->m_pkthdr.rcvif = ifp;
1503
1504		/* Do RX checksumming if enabled */
1505		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
1506		    (rxctl & VGE_RDCTL_FRAG) == 0) {
1507			/* Check IP header checksum */
1508			if ((rxctl & VGE_RDCTL_IPPKT) != 0)
1509				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1510			if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
1511				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1512
1513			/* Check TCP/UDP checksum */
1514			if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
1515			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
1516				m->m_pkthdr.csum_flags |=
1517				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1518				m->m_pkthdr.csum_data = 0xffff;
1519			}
1520		}
1521
1522		if ((rxstat & VGE_RDSTS_VTAG) != 0) {
1523			/*
1524			 * The 32-bit rxctl register is stored in little-endian
1525			 * order, but the 16-bit VLAN tag it carries is stored
1526			 * in big-endian (network) order, so we have to byte-swap it.
1527			 */
1528			m->m_pkthdr.ether_vtag =
1529			    bswap16(rxctl & VGE_RDCTL_VLANID);
1530			m->m_flags |= M_VLANTAG;
1531		}
1532
1533		VGE_UNLOCK(sc);
1534		(*ifp->if_input)(ifp, m);
1535		VGE_LOCK(sc);
1536		sc->vge_cdata.vge_head = NULL;
1537		sc->vge_cdata.vge_tail = NULL;
1538	}
1539
1540	if (prog > 0) {
1541		sc->vge_cdata.vge_rx_prodidx = prod;
1542		bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1543		    sc->vge_cdata.vge_rx_ring_map,
1544		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1545		/* Update residue counter. */
1546		if (sc->vge_cdata.vge_rx_commit != 0) {
1547			CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
1548			    sc->vge_cdata.vge_rx_commit);
1549			sc->vge_cdata.vge_rx_commit = 0;
1550		}
1551	}
1552	return (prog);
1553}
1554
1555static void
1556vge_txeof(sc)
1557	struct vge_softc	*sc;
1558{
1559	struct ifnet		*ifp;
1560	struct vge_tx_desc	*cur_tx;
1561	struct vge_txdesc	*txd;
1562	uint32_t		txstat;
1563	int			cons, prod;
1564
1565	VGE_LOCK_ASSERT(sc);
1566
1567	ifp = sc->vge_ifp;
1568
1569	if (sc->vge_cdata.vge_tx_cnt == 0)
1570		return;
1571
1572	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1573	    sc->vge_cdata.vge_tx_ring_map,
1574	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1575
1576	/*
1577	 * Go through our tx list and free mbufs for those
1578	 * frames that have been transmitted.
1579	 */
1580	cons = sc->vge_cdata.vge_tx_considx;
1581	prod = sc->vge_cdata.vge_tx_prodidx;
1582	for (; cons != prod; VGE_TX_DESC_INC(cons)) {
1583		cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
1584		txstat = le32toh(cur_tx->vge_sts);
1585		if ((txstat & VGE_TDSTS_OWN) != 0)
1586			break;
1587		sc->vge_cdata.vge_tx_cnt--;
1588		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1589
1590		txd = &sc->vge_cdata.vge_txdesc[cons];
1591		bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1592		    BUS_DMASYNC_POSTWRITE);
1593		bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);
1594
1595		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
1596		    __func__));
1597		m_freem(txd->tx_m);
1598		txd->tx_m = NULL;
1599		txd->tx_desc->vge_frag[0].vge_addrhi = 0;
1600	}
1601	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1602	    sc->vge_cdata.vge_tx_ring_map,
1603	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1604	sc->vge_cdata.vge_tx_considx = cons;
1605	if (sc->vge_cdata.vge_tx_cnt == 0)
1606		sc->vge_timer = 0;
1607	else {
1608		/*
1609		 * If not all descriptors have been reaped yet,
1610		 * reload the timer so that we will eventually get another
1611		 * interrupt that will cause us to re-enter this routine.
1612		 * This is done in case the transmitter has gone idle.
1613		 */
1614		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1615	}
1616}
1617
1618static void
1619vge_tick(xsc)
1620	void			*xsc;
1621{
1622	struct vge_softc	*sc;
1623	struct ifnet		*ifp;
1624	struct mii_data		*mii;
1625
1626	sc = xsc;
1627	ifp = sc->vge_ifp;
1628	VGE_LOCK_ASSERT(sc);
1629	mii = device_get_softc(sc->vge_miibus);
1630
1631	mii_tick(mii);
1632	if (sc->vge_link) {
1633		if (!(mii->mii_media_status & IFM_ACTIVE)) {
1634			sc->vge_link = 0;
1635			if_link_state_change(sc->vge_ifp,
1636			    LINK_STATE_DOWN);
1637		}
1638	} else {
1639		if (mii->mii_media_status & IFM_ACTIVE &&
1640		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1641			sc->vge_link = 1;
1642			if_link_state_change(sc->vge_ifp,
1643			    LINK_STATE_UP);
1644			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1645				vge_start_locked(ifp);
1646		}
1647	}
1648
1649	return;
1650}
1651
1652#ifdef DEVICE_POLLING
1653static int
1654vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
1655{
1656	struct vge_softc *sc = ifp->if_softc;
1657	int rx_npkts = 0;
1658
1659	VGE_LOCK(sc);
1660	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1661		goto done;
1662
1663	rx_npkts = vge_rxeof(sc, count);
1664	vge_txeof(sc);
1665
1666	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1667		vge_start_locked(ifp);
1668
1669	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1670		u_int32_t       status;
1671		status = CSR_READ_4(sc, VGE_ISR);
1672		if (status == 0xFFFFFFFF)
1673			goto done;
1674		if (status)
1675			CSR_WRITE_4(sc, VGE_ISR, status);
1676
1677		/*
1678		 * XXX check behaviour on receiver stalls.
1679		 */
1680
1681		if (status & VGE_ISR_TXDMA_STALL ||
1682		    status & VGE_ISR_RXDMA_STALL) {
1683			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1684			vge_init_locked(sc);
1685		}
1686
1687		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1688			vge_rxeof(sc, count);
1689			ifp->if_ierrors++;
1690			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1691			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1692		}
1693	}
1694done:
1695	VGE_UNLOCK(sc);
1696	return (rx_npkts);
1697}
1698#endif /* DEVICE_POLLING */
1699
1700static void
1701vge_intr(arg)
1702	void			*arg;
1703{
1704	struct vge_softc	*sc;
1705	struct ifnet		*ifp;
1706	u_int32_t		status;
1707
1708	sc = arg;
1709
1710	if (sc->suspended) {
1711		return;
1712	}
1713
1714	VGE_LOCK(sc);
1715	ifp = sc->vge_ifp;
1716
1717	if (!(ifp->if_flags & IFF_UP)) {
1718		VGE_UNLOCK(sc);
1719		return;
1720	}
1721
1722#ifdef DEVICE_POLLING
1723	if  (ifp->if_capenable & IFCAP_POLLING) {
1724		VGE_UNLOCK(sc);
1725		return;
1726	}
1727#endif
1728
1729	/* Disable interrupts */
1730	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1731
1732	for (;;) {
1733
1734		status = CSR_READ_4(sc, VGE_ISR);
1735		/* If the card has gone away the read returns 0xffffffff. */
1736		if (status == 0xFFFFFFFF)
1737			break;
1738
1739		if (status)
1740			CSR_WRITE_4(sc, VGE_ISR, status);
1741
1742		if ((status & VGE_INTRS) == 0)
1743			break;
1744
1745		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1746			vge_rxeof(sc, VGE_RX_DESC_CNT);
1747
1748		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1749			vge_rxeof(sc, VGE_RX_DESC_CNT);
1750			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1751			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1752		}
1753
1754		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
1755			vge_txeof(sc);
1756
1757		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
1758			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1759			vge_init_locked(sc);
1760		}
1761
1762		if (status & VGE_ISR_LINKSTS)
1763			vge_tick(sc);
1764	}
1765
1766	/* Re-enable interrupts */
1767	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1768
1769	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1770		vge_start_locked(ifp);
1771
1772	VGE_UNLOCK(sc);
1773
1774	return;
1775}
1776
1777static int
1778vge_encap(sc, m_head)
1779	struct vge_softc	*sc;
1780	struct mbuf		**m_head;
1781{
1782	struct vge_txdesc	*txd;
1783	struct vge_tx_frag	*frag;
1784	struct mbuf		*m;
1785	bus_dma_segment_t	txsegs[VGE_MAXTXSEGS];
1786	int			error, i, nsegs, padlen;
1787	uint32_t		cflags;
1788
1789	VGE_LOCK_ASSERT(sc);
1790
1791	M_ASSERTPKTHDR((*m_head));
1792
1793	/* Argh. This chip does not autopad short frames. */
1794	if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
1795		m = *m_head;
1796		padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
1797		if (M_WRITABLE(m) == 0) {
1798			/* Get a writable copy. */
1799			m = m_dup(*m_head, M_DONTWAIT);
1800			m_freem(*m_head);
1801			if (m == NULL) {
1802				*m_head = NULL;
1803				return (ENOBUFS);
1804			}
1805			*m_head = m;
1806		}
1807		if (M_TRAILINGSPACE(m) < padlen) {
1808			m = m_defrag(m, M_DONTWAIT);
1809			if (m == NULL) {
1810				m_freem(*m_head);
1811				*m_head = NULL;
1812				return (ENOBUFS);
1813			}
1814		}
1815		/*
1816		 * Manually pad short frames, and zero the pad space
1817		 * to avoid leaking data.
1818		 */
1819		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1820		m->m_pkthdr.len += padlen;
1821		m->m_len = m->m_pkthdr.len;
1822		*m_head = m;
1823	}
1824
1825	txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];
1826
1827	error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1828	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1829	if (error == EFBIG) {
1830		m = m_collapse(*m_head, M_DONTWAIT, VGE_MAXTXSEGS);
1831		if (m == NULL) {
1832			m_freem(*m_head);
1833			*m_head = NULL;
1834			return (ENOMEM);
1835		}
1836		*m_head = m;
1837		error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1838		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1839		if (error != 0) {
1840			m_freem(*m_head);
1841			*m_head = NULL;
1842			return (error);
1843		}
1844	} else if (error != 0)
1845		return (error);
1846	bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1847	    BUS_DMASYNC_PREWRITE);
1848
1849	m = *m_head;
1850	cflags = 0;
1851
1852	/* Configure checksum offload. */
1853	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1854		cflags |= VGE_TDCTL_IPCSUM;
1855	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1856		cflags |= VGE_TDCTL_TCPCSUM;
1857	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1858		cflags |= VGE_TDCTL_UDPCSUM;
1859
1860	/* Configure VLAN. */
1861	if ((m->m_flags & M_VLANTAG) != 0)
1862		cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
1863	txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);
1864	/*
1865	 * XXX
1866	 * Velocity family seems to support TSO but no information
1867	 * for MSS configuration is available. Also the number of
1868	 * fragments supported by a descriptor is too small to hold
1869	 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
1870	 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build
1871	 * longer chain of buffers but no additional information is
1872	 * available.
1873	 *
1874	 * When telling the chip how many segments there are, we
1875	 * must use nsegs + 1 instead of just nsegs. Darned if I
1876	 * know why. This also means we can't use the last fragment
1877	 * field of Tx descriptor.
1878	 */
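	/*
	 * So a frame that maps to 3 DMA segments is programmed with a
	 * segment count of 4 in the upper bits of vge_ctl, while only the
	 * three real fragments are filled in below.
	 */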
1879	txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
1880	    VGE_TD_LS_NORM);
1881	for (i = 0; i < nsegs; i++) {
1882		frag = &txd->tx_desc->vge_frag[i];
1883		frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
1884		frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
1885		    (VGE_BUFLEN(txsegs[i].ds_len) << 16));
1886	}
1887
1888	sc->vge_cdata.vge_tx_cnt++;
1889	VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);
1890
1891	/*
1892	 * Finally request interrupt and give the first descriptor
1893	 * ownership to hardware.
1894	 */
1895	txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
1896	txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
1897	txd->tx_m = m;
1898
1899	return (0);
1900}
1901
1902/*
1903 * Main transmit routine.
1904 */
1905
1906static void
1907vge_start(ifp)
1908	struct ifnet		*ifp;
1909{
1910	struct vge_softc	*sc;
1911
1912	sc = ifp->if_softc;
1913	VGE_LOCK(sc);
1914	vge_start_locked(ifp);
1915	VGE_UNLOCK(sc);
1916}
1917
1918
1919static void
1920vge_start_locked(ifp)
1921	struct ifnet		*ifp;
1922{
1923	struct vge_softc	*sc;
1924	struct vge_txdesc	*txd;
1925	struct mbuf		*m_head;
1926	int			enq, idx;
1927
1928	sc = ifp->if_softc;
1929
1930	VGE_LOCK_ASSERT(sc);
1931
1932	if (sc->vge_link == 0 ||
1933	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1934	    IFF_DRV_RUNNING)
1935		return;
1936
1937	idx = sc->vge_cdata.vge_tx_prodidx;
1938	VGE_TX_DESC_DEC(idx);
1939	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1940	    sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
1941		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1942		if (m_head == NULL)
1943			break;
1944		/*
1945		 * Pack the data into the transmit ring. If we
1946		 * don't have room, set the OACTIVE flag and wait
1947		 * for the NIC to drain the ring.
1948		 */
1949		if (vge_encap(sc, &m_head)) {
1950			if (m_head == NULL)
1951				break;
1952			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1953			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1954			break;
1955		}
1956
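		/*
		 * Link this frame to the previously queued descriptor:
		 * 'idx' still trails the producer index that vge_encap()
		 * just advanced, so setting the Q bit in that earlier
		 * descriptor's first fragment marks it as having a
		 * successor in the ring.
		 */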
1957		txd = &sc->vge_cdata.vge_txdesc[idx];
1958		txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
1959		VGE_TX_DESC_INC(idx);
1960
1961		enq++;
1962		/*
1963		 * If there's a BPF listener, bounce a copy of this frame
1964		 * to him.
1965		 */
1966		ETHER_BPF_MTAP(ifp, m_head);
1967	}
1968
1969	if (enq > 0) {
1970		bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1971		    sc->vge_cdata.vge_tx_ring_map,
1972		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1973		/* Issue a transmit command. */
1974		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1975		/*
1976		 * Use the countdown timer for interrupt moderation.
1977		 * 'TX done' interrupts are disabled. Instead, we reset the
1978		 * countdown timer, which will begin counting until it hits
1979		 * the value in the SSTIMER register, and then trigger an
1980		 * interrupt. Each time we set the TIMER0_ENABLE bit, the
1981		 * timer count is reloaded. Only when the transmitter
1982		 * is idle will the timer hit 0 and an interrupt fire.
1983		 */
1984		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1985
1986		/*
1987		 * Set a timeout in case the chip goes out to lunch.
1988		 */
1989		sc->vge_timer = 5;
1990	}
1991}
1992
1993static void
1994vge_init(xsc)
1995	void			*xsc;
1996{
1997	struct vge_softc	*sc = xsc;
1998
1999	VGE_LOCK(sc);
2000	vge_init_locked(sc);
2001	VGE_UNLOCK(sc);
2002}
2003
2004static void
2005vge_init_locked(struct vge_softc *sc)
2006{
2007	struct ifnet		*ifp = sc->vge_ifp;
2008	struct mii_data		*mii;
2009	int			error, i;
2010
2011	VGE_LOCK_ASSERT(sc);
2012	mii = device_get_softc(sc->vge_miibus);
2013
2014	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2015		return;
2016
2017	/*
2018	 * Cancel pending I/O and free all RX/TX buffers.
2019	 */
2020	vge_stop(sc);
2021	vge_reset(sc);
2022
2023	/*
2024	 * Initialize the RX and TX descriptors and mbufs.
2025	 */
2026
2027	error = vge_rx_list_init(sc);
2028	if (error != 0) {
2029		device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
2030		return;
2031	}
2032	vge_tx_list_init(sc);
2033
2034	/* Set our station address */
2035	for (i = 0; i < ETHER_ADDR_LEN; i++)
2036		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);
2037
2038	/*
2039	 * Set receive FIFO threshold. Also allow transmission and
2040	 * reception of VLAN tagged frames.
2041	 */
2042	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
2043	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
2044
2045	/* Set DMA burst length */
2046	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
2047	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
2048
2049	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
2050
2051	/* Set collision backoff algorithm */
2052	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
2053	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
2054	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
2055
2056	/* Disable LPSEL field in priority resolution */
2057	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
2058
2059	/*
2060	 * Load the addresses of the DMA queues into the chip.
2061	 * Note that we only use one transmit queue.
2062	 */
2063
2064	CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
2065	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
2066	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
2067	    VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
2068	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
2069
2070	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
2071	    VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
2072	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
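	/*
	 * The residue count tells the chip how many RX descriptors are
	 * currently available to it.
	 */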
2073	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
2074
2075	/* Enable and wake up the RX descriptor queue */
2076	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
2077	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
2078
2079	/* Enable the TX descriptor queue */
2080	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
2081
2082	/* Set up the receive filter -- allow large frames for VLANs. */
2083	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
2084
2085	/* If we want promiscuous mode, set the allframes bit. */
2086	if (ifp->if_flags & IFF_PROMISC) {
2087		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
2088	}
2089
2090	/* Set capture broadcast bit to capture broadcast frames. */
2091	if (ifp->if_flags & IFF_BROADCAST) {
2092		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
2093	}
2094
2095	/* Set multicast bit to capture multicast frames. */
2096	if (ifp->if_flags & IFF_MULTICAST) {
2097		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
2098	}
2099
2100	/* Init the cam filter. */
2101	vge_cam_clear(sc);
2102
2103	/* Init the multicast filter. */
2104	vge_setmulti(sc);
2105
2106	/* Enable flow control */
2107
2108	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
2109
2110	/* Enable jumbo frame reception (if desired) */
2111
2112	/* Start the MAC. */
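	/* Writing CR0_STOP to the "clear" register clears the stop bit. */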
2113	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
2114	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
2115	CSR_WRITE_1(sc, VGE_CRS0,
2116	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
2117
2118	/*
2119	 * Configure one-shot timer for microsecond
2120	 * resolution and load it for 400 usecs.
2121	 */
2122	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
2123	CSR_WRITE_2(sc, VGE_SSTIMER, 400);
2124
2125	/*
2126	 * Configure interrupt moderation for receive. Enable
2127	 * the holdoff counter and load it, and set the RX
2128	 * suppression count to the number of descriptors we
2129	 * want to allow before triggering an interrupt.
2130	 * The holdoff timer is in units of 20 usecs.
2131	 */
2132
2133#ifdef notyet
2134	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
2135	/* Select the interrupt holdoff timer page. */
2136	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2137	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
2138	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
2139
2140	/* Enable use of the holdoff timer. */
2141	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
2142	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
2143
2144	/* Select the RX suppression threshold page. */
2145	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2146	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
2147	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
2148
2149	/* Restore the page select bits. */
2150	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2151	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
2152#endif
2153
2154#ifdef DEVICE_POLLING
2155	/*
2156	 * Disable interrupts if we are polling.
2157	 */
2158	if (ifp->if_capenable & IFCAP_POLLING) {
2159		CSR_WRITE_4(sc, VGE_IMR, 0);
2160		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2161	} else	/* otherwise ... */
2162#endif
2163	{
2164		/*
2165		 * Enable interrupts.
2166		 */
2167		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2168		CSR_WRITE_4(sc, VGE_ISR, 0);
2169		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2170	}
2171
2172	mii_mediachg(mii);
2173
2174	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2175	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2176	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
2177
2178	sc->vge_link = 0;
2179}
2180
2181/*
2182 * Set media options.
2183 */
2184static int
2185vge_ifmedia_upd(ifp)
2186	struct ifnet		*ifp;
2187{
2188	struct vge_softc	*sc;
2189	struct mii_data		*mii;
2190
2191	sc = ifp->if_softc;
2192	VGE_LOCK(sc);
2193	mii = device_get_softc(sc->vge_miibus);
2194	mii_mediachg(mii);
2195	VGE_UNLOCK(sc);
2196
2197	return (0);
2198}
2199
2200/*
2201 * Report current media status.
2202 */
2203static void
2204vge_ifmedia_sts(ifp, ifmr)
2205	struct ifnet		*ifp;
2206	struct ifmediareq	*ifmr;
2207{
2208	struct vge_softc	*sc;
2209	struct mii_data		*mii;
2210
2211	sc = ifp->if_softc;
2212	mii = device_get_softc(sc->vge_miibus);
2213
2214	VGE_LOCK(sc);
2215	mii_pollstat(mii);
2216	ifmr->ifm_active = mii->mii_media_active;
2217	ifmr->ifm_status = mii->mii_media_status;
2218	VGE_UNLOCK(sc);
2219
2220	return;
2221}
2222
2223static void
2224vge_miibus_statchg(dev)
2225	device_t		dev;
2226{
2227	struct vge_softc	*sc;
2228	struct mii_data		*mii;
2229	struct ifmedia_entry	*ife;
2230
2231	sc = device_get_softc(dev);
2232	mii = device_get_softc(sc->vge_miibus);
2233	ife = mii->mii_media.ifm_cur;
2234
2235	/*
2236	 * If the user manually selects a media mode, we need to turn
2237	 * on the forced MAC mode bit in the DIAGCTL register. If the
2238	 * user happens to choose a full duplex mode, we also need to
2239	 * set the 'force full duplex' bit. This applies only to
2240	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
2241	 * mode is disabled, and in 1000baseT mode, full duplex is
2242	 * always implied, so we turn on the forced mode bit but leave
2243	 * the FDX bit cleared.
2244	 */
2245
2246	switch (IFM_SUBTYPE(ife->ifm_media)) {
2247	case IFM_AUTO:
2248		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2249		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2250		break;
2251	case IFM_1000_T:
2252		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2253		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2254		break;
2255	case IFM_100_TX:
2256	case IFM_10_T:
2257		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2258		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
2259			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2260		} else {
2261			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2262		}
2263		break;
2264	default:
2265		device_printf(dev, "unknown media type: %x\n",
2266		    IFM_SUBTYPE(ife->ifm_media));
2267		break;
2268	}
2269
2270	return;
2271}
2272
2273static int
2274vge_ioctl(ifp, command, data)
2275	struct ifnet		*ifp;
2276	u_long			command;
2277	caddr_t			data;
2278{
2279	struct vge_softc	*sc = ifp->if_softc;
2280	struct ifreq		*ifr = (struct ifreq *) data;
2281	struct mii_data		*mii;
2282	int			error = 0;
2283
2284	switch (command) {
2285	case SIOCSIFMTU:
2286		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
2287			error = EINVAL;
		else
2288			ifp->if_mtu = ifr->ifr_mtu;
2289		break;
2290	case SIOCSIFFLAGS:
2291		VGE_LOCK(sc);
2292		if (ifp->if_flags & IFF_UP) {
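			/*
			 * If only the PROMISC flag changed while the
			 * interface is running, just toggle the receive
			 * filter; any other change reinitializes the chip.
			 */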
2293			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2294			    ifp->if_flags & IFF_PROMISC &&
2295			    !(sc->vge_if_flags & IFF_PROMISC)) {
2296				CSR_SETBIT_1(sc, VGE_RXCTL,
2297				    VGE_RXCTL_RX_PROMISC);
2298				vge_setmulti(sc);
2299			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2300			    !(ifp->if_flags & IFF_PROMISC) &&
2301			    sc->vge_if_flags & IFF_PROMISC) {
2302				CSR_CLRBIT_1(sc, VGE_RXCTL,
2303				    VGE_RXCTL_RX_PROMISC);
2304				vge_setmulti(sc);
2305			} else
2306				vge_init_locked(sc);
2307		} else {
2308			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2309				vge_stop(sc);
2310		}
2311		sc->vge_if_flags = ifp->if_flags;
2312		VGE_UNLOCK(sc);
2313		break;
2314	case SIOCADDMULTI:
2315	case SIOCDELMULTI:
2316		VGE_LOCK(sc);
2317		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2318			vge_setmulti(sc);
2319		VGE_UNLOCK(sc);
2320		break;
2321	case SIOCGIFMEDIA:
2322	case SIOCSIFMEDIA:
2323		mii = device_get_softc(sc->vge_miibus);
2324		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2325		break;
2326	case SIOCSIFCAP:
2327	    {
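		/* Determine which capability bits are being toggled. */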
2328		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2329#ifdef DEVICE_POLLING
2330		if (mask & IFCAP_POLLING) {
2331			if (ifr->ifr_reqcap & IFCAP_POLLING) {
2332				error = ether_poll_register(vge_poll, ifp);
2333				if (error)
2334					return(error);
2335					return (error);
2336					/* Disable interrupts */
2337				/* Disable interrupts. */
2338				CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2339				ifp->if_capenable |= IFCAP_POLLING;
2340				VGE_UNLOCK(sc);
2341			} else {
2342				error = ether_poll_deregister(ifp);
2343				/* Enable interrupts. */
2344				VGE_LOCK(sc);
2345				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2346				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2347				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2348				ifp->if_capenable &= ~IFCAP_POLLING;
2349				VGE_UNLOCK(sc);
2350			}
2351		}
2352#endif /* DEVICE_POLLING */
2353		VGE_LOCK(sc);
2354		if ((mask & IFCAP_TXCSUM) != 0 &&
2355		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2356			ifp->if_capenable ^= IFCAP_TXCSUM;
2357			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2358				ifp->if_hwassist |= VGE_CSUM_FEATURES;
2359			else
2360				ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
2361		}
2362		if ((mask & IFCAP_RXCSUM) != 0 &&
2363		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
2364			ifp->if_capenable ^= IFCAP_RXCSUM;
2365		VGE_UNLOCK(sc);
2366	    }
2367		break;
2368	default:
2369		error = ether_ioctl(ifp, command, data);
2370		break;
2371	}
2372
2373	return (error);
2374}
2375
2376static void
2377vge_watchdog(void *arg)
2378{
2379	struct vge_softc *sc;
2380	struct ifnet *ifp;
2381
2382	sc = arg;
2383	VGE_LOCK_ASSERT(sc);
2384	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
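	/* Return if no TX timeout is armed or it has not yet expired. */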
2385	if (sc->vge_timer == 0 || --sc->vge_timer > 0)
2386		return;
2387
2388	ifp = sc->vge_ifp;
2389	if_printf(ifp, "watchdog timeout\n");
2390	ifp->if_oerrors++;
2391
2392	vge_txeof(sc);
2393	vge_rxeof(sc, VGE_RX_DESC_CNT);
2394
2395	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2396	vge_init_locked(sc);
2397
2398	return;
2399}
2400
2401/*
2402 * Stop the adapter and free any mbufs allocated to the
2403 * RX and TX lists.
2404 */
2405static void
2406vge_stop(sc)
2407	struct vge_softc		*sc;
2408{
2409	struct ifnet		*ifp;
2410
2411	VGE_LOCK_ASSERT(sc);
2412	ifp = sc->vge_ifp;
2413	sc->vge_timer = 0;
2414	callout_stop(&sc->vge_watchdog);
2415
2416	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2417
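	/*
	 * Mask the global interrupt, stop the MAC, ack any pending
	 * interrupts and shut down the TX/RX descriptor queues.
	 */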
2418	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2419	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2420	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2421	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2422	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2423	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2424
2425	VGE_CHAIN_RESET(sc);
2426	vge_txeof(sc);
2427	vge_freebufs(sc);
2428}
2429
2430/*
2431 * Device suspend routine.  Stop the interface and save some PCI
2432 * settings in case the BIOS doesn't restore them properly on
2433 * resume.
2434 */
2435static int
2436vge_suspend(dev)
2437	device_t		dev;
2438{
2439	struct vge_softc	*sc;
2440
2441	sc = device_get_softc(dev);
2442
2443	VGE_LOCK(sc);
2444	vge_stop(sc);
2445
2446	sc->suspended = 1;
2447	VGE_UNLOCK(sc);
2448
2449	return (0);
2450}
2451
2452/*
2453 * Device resume routine.  Restore some PCI settings in case the BIOS
2454 * doesn't, re-enable busmastering, and restart the interface if
2455 * appropriate.
2456 */
2457static int
2458vge_resume(dev)
2459	device_t		dev;
2460{
2461	struct vge_softc	*sc;
2462	struct ifnet		*ifp;
2463
2464	sc = device_get_softc(dev);
2465	ifp = sc->vge_ifp;
2466
2467	/* reenable busmastering and memory space access */
2468	pci_enable_busmaster(dev);
2469	pci_enable_io(dev, SYS_RES_MEMORY);
2470
2471	/* reinitialize interface if necessary */
2472	VGE_LOCK(sc);
2473	if (ifp->if_flags & IFF_UP) {
2474		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2475		vge_init_locked(sc);
2476	}
2477	sc->suspended = 0;
2478	VGE_UNLOCK(sc);
2479
2480	return (0);
2481}
2482
2483/*
2484 * Stop all chip I/O so that the kernel's probe routines don't
2485 * get confused by errant DMAs when rebooting.
2486 */
2487static int
2488vge_shutdown(dev)
2489	device_t		dev;
2490{
2491	struct vge_softc		*sc;
2492
2493	sc = device_get_softc(dev);
2494
2495	VGE_LOCK(sc);
2496	vge_stop(sc);
2497	VGE_UNLOCK(sc);
2498
2499	return (0);
2500}
2501