if_vge.c revision 200526
1/*-
2 * Copyright (c) 2004
3 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/dev/vge/if_vge.c 200526 2009-12-14 18:49:16Z yongari $");
35
36/*
37 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
38 *
39 * Written by Bill Paul <wpaul@windriver.com>
40 * Senior Networking Software Engineer
41 * Wind River Systems
42 */
43
44/*
45 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
46 * combines a tri-speed ethernet MAC and PHY, with the following
47 * features:
48 *
49 *	o Jumbo frame support up to 16K
50 *	o Transmit and receive flow control
51 *	o IPv4 checksum offload
52 *	o VLAN tag insertion and stripping
53 *	o TCP large send
54 *	o 64-bit multicast hash table filter
55 *	o 64 entry CAM filter
56 *	o 16K RX FIFO and 48K TX FIFO memory
57 *	o Interrupt moderation
58 *
59 * The VT6122 supports up to four transmit DMA queues. The descriptors
60 * in the transmit ring can address up to 7 data fragments; frames which
61 * span more than 7 data buffers must be coalesced, but in general the
62 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
63 * long. The receive descriptors address only a single buffer.
64 *
65 * There are two peculiar design issues with the VT6122. One is that
66 * receive data buffers must be aligned on a 32-bit boundary. This is
67 * not a problem where the VT6122 is used as a LOM device in x86-based
68 * systems, but on architectures that generate unaligned access traps, we
69 * have to do some copying.
70 *
71 * The other issue has to do with the way 64-bit addresses are handled.
72 * The DMA descriptors only allow you to specify 48 bits of addressing
73 * information. The remaining 16 bits are specified using one of the
74 * I/O registers. If you only have a 32-bit system, then this isn't
75 * an issue, but if you have a 64-bit system and more than 4GB of
76 * memory, you must make sure your network data buffers reside
77 * in the same 48-bit 'segment.'
78 *
79 * Special thanks to Ryan Fu at VIA Networking for providing documentation
80 * and sample NICs for testing.
81 */
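
/*
 * Illustrative sketch (not part of the driver logic): a related constraint
 * is enforced for the descriptor rings in vge_dma_alloc() below, which
 * compares the upper address bits of the start and end of each ring and
 * falls back to a 32-bit DMA tag when they differ, along the lines of:
 *
 *	if (VGE_ADDR_HI(ring_paddr) != VGE_ADDR_HI(ring_paddr + ring_size))
 *		tear down and retry with a 32-bit DMA tag;
 *
 * where ring_paddr and ring_size stand in for the values actually
 * computed in vge_dma_alloc().
 */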
82
83#ifdef HAVE_KERNEL_OPTION_HEADERS
84#include "opt_device_polling.h"
85#endif
86
87#include <sys/param.h>
88#include <sys/endian.h>
89#include <sys/systm.h>
90#include <sys/sockio.h>
91#include <sys/mbuf.h>
92#include <sys/malloc.h>
93#include <sys/module.h>
94#include <sys/kernel.h>
95#include <sys/socket.h>
96
97#include <net/if.h>
98#include <net/if_arp.h>
99#include <net/ethernet.h>
100#include <net/if_dl.h>
101#include <net/if_media.h>
102#include <net/if_types.h>
103#include <net/if_vlan_var.h>
104
105#include <net/bpf.h>
106
107#include <machine/bus.h>
108#include <machine/resource.h>
109#include <sys/bus.h>
110#include <sys/rman.h>
111
112#include <dev/mii/mii.h>
113#include <dev/mii/miivar.h>
114
115#include <dev/pci/pcireg.h>
116#include <dev/pci/pcivar.h>
117
118MODULE_DEPEND(vge, pci, 1, 1, 1);
119MODULE_DEPEND(vge, ether, 1, 1, 1);
120MODULE_DEPEND(vge, miibus, 1, 1, 1);
121
122/* "device miibus" required.  See GENERIC if you get errors here. */
123#include "miibus_if.h"
124
125#include <dev/vge/if_vgereg.h>
126#include <dev/vge/if_vgevar.h>
127
128#define VGE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
129
130/*
131 * Various supported device vendors/types and their names.
132 */
133static struct vge_type vge_devs[] = {
134	{ VIA_VENDORID, VIA_DEVICEID_61XX,
135		"VIA Networking Gigabit Ethernet" },
136	{ 0, 0, NULL }
137};
138
139static int vge_probe		(device_t);
140static int vge_attach		(device_t);
141static int vge_detach		(device_t);
142
143static int vge_encap		(struct vge_softc *, struct mbuf **);
144
145static void vge_dmamap_cb	(void *, bus_dma_segment_t *, int, int);
146static int vge_dma_alloc	(struct vge_softc *);
147static void vge_dma_free	(struct vge_softc *);
148static void vge_discard_rxbuf	(struct vge_softc *, int);
149static int vge_newbuf		(struct vge_softc *, int);
150static int vge_rx_list_init	(struct vge_softc *);
151static int vge_tx_list_init	(struct vge_softc *);
152static void vge_freebufs	(struct vge_softc *);
153#ifndef __NO_STRICT_ALIGNMENT
154static __inline void vge_fixup_rx
155				(struct mbuf *);
156#endif
157static int vge_rxeof		(struct vge_softc *, int);
158static void vge_txeof		(struct vge_softc *);
159static void vge_intr		(void *);
160static void vge_tick		(void *);
161static void vge_start		(struct ifnet *);
162static void vge_start_locked	(struct ifnet *);
163static int vge_ioctl		(struct ifnet *, u_long, caddr_t);
164static void vge_init		(void *);
165static void vge_init_locked	(struct vge_softc *);
166static void vge_stop		(struct vge_softc *);
167static void vge_watchdog	(void *);
168static int vge_suspend		(device_t);
169static int vge_resume		(device_t);
170static int vge_shutdown		(device_t);
171static int vge_ifmedia_upd	(struct ifnet *);
172static void vge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
173
174#ifdef VGE_EEPROM
175static void vge_eeprom_getword	(struct vge_softc *, int, u_int16_t *);
176#endif
177static void vge_read_eeprom	(struct vge_softc *, caddr_t, int, int, int);
178
179static void vge_miipoll_start	(struct vge_softc *);
180static void vge_miipoll_stop	(struct vge_softc *);
181static int vge_miibus_readreg	(device_t, int, int);
182static int vge_miibus_writereg	(device_t, int, int, int);
183static void vge_miibus_statchg	(device_t);
184
185static void vge_cam_clear	(struct vge_softc *);
186static int vge_cam_set		(struct vge_softc *, uint8_t *);
187static void vge_setmulti	(struct vge_softc *);
188static void vge_reset		(struct vge_softc *);
189
190static device_method_t vge_methods[] = {
191	/* Device interface */
192	DEVMETHOD(device_probe,		vge_probe),
193	DEVMETHOD(device_attach,	vge_attach),
194	DEVMETHOD(device_detach,	vge_detach),
195	DEVMETHOD(device_suspend,	vge_suspend),
196	DEVMETHOD(device_resume,	vge_resume),
197	DEVMETHOD(device_shutdown,	vge_shutdown),
198
199	/* bus interface */
200	DEVMETHOD(bus_print_child,	bus_generic_print_child),
201	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
202
203	/* MII interface */
204	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
205	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
206	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),
207
208	{ 0, 0 }
209};
210
211static driver_t vge_driver = {
212	"vge",
213	vge_methods,
214	sizeof(struct vge_softc)
215};
216
217static devclass_t vge_devclass;
218
219DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
220DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
221
222#ifdef VGE_EEPROM
223/*
224 * Read a word of data stored in the EEPROM at address 'addr.'
225 */
226static void
227vge_eeprom_getword(sc, addr, dest)
228	struct vge_softc	*sc;
229	int			addr;
230	u_int16_t		*dest;
231{
232	int			i;
233	u_int16_t		word = 0;
234
235	/*
236	 * Enter EEPROM embedded programming mode. In order to
237	 * access the EEPROM at all, we first have to set the
238	 * EELOAD bit in the CHIPCFG2 register.
239	 */
240	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
241	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
242
243	/* Select the address of the word we want to read */
244	CSR_WRITE_1(sc, VGE_EEADDR, addr);
245
246	/* Issue read command */
247	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
248
249	/* Wait for the done bit to be set. */
250	for (i = 0; i < VGE_TIMEOUT; i++) {
251		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
252			break;
253	}
254
255	if (i == VGE_TIMEOUT) {
256		device_printf(sc->vge_dev, "EEPROM read timed out\n");
257		*dest = 0;
258		return;
259	}
260
261	/* Read the result */
262	word = CSR_READ_2(sc, VGE_EERDDAT);
263
264	/* Turn off EEPROM access mode. */
265	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
266	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
267
268	*dest = word;
269
270	return;
271}
272#endif
273
274/*
275 * Read a sequence of words from the EEPROM.
276 */
277static void
278vge_read_eeprom(sc, dest, off, cnt, swap)
279	struct vge_softc	*sc;
280	caddr_t			dest;
281	int			off;
282	int			cnt;
283	int			swap;
284{
285	int			i;
286#ifdef VGE_EEPROM
287	u_int16_t		word = 0, *ptr;
288
289	for (i = 0; i < cnt; i++) {
290		vge_eeprom_getword(sc, off + i, &word);
291		ptr = (u_int16_t *)(dest + (i * 2));
292		if (swap)
293			*ptr = ntohs(word);
294		else
295			*ptr = word;
296	}
297#else
298	for (i = 0; i < ETHER_ADDR_LEN; i++)
299		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
300#endif
301}
302
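/*
 * Turn off MII autopolling and wait for the MII interface to go idle.
 */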
303static void
304vge_miipoll_stop(sc)
305	struct vge_softc	*sc;
306{
307	int			i;
308
309	CSR_WRITE_1(sc, VGE_MIICMD, 0);
310
311	for (i = 0; i < VGE_TIMEOUT; i++) {
312		DELAY(1);
313		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
314			break;
315	}
316
317	if (i == VGE_TIMEOUT)
318		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
319
320	return;
321}
322
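/*
 * Re-enable MII autopolling: idle the MII interface first, then turn
 * automatic polling back on and verify that it actually started.
 */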
323static void
324vge_miipoll_start(sc)
325	struct vge_softc	*sc;
326{
327	int			i;
328
329	/* First, make sure we're idle. */
330
331	CSR_WRITE_1(sc, VGE_MIICMD, 0);
332	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
333
334	for (i = 0; i < VGE_TIMEOUT; i++) {
335		DELAY(1);
336		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
337			break;
338	}
339
340	if (i == VGE_TIMEOUT) {
341		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
342		return;
343	}
344
345	/* Now enable auto poll mode. */
346
347	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
348
349	/* And make sure it started. */
350
351	for (i = 0; i < VGE_TIMEOUT; i++) {
352		DELAY(1);
353		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
354			break;
355	}
356
357	if (i == VGE_TIMEOUT)
358		device_printf(sc->vge_dev, "failed to start MII autopoll\n");
359
360	return;
361}
362
363static int
364vge_miibus_readreg(dev, phy, reg)
365	device_t		dev;
366	int			phy, reg;
367{
368	struct vge_softc	*sc;
369	int			i;
370	u_int16_t		rval = 0;
371
372	sc = device_get_softc(dev);
373
374	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
375		return(0);
376
377	vge_miipoll_stop(sc);
378
379	/* Specify the register we want to read. */
380	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
381
382	/* Issue read command. */
383	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
384
385	/* Wait for the read command bit to self-clear. */
386	for (i = 0; i < VGE_TIMEOUT; i++) {
387		DELAY(1);
388		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
389			break;
390	}
391
392	if (i == VGE_TIMEOUT)
393		device_printf(sc->vge_dev, "MII read timed out\n");
394	else
395		rval = CSR_READ_2(sc, VGE_MIIDATA);
396
397	vge_miipoll_start(sc);
398
399	return (rval);
400}
401
402static int
403vge_miibus_writereg(dev, phy, reg, data)
404	device_t		dev;
405	int			phy, reg, data;
406{
407	struct vge_softc	*sc;
408	int			i, rval = 0;
409
410	sc = device_get_softc(dev);
411
412	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
413		return(0);
414
415	vge_miipoll_stop(sc);
416
417	/* Specify the register we want to write. */
418	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
419
420	/* Specify the data we want to write. */
421	CSR_WRITE_2(sc, VGE_MIIDATA, data);
422
423	/* Issue write command. */
424	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
425
426	/* Wait for the write command bit to self-clear. */
427	for (i = 0; i < VGE_TIMEOUT; i++) {
428		DELAY(1);
429		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
430			break;
431	}
432
433	if (i == VGE_TIMEOUT) {
434		device_printf(sc->vge_dev, "MII write timed out\n");
435		rval = EIO;
436	}
437
438	vge_miipoll_start(sc);
439
440	return (rval);
441}
442
443static void
444vge_cam_clear(sc)
445	struct vge_softc	*sc;
446{
447	int			i;
448
449	/*
450	 * Turn off all the mask bits. This tells the chip
451	 * that none of the entries in the CAM filter are valid.
452	 * Desired entries will be enabled as we fill the filter in.
453	 */
454
455	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
456	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
457	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
458	for (i = 0; i < 8; i++)
459		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
460
461	/* Clear the VLAN filter too. */
462
463	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
464	for (i = 0; i < 8; i++)
465		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
466
467	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
468	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
469	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
470
471	sc->vge_camidx = 0;
472
473	return;
474}
475
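/*
 * Program the next free CAM entry with the supplied station address
 * and set the corresponding valid bit in the CAM mask page.
 */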
476static int
477vge_cam_set(sc, addr)
478	struct vge_softc	*sc;
479	uint8_t			*addr;
480{
481	int			i, error = 0;
482
483	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
484		return(ENOSPC);
485
486	/* Select the CAM data page. */
487	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
488	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
489
490	/* Set the filter entry we want to update and enable writing. */
491	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
492
493	/* Write the address to the CAM registers */
494	for (i = 0; i < ETHER_ADDR_LEN; i++)
495		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
496
497	/* Issue a write command. */
498	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
499
500	/* Wait for it to clear. */
501	for (i = 0; i < VGE_TIMEOUT; i++) {
502		DELAY(1);
503		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
504			break;
505	}
506
507	if (i == VGE_TIMEOUT) {
508		device_printf(sc->vge_dev, "setting CAM filter failed\n");
509		error = EIO;
510		goto fail;
511	}
512
513	/* Select the CAM mask page. */
514	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
515	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
516
517	/* Set the mask bit that enables this filter. */
518	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
519	    1<<(sc->vge_camidx & 7));
520
521	sc->vge_camidx++;
522
523fail:
524	/* Turn off access to CAM. */
525	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
526	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
527	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
528
529	return (error);
530}
531
532/*
533 * Program the multicast filter. We use the 64-entry CAM filter
534 * for perfect filtering. If there are more than 64 multicast addresses,
535 * we use the hash filter instead.
536 */
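
/*
 * For illustration only (this mirrors the fallback path coded below):
 * when the CAM overflows, each address is hashed with the big-endian
 * CRC32 and the top 6 bits of the result select one of the 64 hash
 * bits split across the MAR0/MAR1 registers:
 *
 *	h = ether_crc32_be(addr, ETHER_ADDR_LEN) >> 26;
 *	if (h < 32)
 *		hashes[0] |= 1 << h;		-> VGE_MAR0
 *	else
 *		hashes[1] |= 1 << (h - 32);	-> VGE_MAR1
 */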
537static void
538vge_setmulti(sc)
539	struct vge_softc	*sc;
540{
541	struct ifnet		*ifp;
542	int			error = 0/*, h = 0*/;
543	struct ifmultiaddr	*ifma;
544	u_int32_t		h, hashes[2] = { 0, 0 };
545
546	VGE_LOCK_ASSERT(sc);
547
548	ifp = sc->vge_ifp;
549
550	/* First, zot all the multicast entries. */
551	vge_cam_clear(sc);
552	CSR_WRITE_4(sc, VGE_MAR0, 0);
553	CSR_WRITE_4(sc, VGE_MAR1, 0);
554
555	/*
556	 * If the user wants allmulti or promisc mode, enable reception
557	 * of all multicast frames.
558	 */
559	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
560		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
561		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
562		return;
563	}
564
565	/* Now program new ones */
566	if_maddr_rlock(ifp);
567	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
568		if (ifma->ifma_addr->sa_family != AF_LINK)
569			continue;
570		error = vge_cam_set(sc,
571		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
572		if (error)
573			break;
574	}
575
576	/* If there were too many addresses, use the hash filter. */
577	if (error) {
578		vge_cam_clear(sc);
579
580		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
581			if (ifma->ifma_addr->sa_family != AF_LINK)
582				continue;
583			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
584			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
585			if (h < 32)
586				hashes[0] |= (1 << h);
587			else
588				hashes[1] |= (1 << (h - 32));
589		}
590
591		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
592		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
593	}
594	if_maddr_runlock(ifp);
595
596	return;
597}
598
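/*
 * Soft-reset the chip. If the reset bit fails to self-clear, force a
 * stop instead, then reload the chip's defaults from the EEPROM and
 * clear the PACPI bit in CHIPCFG0.
 */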
599static void
600vge_reset(sc)
601	struct vge_softc		*sc;
602{
603	int			i;
604
605	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
606
607	for (i = 0; i < VGE_TIMEOUT; i++) {
608		DELAY(5);
609		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
610			break;
611	}
612
613	if (i == VGE_TIMEOUT) {
614		device_printf(sc->vge_dev, "soft reset timed out\n");
615		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
616		DELAY(2000);
617	}
618
619	DELAY(5000);
620
621	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
622
623	for (i = 0; i < VGE_TIMEOUT; i++) {
624		DELAY(5);
625		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
626			break;
627	}
628
629	if (i == VGE_TIMEOUT) {
630		device_printf(sc->vge_dev, "EEPROM reload timed out\n");
631		return;
632	}
633
634	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
635
636	return;
637}
638
639/*
640 * Probe for a VIA gigabit chip. Check the PCI vendor and device
641 * IDs against our list and return a device name if we find a match.
642 */
643static int
644vge_probe(dev)
645	device_t		dev;
646{
647	struct vge_type		*t;
648
649	t = vge_devs;
650
651	while (t->vge_name != NULL) {
652		if ((pci_get_vendor(dev) == t->vge_vid) &&
653		    (pci_get_device(dev) == t->vge_did)) {
654			device_set_desc(dev, t->vge_name);
655			return (BUS_PROBE_DEFAULT);
656		}
657		t++;
658	}
659
660	return (ENXIO);
661}
662
663/*
664 * Map a single buffer address.
665 */
666
667struct vge_dmamap_arg {
668	bus_addr_t	vge_busaddr;
669};
670
671static void
672vge_dmamap_cb(arg, segs, nsegs, error)
673	void			*arg;
674	bus_dma_segment_t	*segs;
675	int			nsegs;
676	int			error;
677{
678	struct vge_dmamap_arg	*ctx;
679
680	if (error != 0)
681		return;
682
683	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
684
685	ctx = (struct vge_dmamap_arg *)arg;
686	ctx->vge_busaddr = segs[0].ds_addr;
687}
688
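/*
 * Allocate DMA tags, descriptor rings and buffer maps. If a ring turns
 * out to straddle a 4GB boundary, everything allocated so far is torn
 * down and the allocation is retried with a 32-bit address limit.
 */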
689static int
690vge_dma_alloc(sc)
691	struct vge_softc	*sc;
692{
693	struct vge_dmamap_arg	ctx;
694	struct vge_txdesc	*txd;
695	struct vge_rxdesc	*rxd;
696	bus_addr_t		lowaddr, tx_ring_end, rx_ring_end;
697	int			error, i;
698
699	lowaddr = BUS_SPACE_MAXADDR;
700
701again:
702	/* Create parent ring tag. */
703	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
704	    1, 0,			/* algnmnt, boundary */
705	    lowaddr,			/* lowaddr */
706	    BUS_SPACE_MAXADDR,		/* highaddr */
707	    NULL, NULL,			/* filter, filterarg */
708	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
709	    0,				/* nsegments */
710	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
711	    0,				/* flags */
712	    NULL, NULL,			/* lockfunc, lockarg */
713	    &sc->vge_cdata.vge_ring_tag);
714	if (error != 0) {
715		device_printf(sc->vge_dev,
716		    "could not create parent DMA tag.\n");
717		goto fail;
718	}
719
720	/* Create tag for Tx ring. */
721	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
722	    VGE_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
723	    BUS_SPACE_MAXADDR,		/* lowaddr */
724	    BUS_SPACE_MAXADDR,		/* highaddr */
725	    NULL, NULL,			/* filter, filterarg */
726	    VGE_TX_LIST_SZ,		/* maxsize */
727	    1,				/* nsegments */
728	    VGE_TX_LIST_SZ,		/* maxsegsize */
729	    0,				/* flags */
730	    NULL, NULL,			/* lockfunc, lockarg */
731	    &sc->vge_cdata.vge_tx_ring_tag);
732	if (error != 0) {
733		device_printf(sc->vge_dev,
734		    "could not allocate Tx ring DMA tag.\n");
735		goto fail;
736	}
737
738	/* Create tag for Rx ring. */
739	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
740	    VGE_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
741	    BUS_SPACE_MAXADDR,		/* lowaddr */
742	    BUS_SPACE_MAXADDR,		/* highaddr */
743	    NULL, NULL,			/* filter, filterarg */
744	    VGE_RX_LIST_SZ,		/* maxsize */
745	    1,				/* nsegments */
746	    VGE_RX_LIST_SZ,		/* maxsegsize */
747	    0,				/* flags */
748	    NULL, NULL,			/* lockfunc, lockarg */
749	    &sc->vge_cdata.vge_rx_ring_tag);
750	if (error != 0) {
751		device_printf(sc->vge_dev,
752		    "could not allocate Rx ring DMA tag.\n");
753		goto fail;
754	}
755
756	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
757	error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
758	    (void **)&sc->vge_rdata.vge_tx_ring,
759	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
760	    &sc->vge_cdata.vge_tx_ring_map);
761	if (error != 0) {
762		device_printf(sc->vge_dev,
763		    "could not allocate DMA'able memory for Tx ring.\n");
764		goto fail;
765	}
766
767	ctx.vge_busaddr = 0;
768	error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
769	    sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
770	    VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
771	if (error != 0 || ctx.vge_busaddr == 0) {
772		device_printf(sc->vge_dev,
773		    "could not load DMA'able memory for Tx ring.\n");
774		goto fail;
775	}
776	sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;
777
778	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
779	error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
780	    (void **)&sc->vge_rdata.vge_rx_ring,
781	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
782	    &sc->vge_cdata.vge_rx_ring_map);
783	if (error != 0) {
784		device_printf(sc->vge_dev,
785		    "could not allocate DMA'able memory for Rx ring.\n");
786		goto fail;
787	}
788
789	ctx.vge_busaddr = 0;
790	error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
791	    sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
792	    VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
793	if (error != 0 || ctx.vge_busaddr == 0) {
794		device_printf(sc->vge_dev,
795		    "could not load DMA'able memory for Rx ring.\n");
796		goto fail;
797	}
798	sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;
799
800	/* Tx/Rx descriptor rings must stay within the same 4GB segment. */
801	tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
802	rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
803	if ((VGE_ADDR_HI(tx_ring_end) !=
804	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
805	    (VGE_ADDR_HI(rx_ring_end) !=
806	    VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
807	    VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
808		device_printf(sc->vge_dev, "4GB boundary crossed, "
809		    "switching to 32bit DMA address mode.\n");
810		vge_dma_free(sc);
811		/* Limit DMA address space to 32bit and try again. */
812		lowaddr = BUS_SPACE_MAXADDR_32BIT;
813		goto again;
814	}
815
816	/* Create parent buffer tag. */
817	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
818	    1, 0,			/* algnmnt, boundary */
819	    VGE_BUF_DMA_MAXADDR,	/* lowaddr */
820	    BUS_SPACE_MAXADDR,		/* highaddr */
821	    NULL, NULL,			/* filter, filterarg */
822	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
823	    0,				/* nsegments */
824	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
825	    0,				/* flags */
826	    NULL, NULL,			/* lockfunc, lockarg */
827	    &sc->vge_cdata.vge_buffer_tag);
828	if (error != 0) {
829		device_printf(sc->vge_dev,
830		    "could not create parent buffer DMA tag.\n");
831		goto fail;
832	}
833
834	/* Create tag for Tx buffers. */
835	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
836	    1, 0,			/* algnmnt, boundary */
837	    BUS_SPACE_MAXADDR,		/* lowaddr */
838	    BUS_SPACE_MAXADDR,		/* highaddr */
839	    NULL, NULL,			/* filter, filterarg */
840	    MCLBYTES * VGE_MAXTXSEGS,	/* maxsize */
841	    VGE_MAXTXSEGS,		/* nsegments */
842	    MCLBYTES,			/* maxsegsize */
843	    0,				/* flags */
844	    NULL, NULL,			/* lockfunc, lockarg */
845	    &sc->vge_cdata.vge_tx_tag);
846	if (error != 0) {
847		device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");
848		goto fail;
849	}
850
851	/* Create tag for Rx buffers. */
852	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
853	    VGE_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
854	    BUS_SPACE_MAXADDR,		/* lowaddr */
855	    BUS_SPACE_MAXADDR,		/* highaddr */
856	    NULL, NULL,			/* filter, filterarg */
857	    MCLBYTES,			/* maxsize */
858	    1,				/* nsegments */
859	    MCLBYTES,			/* maxsegsize */
860	    0,				/* flags */
861	    NULL, NULL,			/* lockfunc, lockarg */
862	    &sc->vge_cdata.vge_rx_tag);
863	if (error != 0) {
864		device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");
865		goto fail;
866	}
867
868	/* Create DMA maps for Tx buffers. */
869	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
870		txd = &sc->vge_cdata.vge_txdesc[i];
871		txd->tx_m = NULL;
872		txd->tx_dmamap = NULL;
873		error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
874		    &txd->tx_dmamap);
875		if (error != 0) {
876			device_printf(sc->vge_dev,
877			    "could not create Tx dmamap.\n");
878			goto fail;
879		}
880	}
881	/* Create DMA maps for Rx buffers. */
882	if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
883	    &sc->vge_cdata.vge_rx_sparemap)) != 0) {
884		device_printf(sc->vge_dev,
885		    "could not create spare Rx dmamap.\n");
886		goto fail;
887	}
888	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
889		rxd = &sc->vge_cdata.vge_rxdesc[i];
890		rxd->rx_m = NULL;
891		rxd->rx_dmamap = NULL;
892		error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
893		    &rxd->rx_dmamap);
894		if (error != 0) {
895			device_printf(sc->vge_dev,
896			    "could not create Rx dmamap.\n");
897			goto fail;
898		}
899	}
900
901fail:
902	return (error);
903}
904
905static void
906vge_dma_free(sc)
907	struct vge_softc	*sc;
908{
909	struct vge_txdesc	*txd;
910	struct vge_rxdesc	*rxd;
911	int			i;
912
913	/* Tx ring. */
914	if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
915		if (sc->vge_cdata.vge_tx_ring_map)
916			bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
917			    sc->vge_cdata.vge_tx_ring_map);
918		if (sc->vge_cdata.vge_tx_ring_map &&
919		    sc->vge_rdata.vge_tx_ring)
920			bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
921			    sc->vge_rdata.vge_tx_ring,
922			    sc->vge_cdata.vge_tx_ring_map);
923		sc->vge_rdata.vge_tx_ring = NULL;
924		sc->vge_cdata.vge_tx_ring_map = NULL;
925		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
926		sc->vge_cdata.vge_tx_ring_tag = NULL;
927	}
928	/* Rx ring. */
929	if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
930		if (sc->vge_cdata.vge_rx_ring_map)
931			bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
932			    sc->vge_cdata.vge_rx_ring_map);
933		if (sc->vge_cdata.vge_rx_ring_map &&
934		    sc->vge_rdata.vge_rx_ring)
935			bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
936			    sc->vge_rdata.vge_rx_ring,
937			    sc->vge_cdata.vge_rx_ring_map);
938		sc->vge_rdata.vge_rx_ring = NULL;
939		sc->vge_cdata.vge_rx_ring_map = NULL;
940		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
941		sc->vge_cdata.vge_rx_ring_tag = NULL;
942	}
943	/* Tx buffers. */
944	if (sc->vge_cdata.vge_tx_tag != NULL) {
945		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
946			txd = &sc->vge_cdata.vge_txdesc[i];
947			if (txd->tx_dmamap != NULL) {
948				bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
949				    txd->tx_dmamap);
950				txd->tx_dmamap = NULL;
951			}
952		}
953		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
954		sc->vge_cdata.vge_tx_tag = NULL;
955	}
956	/* Rx buffers. */
957	if (sc->vge_cdata.vge_rx_tag != NULL) {
958		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
959			rxd = &sc->vge_cdata.vge_rxdesc[i];
960			if (rxd->rx_dmamap != NULL) {
961				bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
962				    rxd->rx_dmamap);
963				rxd->rx_dmamap = NULL;
964			}
965		}
966		if (sc->vge_cdata.vge_rx_sparemap != NULL) {
967			bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
968			    sc->vge_cdata.vge_rx_sparemap);
969			sc->vge_cdata.vge_rx_sparemap = NULL;
970		}
971		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
972		sc->vge_cdata.vge_rx_tag = NULL;
973	}
974
975	if (sc->vge_cdata.vge_buffer_tag != NULL) {
976		bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
977		sc->vge_cdata.vge_buffer_tag = NULL;
978	}
979	if (sc->vge_cdata.vge_ring_tag != NULL) {
980		bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
981		sc->vge_cdata.vge_ring_tag = NULL;
982	}
983}
984
985/*
986 * Attach the interface. Allocate softc structures, do ifmedia
987 * setup and ethernet/BPF attach.
988 */
989static int
990vge_attach(dev)
991	device_t		dev;
992{
993	u_char			eaddr[ETHER_ADDR_LEN];
994	struct vge_softc	*sc;
995	struct ifnet		*ifp;
996	int			error = 0, rid;
997
998	sc = device_get_softc(dev);
999	sc->vge_dev = dev;
1000
1001	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1002	    MTX_DEF);
1003	callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);
1004
1005	/*
1006	 * Map control/status registers.
1007	 */
1008	pci_enable_busmaster(dev);
1009
1010	rid = PCIR_BAR(1);
1011	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1012	    RF_ACTIVE);
1013
1014	if (sc->vge_res == NULL) {
1015		device_printf(dev, "couldn't map ports/memory\n");
1016		error = ENXIO;
1017		goto fail;
1018	}
1019
1020	/* Allocate interrupt */
1021	rid = 0;
1022	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1023	    RF_SHAREABLE | RF_ACTIVE);
1024
1025	if (sc->vge_irq == NULL) {
1026		device_printf(dev, "couldn't map interrupt\n");
1027		error = ENXIO;
1028		goto fail;
1029	}
1030
1031	/* Reset the adapter. */
1032	vge_reset(sc);
1033
1034	/*
1035	 * Get station address from the EEPROM.
1036	 */
1037	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
1038
1039	error = vge_dma_alloc(sc);
1040	if (error)
1041		goto fail;
1042
1043	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
1044	if (ifp == NULL) {
1045		device_printf(dev, "can not if_alloc()\n");
1046		error = ENOSPC;
1047		goto fail;
1048	}
1049
1050	/* Do MII setup */
1051	if (mii_phy_probe(dev, &sc->vge_miibus,
1052	    vge_ifmedia_upd, vge_ifmedia_sts)) {
1053		device_printf(dev, "MII without any phy!\n");
1054		error = ENXIO;
1055		goto fail;
1056	}
1057
1058	ifp->if_softc = sc;
1059	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1060	ifp->if_mtu = ETHERMTU;
1061	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1062	ifp->if_ioctl = vge_ioctl;
1063	ifp->if_capabilities = IFCAP_VLAN_MTU;
1064	ifp->if_start = vge_start;
1065	ifp->if_hwassist = VGE_CSUM_FEATURES;
1066	ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
1067	ifp->if_capenable = ifp->if_capabilities;
1068#ifdef DEVICE_POLLING
1069	ifp->if_capabilities |= IFCAP_POLLING;
1070#endif
1071	ifp->if_init = vge_init;
1072	IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN);
1073	ifp->if_snd.ifq_drv_maxlen = VGE_IFQ_MAXLEN;
1074	IFQ_SET_READY(&ifp->if_snd);
1075
1076	/*
1077	 * Call MI attach routine.
1078	 */
1079	ether_ifattach(ifp, eaddr);
1080
1081	/* Hook interrupt last to avoid having to lock softc */
1082	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
1083	    NULL, vge_intr, sc, &sc->vge_intrhand);
1084
1085	if (error) {
1086		device_printf(dev, "couldn't set up irq\n");
1087		ether_ifdetach(ifp);
1088		goto fail;
1089	}
1090
1091fail:
1092	if (error)
1093		vge_detach(dev);
1094
1095	return (error);
1096}
1097
1098/*
1099 * Shutdown hardware and free up resources. This can be called any
1100 * time after the mutex has been initialized. It is called in both
1101 * the error case in attach and the normal detach case so it needs
1102 * to be careful about only freeing resources that have actually been
1103 * allocated.
1104 */
1105static int
1106vge_detach(dev)
1107	device_t		dev;
1108{
1109	struct vge_softc		*sc;
1110	struct ifnet		*ifp;
1111
1112	sc = device_get_softc(dev);
1113	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
1114	ifp = sc->vge_ifp;
1115
1116#ifdef DEVICE_POLLING
1117	if (ifp->if_capenable & IFCAP_POLLING)
1118		ether_poll_deregister(ifp);
1119#endif
1120
1121	/* These should only be active if attach succeeded */
1122	if (device_is_attached(dev)) {
1123		ether_ifdetach(ifp);
1124		VGE_LOCK(sc);
1125		vge_stop(sc);
1126		VGE_UNLOCK(sc);
1127		callout_drain(&sc->vge_watchdog);
1128	}
1129	if (sc->vge_miibus)
1130		device_delete_child(dev, sc->vge_miibus);
1131	bus_generic_detach(dev);
1132
1133	if (sc->vge_intrhand)
1134		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
1135	if (sc->vge_irq)
1136		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq);
1137	if (sc->vge_res)
1138		bus_release_resource(dev, SYS_RES_MEMORY,
1139		    PCIR_BAR(1), sc->vge_res);
1140	if (ifp)
1141		if_free(ifp);
1142
1143	vge_dma_free(sc);
1144	mtx_destroy(&sc->vge_mtx);
1145
1146	return (0);
1147}
1148
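/*
 * Recycle the Rx descriptor at 'prod' without giving it a new mbuf
 * (used when a frame is dropped or a replacement buffer could not be
 * allocated). Ownership is handed back to the chip in VGE_RXCHUNK
 * sized batches, as described below.
 */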
1149static void
1150vge_discard_rxbuf(sc, prod)
1151	struct vge_softc	*sc;
1152	int			prod;
1153{
1154	struct vge_rxdesc	*rxd;
1155	int			i;
1156
1157	rxd = &sc->vge_cdata.vge_rxdesc[prod];
1158	rxd->rx_desc->vge_sts = 0;
1159	rxd->rx_desc->vge_ctl = 0;
1160
1161	/*
1162	 * Note: the manual fails to document the fact that for
1163	 * proper operation, the driver needs to replenish the RX
1164	 * DMA ring 4 descriptors at a time (rather than one at a
1165	 * time, like most chips). We can allocate the new buffers
1166	 * but we should not set the OWN bits until we're ready
1167	 * to hand back 4 of them in one shot.
1168	 */
1169	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1170		for (i = VGE_RXCHUNK; i > 0; i--) {
1171			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1172			rxd = rxd->rxd_prev;
1173		}
1174		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1175	}
1176}
1177
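/*
 * Attach a fresh mbuf cluster to the Rx descriptor at 'prod': load it
 * through the spare DMA map, swap that map with the descriptor's own
 * map, and reprogram the descriptor with the new buffer's bus address.
 */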
1178static int
1179vge_newbuf(sc, prod)
1180	struct vge_softc	*sc;
1181	int			prod;
1182{
1183	struct vge_rxdesc	*rxd;
1184	struct mbuf		*m;
1185	bus_dma_segment_t	segs[1];
1186	bus_dmamap_t		map;
1187	int			i, nsegs;
1188
1189	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1190	if (m == NULL)
1191		return (ENOBUFS);
1192	/*
1193	 * This is part of an evil trick to deal with strict-alignment
1194	 * architectures. The VIA chip requires RX buffers to be aligned
1195	 * on 32-bit boundaries, but that will hose strict-alignment
1196	 * architectures. To get around this, we leave some empty space
1197	 * at the start of each buffer and, on strict-alignment hosts,
1198	 * we copy the buffer back two bytes to achieve word alignment.
1199	 * This is slightly more efficient than allocating a new buffer,
1200	 * copying the contents, and discarding the old buffer.
1201	 */
1202	m->m_len = m->m_pkthdr.len = MCLBYTES;
1203	m_adj(m, VGE_RX_BUF_ALIGN);
1204
1205	if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
1206	    sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1207		m_freem(m);
1208		return (ENOBUFS);
1209	}
1210	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1211
1212	rxd = &sc->vge_cdata.vge_rxdesc[prod];
1213	if (rxd->rx_m != NULL) {
1214		bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
1215		    BUS_DMASYNC_POSTREAD);
1216		bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
1217	}
1218	map = rxd->rx_dmamap;
1219	rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
1220	sc->vge_cdata.vge_rx_sparemap = map;
1221	bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
1222	    BUS_DMASYNC_PREREAD);
1223	rxd->rx_m = m;
1224
1225	rxd->rx_desc->vge_sts = 0;
1226	rxd->rx_desc->vge_ctl = 0;
1227	rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
1228	rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
1229	    (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);
1230
1231	/*
1232	 * Note: the manual fails to document the fact that for
1233	 * proper operation, the driver needs to replenish the RX
1234	 * DMA ring 4 descriptors at a time (rather than one at a
1235	 * time, like most chips). We can allocate the new buffers
1236	 * but we should not set the OWN bits until we're ready
1237	 * to hand back 4 of them in one shot.
1238	 */
1239	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1240		for (i = VGE_RXCHUNK; i > 0; i--) {
1241			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1242			rxd = rxd->rxd_prev;
1243		}
1244		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1245	}
1246
1247	return (0);
1248}
1249
1250static int
1251vge_tx_list_init(sc)
1252	struct vge_softc	*sc;
1253{
1254	struct vge_ring_data	*rd;
1255	struct vge_txdesc	*txd;
1256	int			i;
1257
1258	VGE_LOCK_ASSERT(sc);
1259
1260	sc->vge_cdata.vge_tx_prodidx = 0;
1261	sc->vge_cdata.vge_tx_considx = 0;
1262	sc->vge_cdata.vge_tx_cnt = 0;
1263
1264	rd = &sc->vge_rdata;
1265	bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
1266	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1267		txd = &sc->vge_cdata.vge_txdesc[i];
1268		txd->tx_m = NULL;
1269		txd->tx_desc = &rd->vge_tx_ring[i];
1270	}
1271
1272	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1273	    sc->vge_cdata.vge_tx_ring_map,
1274	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1275
1276	return (0);
1277}
1278
1279static int
1280vge_rx_list_init(sc)
1281	struct vge_softc	*sc;
1282{
1283	struct vge_ring_data	*rd;
1284	struct vge_rxdesc	*rxd;
1285	int			i;
1286
1287	VGE_LOCK_ASSERT(sc);
1288
1289	sc->vge_cdata.vge_rx_prodidx = 0;
1290	sc->vge_cdata.vge_head = NULL;
1291	sc->vge_cdata.vge_tail = NULL;
1292	sc->vge_cdata.vge_rx_commit = 0;
1293
1294	rd = &sc->vge_rdata;
1295	bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
1296	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1297		rxd = &sc->vge_cdata.vge_rxdesc[i];
1298		rxd->rx_m = NULL;
1299		rxd->rx_desc = &rd->vge_rx_ring[i];
1300		if (i == 0)
1301			rxd->rxd_prev =
1302			    &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1];
1303		else
1304			rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1];
1305		if (vge_newbuf(sc, i) != 0)
1306			return (ENOBUFS);
1307	}
1308
1309	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1310	    sc->vge_cdata.vge_rx_ring_map,
1311	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1312
1313	sc->vge_cdata.vge_rx_commit = 0;
1314
1315	return (0);
1316}
1317
1318static void
1319vge_freebufs(sc)
1320	struct vge_softc	*sc;
1321{
1322	struct vge_txdesc	*txd;
1323	struct vge_rxdesc	*rxd;
1324	struct ifnet		*ifp;
1325	int			i;
1326
1327	VGE_LOCK_ASSERT(sc);
1328
1329	ifp = sc->vge_ifp;
1330	/*
1331	 * Free RX and TX mbufs still in the queues.
1332	 */
1333	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1334		rxd = &sc->vge_cdata.vge_rxdesc[i];
1335		if (rxd->rx_m != NULL) {
1336			bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
1337			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
1338			bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
1339			    rxd->rx_dmamap);
1340			m_freem(rxd->rx_m);
1341			rxd->rx_m = NULL;
1342		}
1343	}
1344
1345	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1346		txd = &sc->vge_cdata.vge_txdesc[i];
1347		if (txd->tx_m != NULL) {
1348			bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
1349			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1350			bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
1351			    txd->tx_dmamap);
1352			m_freem(txd->tx_m);
1353			txd->tx_m = NULL;
1354			ifp->if_oerrors++;
1355		}
1356	}
1357}
1358
1359#ifndef	__NO_STRICT_ALIGNMENT
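/*
 * Copy a received frame back by two bytes so that the IP header that
 * follows the 14-byte Ethernet header ends up 32-bit aligned on
 * strict-alignment architectures. The space in front of the buffer
 * was reserved by the m_adj() call in vge_newbuf().
 */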
1360static __inline void
1361vge_fixup_rx(m)
1362	struct mbuf		*m;
1363{
1364	int			i;
1365	uint16_t		*src, *dst;
1366
1367	src = mtod(m, uint16_t *);
1368	dst = src - 1;
1369
1370	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1371		*dst++ = *src++;
1372
1373	m->m_data -= ETHER_ALIGN;
1374}
1375#endif
1376
1377/*
1378 * RX handler. We support the reception of jumbo frames that have
1379 * been fragmented across multiple 2K mbuf cluster buffers.
1380 */
1381static int
1382vge_rxeof(sc, count)
1383	struct vge_softc	*sc;
1384	int			count;
1385{
1386	struct mbuf		*m;
1387	struct ifnet		*ifp;
1388	int			prod, prog, total_len;
1389	struct vge_rxdesc	*rxd;
1390	struct vge_rx_desc	*cur_rx;
1391	uint32_t		rxstat, rxctl;
1392
1393	VGE_LOCK_ASSERT(sc);
1394
1395	ifp = sc->vge_ifp;
1396
1397	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1398	    sc->vge_cdata.vge_rx_ring_map,
1399	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1400
1401	prod = sc->vge_cdata.vge_rx_prodidx;
1402	for (prog = 0; count > 0 &&
1403	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1404	    VGE_RX_DESC_INC(prod)) {
1405		cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
1406		rxstat = le32toh(cur_rx->vge_sts);
1407		if ((rxstat & VGE_RDSTS_OWN) != 0)
1408			break;
1409		count--;
1410		prog++;
1411		rxctl = le32toh(cur_rx->vge_ctl);
1412		total_len = VGE_RXBYTES(rxstat);
1413		rxd = &sc->vge_cdata.vge_rxdesc[prod];
1414		m = rxd->rx_m;
1415
1416		/*
1417		 * If the 'start of frame' bit is set, this indicates
1418		 * either the first fragment in a multi-fragment receive,
1419		 * or an intermediate fragment. Either way, we want to
1420		 * accumulate the buffers.
1421		 */
1422		if ((rxstat & VGE_RXPKT_SOF) != 0) {
1423			if (vge_newbuf(sc, prod) != 0) {
1424				ifp->if_iqdrops++;
1425				VGE_CHAIN_RESET(sc);
1426				vge_discard_rxbuf(sc, prod);
1427				continue;
1428			}
1429			m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
1430			if (sc->vge_cdata.vge_head == NULL) {
1431				sc->vge_cdata.vge_head = m;
1432				sc->vge_cdata.vge_tail = m;
1433			} else {
1434				m->m_flags &= ~M_PKTHDR;
1435				sc->vge_cdata.vge_tail->m_next = m;
1436				sc->vge_cdata.vge_tail = m;
1437			}
1438			continue;
1439		}
1440
1441		/*
1442		 * Bad/error frames will have the RXOK bit cleared.
1443		 * However, there's one error case we want to allow:
1444		 * if a VLAN tagged frame arrives and the chip can't
1445		 * match it against the CAM filter, it considers this
1446		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1447		 * We don't want to drop the frame though: our VLAN
1448		 * filtering is done in software.
1449		 * We also want to receive frames with a bad checksum
1450		 * and frames with a bad length.
1451		 */
1452		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
1453		    (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
1454		    VGE_RDSTS_CSUMERR)) == 0) {
1455			ifp->if_ierrors++;
1456			/*
1457			 * If this is part of a multi-fragment packet,
1458			 * discard all the pieces.
1459			 */
1460			VGE_CHAIN_RESET(sc);
1461			vge_discard_rxbuf(sc, prod);
1462			continue;
1463		}
1464
1465		if (vge_newbuf(sc, prod) != 0) {
1466			ifp->if_iqdrops++;
1467			VGE_CHAIN_RESET(sc);
1468			vge_discard_rxbuf(sc, prod);
1469			continue;
1470		}
1471
1472		/* Chain received mbufs. */
1473		if (sc->vge_cdata.vge_head != NULL) {
1474			m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
1475			/*
1476			 * Special case: if there are 4 bytes or fewer
1477			 * in this buffer, the mbuf can be discarded:
1478			 * the last 4 bytes are the CRC, which we don't
1479			 * care about anyway.
1480			 */
1481			if (m->m_len <= ETHER_CRC_LEN) {
1482				sc->vge_cdata.vge_tail->m_len -=
1483				    (ETHER_CRC_LEN - m->m_len);
1484				m_freem(m);
1485			} else {
1486				m->m_len -= ETHER_CRC_LEN;
1487				m->m_flags &= ~M_PKTHDR;
1488				sc->vge_cdata.vge_tail->m_next = m;
1489			}
1490			m = sc->vge_cdata.vge_head;
1491			m->m_flags |= M_PKTHDR;
1492			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1493		} else {
1494			m->m_flags |= M_PKTHDR;
1495			m->m_pkthdr.len = m->m_len =
1496			    (total_len - ETHER_CRC_LEN);
1497		}
1498
1499#ifndef	__NO_STRICT_ALIGNMENT
1500		vge_fixup_rx(m);
1501#endif
1502		m->m_pkthdr.rcvif = ifp;
1503
1504		/* Do RX checksumming if enabled */
1505		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
1506		    (rxctl & VGE_RDCTL_FRAG) == 0) {
1507			/* Check IP header checksum */
1508			if ((rxctl & VGE_RDCTL_IPPKT) != 0)
1509				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1510			if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
1511				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1512
1513			/* Check TCP/UDP checksum */
1514			if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
1515			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
1516				m->m_pkthdr.csum_flags |=
1517				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1518				m->m_pkthdr.csum_data = 0xffff;
1519			}
1520		}
1521
1522		if ((rxstat & VGE_RDSTS_VTAG) != 0) {
1523			/*
1524			 * The 32-bit rxctl register is stored in little-endian.
1525			 * However, the 16-bit vlan tag is stored in big-endian,
1526			 * so we have to byte swap it.
1527			 */
1528			m->m_pkthdr.ether_vtag =
1529			    bswap16(rxctl & VGE_RDCTL_VLANID);
1530			m->m_flags |= M_VLANTAG;
1531		}
1532
1533		VGE_UNLOCK(sc);
1534		(*ifp->if_input)(ifp, m);
1535		VGE_LOCK(sc);
1536		sc->vge_cdata.vge_head = NULL;
1537		sc->vge_cdata.vge_tail = NULL;
1538	}
1539
1540	if (prog > 0) {
1541		sc->vge_cdata.vge_rx_prodidx = prod;
1542		bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1543		    sc->vge_cdata.vge_rx_ring_map,
1544		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1545		/* Update residue counter. */
1546		if (sc->vge_cdata.vge_rx_commit != 0) {
1547			CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
1548			    sc->vge_cdata.vge_rx_commit);
1549			sc->vge_cdata.vge_rx_commit = 0;
1550		}
1551	}
1552	return (prog);
1553}
1554
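/*
 * Transmit completion handler: reclaim descriptors the chip has
 * finished with and free the mbufs associated with them.
 */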
1555static void
1556vge_txeof(sc)
1557	struct vge_softc	*sc;
1558{
1559	struct ifnet		*ifp;
1560	struct vge_tx_desc	*cur_tx;
1561	struct vge_txdesc	*txd;
1562	uint32_t		txstat;
1563	int			cons, prod;
1564
1565	VGE_LOCK_ASSERT(sc);
1566
1567	ifp = sc->vge_ifp;
1568
1569	if (sc->vge_cdata.vge_tx_cnt == 0)
1570		return;
1571
1572	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1573	    sc->vge_cdata.vge_tx_ring_map,
1574	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1575
1576	/*
1577	 * Go through our tx list and free mbufs for those
1578	 * frames that have been transmitted.
1579	 */
1580	cons = sc->vge_cdata.vge_tx_considx;
1581	prod = sc->vge_cdata.vge_tx_prodidx;
1582	for (; cons != prod; VGE_TX_DESC_INC(cons)) {
1583		cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
1584		txstat = le32toh(cur_tx->vge_sts);
1585		if ((txstat & VGE_TDSTS_OWN) != 0)
1586			break;
1587		sc->vge_cdata.vge_tx_cnt--;
1588		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1589
1590		txd = &sc->vge_cdata.vge_txdesc[cons];
1591		bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1592		    BUS_DMASYNC_POSTWRITE);
1593		bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);
1594
1595		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
1596		    __func__));
1597		m_freem(txd->tx_m);
1598		txd->tx_m = NULL;
1599	}
1600
1601	sc->vge_cdata.vge_tx_considx = cons;
1602	if (sc->vge_cdata.vge_tx_cnt == 0)
1603		sc->vge_timer = 0;
1604	else {
1605		/*
1606		 * If not all descriptors have been reaped yet,
1607		 * reload the timer so that we will eventually get another
1608		 * interrupt that will cause us to re-enter this routine.
1609		 * This is done in case the transmitter has gone idle.
1610		 */
1611		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1612	}
1613}
1614
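/*
 * Track link state changes reported by the PHY: note link loss, and
 * when the link comes back up, restart any pending transmission.
 */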
1615static void
1616vge_tick(xsc)
1617	void			*xsc;
1618{
1619	struct vge_softc	*sc;
1620	struct ifnet		*ifp;
1621	struct mii_data		*mii;
1622
1623	sc = xsc;
1624	ifp = sc->vge_ifp;
1625	VGE_LOCK_ASSERT(sc);
1626	mii = device_get_softc(sc->vge_miibus);
1627
1628	mii_tick(mii);
1629	if (sc->vge_link) {
1630		if (!(mii->mii_media_status & IFM_ACTIVE)) {
1631			sc->vge_link = 0;
1632			if_link_state_change(sc->vge_ifp,
1633			    LINK_STATE_DOWN);
1634		}
1635	} else {
1636		if (mii->mii_media_status & IFM_ACTIVE &&
1637		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1638			sc->vge_link = 1;
1639			if_link_state_change(sc->vge_ifp,
1640			    LINK_STATE_UP);
1641			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1642				vge_start_locked(ifp);
1643		}
1644	}
1645
1646	return;
1647}
1648
1649#ifdef DEVICE_POLLING
1650static int
1651vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
1652{
1653	struct vge_softc *sc = ifp->if_softc;
1654	int rx_npkts = 0;
1655
1656	VGE_LOCK(sc);
1657	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1658		goto done;
1659
1660	rx_npkts = vge_rxeof(sc, count);
1661	vge_txeof(sc);
1662
1663	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1664		vge_start_locked(ifp);
1665
1666	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1667		u_int32_t       status;
1668		status = CSR_READ_4(sc, VGE_ISR);
1669		if (status == 0xFFFFFFFF)
1670			goto done;
1671		if (status)
1672			CSR_WRITE_4(sc, VGE_ISR, status);
1673
1674		/*
1675		 * XXX check behaviour on receiver stalls.
1676		 */
1677
1678		if (status & VGE_ISR_TXDMA_STALL ||
1679		    status & VGE_ISR_RXDMA_STALL) {
1680			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1681			vge_init_locked(sc);
1682		}
1683
1684		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1685			vge_rxeof(sc, count);
1686			ifp->if_ierrors++;
1687			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1688			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1689		}
1690	}
1691done:
1692	VGE_UNLOCK(sc);
1693	return (rx_npkts);
1694}
1695#endif /* DEVICE_POLLING */
1696
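/*
 * Interrupt handler: with interrupts masked, acknowledge and service
 * all pending events (RX, TX, DMA stalls, link changes), then unmask
 * interrupts and kick the transmit queue if needed.
 */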
1697static void
1698vge_intr(arg)
1699	void			*arg;
1700{
1701	struct vge_softc	*sc;
1702	struct ifnet		*ifp;
1703	u_int32_t		status;
1704
1705	sc = arg;
1706
1707	if (sc->suspended) {
1708		return;
1709	}
1710
1711	VGE_LOCK(sc);
1712	ifp = sc->vge_ifp;
1713
1714	if (!(ifp->if_flags & IFF_UP)) {
1715		VGE_UNLOCK(sc);
1716		return;
1717	}
1718
1719#ifdef DEVICE_POLLING
1720	if  (ifp->if_capenable & IFCAP_POLLING) {
1721		VGE_UNLOCK(sc);
1722		return;
1723	}
1724#endif
1725
1726	/* Disable interrupts */
1727	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1728
1729	for (;;) {
1730
1731		status = CSR_READ_4(sc, VGE_ISR);
1732		/* If the card has gone away the read returns 0xffffffff. */
1733		if (status == 0xFFFFFFFF)
1734			break;
1735
1736		if (status)
1737			CSR_WRITE_4(sc, VGE_ISR, status);
1738
1739		if ((status & VGE_INTRS) == 0)
1740			break;
1741
1742		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1743			vge_rxeof(sc, VGE_RX_DESC_CNT);
1744
1745		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1746			vge_rxeof(sc, VGE_RX_DESC_CNT);
1747			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1748			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1749		}
1750
1751		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
1752			vge_txeof(sc);
1753
1754		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
1755			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1756			vge_init_locked(sc);
1757		}
1758
1759		if (status & VGE_ISR_LINKSTS)
1760			vge_tick(sc);
1761	}
1762
1763	/* Re-enable interrupts */
1764	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1765
1766	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1767		vge_start_locked(ifp);
1768
1769	VGE_UNLOCK(sc);
1770
1771	return;
1772}
1773
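/*
 * Encapsulate a frame in the transmit ring: pad runts to the minimum
 * frame length, load the mbuf chain (collapsing it if it has too many
 * fragments), set the checksum offload and VLAN bits, and hand the
 * descriptor to the hardware.
 */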
1774static int
1775vge_encap(sc, m_head)
1776	struct vge_softc	*sc;
1777	struct mbuf		**m_head;
1778{
1779	struct vge_txdesc	*txd;
1780	struct vge_tx_frag	*frag;
1781	struct mbuf		*m;
1782	bus_dma_segment_t	txsegs[VGE_MAXTXSEGS];
1783	int			error, i, nsegs, padlen;
1784	uint32_t		cflags;
1785
1786	VGE_LOCK_ASSERT(sc);
1787
1788	M_ASSERTPKTHDR((*m_head));
1789
1790	/* Argh. This chip does not autopad short frames. */
1791	if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
1792		m = *m_head;
1793		padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
1794		if (M_WRITABLE(m) == 0) {
1795			/* Get a writable copy. */
1796			m = m_dup(*m_head, M_DONTWAIT);
1797			m_freem(*m_head);
1798			if (m == NULL) {
1799				*m_head = NULL;
1800				return (ENOBUFS);
1801			}
1802			*m_head = m;
1803		}
1804		if (M_TRAILINGSPACE(m) < padlen) {
1805			m = m_defrag(m, M_DONTWAIT);
1806			if (m == NULL) {
1807				m_freem(*m_head);
1808				*m_head = NULL;
1809				return (ENOBUFS);
1810			}
1811		}
1812		/*
1813		 * Manually pad short frames, and zero the pad space
1814		 * to avoid leaking data.
1815		 */
1816		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1817		m->m_pkthdr.len += padlen;
1818		m->m_len = m->m_pkthdr.len;
1819		*m_head = m;
1820	}
1821
1822	txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];
1823
1824	error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1825	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1826	if (error == EFBIG) {
1827		m = m_collapse(*m_head, M_DONTWAIT, VGE_MAXTXSEGS);
1828		if (m == NULL) {
1829			m_freem(*m_head);
1830			*m_head = NULL;
1831			return (ENOMEM);
1832		}
1833		*m_head = m;
1834		error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1835		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1836		if (error != 0) {
1837			m_freem(*m_head);
1838			*m_head = NULL;
1839			return (error);
1840		}
1841	} else if (error != 0)
1842		return (error);
1843	bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1844	    BUS_DMASYNC_PREWRITE);
1845
1846	m = *m_head;
1847	cflags = 0;
1848
1849	/* Configure checksum offload. */
1850	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1851		cflags |= VGE_TDCTL_IPCSUM;
1852	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1853		cflags |= VGE_TDCTL_TCPCSUM;
1854	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1855		cflags |= VGE_TDCTL_UDPCSUM;
1856
1857	/* Configure VLAN. */
1858	if ((m->m_flags & M_VLANTAG) != 0)
1859		cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
1860	txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);
1861	/*
1862	 * XXX
1863	 * Velocity family seems to support TSO but no information
1864	 * for MSS configuration is available. Also the number of
1865	 * fragments supported by a descriptor is too small to hold
1866	 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
1867	 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build
1868	 * longer chain of buffers but no additional information is
1869	 * available.
1870	 *
1871	 * When telling the chip how many segments there are, we
1872	 * must use nsegs + 1 instead of just nsegs. Darned if I
1873	 * know why. This also means we can't use the last fragment
1874	 * field of the Tx descriptor.
1875	 */
1876	txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
1877	    VGE_TD_LS_NORM);
1878	for (i = 0; i < nsegs; i++) {
1879		frag = &txd->tx_desc->vge_frag[i];
1880		frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
1881		frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
1882		    (VGE_BUFLEN(txsegs[i].ds_len) << 16));
1883	}
1884
1885	sc->vge_cdata.vge_tx_cnt++;
1886	VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);
1887
1888	/*
1889	 * Finally request interrupt and give the first descriptor
1890	 * ownership to hardware.
1891	 */
1892	txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
1893	txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
1894	txd->tx_m = m;
1895
1896	return (0);
1897}
1898
1899/*
1900 * Main transmit routine.
1901 */
1902
1903static void
1904vge_start(ifp)
1905	struct ifnet		*ifp;
1906{
1907	struct vge_softc	*sc;
1908
1909	sc = ifp->if_softc;
1910	VGE_LOCK(sc);
1911	vge_start_locked(ifp);
1912	VGE_UNLOCK(sc);
1913}
1914
1915
1916static void
1917vge_start_locked(ifp)
1918	struct ifnet		*ifp;
1919{
1920	struct vge_softc	*sc;
1921	struct vge_txdesc	*txd;
1922	struct mbuf		*m_head;
1923	int			enq, idx;
1924
1925	sc = ifp->if_softc;
1926
1927	VGE_LOCK_ASSERT(sc);
1928
1929	if (sc->vge_link == 0 ||
1930	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1931	    IFF_DRV_RUNNING)
1932		return;
1933
1934	idx = sc->vge_cdata.vge_tx_prodidx;
1935	VGE_TX_DESC_DEC(idx);
1936	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1937	    sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
1938		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1939		if (m_head == NULL)
1940			break;
1941		/*
1942		 * Pack the data into the transmit ring. If we
1943		 * don't have room, set the OACTIVE flag and wait
1944		 * for the NIC to drain the ring.
1945		 */
1946		if (vge_encap(sc, &m_head)) {
1947			if (m_head == NULL)
1948				break;
1949			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1950			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1951			break;
1952		}
1953
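		/*
		 * idx was decremented before the loop, so it still points at
		 * the previously queued descriptor here; setting what appears
		 * to be a "more descriptors queued" bit in its first fragment
		 * lets the chip chain into the entry we just filled.
		 */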
1954		txd = &sc->vge_cdata.vge_txdesc[idx];
1955		txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
1956		VGE_TX_DESC_INC(idx);
1957
1958		enq++;
1959		/*
1960		 * If there's a BPF listener, bounce a copy of this frame
1961		 * to him.
1962		 */
1963		ETHER_BPF_MTAP(ifp, m_head);
1964	}
1965
1966	if (enq > 0) {
1967		bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1968		    sc->vge_cdata.vge_tx_ring_map,
1969		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1970		/* Issue a transmit command. */
1971		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1972		/*
1973		 * Use the countdown timer for interrupt moderation.
1974		 * 'TX done' interrupts are disabled. Instead, we reset the
1975		 * countdown timer, which runs for the interval programmed
1976		 * into the SSTIMER register and then triggers an
1977		 * interrupt. Each time we set the TIMER0_ENABLE bit, the
1978		 * timer count is reloaded. Only when the transmitter
1979		 * is idle will the timer expire and an interrupt fire.
1980		 */
1981		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1982
1983		/*
1984		 * Set a timeout in case the chip goes out to lunch.
1985		 */
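		/*
		 * vge_watchdog() runs once a second and decrements vge_timer,
		 * so this arms a roughly five second transmit timeout.
		 */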
1986		sc->vge_timer = 5;
1987	}
1988}
1989
1990static void
1991vge_init(xsc)
1992	void			*xsc;
1993{
1994	struct vge_softc	*sc = xsc;
1995
1996	VGE_LOCK(sc);
1997	vge_init_locked(sc);
1998	VGE_UNLOCK(sc);
1999}
2000
2001static void
2002vge_init_locked(struct vge_softc *sc)
2003{
2004	struct ifnet		*ifp = sc->vge_ifp;
2005	struct mii_data		*mii;
2006	int			error, i;
2007
2008	VGE_LOCK_ASSERT(sc);
2009	mii = device_get_softc(sc->vge_miibus);
2010
2011	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2012		return;
2013
2014	/*
2015	 * Cancel pending I/O and free all RX/TX buffers.
2016	 */
2017	vge_stop(sc);
2018	vge_reset(sc);
2019
2020	/*
2021	 * Initialize the RX and TX descriptors and mbufs.
2022	 */
2023
2024	error = vge_rx_list_init(sc);
2025	if (error != 0) {
2026		device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
2027		return;
2028	}
2029	vge_tx_list_init(sc);
2030
2031	/* Set our station address */
2032	for (i = 0; i < ETHER_ADDR_LEN; i++)
2033		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);
2034
2035	/*
2036	 * Set receive FIFO threshold. Also allow transmission and
2037	 * reception of VLAN tagged frames.
2038	 */
2039	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
2040	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
2041
2042	/* Set DMA burst length */
2043	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
2044	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
2045
2046	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
2047
2048	/* Set collision backoff algorithm */
2049	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
2050	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
2051	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
2052
2053	/* Disable LPSEL field in priority resolution */
2054	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
2055
2056	/*
2057	 * Load the addresses of the DMA queues into the chip.
2058	 * Note that we only use one transmit queue.
2059	 */
2060
2061	CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
2062	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
2063	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
2064	    VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
2065	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
2066
2067	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
2068	    VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
2069	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
2070	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
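	/*
	 * The descriptor count registers take the ring size minus one,
	 * while the residue count presumably tells the chip how many RX
	 * descriptors are currently available; the whole ring is handed
	 * over here.
	 */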
2071
2072	/* Enable and wake up the RX descriptor queue */
2073	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
2074	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
2075
2076	/* Enable the TX descriptor queue */
2077	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
2078
2079	/* Set up the receive filter -- allow large frames for VLANs. */
2080	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
2081
2082	/* If we want promiscuous mode, set the allframes bit. */
2083	if (ifp->if_flags & IFF_PROMISC) {
2084		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
2085	}
2086
2087	/* Set capture broadcast bit to capture broadcast frames. */
2088	if (ifp->if_flags & IFF_BROADCAST) {
2089		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
2090	}
2091
2092	/* Set multicast bit to capture multicast frames. */
2093	if (ifp->if_flags & IFF_MULTICAST) {
2094		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
2095	}
2096
2097	/* Init the cam filter. */
2098	vge_cam_clear(sc);
2099
2100	/* Init the multicast filter. */
2101	vge_setmulti(sc);
2102
2103	/* Enable flow control */
2104
2105	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
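	/*
	 * 0x8B is a magic value; it presumably sets the TX/RX pause-frame
	 * (flow control) enable bits in CR2.
	 */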
2106
2107	/* Enable jumbo frame reception (if desired) */
2108
2109	/* Start the MAC: clear STOP, set NOPOLL, enable TX/RX and start. */
2110	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
2111	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
2112	CSR_WRITE_1(sc, VGE_CRS0,
2113	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
2114
2115	/*
2116	 * Configure one-shot timer for microsecond
2117	 * resolution and load it for 400 usecs.
2118	 */
2119	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
2120	CSR_WRITE_2(sc, VGE_SSTIMER, 400);
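	/*
	 * This one-shot timer is re-armed from vge_start_locked() via
	 * VGE_CR1_TIMER0_ENABLE; its interrupt stands in for the per-packet
	 * 'TX done' interrupts, which are left disabled.
	 */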
2121
2122	/*
2123	 * Configure interrupt moderation for receive. Enable
2124	 * the holdoff counter and load it, and set the RX
2125	 * suppression count to the number of descriptors we
2126	 * want to allow before triggering an interrupt.
2127	 * The holdoff timer is in units of 20 usecs.
2128	 */
2129
2130#ifdef notyet
2131	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
2132	/* Select the interrupt holdoff timer page. */
2133	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2134	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
2135	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
2136
2137	/* Enable use of the holdoff timer. */
2138	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
2139	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
2140
2141	/* Select the RX suppression threshold page. */
2142	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2143	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
2144	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
2145
2146	/* Restore the page select bits. */
2147	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2148	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
2149#endif
2150
2151#ifdef DEVICE_POLLING
2152	/*
2153	 * Disable interrupts if we are polling.
2154	 */
2155	if (ifp->if_capenable & IFCAP_POLLING) {
2156		CSR_WRITE_4(sc, VGE_IMR, 0);
2157		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2158	} else	/* otherwise ... */
2159#endif
2160	{
2161		/*
2162		 * Enable interrupts.
2163		 */
2164		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2165		CSR_WRITE_4(sc, VGE_ISR, 0);
2166		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2167	}
2168
2169	mii_mediachg(mii);
2170
2171	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2172	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2173	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
2174
2175	sc->vge_link = 0;
2176}
2177
2178/*
2179 * Set media options.
2180 */
2181static int
2182vge_ifmedia_upd(ifp)
2183	struct ifnet		*ifp;
2184{
2185	struct vge_softc	*sc;
2186	struct mii_data		*mii;
2187
2188	sc = ifp->if_softc;
2189	VGE_LOCK(sc);
2190	mii = device_get_softc(sc->vge_miibus);
2191	mii_mediachg(mii);
2192	VGE_UNLOCK(sc);
2193
2194	return (0);
2195}
2196
2197/*
2198 * Report current media status.
2199 */
2200static void
2201vge_ifmedia_sts(ifp, ifmr)
2202	struct ifnet		*ifp;
2203	struct ifmediareq	*ifmr;
2204{
2205	struct vge_softc	*sc;
2206	struct mii_data		*mii;
2207
2208	sc = ifp->if_softc;
2209	mii = device_get_softc(sc->vge_miibus);
2210
2211	VGE_LOCK(sc);
2212	mii_pollstat(mii);
2213	VGE_UNLOCK(sc);
2214	ifmr->ifm_active = mii->mii_media_active;
2215	ifmr->ifm_status = mii->mii_media_status;
2216
2217	return;
2218}
2219
2220static void
2221vge_miibus_statchg(dev)
2222	device_t		dev;
2223{
2224	struct vge_softc	*sc;
2225	struct mii_data		*mii;
2226	struct ifmedia_entry	*ife;
2227
2228	sc = device_get_softc(dev);
2229	mii = device_get_softc(sc->vge_miibus);
2230	ife = mii->mii_media.ifm_cur;
2231
2232	/*
2233	 * If the user manually selects a media mode, we need to turn
2234	 * on the forced MAC mode bit in the DIAGCTL register. If the
2235	 * user happens to choose a full duplex mode, we also need to
2236	 * set the 'force full duplex' bit. This applies only to
2237	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
2238	 * mode is disabled, and in 1000baseT mode, full duplex is
2239	 * always implied, so we turn on the forced mode bit but leave
2240	 * the FDX bit cleared.
2241	 */
2242
2243	switch (IFM_SUBTYPE(ife->ifm_media)) {
2244	case IFM_AUTO:
2245		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2246		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2247		break;
2248	case IFM_1000_T:
2249		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2250		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2251		break;
2252	case IFM_100_TX:
2253	case IFM_10_T:
2254		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2255		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
2256			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2257		} else {
2258			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2259		}
2260		break;
2261	default:
2262		device_printf(dev, "unknown media type: %x\n",
2263		    IFM_SUBTYPE(ife->ifm_media));
2264		break;
2265	}
2266
2267	return;
2268}
2269
2270static int
2271vge_ioctl(ifp, command, data)
2272	struct ifnet		*ifp;
2273	u_long			command;
2274	caddr_t			data;
2275{
2276	struct vge_softc	*sc = ifp->if_softc;
2277	struct ifreq		*ifr = (struct ifreq *) data;
2278	struct mii_data		*mii;
2279	int			error = 0;
2280
2281	switch (command) {
2282	case SIOCSIFMTU:
2283		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
2284			error = EINVAL;
		else
2285			ifp->if_mtu = ifr->ifr_mtu;
2286		break;
2287	case SIOCSIFFLAGS:
2288		VGE_LOCK(sc);
2289		if (ifp->if_flags & IFF_UP) {
2290			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2291			    ifp->if_flags & IFF_PROMISC &&
2292			    !(sc->vge_if_flags & IFF_PROMISC)) {
2293				CSR_SETBIT_1(sc, VGE_RXCTL,
2294				    VGE_RXCTL_RX_PROMISC);
2295				vge_setmulti(sc);
2296			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2297			    !(ifp->if_flags & IFF_PROMISC) &&
2298			    sc->vge_if_flags & IFF_PROMISC) {
2299				CSR_CLRBIT_1(sc, VGE_RXCTL,
2300				    VGE_RXCTL_RX_PROMISC);
2301				vge_setmulti(sc);
2302		} else
2303				vge_init_locked(sc);
2304		} else {
2305			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2306				vge_stop(sc);
2307		}
2308		sc->vge_if_flags = ifp->if_flags;
2309		VGE_UNLOCK(sc);
2310		break;
2311	case SIOCADDMULTI:
2312	case SIOCDELMULTI:
2313		VGE_LOCK(sc);
2314		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2315			vge_setmulti(sc);
2316		VGE_UNLOCK(sc);
2317		break;
2318	case SIOCGIFMEDIA:
2319	case SIOCSIFMEDIA:
2320		mii = device_get_softc(sc->vge_miibus);
2321		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2322		break;
2323	case SIOCSIFCAP:
2324	    {
2325		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2326#ifdef DEVICE_POLLING
2327		if (mask & IFCAP_POLLING) {
2328			if (ifr->ifr_reqcap & IFCAP_POLLING) {
2329				error = ether_poll_register(vge_poll, ifp);
2330				if (error)
2331					return (error);
2332				VGE_LOCK(sc);
2333				/* Disable interrupts */
2334				CSR_WRITE_4(sc, VGE_IMR, 0);
2335				CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2336				ifp->if_capenable |= IFCAP_POLLING;
2337				VGE_UNLOCK(sc);
2338			} else {
2339				error = ether_poll_deregister(ifp);
2340				/* Enable interrupts. */
2341				VGE_LOCK(sc);
2342				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2343				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2344				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2345				ifp->if_capenable &= ~IFCAP_POLLING;
2346				VGE_UNLOCK(sc);
2347			}
2348		}
2349#endif /* DEVICE_POLLING */
2350		VGE_LOCK(sc);
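		/*
		 * Toggling IFCAP_TXCSUM updates if_hwassist with
		 * VGE_CSUM_FEATURES, assumed to be the CSUM_IP/CSUM_TCP/
		 * CSUM_UDP set that vge_encap() turns into descriptor
		 * checksum flags.
		 */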
2351		if ((mask & IFCAP_TXCSUM) != 0 &&
2352		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2353			ifp->if_capenable ^= IFCAP_TXCSUM;
2354			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2355				ifp->if_hwassist |= VGE_CSUM_FEATURES;
2356			else
2357				ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
2358		}
2359		if ((mask & IFCAP_RXCSUM) != 0 &&
2360		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
2361			ifp->if_capenable ^= IFCAP_RXCSUM;
2362		VGE_UNLOCK(sc);
2363	    }
2364		break;
2365	default:
2366		error = ether_ioctl(ifp, command, data);
2367		break;
2368	}
2369
2370	return (error);
2371}
2372
2373static void
2374vge_watchdog(void *arg)
2375{
2376	struct vge_softc *sc;
2377	struct ifnet *ifp;
2378
2379	sc = arg;
2380	VGE_LOCK_ASSERT(sc);
2381	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
2382	if (sc->vge_timer == 0 || --sc->vge_timer > 0)
2383		return;
2384
2385	ifp = sc->vge_ifp;
2386	if_printf(ifp, "watchdog timeout\n");
2387	ifp->if_oerrors++;
2388
2389	vge_txeof(sc);
2390	vge_rxeof(sc, VGE_RX_DESC_CNT);
2391
2392	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2393	vge_init_locked(sc);
2394
2395	return;
2396}
2397
2398/*
2399 * Stop the adapter and free any mbufs allocated to the
2400 * RX and TX lists.
2401 */
2402static void
2403vge_stop(sc)
2404	struct vge_softc		*sc;
2405{
2406	struct ifnet		*ifp;
2407
2408	VGE_LOCK_ASSERT(sc);
2409	ifp = sc->vge_ifp;
2410	sc->vge_timer = 0;
2411	callout_stop(&sc->vge_watchdog);
2412
2413	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2414
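	/*
	 * Mask interrupts, stop the MAC, ack any stale interrupt status and
	 * clear the TX/RX queue run bits through what are assumed to be the
	 * "clear" halves of the queue CSR register pairs.
	 */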
2415	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2416	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2417	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2418	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2419	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2420	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2421
2422	VGE_CHAIN_RESET(sc);
2423	vge_txeof(sc);
2424	vge_freebufs(sc);
2425}
2426
2427/*
2428 * Device suspend routine.  Stop the interface and mark the
2429 * softc as suspended.
2431 */
2432static int
2433vge_suspend(dev)
2434	device_t		dev;
2435{
2436	struct vge_softc	*sc;
2437
2438	sc = device_get_softc(dev);
2439
2440	VGE_LOCK(sc);
2441	vge_stop(sc);
2442
2443	sc->suspended = 1;
2444	VGE_UNLOCK(sc);
2445
2446	return (0);
2447}
2448
2449/*
2450 * Device resume routine.  Re-enable busmastering and memory-space
2451 * access, and restart the interface if it was up before the suspend.
2453 */
2454static int
2455vge_resume(dev)
2456	device_t		dev;
2457{
2458	struct vge_softc	*sc;
2459	struct ifnet		*ifp;
2460
2461	sc = device_get_softc(dev);
2462	ifp = sc->vge_ifp;
2463
2464	/* reenable busmastering */
2465	pci_enable_busmaster(dev);
2466	pci_enable_io(dev, SYS_RES_MEMORY);
2467
2468	/* reinitialize interface if necessary */
2469	VGE_LOCK(sc);
2470	if (ifp->if_flags & IFF_UP) {
2471		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2472		vge_init_locked(sc);
2473	}
2474	sc->suspended = 0;
2475	VGE_UNLOCK(sc);
2476
2477	return (0);
2478}
2479
2480/*
2481 * Stop all chip I/O so that the kernel's probe routines don't
2482 * get confused by errant DMAs when rebooting.
2483 */
2484static int
2485vge_shutdown(dev)
2486	device_t		dev;
2487{
2488	struct vge_softc		*sc;
2489
2490	sc = device_get_softc(dev);
2491
2492	VGE_LOCK(sc);
2493	vge_stop(sc);
2494	VGE_UNLOCK(sc);
2495
2496	return (0);
2497}
2498