if_vge.c: revision 167190 (deleted lines) vs. revision 173839 (added lines)
1/*-
2 * Copyright (c) 2004
3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/dev/vge/if_vge.c 167190 2007-03-04 03:38:08Z csjp $");
34__FBSDID("$FreeBSD: head/sys/dev/vge/if_vge.c 173839 2007-11-22 02:45:00Z yongari $");
35
36/*
37 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
38 *
39 * Written by Bill Paul <wpaul@windriver.com>
40 * Senior Networking Software Engineer
41 * Wind River Systems
42 */
43
44/*
 45 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
46 * combines a tri-speed ethernet MAC and PHY, with the following
47 * features:
48 *
49 * o Jumbo frame support up to 16K
50 * o Transmit and receive flow control
51 * o IPv4 checksum offload
52 * o VLAN tag insertion and stripping
53 * o TCP large send
54 * o 64-bit multicast hash table filter
55 * o 64 entry CAM filter
56 * o 16K RX FIFO and 48K TX FIFO memory
57 * o Interrupt moderation
58 *
59 * The VT6122 supports up to four transmit DMA queues. The descriptors
60 * in the transmit ring can address up to 7 data fragments; frames which
61 * span more than 7 data buffers must be coalesced, but in general the
62 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
63 * long. The receive descriptors address only a single buffer.
64 *
65 * There are two peculiar design issues with the VT6122. One is that
66 * receive data buffers must be aligned on a 32-bit boundary. This is
67 * not a problem where the VT6122 is used as a LOM device in x86-based
68 * systems, but on architectures that generate unaligned access traps, we
69 * have to do some copying.
70 *
71 * The other issue has to do with the way 64-bit addresses are handled.
72 * The DMA descriptors only allow you to specify 48 bits of addressing
73 * information. The remaining 16 bits are specified using one of the
74 * I/O registers. If you only have a 32-bit system, then this isn't
75 * an issue, but if you have a 64-bit system and more than 4GB of
 76 * memory, you must make sure your network data buffers reside
77 * in the same 48-bit 'segment.'
78 *
79 * Special thanks to Ryan Fu at VIA Networking for providing documentation
80 * and sample NICs for testing.
81 */
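/*
 * Illustrative sketch (editor's addition, not part of either revision):
 * how the 48-bit descriptor addressing described above plays out,
 * assuming VGE_ADDR_LO()/VGE_ADDR_HI() behave the way they are used
 * later in this file.  The low 32 bits of a bus address land in a
 * descriptor's vge_addrlo field and the next 16 bits in vge_addrhi,
 * giving 48 bits per descriptor; any bits above that are common to all
 * buffers and would have to be programmed through a chip register.
 * The hypothetical helper below merely checks that two buffers share
 * the same 48-bit segment.
 */
#if 0
static __inline int
vge_same_48bit_seg(bus_addr_t a, bus_addr_t b)
{

	return (((uint64_t)a >> 48) == ((uint64_t)b >> 48));
}
#endif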
82
83#ifdef HAVE_KERNEL_OPTION_HEADERS
84#include "opt_device_polling.h"
85#endif
86
87#include <sys/param.h>
88#include <sys/endian.h>
89#include <sys/systm.h>
90#include <sys/sockio.h>
91#include <sys/mbuf.h>
92#include <sys/malloc.h>
93#include <sys/module.h>
94#include <sys/kernel.h>
95#include <sys/socket.h>
96#include <sys/taskqueue.h>
97
98#include <net/if.h>
99#include <net/if_arp.h>
100#include <net/ethernet.h>
101#include <net/if_dl.h>
102#include <net/if_media.h>
103#include <net/if_types.h>
104#include <net/if_vlan_var.h>
105
106#include <net/bpf.h>
107
108#include <machine/bus.h>
109#include <machine/resource.h>
110#include <sys/bus.h>
111#include <sys/rman.h>
112
113#include <dev/mii/mii.h>
114#include <dev/mii/miivar.h>
115
116#include <dev/pci/pcireg.h>
117#include <dev/pci/pcivar.h>
118
119MODULE_DEPEND(vge, pci, 1, 1, 1);
120MODULE_DEPEND(vge, ether, 1, 1, 1);
121MODULE_DEPEND(vge, miibus, 1, 1, 1);
122
123/* "device miibus" required. See GENERIC if you get errors here. */
124#include "miibus_if.h"
125
126#include <dev/vge/if_vgereg.h>
127#include <dev/vge/if_vgevar.h>
128
129#define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
130
131/*
132 * Various supported device vendors/types and their names.
133 */
134static struct vge_type vge_devs[] = {
135 { VIA_VENDORID, VIA_DEVICEID_61XX,
136 "VIA Networking Gigabit Ethernet" },
137 { 0, 0, NULL }
138};
139
140static int vge_probe (device_t);
141static int vge_attach (device_t);
142static int vge_detach (device_t);
143
144static int vge_encap (struct vge_softc *, struct mbuf *, int);
145
146static void vge_dma_map_addr (void *, bus_dma_segment_t *, int, int);
147static void vge_dma_map_rx_desc (void *, bus_dma_segment_t *, int,
148 bus_size_t, int);
149static void vge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
150 bus_size_t, int);
151static int vge_allocmem (device_t, struct vge_softc *);
152static int vge_newbuf (struct vge_softc *, int, struct mbuf *);
153static int vge_rx_list_init (struct vge_softc *);
154static int vge_tx_list_init (struct vge_softc *);
155#ifdef VGE_FIXUP_RX
156static __inline void vge_fixup_rx
157 (struct mbuf *);
158#endif
159static void vge_rxeof (struct vge_softc *);
160static void vge_txeof (struct vge_softc *);
161static void vge_intr (void *);
162static void vge_tick (void *);
163static void vge_tx_task (void *, int);
164static void vge_start (struct ifnet *);
165static int vge_ioctl (struct ifnet *, u_long, caddr_t);
166static void vge_init (void *);
167static void vge_stop (struct vge_softc *);
168static void vge_watchdog (struct ifnet *);
169static int vge_suspend (device_t);
170static int vge_resume (device_t);
171static void vge_shutdown (device_t);
171static int vge_shutdown (device_t);
172static int vge_ifmedia_upd (struct ifnet *);
173static void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *);
174
175#ifdef VGE_EEPROM
176static void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *);
177#endif
178static void vge_read_eeprom (struct vge_softc *, caddr_t, int, int, int);
179
180static void vge_miipoll_start (struct vge_softc *);
181static void vge_miipoll_stop (struct vge_softc *);
182static int vge_miibus_readreg (device_t, int, int);
183static int vge_miibus_writereg (device_t, int, int, int);
184static void vge_miibus_statchg (device_t);
185
186static void vge_cam_clear (struct vge_softc *);
187static int vge_cam_set (struct vge_softc *, uint8_t *);
188static void vge_setmulti (struct vge_softc *);
189static void vge_reset (struct vge_softc *);
190
191#define VGE_PCI_LOIO 0x10
192#define VGE_PCI_LOMEM 0x14
193
194static device_method_t vge_methods[] = {
195 /* Device interface */
196 DEVMETHOD(device_probe, vge_probe),
197 DEVMETHOD(device_attach, vge_attach),
198 DEVMETHOD(device_detach, vge_detach),
199 DEVMETHOD(device_suspend, vge_suspend),
200 DEVMETHOD(device_resume, vge_resume),
201 DEVMETHOD(device_shutdown, vge_shutdown),
202
203 /* bus interface */
204 DEVMETHOD(bus_print_child, bus_generic_print_child),
205 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
206
207 /* MII interface */
208 DEVMETHOD(miibus_readreg, vge_miibus_readreg),
209 DEVMETHOD(miibus_writereg, vge_miibus_writereg),
210 DEVMETHOD(miibus_statchg, vge_miibus_statchg),
211
212 { 0, 0 }
213};
214
215static driver_t vge_driver = {
216 "vge",
217 vge_methods,
218 sizeof(struct vge_softc)
219};
220
221static devclass_t vge_devclass;
222
223DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
224DRIVER_MODULE(vge, cardbus, vge_driver, vge_devclass, 0, 0);
225DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
226
227#ifdef VGE_EEPROM
228/*
229 * Read a word of data stored in the EEPROM at address 'addr.'
230 */
231static void
232vge_eeprom_getword(sc, addr, dest)
233 struct vge_softc *sc;
234 int addr;
235 u_int16_t *dest;
236{
237 register int i;
238 u_int16_t word = 0;
239
240 /*
241 * Enter EEPROM embedded programming mode. In order to
242 * access the EEPROM at all, we first have to set the
243 * EELOAD bit in the CHIPCFG2 register.
244 */
245 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
246 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
247
248 /* Select the address of the word we want to read */
249 CSR_WRITE_1(sc, VGE_EEADDR, addr);
250
251 /* Issue read command */
252 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
253
254 /* Wait for the done bit to be set. */
255 for (i = 0; i < VGE_TIMEOUT; i++) {
256 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
257 break;
258 }
259
260 if (i == VGE_TIMEOUT) {
261 device_printf(sc->vge_dev, "EEPROM read timed out\n");
262 *dest = 0;
263 return;
264 }
265
266 /* Read the result */
267 word = CSR_READ_2(sc, VGE_EERDDAT);
268
269 /* Turn off EEPROM access mode. */
270 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
271 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
272
273 *dest = word;
274
275 return;
276}
277#endif
278
279/*
280 * Read a sequence of words from the EEPROM.
281 */
282static void
283vge_read_eeprom(sc, dest, off, cnt, swap)
284 struct vge_softc *sc;
285 caddr_t dest;
286 int off;
287 int cnt;
288 int swap;
289{
290 int i;
291#ifdef VGE_EEPROM
292 u_int16_t word = 0, *ptr;
293
294 for (i = 0; i < cnt; i++) {
295 vge_eeprom_getword(sc, off + i, &word);
296 ptr = (u_int16_t *)(dest + (i * 2));
297 if (swap)
298 *ptr = ntohs(word);
299 else
300 *ptr = word;
301 }
302#else
303 for (i = 0; i < ETHER_ADDR_LEN; i++)
304 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
305#endif
306}
307
308static void
309vge_miipoll_stop(sc)
310 struct vge_softc *sc;
311{
312 int i;
313
314 CSR_WRITE_1(sc, VGE_MIICMD, 0);
315
316 for (i = 0; i < VGE_TIMEOUT; i++) {
317 DELAY(1);
318 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
319 break;
320 }
321
322 if (i == VGE_TIMEOUT)
323 device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
324
325 return;
326}
327
328static void
329vge_miipoll_start(sc)
330 struct vge_softc *sc;
331{
332 int i;
333
334 /* First, make sure we're idle. */
335
336 CSR_WRITE_1(sc, VGE_MIICMD, 0);
337 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
338
339 for (i = 0; i < VGE_TIMEOUT; i++) {
340 DELAY(1);
341 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
342 break;
343 }
344
345 if (i == VGE_TIMEOUT) {
346 device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
347 return;
348 }
349
350 /* Now enable auto poll mode. */
351
352 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
353
354 /* And make sure it started. */
355
356 for (i = 0; i < VGE_TIMEOUT; i++) {
357 DELAY(1);
358 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
359 break;
360 }
361
362 if (i == VGE_TIMEOUT)
363 device_printf(sc->vge_dev, "failed to start MII autopoll\n");
364
365 return;
366}
367
368static int
369vge_miibus_readreg(dev, phy, reg)
370 device_t dev;
371 int phy, reg;
372{
373 struct vge_softc *sc;
374 int i;
375 u_int16_t rval = 0;
376
377 sc = device_get_softc(dev);
378
379 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
380 return(0);
381
382 VGE_LOCK(sc);
383 vge_miipoll_stop(sc);
384
385 /* Specify the register we want to read. */
386 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
387
388 /* Issue read command. */
389 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
390
391 /* Wait for the read command bit to self-clear. */
392 for (i = 0; i < VGE_TIMEOUT; i++) {
393 DELAY(1);
394 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
395 break;
396 }
397
398 if (i == VGE_TIMEOUT)
399 device_printf(sc->vge_dev, "MII read timed out\n");
400 else
401 rval = CSR_READ_2(sc, VGE_MIIDATA);
402
403 vge_miipoll_start(sc);
404 VGE_UNLOCK(sc);
405
406 return (rval);
407}
408
409static int
410vge_miibus_writereg(dev, phy, reg, data)
411 device_t dev;
412 int phy, reg, data;
413{
414 struct vge_softc *sc;
415 int i, rval = 0;
416
417 sc = device_get_softc(dev);
418
419 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
420 return(0);
421
422 VGE_LOCK(sc);
423 vge_miipoll_stop(sc);
424
425 /* Specify the register we want to write. */
426 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
427
428 /* Specify the data we want to write. */
429 CSR_WRITE_2(sc, VGE_MIIDATA, data);
430
431 /* Issue write command. */
432 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
433
434 /* Wait for the write command bit to self-clear. */
435 for (i = 0; i < VGE_TIMEOUT; i++) {
436 DELAY(1);
437 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
438 break;
439 }
440
441 if (i == VGE_TIMEOUT) {
442 device_printf(sc->vge_dev, "MII write timed out\n");
443 rval = EIO;
444 }
445
446 vge_miipoll_start(sc);
447 VGE_UNLOCK(sc);
448
449 return (rval);
450}
451
452static void
453vge_cam_clear(sc)
454 struct vge_softc *sc;
455{
456 int i;
457
458 /*
459 * Turn off all the mask bits. This tells the chip
460 * that none of the entries in the CAM filter are valid.
 461 * Desired entries will be enabled as we fill the filter in.
462 */
463
464 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
465 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
466 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
467 for (i = 0; i < 8; i++)
468 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
469
470 /* Clear the VLAN filter too. */
471
472 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
473 for (i = 0; i < 8; i++)
474 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
475
476 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
477 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
478 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
479
480 sc->vge_camidx = 0;
481
482 return;
483}
484
485static int
486vge_cam_set(sc, addr)
487 struct vge_softc *sc;
488 uint8_t *addr;
489{
490 int i, error = 0;
491
492 if (sc->vge_camidx == VGE_CAM_MAXADDRS)
493 return(ENOSPC);
494
495 /* Select the CAM data page. */
496 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
497 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
498
499 /* Set the filter entry we want to update and enable writing. */
500 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
501
502 /* Write the address to the CAM registers */
503 for (i = 0; i < ETHER_ADDR_LEN; i++)
504 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
505
506 /* Issue a write command. */
507 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
508
 509 /* Wait for it to clear. */
510 for (i = 0; i < VGE_TIMEOUT; i++) {
511 DELAY(1);
512 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
513 break;
514 }
515
516 if (i == VGE_TIMEOUT) {
517 device_printf(sc->vge_dev, "setting CAM filter failed\n");
518 error = EIO;
519 goto fail;
520 }
521
522 /* Select the CAM mask page. */
523 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
524 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
525
526 /* Set the mask bit that enables this filter. */
527 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
528 1<<(sc->vge_camidx & 7));
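	/*
	 * Editor's note (illustration, not in either revision): the CAM mask
	 * lives in eight 8-bit registers, so e.g. CAM index 11 sets bit 3 of
	 * VGE_CAM0 + 1 (11 / 8 == 1, 11 & 7 == 3).
	 */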
529
530 sc->vge_camidx++;
531
532fail:
533 /* Turn off access to CAM. */
534 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
535 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
536 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
537
538 return (error);
539}
540
541/*
542 * Program the multicast filter. We use the 64-entry CAM filter
 543 * for perfect filtering. If there are more than 64 multicast addresses,
 544 * we use the hash filter instead.
545 */
546static void
547vge_setmulti(sc)
548 struct vge_softc *sc;
549{
550 struct ifnet *ifp;
551 int error = 0/*, h = 0*/;
552 struct ifmultiaddr *ifma;
553 u_int32_t h, hashes[2] = { 0, 0 };
554
555 ifp = sc->vge_ifp;
556
557 /* First, zot all the multicast entries. */
558 vge_cam_clear(sc);
559 CSR_WRITE_4(sc, VGE_MAR0, 0);
560 CSR_WRITE_4(sc, VGE_MAR1, 0);
561
562 /*
563 * If the user wants allmulti or promisc mode, enable reception
564 * of all multicast frames.
565 */
566 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
567 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
568 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
569 return;
570 }
571
572 /* Now program new ones */
573 IF_ADDR_LOCK(ifp);
574 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
575 if (ifma->ifma_addr->sa_family != AF_LINK)
576 continue;
577 error = vge_cam_set(sc,
578 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
579 if (error)
580 break;
581 }
582
583 /* If there were too many addresses, use the hash filter. */
584 if (error) {
585 vge_cam_clear(sc);
586
587 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
588 if (ifma->ifma_addr->sa_family != AF_LINK)
589 continue;
590 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
591 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
592 if (h < 32)
593 hashes[0] |= (1 << h);
594 else
595 hashes[1] |= (1 << (h - 32));
596 }
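		/*
		 * Editor's illustration (not in either revision): the top six
		 * bits of the big-endian CRC select a bucket from 0 to 63;
		 * buckets 0-31 land in VGE_MAR0, buckets 32-63 in VGE_MAR1.
		 * A CRC whose top bits are 100101 (bucket 37) would thus set
		 * bit 5 of VGE_MAR1.
		 */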
597
598 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
599 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
600 }
601 IF_ADDR_UNLOCK(ifp);
602
603 return;
604}
605
606static void
607vge_reset(sc)
608 struct vge_softc *sc;
609{
610 register int i;
611
612 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
613
614 for (i = 0; i < VGE_TIMEOUT; i++) {
615 DELAY(5);
616 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
617 break;
618 }
619
620 if (i == VGE_TIMEOUT) {
621 device_printf(sc->vge_dev, "soft reset timed out");
622 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
623 DELAY(2000);
624 }
625
626 DELAY(5000);
627
628 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
629
630 for (i = 0; i < VGE_TIMEOUT; i++) {
631 DELAY(5);
632 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
633 break;
634 }
635
636 if (i == VGE_TIMEOUT) {
637 device_printf(sc->vge_dev, "EEPROM reload timed out\n");
638 return;
639 }
640
641 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
642
643 return;
644}
645
646/*
647 * Probe for a VIA gigabit chip. Check the PCI vendor and device
648 * IDs against our list and return a device name if we find a match.
649 */
650static int
651vge_probe(dev)
652 device_t dev;
653{
654 struct vge_type *t;
655 struct vge_softc *sc;
656
657 t = vge_devs;
658 sc = device_get_softc(dev);
659
660 while (t->vge_name != NULL) {
661 if ((pci_get_vendor(dev) == t->vge_vid) &&
662 (pci_get_device(dev) == t->vge_did)) {
663 device_set_desc(dev, t->vge_name);
664 return (BUS_PROBE_DEFAULT);
665 }
666 t++;
667 }
668
669 return (ENXIO);
670}
671
672static void
673vge_dma_map_rx_desc(arg, segs, nseg, mapsize, error)
674 void *arg;
675 bus_dma_segment_t *segs;
676 int nseg;
677 bus_size_t mapsize;
678 int error;
679{
680
681 struct vge_dmaload_arg *ctx;
682 struct vge_rx_desc *d = NULL;
683
684 if (error)
685 return;
686
687 ctx = arg;
688
 689 /* Signal error to caller if there are too many segments */
690 if (nseg > ctx->vge_maxsegs) {
691 ctx->vge_maxsegs = 0;
692 return;
693 }
694
695 /*
696 * Map the segment array into descriptors.
697 */
698
699 d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];
700
701 /* If this descriptor is still owned by the chip, bail. */
702
703 if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
704 device_printf(ctx->sc->vge_dev,
705 "tried to map busy descriptor\n");
706 ctx->vge_maxsegs = 0;
707 return;
708 }
709
710 d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
711 d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
712 d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
713 d->vge_sts = 0;
714 d->vge_ctl = 0;
715
716 ctx->vge_maxsegs = 1;
717
718 return;
719}
720
721static void
722vge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
723 void *arg;
724 bus_dma_segment_t *segs;
725 int nseg;
726 bus_size_t mapsize;
727 int error;
728{
729 struct vge_dmaload_arg *ctx;
730 struct vge_tx_desc *d = NULL;
731 struct vge_tx_frag *f;
732 int i = 0;
733
734 if (error)
735 return;
736
737 ctx = arg;
738
 739 /* Signal error to caller if there are too many segments */
740 if (nseg > ctx->vge_maxsegs) {
741 ctx->vge_maxsegs = 0;
742 return;
743 }
744
745 /* Map the segment array into descriptors. */
746
747 d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];
748
749 /* If this descriptor is still owned by the chip, bail. */
750
751 if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
752 ctx->vge_maxsegs = 0;
753 return;
754 }
755
756 for (i = 0; i < nseg; i++) {
757 f = &d->vge_frag[i];
758 f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
759 f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
760 f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
761 }
762
763 /* Argh. This chip does not autopad short frames */
764
765 if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
766 f = &d->vge_frag[i];
767 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
768 ctx->vge_m0->m_pkthdr.len));
769 f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
770 f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
771 ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
772 i++;
773 }
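	/*
	 * Editor's illustration (not in either revision): assuming
	 * VGE_MIN_FRAMELEN is the usual 60-byte minimum frame length without
	 * CRC, a 42-byte ARP request picks up an extra 18-byte fragment here
	 * so the chip transmits a full minimum-length frame.
	 */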
774
775 /*
776 * When telling the chip how many segments there are, we
777 * must use nsegs + 1 instead of just nsegs. Darned if I
778 * know why.
779 */
780 i++;
781
782 d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
783 d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;
784
785 if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
786 d->vge_ctl |= VGE_TDCTL_JUMBO;
787
788 ctx->vge_maxsegs = nseg;
789
790 return;
791}
792
793/*
794 * Map a single buffer address.
795 */
796
797static void
798vge_dma_map_addr(arg, segs, nseg, error)
799 void *arg;
800 bus_dma_segment_t *segs;
801 int nseg;
802 int error;
803{
804 bus_addr_t *addr;
805
806 if (error)
807 return;
808
809 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
810 addr = arg;
811 *addr = segs->ds_addr;
812
813 return;
814}
815
816static int
817vge_allocmem(dev, sc)
818 device_t dev;
819 struct vge_softc *sc;
820{
821 int error;
822 int nseg;
823 int i;
824
825 /*
826 * Allocate map for RX mbufs.
827 */
828 nseg = 32;
829 error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
830 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
831 NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
832 NULL, NULL, &sc->vge_ldata.vge_mtag);
833 if (error) {
834 device_printf(dev, "could not allocate dma tag\n");
835 return (ENOMEM);
836 }
837
838 /*
839 * Allocate map for TX descriptor list.
840 */
841 error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
842 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
843 NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
844 NULL, NULL, &sc->vge_ldata.vge_tx_list_tag);
845 if (error) {
846 device_printf(dev, "could not allocate dma tag\n");
847 return (ENOMEM);
848 }
849
850 /* Allocate DMA'able memory for the TX ring */
851
852 error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
853 (void **)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
854 &sc->vge_ldata.vge_tx_list_map);
855 if (error)
856 return (ENOMEM);
857
858 /* Load the map for the TX ring. */
859
860 error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
861 sc->vge_ldata.vge_tx_list_map, sc->vge_ldata.vge_tx_list,
862 VGE_TX_LIST_SZ, vge_dma_map_addr,
863 &sc->vge_ldata.vge_tx_list_addr, BUS_DMA_NOWAIT);
864
865 /* Create DMA maps for TX buffers */
866
867 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
868 error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
869 &sc->vge_ldata.vge_tx_dmamap[i]);
870 if (error) {
871 device_printf(dev, "can't create DMA map for TX\n");
872 return (ENOMEM);
873 }
874 }
875
876 /*
877 * Allocate map for RX descriptor list.
878 */
879 error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
880 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
881 NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
882 NULL, NULL, &sc->vge_ldata.vge_rx_list_tag);
883 if (error) {
884 device_printf(dev, "could not allocate dma tag\n");
885 return (ENOMEM);
886 }
887
888 /* Allocate DMA'able memory for the RX ring */
889
890 error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
891 (void **)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
892 &sc->vge_ldata.vge_rx_list_map);
893 if (error)
894 return (ENOMEM);
895
896 /* Load the map for the RX ring. */
897
898 error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
899 sc->vge_ldata.vge_rx_list_map, sc->vge_ldata.vge_rx_list,
900 VGE_TX_LIST_SZ, vge_dma_map_addr,
901 &sc->vge_ldata.vge_rx_list_addr, BUS_DMA_NOWAIT);
902
903 /* Create DMA maps for RX buffers */
904
905 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
906 error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
907 &sc->vge_ldata.vge_rx_dmamap[i]);
908 if (error) {
909 device_printf(dev, "can't create DMA map for RX\n");
910 return (ENOMEM);
911 }
912 }
913
914 return (0);
915}
916
917/*
918 * Attach the interface. Allocate softc structures, do ifmedia
919 * setup and ethernet/BPF attach.
920 */
921static int
922vge_attach(dev)
923 device_t dev;
924{
925 u_char eaddr[ETHER_ADDR_LEN];
926 struct vge_softc *sc;
927 struct ifnet *ifp;
928 int unit, error = 0, rid;
929
930 sc = device_get_softc(dev);
931 unit = device_get_unit(dev);
932 sc->vge_dev = dev;
933
934 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
935 MTX_DEF | MTX_RECURSE);
936 /*
937 * Map control/status registers.
938 */
939 pci_enable_busmaster(dev);
940
941 rid = VGE_PCI_LOMEM;
942 sc->vge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
943 0, ~0, 1, RF_ACTIVE);
944
945 if (sc->vge_res == NULL) {
946 printf ("vge%d: couldn't map ports/memory\n", unit);
947 error = ENXIO;
948 goto fail;
949 }
950
951 sc->vge_btag = rman_get_bustag(sc->vge_res);
952 sc->vge_bhandle = rman_get_bushandle(sc->vge_res);
953
954 /* Allocate interrupt */
955 rid = 0;
956 sc->vge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
957 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
958
959 if (sc->vge_irq == NULL) {
960 printf("vge%d: couldn't map interrupt\n", unit);
961 error = ENXIO;
962 goto fail;
963 }
964
965 /* Reset the adapter. */
966 vge_reset(sc);
967
968 /*
969 * Get station address from the EEPROM.
970 */
971 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
972
973 sc->vge_unit = unit;
974
975 /*
976 * Allocate the parent bus DMA tag appropriate for PCI.
977 */
978#define VGE_NSEG_NEW 32
979 error = bus_dma_tag_create(NULL, /* parent */
980 1, 0, /* alignment, boundary */
981 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
982 BUS_SPACE_MAXADDR, /* highaddr */
983 NULL, NULL, /* filter, filterarg */
984 MAXBSIZE, VGE_NSEG_NEW, /* maxsize, nsegments */
985 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
986 BUS_DMA_ALLOCNOW, /* flags */
987 NULL, NULL, /* lockfunc, lockarg */
988 &sc->vge_parent_tag);
989 if (error)
990 goto fail;
991
992 error = vge_allocmem(dev, sc);
993
994 if (error)
995 goto fail;
996
997 ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
998 if (ifp == NULL) {
999 printf("vge%d: can not if_alloc()\n", sc->vge_unit);
1000 error = ENOSPC;
1001 goto fail;
1002 }
1003
1004 /* Do MII setup */
1005 if (mii_phy_probe(dev, &sc->vge_miibus,
1006 vge_ifmedia_upd, vge_ifmedia_sts)) {
1007 printf("vge%d: MII without any phy!\n", sc->vge_unit);
1008 error = ENXIO;
1009 goto fail;
1010 }
1011
1012 ifp->if_softc = sc;
1013 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1014 ifp->if_mtu = ETHERMTU;
1015 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1016 ifp->if_ioctl = vge_ioctl;
1017 ifp->if_capabilities = IFCAP_VLAN_MTU;
1018 ifp->if_start = vge_start;
1019 ifp->if_hwassist = VGE_CSUM_FEATURES;
1020 ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
1021 ifp->if_capenable = ifp->if_capabilities;
1022#ifdef DEVICE_POLLING
1023 ifp->if_capabilities |= IFCAP_POLLING;
1024#endif
1025 ifp->if_watchdog = vge_watchdog;
1026 ifp->if_init = vge_init;
1027 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN);
1028 ifp->if_snd.ifq_drv_maxlen = VGE_IFQ_MAXLEN;
1029 IFQ_SET_READY(&ifp->if_snd);
1030
1031 TASK_INIT(&sc->vge_txtask, 0, vge_tx_task, ifp);
1032
1033 /*
1034 * Call MI attach routine.
1035 */
1036 ether_ifattach(ifp, eaddr);
1037
1038 /* Hook interrupt last to avoid having to lock softc */
1039 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
1040 NULL, vge_intr, sc, &sc->vge_intrhand);
1041
1042 if (error) {
1043 printf("vge%d: couldn't set up irq\n", unit);
1044 ether_ifdetach(ifp);
1045 goto fail;
1046 }
1047
1048fail:
1049 if (error)
1050 vge_detach(dev);
1051
1052 return (error);
1053}
1054
1055/*
1056 * Shutdown hardware and free up resources. This can be called any
1057 * time after the mutex has been initialized. It is called in both
1058 * the error case in attach and the normal detach case so it needs
1059 * to be careful about only freeing resources that have actually been
1060 * allocated.
1061 */
1062static int
1063vge_detach(dev)
1064 device_t dev;
1065{
1066 struct vge_softc *sc;
1067 struct ifnet *ifp;
1068 int i;
1069
1070 sc = device_get_softc(dev);
1071 KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
1072 ifp = sc->vge_ifp;
1073
1074#ifdef DEVICE_POLLING
1075 if (ifp->if_capenable & IFCAP_POLLING)
1076 ether_poll_deregister(ifp);
1077#endif
1078
1079 /* These should only be active if attach succeeded */
1080 if (device_is_attached(dev)) {
1081 vge_stop(sc);
1082 /*
1083 * Force off the IFF_UP flag here, in case someone
1084 * still had a BPF descriptor attached to this
 1085 * interface. If they do, ether_ifdetach() will cause
1086 * the BPF code to try and clear the promisc mode
1087 * flag, which will bubble down to vge_ioctl(),
1088 * which will try to call vge_init() again. This will
1089 * turn the NIC back on and restart the MII ticker,
1090 * which will panic the system when the kernel tries
1091 * to invoke the vge_tick() function that isn't there
1092 * anymore.
1093 */
1094 ifp->if_flags &= ~IFF_UP;
1095 ether_ifdetach(ifp);
1096 }
1097 if (sc->vge_miibus)
1098 device_delete_child(dev, sc->vge_miibus);
1099 bus_generic_detach(dev);
1100
1101 if (sc->vge_intrhand)
1102 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
1103 if (sc->vge_irq)
1104 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq);
1105 if (sc->vge_res)
1106 bus_release_resource(dev, SYS_RES_MEMORY,
1107 VGE_PCI_LOMEM, sc->vge_res);
1108 if (ifp)
1109 if_free(ifp);
1110
1111 /* Unload and free the RX DMA ring memory and map */
1112
1113 if (sc->vge_ldata.vge_rx_list_tag) {
1114 bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
1115 sc->vge_ldata.vge_rx_list_map);
1116 bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
1117 sc->vge_ldata.vge_rx_list,
1118 sc->vge_ldata.vge_rx_list_map);
1119 bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);
1120 }
1121
1122 /* Unload and free the TX DMA ring memory and map */
1123
1124 if (sc->vge_ldata.vge_tx_list_tag) {
1125 bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
1126 sc->vge_ldata.vge_tx_list_map);
1127 bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
1128 sc->vge_ldata.vge_tx_list,
1129 sc->vge_ldata.vge_tx_list_map);
1130 bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);
1131 }
1132
1133 /* Destroy all the RX and TX buffer maps */
1134
1135 if (sc->vge_ldata.vge_mtag) {
1136 for (i = 0; i < VGE_TX_DESC_CNT; i++)
1137 bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
1138 sc->vge_ldata.vge_tx_dmamap[i]);
1139 for (i = 0; i < VGE_RX_DESC_CNT; i++)
1140 bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
1141 sc->vge_ldata.vge_rx_dmamap[i]);
1142 bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
1143 }
1144
1145 if (sc->vge_parent_tag)
1146 bus_dma_tag_destroy(sc->vge_parent_tag);
1147
1148 mtx_destroy(&sc->vge_mtx);
1149
1150 return (0);
1151}
1152
1153static int
1154vge_newbuf(sc, idx, m)
1155 struct vge_softc *sc;
1156 int idx;
1157 struct mbuf *m;
1158{
1159 struct vge_dmaload_arg arg;
1160 struct mbuf *n = NULL;
1161 int i, error;
1162
1163 if (m == NULL) {
1164 n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1165 if (n == NULL)
1166 return (ENOBUFS);
1167 m = n;
1168 } else
1169 m->m_data = m->m_ext.ext_buf;
1170
1171
1172#ifdef VGE_FIXUP_RX
1173 /*
1174 * This is part of an evil trick to deal with non-x86 platforms.
1175 * The VIA chip requires RX buffers to be aligned on 32-bit
1176 * boundaries, but that will hose non-x86 machines. To get around
1177 * this, we leave some empty space at the start of each buffer
1178 * and for non-x86 hosts, we copy the buffer back two bytes
1179 * to achieve word alignment. This is slightly more efficient
1180 * than allocating a new buffer, copying the contents, and
1181 * discarding the old buffer.
1182 */
1183 m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
1184 m_adj(m, VGE_ETHER_ALIGN);
1185#else
1186 m->m_len = m->m_pkthdr.len = MCLBYTES;
1187#endif
1188
1189 arg.sc = sc;
1190 arg.vge_idx = idx;
1191 arg.vge_maxsegs = 1;
1192 arg.vge_flags = 0;
1193
1194 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
1195 sc->vge_ldata.vge_rx_dmamap[idx], m, vge_dma_map_rx_desc,
1196 &arg, BUS_DMA_NOWAIT);
1197 if (error || arg.vge_maxsegs != 1) {
1198 if (n != NULL)
1199 m_freem(n);
1200 return (ENOMEM);
1201 }
1202
1203 /*
1204 * Note: the manual fails to document the fact that for
 1205 * proper operation, the driver needs to replenish the RX
1206 * DMA ring 4 descriptors at a time (rather than one at a
1207 * time, like most chips). We can allocate the new buffers
1208 * but we should not set the OWN bits until we're ready
1209 * to hand back 4 of them in one shot.
1210 */
1211
1212#define VGE_RXCHUNK 4
1213 sc->vge_rx_consumed++;
1214 if (sc->vge_rx_consumed == VGE_RXCHUNK) {
1215 for (i = idx; i != idx - sc->vge_rx_consumed; i--)
1216 sc->vge_ldata.vge_rx_list[i].vge_sts |=
1217 htole32(VGE_RDSTS_OWN);
1218 sc->vge_rx_consumed = 0;
1219 }
1220
1221 sc->vge_ldata.vge_rx_mbuf[idx] = m;
1222
1223 bus_dmamap_sync(sc->vge_ldata.vge_mtag,
1224 sc->vge_ldata.vge_rx_dmamap[idx],
1225 BUS_DMASYNC_PREREAD);
1226
1227 return (0);
1228}
1229
1230static int
1231vge_tx_list_init(sc)
1232 struct vge_softc *sc;
1233{
1234 bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
1235 bzero ((char *)&sc->vge_ldata.vge_tx_mbuf,
1236 (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
1237
1238 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
1239 sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
1240 sc->vge_ldata.vge_tx_prodidx = 0;
1241 sc->vge_ldata.vge_tx_considx = 0;
1242 sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
1243
1244 return (0);
1245}
1246
1247static int
1248vge_rx_list_init(sc)
1249 struct vge_softc *sc;
1250{
1251 int i;
1252
1253 bzero ((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
1254 bzero ((char *)&sc->vge_ldata.vge_rx_mbuf,
1255 (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));
1256
1257 sc->vge_rx_consumed = 0;
1258
1259 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1260 if (vge_newbuf(sc, i, NULL) == ENOBUFS)
1261 return (ENOBUFS);
1262 }
1263
1264 /* Flush the RX descriptors */
1265
1266 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
1267 sc->vge_ldata.vge_rx_list_map,
1268 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1269
1270 sc->vge_ldata.vge_rx_prodidx = 0;
1271 sc->vge_rx_consumed = 0;
1272 sc->vge_head = sc->vge_tail = NULL;
1273
1274 return (0);
1275}
1276
1277#ifdef VGE_FIXUP_RX
1278static __inline void
1279vge_fixup_rx(m)
1280 struct mbuf *m;
1281{
1282 int i;
1283 uint16_t *src, *dst;
1284
1285 src = mtod(m, uint16_t *);
1286 dst = src - 1;
1287
1288 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1289 *dst++ = *src++;
1290
1291 m->m_data -= ETHER_ALIGN;
1292
1293 return;
1294}
1295#endif
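/*
 * Editor's illustration (not part of either revision): vge_newbuf() above
 * reserves VGE_ETHER_ALIGN bytes at the front of each cluster so the chip
 * still DMAs the frame to a 32-bit aligned address; vge_fixup_rx() then
 * slides the received data back two bytes (ETHER_ALIGN), one 16-bit word at
 * a time, so the IP header that follows the 14-byte Ethernet header ends up
 * 32-bit aligned on strict-alignment machines.
 */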
1296
1297/*
1298 * RX handler. We support the reception of jumbo frames that have
1299 * been fragmented across multiple 2K mbuf cluster buffers.
1300 */
1301static void
1302vge_rxeof(sc)
1303 struct vge_softc *sc;
1304{
1305 struct mbuf *m;
1306 struct ifnet *ifp;
1307 int i, total_len;
1308 int lim = 0;
1309 struct vge_rx_desc *cur_rx;
1310 u_int32_t rxstat, rxctl;
1311
1312 VGE_LOCK_ASSERT(sc);
1313 ifp = sc->vge_ifp;
1314 i = sc->vge_ldata.vge_rx_prodidx;
1315
1316 /* Invalidate the descriptor memory */
1317
1318 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
1319 sc->vge_ldata.vge_rx_list_map,
1320 BUS_DMASYNC_POSTREAD);
1321
1322 while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
1323
1324#ifdef DEVICE_POLLING
1325 if (ifp->if_capenable & IFCAP_POLLING) {
1326 if (sc->rxcycles <= 0)
1327 break;
1328 sc->rxcycles--;
1329 }
1330#endif
1331
1332 cur_rx = &sc->vge_ldata.vge_rx_list[i];
1333 m = sc->vge_ldata.vge_rx_mbuf[i];
1334 total_len = VGE_RXBYTES(cur_rx);
1335 rxstat = le32toh(cur_rx->vge_sts);
1336 rxctl = le32toh(cur_rx->vge_ctl);
1337
1338 /* Invalidate the RX mbuf and unload its map */
1339
1340 bus_dmamap_sync(sc->vge_ldata.vge_mtag,
1341 sc->vge_ldata.vge_rx_dmamap[i],
1342 BUS_DMASYNC_POSTWRITE);
1343 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
1344 sc->vge_ldata.vge_rx_dmamap[i]);
1345
1346 /*
1347 * If the 'start of frame' bit is set, this indicates
1348 * either the first fragment in a multi-fragment receive,
1349 * or an intermediate fragment. Either way, we want to
1350 * accumulate the buffers.
1351 */
1352 if (rxstat & VGE_RXPKT_SOF) {
1353 m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
1354 if (sc->vge_head == NULL)
1355 sc->vge_head = sc->vge_tail = m;
1356 else {
1357 m->m_flags &= ~M_PKTHDR;
1358 sc->vge_tail->m_next = m;
1359 sc->vge_tail = m;
1360 }
1361 vge_newbuf(sc, i, NULL);
1362 VGE_RX_DESC_INC(i);
1363 continue;
1364 }
1365
1366 /*
1367 * Bad/error frames will have the RXOK bit cleared.
1368 * However, there's one error case we want to allow:
1369 * if a VLAN tagged frame arrives and the chip can't
1370 * match it against the CAM filter, it considers this
1371 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1372 * We don't want to drop the frame though: our VLAN
1373 * filtering is done in software.
1374 */
1375 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
1376 && !(rxstat & VGE_RDSTS_CSUMERR)) {
1377 ifp->if_ierrors++;
1378 /*
1379 * If this is part of a multi-fragment packet,
1380 * discard all the pieces.
1381 */
1382 if (sc->vge_head != NULL) {
1383 m_freem(sc->vge_head);
1384 sc->vge_head = sc->vge_tail = NULL;
1385 }
1386 vge_newbuf(sc, i, m);
1387 VGE_RX_DESC_INC(i);
1388 continue;
1389 }
1390
1391 /*
1392 * If allocating a replacement mbuf fails,
1393 * reload the current one.
1394 */
1395
1396 if (vge_newbuf(sc, i, NULL)) {
1397 ifp->if_ierrors++;
1398 if (sc->vge_head != NULL) {
1399 m_freem(sc->vge_head);
1400 sc->vge_head = sc->vge_tail = NULL;
1401 }
1402 vge_newbuf(sc, i, m);
1403 VGE_RX_DESC_INC(i);
1404 continue;
1405 }
1406
1407 VGE_RX_DESC_INC(i);
1408
1409 if (sc->vge_head != NULL) {
1410 m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
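			/*
			 * Editor's illustration (not in either revision):
			 * each fragment buffer holds MCLBYTES -
			 * VGE_ETHER_ALIGN bytes.  Assuming 2048-byte clusters
			 * and a 4-byte VGE_ETHER_ALIGN, a 3000-byte frame
			 * spans two buffers and the final fragment carries
			 * 3000 % 2044 = 956 bytes.
			 */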
1411 /*
 1412 * Special case: if there are 4 bytes or less
 1413 * in this buffer, the mbuf can be discarded:
 1414 * the last 4 bytes are the CRC, which we don't
1415 * care about anyway.
1416 */
1417 if (m->m_len <= ETHER_CRC_LEN) {
1418 sc->vge_tail->m_len -=
1419 (ETHER_CRC_LEN - m->m_len);
1420 m_freem(m);
1421 } else {
1422 m->m_len -= ETHER_CRC_LEN;
1423 m->m_flags &= ~M_PKTHDR;
1424 sc->vge_tail->m_next = m;
1425 }
1426 m = sc->vge_head;
1427 sc->vge_head = sc->vge_tail = NULL;
1428 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1429 } else
1430 m->m_pkthdr.len = m->m_len =
1431 (total_len - ETHER_CRC_LEN);
1432
1433#ifdef VGE_FIXUP_RX
1434 vge_fixup_rx(m);
1435#endif
1436 ifp->if_ipackets++;
1437 m->m_pkthdr.rcvif = ifp;
1438
1439 /* Do RX checksumming if enabled */
1440 if (ifp->if_capenable & IFCAP_RXCSUM) {
1441
1442 /* Check IP header checksum */
1443 if (rxctl & VGE_RDCTL_IPPKT)
1444 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1445 if (rxctl & VGE_RDCTL_IPCSUMOK)
1446 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1447
1448 /* Check TCP/UDP checksum */
1449 if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
1450 rxctl & VGE_RDCTL_PROTOCSUMOK) {
1451 m->m_pkthdr.csum_flags |=
1452 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1453 m->m_pkthdr.csum_data = 0xffff;
1454 }
1455 }
1456
1457 if (rxstat & VGE_RDSTS_VTAG) {
1458 /*
1459 * The 32-bit rxctl register is stored in little-endian.
1460 * However, the 16-bit vlan tag is stored in big-endian,
1461 * so we have to byte swap it.
1462 */
1463 m->m_pkthdr.ether_vtag =
1464 bswap16(rxctl & VGE_RDCTL_VLANID);
1465 m->m_flags |= M_VLANTAG;
1466 }
1467
1468 VGE_UNLOCK(sc);
1469 (*ifp->if_input)(ifp, m);
1470 VGE_LOCK(sc);
1471
1472 lim++;
1473 if (lim == VGE_RX_DESC_CNT)
1474 break;
1475
1476 }
1477
1478 /* Flush the RX DMA ring */
1479
1480 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
1481 sc->vge_ldata.vge_rx_list_map,
1482 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1483
1484 sc->vge_ldata.vge_rx_prodidx = i;
1485 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
1486
1487
1488 return;
1489}
1490
1491static void
1492vge_txeof(sc)
1493 struct vge_softc *sc;
1494{
1495 struct ifnet *ifp;
1496 u_int32_t txstat;
1497 int idx;
1498
1499 ifp = sc->vge_ifp;
1500 idx = sc->vge_ldata.vge_tx_considx;
1501
1502 /* Invalidate the TX descriptor list */
1503
1504 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
1505 sc->vge_ldata.vge_tx_list_map,
1506 BUS_DMASYNC_POSTREAD);
1507
1508 while (idx != sc->vge_ldata.vge_tx_prodidx) {
1509
1510 txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
1511 if (txstat & VGE_TDSTS_OWN)
1512 break;
1513
1514 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
1515 sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
1516 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
1517 sc->vge_ldata.vge_tx_dmamap[idx]);
1518 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
1519 ifp->if_collisions++;
1520 if (txstat & VGE_TDSTS_TXERR)
1521 ifp->if_oerrors++;
1522 else
1523 ifp->if_opackets++;
1524
1525 sc->vge_ldata.vge_tx_free++;
1526 VGE_TX_DESC_INC(idx);
1527 }
1528
1529 /* No changes made to the TX ring, so no flush needed */
1530
1531 if (idx != sc->vge_ldata.vge_tx_considx) {
1532 sc->vge_ldata.vge_tx_considx = idx;
1533 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1534 ifp->if_timer = 0;
1535 }
1536
1537 /*
 1538 * If not all descriptors have been reaped yet,
1539 * reload the timer so that we will eventually get another
1540 * interrupt that will cause us to re-enter this routine.
1541 * This is done in case the transmitter has gone idle.
1542 */
1543 if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
1544 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1545 }
1546
1547 return;
1548}
1549
1550static void
1551vge_tick(xsc)
1552 void *xsc;
1553{
1554 struct vge_softc *sc;
1555 struct ifnet *ifp;
1556 struct mii_data *mii;
1557
1558 sc = xsc;
1559 ifp = sc->vge_ifp;
1560 VGE_LOCK(sc);
1561 mii = device_get_softc(sc->vge_miibus);
1562
1563 mii_tick(mii);
1564 if (sc->vge_link) {
1565 if (!(mii->mii_media_status & IFM_ACTIVE)) {
1566 sc->vge_link = 0;
1567 if_link_state_change(sc->vge_ifp,
1568 LINK_STATE_DOWN);
1569 }
1570 } else {
1571 if (mii->mii_media_status & IFM_ACTIVE &&
1572 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1573 sc->vge_link = 1;
1574 if_link_state_change(sc->vge_ifp,
1575 LINK_STATE_UP);
1576 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1577 taskqueue_enqueue(taskqueue_swi,
1578 &sc->vge_txtask);
1579 }
1580 }
1581
1582 VGE_UNLOCK(sc);
1583
1584 return;
1585}
1586
1587#ifdef DEVICE_POLLING
1588static void
1589vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
1590{
1591 struct vge_softc *sc = ifp->if_softc;
1592
1593 VGE_LOCK(sc);
1594 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1595 goto done;
1596
1597 sc->rxcycles = count;
1598 vge_rxeof(sc);
1599 vge_txeof(sc);
1600
1601 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1602 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
1603
1604 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1605 u_int32_t status;
1606 status = CSR_READ_4(sc, VGE_ISR);
1607 if (status == 0xFFFFFFFF)
1608 goto done;
1609 if (status)
1610 CSR_WRITE_4(sc, VGE_ISR, status);
1611
1612 /*
1613 * XXX check behaviour on receiver stalls.
1614 */
1615
1616 if (status & VGE_ISR_TXDMA_STALL ||
1617 status & VGE_ISR_RXDMA_STALL)
1618 vge_init(sc);
1619
1620 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1621 vge_rxeof(sc);
1622 ifp->if_ierrors++;
1623 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1624 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1625 }
1626 }
1627done:
1628 VGE_UNLOCK(sc);
1629}
1630#endif /* DEVICE_POLLING */
1631
1632static void
1633vge_intr(arg)
1634 void *arg;
1635{
1636 struct vge_softc *sc;
1637 struct ifnet *ifp;
1638 u_int32_t status;
1639
1640 sc = arg;
1641
1642 if (sc->suspended) {
1643 return;
1644 }
1645
1646 VGE_LOCK(sc);
1647 ifp = sc->vge_ifp;
1648
1649 if (!(ifp->if_flags & IFF_UP)) {
1650 VGE_UNLOCK(sc);
1651 return;
1652 }
1653
1654#ifdef DEVICE_POLLING
1655 if (ifp->if_capenable & IFCAP_POLLING) {
1656 VGE_UNLOCK(sc);
1657 return;
1658 }
1659#endif
1660
1661 /* Disable interrupts */
1662 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1663
1664 for (;;) {
1665
1666 status = CSR_READ_4(sc, VGE_ISR);
 1667 /* If the card has gone away the read returns 0xffffffff. */
1668 if (status == 0xFFFFFFFF)
1669 break;
1670
1671 if (status)
1672 CSR_WRITE_4(sc, VGE_ISR, status);
1673
1674 if ((status & VGE_INTRS) == 0)
1675 break;
1676
1677 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1678 vge_rxeof(sc);
1679
1680 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1681 vge_rxeof(sc);
1682 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1683 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1684 }
1685
1686 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
1687 vge_txeof(sc);
1688
1689 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
1690 vge_init(sc);
1691
1692 if (status & VGE_ISR_LINKSTS)
1693 vge_tick(sc);
1694 }
1695
1696 /* Re-enable interrupts */
1697 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1698
1699 VGE_UNLOCK(sc);
1700
1701 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1702 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
1703
1704 return;
1705}
1706
1707static int
1708vge_encap(sc, m_head, idx)
1709 struct vge_softc *sc;
1710 struct mbuf *m_head;
1711 int idx;
1712{
1713 struct mbuf *m_new = NULL;
1714 struct vge_dmaload_arg arg;
1715 bus_dmamap_t map;
1716 int error;
1717
1718 if (sc->vge_ldata.vge_tx_free <= 2)
1719 return (EFBIG);
1720
1721 arg.vge_flags = 0;
1722
1723 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1724 arg.vge_flags |= VGE_TDCTL_IPCSUM;
1725 if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
1726 arg.vge_flags |= VGE_TDCTL_TCPCSUM;
1727 if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
1728 arg.vge_flags |= VGE_TDCTL_UDPCSUM;
1729
1730 arg.sc = sc;
1731 arg.vge_idx = idx;
1732 arg.vge_m0 = m_head;
1733 arg.vge_maxsegs = VGE_TX_FRAGS;
1734
1735 map = sc->vge_ldata.vge_tx_dmamap[idx];
1736 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
1737 m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
1738
1739 if (error && error != EFBIG) {
1740 printf("vge%d: can't map mbuf (error %d)\n",
1741 sc->vge_unit, error);
1742 return (ENOBUFS);
1743 }
1744
1745 /* Too many segments to map, coalesce into a single mbuf */
1746
1747 if (error || arg.vge_maxsegs == 0) {
1748 m_new = m_defrag(m_head, M_DONTWAIT);
1749 if (m_new == NULL)
1750 return (1);
1751 else
1752 m_head = m_new;
1753
1754 arg.sc = sc;
1755 arg.vge_m0 = m_head;
1756 arg.vge_idx = idx;
1757 arg.vge_maxsegs = 1;
1758
1759 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
1760 m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
1761 if (error) {
1762 printf("vge%d: can't map mbuf (error %d)\n",
1763 sc->vge_unit, error);
1764 return (EFBIG);
1765 }
1766 }
1767
1768 sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
1769 sc->vge_ldata.vge_tx_free--;
1770
1771 /*
1772 * Set up hardware VLAN tagging.
1773 */
1774
1775 if (m_head->m_flags & M_VLANTAG)
1776 sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
1777 htole32(m_head->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG);
1778
1779 sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
1780
1781 return (0);
1782}
1783
1784static void
1785vge_tx_task(arg, npending)
1786 void *arg;
1787 int npending;
1788{
1789 struct ifnet *ifp;
1790
1791 ifp = arg;
1792 vge_start(ifp);
1793
1794 return;
1795}
1796
1797/*
1798 * Main transmit routine.
1799 */
1800
1801static void
1802vge_start(ifp)
1803 struct ifnet *ifp;
1804{
1805 struct vge_softc *sc;
1806 struct mbuf *m_head = NULL;
1807 int idx, pidx = 0;
1808
1809 sc = ifp->if_softc;
1810 VGE_LOCK(sc);
1811
1812 if (!sc->vge_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1813 VGE_UNLOCK(sc);
1814 return;
1815 }
1816
1817 if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
1818 VGE_UNLOCK(sc);
1819 return;
1820 }
1821
1822 idx = sc->vge_ldata.vge_tx_prodidx;
1823
1824 pidx = idx - 1;
1825 if (pidx < 0)
1826 pidx = VGE_TX_DESC_CNT - 1;
1827
1828
1829 while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
1830 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1831 if (m_head == NULL)
1832 break;
1833
1834 if (vge_encap(sc, m_head, idx)) {
1835 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1836 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1837 break;
1838 }
1839
1840 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
1841 htole16(VGE_TXDESC_Q);
1842
1843 pidx = idx;
1844 VGE_TX_DESC_INC(idx);
1845
1846 /*
1847 * If there's a BPF listener, bounce a copy of this frame
1848 * to him.
1849 */
1850 ETHER_BPF_MTAP(ifp, m_head);
1851 }
1852
1853 if (idx == sc->vge_ldata.vge_tx_prodidx) {
1854 VGE_UNLOCK(sc);
1855 return;
1856 }
1857
1858 /* Flush the TX descriptors */
1859
1860 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
1861 sc->vge_ldata.vge_tx_list_map,
1862 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1863
1864 /* Issue a transmit command. */
1865 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1866
1867 sc->vge_ldata.vge_tx_prodidx = idx;
1868
1869 /*
1870 * Use the countdown timer for interrupt moderation.
1871 * 'TX done' interrupts are disabled. Instead, we reset the
1872 * countdown timer, which will begin counting until it hits
1873 * the value in the SSTIMER register, and then trigger an
1874 * interrupt. Each time we set the TIMER0_ENABLE bit, the
1875 * timer count is reloaded. Only when the transmitter
1876 * is idle will the timer hit 0 and an interrupt fire.
1877 */
1878 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1879
1880 VGE_UNLOCK(sc);
1881
1882 /*
1883 * Set a timeout in case the chip goes out to lunch.
1884 */
1885 ifp->if_timer = 5;
1886
1887 return;
1888}
1889
1890static void
1891vge_init(xsc)
1892 void *xsc;
1893{
1894 struct vge_softc *sc = xsc;
1895 struct ifnet *ifp = sc->vge_ifp;
1896 struct mii_data *mii;
1897 int i;
1898
1899 VGE_LOCK(sc);
1900 mii = device_get_softc(sc->vge_miibus);
1901
1902 /*
1903 * Cancel pending I/O and free all RX/TX buffers.
1904 */
1905 vge_stop(sc);
1906 vge_reset(sc);
1907
1908 /*
1909 * Initialize the RX and TX descriptors and mbufs.
1910 */
1911
1912 vge_rx_list_init(sc);
1913 vge_tx_list_init(sc);
1914
1915 /* Set our station address */
1916 for (i = 0; i < ETHER_ADDR_LEN; i++)
1917 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);
1918
1919 /*
1920 * Set receive FIFO threshold. Also allow transmission and
1921 * reception of VLAN tagged frames.
1922 */
1923 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
1924 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
1925
1926 /* Set DMA burst length */
1927 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
1928 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
1929
1930 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
1931
1932 /* Set collision backoff algorithm */
1933 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
1934 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
1935 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
1936
1937 /* Disable LPSEL field in priority resolution */
1938 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
1939
1940 /*
1941 * Load the addresses of the DMA queues into the chip.
1942 * Note that we only use one transmit queue.
1943 */
1944
1945 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
1946 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
1947 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
1948
1949 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
1950 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
1951 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
1952 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
1953
1954 /* Enable and wake up the RX descriptor queue */
1955 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1956 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1957
1958 /* Enable the TX descriptor queue */
1959 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
1960
1961 /* Set up the receive filter -- allow large frames for VLANs. */
1962 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
1963
1964 /* If we want promiscuous mode, set the allframes bit. */
1965 if (ifp->if_flags & IFF_PROMISC) {
1966 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1967 }
1968
1969 /* Set capture broadcast bit to capture broadcast frames. */
1970 if (ifp->if_flags & IFF_BROADCAST) {
1971 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
1972 }
1973
1974 /* Set multicast bit to capture multicast frames. */
1975 if (ifp->if_flags & IFF_MULTICAST) {
1976 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
1977 }
1978
1979 /* Init the cam filter. */
1980 vge_cam_clear(sc);
1981
1982 /* Init the multicast filter. */
1983 vge_setmulti(sc);
1984
1985 /* Enable flow control */
1986
1987 CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
1988
1989 /* Enable jumbo frame reception (if desired) */
1990
1991 /* Start the MAC. */
1992 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1993 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1994 CSR_WRITE_1(sc, VGE_CRS0,
1995 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
1996
1997 /*
1998 * Configure one-shot timer for microsecond
1999 * resolution and load it for 400 usecs.
2000 */
2001 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
2002 CSR_WRITE_2(sc, VGE_SSTIMER, 400);
2003
2004 /*
2005 * Configure interrupt moderation for receive. Enable
2006 * the holdoff counter and load it, and set the RX
2007 * suppression count to the number of descriptors we
2008 * want to allow before triggering an interrupt.
2009 * The holdoff timer is in units of 20 usecs.
2010 */
2011
2012#ifdef notyet
2013 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
2014 /* Select the interrupt holdoff timer page. */
2015 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2016 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
2017 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
2018
2019 /* Enable use of the holdoff timer. */
2020 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
2021 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
2022
2023 /* Select the RX suppression threshold page. */
2024 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2025 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
2026 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
2027
2028 /* Restore the page select bits. */
2029 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2030 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
2031#endif
2032
2033#ifdef DEVICE_POLLING
2034 /*
2035 * Disable interrupts if we are polling.
2036 */
2037 if (ifp->if_capenable & IFCAP_POLLING) {
2038 CSR_WRITE_4(sc, VGE_IMR, 0);
2039 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2040 } else /* otherwise ... */
2041#endif
2042 {
2043 /*
2044 * Enable interrupts.
2045 */
2046 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2047 CSR_WRITE_4(sc, VGE_ISR, 0);
2048 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2049 }
2050
2051 mii_mediachg(mii);
2052
2053 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2054 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2055
2056 sc->vge_if_flags = 0;
2057 sc->vge_link = 0;
2058
2059 VGE_UNLOCK(sc);
2060
2061 return;
2062}
2063
2064/*
2065 * Set media options.
2066 */
2067static int
2068vge_ifmedia_upd(ifp)
2069 struct ifnet *ifp;
2070{
2071 struct vge_softc *sc;
2072 struct mii_data *mii;
2073
2074 sc = ifp->if_softc;
2075 VGE_LOCK(sc);
2076 mii = device_get_softc(sc->vge_miibus);
2077 mii_mediachg(mii);
2078 VGE_UNLOCK(sc);
2079
2080 return (0);
2081}
2082
2083/*
2084 * Report current media status.
2085 */
2086static void
2087vge_ifmedia_sts(ifp, ifmr)
2088 struct ifnet *ifp;
2089 struct ifmediareq *ifmr;
2090{
2091 struct vge_softc *sc;
2092 struct mii_data *mii;
2093
2094 sc = ifp->if_softc;
2095 mii = device_get_softc(sc->vge_miibus);
2096
2097 mii_pollstat(mii);
2098 ifmr->ifm_active = mii->mii_media_active;
2099 ifmr->ifm_status = mii->mii_media_status;
2100
2101 return;
2102}
2103
2104static void
2105vge_miibus_statchg(dev)
2106 device_t dev;
2107{
2108 struct vge_softc *sc;
2109 struct mii_data *mii;
2110 struct ifmedia_entry *ife;
2111
2112 sc = device_get_softc(dev);
2113 mii = device_get_softc(sc->vge_miibus);
2114 ife = mii->mii_media.ifm_cur;
2115
2116 /*
2117 * If the user manually selects a media mode, we need to turn
2118 * on the forced MAC mode bit in the DIAGCTL register. If the
2119 * user happens to choose a full duplex mode, we also need to
2120 * set the 'force full duplex' bit. This applies only to
2121 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
2122 * mode is disabled, and in 1000baseT mode, full duplex is
2123 * always implied, so we turn on the forced mode bit but leave
2124 * the FDX bit cleared.
2125 */
2126
2127 switch (IFM_SUBTYPE(ife->ifm_media)) {
2128 case IFM_AUTO:
2129 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2130 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2131 break;
2132 case IFM_1000_T:
2133 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2134 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2135 break;
2136 case IFM_100_TX:
2137 case IFM_10_T:
2138 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2139 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
2140 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2141 } else {
2142 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2143 }
2144 break;
2145 default:
2146 device_printf(dev, "unknown media type: %x\n",
2147 IFM_SUBTYPE(ife->ifm_media));
2148 break;
2149 }
2150
2151 return;
2152}
2153
2154static int
2155vge_ioctl(ifp, command, data)
2156 struct ifnet *ifp;
2157 u_long command;
2158 caddr_t data;
2159{
2160 struct vge_softc *sc = ifp->if_softc;
2161 struct ifreq *ifr = (struct ifreq *) data;
2162 struct mii_data *mii;
2163 int error = 0;
2164
2165 switch (command) {
2166 case SIOCSIFMTU:
2167 if (ifr->ifr_mtu > VGE_JUMBO_MTU)
2168 error = EINVAL;
2169 else ifp->if_mtu = ifr->ifr_mtu;
2170 break;
2171 case SIOCSIFFLAGS:
2172 if (ifp->if_flags & IFF_UP) {
2173 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2174 ifp->if_flags & IFF_PROMISC &&
2175 !(sc->vge_if_flags & IFF_PROMISC)) {
2176 CSR_SETBIT_1(sc, VGE_RXCTL,
2177 VGE_RXCTL_RX_PROMISC);
2178 vge_setmulti(sc);
2179 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2180 !(ifp->if_flags & IFF_PROMISC) &&
2181 sc->vge_if_flags & IFF_PROMISC) {
2182 CSR_CLRBIT_1(sc, VGE_RXCTL,
2183 VGE_RXCTL_RX_PROMISC);
2184 vge_setmulti(sc);
2185 } else
2186 vge_init(sc);
2187 } else {
2188 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2189 vge_stop(sc);
2190 }
2191 sc->vge_if_flags = ifp->if_flags;
2192 break;
2193 case SIOCADDMULTI:
2194 case SIOCDELMULTI:
2195 vge_setmulti(sc);
2196 break;
2197 case SIOCGIFMEDIA:
2198 case SIOCSIFMEDIA:
2199 mii = device_get_softc(sc->vge_miibus);
2200 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2201 break;
2202 case SIOCSIFCAP:
2203 {
2204 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2205#ifdef DEVICE_POLLING
2206 if (mask & IFCAP_POLLING) {
2207 if (ifr->ifr_reqcap & IFCAP_POLLING) {
2208 error = ether_poll_register(vge_poll, ifp);
2209 if (error)
2210 return(error);
2211 VGE_LOCK(sc);
2212 /* Disable interrupts */
2213 CSR_WRITE_4(sc, VGE_IMR, 0);
2214 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2215 ifp->if_capenable |= IFCAP_POLLING;
2216 VGE_UNLOCK(sc);
2217 } else {
2218 error = ether_poll_deregister(ifp);
2219 /* Enable interrupts. */
2220 VGE_LOCK(sc);
2221 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2222 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2223 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2224 ifp->if_capenable &= ~IFCAP_POLLING;
2225 VGE_UNLOCK(sc);
2226 }
2227 }
2228#endif /* DEVICE_POLLING */
2229 if (mask & IFCAP_HWCSUM) {
2230 ifp->if_capenable |= ifr->ifr_reqcap & (IFCAP_HWCSUM);
2231 if (ifp->if_capenable & IFCAP_TXCSUM)
2232 ifp->if_hwassist = VGE_CSUM_FEATURES;
2233 else
2234 ifp->if_hwassist = 0;
2235 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2236 vge_init(sc);
2237 }
2238 }
2239 break;
2240 default:
2241 error = ether_ioctl(ifp, command, data);
2242 break;
2243 }
2244
2245 return (error);
2246}
2247
2248static void
2249vge_watchdog(ifp)
2250 struct ifnet *ifp;
2251{
2252 struct vge_softc *sc;
2253
2254 sc = ifp->if_softc;
2255 VGE_LOCK(sc);
2256 printf("vge%d: watchdog timeout\n", sc->vge_unit);
2257 ifp->if_oerrors++;
2258
2259 vge_txeof(sc);
2260 vge_rxeof(sc);
2261
2262 vge_init(sc);
2263
2264 VGE_UNLOCK(sc);
2265
2266 return;
2267}
2268
2269/*
2270 * Stop the adapter and free any mbufs allocated to the
2271 * RX and TX lists.
2272 */
2273static void
2274vge_stop(sc)
2275 struct vge_softc *sc;
2276{
2277 register int i;
2278 struct ifnet *ifp;
2279
2280 VGE_LOCK(sc);
2281 ifp = sc->vge_ifp;
2282 ifp->if_timer = 0;
2283
2284 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2285
2286 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2287 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2288 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2289 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2290 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2291 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2292
2293 if (sc->vge_head != NULL) {
2294 m_freem(sc->vge_head);
2295 sc->vge_head = sc->vge_tail = NULL;
2296 }
2297
2298 /* Free the TX list buffers. */
2299
2300 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
2301 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
2302 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
2303 sc->vge_ldata.vge_tx_dmamap[i]);
2304 m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
2305 sc->vge_ldata.vge_tx_mbuf[i] = NULL;
2306 }
2307 }
2308
2309 /* Free the RX list buffers. */
2310
2311 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
2312 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
2313 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
2314 sc->vge_ldata.vge_rx_dmamap[i]);
2315 m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
2316 sc->vge_ldata.vge_rx_mbuf[i] = NULL;
2317 }
2318 }
2319
2320 VGE_UNLOCK(sc);
2321
2322 return;
2323}
2324
2325/*
2326 * Device suspend routine. Stop the interface and save some PCI
2327 * settings in case the BIOS doesn't restore them properly on
2328 * resume.
2329 */
2330static int
2331vge_suspend(dev)
2332 device_t dev;
2333{
2334 struct vge_softc *sc;
2335
2336 sc = device_get_softc(dev);
2337
2338 vge_stop(sc);
2339
2340 sc->suspended = 1;
2341
2342 return (0);
2343}
2344
2345/*
2346 * Device resume routine. Restore some PCI settings in case the BIOS
2347 * doesn't, re-enable busmastering, and restart the interface if
2348 * appropriate.
2349 */
2350static int
2351vge_resume(dev)
2352 device_t dev;
2353{
2354 struct vge_softc *sc;
2355 struct ifnet *ifp;
2356
2357 sc = device_get_softc(dev);
2358 ifp = sc->vge_ifp;
2359
2360 /* reenable busmastering */
2361 pci_enable_busmaster(dev);
2362 pci_enable_io(dev, SYS_RES_MEMORY);
2363
2364 /* reinitialize interface if necessary */
2365 if (ifp->if_flags & IFF_UP)
2366 vge_init(sc);
2367
2368 sc->suspended = 0;
2369
2370 return (0);
2371}
2372
2373/*
2374 * Stop all chip I/O so that the kernel's probe routines don't
2375 * get confused by errant DMAs when rebooting.
2376 */
172static int vge_ifmedia_upd (struct ifnet *);
173static void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *);
174
175#ifdef VGE_EEPROM
176static void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *);
177#endif
178static void vge_read_eeprom (struct vge_softc *, caddr_t, int, int, int);
179
180static void vge_miipoll_start (struct vge_softc *);
181static void vge_miipoll_stop (struct vge_softc *);
182static int vge_miibus_readreg (device_t, int, int);
183static int vge_miibus_writereg (device_t, int, int, int);
184static void vge_miibus_statchg (device_t);
185
186static void vge_cam_clear (struct vge_softc *);
187static int vge_cam_set (struct vge_softc *, uint8_t *);
188static void vge_setmulti (struct vge_softc *);
189static void vge_reset (struct vge_softc *);
190
191#define VGE_PCI_LOIO 0x10
192#define VGE_PCI_LOMEM 0x14
193
194static device_method_t vge_methods[] = {
195 /* Device interface */
196 DEVMETHOD(device_probe, vge_probe),
197 DEVMETHOD(device_attach, vge_attach),
198 DEVMETHOD(device_detach, vge_detach),
199 DEVMETHOD(device_suspend, vge_suspend),
200 DEVMETHOD(device_resume, vge_resume),
201 DEVMETHOD(device_shutdown, vge_shutdown),
202
203 /* bus interface */
204 DEVMETHOD(bus_print_child, bus_generic_print_child),
205 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
206
207 /* MII interface */
208 DEVMETHOD(miibus_readreg, vge_miibus_readreg),
209 DEVMETHOD(miibus_writereg, vge_miibus_writereg),
210 DEVMETHOD(miibus_statchg, vge_miibus_statchg),
211
212 { 0, 0 }
213};
214
215static driver_t vge_driver = {
216 "vge",
217 vge_methods,
218 sizeof(struct vge_softc)
219};
220
221static devclass_t vge_devclass;
222
223DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
224DRIVER_MODULE(vge, cardbus, vge_driver, vge_devclass, 0, 0);
225DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
226
227#ifdef VGE_EEPROM
228/*
229 * Read a word of data stored in the EEPROM at address 'addr.'
230 */
231static void
232vge_eeprom_getword(sc, addr, dest)
233 struct vge_softc *sc;
234 int addr;
235 u_int16_t *dest;
236{
237 register int i;
238 u_int16_t word = 0;
239
240 /*
241 * Enter EEPROM embedded programming mode. In order to
242 * access the EEPROM at all, we first have to set the
243 * EELOAD bit in the CHIPCFG2 register.
244 */
245 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
246 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
247
248 /* Select the address of the word we want to read */
249 CSR_WRITE_1(sc, VGE_EEADDR, addr);
250
251 /* Issue read command */
252 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
253
254 /* Wait for the done bit to be set. */
255 for (i = 0; i < VGE_TIMEOUT; i++) {
256 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
257 break;
258 }
259
260 if (i == VGE_TIMEOUT) {
261 device_printf(sc->vge_dev, "EEPROM read timed out\n");
262 *dest = 0;
263 return;
264 }
265
266 /* Read the result */
267 word = CSR_READ_2(sc, VGE_EERDDAT);
268
269 /* Turn off EEPROM access mode. */
270 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
271 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
272
273 *dest = word;
274
275 return;
276}
277#endif
278
279/*
280 * Read a sequence of words from the EEPROM.
281 */
282static void
283vge_read_eeprom(sc, dest, off, cnt, swap)
284 struct vge_softc *sc;
285 caddr_t dest;
286 int off;
287 int cnt;
288 int swap;
289{
290 int i;
291#ifdef VGE_EEPROM
292 u_int16_t word = 0, *ptr;
293
294 for (i = 0; i < cnt; i++) {
295 vge_eeprom_getword(sc, off + i, &word);
296 ptr = (u_int16_t *)(dest + (i * 2));
297 if (swap)
298 *ptr = ntohs(word);
299 else
300 *ptr = word;
301 }
302#else
303 for (i = 0; i < ETHER_ADDR_LEN; i++)
304 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
305#endif
306}
307
308static void
309vge_miipoll_stop(sc)
310 struct vge_softc *sc;
311{
312 int i;
313
314 CSR_WRITE_1(sc, VGE_MIICMD, 0);
315
316 for (i = 0; i < VGE_TIMEOUT; i++) {
317 DELAY(1);
318 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
319 break;
320 }
321
322 if (i == VGE_TIMEOUT)
323 device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
324
325 return;
326}
327
328static void
329vge_miipoll_start(sc)
330 struct vge_softc *sc;
331{
332 int i;
333
334 /* First, make sure we're idle. */
335
336 CSR_WRITE_1(sc, VGE_MIICMD, 0);
337 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
338
339 for (i = 0; i < VGE_TIMEOUT; i++) {
340 DELAY(1);
341 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
342 break;
343 }
344
345 if (i == VGE_TIMEOUT) {
346 device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
347 return;
348 }
349
350 /* Now enable auto poll mode. */
351
352 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
353
354 /* And make sure it started. */
355
356 for (i = 0; i < VGE_TIMEOUT; i++) {
357 DELAY(1);
358 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
359 break;
360 }
361
362 if (i == VGE_TIMEOUT)
363 device_printf(sc->vge_dev, "failed to start MII autopoll\n");
364
365 return;
366}
367
368static int
369vge_miibus_readreg(dev, phy, reg)
370 device_t dev;
371 int phy, reg;
372{
373 struct vge_softc *sc;
374 int i;
375 u_int16_t rval = 0;
376
377 sc = device_get_softc(dev);
378
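	/*
	 * Only the PHY whose address matches the low five bits of
	 * VGE_MIICFG is serviced; requests for any other PHY address
	 * never reach the MII bus and simply read back as 0.
	 */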
379 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
380 return(0);
381
382 VGE_LOCK(sc);
383 vge_miipoll_stop(sc);
384
385 /* Specify the register we want to read. */
386 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
387
388 /* Issue read command. */
389 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
390
391 /* Wait for the read command bit to self-clear. */
392 for (i = 0; i < VGE_TIMEOUT; i++) {
393 DELAY(1);
394 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
395 break;
396 }
397
398 if (i == VGE_TIMEOUT)
399 device_printf(sc->vge_dev, "MII read timed out\n");
400 else
401 rval = CSR_READ_2(sc, VGE_MIIDATA);
402
403 vge_miipoll_start(sc);
404 VGE_UNLOCK(sc);
405
406 return (rval);
407}
408
409static int
410vge_miibus_writereg(dev, phy, reg, data)
411 device_t dev;
412 int phy, reg, data;
413{
414 struct vge_softc *sc;
415 int i, rval = 0;
416
417 sc = device_get_softc(dev);
418
419 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
420 return(0);
421
422 VGE_LOCK(sc);
423 vge_miipoll_stop(sc);
424
425 /* Specify the register we want to write. */
426 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
427
428 /* Specify the data we want to write. */
429 CSR_WRITE_2(sc, VGE_MIIDATA, data);
430
431 /* Issue write command. */
432 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
433
434 /* Wait for the write command bit to self-clear. */
435 for (i = 0; i < VGE_TIMEOUT; i++) {
436 DELAY(1);
437 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
438 break;
439 }
440
441 if (i == VGE_TIMEOUT) {
442 device_printf(sc->vge_dev, "MII write timed out\n");
443 rval = EIO;
444 }
445
446 vge_miipoll_start(sc);
447 VGE_UNLOCK(sc);
448
449 return (rval);
450}
451
452static void
453vge_cam_clear(sc)
454 struct vge_softc *sc;
455{
456 int i;
457
458 /*
459 * Turn off all the mask bits. This tells the chip
460 * that none of the entries in the CAM filter are valid.
461 * The desired entries will be enabled as we fill in the filter.
462 */
463
464 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
465 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
466 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
467 for (i = 0; i < 8; i++)
468 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
469
470 /* Clear the VLAN filter too. */
471
472 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
473 for (i = 0; i < 8; i++)
474 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
475
476 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
477 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
478 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
479
480 sc->vge_camidx = 0;
481
482 return;
483}
484
485static int
486vge_cam_set(sc, addr)
487 struct vge_softc *sc;
488 uint8_t *addr;
489{
490 int i, error = 0;
491
492 if (sc->vge_camidx == VGE_CAM_MAXADDRS)
493 return(ENOSPC);
494
495 /* Select the CAM data page. */
496 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
497 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
498
499 /* Set the filter entry we want to update and enable writing. */
500 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
501
502 /* Write the address to the CAM registers */
503 for (i = 0; i < ETHER_ADDR_LEN; i++)
504 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
505
506 /* Issue a write command. */
507 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
508
509 /* Wait for it to clear. */
510 for (i = 0; i < VGE_TIMEOUT; i++) {
511 DELAY(1);
512 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
513 break;
514 }
515
516 if (i == VGE_TIMEOUT) {
517 device_printf(sc->vge_dev, "setting CAM filter failed\n");
518 error = EIO;
519 goto fail;
520 }
521
522 /* Select the CAM mask page. */
523 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
524 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
525
526 /* Set the mask bit that enables this filter. */
527 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
528 1<<(sc->vge_camidx & 7));
529
530 sc->vge_camidx++;
531
532fail:
533 /* Turn off access to CAM. */
534 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
535 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
536 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
537
538 return (error);
539}
540
541/*
542 * Program the multicast filter. We use the 64-entry CAM filter
543 * for perfect filtering. If there are more than 64 multicast addresses,
544 * we use the hash filter instead.
545 */
546static void
547vge_setmulti(sc)
548 struct vge_softc *sc;
549{
550 struct ifnet *ifp;
551 int error = 0/*, h = 0*/;
552 struct ifmultiaddr *ifma;
553 u_int32_t h, hashes[2] = { 0, 0 };
554
555 ifp = sc->vge_ifp;
556
557 /* First, zot all the multicast entries. */
558 vge_cam_clear(sc);
559 CSR_WRITE_4(sc, VGE_MAR0, 0);
560 CSR_WRITE_4(sc, VGE_MAR1, 0);
561
562 /*
563 * If the user wants allmulti or promisc mode, enable reception
564 * of all multicast frames.
565 */
566 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
567 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
568 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
569 return;
570 }
571
572 /* Now program new ones */
573 IF_ADDR_LOCK(ifp);
574 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
575 if (ifma->ifma_addr->sa_family != AF_LINK)
576 continue;
577 error = vge_cam_set(sc,
578 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
579 if (error)
580 break;
581 }
582
583 /* If there were too many addresses, use the hash filter. */
584 if (error) {
585 vge_cam_clear(sc);
586
587 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
588 if (ifma->ifma_addr->sa_family != AF_LINK)
589 continue;
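			/*
			 * Hash filter: the top six bits of the big-endian
			 * CRC32 of the address select one of the 64 bits
			 * in the MAR0/MAR1 multicast hash registers.
			 */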
590 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
591 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
592 if (h < 32)
593 hashes[0] |= (1 << h);
594 else
595 hashes[1] |= (1 << (h - 32));
596 }
597
598 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
599 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
600 }
601 IF_ADDR_UNLOCK(ifp);
602
603 return;
604}
605
606static void
607vge_reset(sc)
608 struct vge_softc *sc;
609{
610 register int i;
611
612 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
613
614 for (i = 0; i < VGE_TIMEOUT; i++) {
615 DELAY(5);
616 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
617 break;
618 }
619
620 if (i == VGE_TIMEOUT) {
621 device_printf(sc->vge_dev, "soft reset timed out\n");
622 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
623 DELAY(2000);
624 }
625
626 DELAY(5000);
627
628 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
629
630 for (i = 0; i < VGE_TIMEOUT; i++) {
631 DELAY(5);
632 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
633 break;
634 }
635
636 if (i == VGE_TIMEOUT) {
637 device_printf(sc->vge_dev, "EEPROM reload timed out\n");
638 return;
639 }
640
641 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
642
643 return;
644}
645
646/*
647 * Probe for a VIA gigabit chip. Check the PCI vendor and device
648 * IDs against our list and return a device name if we find a match.
649 */
650static int
651vge_probe(dev)
652 device_t dev;
653{
654 struct vge_type *t;
655 struct vge_softc *sc;
656
657 t = vge_devs;
658 sc = device_get_softc(dev);
659
660 while (t->vge_name != NULL) {
661 if ((pci_get_vendor(dev) == t->vge_vid) &&
662 (pci_get_device(dev) == t->vge_did)) {
663 device_set_desc(dev, t->vge_name);
664 return (BUS_PROBE_DEFAULT);
665 }
666 t++;
667 }
668
669 return (ENXIO);
670}
671
672static void
673vge_dma_map_rx_desc(arg, segs, nseg, mapsize, error)
674 void *arg;
675 bus_dma_segment_t *segs;
676 int nseg;
677 bus_size_t mapsize;
678 int error;
679{
680
681 struct vge_dmaload_arg *ctx;
682 struct vge_rx_desc *d = NULL;
683
684 if (error)
685 return;
686
687 ctx = arg;
688
689 /* Signal error to caller if there are too many segments */
690 if (nseg > ctx->vge_maxsegs) {
691 ctx->vge_maxsegs = 0;
692 return;
693 }
694
695 /*
696 * Map the segment array into descriptors.
697 */
698
699 d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];
700
701 /* If this descriptor is still owned by the chip, bail. */
702
703 if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
704 device_printf(ctx->sc->vge_dev,
705 "tried to map busy descriptor\n");
706 ctx->vge_maxsegs = 0;
707 return;
708 }
709
710 d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
711 d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
712 d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
713 d->vge_sts = 0;
714 d->vge_ctl = 0;
715
716 ctx->vge_maxsegs = 1;
717
718 return;
719}
720
721static void
722vge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
723 void *arg;
724 bus_dma_segment_t *segs;
725 int nseg;
726 bus_size_t mapsize;
727 int error;
728{
729 struct vge_dmaload_arg *ctx;
730 struct vge_tx_desc *d = NULL;
731 struct vge_tx_frag *f;
732 int i = 0;
733
734 if (error)
735 return;
736
737 ctx = arg;
738
739 /* Signal error to caller if there are too many segments */
740 if (nseg > ctx->vge_maxsegs) {
741 ctx->vge_maxsegs = 0;
742 return;
743 }
744
745 /* Map the segment array into descriptors. */
746
747 d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];
748
749 /* If this descriptor is still owned by the chip, bail. */
750
751 if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
752 ctx->vge_maxsegs = 0;
753 return;
754 }
755
756 for (i = 0; i < nseg; i++) {
757 f = &d->vge_frag[i];
758 f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
759 f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
760 f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
761 }
762
763 /* Argh. This chip does not autopad short frames */
764
765 if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
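		/*
		 * The pad fragment set up below points back at the start of
		 * the packet data, so the frame is padded out to the minimum
		 * length with a copy of its own leading bytes rather than
		 * with zeros.
		 */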
766 f = &d->vge_frag[i];
767 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
768 ctx->vge_m0->m_pkthdr.len));
769 f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
770 f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
771 ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
772 i++;
773 }
774
775 /*
776 * When telling the chip how many segments there are, we
777 * must use nsegs + 1 instead of just nsegs. Darned if I
778 * know why.
779 */
780 i++;
781
782 d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
783 d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;
784
785 if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
786 d->vge_ctl |= VGE_TDCTL_JUMBO;
787
788 ctx->vge_maxsegs = nseg;
789
790 return;
791}
792
793/*
794 * Map a single buffer address.
795 */
796
797static void
798vge_dma_map_addr(arg, segs, nseg, error)
799 void *arg;
800 bus_dma_segment_t *segs;
801 int nseg;
802 int error;
803{
804 bus_addr_t *addr;
805
806 if (error)
807 return;
808
809 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
810 addr = arg;
811 *addr = segs->ds_addr;
812
813 return;
814}
815
816static int
817vge_allocmem(dev, sc)
818 device_t dev;
819 struct vge_softc *sc;
820{
821 int error;
822 int nseg;
823 int i;
824
825 /*
826 * Allocate map for RX mbufs.
827 */
828 nseg = 32;
829 error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
830 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
831 NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
832 NULL, NULL, &sc->vge_ldata.vge_mtag);
833 if (error) {
834 device_printf(dev, "could not allocate dma tag\n");
835 return (ENOMEM);
836 }
837
838 /*
839 * Allocate map for TX descriptor list.
840 */
841 error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
842 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
843 NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
844 NULL, NULL, &sc->vge_ldata.vge_tx_list_tag);
845 if (error) {
846 device_printf(dev, "could not allocate dma tag\n");
847 return (ENOMEM);
848 }
849
850 /* Allocate DMA'able memory for the TX ring */
851
852 error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
853 (void **)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
854 &sc->vge_ldata.vge_tx_list_map);
855 if (error)
856 return (ENOMEM);
857
858 /* Load the map for the TX ring. */
859
860 error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
861 sc->vge_ldata.vge_tx_list_map, sc->vge_ldata.vge_tx_list,
862 VGE_TX_LIST_SZ, vge_dma_map_addr,
863 &sc->vge_ldata.vge_tx_list_addr, BUS_DMA_NOWAIT);
864
865 /* Create DMA maps for TX buffers */
866
867 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
868 error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
869 &sc->vge_ldata.vge_tx_dmamap[i]);
870 if (error) {
871 device_printf(dev, "can't create DMA map for TX\n");
872 return (ENOMEM);
873 }
874 }
875
876 /*
877 * Allocate map for RX descriptor list.
878 */
879 error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
880 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
881 NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
882 NULL, NULL, &sc->vge_ldata.vge_rx_list_tag);
883 if (error) {
884 device_printf(dev, "could not allocate dma tag\n");
885 return (ENOMEM);
886 }
887
888 /* Allocate DMA'able memory for the RX ring */
889
890 error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
891 (void **)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
892 &sc->vge_ldata.vge_rx_list_map);
893 if (error)
894 return (ENOMEM);
895
896 /* Load the map for the RX ring. */
897
898 error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
899 sc->vge_ldata.vge_rx_list_map, sc->vge_ldata.vge_rx_list,
900 VGE_TX_LIST_SZ, vge_dma_map_addr,
901 &sc->vge_ldata.vge_rx_list_addr, BUS_DMA_NOWAIT);
902
903 /* Create DMA maps for RX buffers */
904
905 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
906 error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
907 &sc->vge_ldata.vge_rx_dmamap[i]);
908 if (error) {
909 device_printf(dev, "can't create DMA map for RX\n");
910 return (ENOMEM);
911 }
912 }
913
914 return (0);
915}
916
917/*
918 * Attach the interface. Allocate softc structures, do ifmedia
919 * setup and ethernet/BPF attach.
920 */
921static int
922vge_attach(dev)
923 device_t dev;
924{
925 u_char eaddr[ETHER_ADDR_LEN];
926 struct vge_softc *sc;
927 struct ifnet *ifp;
928 int unit, error = 0, rid;
929
930 sc = device_get_softc(dev);
931 unit = device_get_unit(dev);
932 sc->vge_dev = dev;
933
934 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
935 MTX_DEF | MTX_RECURSE);
936 /*
937 * Map control/status registers.
938 */
939 pci_enable_busmaster(dev);
940
941 rid = VGE_PCI_LOMEM;
942 sc->vge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
943 0, ~0, 1, RF_ACTIVE);
944
945 if (sc->vge_res == NULL) {
946 printf ("vge%d: couldn't map ports/memory\n", unit);
947 error = ENXIO;
948 goto fail;
949 }
950
951 sc->vge_btag = rman_get_bustag(sc->vge_res);
952 sc->vge_bhandle = rman_get_bushandle(sc->vge_res);
953
954 /* Allocate interrupt */
955 rid = 0;
956 sc->vge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
957 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
958
959 if (sc->vge_irq == NULL) {
960 printf("vge%d: couldn't map interrupt\n", unit);
961 error = ENXIO;
962 goto fail;
963 }
964
965 /* Reset the adapter. */
966 vge_reset(sc);
967
968 /*
969 * Get station address from the EEPROM.
970 */
971 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
972
973 sc->vge_unit = unit;
974
975 /*
976 * Allocate the parent bus DMA tag appropriate for PCI.
977 */
978#define VGE_NSEG_NEW 32
979 error = bus_dma_tag_create(NULL, /* parent */
980 1, 0, /* alignment, boundary */
981 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
982 BUS_SPACE_MAXADDR, /* highaddr */
983 NULL, NULL, /* filter, filterarg */
984 MAXBSIZE, VGE_NSEG_NEW, /* maxsize, nsegments */
985 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
986 BUS_DMA_ALLOCNOW, /* flags */
987 NULL, NULL, /* lockfunc, lockarg */
988 &sc->vge_parent_tag);
989 if (error)
990 goto fail;
991
992 error = vge_allocmem(dev, sc);
993
994 if (error)
995 goto fail;
996
997 ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
998 if (ifp == NULL) {
999 printf("vge%d: can not if_alloc()\n", sc->vge_unit);
1000 error = ENOSPC;
1001 goto fail;
1002 }
1003
1004 /* Do MII setup */
1005 if (mii_phy_probe(dev, &sc->vge_miibus,
1006 vge_ifmedia_upd, vge_ifmedia_sts)) {
1007 printf("vge%d: MII without any phy!\n", sc->vge_unit);
1008 error = ENXIO;
1009 goto fail;
1010 }
1011
1012 ifp->if_softc = sc;
1013 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1014 ifp->if_mtu = ETHERMTU;
1015 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1016 ifp->if_ioctl = vge_ioctl;
1017 ifp->if_capabilities = IFCAP_VLAN_MTU;
1018 ifp->if_start = vge_start;
1019 ifp->if_hwassist = VGE_CSUM_FEATURES;
1020 ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
1021 ifp->if_capenable = ifp->if_capabilities;
1022#ifdef DEVICE_POLLING
1023 ifp->if_capabilities |= IFCAP_POLLING;
1024#endif
1025 ifp->if_watchdog = vge_watchdog;
1026 ifp->if_init = vge_init;
1027 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN);
1028 ifp->if_snd.ifq_drv_maxlen = VGE_IFQ_MAXLEN;
1029 IFQ_SET_READY(&ifp->if_snd);
1030
1031 TASK_INIT(&sc->vge_txtask, 0, vge_tx_task, ifp);
1032
1033 /*
1034 * Call MI attach routine.
1035 */
1036 ether_ifattach(ifp, eaddr);
1037
1038 /* Hook interrupt last to avoid having to lock softc */
1039 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
1040 NULL, vge_intr, sc, &sc->vge_intrhand);
1041
1042 if (error) {
1043 printf("vge%d: couldn't set up irq\n", unit);
1044 ether_ifdetach(ifp);
1045 goto fail;
1046 }
1047
1048fail:
1049 if (error)
1050 vge_detach(dev);
1051
1052 return (error);
1053}
1054
1055/*
1056 * Shutdown hardware and free up resources. This can be called any
1057 * time after the mutex has been initialized. It is called in both
1058 * the error case in attach and the normal detach case so it needs
1059 * to be careful about only freeing resources that have actually been
1060 * allocated.
1061 */
1062static int
1063vge_detach(dev)
1064 device_t dev;
1065{
1066 struct vge_softc *sc;
1067 struct ifnet *ifp;
1068 int i;
1069
1070 sc = device_get_softc(dev);
1071 KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
1072 ifp = sc->vge_ifp;
1073
1074#ifdef DEVICE_POLLING
1075 if (ifp->if_capenable & IFCAP_POLLING)
1076 ether_poll_deregister(ifp);
1077#endif
1078
1079 /* These should only be active if attach succeeded */
1080 if (device_is_attached(dev)) {
1081 vge_stop(sc);
1082 /*
1083 * Force off the IFF_UP flag here, in case someone
1084 * still had a BPF descriptor attached to this
1085 * interface. If they do, ether_ifdetach() will cause
1086 * the BPF code to try and clear the promisc mode
1087 * flag, which will bubble down to vge_ioctl(),
1088 * which will try to call vge_init() again. This will
1089 * turn the NIC back on and restart the MII ticker,
1090 * which will panic the system when the kernel tries
1091 * to invoke the vge_tick() function that isn't there
1092 * anymore.
1093 */
1094 ifp->if_flags &= ~IFF_UP;
1095 ether_ifdetach(ifp);
1096 }
1097 if (sc->vge_miibus)
1098 device_delete_child(dev, sc->vge_miibus);
1099 bus_generic_detach(dev);
1100
1101 if (sc->vge_intrhand)
1102 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
1103 if (sc->vge_irq)
1104 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq);
1105 if (sc->vge_res)
1106 bus_release_resource(dev, SYS_RES_MEMORY,
1107 VGE_PCI_LOMEM, sc->vge_res);
1108 if (ifp)
1109 if_free(ifp);
1110
1111 /* Unload and free the RX DMA ring memory and map */
1112
1113 if (sc->vge_ldata.vge_rx_list_tag) {
1114 bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
1115 sc->vge_ldata.vge_rx_list_map);
1116 bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
1117 sc->vge_ldata.vge_rx_list,
1118 sc->vge_ldata.vge_rx_list_map);
1119 bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);
1120 }
1121
1122 /* Unload and free the TX DMA ring memory and map */
1123
1124 if (sc->vge_ldata.vge_tx_list_tag) {
1125 bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
1126 sc->vge_ldata.vge_tx_list_map);
1127 bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
1128 sc->vge_ldata.vge_tx_list,
1129 sc->vge_ldata.vge_tx_list_map);
1130 bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);
1131 }
1132
1133 /* Destroy all the RX and TX buffer maps */
1134
1135 if (sc->vge_ldata.vge_mtag) {
1136 for (i = 0; i < VGE_TX_DESC_CNT; i++)
1137 bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
1138 sc->vge_ldata.vge_tx_dmamap[i]);
1139 for (i = 0; i < VGE_RX_DESC_CNT; i++)
1140 bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
1141 sc->vge_ldata.vge_rx_dmamap[i]);
1142 bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
1143 }
1144
1145 if (sc->vge_parent_tag)
1146 bus_dma_tag_destroy(sc->vge_parent_tag);
1147
1148 mtx_destroy(&sc->vge_mtx);
1149
1150 return (0);
1151}
1152
1153static int
1154vge_newbuf(sc, idx, m)
1155 struct vge_softc *sc;
1156 int idx;
1157 struct mbuf *m;
1158{
1159 struct vge_dmaload_arg arg;
1160 struct mbuf *n = NULL;
1161 int i, error;
1162
1163 if (m == NULL) {
1164 n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1165 if (n == NULL)
1166 return (ENOBUFS);
1167 m = n;
1168 } else
1169 m->m_data = m->m_ext.ext_buf;
1170
1171
1172#ifdef VGE_FIXUP_RX
1173 /*
1174 * This is part of an evil trick to deal with non-x86 platforms.
1175 * The VIA chip requires RX buffers to be aligned on 32-bit
1176 * boundaries, which leaves the IP header misaligned and will hose
1177 * strict-alignment (non-x86) machines. To get around this, we leave
1178 * some empty space at the start of each buffer and, on non-x86 hosts,
1179 * copy the data back two bytes to realign it. This is slightly more efficient
1180 * than allocating a new buffer, copying the contents, and
1181 * discarding the old buffer.
1182 */
1183 m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
1184 m_adj(m, VGE_ETHER_ALIGN);
1185#else
1186 m->m_len = m->m_pkthdr.len = MCLBYTES;
1187#endif
1188
1189 arg.sc = sc;
1190 arg.vge_idx = idx;
1191 arg.vge_maxsegs = 1;
1192 arg.vge_flags = 0;
1193
1194 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
1195 sc->vge_ldata.vge_rx_dmamap[idx], m, vge_dma_map_rx_desc,
1196 &arg, BUS_DMA_NOWAIT);
1197 if (error || arg.vge_maxsegs != 1) {
1198 if (n != NULL)
1199 m_freem(n);
1200 return (ENOMEM);
1201 }
1202
1203 /*
1204 * proper operation, the driver needs to replenish the RX
1205 * proper opration, the driver needs to replentish the RX
1206 * DMA ring 4 descriptors at a time (rather than one at a
1207 * time, like most chips). We can allocate the new buffers
1208 * but we should not set the OWN bits until we're ready
1209 * to hand back 4 of them in one shot.
1210 */
1211
1212#define VGE_RXCHUNK 4
1213 sc->vge_rx_consumed++;
1214 if (sc->vge_rx_consumed == VGE_RXCHUNK) {
1215 for (i = idx; i != idx - sc->vge_rx_consumed; i--)
1216 sc->vge_ldata.vge_rx_list[i].vge_sts |=
1217 htole32(VGE_RDSTS_OWN);
1218 sc->vge_rx_consumed = 0;
1219 }
1220
1221 sc->vge_ldata.vge_rx_mbuf[idx] = m;
1222
1223 bus_dmamap_sync(sc->vge_ldata.vge_mtag,
1224 sc->vge_ldata.vge_rx_dmamap[idx],
1225 BUS_DMASYNC_PREREAD);
1226
1227 return (0);
1228}
1229
1230static int
1231vge_tx_list_init(sc)
1232 struct vge_softc *sc;
1233{
1234 bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
1235 bzero ((char *)&sc->vge_ldata.vge_tx_mbuf,
1236 (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
1237
1238 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
1239 sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
1240 sc->vge_ldata.vge_tx_prodidx = 0;
1241 sc->vge_ldata.vge_tx_considx = 0;
1242 sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
1243
1244 return (0);
1245}
1246
1247static int
1248vge_rx_list_init(sc)
1249 struct vge_softc *sc;
1250{
1251 int i;
1252
1253 bzero ((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
1254 bzero ((char *)&sc->vge_ldata.vge_rx_mbuf,
1255 (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));
1256
1257 sc->vge_rx_consumed = 0;
1258
1259 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1260 if (vge_newbuf(sc, i, NULL) == ENOBUFS)
1261 return (ENOBUFS);
1262 }
1263
1264 /* Flush the RX descriptors */
1265
1266 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
1267 sc->vge_ldata.vge_rx_list_map,
1268 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1269
1270 sc->vge_ldata.vge_rx_prodidx = 0;
1271 sc->vge_rx_consumed = 0;
1272 sc->vge_head = sc->vge_tail = NULL;
1273
1274 return (0);
1275}
1276
1277#ifdef VGE_FIXUP_RX
1278static __inline void
1279vge_fixup_rx(m)
1280 struct mbuf *m;
1281{
1282 int i;
1283 uint16_t *src, *dst;
1284
1285 src = mtod(m, uint16_t *);
1286 dst = src - 1;
1287
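	/*
	 * Copy the packet contents back by two bytes, one 16-bit word at
	 * a time, then pull m_data back by ETHER_ALIGN so the IP header
	 * ends up on a 32-bit boundary for strict-alignment hosts.
	 */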
1288 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1289 *dst++ = *src++;
1290
1291 m->m_data -= ETHER_ALIGN;
1292
1293 return;
1294}
1295#endif
1296
1297/*
1298 * RX handler. We support the reception of jumbo frames that have
1299 * been fragmented across multiple 2K mbuf cluster buffers.
1300 */
1301static void
1302vge_rxeof(sc)
1303 struct vge_softc *sc;
1304{
1305 struct mbuf *m;
1306 struct ifnet *ifp;
1307 int i, total_len;
1308 int lim = 0;
1309 struct vge_rx_desc *cur_rx;
1310 u_int32_t rxstat, rxctl;
1311
1312 VGE_LOCK_ASSERT(sc);
1313 ifp = sc->vge_ifp;
1314 i = sc->vge_ldata.vge_rx_prodidx;
1315
1316 /* Invalidate the descriptor memory */
1317
1318 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
1319 sc->vge_ldata.vge_rx_list_map,
1320 BUS_DMASYNC_POSTREAD);
1321
1322 while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
1323
1324#ifdef DEVICE_POLLING
1325 if (ifp->if_capenable & IFCAP_POLLING) {
1326 if (sc->rxcycles <= 0)
1327 break;
1328 sc->rxcycles--;
1329 }
1330#endif
1331
1332 cur_rx = &sc->vge_ldata.vge_rx_list[i];
1333 m = sc->vge_ldata.vge_rx_mbuf[i];
1334 total_len = VGE_RXBYTES(cur_rx);
1335 rxstat = le32toh(cur_rx->vge_sts);
1336 rxctl = le32toh(cur_rx->vge_ctl);
1337
1338 /* Invalidate the RX mbuf and unload its map */
1339
1340 bus_dmamap_sync(sc->vge_ldata.vge_mtag,
1341 sc->vge_ldata.vge_rx_dmamap[i],
1342 BUS_DMASYNC_POSTWRITE);
1343 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
1344 sc->vge_ldata.vge_rx_dmamap[i]);
1345
1346 /*
1347 * If the 'start of frame' bit is set, this indicates
1348 * either the first fragment in a multi-fragment receive,
1349 * or an intermediate fragment. Either way, we want to
1350 * accumulate the buffers.
1351 */
1352 if (rxstat & VGE_RXPKT_SOF) {
1353 m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
1354 if (sc->vge_head == NULL)
1355 sc->vge_head = sc->vge_tail = m;
1356 else {
1357 m->m_flags &= ~M_PKTHDR;
1358 sc->vge_tail->m_next = m;
1359 sc->vge_tail = m;
1360 }
1361 vge_newbuf(sc, i, NULL);
1362 VGE_RX_DESC_INC(i);
1363 continue;
1364 }
1365
1366 /*
1367 * Bad/error frames will have the RXOK bit cleared.
1368 * However, there's one error case we want to allow:
1369 * if a VLAN tagged frame arrives and the chip can't
1370 * match it against the CAM filter, it considers this
1371 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1372 * We don't want to drop the frame though: our VLAN
1373 * filtering is done in software.
1374 */
1375 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
1376 && !(rxstat & VGE_RDSTS_CSUMERR)) {
1377 ifp->if_ierrors++;
1378 /*
1379 * If this is part of a multi-fragment packet,
1380 * discard all the pieces.
1381 */
1382 if (sc->vge_head != NULL) {
1383 m_freem(sc->vge_head);
1384 sc->vge_head = sc->vge_tail = NULL;
1385 }
1386 vge_newbuf(sc, i, m);
1387 VGE_RX_DESC_INC(i);
1388 continue;
1389 }
1390
1391 /*
1392 * If allocating a replacement mbuf fails,
1393 * reload the current one.
1394 */
1395
1396 if (vge_newbuf(sc, i, NULL)) {
1397 ifp->if_ierrors++;
1398 if (sc->vge_head != NULL) {
1399 m_freem(sc->vge_head);
1400 sc->vge_head = sc->vge_tail = NULL;
1401 }
1402 vge_newbuf(sc, i, m);
1403 VGE_RX_DESC_INC(i);
1404 continue;
1405 }
1406
1407 VGE_RX_DESC_INC(i);
1408
1409 if (sc->vge_head != NULL) {
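			/*
			 * This is the last buffer of a multi-fragment frame:
			 * its payload is whatever remains after the full
			 * cluster-sized fragments that preceded it.
			 */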
1410 m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
1411 /*
1412 * Special case: if there are 4 bytes or less
1413 * in this buffer, the mbuf can be discarded:
1414 * the last 4 bytes are the CRC, which we don't
1415 * care about anyway.
1416 */
1417 if (m->m_len <= ETHER_CRC_LEN) {
1418 sc->vge_tail->m_len -=
1419 (ETHER_CRC_LEN - m->m_len);
1420 m_freem(m);
1421 } else {
1422 m->m_len -= ETHER_CRC_LEN;
1423 m->m_flags &= ~M_PKTHDR;
1424 sc->vge_tail->m_next = m;
1425 }
1426 m = sc->vge_head;
1427 sc->vge_head = sc->vge_tail = NULL;
1428 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1429 } else
1430 m->m_pkthdr.len = m->m_len =
1431 (total_len - ETHER_CRC_LEN);
1432
1433#ifdef VGE_FIXUP_RX
1434 vge_fixup_rx(m);
1435#endif
1436 ifp->if_ipackets++;
1437 m->m_pkthdr.rcvif = ifp;
1438
1439 /* Do RX checksumming if enabled */
1440 if (ifp->if_capenable & IFCAP_RXCSUM) {
1441
1442 /* Check IP header checksum */
1443 if (rxctl & VGE_RDCTL_IPPKT)
1444 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1445 if (rxctl & VGE_RDCTL_IPCSUMOK)
1446 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1447
1448 /* Check TCP/UDP checksum */
1449 if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
1450 rxctl & VGE_RDCTL_PROTOCSUMOK) {
1451 m->m_pkthdr.csum_flags |=
1452 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1453 m->m_pkthdr.csum_data = 0xffff;
1454 }
1455 }
1456
1457 if (rxstat & VGE_RDSTS_VTAG) {
1458 /*
1459 * The 32-bit rxctl register is stored in little-endian.
1460 * However, the 16-bit vlan tag is stored in big-endian,
1461 * so we have to byte swap it.
1462 */
1463 m->m_pkthdr.ether_vtag =
1464 bswap16(rxctl & VGE_RDCTL_VLANID);
1465 m->m_flags |= M_VLANTAG;
1466 }
1467
1468 VGE_UNLOCK(sc);
1469 (*ifp->if_input)(ifp, m);
1470 VGE_LOCK(sc);
1471
1472 lim++;
1473 if (lim == VGE_RX_DESC_CNT)
1474 break;
1475
1476 }
1477
1478 /* Flush the RX DMA ring */
1479
1480 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
1481 sc->vge_ldata.vge_rx_list_map,
1482 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1483
1484 sc->vge_ldata.vge_rx_prodidx = i;
1485 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
1486
1487
1488 return;
1489}
1490
1491static void
1492vge_txeof(sc)
1493 struct vge_softc *sc;
1494{
1495 struct ifnet *ifp;
1496 u_int32_t txstat;
1497 int idx;
1498
1499 ifp = sc->vge_ifp;
1500 idx = sc->vge_ldata.vge_tx_considx;
1501
1502 /* Invalidate the TX descriptor list */
1503
1504 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
1505 sc->vge_ldata.vge_tx_list_map,
1506 BUS_DMASYNC_POSTREAD);
1507
1508 while (idx != sc->vge_ldata.vge_tx_prodidx) {
1509
1510 txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
1511 if (txstat & VGE_TDSTS_OWN)
1512 break;
1513
1514 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
1515 sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
1516 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
1517 sc->vge_ldata.vge_tx_dmamap[idx]);
1518 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
1519 ifp->if_collisions++;
1520 if (txstat & VGE_TDSTS_TXERR)
1521 ifp->if_oerrors++;
1522 else
1523 ifp->if_opackets++;
1524
1525 sc->vge_ldata.vge_tx_free++;
1526 VGE_TX_DESC_INC(idx);
1527 }
1528
1529 /* No changes made to the TX ring, so no flush needed */
1530
1531 if (idx != sc->vge_ldata.vge_tx_considx) {
1532 sc->vge_ldata.vge_tx_considx = idx;
1533 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1534 ifp->if_timer = 0;
1535 }
1536
1537 /*
1538 * If not all descriptors have been reaped yet,
1539 * reload the timer so that we will eventually get another
1540 * interrupt that will cause us to re-enter this routine.
1541 * This is done in case the transmitter has gone idle.
1542 */
1543 if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
1544 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1545 }
1546
1547 return;
1548}
1549
1550static void
1551vge_tick(xsc)
1552 void *xsc;
1553{
1554 struct vge_softc *sc;
1555 struct ifnet *ifp;
1556 struct mii_data *mii;
1557
1558 sc = xsc;
1559 ifp = sc->vge_ifp;
1560 VGE_LOCK(sc);
1561 mii = device_get_softc(sc->vge_miibus);
1562
1563 mii_tick(mii);
1564 if (sc->vge_link) {
1565 if (!(mii->mii_media_status & IFM_ACTIVE)) {
1566 sc->vge_link = 0;
1567 if_link_state_change(sc->vge_ifp,
1568 LINK_STATE_DOWN);
1569 }
1570 } else {
1571 if (mii->mii_media_status & IFM_ACTIVE &&
1572 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1573 sc->vge_link = 1;
1574 if_link_state_change(sc->vge_ifp,
1575 LINK_STATE_UP);
1576 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1577 taskqueue_enqueue(taskqueue_swi,
1578 &sc->vge_txtask);
1579 }
1580 }
1581
1582 VGE_UNLOCK(sc);
1583
1584 return;
1585}
1586
1587#ifdef DEVICE_POLLING
1588static void
1589vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
1590{
1591 struct vge_softc *sc = ifp->if_softc;
1592
1593 VGE_LOCK(sc);
1594 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1595 goto done;
1596
1597 sc->rxcycles = count;
1598 vge_rxeof(sc);
1599 vge_txeof(sc);
1600
1601 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1602 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
1603
1604 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1605 u_int32_t status;
1606 status = CSR_READ_4(sc, VGE_ISR);
1607 if (status == 0xFFFFFFFF)
1608 goto done;
1609 if (status)
1610 CSR_WRITE_4(sc, VGE_ISR, status);
1611
1612 /*
1613 * XXX check behaviour on receiver stalls.
1614 */
1615
1616 if (status & VGE_ISR_TXDMA_STALL ||
1617 status & VGE_ISR_RXDMA_STALL)
1618 vge_init(sc);
1619
1620 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1621 vge_rxeof(sc);
1622 ifp->if_ierrors++;
1623 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1624 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1625 }
1626 }
1627done:
1628 VGE_UNLOCK(sc);
1629}
1630#endif /* DEVICE_POLLING */
1631
1632static void
1633vge_intr(arg)
1634 void *arg;
1635{
1636 struct vge_softc *sc;
1637 struct ifnet *ifp;
1638 u_int32_t status;
1639
1640 sc = arg;
1641
1642 if (sc->suspended) {
1643 return;
1644 }
1645
1646 VGE_LOCK(sc);
1647 ifp = sc->vge_ifp;
1648
1649 if (!(ifp->if_flags & IFF_UP)) {
1650 VGE_UNLOCK(sc);
1651 return;
1652 }
1653
1654#ifdef DEVICE_POLLING
1655 if (ifp->if_capenable & IFCAP_POLLING) {
1656 VGE_UNLOCK(sc);
1657 return;
1658 }
1659#endif
1660
1661 /* Disable interrupts */
1662 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1663
1664 for (;;) {
1665
1666 status = CSR_READ_4(sc, VGE_ISR);
1667 /* If the card has gone away the read returns 0xffffffff. */
1668 if (status == 0xFFFFFFFF)
1669 break;
1670
1671 if (status)
1672 CSR_WRITE_4(sc, VGE_ISR, status);
1673
1674 if ((status & VGE_INTRS) == 0)
1675 break;
1676
1677 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1678 vge_rxeof(sc);
1679
1680 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1681 vge_rxeof(sc);
1682 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1683 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1684 }
1685
1686 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
1687 vge_txeof(sc);
1688
1689 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
1690 vge_init(sc);
1691
1692 if (status & VGE_ISR_LINKSTS)
1693 vge_tick(sc);
1694 }
1695
1696 /* Re-enable interrupts */
1697 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1698
1699 VGE_UNLOCK(sc);
1700
1701 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1702 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
1703
1704 return;
1705}
1706
1707static int
1708vge_encap(sc, m_head, idx)
1709 struct vge_softc *sc;
1710 struct mbuf *m_head;
1711 int idx;
1712{
1713 struct mbuf *m_new = NULL;
1714 struct vge_dmaload_arg arg;
1715 bus_dmamap_t map;
1716 int error;
1717
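	/* Refuse to queue a frame unless a couple of descriptors are free. */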
1718 if (sc->vge_ldata.vge_tx_free <= 2)
1719 return (EFBIG);
1720
1721 arg.vge_flags = 0;
1722
1723 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1724 arg.vge_flags |= VGE_TDCTL_IPCSUM;
1725 if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
1726 arg.vge_flags |= VGE_TDCTL_TCPCSUM;
1727 if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
1728 arg.vge_flags |= VGE_TDCTL_UDPCSUM;
1729
1730 arg.sc = sc;
1731 arg.vge_idx = idx;
1732 arg.vge_m0 = m_head;
1733 arg.vge_maxsegs = VGE_TX_FRAGS;
1734
1735 map = sc->vge_ldata.vge_tx_dmamap[idx];
1736 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
1737 m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
1738
1739 if (error && error != EFBIG) {
1740 printf("vge%d: can't map mbuf (error %d)\n",
1741 sc->vge_unit, error);
1742 return (ENOBUFS);
1743 }
1744
1745 /* Too many segments to map, coalesce into a single mbuf */
1746
1747 if (error || arg.vge_maxsegs == 0) {
1748 m_new = m_defrag(m_head, M_DONTWAIT);
1749 if (m_new == NULL)
1750 return (1);
1751 else
1752 m_head = m_new;
1753
1754 arg.sc = sc;
1755 arg.vge_m0 = m_head;
1756 arg.vge_idx = idx;
1757 arg.vge_maxsegs = 1;
1758
1759 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
1760 m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
1761 if (error) {
1762 printf("vge%d: can't map mbuf (error %d)\n",
1763 sc->vge_unit, error);
1764 return (EFBIG);
1765 }
1766 }
1767
1768 sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
1769 sc->vge_ldata.vge_tx_free--;
1770
1771 /*
1772 * Set up hardware VLAN tagging.
1773 */
1774
1775 if (m_head->m_flags & M_VLANTAG)
1776 sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
1777 htole32(m_head->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG);
1778
1779 sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
1780
1781 return (0);
1782}
1783
1784static void
1785vge_tx_task(arg, npending)
1786 void *arg;
1787 int npending;
1788{
1789 struct ifnet *ifp;
1790
1791 ifp = arg;
1792 vge_start(ifp);
1793
1794 return;
1795}
1796
1797/*
1798 * Main transmit routine.
1799 */
1800
1801static void
1802vge_start(ifp)
1803 struct ifnet *ifp;
1804{
1805 struct vge_softc *sc;
1806 struct mbuf *m_head = NULL;
1807 int idx, pidx = 0;
1808
1809 sc = ifp->if_softc;
1810 VGE_LOCK(sc);
1811
1812 if (!sc->vge_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1813 VGE_UNLOCK(sc);
1814 return;
1815 }
1816
1817 if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
1818 VGE_UNLOCK(sc);
1819 return;
1820 }
1821
1822 idx = sc->vge_ldata.vge_tx_prodidx;
1823
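	/*
	 * pidx trails idx by one: once a new frame has been queued at idx,
	 * the Q bit on descriptor pidx is set below so the chip knows that
	 * another descriptor follows it.
	 */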
1824 pidx = idx - 1;
1825 if (pidx < 0)
1826 pidx = VGE_TX_DESC_CNT - 1;
1827
1828
1829 while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
1830 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1831 if (m_head == NULL)
1832 break;
1833
1834 if (vge_encap(sc, m_head, idx)) {
1835 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1836 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1837 break;
1838 }
1839
1840 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
1841 htole16(VGE_TXDESC_Q);
1842
1843 pidx = idx;
1844 VGE_TX_DESC_INC(idx);
1845
1846 /*
1847 * If there's a BPF listener, bounce a copy of this frame
1848 * to him.
1849 */
1850 ETHER_BPF_MTAP(ifp, m_head);
1851 }
1852
1853 if (idx == sc->vge_ldata.vge_tx_prodidx) {
1854 VGE_UNLOCK(sc);
1855 return;
1856 }
1857
1858 /* Flush the TX descriptors */
1859
1860 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
1861 sc->vge_ldata.vge_tx_list_map,
1862 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1863
1864 /* Issue a transmit command. */
1865 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1866
1867 sc->vge_ldata.vge_tx_prodidx = idx;
1868
1869 /*
1870 * Use the countdown timer for interrupt moderation.
1871 * 'TX done' interrupts are disabled. Instead, we reset the
1872 * countdown timer, which will begin counting until it hits
1873 * the value in the SSTIMER register, and then trigger an
1874 * interrupt. Each time we set the TIMER0_ENABLE bit, the
1875	 * timer count is reloaded. Only when the transmitter
1876 * is idle will the timer hit 0 and an interrupt fire.
1877 */
1878 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1879
1880 VGE_UNLOCK(sc);
1881
1882 /*
1883 * Set a timeout in case the chip goes out to lunch.
1884 */
1885 ifp->if_timer = 5;
1886
1887 return;
1888}
1889
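/*
 * Initialization routine: stop and reset the chip, reload the RX/TX
 * descriptor rings, program the MAC and bring the interface up.
 */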
1890static void
1891vge_init(xsc)
1892 void *xsc;
1893{
1894 struct vge_softc *sc = xsc;
1895 struct ifnet *ifp = sc->vge_ifp;
1896 struct mii_data *mii;
1897 int i;
1898
1899 VGE_LOCK(sc);
1900 mii = device_get_softc(sc->vge_miibus);
1901
1902 /*
1903 * Cancel pending I/O and free all RX/TX buffers.
1904 */
1905 vge_stop(sc);
1906 vge_reset(sc);
1907
1908 /*
1909 * Initialize the RX and TX descriptors and mbufs.
1910 */
1911
1912 vge_rx_list_init(sc);
1913 vge_tx_list_init(sc);
1914
1915 /* Set our station address */
1916 for (i = 0; i < ETHER_ADDR_LEN; i++)
1917 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);
1918
1919 /*
1920 * Set receive FIFO threshold. Also allow transmission and
1921 * reception of VLAN tagged frames.
1922 */
1923 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
1924 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
1925
1926 /* Set DMA burst length */
1927 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
1928 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
1929
1930 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
1931
1932 /* Set collision backoff algorithm */
1933 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
1934 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
1935 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
1936
1937 /* Disable LPSEL field in priority resolution */
1938 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
1939
1940 /*
1941 * Load the addresses of the DMA queues into the chip.
1942 * Note that we only use one transmit queue.
1943 */
1944
1945 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
1946 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
1947 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
1948
1949 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
1950 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
1951 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
1952 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
1953
1954 /* Enable and wake up the RX descriptor queue */
1955 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1956 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1957
1958 /* Enable the TX descriptor queue */
1959 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
1960
1961 /* Set up the receive filter -- allow large frames for VLANs. */
1962 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
1963
1964 /* If we want promiscuous mode, set the allframes bit. */
1965 if (ifp->if_flags & IFF_PROMISC) {
1966 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1967 }
1968
1969 /* Set capture broadcast bit to capture broadcast frames. */
1970 if (ifp->if_flags & IFF_BROADCAST) {
1971 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
1972 }
1973
1974 /* Set multicast bit to capture multicast frames. */
1975 if (ifp->if_flags & IFF_MULTICAST) {
1976 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
1977 }
1978
1979 /* Init the cam filter. */
1980 vge_cam_clear(sc);
1981
1982 /* Init the multicast filter. */
1983 vge_setmulti(sc);
1984
1985 /* Enable flow control */
1986
1987 CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
1988
1989 /* Enable jumbo frame reception (if desired) */
1990
1991	/* Start the MAC: clear the STOP bit, then enable TX/RX and set START. */
1992 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1993 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1994 CSR_WRITE_1(sc, VGE_CRS0,
1995 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
1996
1997 /*
1998 * Configure one-shot timer for microsecond
1999	 * resolution and load it for 500 usecs.
2000 */
2001 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
2002 CSR_WRITE_2(sc, VGE_SSTIMER, 400);
2003
2004 /*
2005 * Configure interrupt moderation for receive. Enable
2006 * the holdoff counter and load it, and set the RX
2007 * suppression count to the number of descriptors we
2008 * want to allow before triggering an interrupt.
2009 * The holdoff timer is in units of 20 usecs.
2010 */
2011
2012#ifdef notyet
2013 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
2014 /* Select the interrupt holdoff timer page. */
2015 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2016 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
2017 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
2018
2019 /* Enable use of the holdoff timer. */
2020 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
2021 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
2022
2023 /* Select the RX suppression threshold page. */
2024 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2025 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
2026 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
2027
2028 /* Restore the page select bits. */
2029 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2030 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
2031#endif
2032
2033#ifdef DEVICE_POLLING
2034 /*
2035 * Disable interrupts if we are polling.
2036 */
2037 if (ifp->if_capenable & IFCAP_POLLING) {
2038 CSR_WRITE_4(sc, VGE_IMR, 0);
2039 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2040 } else /* otherwise ... */
2041#endif
2042 {
2043 /*
2044 * Enable interrupts.
2045 */
2046 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2047 CSR_WRITE_4(sc, VGE_ISR, 0);
2048 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2049 }
2050
2051 mii_mediachg(mii);
2052
2053 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2054 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2055
2056 sc->vge_if_flags = 0;
2057 sc->vge_link = 0;
2058
2059 VGE_UNLOCK(sc);
2060
2061 return;
2062}
2063
2064/*
2065 * Set media options.
2066 */
2067static int
2068vge_ifmedia_upd(ifp)
2069 struct ifnet *ifp;
2070{
2071 struct vge_softc *sc;
2072 struct mii_data *mii;
2073
2074 sc = ifp->if_softc;
2075 VGE_LOCK(sc);
2076 mii = device_get_softc(sc->vge_miibus);
2077 mii_mediachg(mii);
2078 VGE_UNLOCK(sc);
2079
2080 return (0);
2081}
2082
2083/*
2084 * Report current media status.
2085 */
2086static void
2087vge_ifmedia_sts(ifp, ifmr)
2088 struct ifnet *ifp;
2089 struct ifmediareq *ifmr;
2090{
2091 struct vge_softc *sc;
2092 struct mii_data *mii;
2093
2094 sc = ifp->if_softc;
2095 mii = device_get_softc(sc->vge_miibus);
2096
2097 mii_pollstat(mii);
2098 ifmr->ifm_active = mii->mii_media_active;
2099 ifmr->ifm_status = mii->mii_media_status;
2100
2101 return;
2102}
2103
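/*
 * miibus status change callback: adjust the forced MAC mode and
 * forced full duplex bits to match the currently selected media.
 */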
2104static void
2105vge_miibus_statchg(dev)
2106 device_t dev;
2107{
2108 struct vge_softc *sc;
2109 struct mii_data *mii;
2110 struct ifmedia_entry *ife;
2111
2112 sc = device_get_softc(dev);
2113 mii = device_get_softc(sc->vge_miibus);
2114 ife = mii->mii_media.ifm_cur;
2115
2116 /*
2117 * If the user manually selects a media mode, we need to turn
2118 * on the forced MAC mode bit in the DIAGCTL register. If the
2119 * user happens to choose a full duplex mode, we also need to
2120 * set the 'force full duplex' bit. This applies only to
2121 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
2122 * mode is disabled, and in 1000baseT mode, full duplex is
2123 * always implied, so we turn on the forced mode bit but leave
2124 * the FDX bit cleared.
2125 */
2126
2127 switch (IFM_SUBTYPE(ife->ifm_media)) {
2128 case IFM_AUTO:
2129 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2130 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2131 break;
2132 case IFM_1000_T:
2133 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2134 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2135 break;
2136 case IFM_100_TX:
2137 case IFM_10_T:
2138 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2139 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
2140 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2141 } else {
2142 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2143 }
2144 break;
2145 default:
2146 device_printf(dev, "unknown media type: %x\n",
2147 IFM_SUBTYPE(ife->ifm_media));
2148 break;
2149 }
2150
2151 return;
2152}
2153
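/*
 * Handle interface ioctl requests (MTU, flags, multicast, media and
 * capability changes).
 */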
2154static int
2155vge_ioctl(ifp, command, data)
2156 struct ifnet *ifp;
2157 u_long command;
2158 caddr_t data;
2159{
2160 struct vge_softc *sc = ifp->if_softc;
2161 struct ifreq *ifr = (struct ifreq *) data;
2162 struct mii_data *mii;
2163 int error = 0;
2164
2165 switch (command) {
2166	case SIOCSIFMTU:
2167		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
2168			error = EINVAL;
		else
2169			ifp->if_mtu = ifr->ifr_mtu;
2170		break;
2171 case SIOCSIFFLAGS:
2172 if (ifp->if_flags & IFF_UP) {
2173 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2174 ifp->if_flags & IFF_PROMISC &&
2175 !(sc->vge_if_flags & IFF_PROMISC)) {
2176 CSR_SETBIT_1(sc, VGE_RXCTL,
2177 VGE_RXCTL_RX_PROMISC);
2178 vge_setmulti(sc);
2179 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2180 !(ifp->if_flags & IFF_PROMISC) &&
2181 sc->vge_if_flags & IFF_PROMISC) {
2182 CSR_CLRBIT_1(sc, VGE_RXCTL,
2183 VGE_RXCTL_RX_PROMISC);
2184 vge_setmulti(sc);
2185 } else
2186 vge_init(sc);
2187 } else {
2188 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2189 vge_stop(sc);
2190 }
2191 sc->vge_if_flags = ifp->if_flags;
2192 break;
2193 case SIOCADDMULTI:
2194 case SIOCDELMULTI:
2195 vge_setmulti(sc);
2196 break;
2197 case SIOCGIFMEDIA:
2198 case SIOCSIFMEDIA:
2199 mii = device_get_softc(sc->vge_miibus);
2200 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2201 break;
2202 case SIOCSIFCAP:
2203 {
2204 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2205#ifdef DEVICE_POLLING
2206 if (mask & IFCAP_POLLING) {
2207 if (ifr->ifr_reqcap & IFCAP_POLLING) {
2208 error = ether_poll_register(vge_poll, ifp);
2209 if (error)
2210 return(error);
2211 VGE_LOCK(sc);
2212 /* Disable interrupts */
2213 CSR_WRITE_4(sc, VGE_IMR, 0);
2214 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2215 ifp->if_capenable |= IFCAP_POLLING;
2216 VGE_UNLOCK(sc);
2217 } else {
2218 error = ether_poll_deregister(ifp);
2219 /* Enable interrupts. */
2220 VGE_LOCK(sc);
2221 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2222 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2223 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2224 ifp->if_capenable &= ~IFCAP_POLLING;
2225 VGE_UNLOCK(sc);
2226 }
2227 }
2228#endif /* DEVICE_POLLING */
2229 if (mask & IFCAP_HWCSUM) {
2230 ifp->if_capenable |= ifr->ifr_reqcap & (IFCAP_HWCSUM);
2231 if (ifp->if_capenable & IFCAP_TXCSUM)
2232 ifp->if_hwassist = VGE_CSUM_FEATURES;
2233 else
2234 ifp->if_hwassist = 0;
2235 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2236 vge_init(sc);
2237 }
2238 }
2239 break;
2240 default:
2241 error = ether_ioctl(ifp, command, data);
2242 break;
2243 }
2244
2245 return (error);
2246}
2247
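/*
 * Transmit watchdog timeout: the chip failed to complete a transmission
 * in time, so drain the descriptor rings and reinitialize.
 */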
2248static void
2249vge_watchdog(ifp)
2250 struct ifnet *ifp;
2251{
2252 struct vge_softc *sc;
2253
2254 sc = ifp->if_softc;
2255 VGE_LOCK(sc);
2256 printf("vge%d: watchdog timeout\n", sc->vge_unit);
2257 ifp->if_oerrors++;
2258
2259 vge_txeof(sc);
2260 vge_rxeof(sc);
2261
2262 vge_init(sc);
2263
2264 VGE_UNLOCK(sc);
2265
2266 return;
2267}
2268
2269/*
2270 * Stop the adapter and free any mbufs allocated to the
2271 * RX and TX lists.
2272 */
2273static void
2274vge_stop(sc)
2275 struct vge_softc *sc;
2276{
2277 register int i;
2278 struct ifnet *ifp;
2279
2280 VGE_LOCK(sc);
2281 ifp = sc->vge_ifp;
2282 ifp->if_timer = 0;
2283
2284 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2285
2286 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2287 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2288 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2289 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2290 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2291 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2292
2293 if (sc->vge_head != NULL) {
2294 m_freem(sc->vge_head);
2295 sc->vge_head = sc->vge_tail = NULL;
2296 }
2297
2298 /* Free the TX list buffers. */
2299
2300 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
2301 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
2302 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
2303 sc->vge_ldata.vge_tx_dmamap[i]);
2304 m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
2305 sc->vge_ldata.vge_tx_mbuf[i] = NULL;
2306 }
2307 }
2308
2309 /* Free the RX list buffers. */
2310
2311 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
2312 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
2313 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
2314 sc->vge_ldata.vge_rx_dmamap[i]);
2315 m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
2316 sc->vge_ldata.vge_rx_mbuf[i] = NULL;
2317 }
2318 }
2319
2320 VGE_UNLOCK(sc);
2321
2322 return;
2323}
2324
2325/*
2326 * Device suspend routine. Stop the interface and save some PCI
2327 * settings in case the BIOS doesn't restore them properly on
2328 * resume.
2329 */
2330static int
2331vge_suspend(dev)
2332 device_t dev;
2333{
2334 struct vge_softc *sc;
2335
2336 sc = device_get_softc(dev);
2337
2338 vge_stop(sc);
2339
2340 sc->suspended = 1;
2341
2342 return (0);
2343}
2344
2345/*
2346 * Device resume routine. Restore some PCI settings in case the BIOS
2347 * doesn't, re-enable busmastering, and restart the interface if
2348 * appropriate.
2349 */
2350static int
2351vge_resume(dev)
2352 device_t dev;
2353{
2354 struct vge_softc *sc;
2355 struct ifnet *ifp;
2356
2357 sc = device_get_softc(dev);
2358 ifp = sc->vge_ifp;
2359
2360 /* reenable busmastering */
2361 pci_enable_busmaster(dev);
2362 pci_enable_io(dev, SYS_RES_MEMORY);
2363
2364 /* reinitialize interface if necessary */
2365 if (ifp->if_flags & IFF_UP)
2366 vge_init(sc);
2367
2368 sc->suspended = 0;
2369
2370 return (0);
2371}
2372
2373/*
2374 * Stop all chip I/O so that the kernel's probe routines don't
2375 * get confused by errant DMAs when rebooting.
2376 */
2377static int
2378vge_shutdown(dev)
2379	device_t dev;
2380{
2381	struct vge_softc	*sc;
2382
2383	sc = device_get_softc(dev);
2384
2385	vge_stop(sc);
2386
2387	return (0);
2388}