if_pcn.c: revision 77786 vs. revision 79472
1/*
2 * Copyright (c) 2000 Berkeley Software Design, Inc.
3 * Copyright (c) 1997, 1998, 1999, 2000
4 * Bill Paul <wpaul@osd.bsdi.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * $FreeBSD: head/sys/pci/if_pcn.c 77786 2001-06-05 20:51:17Z wpaul $
33 * $FreeBSD: head/sys/pci/if_pcn.c 79472 2001-07-09 17:58:42Z wpaul $
34 */
35
36/*
 37 * AMD Am79c972 fast ethernet PCI NIC driver. Datasheets are available
38 * from http://www.amd.com.
39 *
40 * Written by Bill Paul <wpaul@osd.bsdi.com>
41 */
42
43/*
44 * The AMD PCnet/PCI controllers are more advanced and functional
45 * versions of the venerable 7990 LANCE. The PCnet/PCI chips retain
46 * backwards compatibility with the LANCE and thus can be made
47 * to work with older LANCE drivers. This is in fact how the
48 * PCnet/PCI chips were supported in FreeBSD originally. The trouble
49 * is that the PCnet/PCI devices offer several performance enhancements
50 * which can't be exploited in LANCE compatibility mode. Chief among
51 * these enhancements is the ability to perform PCI DMA operations
52 * using 32-bit addressing (which eliminates the need for ISA
53 * bounce-buffering), and special receive buffer alignment (which
54 * allows the receive handler to pass packets to the upper protocol
55 * layers without copying on both the x86 and alpha platforms).
56 */
57
58#include <sys/param.h>
59#include <sys/systm.h>
60#include <sys/sockio.h>
61#include <sys/mbuf.h>
62#include <sys/malloc.h>
63#include <sys/kernel.h>
64#include <sys/socket.h>
65
66#include <net/if.h>
67#include <net/if_arp.h>
68#include <net/ethernet.h>
69#include <net/if_dl.h>
70#include <net/if_media.h>
71
72#include <net/bpf.h>
73
74#include <vm/vm.h> /* for vtophys */
75#include <vm/pmap.h> /* for vtophys */
76#include <machine/bus_pio.h>
77#include <machine/bus_memio.h>
78#include <machine/bus.h>
79#include <machine/resource.h>
80#include <sys/bus.h>
81#include <sys/rman.h>
82
83#include <dev/mii/mii.h>
84#include <dev/mii/miivar.h>
85
86#include <pci/pcireg.h>
87#include <pci/pcivar.h>
88
89#define PCN_USEIOSPACE
90
91#include <pci/if_pcnreg.h>
92
93MODULE_DEPEND(pcn, miibus, 1, 1, 1);
94
95/* "controller miibus0" required. See GENERIC if you get errors here. */
96#include "miibus_if.h"
97
98#ifndef lint
99static const char rcsid[] =
100 "$FreeBSD: head/sys/pci/if_pcn.c 77786 2001-06-05 20:51:17Z wpaul $";
100 "$FreeBSD: head/sys/pci/if_pcn.c 79472 2001-07-09 17:58:42Z wpaul $";
101#endif
102
103/*
104 * Various supported device vendors/types and their names.
105 */
106static struct pcn_type pcn_devs[] = {
107 { PCN_VENDORID, PCN_DEVICEID_PCNET, "AMD PCnet/PCI 10/100BaseTX" },
108 { PCN_VENDORID, PCN_DEVICEID_HOME, "AMD PCnet/Home HomePNA" },
109 { 0, 0, NULL }
110};
111
112static u_int32_t pcn_csr_read __P((struct pcn_softc *, int));
113static u_int16_t pcn_csr_read16 __P((struct pcn_softc *, int));
114static u_int16_t pcn_bcr_read16 __P((struct pcn_softc *, int));
115static void pcn_csr_write __P((struct pcn_softc *, int, int));
116static u_int32_t pcn_bcr_read __P((struct pcn_softc *, int));
117static void pcn_bcr_write __P((struct pcn_softc *, int, int));
118
119static int pcn_probe __P((device_t));
120static int pcn_attach __P((device_t));
121static int pcn_detach __P((device_t));
122
123static int pcn_newbuf __P((struct pcn_softc *, int, struct mbuf *));
124static int pcn_encap __P((struct pcn_softc *,
125 struct mbuf *, u_int32_t *));
126static void pcn_rxeof __P((struct pcn_softc *));
127static void pcn_txeof __P((struct pcn_softc *));
128static void pcn_intr __P((void *));
129static void pcn_tick __P((void *));
130static void pcn_start __P((struct ifnet *));
131static int pcn_ioctl __P((struct ifnet *, u_long, caddr_t));
132static void pcn_init __P((void *));
133static void pcn_stop __P((struct pcn_softc *));
134static void pcn_watchdog __P((struct ifnet *));
135static void pcn_shutdown __P((device_t));
136static int pcn_ifmedia_upd __P((struct ifnet *));
137static void pcn_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
138
139static int pcn_miibus_readreg __P((device_t, int, int));
140static int pcn_miibus_writereg __P((device_t, int, int, int));
141static void pcn_miibus_statchg __P((device_t));
142
143static void pcn_setfilt __P((struct ifnet *));
144static void pcn_setmulti __P((struct pcn_softc *));
145static u_int32_t pcn_crc __P((caddr_t));
146static void pcn_reset __P((struct pcn_softc *));
147static int pcn_list_rx_init __P((struct pcn_softc *));
148static int pcn_list_tx_init __P((struct pcn_softc *));
149
150#ifdef PCN_USEIOSPACE
151#define PCN_RES SYS_RES_IOPORT
152#define PCN_RID PCN_PCI_LOIO
153#else
154#define PCN_RES SYS_RES_MEMORY
155#define PCN_RID PCN_PCI_LOMEM
156#endif
157
158static device_method_t pcn_methods[] = {
159 /* Device interface */
160 DEVMETHOD(device_probe, pcn_probe),
161 DEVMETHOD(device_attach, pcn_attach),
162 DEVMETHOD(device_detach, pcn_detach),
163 DEVMETHOD(device_shutdown, pcn_shutdown),
164
165 /* bus interface */
166 DEVMETHOD(bus_print_child, bus_generic_print_child),
167 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
168
169 /* MII interface */
170 DEVMETHOD(miibus_readreg, pcn_miibus_readreg),
171 DEVMETHOD(miibus_writereg, pcn_miibus_writereg),
172 DEVMETHOD(miibus_statchg, pcn_miibus_statchg),
173
174 { 0, 0 }
175};
176
177static driver_t pcn_driver = {
178 "pcn",
179 pcn_methods,
180 sizeof(struct pcn_softc)
181};
182
183static devclass_t pcn_devclass;
184
185DRIVER_MODULE(if_pcn, pci, pcn_driver, pcn_devclass, 0, 0);
186DRIVER_MODULE(miibus, pcn, miibus_driver, miibus_devclass, 0, 0);
187
188#define PCN_CSR_SETBIT(sc, reg, x) \
189 pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) | (x))
190
191#define PCN_CSR_CLRBIT(sc, reg, x) \
192 pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) & ~(x))
193
194#define PCN_BCR_SETBIT(sc, reg, x) \
195 pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) | (x))
196
197#define PCN_BCR_CLRBIT(sc, reg, x) \
198 pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) & ~(x))
199
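/*
 * Register access helpers. The PCnet exposes its CSRs and BCRs
 * indirectly: write the register number to RAP (the register address
 * port), then read or write the contents through RDP (for CSRs) or
 * BDP (for BCRs). The 16-bit variants use the 16-bit port offsets so
 * they work before the chip has been switched into 32-bit (DWIO) mode.
 */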
200static u_int32_t pcn_csr_read(sc, reg)
201 struct pcn_softc *sc;
202 int reg;
203{
204 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
205 return(CSR_READ_4(sc, PCN_IO32_RDP));
206}
207
208static u_int16_t pcn_csr_read16(sc, reg)
209 struct pcn_softc *sc;
210 int reg;
211{
212 CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
213 return(CSR_READ_2(sc, PCN_IO16_RDP));
214}
215
216static void pcn_csr_write(sc, reg, val)
217 struct pcn_softc *sc;
 218 int reg, val;
219{
220 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
221 CSR_WRITE_4(sc, PCN_IO32_RDP, val);
222 return;
223}
224
225static u_int32_t pcn_bcr_read(sc, reg)
226 struct pcn_softc *sc;
227 int reg;
228{
229 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
230 return(CSR_READ_4(sc, PCN_IO32_BDP));
231}
232
233static u_int16_t pcn_bcr_read16(sc, reg)
234 struct pcn_softc *sc;
235 int reg;
236{
237 CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
238 return(CSR_READ_2(sc, PCN_IO16_BDP));
239}
240
241static void pcn_bcr_write(sc, reg, val)
242 struct pcn_softc *sc;
 243 int reg, val;
244{
245 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
246 CSR_WRITE_4(sc, PCN_IO32_BDP, val);
247 return;
248}
249
250static int pcn_miibus_readreg(dev, phy, reg)
251 device_t dev;
252 int phy, reg;
253{
254 struct pcn_softc *sc;
255 int val;
256
257 sc = device_get_softc(dev);
258
259 if (sc->pcn_phyaddr && phy > sc->pcn_phyaddr)
260 return(0);
261
262 pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
263 val = pcn_bcr_read(sc, PCN_BCR_MIIDATA) & 0xFFFF;
264 if (val == 0xFFFF)
265 return(0);
266
267 sc->pcn_phyaddr = phy;
268
269 return(val);
270}
271
272static int pcn_miibus_writereg(dev, phy, reg, data)
273 device_t dev;
274 int phy, reg, data;
275{
276 struct pcn_softc *sc;
277
278 sc = device_get_softc(dev);
279
280 pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
281 pcn_bcr_write(sc, PCN_BCR_MIIDATA, data);
282
283 return(0);
284}
285
286static void pcn_miibus_statchg(dev)
287 device_t dev;
288{
289 struct pcn_softc *sc;
290 struct mii_data *mii;
291
292 sc = device_get_softc(dev);
293 mii = device_get_softc(sc->pcn_miibus);
294
295 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
296 PCN_BCR_SETBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
297 } else {
298 PCN_BCR_CLRBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
299 }
300
301 return;
302}
303
304#define DC_POLY 0xEDB88320
305
306static u_int32_t pcn_crc(addr)
307 caddr_t addr;
308{
309 u_int32_t idx, bit, data, crc;
310
311 /* Compute CRC for the address value. */
312 crc = 0xFFFFFFFF; /* initial value */
313
314 for (idx = 0; idx < 6; idx++) {
315 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
316 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0);
317 }
318
319 return ((crc >> 26) & 0x3F);
320}
321
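/*
 * Program the 64-bit multicast hash filter. The top six bits of each
 * multicast address's CRC select one bit in the filter, which is
 * written to the chip as four 16-bit words starting at PCN_CSR_MAR0.
 * The chip is suspended (via the EXTCTL1 SPND bit) while the filter
 * is updated.
 */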
322static void pcn_setmulti(sc)
323 struct pcn_softc *sc;
324{
325 struct ifnet *ifp;
326 struct ifmultiaddr *ifma;
327 u_int32_t h, i;
328 u_int16_t hashes[4] = { 0, 0, 0, 0 };
329
330 ifp = &sc->arpcom.ac_if;
331
332 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
333
334 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
335 for (i = 0; i < 4; i++)
336 pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0xFFFF);
337 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
338 return;
339 }
340
341 /* first, zot all the existing hash bits */
342 for (i = 0; i < 4; i++)
343 pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0);
344
345 /* now program new ones */
346 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
347 if (ifma->ifma_addr->sa_family != AF_LINK)
348 continue;
349 h = pcn_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
350 hashes[h >> 4] |= 1 << (h & 0xF);
351 }
352
353 for (i = 0; i < 4; i++)
354 pcn_csr_write(sc, PCN_CSR_MAR0 + i, hashes[i]);
355
356 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
357
358 return;
359}
360
361static void pcn_reset(sc)
362 struct pcn_softc *sc;
363{
364 /*
365 * Issue a reset by reading from the RESET register.
366 * Note that we don't know if the chip is operating in
367 * 16-bit or 32-bit mode at this point, so we attempt
368 * to reset the chip both ways. If one fails, the other
369 * will succeed.
370 */
371 CSR_READ_2(sc, PCN_IO16_RESET);
372 CSR_READ_4(sc, PCN_IO32_RESET);
373
374 /* Wait a little while for the chip to get its brains in order. */
375 DELAY(1000);
376
377 /* Select 32-bit (DWIO) mode */
378 CSR_WRITE_4(sc, PCN_IO32_RDP, 0);
379
380 /* Select software style 3. */
381 pcn_bcr_write(sc, PCN_BCR_SSTYLE, PCN_SWSTYLE_PCNETPCI_BURST);
382
383 return;
384}
385
386/*
387 * Probe for an AMD chip. Check the PCI vendor and device
388 * IDs against our list and return a device name if we find a match.
389 */
390static int pcn_probe(dev)
391 device_t dev;
392{
393 struct pcn_type *t;
394 struct pcn_softc *sc;
395 int rid;
396 u_int32_t chip_id;
397
398 t = pcn_devs;
399 sc = device_get_softc(dev);
400
401 while(t->pcn_name != NULL) {
402 if ((pci_get_vendor(dev) == t->pcn_vid) &&
403 (pci_get_device(dev) == t->pcn_did)) {
404 /*
405 * Temporarily map the I/O space
406 * so we can read the chip ID register.
407 */
408 rid = PCN_RID;
409 sc->pcn_res = bus_alloc_resource(dev, PCN_RES, &rid,
410 0, ~0, 1, RF_ACTIVE);
411 if (sc->pcn_res == NULL) {
412 device_printf(dev,
413 "couldn't map ports/memory\n");
414 return(ENXIO);
415 }
416 sc->pcn_btag = rman_get_bustag(sc->pcn_res);
417 sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res);
418 mtx_init(&sc->pcn_mtx,
419 device_get_nameunit(dev), MTX_DEF);
420 PCN_LOCK(sc);
421 /*
422 * Note: we can *NOT* put the chip into
423 * 32-bit mode yet. The lnc driver will only
424 * work in 16-bit mode, and once the chip
425 * goes into 32-bit mode, the only way to
426 * get it out again is with a hardware reset.
427 * So if pcn_probe() is called before the
428 * lnc driver's probe routine, the chip will
429 * be locked into 32-bit operation and the lnc
430 * driver will be unable to attach to it.
431 * Note II: if the chip happens to already
432 * be in 32-bit mode, we still need to check
433 * the chip ID, but first we have to detect
434 * 32-bit mode using only 16-bit operations.
435 * The safest way to do this is to read the
436 * PCI subsystem ID from BCR23/24 and compare
437 * that with the value read from PCI config
438 * space.
439 */
440 chip_id = pcn_bcr_read16(sc, PCN_BCR_PCISUBSYSID);
441 chip_id <<= 16;
442 chip_id |= pcn_bcr_read16(sc, PCN_BCR_PCISUBVENID);
443 /*
444 * Note III: the test for 0x10001000 is a hack to
 445 * pacify VMware, whose pseudo-PCnet interface is
 446 * broken. Reading the subsystem register from PCI
 447 * config space yields 0x00000000 while reading the
 448 * same value from I/O space yields 0x10001000. It's
449 * not supposed to be that way.
450 */
451 if (chip_id == pci_read_config(dev,
452 PCIR_SUBVEND_0, 4) || chip_id == 0x10001000) {
453 /* We're in 16-bit mode. */
454 chip_id = pcn_csr_read16(sc, PCN_CSR_CHIPID1);
455 chip_id <<= 16;
456 chip_id |= pcn_csr_read16(sc, PCN_CSR_CHIPID0);
457 } else {
458 /* We're in 32-bit mode. */
459 chip_id = pcn_csr_read(sc, PCN_CSR_CHIPID1);
460 chip_id <<= 16;
461 chip_id |= pcn_csr_read(sc, PCN_CSR_CHIPID0);
462 }
463 bus_release_resource(dev, PCN_RES,
464 PCN_RID, sc->pcn_res);
465 PCN_UNLOCK(sc);
466 mtx_destroy(&sc->pcn_mtx);
467 chip_id >>= 12;
468 sc->pcn_type = chip_id & PART_MASK;
469 switch(sc->pcn_type) {
470 case Am79C971:
471 case Am79C972:
472 case Am79C973:
473 case Am79C975:
474 case Am79C976:
475 case Am79C978:
476 break;
477 default:
478 return(ENXIO);
479 break;
480 }
481 device_set_desc(dev, t->pcn_name);
482 return(0);
483 }
484 t++;
485 }
486
487 return(ENXIO);
488}
489
490/*
491 * Attach the interface. Allocate softc structures, do ifmedia
492 * setup and ethernet/BPF attach.
493 */
494static int pcn_attach(dev)
495 device_t dev;
496{
497 u_int32_t eaddr[2];
498 u_int32_t command;
499 struct pcn_softc *sc;
500 struct ifnet *ifp;
501 int unit, error = 0, rid;
502
503 sc = device_get_softc(dev);
504 unit = device_get_unit(dev);
505
506 /* Initialize our mutex. */
507 mtx_init(&sc->pcn_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
508 PCN_LOCK(sc);
509
510 /*
511 * Handle power management nonsense.
512 */
513 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
514 u_int32_t iobase, membase, irq;
515
516 /* Save important PCI config data. */
517 iobase = pci_read_config(dev, PCN_PCI_LOIO, 4);
518 membase = pci_read_config(dev, PCN_PCI_LOMEM, 4);
519 irq = pci_read_config(dev, PCN_PCI_INTLINE, 4);
520
521 /* Reset the power state. */
522 printf("pcn%d: chip is in D%d power mode "
523 "-- setting to D0\n", unit,
524 pci_get_powerstate(dev));
525 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
526
527 /* Restore PCI config data. */
528 pci_write_config(dev, PCN_PCI_LOIO, iobase, 4);
529 pci_write_config(dev, PCN_PCI_LOMEM, membase, 4);
530 pci_write_config(dev, PCN_PCI_INTLINE, irq, 4);
531 }
532
533 /*
534 * Map control/status registers.
535 */
536 pci_enable_busmaster(dev);
537 pci_enable_io(dev, PCIM_CMD_PORTEN);
538 pci_enable_io(dev, PCIM_CMD_MEMEN);
537 pci_enable_io(dev, SYS_RES_IOPORT);
538 pci_enable_io(dev, SYS_RES_MEMORY);
539 command = pci_read_config(dev, PCIR_COMMAND, 4);
540
541#ifdef PCN_USEIOSPACE
542 if (!(command & PCIM_CMD_PORTEN)) {
543 printf("pcn%d: failed to enable I/O ports!\n", unit);
 544 error = ENXIO;
545 goto fail;
546 }
547#else
548 if (!(command & PCIM_CMD_MEMEN)) {
549 printf("pcn%d: failed to enable memory mapping!\n", unit);
 550 error = ENXIO;
551 goto fail;
552 }
553#endif
554
555 rid = PCN_RID;
556 sc->pcn_res = bus_alloc_resource(dev, PCN_RES, &rid,
557 0, ~0, 1, RF_ACTIVE);
558
559 if (sc->pcn_res == NULL) {
560 printf("pcn%d: couldn't map ports/memory\n", unit);
561 error = ENXIO;
562 goto fail;
563 }
564
565 sc->pcn_btag = rman_get_bustag(sc->pcn_res);
566 sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res);
567
568 /* Allocate interrupt */
569 rid = 0;
570 sc->pcn_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
571 RF_SHAREABLE | RF_ACTIVE);
572
573 if (sc->pcn_irq == NULL) {
574 printf("pcn%d: couldn't map interrupt\n", unit);
575 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
576 error = ENXIO;
577 goto fail;
578 }
579
580 error = bus_setup_intr(dev, sc->pcn_irq, INTR_TYPE_NET,
581 pcn_intr, sc, &sc->pcn_intrhand);
582
583 if (error) {
 584 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
585 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
586 printf("pcn%d: couldn't set up irq\n", unit);
587 goto fail;
588 }
589
590 /* Reset the adapter. */
591 pcn_reset(sc);
592
593 /*
594 * Get station address from the EEPROM.
595 */
596 eaddr[0] = CSR_READ_4(sc, PCN_IO32_APROM00);
597 eaddr[1] = CSR_READ_4(sc, PCN_IO32_APROM01);
598 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
599
600 /*
601 * An AMD chip was detected. Inform the world.
602 */
603 printf("pcn%d: Ethernet address: %6D\n", unit,
604 sc->arpcom.ac_enaddr, ":");
605
606 sc->pcn_unit = unit;
607 callout_handle_init(&sc->pcn_stat_ch);
608
609 sc->pcn_ldata = contigmalloc(sizeof(struct pcn_list_data), M_DEVBUF,
610 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
611
612 if (sc->pcn_ldata == NULL) {
613 printf("pcn%d: no memory for list buffers!\n", unit);
614 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
615 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
616 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
617 error = ENXIO;
618 goto fail;
619 }
620 bzero(sc->pcn_ldata, sizeof(struct pcn_list_data));
621
622 ifp = &sc->arpcom.ac_if;
623 ifp->if_softc = sc;
624 ifp->if_unit = unit;
625 ifp->if_name = "pcn";
626 ifp->if_mtu = ETHERMTU;
627 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
628 ifp->if_ioctl = pcn_ioctl;
629 ifp->if_output = ether_output;
630 ifp->if_start = pcn_start;
631 ifp->if_watchdog = pcn_watchdog;
632 ifp->if_init = pcn_init;
633 ifp->if_baudrate = 10000000;
634 ifp->if_snd.ifq_maxlen = PCN_TX_LIST_CNT - 1;
635
636 /*
637 * Do MII setup.
638 */
639 if (mii_phy_probe(dev, &sc->pcn_miibus,
640 pcn_ifmedia_upd, pcn_ifmedia_sts)) {
641 printf("pcn%d: MII without any PHY!\n", sc->pcn_unit);
642 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
643 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
644 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
645 error = ENXIO;
646 goto fail;
647 }
648
649 /*
650 * Call MI attach routine.
651 */
652 ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
653 callout_handle_init(&sc->pcn_stat_ch);
654 PCN_UNLOCK(sc);
655 return(0);
656
657fail:
658 PCN_UNLOCK(sc);
659 mtx_destroy(&sc->pcn_mtx);
660
661 return(error);
662}
663
664static int pcn_detach(dev)
665 device_t dev;
666{
667 struct pcn_softc *sc;
668 struct ifnet *ifp;
669
670 sc = device_get_softc(dev);
671 ifp = &sc->arpcom.ac_if;
672
673 PCN_LOCK(sc);
674
675 pcn_reset(sc);
676 pcn_stop(sc);
677 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
678
679 if (sc->pcn_miibus != NULL) {
680 bus_generic_detach(dev);
681 device_delete_child(dev, sc->pcn_miibus);
682 }
683
684 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
685 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
686 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
687
688 contigfree(sc->pcn_ldata, sizeof(struct pcn_list_data), M_DEVBUF);
689 PCN_UNLOCK(sc);
690
691 mtx_destroy(&sc->pcn_mtx);
692
693 return(0);
694}
695
696/*
697 * Initialize the transmit descriptors.
698 */
699static int pcn_list_tx_init(sc)
700 struct pcn_softc *sc;
701{
702 struct pcn_list_data *ld;
703 struct pcn_ring_data *cd;
704 int i;
705
706 cd = &sc->pcn_cdata;
707 ld = sc->pcn_ldata;
708
709 for (i = 0; i < PCN_TX_LIST_CNT; i++) {
710 cd->pcn_tx_chain[i] = NULL;
711 ld->pcn_tx_list[i].pcn_tbaddr = 0;
712 ld->pcn_tx_list[i].pcn_txctl = 0;
713 ld->pcn_tx_list[i].pcn_txstat = 0;
714 }
715
716 cd->pcn_tx_prod = cd->pcn_tx_cons = cd->pcn_tx_cnt = 0;
717
718 return(0);
719}
720
721
722/*
723 * Initialize the RX descriptors and allocate mbufs for them.
724 */
725static int pcn_list_rx_init(sc)
726 struct pcn_softc *sc;
727{
728 struct pcn_list_data *ld;
729 struct pcn_ring_data *cd;
730 int i;
731
732 ld = sc->pcn_ldata;
733 cd = &sc->pcn_cdata;
734
735 for (i = 0; i < PCN_RX_LIST_CNT; i++) {
736 if (pcn_newbuf(sc, i, NULL) == ENOBUFS)
737 return(ENOBUFS);
738 }
739
740 cd->pcn_rx_prod = 0;
741
742 return(0);
743}
744
745/*
746 * Initialize an RX descriptor and attach an MBUF cluster.
747 */
748static int pcn_newbuf(sc, idx, m)
749 struct pcn_softc *sc;
750 int idx;
751 struct mbuf *m;
752{
753 struct mbuf *m_new = NULL;
754 struct pcn_rx_desc *c;
755
756 c = &sc->pcn_ldata->pcn_rx_list[idx];
757
758 if (m == NULL) {
759 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
760 if (m_new == NULL) {
761 printf("pcn%d: no memory for rx list "
762 "-- packet dropped!\n", sc->pcn_unit);
763 return(ENOBUFS);
764 }
765
766 MCLGET(m_new, M_DONTWAIT);
767 if (!(m_new->m_flags & M_EXT)) {
768 printf("pcn%d: no memory for rx list "
769 "-- packet dropped!\n", sc->pcn_unit);
770 m_freem(m_new);
771 return(ENOBUFS);
772 }
773 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
774 } else {
775 m_new = m;
776 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
777 m_new->m_data = m_new->m_ext.ext_buf;
778 }
779
780 m_adj(m_new, ETHER_ALIGN);
781
782 sc->pcn_cdata.pcn_rx_chain[idx] = m_new;
783 c->pcn_rbaddr = vtophys(mtod(m_new, caddr_t));
784 c->pcn_bufsz = (~(PCN_RXLEN) + 1) & PCN_RXLEN_BUFSZ;
785 c->pcn_bufsz |= PCN_RXLEN_MBO;
786 c->pcn_rxstat = PCN_RXSTAT_STP|PCN_RXSTAT_ENP|PCN_RXSTAT_OWN;
787
788 return(0);
789}
790
791/*
792 * A frame has been uploaded: pass the resulting mbuf chain up to
793 * the higher level protocols.
794 */
795static void pcn_rxeof(sc)
796 struct pcn_softc *sc;
797{
798 struct ether_header *eh;
799 struct mbuf *m;
800 struct ifnet *ifp;
801 struct pcn_rx_desc *cur_rx;
802 int i;
803
804 ifp = &sc->arpcom.ac_if;
805 i = sc->pcn_cdata.pcn_rx_prod;
806
807 while(PCN_OWN_RXDESC(&sc->pcn_ldata->pcn_rx_list[i])) {
808 cur_rx = &sc->pcn_ldata->pcn_rx_list[i];
809 m = sc->pcn_cdata.pcn_rx_chain[i];
810 sc->pcn_cdata.pcn_rx_chain[i] = NULL;
811
812 /*
813 * If an error occurs, update stats, clear the
814 * status word and leave the mbuf cluster in place:
815 * it should simply get re-used next time this descriptor
816 * comes up in the ring.
817 */
818 if (cur_rx->pcn_rxstat & PCN_RXSTAT_ERR) {
819 ifp->if_ierrors++;
820 pcn_newbuf(sc, i, m);
821 PCN_INC(i, PCN_RX_LIST_CNT);
822 continue;
823 }
824
825 if (pcn_newbuf(sc, i, NULL)) {
826 /* Ran out of mbufs; recycle this one. */
827 pcn_newbuf(sc, i, m);
828 ifp->if_ierrors++;
829 PCN_INC(i, PCN_RX_LIST_CNT);
830 continue;
831 }
832
833 PCN_INC(i, PCN_RX_LIST_CNT);
834
835 /* No errors; receive the packet. */
836 ifp->if_ipackets++;
837 eh = mtod(m, struct ether_header *);
838 m->m_len = m->m_pkthdr.len =
839 cur_rx->pcn_rxlen - ETHER_CRC_LEN;
840 m->m_pkthdr.rcvif = ifp;
841
842 /* Remove header from mbuf and pass it on. */
843 m_adj(m, sizeof(struct ether_header));
844 ether_input(ifp, eh, m);
845 }
846
847 sc->pcn_cdata.pcn_rx_prod = i;
848
849 return;
850}
851
852/*
853 * A frame was downloaded to the chip. It's safe for us to clean up
854 * the list buffers.
855 */
856
857static void pcn_txeof(sc)
858 struct pcn_softc *sc;
859{
860 struct pcn_tx_desc *cur_tx = NULL;
861 struct ifnet *ifp;
862 u_int32_t idx;
863
864 ifp = &sc->arpcom.ac_if;
865
866 /* Clear the timeout timer. */
867 ifp->if_timer = 0;
868
869 /*
870 * Go through our tx list and free mbufs for those
871 * frames that have been transmitted.
872 */
873 idx = sc->pcn_cdata.pcn_tx_cons;
874 while (idx != sc->pcn_cdata.pcn_tx_prod) {
875 cur_tx = &sc->pcn_ldata->pcn_tx_list[idx];
876
877 if (!PCN_OWN_TXDESC(cur_tx))
878 break;
879
880 if (!(cur_tx->pcn_txctl & PCN_TXCTL_ENP)) {
881 sc->pcn_cdata.pcn_tx_cnt--;
882 PCN_INC(idx, PCN_TX_LIST_CNT);
883 continue;
884 }
885
886 if (cur_tx->pcn_txctl & PCN_TXCTL_ERR) {
887 ifp->if_oerrors++;
888 if (cur_tx->pcn_txstat & PCN_TXSTAT_EXDEF)
889 ifp->if_collisions++;
890 if (cur_tx->pcn_txstat & PCN_TXSTAT_RTRY)
891 ifp->if_collisions++;
892 }
893
894 ifp->if_collisions +=
895 cur_tx->pcn_txstat & PCN_TXSTAT_TRC;
896
897 ifp->if_opackets++;
898 if (sc->pcn_cdata.pcn_tx_chain[idx] != NULL) {
899 m_freem(sc->pcn_cdata.pcn_tx_chain[idx]);
900 sc->pcn_cdata.pcn_tx_chain[idx] = NULL;
901 }
902
903 sc->pcn_cdata.pcn_tx_cnt--;
904 PCN_INC(idx, PCN_TX_LIST_CNT);
905 ifp->if_timer = 0;
906 }
907
908 sc->pcn_cdata.pcn_tx_cons = idx;
909
910 if (cur_tx != NULL)
911 ifp->if_flags &= ~IFF_OACTIVE;
912
913 return;
914}
915
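/*
 * One-second timer: poll the MII for link state changes and restart
 * transmission once a link is detected, in case frames were queued
 * while the link was down.
 */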
916static void pcn_tick(xsc)
917 void *xsc;
918{
919 struct pcn_softc *sc;
920 struct mii_data *mii;
921 struct ifnet *ifp;
922
923 sc = xsc;
924 ifp = &sc->arpcom.ac_if;
925 PCN_LOCK(sc);
926
927 mii = device_get_softc(sc->pcn_miibus);
928 mii_tick(mii);
929
 930 if (sc->pcn_link && !(mii->mii_media_status & IFM_ACTIVE))
931 sc->pcn_link = 0;
932
933 if (!sc->pcn_link) {
934 mii_pollstat(mii);
935 if (mii->mii_media_status & IFM_ACTIVE &&
936 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
937 sc->pcn_link++;
938 if (ifp->if_snd.ifq_head != NULL)
939 pcn_start(ifp);
940 }
941
942 sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);
943
944 PCN_UNLOCK(sc);
945
946 return;
947}
948
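/*
 * Interrupt handler: read and acknowledge CSR0 events in a loop,
 * dispatching receive (RINT) and transmit (TINT) completions to the
 * rxeof/txeof handlers and reinitializing the chip on an error
 * condition.
 */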
949static void pcn_intr(arg)
950 void *arg;
951{
952 struct pcn_softc *sc;
953 struct ifnet *ifp;
954 u_int32_t status;
955
956 sc = arg;
957 ifp = &sc->arpcom.ac_if;
958
 959 /* Suppress unwanted interrupts */
960 if (!(ifp->if_flags & IFF_UP)) {
961 pcn_stop(sc);
962 return;
963 }
964
965 CSR_WRITE_4(sc, PCN_IO32_RAP, PCN_CSR_CSR);
966
967 while ((status = CSR_READ_4(sc, PCN_IO32_RDP)) & PCN_CSR_INTR) {
968 CSR_WRITE_4(sc, PCN_IO32_RDP, status);
969
970 if (status & PCN_CSR_RINT)
971 pcn_rxeof(sc);
972
973 if (status & PCN_CSR_TINT)
974 pcn_txeof(sc);
975
976 if (status & PCN_CSR_ERR) {
977 pcn_init(sc);
978 break;
979 }
980 }
981
982 if (ifp->if_snd.ifq_head != NULL)
983 pcn_start(ifp);
984
985 return;
986}
987
988/*
989 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
990 * pointers to the fragment pointers.
991 */
992static int pcn_encap(sc, m_head, txidx)
993 struct pcn_softc *sc;
994 struct mbuf *m_head;
995 u_int32_t *txidx;
996{
997 struct pcn_tx_desc *f = NULL;
998 struct mbuf *m;
999 int frag, cur, cnt = 0;
1000
1001 /*
1002 * Start packing the mbufs in this chain into
1003 * the fragment pointers. Stop when we run out
1004 * of fragments or hit the end of the mbuf chain.
1005 */
1006 m = m_head;
1007 cur = frag = *txidx;
1008
1009 for (m = m_head; m != NULL; m = m->m_next) {
1010 if (m->m_len != 0) {
1011 if ((PCN_TX_LIST_CNT -
1012 (sc->pcn_cdata.pcn_tx_cnt + cnt)) < 2)
1013 return(ENOBUFS);
1014 f = &sc->pcn_ldata->pcn_tx_list[frag];
1015 f->pcn_txctl = (~(m->m_len) + 1) & PCN_TXCTL_BUFSZ;
1016 f->pcn_txctl |= PCN_TXCTL_MBO;
1017 f->pcn_tbaddr = vtophys(mtod(m, vm_offset_t));
1018 if (cnt == 0)
1019 f->pcn_txctl |= PCN_TXCTL_STP;
1020 else
1021 f->pcn_txctl |= PCN_TXCTL_OWN;
1022 cur = frag;
1023 PCN_INC(frag, PCN_TX_LIST_CNT);
1024 cnt++;
1025 }
1026 }
1027
1028 if (m != NULL)
1029 return(ENOBUFS);
1030
1031 sc->pcn_cdata.pcn_tx_chain[cur] = m_head;
1032 sc->pcn_ldata->pcn_tx_list[cur].pcn_txctl |=
1033 PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT;
1034 sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN;
1035 sc->pcn_cdata.pcn_tx_cnt += cnt;
1036 *txidx = frag;
1037
1038 return(0);
1039}
1040
1041/*
1042 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1043 * to the mbuf data regions directly in the transmit lists. We also save a
1044 * copy of the pointers since the transmit list fragment pointers are
1045 * physical addresses.
1046 */
1047static void pcn_start(ifp)
1048 struct ifnet *ifp;
1049{
1050 struct pcn_softc *sc;
1051 struct mbuf *m_head = NULL;
1052 u_int32_t idx;
1053
1054 sc = ifp->if_softc;
1055
1056 PCN_LOCK(sc);
1057
1058 if (!sc->pcn_link) {
1059 PCN_UNLOCK(sc);
1060 return;
1061 }
1062
1063 idx = sc->pcn_cdata.pcn_tx_prod;
1064
1065 if (ifp->if_flags & IFF_OACTIVE) {
1066 PCN_UNLOCK(sc);
1067 return;
1068 }
1069
1070 while(sc->pcn_cdata.pcn_tx_chain[idx] == NULL) {
1071 IF_DEQUEUE(&ifp->if_snd, m_head);
1072 if (m_head == NULL)
1073 break;
1074
1075 if (pcn_encap(sc, m_head, &idx)) {
1076 IF_PREPEND(&ifp->if_snd, m_head);
1077 ifp->if_flags |= IFF_OACTIVE;
1078 break;
1079 }
1080
1081 /*
1082 * If there's a BPF listener, bounce a copy of this frame
1083 * to him.
1084 */
1085 if (ifp->if_bpf)
1086 bpf_mtap(ifp, m_head);
1087
1088 }
1089
1090 /* Transmit */
1091 sc->pcn_cdata.pcn_tx_prod = idx;
1092 pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN);
1093
1094 /*
1095 * Set a timeout in case the chip goes out to lunch.
1096 */
1097 ifp->if_timer = 5;
1098
1099 PCN_UNLOCK(sc);
1100
1101 return;
1102}
1103
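/*
 * Program the receive filter: toggle the promiscuous and
 * "reject broadcast" bits in the mode register to match the
 * interface flags.
 */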
1104static void pcn_setfilt(ifp)
1105 struct ifnet *ifp;
1106{
1107 struct pcn_softc *sc;
1108
1109 sc = ifp->if_softc;
1110
1111 /* If we want promiscuous mode, set the allframes bit. */
1112 if (ifp->if_flags & IFF_PROMISC) {
1113 PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
1114 } else {
1115 PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
1116 }
1117
1118 /* Set the capture broadcast bit to capture broadcast frames. */
1119 if (ifp->if_flags & IFF_BROADCAST) {
1120 PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
1121 } else {
1122 PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
1123 }
1124
1125 return;
1126}
1127
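/*
 * Bring the interface up: reset the chip, program the station address
 * and RX filter, load the descriptor ring addresses and sizes, enable
 * interrupts, and start the one-second stats/link timer.
 */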
1128static void pcn_init(xsc)
1129 void *xsc;
1130{
1131 struct pcn_softc *sc = xsc;
1132 struct ifnet *ifp = &sc->arpcom.ac_if;
1133 struct mii_data *mii = NULL;
1134
1135 PCN_LOCK(sc);
1136
1137 /*
1138 * Cancel pending I/O and free all RX/TX buffers.
1139 */
1140 pcn_stop(sc);
1141 pcn_reset(sc);
1142
1143 mii = device_get_softc(sc->pcn_miibus);
1144
1145 /* Set MAC address */
1146 pcn_csr_write(sc, PCN_CSR_PAR0,
1147 ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
1148 pcn_csr_write(sc, PCN_CSR_PAR1,
1149 ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
1150 pcn_csr_write(sc, PCN_CSR_PAR2,
1151 ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
1152
1153 /* Init circular RX list. */
1154 if (pcn_list_rx_init(sc) == ENOBUFS) {
1155 printf("pcn%d: initialization failed: no "
1156 "memory for rx buffers\n", sc->pcn_unit);
1157 pcn_stop(sc);
1158 PCN_UNLOCK(sc);
1159 return;
1160 }
1161
1162 /*
1163 * Init tx descriptors.
1164 */
1165 pcn_list_tx_init(sc);
1166
1167 /* Set up the mode register. */
1168 pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_MII);
1169
1170 /* Set up RX filter. */
1171 pcn_setfilt(ifp);
1172
1173 /*
1174 * Load the multicast filter.
1175 */
1176 pcn_setmulti(sc);
1177
1178 /*
1179 * Load the addresses of the RX and TX lists.
1180 */
1181 pcn_csr_write(sc, PCN_CSR_RXADDR0,
1182 vtophys(&sc->pcn_ldata->pcn_rx_list[0]) & 0xFFFF);
1183 pcn_csr_write(sc, PCN_CSR_RXADDR1,
1184 (vtophys(&sc->pcn_ldata->pcn_rx_list[0]) >> 16) & 0xFFFF);
1185 pcn_csr_write(sc, PCN_CSR_TXADDR0,
1186 vtophys(&sc->pcn_ldata->pcn_tx_list[0]) & 0xFFFF);
1187 pcn_csr_write(sc, PCN_CSR_TXADDR1,
1188 (vtophys(&sc->pcn_ldata->pcn_tx_list[0]) >> 16) & 0xFFFF);
1189
1190 /* Set the RX and TX ring sizes. */
1191 pcn_csr_write(sc, PCN_CSR_RXRINGLEN, (~PCN_RX_LIST_CNT) + 1);
1192 pcn_csr_write(sc, PCN_CSR_TXRINGLEN, (~PCN_TX_LIST_CNT) + 1);
1193
1194 /* We're not using the initialization block. */
1195 pcn_csr_write(sc, PCN_CSR_IAB1, 0);
1196
1197 /* Enable fast suspend mode. */
1198 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE);
1199
1200 /*
1201 * Enable burst read and write. Also set the no underflow
1202 * bit. This will avoid transmit underruns in certain
1203 * conditions while still providing decent performance.
1204 */
1205 PCN_BCR_SETBIT(sc, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW|
1206 PCN_BUSCTL_BREAD|PCN_BUSCTL_BWRITE);
1207
1208 /* Enable graceful recovery from underflow. */
1209 PCN_CSR_SETBIT(sc, PCN_CSR_IMR, PCN_IMR_DXSUFLO);
1210
1211 /* Enable auto-padding of short TX frames. */
1212 PCN_CSR_SETBIT(sc, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX);
1213
1214 /* Disable MII autoneg (we handle this ourselves). */
1215 PCN_BCR_SETBIT(sc, PCN_BCR_MIICTL, PCN_MIICTL_DANAS);
1216
1217 if (sc->pcn_type == Am79C978)
1218 pcn_bcr_write(sc, PCN_BCR_PHYSEL,
1219 PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA);
1220
1221 /* Enable interrupts and start the controller running. */
1222 pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START);
1223
1224 mii_mediachg(mii);
1225
1226 ifp->if_flags |= IFF_RUNNING;
1227 ifp->if_flags &= ~IFF_OACTIVE;
1228
1229 sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);
1230 PCN_UNLOCK(sc);
1231
1232 return;
1233}
1234
1235/*
1236 * Set media options.
1237 */
1238static int pcn_ifmedia_upd(ifp)
1239 struct ifnet *ifp;
1240{
1241 struct pcn_softc *sc;
1242 struct mii_data *mii;
1243
1244 sc = ifp->if_softc;
1245 mii = device_get_softc(sc->pcn_miibus);
1246
1247 sc->pcn_link = 0;
1248 if (mii->mii_instance) {
1249 struct mii_softc *miisc;
1250 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1251 mii_phy_reset(miisc);
1252 }
1253 mii_mediachg(mii);
1254
1255 return(0);
1256}
1257
1258/*
1259 * Report current media status.
1260 */
1261static void pcn_ifmedia_sts(ifp, ifmr)
1262 struct ifnet *ifp;
1263 struct ifmediareq *ifmr;
1264{
1265 struct pcn_softc *sc;
1266 struct mii_data *mii;
1267
1268 sc = ifp->if_softc;
1269
1270 mii = device_get_softc(sc->pcn_miibus);
1271 mii_pollstat(mii);
1272 ifmr->ifm_active = mii->mii_media_active;
1273 ifmr->ifm_status = mii->mii_media_status;
1274
1275 return;
1276}
1277
1278static int pcn_ioctl(ifp, command, data)
1279 struct ifnet *ifp;
1280 u_long command;
1281 caddr_t data;
1282{
1283 struct pcn_softc *sc = ifp->if_softc;
1284 struct ifreq *ifr = (struct ifreq *) data;
1285 struct mii_data *mii = NULL;
1286 int error = 0;
1287
1288 PCN_LOCK(sc);
1289
1290 switch(command) {
1291 case SIOCSIFADDR:
1292 case SIOCGIFADDR:
1293 case SIOCSIFMTU:
1294 error = ether_ioctl(ifp, command, data);
1295 break;
1296 case SIOCSIFFLAGS:
1297 if (ifp->if_flags & IFF_UP) {
1298 if (ifp->if_flags & IFF_RUNNING &&
1299 ifp->if_flags & IFF_PROMISC &&
1300 !(sc->pcn_if_flags & IFF_PROMISC)) {
1301 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
1302 PCN_EXTCTL1_SPND);
1303 pcn_setfilt(ifp);
1304 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
1305 PCN_EXTCTL1_SPND);
1306 pcn_csr_write(sc, PCN_CSR_CSR,
1307 PCN_CSR_INTEN|PCN_CSR_START);
1308 } else if (ifp->if_flags & IFF_RUNNING &&
1309 !(ifp->if_flags & IFF_PROMISC) &&
1310 sc->pcn_if_flags & IFF_PROMISC) {
1311 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
1312 PCN_EXTCTL1_SPND);
1313 pcn_setfilt(ifp);
1314 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
1315 PCN_EXTCTL1_SPND);
1316 pcn_csr_write(sc, PCN_CSR_CSR,
1317 PCN_CSR_INTEN|PCN_CSR_START);
1318 } else if (!(ifp->if_flags & IFF_RUNNING))
1319 pcn_init(sc);
1320 } else {
1321 if (ifp->if_flags & IFF_RUNNING)
1322 pcn_stop(sc);
1323 }
1324 sc->pcn_if_flags = ifp->if_flags;
1325 error = 0;
1326 break;
1327 case SIOCADDMULTI:
1328 case SIOCDELMULTI:
1329 pcn_setmulti(sc);
1330 error = 0;
1331 break;
1332 case SIOCGIFMEDIA:
1333 case SIOCSIFMEDIA:
1334 mii = device_get_softc(sc->pcn_miibus);
1335 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1336 break;
1337 default:
1338 error = EINVAL;
1339 break;
1340 }
1341
1342 PCN_UNLOCK(sc);
1343
1344 return(error);
1345}
1346
1347static void pcn_watchdog(ifp)
1348 struct ifnet *ifp;
1349{
1350 struct pcn_softc *sc;
1351
1352 sc = ifp->if_softc;
1353
1354 PCN_LOCK(sc);
1355
1356 ifp->if_oerrors++;
1357 printf("pcn%d: watchdog timeout\n", sc->pcn_unit);
1358
1359 pcn_stop(sc);
1360 pcn_reset(sc);
1361 pcn_init(sc);
1362
1363 if (ifp->if_snd.ifq_head != NULL)
1364 pcn_start(ifp);
1365
1366 PCN_UNLOCK(sc);
1367
1368 return;
1369}
1370
1371/*
1372 * Stop the adapter and free any mbufs allocated to the
1373 * RX and TX lists.
1374 */
1375static void pcn_stop(sc)
1376 struct pcn_softc *sc;
1377{
1378 register int i;
1379 struct ifnet *ifp;
1380
1381 ifp = &sc->arpcom.ac_if;
1382 PCN_LOCK(sc);
1383 ifp->if_timer = 0;
1384
1385 untimeout(pcn_tick, sc, sc->pcn_stat_ch);
1386 PCN_CSR_SETBIT(sc, PCN_CSR_CSR, PCN_CSR_STOP);
1387 sc->pcn_link = 0;
1388
1389 /*
1390 * Free data in the RX lists.
1391 */
1392 for (i = 0; i < PCN_RX_LIST_CNT; i++) {
1393 if (sc->pcn_cdata.pcn_rx_chain[i] != NULL) {
1394 m_freem(sc->pcn_cdata.pcn_rx_chain[i]);
1395 sc->pcn_cdata.pcn_rx_chain[i] = NULL;
1396 }
1397 }
1398 bzero((char *)&sc->pcn_ldata->pcn_rx_list,
1399 sizeof(sc->pcn_ldata->pcn_rx_list));
1400
1401 /*
1402 * Free the TX list buffers.
1403 */
1404 for (i = 0; i < PCN_TX_LIST_CNT; i++) {
1405 if (sc->pcn_cdata.pcn_tx_chain[i] != NULL) {
1406 m_freem(sc->pcn_cdata.pcn_tx_chain[i]);
1407 sc->pcn_cdata.pcn_tx_chain[i] = NULL;
1408 }
1409 }
1410
1411 bzero((char *)&sc->pcn_ldata->pcn_tx_list,
1412 sizeof(sc->pcn_ldata->pcn_tx_list));
1413
1414 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1415 PCN_UNLOCK(sc);
1416
1417 return;
1418}
1419
1420/*
1421 * Stop all chip I/O so that the kernel's probe routines don't
1422 * get confused by errant DMAs when rebooting.
1423 */
1424static void pcn_shutdown(dev)
1425 device_t dev;
1426{
1427 struct pcn_softc *sc;
1428
1429 sc = device_get_softc(dev);
1430
1431 PCN_LOCK(sc);
1432 pcn_reset(sc);
1433 pcn_stop(sc);
1434 PCN_UNLOCK(sc);
1435
1436 return;
1437}
539 command = pci_read_config(dev, PCIR_COMMAND, 4);
540
541#ifdef PCN_USEIOSPACE
542 if (!(command & PCIM_CMD_PORTEN)) {
543 printf("pcn%d: failed to enable I/O ports!\n", unit);
544 error = ENXIO;;
545 goto fail;
546 }
547#else
548 if (!(command & PCIM_CMD_MEMEN)) {
549 printf("pcn%d: failed to enable memory mapping!\n", unit);
550 error = ENXIO;;
551 goto fail;
552 }
553#endif
554
555 rid = PCN_RID;
556 sc->pcn_res = bus_alloc_resource(dev, PCN_RES, &rid,
557 0, ~0, 1, RF_ACTIVE);
558
559 if (sc->pcn_res == NULL) {
560 printf("pcn%d: couldn't map ports/memory\n", unit);
561 error = ENXIO;
562 goto fail;
563 }
564
565 sc->pcn_btag = rman_get_bustag(sc->pcn_res);
566 sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res);
567
568 /* Allocate interrupt */
569 rid = 0;
570 sc->pcn_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
571 RF_SHAREABLE | RF_ACTIVE);
572
573 if (sc->pcn_irq == NULL) {
574 printf("pcn%d: couldn't map interrupt\n", unit);
575 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
576 error = ENXIO;
577 goto fail;
578 }
579
580 error = bus_setup_intr(dev, sc->pcn_irq, INTR_TYPE_NET,
581 pcn_intr, sc, &sc->pcn_intrhand);
582
583 if (error) {
584 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_res);
585 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
586 printf("pcn%d: couldn't set up irq\n", unit);
587 goto fail;
588 }
589
590 /* Reset the adapter. */
591 pcn_reset(sc);
592
593 /*
594 * Get station address from the EEPROM.
595 */
596 eaddr[0] = CSR_READ_4(sc, PCN_IO32_APROM00);
597 eaddr[1] = CSR_READ_4(sc, PCN_IO32_APROM01);
598 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
599
600 /*
601 * An AMD chip was detected. Inform the world.
602 */
603 printf("pcn%d: Ethernet address: %6D\n", unit,
604 sc->arpcom.ac_enaddr, ":");
605
606 sc->pcn_unit = unit;
607 callout_handle_init(&sc->pcn_stat_ch);
608
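	/*
	 * The descriptor lists are handed to the chip by physical address
	 * (via vtophys() elsewhere in the driver), so they must come from
	 * physically contiguous, page-aligned memory -- hence
	 * contigmalloc() rather than a plain malloc().
	 */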
609 sc->pcn_ldata = contigmalloc(sizeof(struct pcn_list_data), M_DEVBUF,
610 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
611
612 if (sc->pcn_ldata == NULL) {
613 printf("pcn%d: no memory for list buffers!\n", unit);
614 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
615 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
616 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
617 error = ENXIO;
618 goto fail;
619 }
620 bzero(sc->pcn_ldata, sizeof(struct pcn_list_data));
621
622 ifp = &sc->arpcom.ac_if;
623 ifp->if_softc = sc;
624 ifp->if_unit = unit;
625 ifp->if_name = "pcn";
626 ifp->if_mtu = ETHERMTU;
627 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
628 ifp->if_ioctl = pcn_ioctl;
629 ifp->if_output = ether_output;
630 ifp->if_start = pcn_start;
631 ifp->if_watchdog = pcn_watchdog;
632 ifp->if_init = pcn_init;
633 ifp->if_baudrate = 10000000;
634 ifp->if_snd.ifq_maxlen = PCN_TX_LIST_CNT - 1;
635
636 /*
637 * Do MII setup.
638 */
639 if (mii_phy_probe(dev, &sc->pcn_miibus,
640 pcn_ifmedia_upd, pcn_ifmedia_sts)) {
641 printf("pcn%d: MII without any PHY!\n", sc->pcn_unit);
642 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
643 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
644 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
645 error = ENXIO;
646 goto fail;
647 }
648
649 /*
650 * Call MI attach routine.
651 */
652 ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
653 callout_handle_init(&sc->pcn_stat_ch);
654 PCN_UNLOCK(sc);
655 return(0);
656
657fail:
658 PCN_UNLOCK(sc);
659 mtx_destroy(&sc->pcn_mtx);
660
661 return(error);
662}
663
664static int pcn_detach(dev)
665 device_t dev;
666{
667 struct pcn_softc *sc;
668 struct ifnet *ifp;
669
670 sc = device_get_softc(dev);
671 ifp = &sc->arpcom.ac_if;
672
673 PCN_LOCK(sc);
674
675 pcn_reset(sc);
676 pcn_stop(sc);
677 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
678
679 if (sc->pcn_miibus != NULL) {
680 bus_generic_detach(dev);
681 device_delete_child(dev, sc->pcn_miibus);
682 }
683
684 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
685 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
686 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
687
688 contigfree(sc->pcn_ldata, sizeof(struct pcn_list_data), M_DEVBUF);
689 PCN_UNLOCK(sc);
690
691 mtx_destroy(&sc->pcn_mtx);
692
693 return(0);
694}
695
696/*
697 * Initialize the transmit descriptors.
698 */
699static int pcn_list_tx_init(sc)
700 struct pcn_softc *sc;
701{
702 struct pcn_list_data *ld;
703 struct pcn_ring_data *cd;
704 int i;
705
706 cd = &sc->pcn_cdata;
707 ld = sc->pcn_ldata;
708
709 for (i = 0; i < PCN_TX_LIST_CNT; i++) {
710 cd->pcn_tx_chain[i] = NULL;
711 ld->pcn_tx_list[i].pcn_tbaddr = 0;
712 ld->pcn_tx_list[i].pcn_txctl = 0;
713 ld->pcn_tx_list[i].pcn_txstat = 0;
714 }
715
716 cd->pcn_tx_prod = cd->pcn_tx_cons = cd->pcn_tx_cnt = 0;
717
718 return(0);
719}
720
721
722/*
723 * Initialize the RX descriptors and allocate mbufs for them.
724 */
725static int pcn_list_rx_init(sc)
726 struct pcn_softc *sc;
727{
728 struct pcn_list_data *ld;
729 struct pcn_ring_data *cd;
730 int i;
731
732 ld = sc->pcn_ldata;
733 cd = &sc->pcn_cdata;
734
735 for (i = 0; i < PCN_RX_LIST_CNT; i++) {
736 if (pcn_newbuf(sc, i, NULL) == ENOBUFS)
737 return(ENOBUFS);
738 }
739
740 cd->pcn_rx_prod = 0;
741
742 return(0);
743}
744
745/*
746 * Initialize an RX descriptor and attach an MBUF cluster.
747 */
748static int pcn_newbuf(sc, idx, m)
749 struct pcn_softc *sc;
750 int idx;
751 struct mbuf *m;
752{
753 struct mbuf *m_new = NULL;
754 struct pcn_rx_desc *c;
755
756 c = &sc->pcn_ldata->pcn_rx_list[idx];
757
758 if (m == NULL) {
759 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
760 if (m_new == NULL) {
761 printf("pcn%d: no memory for rx list "
762 "-- packet dropped!\n", sc->pcn_unit);
763 return(ENOBUFS);
764 }
765
766 MCLGET(m_new, M_DONTWAIT);
767 if (!(m_new->m_flags & M_EXT)) {
768 printf("pcn%d: no memory for rx list "
769 "-- packet dropped!\n", sc->pcn_unit);
770 m_freem(m_new);
771 return(ENOBUFS);
772 }
773 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
774 } else {
775 m_new = m;
776 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
777 m_new->m_data = m_new->m_ext.ext_buf;
778 }
779
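	/*
	 * Offset the payload by ETHER_ALIGN bytes so the IP header ends
	 * up longword aligned after the 14-byte Ethernet header.
	 */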
780 m_adj(m_new, ETHER_ALIGN);
781
782 sc->pcn_cdata.pcn_rx_chain[idx] = m_new;
783 c->pcn_rbaddr = vtophys(mtod(m_new, caddr_t));
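	/*
	 * The chip expects the buffer size as a two's complement value;
	 * PCN_RXLEN_MBO covers the descriptor bits that must be set to one.
	 */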
784 c->pcn_bufsz = (~(PCN_RXLEN) + 1) & PCN_RXLEN_BUFSZ;
785 c->pcn_bufsz |= PCN_RXLEN_MBO;
786 c->pcn_rxstat = PCN_RXSTAT_STP|PCN_RXSTAT_ENP|PCN_RXSTAT_OWN;
787
788 return(0);
789}
790
791/*
792 * A frame has been uploaded: pass the resulting mbuf chain up to
793 * the higher level protocols.
794 */
795static void pcn_rxeof(sc)
796 struct pcn_softc *sc;
797{
798 struct ether_header *eh;
799 struct mbuf *m;
800 struct ifnet *ifp;
801 struct pcn_rx_desc *cur_rx;
802 int i;
803
804 ifp = &sc->arpcom.ac_if;
805 i = sc->pcn_cdata.pcn_rx_prod;
806
807 while(PCN_OWN_RXDESC(&sc->pcn_ldata->pcn_rx_list[i])) {
808 cur_rx = &sc->pcn_ldata->pcn_rx_list[i];
809 m = sc->pcn_cdata.pcn_rx_chain[i];
810 sc->pcn_cdata.pcn_rx_chain[i] = NULL;
811
812 /*
813 * If an error occurs, update stats, clear the
814 * status word and leave the mbuf cluster in place:
815 * it should simply get re-used next time this descriptor
816 * comes up in the ring.
817 */
818 if (cur_rx->pcn_rxstat & PCN_RXSTAT_ERR) {
819 ifp->if_ierrors++;
820 pcn_newbuf(sc, i, m);
821 PCN_INC(i, PCN_RX_LIST_CNT);
822 continue;
823 }
824
825 if (pcn_newbuf(sc, i, NULL)) {
826 /* Ran out of mbufs; recycle this one. */
827 pcn_newbuf(sc, i, m);
828 ifp->if_ierrors++;
829 PCN_INC(i, PCN_RX_LIST_CNT);
830 continue;
831 }
832
833 PCN_INC(i, PCN_RX_LIST_CNT);
834
835 /* No errors; receive the packet. */
836 ifp->if_ipackets++;
837 eh = mtod(m, struct ether_header *);
838 m->m_len = m->m_pkthdr.len =
839 cur_rx->pcn_rxlen - ETHER_CRC_LEN;
840 m->m_pkthdr.rcvif = ifp;
841
842 /* Remove header from mbuf and pass it on. */
843 m_adj(m, sizeof(struct ether_header));
844 ether_input(ifp, eh, m);
845 }
846
847 sc->pcn_cdata.pcn_rx_prod = i;
848
849 return;
850}
851
852/*
853 * A frame was downloaded to the chip. It's safe for us to clean up
854 * the list buffers.
855 */
856
857static void pcn_txeof(sc)
858 struct pcn_softc *sc;
859{
860 struct pcn_tx_desc *cur_tx = NULL;
861 struct ifnet *ifp;
862 u_int32_t idx;
863
864 ifp = &sc->arpcom.ac_if;
865
866 /* Clear the timeout timer. */
867 ifp->if_timer = 0;
868
869 /*
870 * Go through our tx list and free mbufs for those
871 * frames that have been transmitted.
872 */
873 idx = sc->pcn_cdata.pcn_tx_cons;
874 while (idx != sc->pcn_cdata.pcn_tx_prod) {
875 cur_tx = &sc->pcn_ldata->pcn_tx_list[idx];
876
877 if (!PCN_OWN_TXDESC(cur_tx))
878 break;
879
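		/*
		 * Descriptors without ENP set are intermediate fragments
		 * of a multi-descriptor frame; just reclaim them and
		 * move on.
		 */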
880 if (!(cur_tx->pcn_txctl & PCN_TXCTL_ENP)) {
881 sc->pcn_cdata.pcn_tx_cnt--;
882 PCN_INC(idx, PCN_TX_LIST_CNT);
883 continue;
884 }
885
886 if (cur_tx->pcn_txctl & PCN_TXCTL_ERR) {
887 ifp->if_oerrors++;
888 if (cur_tx->pcn_txstat & PCN_TXSTAT_EXDEF)
889 ifp->if_collisions++;
890 if (cur_tx->pcn_txstat & PCN_TXSTAT_RTRY)
891 ifp->if_collisions++;
892 }
893
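		/*
		 * The low bits of the status word (PCN_TXSTAT_TRC) hold
		 * the transmit retry count for this frame.
		 */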
894 ifp->if_collisions +=
895 cur_tx->pcn_txstat & PCN_TXSTAT_TRC;
896
897 ifp->if_opackets++;
898 if (sc->pcn_cdata.pcn_tx_chain[idx] != NULL) {
899 m_freem(sc->pcn_cdata.pcn_tx_chain[idx]);
900 sc->pcn_cdata.pcn_tx_chain[idx] = NULL;
901 }
902
903 sc->pcn_cdata.pcn_tx_cnt--;
904 PCN_INC(idx, PCN_TX_LIST_CNT);
905 ifp->if_timer = 0;
906 }
907
908 sc->pcn_cdata.pcn_tx_cons = idx;
909
910 if (cur_tx != NULL)
911 ifp->if_flags &= ~IFF_OACTIVE;
912
913 return;
914}
915
916static void pcn_tick(xsc)
917 void *xsc;
918{
919 struct pcn_softc *sc;
920 struct mii_data *mii;
921 struct ifnet *ifp;
922
923 sc = xsc;
924 ifp = &sc->arpcom.ac_if;
925 PCN_LOCK(sc);
926
927 mii = device_get_softc(sc->pcn_miibus);
928 mii_tick(mii);
929
930	if (sc->pcn_link && !(mii->mii_media_status & IFM_ACTIVE))
931 sc->pcn_link = 0;
932
933 if (!sc->pcn_link) {
934 mii_pollstat(mii);
935 if (mii->mii_media_status & IFM_ACTIVE &&
936 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
937 sc->pcn_link++;
938 if (ifp->if_snd.ifq_head != NULL)
939 pcn_start(ifp);
940 }
941
942 sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);
943
944 PCN_UNLOCK(sc);
945
946 return;
947}
948
949static void pcn_intr(arg)
950 void *arg;
951{
952 struct pcn_softc *sc;
953 struct ifnet *ifp;
954 u_int32_t status;
955
956 sc = arg;
957 ifp = &sc->arpcom.ac_if;
958
959	/* Suppress unwanted interrupts */
960 if (!(ifp->if_flags & IFF_UP)) {
961 pcn_stop(sc);
962 return;
963 }
964
965 CSR_WRITE_4(sc, PCN_IO32_RAP, PCN_CSR_CSR);
966
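	/*
	 * The interrupt bits in CSR0 are cleared by writing ones back,
	 * so writing the status value we just read acknowledges the
	 * events handled below.
	 */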
967 while ((status = CSR_READ_4(sc, PCN_IO32_RDP)) & PCN_CSR_INTR) {
968 CSR_WRITE_4(sc, PCN_IO32_RDP, status);
969
970 if (status & PCN_CSR_RINT)
971 pcn_rxeof(sc);
972
973 if (status & PCN_CSR_TINT)
974 pcn_txeof(sc);
975
976 if (status & PCN_CSR_ERR) {
977 pcn_init(sc);
978 break;
979 }
980 }
981
982 if (ifp->if_snd.ifq_head != NULL)
983 pcn_start(ifp);
984
985 return;
986}
987
988/*
989 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
990 * pointers to the fragment pointers.
991 */
992static int pcn_encap(sc, m_head, txidx)
993 struct pcn_softc *sc;
994 struct mbuf *m_head;
995 u_int32_t *txidx;
996{
997 struct pcn_tx_desc *f = NULL;
998 struct mbuf *m;
999 int frag, cur, cnt = 0;
1000
1001 /*
1002 * Start packing the mbufs in this chain into
1003 * the fragment pointers. Stop when we run out
1004 * of fragments or hit the end of the mbuf chain.
1005 */
1006 m = m_head;
1007 cur = frag = *txidx;
1008
1009 for (m = m_head; m != NULL; m = m->m_next) {
1010 if (m->m_len != 0) {
1011 if ((PCN_TX_LIST_CNT -
1012 (sc->pcn_cdata.pcn_tx_cnt + cnt)) < 2)
1013 return(ENOBUFS);
1014 f = &sc->pcn_ldata->pcn_tx_list[frag];
1015 f->pcn_txctl = (~(m->m_len) + 1) & PCN_TXCTL_BUFSZ;
1016 f->pcn_txctl |= PCN_TXCTL_MBO;
1017 f->pcn_tbaddr = vtophys(mtod(m, vm_offset_t));
1018 if (cnt == 0)
1019 f->pcn_txctl |= PCN_TXCTL_STP;
1020 else
1021 f->pcn_txctl |= PCN_TXCTL_OWN;
1022 cur = frag;
1023 PCN_INC(frag, PCN_TX_LIST_CNT);
1024 cnt++;
1025 }
1026 }
1027
1028 if (m != NULL)
1029 return(ENOBUFS);
1030
1031 sc->pcn_cdata.pcn_tx_chain[cur] = m_head;
1032 sc->pcn_ldata->pcn_tx_list[cur].pcn_txctl |=
1033 PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT;
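	/*
	 * Give the first descriptor to the chip last, so the controller
	 * never sees a partially built chain.
	 */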
1034 sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN;
1035 sc->pcn_cdata.pcn_tx_cnt += cnt;
1036 *txidx = frag;
1037
1038 return(0);
1039}
1040
1041/*
1042 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1043 * to the mbuf data regions directly in the transmit lists. We also save a
1044 * copy of the pointers since the transmit list fragment pointers are
1045 * physical addresses.
1046 */
1047static void pcn_start(ifp)
1048 struct ifnet *ifp;
1049{
1050 struct pcn_softc *sc;
1051 struct mbuf *m_head = NULL;
1052 u_int32_t idx;
1053
1054 sc = ifp->if_softc;
1055
1056 PCN_LOCK(sc);
1057
1058 if (!sc->pcn_link) {
1059 PCN_UNLOCK(sc);
1060 return;
1061 }
1062
1063 idx = sc->pcn_cdata.pcn_tx_prod;
1064
1065 if (ifp->if_flags & IFF_OACTIVE) {
1066 PCN_UNLOCK(sc);
1067 return;
1068 }
1069
1070 while(sc->pcn_cdata.pcn_tx_chain[idx] == NULL) {
1071 IF_DEQUEUE(&ifp->if_snd, m_head);
1072 if (m_head == NULL)
1073 break;
1074
1075 if (pcn_encap(sc, m_head, &idx)) {
1076 IF_PREPEND(&ifp->if_snd, m_head);
1077 ifp->if_flags |= IFF_OACTIVE;
1078 break;
1079 }
1080
1081 /*
1082 * If there's a BPF listener, bounce a copy of this frame
1083 * to him.
1084 */
1085 if (ifp->if_bpf)
1086 bpf_mtap(ifp, m_head);
1087
1088 }
1089
1090 /* Transmit */
1091 sc->pcn_cdata.pcn_tx_prod = idx;
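	/*
	 * Setting the transmit demand bit makes the chip poll the TX
	 * ring immediately rather than waiting for its poll timer.
	 */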
1092 pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN);
1093
1094 /*
1095 * Set a timeout in case the chip goes out to lunch.
1096 */
1097 ifp->if_timer = 5;
1098
1099 PCN_UNLOCK(sc);
1100
1101 return;
1102}
1103
1104static void pcn_setfilt(ifp)
1105 struct ifnet *ifp;
1106{
1107 struct pcn_softc *sc;
1108
1109 sc = ifp->if_softc;
1110
1111 /* If we want promiscuous mode, set the allframes bit. */
1112 if (ifp->if_flags & IFF_PROMISC) {
1113 PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
1114 } else {
1115 PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
1116 }
1117
1118 /* Set the capture broadcast bit to capture broadcast frames. */
1119 if (ifp->if_flags & IFF_BROADCAST) {
1120 PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
1121 } else {
1122 PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
1123 }
1124
1125 return;
1126}
1127
1128static void pcn_init(xsc)
1129 void *xsc;
1130{
1131 struct pcn_softc *sc = xsc;
1132 struct ifnet *ifp = &sc->arpcom.ac_if;
1133 struct mii_data *mii = NULL;
1134
1135 PCN_LOCK(sc);
1136
1137 /*
1138 * Cancel pending I/O and free all RX/TX buffers.
1139 */
1140 pcn_stop(sc);
1141 pcn_reset(sc);
1142
1143 mii = device_get_softc(sc->pcn_miibus);
1144
1145 /* Set MAC address */
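	/* The 6-byte station address is loaded 16 bits at a time. */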
1146 pcn_csr_write(sc, PCN_CSR_PAR0,
1147 ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
1148 pcn_csr_write(sc, PCN_CSR_PAR1,
1149 ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
1150 pcn_csr_write(sc, PCN_CSR_PAR2,
1151 ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
1152
1153 /* Init circular RX list. */
1154 if (pcn_list_rx_init(sc) == ENOBUFS) {
1155 printf("pcn%d: initialization failed: no "
1156 "memory for rx buffers\n", sc->pcn_unit);
1157 pcn_stop(sc);
1158 PCN_UNLOCK(sc);
1159 return;
1160 }
1161
1162 /*
1163 * Init tx descriptors.
1164 */
1165 pcn_list_tx_init(sc);
1166
1167 /* Set up the mode register. */
1168 pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_MII);
1169
1170 /* Set up RX filter. */
1171 pcn_setfilt(ifp);
1172
1173 /*
1174 * Load the multicast filter.
1175 */
1176 pcn_setmulti(sc);
1177
1178 /*
1179 * Load the addresses of the RX and TX lists.
1180 */
1181 pcn_csr_write(sc, PCN_CSR_RXADDR0,
1182 vtophys(&sc->pcn_ldata->pcn_rx_list[0]) & 0xFFFF);
1183 pcn_csr_write(sc, PCN_CSR_RXADDR1,
1184 (vtophys(&sc->pcn_ldata->pcn_rx_list[0]) >> 16) & 0xFFFF);
1185 pcn_csr_write(sc, PCN_CSR_TXADDR0,
1186 vtophys(&sc->pcn_ldata->pcn_tx_list[0]) & 0xFFFF);
1187 pcn_csr_write(sc, PCN_CSR_TXADDR1,
1188 (vtophys(&sc->pcn_ldata->pcn_tx_list[0]) >> 16) & 0xFFFF);
1189
1190 /* Set the RX and TX ring sizes. */
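	/* Like buffer lengths, ring sizes are programmed as two's complement values. */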
1191 pcn_csr_write(sc, PCN_CSR_RXRINGLEN, (~PCN_RX_LIST_CNT) + 1);
1192 pcn_csr_write(sc, PCN_CSR_TXRINGLEN, (~PCN_TX_LIST_CNT) + 1);
1193
1194 /* We're not using the initialization block. */
1195 pcn_csr_write(sc, PCN_CSR_IAB1, 0);
1196
1197 /* Enable fast suspend mode. */
1198 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE);
1199
1200 /*
1201 * Enable burst read and write. Also set the no underflow
1202 * bit. This will avoid transmit underruns in certain
1203 * conditions while still providing decent performance.
1204 */
1205 PCN_BCR_SETBIT(sc, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW|
1206 PCN_BUSCTL_BREAD|PCN_BUSCTL_BWRITE);
1207
1208 /* Enable graceful recovery from underflow. */
1209 PCN_CSR_SETBIT(sc, PCN_CSR_IMR, PCN_IMR_DXSUFLO);
1210
1211 /* Enable auto-padding of short TX frames. */
1212 PCN_CSR_SETBIT(sc, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX);
1213
1214 /* Disable MII autoneg (we handle this ourselves). */
1215 PCN_BCR_SETBIT(sc, PCN_BCR_MIICTL, PCN_MIICTL_DANAS);
1216
1217 if (sc->pcn_type == Am79C978)
1218 pcn_bcr_write(sc, PCN_BCR_PHYSEL,
1219 PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA);
1220
1221 /* Enable interrupts and start the controller running. */
1222 pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START);
1223
1224 mii_mediachg(mii);
1225
1226 ifp->if_flags |= IFF_RUNNING;
1227 ifp->if_flags &= ~IFF_OACTIVE;
1228
1229 sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);
1230 PCN_UNLOCK(sc);
1231
1232 return;
1233}
1234
1235/*
1236 * Set media options.
1237 */
1238static int pcn_ifmedia_upd(ifp)
1239 struct ifnet *ifp;
1240{
1241 struct pcn_softc *sc;
1242 struct mii_data *mii;
1243
1244 sc = ifp->if_softc;
1245 mii = device_get_softc(sc->pcn_miibus);
1246
1247 sc->pcn_link = 0;
1248 if (mii->mii_instance) {
1249 struct mii_softc *miisc;
1250 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1251 mii_phy_reset(miisc);
1252 }
1253 mii_mediachg(mii);
1254
1255 return(0);
1256}
1257
1258/*
1259 * Report current media status.
1260 */
1261static void pcn_ifmedia_sts(ifp, ifmr)
1262 struct ifnet *ifp;
1263 struct ifmediareq *ifmr;
1264{
1265 struct pcn_softc *sc;
1266 struct mii_data *mii;
1267
1268 sc = ifp->if_softc;
1269
1270 mii = device_get_softc(sc->pcn_miibus);
1271 mii_pollstat(mii);
1272 ifmr->ifm_active = mii->mii_media_active;
1273 ifmr->ifm_status = mii->mii_media_status;
1274
1275 return;
1276}
1277
1278static int pcn_ioctl(ifp, command, data)
1279 struct ifnet *ifp;
1280 u_long command;
1281 caddr_t data;
1282{
1283 struct pcn_softc *sc = ifp->if_softc;
1284 struct ifreq *ifr = (struct ifreq *) data;
1285 struct mii_data *mii = NULL;
1286 int error = 0;
1287
1288 PCN_LOCK(sc);
1289
1290 switch(command) {
1291 case SIOCSIFADDR:
1292 case SIOCGIFADDR:
1293 case SIOCSIFMTU:
1294 error = ether_ioctl(ifp, command, data);
1295 break;
1296 case SIOCSIFFLAGS:
1297 if (ifp->if_flags & IFF_UP) {
1298 if (ifp->if_flags & IFF_RUNNING &&
1299 ifp->if_flags & IFF_PROMISC &&
1300 !(sc->pcn_if_flags & IFF_PROMISC)) {
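				/*
				 * Briefly suspend the chip so the RX filter
				 * can be updated in place, then restart it.
				 */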
1301 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
1302 PCN_EXTCTL1_SPND);
1303 pcn_setfilt(ifp);
1304 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
1305 PCN_EXTCTL1_SPND);
1306 pcn_csr_write(sc, PCN_CSR_CSR,
1307 PCN_CSR_INTEN|PCN_CSR_START);
1308 } else if (ifp->if_flags & IFF_RUNNING &&
1309 !(ifp->if_flags & IFF_PROMISC) &&
1310 sc->pcn_if_flags & IFF_PROMISC) {
1311 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
1312 PCN_EXTCTL1_SPND);
1313 pcn_setfilt(ifp);
1314 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
1315 PCN_EXTCTL1_SPND);
1316 pcn_csr_write(sc, PCN_CSR_CSR,
1317 PCN_CSR_INTEN|PCN_CSR_START);
1318 } else if (!(ifp->if_flags & IFF_RUNNING))
1319 pcn_init(sc);
1320 } else {
1321 if (ifp->if_flags & IFF_RUNNING)
1322 pcn_stop(sc);
1323 }
1324 sc->pcn_if_flags = ifp->if_flags;
1325 error = 0;
1326 break;
1327 case SIOCADDMULTI:
1328 case SIOCDELMULTI:
1329 pcn_setmulti(sc);
1330 error = 0;
1331 break;
1332 case SIOCGIFMEDIA:
1333 case SIOCSIFMEDIA:
1334 mii = device_get_softc(sc->pcn_miibus);
1335 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1336 break;
1337 default:
1338 error = EINVAL;
1339 break;
1340 }
1341
1342 PCN_UNLOCK(sc);
1343
1344 return(error);
1345}
1346
1347static void pcn_watchdog(ifp)
1348 struct ifnet *ifp;
1349{
1350 struct pcn_softc *sc;
1351
1352 sc = ifp->if_softc;
1353
1354 PCN_LOCK(sc);
1355
1356 ifp->if_oerrors++;
1357 printf("pcn%d: watchdog timeout\n", sc->pcn_unit);
1358
1359 pcn_stop(sc);
1360 pcn_reset(sc);
1361 pcn_init(sc);
1362
1363 if (ifp->if_snd.ifq_head != NULL)
1364 pcn_start(ifp);
1365
1366 PCN_UNLOCK(sc);
1367
1368 return;
1369}
1370
1371/*
1372 * Stop the adapter and free any mbufs allocated to the
1373 * RX and TX lists.
1374 */
1375static void pcn_stop(sc)
1376 struct pcn_softc *sc;
1377{
1378 register int i;
1379 struct ifnet *ifp;
1380
1381 ifp = &sc->arpcom.ac_if;
1382 PCN_LOCK(sc);
1383 ifp->if_timer = 0;
1384
1385 untimeout(pcn_tick, sc, sc->pcn_stat_ch);
1386 PCN_CSR_SETBIT(sc, PCN_CSR_CSR, PCN_CSR_STOP);
1387 sc->pcn_link = 0;
1388
1389 /*
1390 * Free data in the RX lists.
1391 */
1392 for (i = 0; i < PCN_RX_LIST_CNT; i++) {
1393 if (sc->pcn_cdata.pcn_rx_chain[i] != NULL) {
1394 m_freem(sc->pcn_cdata.pcn_rx_chain[i]);
1395 sc->pcn_cdata.pcn_rx_chain[i] = NULL;
1396 }
1397 }
1398 bzero((char *)&sc->pcn_ldata->pcn_rx_list,
1399 sizeof(sc->pcn_ldata->pcn_rx_list));
1400
1401 /*
1402 * Free the TX list buffers.
1403 */
1404 for (i = 0; i < PCN_TX_LIST_CNT; i++) {
1405 if (sc->pcn_cdata.pcn_tx_chain[i] != NULL) {
1406 m_freem(sc->pcn_cdata.pcn_tx_chain[i]);
1407 sc->pcn_cdata.pcn_tx_chain[i] = NULL;
1408 }
1409 }
1410
1411 bzero((char *)&sc->pcn_ldata->pcn_tx_list,
1412 sizeof(sc->pcn_ldata->pcn_tx_list));
1413
1414 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1415 PCN_UNLOCK(sc);
1416
1417 return;
1418}
1419
1420/*
1421 * Stop all chip I/O so that the kernel's probe routines don't
1422 * get confused by errant DMAs when rebooting.
1423 */
1424static void pcn_shutdown(dev)
1425 device_t dev;
1426{
1427 struct pcn_softc *sc;
1428
1429 sc = device_get_softc(dev);
1430
1431 PCN_LOCK(sc);
1432 pcn_reset(sc);
1433 pcn_stop(sc);
1434 PCN_UNLOCK(sc);
1435
1436 return;
1437}