if_pcn.c (69067) vs. if_pcn.c (69583)
1/*
2 * Copyright (c) 2000 Berkeley Software Design, Inc.
3 * Copyright (c) 1997, 1998, 1999, 2000
4 * Bill Paul <wpaul@osd.bsdi.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * $FreeBSD: head/sys/pci/if_pcn.c 69067 2000-11-23 00:28:43Z wpaul $
33 * $FreeBSD: head/sys/pci/if_pcn.c 69583 2000-12-04 22:46:50Z wpaul $
34 */
35
36/*
 37 * AMD Am79c972 fast ethernet PCI NIC driver. Datasheets are available
38 * from http://www.amd.com.
39 *
40 * Written by Bill Paul <wpaul@osd.bsdi.com>
41 */
42
43/*
44 * The AMD PCnet/PCI controllers are more advanced and functional
45 * versions of the venerable 7990 LANCE. The PCnet/PCI chips retain
46 * backwards compatibility with the LANCE and thus can be made
47 * to work with older LANCE drivers. This is in fact how the
48 * PCnet/PCI chips were supported in FreeBSD originally. The trouble
49 * is that the PCnet/PCI devices offer several performance enhancements
50 * which can't be exploited in LANCE compatibility mode. Chief among
51 * these enhancements is the ability to perform PCI DMA operations
52 * using 32-bit addressing (which eliminates the need for ISA
53 * bounce-buffering), and special receive buffer alignment (which
54 * allows the receive handler to pass packets to the upper protocol
55 * layers without copying on both the x86 and alpha platforms).
56 */
57
58#include <sys/param.h>
59#include <sys/systm.h>
60#include <sys/sockio.h>
61#include <sys/mbuf.h>
62#include <sys/malloc.h>
63#include <sys/kernel.h>
64#include <sys/socket.h>
65
66#include <net/if.h>
67#include <net/if_arp.h>
68#include <net/ethernet.h>
69#include <net/if_dl.h>
70#include <net/if_media.h>
71
72#include <net/bpf.h>
73
74#include <vm/vm.h> /* for vtophys */
75#include <vm/pmap.h> /* for vtophys */
76#include <machine/bus_pio.h>
77#include <machine/bus_memio.h>
78#include <machine/bus.h>
79#include <machine/resource.h>
80#include <sys/bus.h>
81#include <sys/rman.h>
82
83#include <dev/mii/mii.h>
84#include <dev/mii/miivar.h>
85
86#include <pci/pcireg.h>
87#include <pci/pcivar.h>
88
89#define PCN_USEIOSPACE
90
91#include <pci/if_pcnreg.h>
92
93MODULE_DEPEND(pcn, miibus, 1, 1, 1);
94
95/* "controller miibus0" required. See GENERIC if you get errors here. */
96#include "miibus_if.h"
97
98#ifndef lint
99static const char rcsid[] =
100 "$FreeBSD: head/sys/pci/if_pcn.c 69067 2000-11-23 00:28:43Z wpaul $";
100 "$FreeBSD: head/sys/pci/if_pcn.c 69583 2000-12-04 22:46:50Z wpaul $";
101#endif
102
103/*
104 * Various supported device vendors/types and their names.
105 */
106static struct pcn_type pcn_devs[] = {
107 { PCN_VENDORID, PCN_DEVICEID_PCNET, "AMD PCnet/PCI 10/100BaseTX" },
108 { PCN_VENDORID, PCN_DEVICEID_HOME, "AMD PCnet/Home HomePNA" },
109 { 0, 0, NULL }
110};
111
112static u_int32_t pcn_csr_read __P((struct pcn_softc *, int));
113static u_int16_t pcn_csr_read16 __P((struct pcn_softc *, int));
114static u_int16_t pcn_bcr_read16 __P((struct pcn_softc *, int));
115static void pcn_csr_write __P((struct pcn_softc *, int, int));
116static u_int32_t pcn_bcr_read __P((struct pcn_softc *, int));
117static void pcn_bcr_write __P((struct pcn_softc *, int, int));
118
119static int pcn_probe __P((device_t));
120static int pcn_attach __P((device_t));
121static int pcn_detach __P((device_t));
122
123static int pcn_newbuf __P((struct pcn_softc *, int, struct mbuf *));
124static int pcn_encap __P((struct pcn_softc *,
125 struct mbuf *, u_int32_t *));
126static void pcn_rxeof __P((struct pcn_softc *));
127static void pcn_txeof __P((struct pcn_softc *));
128static void pcn_intr __P((void *));
129static void pcn_tick __P((void *));
130static void pcn_start __P((struct ifnet *));
131static int pcn_ioctl __P((struct ifnet *, u_long, caddr_t));
132static void pcn_init __P((void *));
133static void pcn_stop __P((struct pcn_softc *));
134static void pcn_watchdog __P((struct ifnet *));
135static void pcn_shutdown __P((device_t));
136static int pcn_ifmedia_upd __P((struct ifnet *));
137static void pcn_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
138
139static int pcn_miibus_readreg __P((device_t, int, int));
140static int pcn_miibus_writereg __P((device_t, int, int, int));
141static void pcn_miibus_statchg __P((device_t));
142
143static void pcn_setfilt __P((struct ifnet *));
144static void pcn_setmulti __P((struct pcn_softc *));
145static u_int32_t pcn_crc __P((caddr_t));
146static void pcn_reset __P((struct pcn_softc *));
147static int pcn_list_rx_init __P((struct pcn_softc *));
148static int pcn_list_tx_init __P((struct pcn_softc *));
149
150#ifdef PCN_USEIOSPACE
151#define PCN_RES SYS_RES_IOPORT
152#define PCN_RID PCN_PCI_LOIO
153#else
154#define PCN_RES SYS_RES_MEMORY
155#define PCN_RID PCN_PCI_LOMEM
156#endif
157
158static device_method_t pcn_methods[] = {
159 /* Device interface */
160 DEVMETHOD(device_probe, pcn_probe),
161 DEVMETHOD(device_attach, pcn_attach),
162 DEVMETHOD(device_detach, pcn_detach),
163 DEVMETHOD(device_shutdown, pcn_shutdown),
164
165 /* bus interface */
166 DEVMETHOD(bus_print_child, bus_generic_print_child),
167 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
168
169 /* MII interface */
170 DEVMETHOD(miibus_readreg, pcn_miibus_readreg),
171 DEVMETHOD(miibus_writereg, pcn_miibus_writereg),
172 DEVMETHOD(miibus_statchg, pcn_miibus_statchg),
173
174 { 0, 0 }
175};
176
177static driver_t pcn_driver = {
178 "pcn",
179 pcn_methods,
180 sizeof(struct pcn_softc)
181};
182
183static devclass_t pcn_devclass;
184
185DRIVER_MODULE(if_pcn, pci, pcn_driver, pcn_devclass, 0, 0);
186DRIVER_MODULE(miibus, pcn, miibus_driver, miibus_devclass, 0, 0);
187
188#define PCN_CSR_SETBIT(sc, reg, x) \
189 pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) | (x))
190
191#define PCN_CSR_CLRBIT(sc, reg, x) \
192 pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) & ~(x))
193
194#define PCN_BCR_SETBIT(sc, reg, x) \
195 pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) | (x))
196
197#define PCN_BCR_CLRBIT(sc, reg, x) \
198 pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) & ~(x))
199
200static u_int32_t pcn_csr_read(sc, reg)
201 struct pcn_softc *sc;
202 int reg;
203{
204 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
205 return(CSR_READ_4(sc, PCN_IO32_RDP));
206}
207
208static u_int16_t pcn_csr_read16(sc, reg)
209 struct pcn_softc *sc;
210 int reg;
211{
212 CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
213 return(CSR_READ_2(sc, PCN_IO16_RDP));
214}
215
216static void pcn_csr_write(sc, reg, val)
217 struct pcn_softc *sc;
218 int reg;
219{
220 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
221 CSR_WRITE_4(sc, PCN_IO32_RDP, val);
222 return;
223}
224
225static u_int32_t pcn_bcr_read(sc, reg)
226 struct pcn_softc *sc;
227 int reg;
228{
229 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
230 return(CSR_READ_4(sc, PCN_IO32_BDP));
231}
232
233static u_int16_t pcn_bcr_read16(sc, reg)
234 struct pcn_softc *sc;
235 int reg;
236{
237 CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
238 return(CSR_READ_2(sc, PCN_IO16_BDP));
239}
240
241static void pcn_bcr_write(sc, reg, val)
242 struct pcn_softc *sc;
243 int reg;
244{
245 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
246 CSR_WRITE_4(sc, PCN_IO32_BDP, val);
247 return;
248}
249
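Editorial sketch, not part of if_pcn.c: the PCnet register files are reached indirectly -- the accessors above first write a register number to the RAP (register address port) and then read or write the RDP (CSR data) or BDP (BCR data) port. A read-modify-write macro such as PCN_CSR_SETBIT() therefore expands to roughly the following; the helper name is invented for illustration.

static void
pcn_csr_setbit_sketch(struct pcn_softc *sc, int reg, u_int32_t bits)
{
	u_int32_t val;

	/* pcn_csr_read(): select the CSR via RAP, fetch it via RDP. */
	CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
	val = CSR_READ_4(sc, PCN_IO32_RDP);

	/* pcn_csr_write(): select the CSR again, store the new value. */
	CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
	CSR_WRITE_4(sc, PCN_IO32_RDP, val | bits);
}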
250static int pcn_miibus_readreg(dev, phy, reg)
251 device_t dev;
252 int phy, reg;
253{
254 struct pcn_softc *sc;
255 int val;
256
257 sc = device_get_softc(dev);
258
259 if (sc->pcn_phyaddr && phy > sc->pcn_phyaddr)
260 return(0);
261
262 pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
263 val = pcn_bcr_read(sc, PCN_BCR_MIIDATA) & 0xFFFF;
264 if (val == 0xFFFF)
265 return(0);
266
267 sc->pcn_phyaddr = phy;
268
269 return(val);
270}
271
272static int pcn_miibus_writereg(dev, phy, reg, data)
273 device_t dev;
274 int phy, reg, data;
275{
276 struct pcn_softc *sc;
277
278 sc = device_get_softc(dev);
279
280 pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
281 pcn_bcr_write(sc, PCN_BCR_MIIDATA, data);
282
283 return(0);
284}
285
286static void pcn_miibus_statchg(dev)
287 device_t dev;
288{
289 struct pcn_softc *sc;
290 struct mii_data *mii;
291
292 sc = device_get_softc(dev);
293 mii = device_get_softc(sc->pcn_miibus);
294
295 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
296 PCN_BCR_SETBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
297 } else {
298 PCN_BCR_CLRBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
299 }
300
301 return;
302}
303
304#define DC_POLY 0xEDB88320
305
306static u_int32_t pcn_crc(addr)
307 caddr_t addr;
308{
309 u_int32_t idx, bit, data, crc;
310
311 /* Compute CRC for the address value. */
312 crc = 0xFFFFFFFF; /* initial value */
313
314 for (idx = 0; idx < 6; idx++) {
315 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
316 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0);
317 }
318
319 return ((crc >> 26) & 0x3F);
320}
321
322static void pcn_setmulti(sc)
323 struct pcn_softc *sc;
324{
325 struct ifnet *ifp;
326 struct ifmultiaddr *ifma;
327 u_int32_t h, i;
328 u_int16_t hashes[4] = { 0, 0, 0, 0 };
329
330 ifp = &sc->arpcom.ac_if;
331
332 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
333
334 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
335 for (i = 0; i < 4; i++)
336 pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0xFFFF);
337 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
338 return;
339 }
340
341 /* first, zot all the existing hash bits */
342 for (i = 0; i < 4; i++)
343 pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0);
344
345 /* now program new ones */
346 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
347 ifma = ifma->ifma_link.le_next) {
348 if (ifma->ifma_addr->sa_family != AF_LINK)
349 continue;
350 h = pcn_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
351 hashes[h >> 4] |= 1 << (h & 0xF);
352 }
353
354 for (i = 0; i < 4; i++)
355 pcn_csr_write(sc, PCN_CSR_MAR0 + i, hashes[i]);
356
357 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
358
359 return;
360}
361
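Editorial aside, not part of the driver: a worked example of how pcn_setmulti() above maps the 6-bit pcn_crc() result onto the four 16-bit multicast address registers (CSR MAR0..MAR3). The value 0x2B is arbitrary and chosen only for illustration.

	u_int16_t hashes[4] = { 0, 0, 0, 0 };
	u_int32_t h = 0x2B;			/* pretend pcn_crc() returned 43 */

	hashes[h >> 4] |= 1 << (h & 0xF);	/* 43 >> 4 = 2, 43 & 0xF = 11 */
	/* hashes[2] is now 0x0800, i.e. bit 11 of MAR2 would be set. */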
362static void pcn_reset(sc)
363 struct pcn_softc *sc;
364{
365 /*
366 * Issue a reset by reading from the RESET register.
367 * Note that we don't know if the chip is operating in
368 * 16-bit or 32-bit mode at this point, so we attempt
369 * to reset the chip both ways. If one fails, the other
370 * will succeed.
371 */
372 CSR_READ_2(sc, PCN_IO16_RESET);
373 CSR_READ_4(sc, PCN_IO32_RESET);
374
375 /* Wait a little while for the chip to get its brains in order. */
376 DELAY(1000);
377
378 /* Select 32-bit (DWIO) mode */
379 CSR_WRITE_4(sc, PCN_IO32_RDP, 0);
380
381 /* Select software style 3. */
382 pcn_bcr_write(sc, PCN_BCR_SSTYLE, PCN_SWSTYLE_PCNETPCI_BURST);
383
384 return;
385}
386
387/*
388 * Probe for an AMD chip. Check the PCI vendor and device
389 * IDs against our list and return a device name if we find a match.
390 */
391static int pcn_probe(dev)
392 device_t dev;
393{
394 struct pcn_type *t;
395 struct pcn_softc *sc;
396 int rid;
397 u_int32_t chip_id;
398
399 t = pcn_devs;
400 sc = device_get_softc(dev);
401
402 while(t->pcn_name != NULL) {
403 if ((pci_get_vendor(dev) == t->pcn_vid) &&
404 (pci_get_device(dev) == t->pcn_did)) {
405 /*
406 * Temporarily map the I/O space
407 * so we can read the chip ID register.
408 */
409 rid = PCN_RID;
410 sc->pcn_res = bus_alloc_resource(dev, PCN_RES, &rid,
411 0, ~0, 1, RF_ACTIVE);
412 if (sc->pcn_res == NULL) {
413 device_printf(dev,
414 "couldn't map ports/memory\n");
415 return(ENXIO);
416 }
417 sc->pcn_btag = rman_get_bustag(sc->pcn_res);
418 sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res);
419 mtx_init(&sc->pcn_mtx,
420 device_get_nameunit(dev), MTX_DEF);
421 PCN_LOCK(sc);
422 /*
423 * Note: we can *NOT* put the chip into
424 * 32-bit mode yet. The lnc driver will only
425 * work in 16-bit mode, and once the chip
426 * goes into 32-bit mode, the only way to
427 * get it out again is with a hardware reset.
428 * So if pcn_probe() is called before the
429 * lnc driver's probe routine, the chip will
430 * be locked into 32-bit operation and the lnc
431 * driver will be unable to attach to it.
432 * Note II: if the chip happens to already
433 * be in 32-bit mode, we still need to check
434 * the chip ID, but first we have to detect
435 * 32-bit mode using only 16-bit operations.
436 * The safest way to do this is to read the
437 * PCI subsystem ID from BCR23/24 and compare
438 * that with the value read from PCI config
439 * space.
440 */
441 chip_id = pcn_bcr_read16(sc, PCN_BCR_PCISUBSYSID);
442 chip_id <<= 16;
443 chip_id |= pcn_bcr_read16(sc, PCN_BCR_PCISUBVENID);
444 if (chip_id == pci_read_config(dev,
445 PCIR_SUBVEND_0, 4)) {
446 /* We're in 16-bit mode. */
447 chip_id = pcn_csr_read16(sc, PCN_CSR_CHIPID1);
448 chip_id <<= 16;
449 chip_id |= pcn_csr_read16(sc, PCN_CSR_CHIPID0);
450 } else {
451 /* We're in 32-bit mode. */
452 chip_id = pcn_csr_read(sc, PCN_CSR_CHIPID1);
453 chip_id <<= 16;
454 chip_id |= pcn_csr_read(sc, PCN_CSR_CHIPID0);
455 }
456 bus_release_resource(dev, PCN_RES,
457 PCN_RID, sc->pcn_res);
458 PCN_UNLOCK(sc);
459 mtx_destroy(&sc->pcn_mtx);
460 chip_id >>= 12;
461 sc->pcn_type = chip_id & PART_MASK;
462 switch(sc->pcn_type) {
463 case Am79C971:
464 case Am79C972:
465 case Am79C973:
466 case Am79C975:
467 case Am79C976:
468 case Am79C978:
469 break;
470 default:
471 return(ENXIO);
472 break;
473 }
474 device_set_desc(dev, t->pcn_name);
475 return(0);
476 }
477 t++;
478 }
479
480 return(ENXIO);
481}
482
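Editorial sketch, not part of the driver: the 16-bit-safe mode test pcn_probe() performs above, pulled out into a standalone helper for clarity. BCR23/BCR24 hold the PCI subsystem vendor/device IDs, so reading them through the 16-bit ports only matches the value in PCI configuration space while the chip is still in 16-bit (word I/O) mode. The helper name is invented.

static int
pcn_in_16bit_mode_sketch(dev, sc)
	device_t dev;
	struct pcn_softc *sc;
{
	u_int32_t subid;

	/* Read the subsystem ID back through the 16-bit register ports... */
	subid = pcn_bcr_read16(sc, PCN_BCR_PCISUBSYSID);
	subid <<= 16;
	subid |= pcn_bcr_read16(sc, PCN_BCR_PCISUBVENID);

	/* ...and compare with what PCI config space reports. */
	return (subid == pci_read_config(dev, PCIR_SUBVEND_0, 4));
}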
483/*
484 * Attach the interface. Allocate softc structures, do ifmedia
485 * setup and ethernet/BPF attach.
486 */
487static int pcn_attach(dev)
488 device_t dev;
489{
490 u_int32_t eaddr[2];
491 u_int32_t command;
492 struct pcn_softc *sc;
493 struct ifnet *ifp;
494 int unit, error = 0, rid;
495
496 sc = device_get_softc(dev);
497 unit = device_get_unit(dev);
498
499 /* Initialize our mutex. */
500 mtx_init(&sc->pcn_mtx, device_get_nameunit(dev), MTX_DEF);
501 PCN_LOCK(sc);
502
503 /*
504 * Handle power management nonsense.
505 */
506
507 command = pci_read_config(dev, PCN_PCI_CAPID, 4) & 0x000000FF;
508 if (command == 0x01) {
509
510 command = pci_read_config(dev, PCN_PCI_PWRMGMTCTRL, 4);
511 if (command & PCN_PSTATE_MASK) {
512 u_int32_t iobase, membase, irq;
513
514 /* Save important PCI config data. */
515 iobase = pci_read_config(dev, PCN_PCI_LOIO, 4);
516 membase = pci_read_config(dev, PCN_PCI_LOMEM, 4);
517 irq = pci_read_config(dev, PCN_PCI_INTLINE, 4);
518
519 /* Reset the power state. */
520 printf("pcn%d: chip is in D%d power mode "
521 "-- setting to D0\n", unit, command & PCN_PSTATE_MASK);
522 command &= 0xFFFFFFFC;
523 pci_write_config(dev, PCN_PCI_PWRMGMTCTRL, command, 4);
524
525 /* Restore PCI config data. */
526 pci_write_config(dev, PCN_PCI_LOIO, iobase, 4);
527 pci_write_config(dev, PCN_PCI_LOMEM, membase, 4);
528 pci_write_config(dev, PCN_PCI_INTLINE, irq, 4);
529 }
530 }
531
532 /*
533 * Map control/status registers.
534 */
535 command = pci_read_config(dev, PCIR_COMMAND, 4);
536 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
537 pci_write_config(dev, PCIR_COMMAND, command, 4);
538 command = pci_read_config(dev, PCIR_COMMAND, 4);
539
540#ifdef PCN_USEIOSPACE
541 if (!(command & PCIM_CMD_PORTEN)) {
542 printf("pcn%d: failed to enable I/O ports!\n", unit);
 543 error = ENXIO;
544 goto fail;
545 }
546#else
547 if (!(command & PCIM_CMD_MEMEN)) {
548 printf("pcn%d: failed to enable memory mapping!\n", unit);
 549 error = ENXIO;
550 goto fail;
551 }
552#endif
553
554 rid = PCN_RID;
555 sc->pcn_res = bus_alloc_resource(dev, PCN_RES, &rid,
556 0, ~0, 1, RF_ACTIVE);
557
558 if (sc->pcn_res == NULL) {
559 printf("pcn%d: couldn't map ports/memory\n", unit);
560 error = ENXIO;
561 goto fail;
562 }
563
564 sc->pcn_btag = rman_get_bustag(sc->pcn_res);
565 sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res);
566
567 /* Allocate interrupt */
568 rid = 0;
569 sc->pcn_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
570 RF_SHAREABLE | RF_ACTIVE);
571
572 if (sc->pcn_irq == NULL) {
573 printf("pcn%d: couldn't map interrupt\n", unit);
574 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
575 error = ENXIO;
576 goto fail;
577 }
578
579 error = bus_setup_intr(dev, sc->pcn_irq, INTR_TYPE_NET,
580 pcn_intr, sc, &sc->pcn_intrhand);
581
582 if (error) {
583 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_res);
584 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
585 printf("pcn%d: couldn't set up irq\n", unit);
586 goto fail;
587 }
588
585 /* Initialize our mutex. */
586 mtx_init(&sc->pcn_mtx, device_get_nameunit(dev), MTX_DEF);
587 PCN_LOCK(sc);
588
589 /* Reset the adapter. */
590 pcn_reset(sc);
591
592 /*
593 * Get station address from the EEPROM.
594 */
595 eaddr[0] = CSR_READ_4(sc, PCN_IO32_APROM00);
596 eaddr[1] = CSR_READ_4(sc, PCN_IO32_APROM01);
597 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
598
599 /*
600 * An AMD chip was detected. Inform the world.
601 */
602 printf("pcn%d: Ethernet address: %6D\n", unit,
603 sc->arpcom.ac_enaddr, ":");
604
605 sc->pcn_unit = unit;
606 callout_handle_init(&sc->pcn_stat_ch);
607
608 sc->pcn_ldata = contigmalloc(sizeof(struct pcn_list_data), M_DEVBUF,
609 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
610
611 if (sc->pcn_ldata == NULL) {
612 printf("pcn%d: no memory for list buffers!\n", unit);
613 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
614 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
615 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
616 error = ENXIO;
617 goto fail;
618 }
619 bzero(sc->pcn_ldata, sizeof(struct pcn_list_data));
620
621 ifp = &sc->arpcom.ac_if;
622 ifp->if_softc = sc;
623 ifp->if_unit = unit;
624 ifp->if_name = "pcn";
625 ifp->if_mtu = ETHERMTU;
626 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
627 ifp->if_ioctl = pcn_ioctl;
628 ifp->if_output = ether_output;
629 ifp->if_start = pcn_start;
630 ifp->if_watchdog = pcn_watchdog;
631 ifp->if_init = pcn_init;
632 ifp->if_baudrate = 10000000;
633 ifp->if_snd.ifq_maxlen = PCN_TX_LIST_CNT - 1;
634
635 /*
636 * Do MII setup.
637 */
638 if (mii_phy_probe(dev, &sc->pcn_miibus,
639 pcn_ifmedia_upd, pcn_ifmedia_sts)) {
640 printf("pcn%d: MII without any PHY!\n", sc->pcn_unit);
641 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
642 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
643 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
644 error = ENXIO;
645 goto fail;
646 }
647
648 /*
649 * Call MI attach routine.
650 */
651 ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
652 callout_handle_init(&sc->pcn_stat_ch);
653 PCN_UNLOCK(sc);
654 return(0);
655
656fail:
657 PCN_UNLOCK(sc);
658 mtx_destroy(&sc->pcn_mtx);
659
660 return(error);
661}
662
663static int pcn_detach(dev)
664 device_t dev;
665{
666 struct pcn_softc *sc;
667 struct ifnet *ifp;
668
669 sc = device_get_softc(dev);
670 ifp = &sc->arpcom.ac_if;
671
672 PCN_LOCK(sc);
673
674 pcn_reset(sc);
675 pcn_stop(sc);
676 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
677
678 if (sc->pcn_miibus != NULL) {
679 bus_generic_detach(dev);
680 device_delete_child(dev, sc->pcn_miibus);
681 }
682
683 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
684 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
685 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
686
687 contigfree(sc->pcn_ldata, sizeof(struct pcn_list_data), M_DEVBUF);
688 PCN_UNLOCK(sc);
689
690 mtx_destroy(&sc->pcn_mtx);
691
692 return(0);
693}
694
695/*
696 * Initialize the transmit descriptors.
697 */
698static int pcn_list_tx_init(sc)
699 struct pcn_softc *sc;
700{
701 struct pcn_list_data *ld;
702 struct pcn_ring_data *cd;
703 int i;
704
705 cd = &sc->pcn_cdata;
706 ld = sc->pcn_ldata;
707
708 for (i = 0; i < PCN_TX_LIST_CNT; i++) {
709 cd->pcn_tx_chain[i] = NULL;
710 ld->pcn_tx_list[i].pcn_tbaddr = 0;
711 ld->pcn_tx_list[i].pcn_txctl = 0;
712 ld->pcn_tx_list[i].pcn_txstat = 0;
713 }
714
715 cd->pcn_tx_prod = cd->pcn_tx_cons = cd->pcn_tx_cnt = 0;
716
717 return(0);
718}
719
720
721/*
722 * Initialize the RX descriptors and allocate mbufs for them.
723 */
724static int pcn_list_rx_init(sc)
725 struct pcn_softc *sc;
726{
727 struct pcn_list_data *ld;
728 struct pcn_ring_data *cd;
729 int i;
730
731 ld = sc->pcn_ldata;
732 cd = &sc->pcn_cdata;
733
734 for (i = 0; i < PCN_RX_LIST_CNT; i++) {
735 if (pcn_newbuf(sc, i, NULL) == ENOBUFS)
736 return(ENOBUFS);
737 }
738
739 cd->pcn_rx_prod = 0;
740
741 return(0);
742}
743
744/*
745 * Initialize an RX descriptor and attach an MBUF cluster.
746 */
747static int pcn_newbuf(sc, idx, m)
748 struct pcn_softc *sc;
749 int idx;
750 struct mbuf *m;
751{
752 struct mbuf *m_new = NULL;
753 struct pcn_rx_desc *c;
754
755 c = &sc->pcn_ldata->pcn_rx_list[idx];
756
757 if (m == NULL) {
758 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
759 if (m_new == NULL) {
760 printf("pcn%d: no memory for rx list "
761 "-- packet dropped!\n", sc->pcn_unit);
762 return(ENOBUFS);
763 }
764
765 MCLGET(m_new, M_DONTWAIT);
766 if (!(m_new->m_flags & M_EXT)) {
767 printf("pcn%d: no memory for rx list "
768 "-- packet dropped!\n", sc->pcn_unit);
769 m_freem(m_new);
770 return(ENOBUFS);
771 }
772 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
773 } else {
774 m_new = m;
775 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
776 m_new->m_data = m_new->m_ext.ext_buf;
777 }
778
779 m_adj(m_new, ETHER_ALIGN);
780
781 sc->pcn_cdata.pcn_rx_chain[idx] = m_new;
782 c->pcn_rbaddr = vtophys(mtod(m_new, caddr_t));
783 c->pcn_bufsz = (~(PCN_RXLEN) + 1) & PCN_RXLEN_BUFSZ;
784 c->pcn_bufsz |= PCN_RXLEN_MBO;
785 c->pcn_rxstat = PCN_RXSTAT_STP|PCN_RXSTAT_ENP|PCN_RXSTAT_OWN;
786
787 return(0);
788}
789
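Editorial sketch, not part of the driver: the pcn_bufsz arithmetic used in pcn_newbuf() above. LANCE-style descriptors express the buffer byte count as a two's-complement (negative) value, and (~(PCN_RXLEN) + 1) is simply -PCN_RXLEN; the PCN_RXLEN_MBO bits are the descriptor bits the chip requires to be set (presumably "must be ones"). The m_adj(m_new, ETHER_ALIGN) call above shifts the payload so the IP header following the 14-byte Ethernet header lands on a 32-bit boundary, which ties in with the "special receive buffer alignment" noted in the header comment. The helper below is invented for illustration.

static u_int32_t
pcn_rx_bufsz_sketch(len)
	u_int32_t len;
{
	u_int32_t bufsz;

	/* Two's complement of the length, masked to the BUFSZ field... */
	bufsz = (~len + 1) & PCN_RXLEN_BUFSZ;

	/* ...plus the bits the chip expects to find set. */
	return (bufsz | PCN_RXLEN_MBO);
}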
790/*
791 * A frame has been uploaded: pass the resulting mbuf chain up to
792 * the higher level protocols.
793 */
794static void pcn_rxeof(sc)
795 struct pcn_softc *sc;
796{
797 struct ether_header *eh;
798 struct mbuf *m;
799 struct ifnet *ifp;
800 struct pcn_rx_desc *cur_rx;
801 int i;
802
803 ifp = &sc->arpcom.ac_if;
804 i = sc->pcn_cdata.pcn_rx_prod;
805
806 while(PCN_OWN_RXDESC(&sc->pcn_ldata->pcn_rx_list[i])) {
807 cur_rx = &sc->pcn_ldata->pcn_rx_list[i];
808 m = sc->pcn_cdata.pcn_rx_chain[i];
809 sc->pcn_cdata.pcn_rx_chain[i] = NULL;
810
811 /*
812 * If an error occurs, update stats, clear the
813 * status word and leave the mbuf cluster in place:
814 * it should simply get re-used next time this descriptor
815 * comes up in the ring.
816 */
817 if (cur_rx->pcn_rxstat & PCN_RXSTAT_ERR) {
818 ifp->if_ierrors++;
819 pcn_newbuf(sc, i, m);
820 PCN_INC(i, PCN_RX_LIST_CNT);
821 continue;
822 }
823
824 if (pcn_newbuf(sc, i, NULL)) {
825 /* Ran out of mbufs; recycle this one. */
826 pcn_newbuf(sc, i, m);
827 ifp->if_ierrors++;
828 PCN_INC(i, PCN_RX_LIST_CNT);
829 continue;
830 }
831
832 PCN_INC(i, PCN_RX_LIST_CNT);
833
834 /* No errors; receive the packet. */
835 ifp->if_ipackets++;
836 eh = mtod(m, struct ether_header *);
837 m->m_len = m->m_pkthdr.len =
838 cur_rx->pcn_rxlen - ETHER_CRC_LEN;
839 m->m_pkthdr.rcvif = ifp;
840
841 /* Remove header from mbuf and pass it on. */
842 m_adj(m, sizeof(struct ether_header));
843 ether_input(ifp, eh, m);
844 }
845
846 sc->pcn_cdata.pcn_rx_prod = i;
847
848 return;
849}
850
851/*
852 * A frame was downloaded to the chip. It's safe for us to clean up
853 * the list buffers.
854 */
855
856static void pcn_txeof(sc)
857 struct pcn_softc *sc;
858{
859 struct pcn_tx_desc *cur_tx = NULL;
860 struct ifnet *ifp;
861 u_int32_t idx;
862
863 ifp = &sc->arpcom.ac_if;
864
865 /* Clear the timeout timer. */
866 ifp->if_timer = 0;
867
868 /*
869 * Go through our tx list and free mbufs for those
870 * frames that have been transmitted.
871 */
872 idx = sc->pcn_cdata.pcn_tx_cons;
873 while (idx != sc->pcn_cdata.pcn_tx_prod) {
874 cur_tx = &sc->pcn_ldata->pcn_tx_list[idx];
875
876 if (!PCN_OWN_TXDESC(cur_tx))
877 break;
878
879 if (!(cur_tx->pcn_txctl & PCN_TXCTL_ENP)) {
880 sc->pcn_cdata.pcn_tx_cnt--;
881 PCN_INC(idx, PCN_TX_LIST_CNT);
882 continue;
883 }
884
885 if (cur_tx->pcn_txctl & PCN_TXCTL_ERR) {
886 ifp->if_oerrors++;
887 if (cur_tx->pcn_txstat & PCN_TXSTAT_EXDEF)
888 ifp->if_collisions++;
889 if (cur_tx->pcn_txstat & PCN_TXSTAT_RTRY)
890 ifp->if_collisions++;
891 }
892
893 ifp->if_collisions +=
894 cur_tx->pcn_txstat & PCN_TXSTAT_TRC;
895
896 ifp->if_opackets++;
897 if (sc->pcn_cdata.pcn_tx_chain[idx] != NULL) {
898 m_freem(sc->pcn_cdata.pcn_tx_chain[idx]);
899 sc->pcn_cdata.pcn_tx_chain[idx] = NULL;
900 }
901
902 sc->pcn_cdata.pcn_tx_cnt--;
903 PCN_INC(idx, PCN_TX_LIST_CNT);
904 ifp->if_timer = 0;
905 }
906
907 sc->pcn_cdata.pcn_tx_cons = idx;
908
909 if (cur_tx != NULL)
910 ifp->if_flags &= ~IFF_OACTIVE;
911
912 return;
913}
914
915static void pcn_tick(xsc)
916 void *xsc;
917{
918 struct pcn_softc *sc;
919 struct mii_data *mii;
920 struct ifnet *ifp;
921
922 sc = xsc;
923 ifp = &sc->arpcom.ac_if;
924 PCN_LOCK(sc);
925
926 mii = device_get_softc(sc->pcn_miibus);
927 mii_tick(mii);
928
 929 if (sc->pcn_link && !(mii->mii_media_status & IFM_ACTIVE))
930 sc->pcn_link = 0;
931
932 if (!sc->pcn_link) {
933 mii_pollstat(mii);
934 if (mii->mii_media_status & IFM_ACTIVE &&
935 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
936 sc->pcn_link++;
937 if (ifp->if_snd.ifq_head != NULL)
938 pcn_start(ifp);
939 }
940
941 sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);
942
943 PCN_UNLOCK(sc);
944
945 return;
946}
947
948static void pcn_intr(arg)
949 void *arg;
950{
951 struct pcn_softc *sc;
952 struct ifnet *ifp;
953 u_int32_t status;
954
955 sc = arg;
956 ifp = &sc->arpcom.ac_if;
957
 958 /* Suppress unwanted interrupts */
959 if (!(ifp->if_flags & IFF_UP)) {
960 pcn_stop(sc);
961 return;
962 }
963
964 CSR_WRITE_4(sc, PCN_IO32_RAP, PCN_CSR_CSR);
965
966 while ((status = CSR_READ_4(sc, PCN_IO32_RDP)) & PCN_CSR_INTR) {
967 CSR_WRITE_4(sc, PCN_IO32_RDP, status);
968
969 if (status & PCN_CSR_RINT)
970 pcn_rxeof(sc);
971
972 if (status & PCN_CSR_TINT)
973 pcn_txeof(sc);
974
975 if (status & PCN_CSR_ERR) {
976 pcn_init(sc);
977 break;
978 }
979 }
980
981 if (ifp->if_snd.ifq_head != NULL)
982 pcn_start(ifp);
983
984 return;
985}
986
987/*
988 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
989 * pointers to the fragment pointers.
990 */
991static int pcn_encap(sc, m_head, txidx)
992 struct pcn_softc *sc;
993 struct mbuf *m_head;
994 u_int32_t *txidx;
995{
996 struct pcn_tx_desc *f = NULL;
997 struct mbuf *m;
998 int frag, cur, cnt = 0;
999
1000 /*
1001 * Start packing the mbufs in this chain into
1002 * the fragment pointers. Stop when we run out
1003 * of fragments or hit the end of the mbuf chain.
1004 */
1005 m = m_head;
1006 cur = frag = *txidx;
1007
1008 for (m = m_head; m != NULL; m = m->m_next) {
1009 if (m->m_len != 0) {
1010 if ((PCN_TX_LIST_CNT -
1011 (sc->pcn_cdata.pcn_tx_cnt + cnt)) < 2)
1012 return(ENOBUFS);
1013 f = &sc->pcn_ldata->pcn_tx_list[frag];
1014 f->pcn_txctl = (~(m->m_len) + 1) & PCN_TXCTL_BUFSZ;
1015 f->pcn_txctl |= PCN_TXCTL_MBO;
1016 f->pcn_tbaddr = vtophys(mtod(m, vm_offset_t));
1017 if (cnt == 0)
1018 f->pcn_txctl |= PCN_TXCTL_STP;
1019 else
1020 f->pcn_txctl |= PCN_TXCTL_OWN;
1021 cur = frag;
1022 PCN_INC(frag, PCN_TX_LIST_CNT);
1023 cnt++;
1024 }
1025 }
1026
1027 if (m != NULL)
1028 return(ENOBUFS);
1029
1030 sc->pcn_cdata.pcn_tx_chain[cur] = m_head;
1031 sc->pcn_ldata->pcn_tx_list[cur].pcn_txctl |=
1032 PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT;
1033 sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN;
1034 sc->pcn_cdata.pcn_tx_cnt += cnt;
1035 *txidx = frag;
1036
1037 return(0);
1038}
1039
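Editorial sketch, not part of the driver: the descriptor-ownership ordering pcn_encap() relies on. Every fragment after the first is handed to the chip (PCN_TXCTL_OWN) as it is filled in, but the first fragment's OWN bit is set only after the final fragment has been marked ENP, so the controller never starts DMA on a partially built chain. In outline, for a hypothetical three-fragment chain (list, i0..i2 and the bufszN encodings are placeholders for sc->pcn_ldata->pcn_tx_list and the real ring indices/length values):

	/* frag 0: start of packet; OWN deliberately left clear for now. */
	list[i0].pcn_txctl = bufsz0 | PCN_TXCTL_MBO | PCN_TXCTL_STP;
	/* frags 1 and 2: owned by the chip as soon as they are written. */
	list[i1].pcn_txctl = bufsz1 | PCN_TXCTL_MBO | PCN_TXCTL_OWN;
	list[i2].pcn_txctl = bufsz2 | PCN_TXCTL_MBO | PCN_TXCTL_OWN;
	/* last frag: marked end-of-packet, as pcn_encap() does above. */
	list[i2].pcn_txctl |= PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT;
	/* Finally hand the whole chain over by setting OWN on frag 0. */
	list[i0].pcn_txctl |= PCN_TXCTL_OWN;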
1040/*
1041 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1042 * to the mbuf data regions directly in the transmit lists. We also save a
1043 * copy of the pointers since the transmit list fragment pointers are
1044 * physical addresses.
1045 */
1046static void pcn_start(ifp)
1047 struct ifnet *ifp;
1048{
1049 struct pcn_softc *sc;
1050 struct mbuf *m_head = NULL;
1051 u_int32_t idx;
1052
1053 sc = ifp->if_softc;
1054
1055 PCN_LOCK(sc);
1056
1057 if (!sc->pcn_link) {
1058 PCN_UNLOCK(sc);
1059 return;
1060 }
1061
1062 idx = sc->pcn_cdata.pcn_tx_prod;
1063
1064 if (ifp->if_flags & IFF_OACTIVE) {
1065 PCN_UNLOCK(sc);
1066 return;
1067 }
1068
1069 while(sc->pcn_cdata.pcn_tx_chain[idx] == NULL) {
1070 IF_DEQUEUE(&ifp->if_snd, m_head);
1071 if (m_head == NULL)
1072 break;
1073
1074 if (pcn_encap(sc, m_head, &idx)) {
1075 IF_PREPEND(&ifp->if_snd, m_head);
1076 ifp->if_flags |= IFF_OACTIVE;
1077 break;
1078 }
1079
1080 /*
1081 * If there's a BPF listener, bounce a copy of this frame
1082 * to him.
1083 */
1084 if (ifp->if_bpf)
1085 bpf_mtap(ifp, m_head);
1086
1087 }
1088
1089 /* Transmit */
1090 sc->pcn_cdata.pcn_tx_prod = idx;
1091 pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN);
1092
1093 /*
1094 * Set a timeout in case the chip goes out to lunch.
1095 */
1096 ifp->if_timer = 5;
1097
1098 PCN_UNLOCK(sc);
1099
1100 return;
1101}
1102
1103static void pcn_setfilt(ifp)
1104 struct ifnet *ifp;
1105{
1106 struct pcn_softc *sc;
1107
1108 sc = ifp->if_softc;
1109
1110 /* If we want promiscuous mode, set the allframes bit. */
1111 if (ifp->if_flags & IFF_PROMISC) {
1112 PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
1113 } else {
1114 PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
1115 }
1116
1117 /* Set the capture broadcast bit to capture broadcast frames. */
1118 if (ifp->if_flags & IFF_BROADCAST) {
1119 PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
1120 } else {
1121 PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
1122 }
1123
1124 return;
1125}
1126
1127static void pcn_init(xsc)
1128 void *xsc;
1129{
1130 struct pcn_softc *sc = xsc;
1131 struct ifnet *ifp = &sc->arpcom.ac_if;
1132 struct mii_data *mii = NULL;
1133
1134 PCN_LOCK(sc);
1135
1136 /*
1137 * Cancel pending I/O and free all RX/TX buffers.
1138 */
1139 pcn_stop(sc);
1140 pcn_reset(sc);
1141
1142 mii = device_get_softc(sc->pcn_miibus);
1143
1144 /* Set MAC address */
1145 pcn_csr_write(sc, PCN_CSR_PAR0,
1146 ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
1147 pcn_csr_write(sc, PCN_CSR_PAR1,
1148 ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
1149 pcn_csr_write(sc, PCN_CSR_PAR2,
1150 ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
1151
1152 /* Init circular RX list. */
1153 if (pcn_list_rx_init(sc) == ENOBUFS) {
1154 printf("pcn%d: initialization failed: no "
1155 "memory for rx buffers\n", sc->pcn_unit);
1156 pcn_stop(sc);
1157 PCN_UNLOCK(sc);
1158 return;
1159 }
1160
1161 /*
1162 * Init tx descriptors.
1163 */
1164 pcn_list_tx_init(sc);
1165
1166 /* Set up the mode register. */
1167 pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_MII);
1168
1169 /* Set up RX filter. */
1170 pcn_setfilt(ifp);
1171
1172 /*
1173 * Load the multicast filter.
1174 */
1175 pcn_setmulti(sc);
1176
1177 /*
1178 * Load the addresses of the RX and TX lists.
1179 */
1180 pcn_csr_write(sc, PCN_CSR_RXADDR0,
1181 vtophys(&sc->pcn_ldata->pcn_rx_list[0]) & 0xFFFF);
1182 pcn_csr_write(sc, PCN_CSR_RXADDR1,
1183 (vtophys(&sc->pcn_ldata->pcn_rx_list[0]) >> 16) & 0xFFFF);
1184 pcn_csr_write(sc, PCN_CSR_TXADDR0,
1185 vtophys(&sc->pcn_ldata->pcn_tx_list[0]) & 0xFFFF);
1186 pcn_csr_write(sc, PCN_CSR_TXADDR1,
1187 (vtophys(&sc->pcn_ldata->pcn_tx_list[0]) >> 16) & 0xFFFF);
1188
1189 /* Set the RX and TX ring sizes. */
1190 pcn_csr_write(sc, PCN_CSR_RXRINGLEN, (~PCN_RX_LIST_CNT) + 1);
1191 pcn_csr_write(sc, PCN_CSR_TXRINGLEN, (~PCN_TX_LIST_CNT) + 1);
1192
1193 /* We're not using the initialization block. */
1194 pcn_csr_write(sc, PCN_CSR_IAB1, 0);
1195
1196 /* Enable fast suspend mode. */
1197 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE);
1198
1199 /*
1200 * Enable burst read and write. Also set the no underflow
1201 * bit. This will avoid transmit underruns in certain
1202 * conditions while still providing decent performance.
1203 */
1204 PCN_BCR_SETBIT(sc, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW|
1205 PCN_BUSCTL_BREAD|PCN_BUSCTL_BWRITE);
1206
1207 /* Enable graceful recovery from underflow. */
1208 PCN_CSR_SETBIT(sc, PCN_CSR_IMR, PCN_IMR_DXSUFLO);
1209
1210 /* Enable auto-padding of short TX frames. */
1211 PCN_CSR_SETBIT(sc, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX);
1212
1213 /* Disable MII autoneg (we handle this ourselves). */
1214 PCN_BCR_CLRBIT(sc, PCN_BCR_MIICTL, PCN_MIICTL_DANAS);
1215
1216 if (sc->pcn_type == Am79C978)
1217 pcn_bcr_write(sc, PCN_BCR_PHYSEL,
1218 PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA);
1219
1220 /* Enable interrupts and start the controller running. */
1221 pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START);
1222
1223 mii_mediachg(mii);
1224
1225 ifp->if_flags |= IFF_RUNNING;
1226 ifp->if_flags &= ~IFF_OACTIVE;
1227
1228 sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);
1229 PCN_UNLOCK(sc);
1230
1231 return;
1232}
1233
1234/*
1235 * Set media options.
1236 */
1237static int pcn_ifmedia_upd(ifp)
1238 struct ifnet *ifp;
1239{
1240 struct pcn_softc *sc;
1241 struct mii_data *mii;
1242
1243 sc = ifp->if_softc;
1244 mii = device_get_softc(sc->pcn_miibus);
1245
1246 sc->pcn_link = 0;
1247 if (mii->mii_instance) {
1248 struct mii_softc *miisc;
1249 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
1250 miisc = LIST_NEXT(miisc, mii_list))
1251 mii_phy_reset(miisc);
1252 }
1253 mii_mediachg(mii);
1254
1255 return(0);
1256}
1257
1258/*
1259 * Report current media status.
1260 */
1261static void pcn_ifmedia_sts(ifp, ifmr)
1262 struct ifnet *ifp;
1263 struct ifmediareq *ifmr;
1264{
1265 struct pcn_softc *sc;
1266 struct mii_data *mii;
1267
1268 sc = ifp->if_softc;
1269
1270 mii = device_get_softc(sc->pcn_miibus);
1271 mii_pollstat(mii);
1272 ifmr->ifm_active = mii->mii_media_active;
1273 ifmr->ifm_status = mii->mii_media_status;
1274
1275 return;
1276}
1277
1278static int pcn_ioctl(ifp, command, data)
1279 struct ifnet *ifp;
1280 u_long command;
1281 caddr_t data;
1282{
1283 struct pcn_softc *sc = ifp->if_softc;
1284 struct ifreq *ifr = (struct ifreq *) data;
1285 struct mii_data *mii = NULL;
1286 int error = 0;
1287
1288 PCN_LOCK(sc);
1289
1290 switch(command) {
1291 case SIOCSIFADDR:
1292 case SIOCGIFADDR:
1293 case SIOCSIFMTU:
1294 error = ether_ioctl(ifp, command, data);
1295 break;
1296 case SIOCSIFFLAGS:
1297 if (ifp->if_flags & IFF_UP) {
1298 if (ifp->if_flags & IFF_RUNNING &&
1299 ifp->if_flags & IFF_PROMISC &&
1300 !(sc->pcn_if_flags & IFF_PROMISC)) {
1301 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
1302 PCN_EXTCTL1_SPND);
1303 pcn_setfilt(ifp);
1304 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
1305 PCN_EXTCTL1_SPND);
1306 pcn_csr_write(sc, PCN_CSR_CSR,
1307 PCN_CSR_INTEN|PCN_CSR_START);
1308 } else if (ifp->if_flags & IFF_RUNNING &&
1309 !(ifp->if_flags & IFF_PROMISC) &&
1310 sc->pcn_if_flags & IFF_PROMISC) {
1311 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
1312 PCN_EXTCTL1_SPND);
1313 pcn_setfilt(ifp);
1314 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
1315 PCN_EXTCTL1_SPND);
1316 pcn_csr_write(sc, PCN_CSR_CSR,
1317 PCN_CSR_INTEN|PCN_CSR_START);
1318 } else if (!(ifp->if_flags & IFF_RUNNING))
1319 pcn_init(sc);
1320 } else {
1321 if (ifp->if_flags & IFF_RUNNING)
1322 pcn_stop(sc);
1323 }
1324 sc->pcn_if_flags = ifp->if_flags;
1325 error = 0;
1326 break;
1327 case SIOCADDMULTI:
1328 case SIOCDELMULTI:
1329 pcn_setmulti(sc);
1330 error = 0;
1331 break;
1332 case SIOCGIFMEDIA:
1333 case SIOCSIFMEDIA:
1334 mii = device_get_softc(sc->pcn_miibus);
1335 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1336 break;
1337 default:
1338 error = EINVAL;
1339 break;
1340 }
1341
1342 PCN_UNLOCK(sc);
1343
1344 return(error);
1345}
1346
1347static void pcn_watchdog(ifp)
1348 struct ifnet *ifp;
1349{
1350 struct pcn_softc *sc;
1351
1352 sc = ifp->if_softc;
1353
1354 PCN_LOCK(sc);
1355
1356 ifp->if_oerrors++;
1357 printf("pcn%d: watchdog timeout\n", sc->pcn_unit);
1358
1359 pcn_stop(sc);
1360 pcn_reset(sc);
1361 pcn_init(sc);
1362
1363 if (ifp->if_snd.ifq_head != NULL)
1364 pcn_start(ifp);
1365
1366 PCN_UNLOCK(sc);
1367
1368 return;
1369}
1370
1371/*
1372 * Stop the adapter and free any mbufs allocated to the
1373 * RX and TX lists.
1374 */
1375static void pcn_stop(sc)
1376 struct pcn_softc *sc;
1377{
1378 register int i;
1379 struct ifnet *ifp;
1380
1381 ifp = &sc->arpcom.ac_if;
1382 PCN_LOCK(sc);
1383 ifp->if_timer = 0;
1384
1385 untimeout(pcn_tick, sc, sc->pcn_stat_ch);
1386 PCN_CSR_SETBIT(sc, PCN_CSR_CSR, PCN_CSR_STOP);
1387 sc->pcn_link = 0;
1388
1389 /*
1390 * Free data in the RX lists.
1391 */
1392 for (i = 0; i < PCN_RX_LIST_CNT; i++) {
1393 if (sc->pcn_cdata.pcn_rx_chain[i] != NULL) {
1394 m_freem(sc->pcn_cdata.pcn_rx_chain[i]);
1395 sc->pcn_cdata.pcn_rx_chain[i] = NULL;
1396 }
1397 }
1398 bzero((char *)&sc->pcn_ldata->pcn_rx_list,
1399 sizeof(sc->pcn_ldata->pcn_rx_list));
1400
1401 /*
1402 * Free the TX list buffers.
1403 */
1404 for (i = 0; i < PCN_TX_LIST_CNT; i++) {
1405 if (sc->pcn_cdata.pcn_tx_chain[i] != NULL) {
1406 m_freem(sc->pcn_cdata.pcn_tx_chain[i]);
1407 sc->pcn_cdata.pcn_tx_chain[i] = NULL;
1408 }
1409 }
1410
1411 bzero((char *)&sc->pcn_ldata->pcn_tx_list,
1412 sizeof(sc->pcn_ldata->pcn_tx_list));
1413
1414 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1415 PCN_UNLOCK(sc);
1416
1417 return;
1418}
1419
1420/*
1421 * Stop all chip I/O so that the kernel's probe routines don't
1422 * get confused by errant DMAs when rebooting.
1423 */
1424static void pcn_shutdown(dev)
1425 device_t dev;
1426{
1427 struct pcn_softc *sc;
1428
1429 sc = device_get_softc(dev);
1430
1431 PCN_LOCK(sc);
1432 pcn_reset(sc);
1433 pcn_stop(sc);
1434 PCN_UNLOCK(sc);
1435
1436 return;
1437}
589 /* Reset the adapter. */
590 pcn_reset(sc);
591
592 /*
593 * Get station address from the EEPROM.
594 */
595 eaddr[0] = CSR_READ_4(sc, PCN_IO32_APROM00);
596 eaddr[1] = CSR_READ_4(sc, PCN_IO32_APROM01);
597 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
598
599 /*
600 * An AMD chip was detected. Inform the world.
601 */
602 printf("pcn%d: Ethernet address: %6D\n", unit,
603 sc->arpcom.ac_enaddr, ":");
604
605 sc->pcn_unit = unit;
606 callout_handle_init(&sc->pcn_stat_ch);
607
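	/*
	 * The descriptor lists are handed to the chip as physical
	 * addresses (via vtophys), so allocate them as one physically
	 * contiguous, page-aligned chunk below the 4GB boundary.
	 */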
608 sc->pcn_ldata = contigmalloc(sizeof(struct pcn_list_data), M_DEVBUF,
609 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
610
611 if (sc->pcn_ldata == NULL) {
612 printf("pcn%d: no memory for list buffers!\n", unit);
613 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
614 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
615 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
616 error = ENXIO;
617 goto fail;
618 }
619 bzero(sc->pcn_ldata, sizeof(struct pcn_list_data));
620
621 ifp = &sc->arpcom.ac_if;
622 ifp->if_softc = sc;
623 ifp->if_unit = unit;
624 ifp->if_name = "pcn";
625 ifp->if_mtu = ETHERMTU;
626 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
627 ifp->if_ioctl = pcn_ioctl;
628 ifp->if_output = ether_output;
629 ifp->if_start = pcn_start;
630 ifp->if_watchdog = pcn_watchdog;
631 ifp->if_init = pcn_init;
632 ifp->if_baudrate = 10000000;
633 ifp->if_snd.ifq_maxlen = PCN_TX_LIST_CNT - 1;
634
635 /*
636 * Do MII setup.
637 */
638 if (mii_phy_probe(dev, &sc->pcn_miibus,
639 pcn_ifmedia_upd, pcn_ifmedia_sts)) {
640 printf("pcn%d: MII without any PHY!\n", sc->pcn_unit);
641 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
642 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
643 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
644 error = ENXIO;
645 goto fail;
646 }
647
648 /*
649 * Call MI attach routine.
650 */
651 ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
653 PCN_UNLOCK(sc);
654 return(0);
655
656fail:
657 PCN_UNLOCK(sc);
658 mtx_destroy(&sc->pcn_mtx);
659
660 return(error);
661}
662
663static int pcn_detach(dev)
664 device_t dev;
665{
666 struct pcn_softc *sc;
667 struct ifnet *ifp;
668
669 sc = device_get_softc(dev);
670 ifp = &sc->arpcom.ac_if;
671
672 PCN_LOCK(sc);
673
674 pcn_reset(sc);
675 pcn_stop(sc);
676 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
677
678 if (sc->pcn_miibus != NULL) {
679 bus_generic_detach(dev);
680 device_delete_child(dev, sc->pcn_miibus);
681 }
682
683 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
684 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
685 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
686
687 contigfree(sc->pcn_ldata, sizeof(struct pcn_list_data), M_DEVBUF);
688 PCN_UNLOCK(sc);
689
690 mtx_destroy(&sc->pcn_mtx);
691
692 return(0);
693}
694
695/*
696 * Initialize the transmit descriptors.
697 */
698static int pcn_list_tx_init(sc)
699 struct pcn_softc *sc;
700{
701 struct pcn_list_data *ld;
702 struct pcn_ring_data *cd;
703 int i;
704
705 cd = &sc->pcn_cdata;
706 ld = sc->pcn_ldata;
707
708 for (i = 0; i < PCN_TX_LIST_CNT; i++) {
709 cd->pcn_tx_chain[i] = NULL;
710 ld->pcn_tx_list[i].pcn_tbaddr = 0;
711 ld->pcn_tx_list[i].pcn_txctl = 0;
712 ld->pcn_tx_list[i].pcn_txstat = 0;
713 }
714
715 cd->pcn_tx_prod = cd->pcn_tx_cons = cd->pcn_tx_cnt = 0;
716
717 return(0);
718}
719
720
721/*
722 * Initialize the RX descriptors and allocate mbufs for them.
723 */
724static int pcn_list_rx_init(sc)
725 struct pcn_softc *sc;
726{
727 struct pcn_list_data *ld;
728 struct pcn_ring_data *cd;
729 int i;
730
731 ld = sc->pcn_ldata;
732 cd = &sc->pcn_cdata;
733
734 for (i = 0; i < PCN_RX_LIST_CNT; i++) {
735 if (pcn_newbuf(sc, i, NULL) == ENOBUFS)
736 return(ENOBUFS);
737 }
738
739 cd->pcn_rx_prod = 0;
740
741 return(0);
742}
743
744/*
745 * Initialize an RX descriptor and attach an MBUF cluster.
746 */
747static int pcn_newbuf(sc, idx, m)
748 struct pcn_softc *sc;
749 int idx;
750 struct mbuf *m;
751{
752 struct mbuf *m_new = NULL;
753 struct pcn_rx_desc *c;
754
755 c = &sc->pcn_ldata->pcn_rx_list[idx];
756
757 if (m == NULL) {
758 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
759 if (m_new == NULL) {
760 printf("pcn%d: no memory for rx list "
761 "-- packet dropped!\n", sc->pcn_unit);
762 return(ENOBUFS);
763 }
764
765 MCLGET(m_new, M_DONTWAIT);
766 if (!(m_new->m_flags & M_EXT)) {
767 printf("pcn%d: no memory for rx list "
768 "-- packet dropped!\n", sc->pcn_unit);
769 m_freem(m_new);
770 return(ENOBUFS);
771 }
772 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
773 } else {
774 m_new = m;
775 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
776 m_new->m_data = m_new->m_ext.ext_buf;
777 }
778
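	/*
	 * Offset the payload by ETHER_ALIGN (2 bytes) so the IP header
	 * that follows the 14-byte Ethernet header lands on a longword
	 * boundary.
	 */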
779 m_adj(m_new, ETHER_ALIGN);
780
781 sc->pcn_cdata.pcn_rx_chain[idx] = m_new;
782 c->pcn_rbaddr = vtophys(mtod(m_new, caddr_t));
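	/*
	 * The buffer byte count is stored in the descriptor as a two's
	 * complement negative value, hence the (~(PCN_RXLEN) + 1) masked
	 * to the BUFSZ field below; the PCN_RXLEN_MBO bits (presumably
	 * the "must be one" bits of the count field) are ORed in after.
	 */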
783 c->pcn_bufsz = (~(PCN_RXLEN) + 1) & PCN_RXLEN_BUFSZ;
784 c->pcn_bufsz |= PCN_RXLEN_MBO;
785 c->pcn_rxstat = PCN_RXSTAT_STP|PCN_RXSTAT_ENP|PCN_RXSTAT_OWN;
786
787 return(0);
788}
789
790/*
791 * A frame has been uploaded: pass the resulting mbuf chain up to
792 * the higher level protocols.
793 */
794static void pcn_rxeof(sc)
795 struct pcn_softc *sc;
796{
797 struct ether_header *eh;
798 struct mbuf *m;
799 struct ifnet *ifp;
800 struct pcn_rx_desc *cur_rx;
801 int i;
802
803 ifp = &sc->arpcom.ac_if;
804 i = sc->pcn_cdata.pcn_rx_prod;
805
806 while(PCN_OWN_RXDESC(&sc->pcn_ldata->pcn_rx_list[i])) {
807 cur_rx = &sc->pcn_ldata->pcn_rx_list[i];
808 m = sc->pcn_cdata.pcn_rx_chain[i];
809 sc->pcn_cdata.pcn_rx_chain[i] = NULL;
810
811 /*
812 * If an error occurs, update stats, clear the
813 * status word and leave the mbuf cluster in place:
814 * it should simply get re-used next time this descriptor
815 * comes up in the ring.
816 */
817 if (cur_rx->pcn_rxstat & PCN_RXSTAT_ERR) {
818 ifp->if_ierrors++;
819 pcn_newbuf(sc, i, m);
820 PCN_INC(i, PCN_RX_LIST_CNT);
821 continue;
822 }
823
824 if (pcn_newbuf(sc, i, NULL)) {
825 /* Ran out of mbufs; recycle this one. */
826 pcn_newbuf(sc, i, m);
827 ifp->if_ierrors++;
828 PCN_INC(i, PCN_RX_LIST_CNT);
829 continue;
830 }
831
832 PCN_INC(i, PCN_RX_LIST_CNT);
833
834 /* No errors; receive the packet. */
835 ifp->if_ipackets++;
836 eh = mtod(m, struct ether_header *);
837 m->m_len = m->m_pkthdr.len =
838 cur_rx->pcn_rxlen - ETHER_CRC_LEN;
839 m->m_pkthdr.rcvif = ifp;
840
841 /* Remove header from mbuf and pass it on. */
842 m_adj(m, sizeof(struct ether_header));
843 ether_input(ifp, eh, m);
844 }
845
846 sc->pcn_cdata.pcn_rx_prod = i;
847
848 return;
849}
850
851/*
852 * A frame was downloaded to the chip. It's safe for us to clean up
853 * the list buffers.
854 */
855
856static void pcn_txeof(sc)
857 struct pcn_softc *sc;
858{
859 struct pcn_tx_desc *cur_tx = NULL;
860 struct ifnet *ifp;
861 u_int32_t idx;
862
863 ifp = &sc->arpcom.ac_if;
864
865 /* Clear the timeout timer. */
866 ifp->if_timer = 0;
867
868 /*
869 * Go through our tx list and free mbufs for those
870 * frames that have been transmitted.
871 */
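	/*
	 * Walk from the consumer index toward the producer index,
	 * stopping at the first descriptor the chip has not yet handed
	 * back; only the descriptor carrying ENP holds the pointer to
	 * the mbuf chain that can now be freed.
	 */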
872 idx = sc->pcn_cdata.pcn_tx_cons;
873 while (idx != sc->pcn_cdata.pcn_tx_prod) {
874 cur_tx = &sc->pcn_ldata->pcn_tx_list[idx];
875
876 if (!PCN_OWN_TXDESC(cur_tx))
877 break;
878
879 if (!(cur_tx->pcn_txctl & PCN_TXCTL_ENP)) {
880 sc->pcn_cdata.pcn_tx_cnt--;
881 PCN_INC(idx, PCN_TX_LIST_CNT);
882 continue;
883 }
884
885 if (cur_tx->pcn_txctl & PCN_TXCTL_ERR) {
886 ifp->if_oerrors++;
887 if (cur_tx->pcn_txstat & PCN_TXSTAT_EXDEF)
888 ifp->if_collisions++;
889 if (cur_tx->pcn_txstat & PCN_TXSTAT_RTRY)
890 ifp->if_collisions++;
891 }
892
893 ifp->if_collisions +=
894 cur_tx->pcn_txstat & PCN_TXSTAT_TRC;
895
896 ifp->if_opackets++;
897 if (sc->pcn_cdata.pcn_tx_chain[idx] != NULL) {
898 m_freem(sc->pcn_cdata.pcn_tx_chain[idx]);
899 sc->pcn_cdata.pcn_tx_chain[idx] = NULL;
900 }
901
902 sc->pcn_cdata.pcn_tx_cnt--;
903 PCN_INC(idx, PCN_TX_LIST_CNT);
904 ifp->if_timer = 0;
905 }
906
907 sc->pcn_cdata.pcn_tx_cons = idx;
908
909 if (cur_tx != NULL)
910 ifp->if_flags &= ~IFF_OACTIVE;
911
912 return;
913}
914
915static void pcn_tick(xsc)
916 void *xsc;
917{
918 struct pcn_softc *sc;
919 struct mii_data *mii;
920 struct ifnet *ifp;
921
922 sc = xsc;
923 ifp = &sc->arpcom.ac_if;
924 PCN_LOCK(sc);
925
926 mii = device_get_softc(sc->pcn_miibus);
927 mii_tick(mii);
928
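	/*
	 * pcn_link caches the last observed link state.  Clear it when
	 * the PHY reports the link down; once the link comes back up,
	 * restart any output that was queued in the meantime.
	 */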
929 	if (sc->pcn_link && !(mii->mii_media_status & IFM_ACTIVE))
930 sc->pcn_link = 0;
931
932 if (!sc->pcn_link) {
933 mii_pollstat(mii);
934 if (mii->mii_media_status & IFM_ACTIVE &&
935 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
936 sc->pcn_link++;
937 if (ifp->if_snd.ifq_head != NULL)
938 pcn_start(ifp);
939 }
940
941 sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);
942
943 PCN_UNLOCK(sc);
944
945 return;
946}
947
948static void pcn_intr(arg)
949 void *arg;
950{
951 struct pcn_softc *sc;
952 struct ifnet *ifp;
953 u_int32_t status;
954
955 sc = arg;
956 ifp = &sc->arpcom.ac_if;
957
958 	/* Suppress unwanted interrupts. */
959 if (!(ifp->if_flags & IFF_UP)) {
960 pcn_stop(sc);
961 return;
962 }
963
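	/*
	 * CSR access is indirect: the register number is written to the
	 * RAP port and the data is then read or written through RDP.
	 * Writing the pending status bits back to CSR0 acknowledges
	 * (clears) them.
	 */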
964 CSR_WRITE_4(sc, PCN_IO32_RAP, PCN_CSR_CSR);
965
966 while ((status = CSR_READ_4(sc, PCN_IO32_RDP)) & PCN_CSR_INTR) {
967 CSR_WRITE_4(sc, PCN_IO32_RDP, status);
968
969 if (status & PCN_CSR_RINT)
970 pcn_rxeof(sc);
971
972 if (status & PCN_CSR_TINT)
973 pcn_txeof(sc);
974
975 if (status & PCN_CSR_ERR) {
976 pcn_init(sc);
977 break;
978 }
979 }
980
981 if (ifp->if_snd.ifq_head != NULL)
982 pcn_start(ifp);
983
984 return;
985}
986
987/*
988 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
989 * pointers to the fragment pointers.
990 */
991static int pcn_encap(sc, m_head, txidx)
992 struct pcn_softc *sc;
993 struct mbuf *m_head;
994 u_int32_t *txidx;
995{
996 struct pcn_tx_desc *f = NULL;
997 struct mbuf *m;
998 int frag, cur, cnt = 0;
999
1000 /*
1001 * Start packing the mbufs in this chain into
1002 * the fragment pointers. Stop when we run out
1003 * of fragments or hit the end of the mbuf chain.
1004 */
1005 m = m_head;
1006 cur = frag = *txidx;
1007
1008 for (m = m_head; m != NULL; m = m->m_next) {
1009 if (m->m_len != 0) {
1010 if ((PCN_TX_LIST_CNT -
1011 (sc->pcn_cdata.pcn_tx_cnt + cnt)) < 2)
1012 return(ENOBUFS);
1013 f = &sc->pcn_ldata->pcn_tx_list[frag];
1014 f->pcn_txctl = (~(m->m_len) + 1) & PCN_TXCTL_BUFSZ;
1015 f->pcn_txctl |= PCN_TXCTL_MBO;
1016 f->pcn_tbaddr = vtophys(mtod(m, vm_offset_t));
1017 if (cnt == 0)
1018 f->pcn_txctl |= PCN_TXCTL_STP;
1019 else
1020 f->pcn_txctl |= PCN_TXCTL_OWN;
1021 cur = frag;
1022 PCN_INC(frag, PCN_TX_LIST_CNT);
1023 cnt++;
1024 }
1025 }
1026
1027 if (m != NULL)
1028 return(ENOBUFS);
1029
1030 sc->pcn_cdata.pcn_tx_chain[cur] = m_head;
1031 sc->pcn_ldata->pcn_tx_list[cur].pcn_txctl |=
1032 PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT;
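	/*
	 * Set the OWN bit on the first descriptor last, so the chip
	 * cannot start DMA on a partially constructed chain.
	 */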
1033 sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN;
1034 sc->pcn_cdata.pcn_tx_cnt += cnt;
1035 *txidx = frag;
1036
1037 return(0);
1038}
1039
1040/*
1041 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1042 * to the mbuf data regions directly in the transmit lists. We also save a
1043 * copy of the pointers since the transmit list fragment pointers are
1044 * physical addresses.
1045 */
1046static void pcn_start(ifp)
1047 struct ifnet *ifp;
1048{
1049 struct pcn_softc *sc;
1050 struct mbuf *m_head = NULL;
1051 u_int32_t idx;
1052
1053 sc = ifp->if_softc;
1054
1055 PCN_LOCK(sc);
1056
1057 if (!sc->pcn_link) {
1058 PCN_UNLOCK(sc);
1059 return;
1060 }
1061
1062 idx = sc->pcn_cdata.pcn_tx_prod;
1063
1064 if (ifp->if_flags & IFF_OACTIVE) {
1065 PCN_UNLOCK(sc);
1066 return;
1067 }
1068
1069 while(sc->pcn_cdata.pcn_tx_chain[idx] == NULL) {
1070 IF_DEQUEUE(&ifp->if_snd, m_head);
1071 if (m_head == NULL)
1072 break;
1073
1074 if (pcn_encap(sc, m_head, &idx)) {
1075 IF_PREPEND(&ifp->if_snd, m_head);
1076 ifp->if_flags |= IFF_OACTIVE;
1077 break;
1078 }
1079
1080 /*
1081 * If there's a BPF listener, bounce a copy of this frame
1082 * to him.
1083 */
1084 if (ifp->if_bpf)
1085 bpf_mtap(ifp, m_head);
1086
1087 }
1088
1089 /* Transmit */
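	/*
	 * PCN_CSR_TX is presumably the transmit-demand bit, which tells
	 * the controller to poll the TX ring now rather than waiting
	 * for its next scheduled poll.
	 */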
1090 sc->pcn_cdata.pcn_tx_prod = idx;
1091 pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN);
1092
1093 /*
1094 * Set a timeout in case the chip goes out to lunch.
1095 */
1096 ifp->if_timer = 5;
1097
1098 PCN_UNLOCK(sc);
1099
1100 return;
1101}
1102
1103static void pcn_setfilt(ifp)
1104 struct ifnet *ifp;
1105{
1106 struct pcn_softc *sc;
1107
1108 sc = ifp->if_softc;
1109
1110 /* If we want promiscuous mode, set the allframes bit. */
1111 if (ifp->if_flags & IFF_PROMISC) {
1112 PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
1113 } else {
1114 PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
1115 }
1116
1117 /* Set the capture broadcast bit to capture broadcast frames. */
1118 if (ifp->if_flags & IFF_BROADCAST) {
1119 PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
1120 } else {
1121 PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
1122 }
1123
1124 return;
1125}
1126
1127static void pcn_init(xsc)
1128 void *xsc;
1129{
1130 struct pcn_softc *sc = xsc;
1131 struct ifnet *ifp = &sc->arpcom.ac_if;
1132 struct mii_data *mii = NULL;
1133
1134 PCN_LOCK(sc);
1135
1136 /*
1137 * Cancel pending I/O and free all RX/TX buffers.
1138 */
1139 pcn_stop(sc);
1140 pcn_reset(sc);
1141
1142 mii = device_get_softc(sc->pcn_miibus);
1143
1144 /* Set MAC address */
1145 pcn_csr_write(sc, PCN_CSR_PAR0,
1146 ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
1147 pcn_csr_write(sc, PCN_CSR_PAR1,
1148 ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
1149 pcn_csr_write(sc, PCN_CSR_PAR2,
1150 ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
1151
1152 /* Init circular RX list. */
1153 if (pcn_list_rx_init(sc) == ENOBUFS) {
1154 printf("pcn%d: initialization failed: no "
1155 "memory for rx buffers\n", sc->pcn_unit);
1156 pcn_stop(sc);
1157 PCN_UNLOCK(sc);
1158 return;
1159 }
1160
1161 /*
1162 * Init tx descriptors.
1163 */
1164 pcn_list_tx_init(sc);
1165
1166 /* Set up the mode register. */
1167 pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_MII);
1168
1169 /* Set up RX filter. */
1170 pcn_setfilt(ifp);
1171
1172 /*
1173 * Load the multicast filter.
1174 */
1175 pcn_setmulti(sc);
1176
1177 /*
1178 * Load the addresses of the RX and TX lists.
1179 */
1180 pcn_csr_write(sc, PCN_CSR_RXADDR0,
1181 vtophys(&sc->pcn_ldata->pcn_rx_list[0]) & 0xFFFF);
1182 pcn_csr_write(sc, PCN_CSR_RXADDR1,
1183 (vtophys(&sc->pcn_ldata->pcn_rx_list[0]) >> 16) & 0xFFFF);
1184 pcn_csr_write(sc, PCN_CSR_TXADDR0,
1185 vtophys(&sc->pcn_ldata->pcn_tx_list[0]) & 0xFFFF);
1186 pcn_csr_write(sc, PCN_CSR_TXADDR1,
1187 (vtophys(&sc->pcn_ldata->pcn_tx_list[0]) >> 16) & 0xFFFF);
1188
1189 /* Set the RX and TX ring sizes. */
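	/*
	 * The ring length CSRs appear to take the descriptor count as
	 * a two's complement negative value, hence the (~count) + 1
	 * encoding below.
	 */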
1190 pcn_csr_write(sc, PCN_CSR_RXRINGLEN, (~PCN_RX_LIST_CNT) + 1);
1191 pcn_csr_write(sc, PCN_CSR_TXRINGLEN, (~PCN_TX_LIST_CNT) + 1);
1192
1193 /* We're not using the initialization block. */
1194 pcn_csr_write(sc, PCN_CSR_IAB1, 0);
1195
1196 /* Enable fast suspend mode. */
1197 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE);
1198
1199 /*
1200 * Enable burst read and write. Also set the no underflow
1201 * bit. This will avoid transmit underruns in certain
1202 * conditions while still providing decent performance.
1203 */
1204 PCN_BCR_SETBIT(sc, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW|
1205 PCN_BUSCTL_BREAD|PCN_BUSCTL_BWRITE);
1206
1207 /* Enable graceful recovery from underflow. */
1208 PCN_CSR_SETBIT(sc, PCN_CSR_IMR, PCN_IMR_DXSUFLO);
1209
1210 /* Enable auto-padding of short TX frames. */
1211 PCN_CSR_SETBIT(sc, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX);
1212
1213 /* Disable MII autoneg (we handle this ourselves). */
1214 PCN_BCR_CLRBIT(sc, PCN_BCR_MIICTL, PCN_MIICTL_DANAS);
1215
1216 if (sc->pcn_type == Am79C978)
1217 pcn_bcr_write(sc, PCN_BCR_PHYSEL,
1218 PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA);
1219
1220 /* Enable interrupts and start the controller running. */
1221 pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START);
1222
1223 mii_mediachg(mii);
1224
1225 ifp->if_flags |= IFF_RUNNING;
1226 ifp->if_flags &= ~IFF_OACTIVE;
1227
1228 sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);
1229 PCN_UNLOCK(sc);
1230
1231 return;
1232}
1233
1234/*
1235 * Set media options.
1236 */
1237static int pcn_ifmedia_upd(ifp)
1238 struct ifnet *ifp;
1239{
1240 struct pcn_softc *sc;
1241 struct mii_data *mii;
1242
1243 sc = ifp->if_softc;
1244 mii = device_get_softc(sc->pcn_miibus);
1245
1246 sc->pcn_link = 0;
1247 if (mii->mii_instance) {
1248 struct mii_softc *miisc;
1249 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
1250 miisc = LIST_NEXT(miisc, mii_list))
1251 mii_phy_reset(miisc);
1252 }
1253 mii_mediachg(mii);
1254
1255 return(0);
1256}
1257
1258/*
1259 * Report current media status.
1260 */
1261static void pcn_ifmedia_sts(ifp, ifmr)
1262 struct ifnet *ifp;
1263 struct ifmediareq *ifmr;
1264{
1265 struct pcn_softc *sc;
1266 struct mii_data *mii;
1267
1268 sc = ifp->if_softc;
1269
1270 mii = device_get_softc(sc->pcn_miibus);
1271 mii_pollstat(mii);
1272 ifmr->ifm_active = mii->mii_media_active;
1273 ifmr->ifm_status = mii->mii_media_status;
1274
1275 return;
1276}
1277
1278static int pcn_ioctl(ifp, command, data)
1279 struct ifnet *ifp;
1280 u_long command;
1281 caddr_t data;
1282{
1283 struct pcn_softc *sc = ifp->if_softc;
1284 struct ifreq *ifr = (struct ifreq *) data;
1285 struct mii_data *mii = NULL;
1286 int error = 0;
1287
1288 PCN_LOCK(sc);
1289
1290 switch(command) {
1291 case SIOCSIFADDR:
1292 case SIOCGIFADDR:
1293 case SIOCSIFMTU:
1294 error = ether_ioctl(ifp, command, data);
1295 break;
1296 case SIOCSIFFLAGS:
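		/*
		 * If the interface is up and running and only the
		 * promiscuous flag has changed, reprogram the RX filter
		 * while the chip is briefly suspended (SPND) instead of
		 * doing a full reinit, then restart it.
		 */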
1297 if (ifp->if_flags & IFF_UP) {
1298 if (ifp->if_flags & IFF_RUNNING &&
1299 ifp->if_flags & IFF_PROMISC &&
1300 !(sc->pcn_if_flags & IFF_PROMISC)) {
1301 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
1302 PCN_EXTCTL1_SPND);
1303 pcn_setfilt(ifp);
1304 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
1305 PCN_EXTCTL1_SPND);
1306 pcn_csr_write(sc, PCN_CSR_CSR,
1307 PCN_CSR_INTEN|PCN_CSR_START);
1308 } else if (ifp->if_flags & IFF_RUNNING &&
1309 !(ifp->if_flags & IFF_PROMISC) &&
1310 sc->pcn_if_flags & IFF_PROMISC) {
1311 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
1312 PCN_EXTCTL1_SPND);
1313 pcn_setfilt(ifp);
1314 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
1315 PCN_EXTCTL1_SPND);
1316 pcn_csr_write(sc, PCN_CSR_CSR,
1317 PCN_CSR_INTEN|PCN_CSR_START);
1318 } else if (!(ifp->if_flags & IFF_RUNNING))
1319 pcn_init(sc);
1320 } else {
1321 if (ifp->if_flags & IFF_RUNNING)
1322 pcn_stop(sc);
1323 }
1324 sc->pcn_if_flags = ifp->if_flags;
1325 error = 0;
1326 break;
1327 case SIOCADDMULTI:
1328 case SIOCDELMULTI:
1329 pcn_setmulti(sc);
1330 error = 0;
1331 break;
1332 case SIOCGIFMEDIA:
1333 case SIOCSIFMEDIA:
1334 mii = device_get_softc(sc->pcn_miibus);
1335 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1336 break;
1337 default:
1338 error = EINVAL;
1339 break;
1340 }
1341
1342 PCN_UNLOCK(sc);
1343
1344 return(error);
1345}
1346
1347static void pcn_watchdog(ifp)
1348 struct ifnet *ifp;
1349{
1350 struct pcn_softc *sc;
1351
1352 sc = ifp->if_softc;
1353
1354 PCN_LOCK(sc);
1355
1356 ifp->if_oerrors++;
1357 printf("pcn%d: watchdog timeout\n", sc->pcn_unit);
1358
1359 pcn_stop(sc);
1360 pcn_reset(sc);
1361 pcn_init(sc);
1362
1363 if (ifp->if_snd.ifq_head != NULL)
1364 pcn_start(ifp);
1365
1366 PCN_UNLOCK(sc);
1367
1368 return;
1369}
1370
1371/*
1372 * Stop the adapter and free any mbufs allocated to the
1373 * RX and TX lists.
1374 */
1375static void pcn_stop(sc)
1376 struct pcn_softc *sc;
1377{
1378 register int i;
1379 struct ifnet *ifp;
1380
1381 ifp = &sc->arpcom.ac_if;
1382 PCN_LOCK(sc);
1383 ifp->if_timer = 0;
1384
1385 untimeout(pcn_tick, sc, sc->pcn_stat_ch);
1386 PCN_CSR_SETBIT(sc, PCN_CSR_CSR, PCN_CSR_STOP);
1387 sc->pcn_link = 0;
1388
1389 /*
1390 * Free data in the RX lists.
1391 */
1392 for (i = 0; i < PCN_RX_LIST_CNT; i++) {
1393 if (sc->pcn_cdata.pcn_rx_chain[i] != NULL) {
1394 m_freem(sc->pcn_cdata.pcn_rx_chain[i]);
1395 sc->pcn_cdata.pcn_rx_chain[i] = NULL;
1396 }
1397 }
1398 bzero((char *)&sc->pcn_ldata->pcn_rx_list,
1399 sizeof(sc->pcn_ldata->pcn_rx_list));
1400
1401 /*
1402 * Free the TX list buffers.
1403 */
1404 for (i = 0; i < PCN_TX_LIST_CNT; i++) {
1405 if (sc->pcn_cdata.pcn_tx_chain[i] != NULL) {
1406 m_freem(sc->pcn_cdata.pcn_tx_chain[i]);
1407 sc->pcn_cdata.pcn_tx_chain[i] = NULL;
1408 }
1409 }
1410
1411 bzero((char *)&sc->pcn_ldata->pcn_tx_list,
1412 sizeof(sc->pcn_ldata->pcn_tx_list));
1413
1414 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1415 PCN_UNLOCK(sc);
1416
1417 return;
1418}
1419
1420/*
1421 * Stop all chip I/O so that the kernel's probe routines don't
1422 * get confused by errant DMAs when rebooting.
1423 */
1424static void pcn_shutdown(dev)
1425 device_t dev;
1426{
1427 struct pcn_softc *sc;
1428
1429 sc = device_get_softc(dev);
1430
1431 PCN_LOCK(sc);
1432 pcn_reset(sc);
1433 pcn_stop(sc);
1434 PCN_UNLOCK(sc);
1435
1436 return;
1437}