if_rl.c revision 229093
1/*-
2 * Copyright (c) 1997, 1998
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/9/sys/pci/if_rl.c 229093 2011-12-31 14:12:12Z hselasky $");
35
36/*
37 * RealTek 8129/8139 PCI NIC driver
38 *
39 * Supports several extremely cheap PCI 10/100 adapters based on
40 * the RealTek chipset. Datasheets can be obtained from
41 * www.realtek.com.tw.
42 *
43 * Written by Bill Paul <wpaul@ctr.columbia.edu>
44 * Electrical Engineering Department
45 * Columbia University, New York City
46 */
47/*
48 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
49 * probably the worst PCI ethernet controller ever made, with the possible
50 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
51 * DMA, but it has a terrible interface that nullifies any performance
52 * gains that bus-master DMA usually offers.
53 *
54 * For transmission, the chip offers a series of four TX descriptor
55 * registers. Each transmit frame must be in a contiguous buffer, aligned
56 * on a longword (32-bit) boundary. This means we almost always have to
57 * do mbuf copies in order to transmit a frame, except in the unlikely
58 * case where a) the packet fits into a single mbuf, and b) the packet
59 * is 32-bit aligned within the mbuf's data area. The presence of only
60 * four descriptor registers means that we can never have more than four
61 * packets queued for transmission at any one time.
62 *
63 * Reception is not much better. The driver has to allocate a single large
64 * buffer area (up to 64K in size) into which the chip will DMA received
65 * frames. Because we don't know where within this region received packets
66 * will begin or end, we have no choice but to copy data from the buffer
67 * area into mbufs in order to pass the packets up to the higher protocol
68 * levels.
69 *
70 * It's impossible given this rotten design to really achieve decent
71 * performance at 100Mbps, unless you happen to have a 400MHz PII or
72 * some equally overmuscled CPU to drive it.
73 *
74 * On the bright side, the 8139 does have a built-in PHY, although
75 * rather than using an MDIO serial interface like most other NICs, the
76 * PHY registers are directly accessible through the 8139's register
77 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
78 * filter.
79 *
80 * The 8129 chip is an older version of the 8139 that uses an external PHY
81 * chip. The 8129 has a serial MDIO interface for accessing the MII where
82 * the 8139 lets you directly access the on-board PHY registers. We need
83 * to select which interface to use depending on the chip type.
84 */
85
86#ifdef HAVE_KERNEL_OPTION_HEADERS
87#include "opt_device_polling.h"
88#endif
89
90#include <sys/param.h>
91#include <sys/endian.h>
92#include <sys/systm.h>
93#include <sys/sockio.h>
94#include <sys/mbuf.h>
95#include <sys/malloc.h>
96#include <sys/kernel.h>
97#include <sys/module.h>
98#include <sys/socket.h>
99#include <sys/sysctl.h>
100
101#include <net/if.h>
102#include <net/if_arp.h>
103#include <net/ethernet.h>
104#include <net/if_dl.h>
105#include <net/if_media.h>
106#include <net/if_types.h>
107
108#include <net/bpf.h>
109
110#include <machine/bus.h>
111#include <machine/resource.h>
112#include <sys/bus.h>
113#include <sys/rman.h>
114
115#include <dev/mii/mii.h>
116#include <dev/mii/mii_bitbang.h>
117#include <dev/mii/miivar.h>
118
119#include <dev/pci/pcireg.h>
120#include <dev/pci/pcivar.h>
121
122MODULE_DEPEND(rl, pci, 1, 1, 1);
123MODULE_DEPEND(rl, ether, 1, 1, 1);
124MODULE_DEPEND(rl, miibus, 1, 1, 1);
125
126/* "device miibus" required.  See GENERIC if you get errors here. */
127#include "miibus_if.h"
128
129#include <pci/if_rlreg.h>
130
131/*
132 * Various supported device vendors/types and their names.
133 */
134static const struct rl_type rl_devs[] = {
135	{ RT_VENDORID, RT_DEVICEID_8129, RL_8129,
136		"RealTek 8129 10/100BaseTX" },
137	{ RT_VENDORID, RT_DEVICEID_8139, RL_8139,
138		"RealTek 8139 10/100BaseTX" },
139	{ RT_VENDORID, RT_DEVICEID_8139D, RL_8139,
140		"RealTek 8139 10/100BaseTX" },
141	{ RT_VENDORID, RT_DEVICEID_8138, RL_8139,
142		"RealTek 8139 10/100BaseTX CardBus" },
143	{ RT_VENDORID, RT_DEVICEID_8100, RL_8139,
144		"RealTek 8100 10/100BaseTX" },
145	{ ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
146		"Accton MPX 5030/5038 10/100BaseTX" },
147	{ DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
148		"Delta Electronics 8139 10/100BaseTX" },
149	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
150		"Addtron Technology 8139 10/100BaseTX" },
151	{ DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
152		"D-Link DFE-530TX+ 10/100BaseTX" },
153	{ DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
154		"D-Link DFE-690TXD 10/100BaseTX" },
155	{ NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
156		"Nortel Networks 10/100BaseTX" },
157	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
158		"Corega FEther CB-TXD" },
159	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
160		"Corega FEtherII CB-TXD" },
161	{ PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
162		"Peppercon AG ROL-F" },
163	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139,
164		"Planex FNW-3603-TX" },
165	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
166		"Planex FNW-3800-TX" },
167	{ CP_VENDORID, RT_DEVICEID_8139, RL_8139,
168		"Compaq HNE-300" },
169	{ LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
170		"LevelOne FPC-0106TX" },
171	{ EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
172		"Edimax EP-4103DL CardBus" }
173};
174
175static int rl_attach(device_t);
176static int rl_detach(device_t);
177static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int);
178static int rl_dma_alloc(struct rl_softc *);
179static void rl_dma_free(struct rl_softc *);
180static void rl_eeprom_putbyte(struct rl_softc *, int);
181static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *);
182static int rl_encap(struct rl_softc *, struct mbuf **);
183static int rl_list_tx_init(struct rl_softc *);
184static int rl_list_rx_init(struct rl_softc *);
185static int rl_ifmedia_upd(struct ifnet *);
186static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
187static int rl_ioctl(struct ifnet *, u_long, caddr_t);
188static void rl_intr(void *);
189static void rl_init(void *);
190static void rl_init_locked(struct rl_softc *sc);
191static int rl_miibus_readreg(device_t, int, int);
192static void rl_miibus_statchg(device_t);
193static int rl_miibus_writereg(device_t, int, int, int);
194#ifdef DEVICE_POLLING
195static int rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
196static int rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
197#endif
198static int rl_probe(device_t);
199static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
200static void rl_reset(struct rl_softc *);
201static int rl_resume(device_t);
202static int rl_rxeof(struct rl_softc *);
203static void rl_rxfilter(struct rl_softc *);
204static int rl_shutdown(device_t);
205static void rl_start(struct ifnet *);
206static void rl_start_locked(struct ifnet *);
207static void rl_stop(struct rl_softc *);
208static int rl_suspend(device_t);
209static void rl_tick(void *);
210static void rl_txeof(struct rl_softc *);
211static void rl_watchdog(struct rl_softc *);
212static void rl_setwol(struct rl_softc *);
213static void rl_clrwol(struct rl_softc *);
214
215/*
216 * MII bit-bang glue
217 */
218static uint32_t rl_mii_bitbang_read(device_t);
219static void rl_mii_bitbang_write(device_t, uint32_t);
220
221static const struct mii_bitbang_ops rl_mii_bitbang_ops = {
222	rl_mii_bitbang_read,
223	rl_mii_bitbang_write,
224	{
225		RL_MII_DATAOUT,	/* MII_BIT_MDO */
226		RL_MII_DATAIN,	/* MII_BIT_MDI */
227		RL_MII_CLK,	/* MII_BIT_MDC */
228		RL_MII_DIR,	/* MII_BIT_DIR_HOST_PHY */
229		0,		/* MII_BIT_DIR_PHY_HOST */
230	}
231};
232
233static device_method_t rl_methods[] = {
234	/* Device interface */
235	DEVMETHOD(device_probe,		rl_probe),
236	DEVMETHOD(device_attach,	rl_attach),
237	DEVMETHOD(device_detach,	rl_detach),
238	DEVMETHOD(device_suspend,	rl_suspend),
239	DEVMETHOD(device_resume,	rl_resume),
240	DEVMETHOD(device_shutdown,	rl_shutdown),
241
242	/* MII interface */
243	DEVMETHOD(miibus_readreg,	rl_miibus_readreg),
244	DEVMETHOD(miibus_writereg,	rl_miibus_writereg),
245	DEVMETHOD(miibus_statchg,	rl_miibus_statchg),
246
247	DEVMETHOD_END
248};
249
250static driver_t rl_driver = {
251	"rl",
252	rl_methods,
253	sizeof(struct rl_softc)
254};
255
256static devclass_t rl_devclass;
257
258DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0);
259DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0);
260DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0);
261
262#define EE_SET(x)					\
263	CSR_WRITE_1(sc, RL_EECMD,			\
264		CSR_READ_1(sc, RL_EECMD) | x)
265
266#define EE_CLR(x)					\
267	CSR_WRITE_1(sc, RL_EECMD,			\
268		CSR_READ_1(sc, RL_EECMD) & ~x)
269
270/*
271 * Send a read command and address to the EEPROM, check for ACK.
272 */
273static void
274rl_eeprom_putbyte(struct rl_softc *sc, int addr)
275{
276	register int		d, i;
277
278	d = addr | sc->rl_eecmd_read;
279
280	/*
281	 * Feed in each bit and strobe the clock.
282	 */
283	for (i = 0x400; i; i >>= 1) {
284		if (d & i) {
285			EE_SET(RL_EE_DATAIN);
286		} else {
287			EE_CLR(RL_EE_DATAIN);
288		}
289		DELAY(100);
290		EE_SET(RL_EE_CLK);
291		DELAY(150);
292		EE_CLR(RL_EE_CLK);
293		DELAY(100);
294	}
295}
296
297/*
298 * Read a word of data stored in the EEPROM at address 'addr.'
299 */
300static void
301rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
302{
303	register int		i;
304	uint16_t		word = 0;
305
306	/* Enter EEPROM access mode. */
307	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
308
309	/*
310	 * Send address of word we want to read.
311	 */
312	rl_eeprom_putbyte(sc, addr);
313
314	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
315
316	/*
317	 * Start reading bits from EEPROM.
318	 */
319	for (i = 0x8000; i; i >>= 1) {
320		EE_SET(RL_EE_CLK);
321		DELAY(100);
322		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
323			word |= i;
324		EE_CLR(RL_EE_CLK);
325		DELAY(100);
326	}
327
328	/* Turn off EEPROM access mode. */
329	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
330
331	*dest = word;
332}
333
334/*
335 * Read a sequence of words from the EEPROM.
336 */
337static void
338rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
339{
340	int			i;
341	uint16_t		word = 0, *ptr;
342
343	for (i = 0; i < cnt; i++) {
344		rl_eeprom_getword(sc, off + i, &word);
345		ptr = (uint16_t *)(dest + (i * 2));
346		if (swap)
347			*ptr = ntohs(word);
348		else
349			*ptr = word;
350	}
351}
352
353/*
354 * Read the MII serial port for the MII bit-bang module.
355 */
356static uint32_t
357rl_mii_bitbang_read(device_t dev)
358{
359	struct rl_softc *sc;
360	uint32_t val;
361
362	sc = device_get_softc(dev);
363
364	val = CSR_READ_1(sc, RL_MII);
365	CSR_BARRIER(sc, RL_MII, 1,
366	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
367
368	return (val);
369}
370
371/*
372 * Write the MII serial port for the MII bit-bang module.
373 */
374static void
375rl_mii_bitbang_write(device_t dev, uint32_t val)
376{
377	struct rl_softc *sc;
378
379	sc = device_get_softc(dev);
380
381	CSR_WRITE_1(sc, RL_MII, val);
382	CSR_BARRIER(sc, RL_MII, 1,
383	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
384}
385
386static int
387rl_miibus_readreg(device_t dev, int phy, int reg)
388{
389	struct rl_softc		*sc;
390	uint16_t		rl8139_reg;
391
392	sc = device_get_softc(dev);
393
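	/*
	 * The 8139 has no MII serial interface; its PHY registers live in
	 * the chip's own register space, so map the generic MII register
	 * numbers onto the corresponding 8139 registers here.
	 */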
394	if (sc->rl_type == RL_8139) {
395		switch (reg) {
396		case MII_BMCR:
397			rl8139_reg = RL_BMCR;
398			break;
399		case MII_BMSR:
400			rl8139_reg = RL_BMSR;
401			break;
402		case MII_ANAR:
403			rl8139_reg = RL_ANAR;
404			break;
405		case MII_ANER:
406			rl8139_reg = RL_ANER;
407			break;
408		case MII_ANLPAR:
409			rl8139_reg = RL_LPAR;
410			break;
411		case MII_PHYIDR1:
412		case MII_PHYIDR2:
413			return (0);
414		/*
415		 * Allow the rlphy driver to read the media status
416		 * register. If we have a link partner which does not
417		 * support NWAY, this is the register which will tell
418		 * us the results of parallel detection.
419		 */
420		case RL_MEDIASTAT:
421			return (CSR_READ_1(sc, RL_MEDIASTAT));
422		default:
423			device_printf(sc->rl_dev, "bad phy register\n");
424			return (0);
425		}
426		return (CSR_READ_2(sc, rl8139_reg));
427	}
428
429	return (mii_bitbang_readreg(dev, &rl_mii_bitbang_ops, phy, reg));
430}
431
432static int
433rl_miibus_writereg(device_t dev, int phy, int reg, int data)
434{
435	struct rl_softc		*sc;
436	uint16_t		rl8139_reg;
437
438	sc = device_get_softc(dev);
439
440	if (sc->rl_type == RL_8139) {
441		switch (reg) {
442		case MII_BMCR:
443			rl8139_reg = RL_BMCR;
444			break;
445		case MII_BMSR:
446			rl8139_reg = RL_BMSR;
447			break;
448		case MII_ANAR:
449			rl8139_reg = RL_ANAR;
450			break;
451		case MII_ANER:
452			rl8139_reg = RL_ANER;
453			break;
454		case MII_ANLPAR:
455			rl8139_reg = RL_LPAR;
456			break;
457		case MII_PHYIDR1:
458		case MII_PHYIDR2:
459			return (0);
460			break;
461		default:
462			device_printf(sc->rl_dev, "bad phy register\n");
463			return (0);
464		}
465		CSR_WRITE_2(sc, rl8139_reg, data);
466		return (0);
467	}
468
469	mii_bitbang_writereg(dev, &rl_mii_bitbang_ops, phy, reg, data);
470
471	return (0);
472}
473
474static void
475rl_miibus_statchg(device_t dev)
476{
477	struct rl_softc		*sc;
478	struct ifnet		*ifp;
479	struct mii_data		*mii;
480
481	sc = device_get_softc(dev);
482	mii = device_get_softc(sc->rl_miibus);
483	ifp = sc->rl_ifp;
484	if (mii == NULL || ifp == NULL ||
485	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
486		return;
487
488	sc->rl_flags &= ~RL_FLAG_LINK;
489	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
490	    (IFM_ACTIVE | IFM_AVALID)) {
491		switch (IFM_SUBTYPE(mii->mii_media_active)) {
492		case IFM_10_T:
493		case IFM_100_TX:
494			sc->rl_flags |= RL_FLAG_LINK;
495			break;
496		default:
497			break;
498		}
499	}
500	/*
501	 * RealTek controllers do not provide any interface to
502	 * Tx/Rx MACs for resolved speed, duplex and flow-control
503	 * parameters.
504	 */
505}
506
507/*
508 * Program the 64-bit multicast hash filter.
509 */
510static void
511rl_rxfilter(struct rl_softc *sc)
512{
513	struct ifnet		*ifp = sc->rl_ifp;
514	int			h = 0;
515	uint32_t		hashes[2] = { 0, 0 };
516	struct ifmultiaddr	*ifma;
517	uint32_t		rxfilt;
518
519	RL_LOCK_ASSERT(sc);
520
521	rxfilt = CSR_READ_4(sc, RL_RXCFG);
522	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
523	    RL_RXCFG_RX_MULTI);
524	/* Always accept frames destined for this host. */
525	rxfilt |= RL_RXCFG_RX_INDIV;
526	/* Set capture broadcast bit to capture broadcast frames. */
527	if (ifp->if_flags & IFF_BROADCAST)
528		rxfilt |= RL_RXCFG_RX_BROAD;
529	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
530		rxfilt |= RL_RXCFG_RX_MULTI;
531		if (ifp->if_flags & IFF_PROMISC)
532			rxfilt |= RL_RXCFG_RX_ALLPHYS;
533		hashes[0] = 0xFFFFFFFF;
534		hashes[1] = 0xFFFFFFFF;
535	} else {
536		/* Now program new ones. */
537		if_maddr_rlock(ifp);
538		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
539			if (ifma->ifma_addr->sa_family != AF_LINK)
540				continue;
541			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
542			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
543			if (h < 32)
544				hashes[0] |= (1 << h);
545			else
546				hashes[1] |= (1 << (h - 32));
547		}
548		if_maddr_runlock(ifp);
549		if (hashes[0] != 0 || hashes[1] != 0)
550			rxfilt |= RL_RXCFG_RX_MULTI;
551	}
552
553	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
554	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
555	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
556}
557
558static void
559rl_reset(struct rl_softc *sc)
560{
561	register int		i;
562
563	RL_LOCK_ASSERT(sc);
564
565	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
566
567	for (i = 0; i < RL_TIMEOUT; i++) {
568		DELAY(10);
569		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
570			break;
571	}
572	if (i == RL_TIMEOUT)
573		device_printf(sc->rl_dev, "reset never completed!\n");
574}
575
576/*
577 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
578 * IDs against our list and return a device name if we find a match.
579 */
580static int
581rl_probe(device_t dev)
582{
583	const struct rl_type	*t;
584	uint16_t		devid, revid, vendor;
585	int			i;
586
587	vendor = pci_get_vendor(dev);
588	devid = pci_get_device(dev);
589	revid = pci_get_revid(dev);
590
591	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
592		if (revid == 0x20) {
593			/* 8139C+, let re(4) take care of this device. */
594			return (ENXIO);
595		}
596	}
597	t = rl_devs;
598	for (i = 0; i < sizeof(rl_devs) / sizeof(rl_devs[0]); i++, t++) {
599		if (vendor == t->rl_vid && devid == t->rl_did) {
600			device_set_desc(dev, t->rl_name);
601			return (BUS_PROBE_DEFAULT);
602		}
603	}
604
605	return (ENXIO);
606}
607
608struct rl_dmamap_arg {
609	bus_addr_t	rl_busaddr;
610};
611
612static void
613rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
614{
615	struct rl_dmamap_arg	*ctx;
616
617	if (error != 0)
618		return;
619
620	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
621
622	ctx = (struct rl_dmamap_arg *)arg;
623	ctx->rl_busaddr = segs[0].ds_addr;
624}
625
626/*
627 * Attach the interface. Allocate softc structures, do ifmedia
628 * setup and ethernet/BPF attach.
629 */
630static int
631rl_attach(device_t dev)
632{
633	uint8_t			eaddr[ETHER_ADDR_LEN];
634	uint16_t		as[3];
635	struct ifnet		*ifp;
636	struct rl_softc		*sc;
637	const struct rl_type	*t;
638	struct sysctl_ctx_list	*ctx;
639	struct sysctl_oid_list	*children;
640	int			error = 0, hwrev, i, phy, pmc, rid;
641	int			prefer_iomap, unit;
642	uint16_t		rl_did = 0;
643	char			tn[32];
644
645	sc = device_get_softc(dev);
646	unit = device_get_unit(dev);
647	sc->rl_dev = dev;
648
649	sc->rl_twister_enable = 0;
650	snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit);
651	TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable);
652	ctx = device_get_sysctl_ctx(sc->rl_dev);
653	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
654	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD,
655	   &sc->rl_twister_enable, 0, "");
656
657	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
658	    MTX_DEF);
659	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
660
661	pci_enable_busmaster(dev);
662
663
664	/*
665	 * Map control/status registers.
666	 * Default to using PIO access for this driver. On SMP systems,
667	 * there appear to be problems with memory mapped mode: it looks
668	 * like doing too many memory mapped access back to back in rapid
669	 * succession can hang the bus. I'm inclined to blame this on
670	 * crummy design/construction on the part of RealTek. Memory
671	 * mapped mode does appear to work on uniprocessor systems though.
672	 */
673	prefer_iomap = 1;
674	snprintf(tn, sizeof(tn), "dev.rl.%d.prefer_iomap", unit);
675	TUNABLE_INT_FETCH(tn, &prefer_iomap);
676	if (prefer_iomap) {
677		sc->rl_res_id = PCIR_BAR(0);
678		sc->rl_res_type = SYS_RES_IOPORT;
679		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
680		    &sc->rl_res_id, RF_ACTIVE);
681	}
682	if (prefer_iomap == 0 || sc->rl_res == NULL) {
683		sc->rl_res_id = PCIR_BAR(1);
684		sc->rl_res_type = SYS_RES_MEMORY;
685		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
686		    &sc->rl_res_id, RF_ACTIVE);
687	}
688	if (sc->rl_res == NULL) {
689		device_printf(dev, "couldn't map ports/memory\n");
690		error = ENXIO;
691		goto fail;
692	}
693
694#ifdef notdef
695	/*
696	 * Detect the Realtek 8139B. For some reason, this chip is very
697	 * unstable when left to autoselect the media.
698	 * The best workaround is to set the device to the required
699	 * media type or to set it to the 10 Meg speed.
700	 */
701	if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF)
702		device_printf(dev,
703"Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n");
704#endif
705
706	sc->rl_btag = rman_get_bustag(sc->rl_res);
707	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
708
709	/* Allocate interrupt */
710	rid = 0;
711	sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
712	    RF_SHAREABLE | RF_ACTIVE);
713
714	if (sc->rl_irq[0] == NULL) {
715		device_printf(dev, "couldn't map interrupt\n");
716		error = ENXIO;
717		goto fail;
718	}
719
720	/*
721	 * Reset the adapter. Only take the lock here as it's needed in
722	 * order to call rl_reset().
723	 */
724	RL_LOCK(sc);
725	rl_reset(sc);
726	RL_UNLOCK(sc);
727
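	/*
	 * Work out which EEPROM read command to use: try the 6-bit
	 * address form first and fall back to the 8-bit form unless
	 * word 0 reads back as the 8129 device ID.
	 */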
728	sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
729	rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0);
730	if (rl_did != 0x8129)
731		sc->rl_eecmd_read = RL_EECMD_READ_8BIT;
732
733	/*
734	 * Get station address from the EEPROM.
735	 */
736	rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
737	for (i = 0; i < 3; i++) {
738		eaddr[(i * 2) + 0] = as[i] & 0xff;
739		eaddr[(i * 2) + 1] = as[i] >> 8;
740	}
741
742	/*
743	 * Now read the exact device type from the EEPROM to find
744	 * out if it's an 8129 or 8139.
745	 */
746	rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0);
747
748	t = rl_devs;
749	sc->rl_type = 0;
750	while (t->rl_name != NULL) {
751		if (rl_did == t->rl_did) {
752			sc->rl_type = t->rl_basetype;
753			break;
754		}
755		t++;
756	}
757
758	if (sc->rl_type == 0) {
759		device_printf(dev, "unknown device ID: %x, assuming 8139\n",
760		    rl_did);
761		sc->rl_type = RL_8139;
762		/*
763		 * Read RL_IDR register to get ethernet address as accessing
764		 * EEPROM may not extract correct address.
765		 */
766		for (i = 0; i < ETHER_ADDR_LEN; i++)
767			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
768	}
769
770	if ((error = rl_dma_alloc(sc)) != 0)
771		goto fail;
772
773	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
774	if (ifp == NULL) {
775		device_printf(dev, "can not if_alloc()\n");
776		error = ENOSPC;
777		goto fail;
778	}
779
780#define	RL_PHYAD_INTERNAL	0
781
782	/* Do MII setup */
783	phy = MII_PHY_ANY;
784	if (sc->rl_type == RL_8139)
785		phy = RL_PHYAD_INTERNAL;
786	error = mii_attach(dev, &sc->rl_miibus, ifp, rl_ifmedia_upd,
787	    rl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
788	if (error != 0) {
789		device_printf(dev, "attaching PHYs failed\n");
790		goto fail;
791	}
792
793	ifp->if_softc = sc;
794	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
795	ifp->if_mtu = ETHERMTU;
796	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
797	ifp->if_ioctl = rl_ioctl;
798	ifp->if_start = rl_start;
799	ifp->if_init = rl_init;
800	ifp->if_capabilities = IFCAP_VLAN_MTU;
801	/* Check WOL for RTL8139B or newer controllers. */
802	if (sc->rl_type == RL_8139 &&
803	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
804		hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
805		switch (hwrev) {
806		case RL_HWREV_8139B:
807		case RL_HWREV_8130:
808		case RL_HWREV_8139C:
809		case RL_HWREV_8139D:
810		case RL_HWREV_8101:
811		case RL_HWREV_8100:
812			ifp->if_capabilities |= IFCAP_WOL;
813			/* Disable WOL. */
814			rl_clrwol(sc);
815			break;
816		default:
817			break;
818		}
819	}
820	ifp->if_capenable = ifp->if_capabilities;
821#ifdef DEVICE_POLLING
822	ifp->if_capabilities |= IFCAP_POLLING;
823#endif
824	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
825	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
826	IFQ_SET_READY(&ifp->if_snd);
827
828	/*
829	 * Call MI attach routine.
830	 */
831	ether_ifattach(ifp, eaddr);
832
833	/* Hook interrupt last to avoid having to lock softc */
834	error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
835	    NULL, rl_intr, sc, &sc->rl_intrhand[0]);
836	if (error) {
837		device_printf(sc->rl_dev, "couldn't set up irq\n");
838		ether_ifdetach(ifp);
839	}
840
841fail:
842	if (error)
843		rl_detach(dev);
844
845	return (error);
846}
847
848/*
849 * Shutdown hardware and free up resources. This can be called any
850 * time after the mutex has been initialized. It is called in both
851 * the error case in attach and the normal detach case so it needs
852 * to be careful about only freeing resources that have actually been
853 * allocated.
854 */
855static int
856rl_detach(device_t dev)
857{
858	struct rl_softc		*sc;
859	struct ifnet		*ifp;
860
861	sc = device_get_softc(dev);
862	ifp = sc->rl_ifp;
863
864	KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
865
866#ifdef DEVICE_POLLING
867	if (ifp->if_capenable & IFCAP_POLLING)
868		ether_poll_deregister(ifp);
869#endif
870	/* These should only be active if attach succeeded */
871	if (device_is_attached(dev)) {
872		RL_LOCK(sc);
873		rl_stop(sc);
874		RL_UNLOCK(sc);
875		callout_drain(&sc->rl_stat_callout);
876		ether_ifdetach(ifp);
877	}
878#if 0
879	sc->suspended = 1;
880#endif
881	if (sc->rl_miibus)
882		device_delete_child(dev, sc->rl_miibus);
883	bus_generic_detach(dev);
884
885	if (sc->rl_intrhand[0])
886		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
887	if (sc->rl_irq[0])
888		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]);
889	if (sc->rl_res)
890		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
891		    sc->rl_res);
892
893	if (ifp)
894		if_free(ifp);
895
896	rl_dma_free(sc);
897
898	mtx_destroy(&sc->rl_mtx);
899
900	return (0);
901}
902
903static int
904rl_dma_alloc(struct rl_softc *sc)
905{
906	struct rl_dmamap_arg	ctx;
907	int			error, i;
908
909	/*
910	 * Allocate the parent bus DMA tag appropriate for PCI.
911	 */
912	error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev),	/* parent */
913	    1, 0,			/* alignment, boundary */
914	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
915	    BUS_SPACE_MAXADDR,		/* highaddr */
916	    NULL, NULL,			/* filter, filterarg */
917	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
918	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
919	    0,				/* flags */
920	    NULL, NULL,			/* lockfunc, lockarg */
921	    &sc->rl_parent_tag);
922	if (error) {
923		device_printf(sc->rl_dev,
924		    "failed to create parent DMA tag.\n");
925		goto fail;
926	}
927	/* Create DMA tag for Rx memory block. */
928	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
929	    RL_RX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
930	    BUS_SPACE_MAXADDR,		/* lowaddr */
931	    BUS_SPACE_MAXADDR,		/* highaddr */
932	    NULL, NULL,			/* filter, filterarg */
933	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1,	/* maxsize,nsegments */
934	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ,	/* maxsegsize */
935	    0,				/* flags */
936	    NULL, NULL,			/* lockfunc, lockarg */
937	    &sc->rl_cdata.rl_rx_tag);
938	if (error) {
939		device_printf(sc->rl_dev,
940		    "failed to create Rx memory block DMA tag.\n");
941		goto fail;
942	}
943	/* Create DMA tag for Tx buffer. */
944	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
945	    RL_TX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
946	    BUS_SPACE_MAXADDR,		/* lowaddr */
947	    BUS_SPACE_MAXADDR,		/* highaddr */
948	    NULL, NULL,			/* filter, filterarg */
949	    MCLBYTES, 1,		/* maxsize, nsegments */
950	    MCLBYTES,			/* maxsegsize */
951	    0,				/* flags */
952	    NULL, NULL,			/* lockfunc, lockarg */
953	    &sc->rl_cdata.rl_tx_tag);
954	if (error) {
955		device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
956		goto fail;
957	}
958
959	/*
960	 * Allocate DMA'able memory and load DMA map for Rx memory block.
961	 */
962	error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
963	    (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
964	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
965	if (error != 0) {
966		device_printf(sc->rl_dev,
967		    "failed to allocate Rx DMA memory block.\n");
968		goto fail;
969	}
970	ctx.rl_busaddr = 0;
971	error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
972	    sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
973	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
974	    BUS_DMA_NOWAIT);
975	if (error != 0 || ctx.rl_busaddr == 0) {
976		device_printf(sc->rl_dev,
977		    "could not load Rx DMA memory block.\n");
978		goto fail;
979	}
980	sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;
981
982	/* Create DMA maps for Tx buffers. */
983	for (i = 0; i < RL_TX_LIST_CNT; i++) {
984		sc->rl_cdata.rl_tx_chain[i] = NULL;
985		sc->rl_cdata.rl_tx_dmamap[i] = NULL;
986		error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
987		    &sc->rl_cdata.rl_tx_dmamap[i]);
988		if (error != 0) {
989			device_printf(sc->rl_dev,
990			    "could not create Tx dmamap.\n");
991			goto fail;
992		}
993	}
994
995	/* Leave a few bytes before the start of the RX ring buffer. */
996	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
997	sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;
998
999fail:
1000	return (error);
1001}
1002
1003static void
1004rl_dma_free(struct rl_softc *sc)
1005{
1006	int			i;
1007
1008	/* Rx memory block. */
1009	if (sc->rl_cdata.rl_rx_tag != NULL) {
1010		if (sc->rl_cdata.rl_rx_dmamap != NULL)
1011			bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
1012			    sc->rl_cdata.rl_rx_dmamap);
1013		if (sc->rl_cdata.rl_rx_dmamap != NULL &&
1014		    sc->rl_cdata.rl_rx_buf_ptr != NULL)
1015			bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
1016			    sc->rl_cdata.rl_rx_buf_ptr,
1017			    sc->rl_cdata.rl_rx_dmamap);
1018		sc->rl_cdata.rl_rx_buf_ptr = NULL;
1019		sc->rl_cdata.rl_rx_buf = NULL;
1020		sc->rl_cdata.rl_rx_dmamap = NULL;
1021		bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
1022		sc->rl_cdata.rl_rx_tag = NULL;
1023	}
1024
1025	/* Tx buffers. */
1026	if (sc->rl_cdata.rl_tx_tag != NULL) {
1027		for (i = 0; i < RL_TX_LIST_CNT; i++) {
1028			if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
1029				bus_dmamap_destroy(
1030				    sc->rl_cdata.rl_tx_tag,
1031				    sc->rl_cdata.rl_tx_dmamap[i]);
1032				sc->rl_cdata.rl_tx_dmamap[i] = NULL;
1033			}
1034		}
1035		bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
1036		sc->rl_cdata.rl_tx_tag = NULL;
1037	}
1038
1039	if (sc->rl_parent_tag != NULL) {
1040		bus_dma_tag_destroy(sc->rl_parent_tag);
1041		sc->rl_parent_tag = NULL;
1042	}
1043}
1044
1045/*
1046 * Initialize the transmit descriptors.
1047 */
1048static int
1049rl_list_tx_init(struct rl_softc *sc)
1050{
1051	struct rl_chain_data	*cd;
1052	int			i;
1053
1054	RL_LOCK_ASSERT(sc);
1055
1056	cd = &sc->rl_cdata;
1057	for (i = 0; i < RL_TX_LIST_CNT; i++) {
1058		cd->rl_tx_chain[i] = NULL;
1059		CSR_WRITE_4(sc,
1060		    RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
1061	}
1062
1063	sc->rl_cdata.cur_tx = 0;
1064	sc->rl_cdata.last_tx = 0;
1065
1066	return (0);
1067}
1068
1069static int
1070rl_list_rx_init(struct rl_softc *sc)
1071{
1072
1073	RL_LOCK_ASSERT(sc);
1074
1075	bzero(sc->rl_cdata.rl_rx_buf_ptr,
1076	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
1077	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
1078	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1079
1080	return (0);
1081}
1082
1083/*
1084 * A frame has been uploaded: pass the resulting mbuf chain up to
1085 * the higher level protocols.
1086 *
1087 * You know there's something wrong with a PCI bus-master chip design
1088 * when you have to use m_devget().
1089 *
1090 * The receive operation is badly documented in the datasheet, so I'll
1091 * attempt to document it here. The driver provides a buffer area and
1092 * places its base address in the RX buffer start address register.
1093 * The chip then begins copying frames into the RX buffer. Each frame
1094 * is preceded by a 32-bit RX status word which specifies the length
1095 * of the frame and certain other status bits. Each frame (starting with
1096 * the status word) is also 32-bit aligned. The frame length is in the
1097 * first 16 bits of the status word; the lower 15 bits correspond with
1098 * the 'rx status register' mentioned in the datasheet.
1099 *
1100 * Note: to make the Alpha happy, the frame payload needs to be aligned
1101 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
1102 * as the offset argument to m_devget().
1103 */
1104static int
1105rl_rxeof(struct rl_softc *sc)
1106{
1107	struct mbuf		*m;
1108	struct ifnet		*ifp = sc->rl_ifp;
1109	uint8_t			*rxbufpos;
1110	int			total_len = 0;
1111	int			wrap = 0;
1112	int			rx_npkts = 0;
1113	uint32_t		rxstat;
1114	uint16_t		cur_rx;
1115	uint16_t		limit;
1116	uint16_t		max_bytes, rx_bytes = 0;
1117
1118	RL_LOCK_ASSERT(sc);
1119
1120	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
1121	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1122
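	/*
	 * The driver keeps the CURRXADDR register 16 bytes behind the
	 * real read pointer, hence the +16 here and the -16 when the
	 * updated pointer is written back below.
	 */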
1123	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;
1124
1125	/* Do not try to read past this point. */
1126	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;
1127
1128	if (limit < cur_rx)
1129		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
1130	else
1131		max_bytes = limit - cur_rx;
1132
1133	while ((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
1134#ifdef DEVICE_POLLING
1135		if (ifp->if_capenable & IFCAP_POLLING) {
1136			if (sc->rxcycles <= 0)
1137				break;
1138			sc->rxcycles--;
1139		}
1140#endif
1141		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
1142		rxstat = le32toh(*(uint32_t *)rxbufpos);
1143
1144		/*
1145		 * Here's a totally undocumented fact for you. When the
1146		 * RealTek chip is in the process of copying a packet into
1147		 * RAM for you, the length will be 0xfff0. If you spot a
1148		 * packet header with this value, you need to stop. The
1149		 * datasheet makes absolutely no mention of this and
1150		 * RealTek should be shot for this.
1151		 */
1152		total_len = rxstat >> 16;
1153		if (total_len == RL_RXSTAT_UNFINISHED)
1154			break;
1155
1156		if (!(rxstat & RL_RXSTAT_RXOK) ||
1157		    total_len < ETHER_MIN_LEN ||
1158		    total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
1159			ifp->if_ierrors++;
1160			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1161			rl_init_locked(sc);
1162			return (rx_npkts);
1163		}
1164
1165		/* No errors; receive the packet. */
1166		rx_bytes += total_len + 4;
1167
1168		/*
1169		 * XXX The RealTek chip includes the CRC with every
1170		 * received frame, and there's no way to turn this
1171		 * behavior off (at least, I can't find anything in
1172		 * the manual that explains how to do it) so we have
1173		 * to trim off the CRC manually.
1174		 */
1175		total_len -= ETHER_CRC_LEN;
1176
1177		/*
1178		 * Avoid trying to read more bytes than we know
1179		 * the chip has prepared for us.
1180		 */
1181		if (rx_bytes > max_bytes)
1182			break;
1183
1184		rxbufpos = sc->rl_cdata.rl_rx_buf +
1185			((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN);
1186		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
1187			rxbufpos = sc->rl_cdata.rl_rx_buf;
1188
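		/*
		 * If the frame ran past the end of the ring, the tail of
		 * the mbuf is patched up below with the bytes the chip
		 * placed at the start of the buffer.
		 */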
1189		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
1190		if (total_len > wrap) {
1191			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
1192			    NULL);
1193			if (m != NULL)
1194				m_copyback(m, wrap, total_len - wrap,
1195					sc->rl_cdata.rl_rx_buf);
1196			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
1197		} else {
1198			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
1199			    NULL);
1200			cur_rx += total_len + 4 + ETHER_CRC_LEN;
1201		}
1202
1203		/* Round up to 32-bit boundary. */
1204		cur_rx = (cur_rx + 3) & ~3;
1205		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);
1206
1207		if (m == NULL) {
1208			ifp->if_iqdrops++;
1209			continue;
1210		}
1211
1212		ifp->if_ipackets++;
1213		RL_UNLOCK(sc);
1214		(*ifp->if_input)(ifp, m);
1215		RL_LOCK(sc);
1216		rx_npkts++;
1217	}
1218
1219	/* No need to sync Rx memory block as we didn't modify it. */
1220	return (rx_npkts);
1221}
1222
1223/*
1224 * A frame was downloaded to the chip. It's safe for us to clean up
1225 * the list buffers.
1226 */
1227static void
1228rl_txeof(struct rl_softc *sc)
1229{
1230	struct ifnet		*ifp = sc->rl_ifp;
1231	uint32_t		txstat;
1232
1233	RL_LOCK_ASSERT(sc);
1234
1235	/*
1236	 * Go through our tx list and free mbufs for those
1237	 * frames that have been uploaded.
1238	 */
1239	do {
1240		if (RL_LAST_TXMBUF(sc) == NULL)
1241			break;
1242		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
1243		if (!(txstat & (RL_TXSTAT_TX_OK|
1244		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
1245			break;
1246
1247		ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;
1248
1249		bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
1250		    BUS_DMASYNC_POSTWRITE);
1251		bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc));
1252		m_freem(RL_LAST_TXMBUF(sc));
1253		RL_LAST_TXMBUF(sc) = NULL;
1254		/*
1255		 * If there was a transmit underrun, bump the TX threshold.
1256		 * Make sure not to overflow the 63 * 32 bytes (2016) we can
1257		 * address with the 6 available bits.
1258		 */
1259		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
1260		    (sc->rl_txthresh < 2016))
1261			sc->rl_txthresh += 32;
1262		if (txstat & RL_TXSTAT_TX_OK)
1263			ifp->if_opackets++;
1264		else {
1265			int			oldthresh;
1266			ifp->if_oerrors++;
1267			if ((txstat & RL_TXSTAT_TXABRT) ||
1268			    (txstat & RL_TXSTAT_OUTOFWIN))
1269				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1270			oldthresh = sc->rl_txthresh;
1271			/* error recovery */
1272			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1273			rl_init_locked(sc);
1274			/* restore original threshold */
1275			sc->rl_txthresh = oldthresh;
1276			return;
1277		}
1278		RL_INC(sc->rl_cdata.last_tx);
1279		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1280	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);
1281
1282	if (RL_LAST_TXMBUF(sc) == NULL)
1283		sc->rl_watchdog_timer = 0;
1284}
1285
1286static void
1287rl_twister_update(struct rl_softc *sc)
1288{
1289	uint16_t linktest;
1290	/*
1291	 * Table provided by RealTek (Kinston <shangh@realtek.com.tw>) for
1292	 * Linux driver.  Values undocumented otherwise.
1293	 */
1294	static const uint32_t param[4][4] = {
1295		{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
1296		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
1297		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
1298		{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
1299	};
1300
1301	/*
1302	 * Tune the so-called twister registers of the RTL8139.  These
1303	 * are used to compensate for impedance mismatches.  The
1304	 * method for tuning these registers is undocumented and the
1305	 * following procedure is collected from public sources.
1306	 */
1307	switch (sc->rl_twister)
1308	{
1309	case CHK_LINK:
1310		/*
1311		 * If we have a sufficient link, then we can proceed in
1312		 * the state machine to the next stage.  If not, then
1313		 * disable further tuning after writing sane defaults.
1314		 */
1315		if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) {
1316			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD);
1317			sc->rl_twister = FIND_ROW;
1318		} else {
1319			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD);
1320			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
1321			CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
1322			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
1323			sc->rl_twister = DONE;
1324		}
1325		break;
1326	case FIND_ROW:
1327		/*
1328		 * Read how long it took to see the echo to find the tuning
1329		 * row to use.
1330		 */
1331		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
1332		if (linktest == RL_CSCFG_ROW3)
1333			sc->rl_twist_row = 3;
1334		else if (linktest == RL_CSCFG_ROW2)
1335			sc->rl_twist_row = 2;
1336		else if (linktest == RL_CSCFG_ROW1)
1337			sc->rl_twist_row = 1;
1338		else
1339			sc->rl_twist_row = 0;
1340		sc->rl_twist_col = 0;
1341		sc->rl_twister = SET_PARAM;
1342		break;
1343	case SET_PARAM:
1344		if (sc->rl_twist_col == 0)
1345			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
1346		CSR_WRITE_4(sc, RL_PARA7C,
1347		    param[sc->rl_twist_row][sc->rl_twist_col]);
1348		if (++sc->rl_twist_col == 4) {
1349			if (sc->rl_twist_row == 3)
1350				sc->rl_twister = RECHK_LONG;
1351			else
1352				sc->rl_twister = DONE;
1353		}
1354		break;
1355	case RECHK_LONG:
1356		/*
1357		 * For long cables, we have to double check to make sure we
1358		 * don't mistune.
1359		 */
1360		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
1361		if (linktest == RL_CSCFG_ROW3)
1362			sc->rl_twister = DONE;
1363		else {
1364			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE);
1365			sc->rl_twister = RETUNE;
1366		}
1367		break;
1368	case RETUNE:
1369		/* Retune for a shorter cable (try column 2) */
1370		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
1371		CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
1372		CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
1373		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
1374		sc->rl_twist_row--;
1375		sc->rl_twist_col = 0;
1376		sc->rl_twister = SET_PARAM;
1377		break;
1378
1379	case DONE:
1380		break;
1381	}
1382
1383}
1384
1385static void
1386rl_tick(void *xsc)
1387{
1388	struct rl_softc		*sc = xsc;
1389	struct mii_data		*mii;
1390	int ticks;
1391
1392	RL_LOCK_ASSERT(sc);
1393	/*
1394	 * If we're doing the twister cable calibration, then we need to defer
1395	 * watchdog timeouts.  This is a no-op in normal operations, but
1396	 * can falsely trigger when the cable calibration takes a while and
1397	 * there was traffic ready to go when rl was started.
1398	 *
1399	 * We don't defer mii_tick since that updates the mii status, which
1400	 * helps the twister process, at least according to similar patches
1401	 * for the Linux driver I found online while doing the fixes.  Worst
1402	 * case is a few extra mii reads during calibration.
1403	 */
1404	mii = device_get_softc(sc->rl_miibus);
1405	mii_tick(mii);
1406	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
1407		rl_miibus_statchg(sc->rl_dev);
1408	if (sc->rl_twister_enable) {
1409		if (sc->rl_twister == DONE)
1410			rl_watchdog(sc);
1411		else
1412			rl_twister_update(sc);
1413		if (sc->rl_twister == DONE)
1414			ticks = hz;
1415		else
1416			ticks = hz / 10;
1417	} else {
1418		rl_watchdog(sc);
1419		ticks = hz;
1420	}
1421
1422	callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc);
1423}
1424
1425#ifdef DEVICE_POLLING
1426static int
1427rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1428{
1429	struct rl_softc *sc = ifp->if_softc;
1430	int rx_npkts = 0;
1431
1432	RL_LOCK(sc);
1433	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1434		rx_npkts = rl_poll_locked(ifp, cmd, count);
1435	RL_UNLOCK(sc);
1436	return (rx_npkts);
1437}
1438
1439static int
1440rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1441{
1442	struct rl_softc *sc = ifp->if_softc;
1443	int rx_npkts;
1444
1445	RL_LOCK_ASSERT(sc);
1446
1447	sc->rxcycles = count;
1448	rx_npkts = rl_rxeof(sc);
1449	rl_txeof(sc);
1450
1451	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1452		rl_start_locked(ifp);
1453
1454	if (cmd == POLL_AND_CHECK_STATUS) {
1455		uint16_t	status;
1456
1457		/* We should also check the status register. */
1458		status = CSR_READ_2(sc, RL_ISR);
1459		if (status == 0xffff)
1460			return (rx_npkts);
1461		if (status != 0)
1462			CSR_WRITE_2(sc, RL_ISR, status);
1463
1464		/* XXX We should check behaviour on receiver stalls. */
1465
1466		if (status & RL_ISR_SYSTEM_ERR) {
1467			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1468			rl_init_locked(sc);
1469		}
1470	}
1471	return (rx_npkts);
1472}
1473#endif /* DEVICE_POLLING */
1474
1475static void
1476rl_intr(void *arg)
1477{
1478	struct rl_softc		*sc = arg;
1479	struct ifnet		*ifp = sc->rl_ifp;
1480	uint16_t		status;
1481	int			count;
1482
1483	RL_LOCK(sc);
1484
1485	if (sc->suspended)
1486		goto done_locked;
1487
1488#ifdef DEVICE_POLLING
1489	if (ifp->if_capenable & IFCAP_POLLING)
1490		goto done_locked;
1491#endif
1492
1493	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1494		goto done_locked2;
1495	status = CSR_READ_2(sc, RL_ISR);
1496	if (status == 0xffff || (status & RL_INTRS) == 0)
1497		goto done_locked;
1498	/*
1499	 * Ours, disable further interrupts.
1500	 */
1501	CSR_WRITE_2(sc, RL_IMR, 0);
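	/* Bound the work done per interrupt to 16 passes through the loop. */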
1502	for (count = 16; count > 0; count--) {
1503		CSR_WRITE_2(sc, RL_ISR, status);
1504		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1505			if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR))
1506				rl_rxeof(sc);
1507			if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR))
1508				rl_txeof(sc);
1509			if (status & RL_ISR_SYSTEM_ERR) {
1510				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1511				rl_init_locked(sc);
1512				RL_UNLOCK(sc);
1513				return;
1514			}
1515		}
1516		status = CSR_READ_2(sc, RL_ISR);
1517		/* If the card has gone away, the read returns 0xffff. */
1518		if (status == 0xffff || (status & RL_INTRS) == 0)
1519			break;
1520	}
1521
1522	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1523		rl_start_locked(ifp);
1524
1525done_locked2:
1526	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1527		CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1528done_locked:
1529	RL_UNLOCK(sc);
1530}
1531
1532/*
1533 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1534 * pointers to the fragment pointers.
1535 */
1536static int
1537rl_encap(struct rl_softc *sc, struct mbuf **m_head)
1538{
1539	struct mbuf		*m;
1540	bus_dma_segment_t	txsegs[1];
1541	int			error, nsegs, padlen;
1542
1543	RL_LOCK_ASSERT(sc);
1544
1545	m = *m_head;
1546	padlen = 0;
1547	/*
1548	 * Hardware doesn't auto-pad, so we have to make sure to
1549	 * pad short frames out to the minimum frame length.
1550	 */
1551	if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
1552		padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
1553	/*
1554	 * The RealTek is brain damaged and wants longword-aligned
1555	 * TX buffers, plus we can only have one fragment buffer
1556	 * per packet. We have to copy pretty much all the time.
1557	 */
1558	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
1559	    (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
1560		m = m_defrag(*m_head, M_DONTWAIT);
1561		if (m == NULL) {
1562			m_freem(*m_head);
1563			*m_head = NULL;
1564			return (ENOMEM);
1565		}
1566	}
1567	*m_head = m;
1568
1569	if (padlen > 0) {
1570		/*
1571		 * Make security-conscious people happy: zero out the
1572		 * bytes in the pad area, since we don't know what
1573		 * this mbuf cluster buffer's previous user might
1574		 * have left in it.
1575		 */
1576		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1577		m->m_pkthdr.len += padlen;
1578		m->m_len = m->m_pkthdr.len;
1579	}
1580
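	/*
	 * The frame is now a single contiguous, padded buffer; load it for
	 * DMA and hand its one physical segment address to the chip below.
	 */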
1581	error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
1582	    RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
1583	if (error != 0)
1584		return (error);
1585	if (nsegs == 0) {
1586		m_freem(*m_head);
1587		*m_head = NULL;
1588		return (EIO);
1589	}
1590
1591	RL_CUR_TXMBUF(sc) = m;
1592	bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
1593	    BUS_DMASYNC_PREWRITE);
1594	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));
1595
1596	return (0);
1597}
1598
1599/*
1600 * Main transmit routine.
1601 */
1602static void
1603rl_start(struct ifnet *ifp)
1604{
1605	struct rl_softc		*sc = ifp->if_softc;
1606
1607	RL_LOCK(sc);
1608	rl_start_locked(ifp);
1609	RL_UNLOCK(sc);
1610}
1611
1612static void
1613rl_start_locked(struct ifnet *ifp)
1614{
1615	struct rl_softc		*sc = ifp->if_softc;
1616	struct mbuf		*m_head = NULL;
1617
1618	RL_LOCK_ASSERT(sc);
1619
1620	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1621	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
1622		return;
1623
1624	while (RL_CUR_TXMBUF(sc) == NULL) {
1625
1626		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1627
1628		if (m_head == NULL)
1629			break;
1630
1631		if (rl_encap(sc, &m_head)) {
1632			if (m_head == NULL)
1633				break;
1634			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1635			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1636			break;
1637		}
1638
1639		/* Pass a copy of this mbuf chain to the bpf subsystem. */
1640		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));
1641
1642		/* Transmit the frame. */
1643		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
1644		    RL_TXTHRESH(sc->rl_txthresh) |
1645		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);
1646
1647		RL_INC(sc->rl_cdata.cur_tx);
1648
1649		/* Set a timeout in case the chip goes out to lunch. */
1650		sc->rl_watchdog_timer = 5;
1651	}
1652
1653	/*
1654	 * We broke out of the loop because all our TX slots are
1655	 * full. Mark the NIC as busy until it drains some of the
1656	 * packets from the queue.
1657	 */
1658	if (RL_CUR_TXMBUF(sc) != NULL)
1659		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1660}
1661
1662static void
1663rl_init(void *xsc)
1664{
1665	struct rl_softc		*sc = xsc;
1666
1667	RL_LOCK(sc);
1668	rl_init_locked(sc);
1669	RL_UNLOCK(sc);
1670}
1671
1672static void
1673rl_init_locked(struct rl_softc *sc)
1674{
1675	struct ifnet		*ifp = sc->rl_ifp;
1676	struct mii_data		*mii;
1677	uint32_t		eaddr[2];
1678
1679	RL_LOCK_ASSERT(sc);
1680
1681	mii = device_get_softc(sc->rl_miibus);
1682
1683	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1684		return;
1685
1686	/*
1687	 * Cancel pending I/O and free all RX/TX buffers.
1688	 */
1689	rl_stop(sc);
1690
1691	rl_reset(sc);
1692	if (sc->rl_twister_enable) {
1693		/*
1694		 * Reset twister register tuning state.  The twister
1695		 * registers and their tuning are undocumented, but
1696		 * are necessary to cope with bad links.  rl_twister =
1697		 * DONE here will disable this entirely.
1698		 */
1699		sc->rl_twister = CHK_LINK;
1700	}
1701
1702	/*
1703	 * Init our MAC address.  Even though the chipset
1704	 * documentation doesn't mention it, we need to enter "Config
1705	 * register write enable" mode to modify the ID registers.
1706	 */
1707	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
1708	bzero(eaddr, sizeof(eaddr));
1709	bcopy(IF_LLADDR(sc->rl_ifp), eaddr, ETHER_ADDR_LEN);
1710	CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]);
1711	CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]);
1712	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1713
1714	/* Init the RX memory block pointer register. */
1715	CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr +
1716	    RL_RX_8139_BUF_RESERVE);
1717	/* Init TX descriptors. */
1718	rl_list_tx_init(sc);
1719	/* Init Rx memory block. */
1720	rl_list_rx_init(sc);
1721
1722	/*
1723	 * Enable transmit and receive.
1724	 */
1725	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
1726
1727	/*
1728	 * Set the initial TX and RX configuration.
1729	 */
1730	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1731	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
1732
1733	/* Set RX filter. */
1734	rl_rxfilter(sc);
1735
1736#ifdef DEVICE_POLLING
1737	/* Disable interrupts if we are polling. */
1738	if (ifp->if_capenable & IFCAP_POLLING)
1739		CSR_WRITE_2(sc, RL_IMR, 0);
1740	else
1741#endif
1742	/* Enable interrupts. */
1743	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1744
1745	/* Set initial TX threshold */
1746	sc->rl_txthresh = RL_TX_THRESH_INIT;
1747
1748	/* Start RX/TX process. */
1749	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
1750
1751	/* Enable receiver and transmitter. */
1752	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
1753
1754	sc->rl_flags &= ~RL_FLAG_LINK;
1755	mii_mediachg(mii);
1756
1757	CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);
1758
1759	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1760	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1761
1762	callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
1763}
1764
1765/*
1766 * Set media options.
1767 */
1768static int
1769rl_ifmedia_upd(struct ifnet *ifp)
1770{
1771	struct rl_softc		*sc = ifp->if_softc;
1772	struct mii_data		*mii;
1773
1774	mii = device_get_softc(sc->rl_miibus);
1775
1776	RL_LOCK(sc);
1777	mii_mediachg(mii);
1778	RL_UNLOCK(sc);
1779
1780	return (0);
1781}
1782
1783/*
1784 * Report current media status.
1785 */
1786static void
1787rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1788{
1789	struct rl_softc		*sc = ifp->if_softc;
1790	struct mii_data		*mii;
1791
1792	mii = device_get_softc(sc->rl_miibus);
1793
1794	RL_LOCK(sc);
1795	mii_pollstat(mii);
1796	ifmr->ifm_active = mii->mii_media_active;
1797	ifmr->ifm_status = mii->mii_media_status;
1798	RL_UNLOCK(sc);
1799}
1800
1801static int
1802rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1803{
1804	struct ifreq		*ifr = (struct ifreq *)data;
1805	struct mii_data		*mii;
1806	struct rl_softc		*sc = ifp->if_softc;
1807	int			error = 0, mask;
1808
1809	switch (command) {
1810	case SIOCSIFFLAGS:
1811		RL_LOCK(sc);
1812		if (ifp->if_flags & IFF_UP) {
1813			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1814			    ((ifp->if_flags ^ sc->rl_if_flags) &
1815			    (IFF_PROMISC | IFF_ALLMULTI)))
1816				rl_rxfilter(sc);
1817			else
1818				rl_init_locked(sc);
1819		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1820			rl_stop(sc);
1821		sc->rl_if_flags = ifp->if_flags;
1822		RL_UNLOCK(sc);
1823		break;
1824	case SIOCADDMULTI:
1825	case SIOCDELMULTI:
1826		RL_LOCK(sc);
1827		rl_rxfilter(sc);
1828		RL_UNLOCK(sc);
1829		break;
1830	case SIOCGIFMEDIA:
1831	case SIOCSIFMEDIA:
1832		mii = device_get_softc(sc->rl_miibus);
1833		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1834		break;
1835	case SIOCSIFCAP:
1836		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1837#ifdef DEVICE_POLLING
1838		if (ifr->ifr_reqcap & IFCAP_POLLING &&
1839		    !(ifp->if_capenable & IFCAP_POLLING)) {
1840			error = ether_poll_register(rl_poll, ifp);
1841			if (error)
1842				return (error);
1843			RL_LOCK(sc);
1844			/* Disable interrupts */
1845			CSR_WRITE_2(sc, RL_IMR, 0x0000);
1846			ifp->if_capenable |= IFCAP_POLLING;
1847			RL_UNLOCK(sc);
1848			return (error);
1849
1850		}
1851		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
1852		    ifp->if_capenable & IFCAP_POLLING) {
1853			error = ether_poll_deregister(ifp);
1854			/* Enable interrupts. */
1855			RL_LOCK(sc);
1856			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1857			ifp->if_capenable &= ~IFCAP_POLLING;
1858			RL_UNLOCK(sc);
1859			return (error);
1860		}
1861#endif /* DEVICE_POLLING */
1862		if ((mask & IFCAP_WOL) != 0 &&
1863		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
1864			if ((mask & IFCAP_WOL_UCAST) != 0)
1865				ifp->if_capenable ^= IFCAP_WOL_UCAST;
1866			if ((mask & IFCAP_WOL_MCAST) != 0)
1867				ifp->if_capenable ^= IFCAP_WOL_MCAST;
1868			if ((mask & IFCAP_WOL_MAGIC) != 0)
1869				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1870		}
1871		break;
1872	default:
1873		error = ether_ioctl(ifp, command, data);
1874		break;
1875	}
1876
1877	return (error);
1878}
1879
1880static void
1881rl_watchdog(struct rl_softc *sc)
1882{
1883
1884	RL_LOCK_ASSERT(sc);
1885
1886	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer > 0)
1887		return;
1888
1889	device_printf(sc->rl_dev, "watchdog timeout\n");
1890	sc->rl_ifp->if_oerrors++;
1891
1892	rl_txeof(sc);
1893	rl_rxeof(sc);
1894	sc->rl_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1895	rl_init_locked(sc);
1896}
1897
1898/*
1899 * Stop the adapter and free any mbufs allocated to the
1900 * RX and TX lists.
1901 */
1902static void
1903rl_stop(struct rl_softc *sc)
1904{
1905	register int		i;
1906	struct ifnet		*ifp = sc->rl_ifp;
1907
1908	RL_LOCK_ASSERT(sc);
1909
1910	sc->rl_watchdog_timer = 0;
1911	callout_stop(&sc->rl_stat_callout);
1912	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1913	sc->rl_flags &= ~RL_FLAG_LINK;
1914
1915	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
1916	CSR_WRITE_2(sc, RL_IMR, 0x0000);
1917	for (i = 0; i < RL_TIMEOUT; i++) {
1918		DELAY(10);
1919		if ((CSR_READ_1(sc, RL_COMMAND) &
1920		    (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0)
1921			break;
1922	}
1923	if (i == RL_TIMEOUT)
1924		device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n");
1925
1926	/*
1927	 * Free the TX list buffers.
1928	 */
1929	for (i = 0; i < RL_TX_LIST_CNT; i++) {
1930		if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
1931			if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
1932				bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
1933				    sc->rl_cdata.rl_tx_dmamap[i],
1934				    BUS_DMASYNC_POSTWRITE);
1935				bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
1936				    sc->rl_cdata.rl_tx_dmamap[i]);
1937				m_freem(sc->rl_cdata.rl_tx_chain[i]);
1938				sc->rl_cdata.rl_tx_chain[i] = NULL;
1939			}
1940			CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
1941			    0x0000000);
1942		}
1943	}
1944}
1945
1946/*
1947 * Device suspend routine.  Stop the interface and save some PCI
1948 * settings in case the BIOS doesn't restore them properly on
1949 * resume.
1950 */
1951static int
1952rl_suspend(device_t dev)
1953{
1954	struct rl_softc		*sc;
1955
1956	sc = device_get_softc(dev);
1957
1958	RL_LOCK(sc);
1959	rl_stop(sc);
1960	rl_setwol(sc);
1961	sc->suspended = 1;
1962	RL_UNLOCK(sc);
1963
1964	return (0);
1965}
1966
1967/*
1968 * Device resume routine.  Restore some PCI settings in case the BIOS
1969 * doesn't, re-enable busmastering, and restart the interface if
1970 * appropriate.
1971 */
1972static int
1973rl_resume(device_t dev)
1974{
1975	struct rl_softc		*sc;
1976	struct ifnet		*ifp;
1977	int			pmc;
1978	uint16_t		pmstat;
1979
1980	sc = device_get_softc(dev);
1981	ifp = sc->rl_ifp;
1982
1983	RL_LOCK(sc);
1984
1985	if ((ifp->if_capabilities & IFCAP_WOL) != 0 &&
1986	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
1987		/* Disable PME and clear PME status. */
1988		pmstat = pci_read_config(sc->rl_dev,
1989		    pmc + PCIR_POWER_STATUS, 2);
1990		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1991			pmstat &= ~PCIM_PSTAT_PMEENABLE;
1992			pci_write_config(sc->rl_dev,
1993			    pmc + PCIR_POWER_STATUS, pmstat, 2);
1994		}
1995		/*
1996		 * Clear WOL matching such that normal Rx filtering
1997		 * wouldn't interfere with WOL patterns.
1998		 */
1999		rl_clrwol(sc);
2000	}
2001
2002	/* reinitialize interface if necessary */
2003	if (ifp->if_flags & IFF_UP)
2004		rl_init_locked(sc);
2005
2006	sc->suspended = 0;
2007
2008	RL_UNLOCK(sc);
2009
2010	return (0);
2011}
2012
2013/*
2014 * Stop all chip I/O so that the kernel's probe routines don't
2015 * get confused by errant DMAs when rebooting.
2016 */
2017static int
2018rl_shutdown(device_t dev)
2019{
2020	struct rl_softc		*sc;
2021
2022	sc = device_get_softc(dev);
2023
2024	RL_LOCK(sc);
2025	rl_stop(sc);
2026	/*
2027	 * Mark interface as down since otherwise we will panic if
2028	 * interrupt comes in later on, which can happen in some
2029	 * cases.
2030	 */
2031	sc->rl_ifp->if_flags &= ~IFF_UP;
2032	rl_setwol(sc);
2033	RL_UNLOCK(sc);
2034
2035	return (0);
2036}
2037
2038static void
2039rl_setwol(struct rl_softc *sc)
2040{
2041	struct ifnet		*ifp;
2042	int			pmc;
2043	uint16_t		pmstat;
2044	uint8_t			v;
2045
2046	RL_LOCK_ASSERT(sc);
2047
2048	ifp = sc->rl_ifp;
2049	if ((ifp->if_capabilities & IFCAP_WOL) == 0)
2050		return;
2051	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
2052		return;
2053
2054	/* Enable config register write. */
2055	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
2056
2057	/* Enable PME. */
2058	v = CSR_READ_1(sc, RL_CFG1);
2059	v &= ~RL_CFG1_PME;
2060	if ((ifp->if_capenable & IFCAP_WOL) != 0)
2061		v |= RL_CFG1_PME;
2062	CSR_WRITE_1(sc, RL_CFG1, v);
2063
2064	v = CSR_READ_1(sc, RL_CFG3);
2065	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
2066	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
2067		v |= RL_CFG3_WOL_MAGIC;
2068	CSR_WRITE_1(sc, RL_CFG3, v);
2069
2070	/* Config register write done. */
2071	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2072
2073	v = CSR_READ_1(sc, RL_CFG5);
2074	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
2075	v &= ~RL_CFG5_WOL_LANWAKE;
2076	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
2077		v |= RL_CFG5_WOL_UCAST;
2078	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
2079		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
2080	if ((ifp->if_capenable & IFCAP_WOL) != 0)
2081		v |= RL_CFG5_WOL_LANWAKE;
2082	CSR_WRITE_1(sc, RL_CFG5, v);
2083	/* Request PME if WOL is requested. */
2084	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
2085	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
2086	if ((ifp->if_capenable & IFCAP_WOL) != 0)
2087		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2088	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
2089}
2090
2091static void
2092rl_clrwol(struct rl_softc *sc)
2093{
2094	struct ifnet		*ifp;
2095	uint8_t			v;
2096
2097	ifp = sc->rl_ifp;
2098	if ((ifp->if_capabilities & IFCAP_WOL) == 0)
2099		return;
2100
2101	/* Enable config register write. */
2102	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
2103
2104	v = CSR_READ_1(sc, RL_CFG3);
2105	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
2106	CSR_WRITE_1(sc, RL_CFG3, v);
2107
2108	/* Config register write done. */
2109	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2110
2111	v = CSR_READ_1(sc, RL_CFG5);
2112	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
2113	v &= ~RL_CFG5_WOL_LANWAKE;
2114	CSR_WRITE_1(sc, RL_CFG5, v);
2115}
2116