if_rl.c revision 257176
166458Sdfr/*-
2218822Sdim * Copyright (c) 1997, 1998
366458Sdfr *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
466458Sdfr *
566458Sdfr * Redistribution and use in source and binary forms, with or without
6221271Smarcel * modification, are permitted provided that the following conditions
766458Sdfr * are met:
866458Sdfr * 1. Redistributions of source code must retain the above copyright
966458Sdfr *    notice, this list of conditions and the following disclaimer.
10105377Smarcel * 2. Redistributions in binary form must reproduce the above copyright
11115055Smarcel *    notice, this list of conditions and the following disclaimer in the
12219758Smarcel *    documentation and/or other materials provided with the distribution.
13219758Smarcel * 3. All advertising materials mentioning features or use of this software
14219758Smarcel *    must display the following acknowledgement:
15219758Smarcel *	This product includes software developed by Bill Paul.
16221271Smarcel * 4. Neither the name of the author nor the names of any co-contributors
17221271Smarcel *    may be used to endorse or promote products derived from this software
18219758Smarcel *    without specific prior written permission.
19219758Smarcel *
20219758Smarcel * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21219758Smarcel * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22219758Smarcel * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23219758Smarcel * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24219758Smarcel * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25219758Smarcel * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26219758Smarcel * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27219758Smarcel * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28115055Smarcel * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29115055Smarcel * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30115055Smarcel * THE POSSIBILITY OF SUCH DAMAGE.
31115055Smarcel */
32115055Smarcel
33115055Smarcel#include <sys/cdefs.h>
34115055Smarcel__FBSDID("$FreeBSD: head/sys/pci/if_rl.c 257176 2013-10-26 17:58:36Z glebius $");
35115055Smarcel
36115055Smarcel/*
37115055Smarcel * RealTek 8129/8139 PCI NIC driver
38115055Smarcel *
39115055Smarcel * Supports several extremely cheap PCI 10/100 adapters based on
40115055Smarcel * the RealTek chipset. Datasheets can be obtained from
41115055Smarcel * www.realtek.com.tw.
42115055Smarcel *
43115055Smarcel * Written by Bill Paul <wpaul@ctr.columbia.edu>
44115055Smarcel * Electrical Engineering Department
45115055Smarcel * Columbia University, New York City
46115055Smarcel */
47115055Smarcel/*
48115055Smarcel * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
49219758Smarcel * probably the worst PCI ethernet controller ever made, with the possible
50219758Smarcel * exception of the FEAST chip made by SMC. The 8139 supports bus-master
51219758Smarcel * DMA, but it has a terrible interface that nullifies any performance
52219758Smarcel * gains that bus-master DMA usually offers.
53115055Smarcel *
54115055Smarcel * For transmission, the chip offers a series of four TX descriptor
55115055Smarcel * registers. Each transmit frame must be in a contiguous buffer, aligned
56115055Smarcel * on a longword (32-bit) boundary. This means we almost always have to
57115055Smarcel * do mbuf copies in order to transmit a frame, except in the unlikely
58219758Smarcel * case where a) the packet fits into a single mbuf, and b) the packet
59219758Smarcel * is 32-bit aligned within the mbuf's data area. The presence of only
60219758Smarcel * four descriptor registers means that we can never have more than four
61219758Smarcel * packets queued for transmission at any one time.
62219758Smarcel *
63221271Smarcel * Reception is not much better. The driver has to allocate a single large
64115055Smarcel * buffer area (up to 64K in size) into which the chip will DMA received
6566458Sdfr * frames. Because we don't know where within this region received packets
66221271Smarcel * will begin or end, we have no choice but to copy data from the buffer
67221271Smarcel * area into mbufs in order to pass the packets up to the higher protocol
68115055Smarcel * levels.
6966458Sdfr *
70115055Smarcel * It's impossible given this rotten design to really achieve decent
71115055Smarcel * performance at 100Mbps, unless you happen to have a 400Mhz PII or
72115055Smarcel * some equally overmuscled CPU to drive it.
7366458Sdfr *
7466458Sdfr * On the bright side, the 8139 does have a built-in PHY, although
75115055Smarcel * rather than using an MDIO serial interface like most other NICs, the
7666458Sdfr * PHY registers are directly accessible through the 8139's register
77115055Smarcel * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
7866458Sdfr * filter.
7966458Sdfr *
80115055Smarcel * The 8129 chip is an older version of the 8139 that uses an external PHY
8166458Sdfr * chip. The 8129 has a serial MDIO interface for accessing the MII where
82130829Smarcel * the 8139 lets you directly access the on-board PHY registers. We need
83130829Smarcel * to select which interface to use depending on the chip type.
84115055Smarcel */
85115055Smarcel
8666458Sdfr#ifdef HAVE_KERNEL_OPTION_HEADERS
8766458Sdfr#include "opt_device_polling.h"
8866458Sdfr#endif
89115055Smarcel
90115055Smarcel#include <sys/param.h>
91115055Smarcel#include <sys/endian.h>
92115055Smarcel#include <sys/systm.h>
93115055Smarcel#include <sys/sockio.h>
9466458Sdfr#include <sys/mbuf.h>
9566458Sdfr#include <sys/malloc.h>
96115055Smarcel#include <sys/kernel.h>
9766458Sdfr#include <sys/module.h>
98115055Smarcel#include <sys/socket.h>
99115055Smarcel#include <sys/sysctl.h>
100115055Smarcel
101115055Smarcel#include <net/if.h>
102115055Smarcel#include <net/if_var.h>
103115055Smarcel#include <net/if_arp.h>
104115055Smarcel#include <net/ethernet.h>
105115055Smarcel#include <net/if_dl.h>
106115055Smarcel#include <net/if_media.h>
107115055Smarcel#include <net/if_types.h>
10866458Sdfr
109115055Smarcel#include <net/bpf.h>
11066458Sdfr
111115055Smarcel#include <machine/bus.h>
112115055Smarcel#include <machine/resource.h>
113115055Smarcel#include <sys/bus.h>
114115055Smarcel#include <sys/rman.h>
11566458Sdfr
11666458Sdfr#include <dev/mii/mii.h>
117115055Smarcel#include <dev/mii/mii_bitbang.h>
11866458Sdfr#include <dev/mii/miivar.h>
11966458Sdfr
120115055Smarcel#include <dev/pci/pcireg.h>
121115055Smarcel#include <dev/pci/pcivar.h>
122115055Smarcel
123115055SmarcelMODULE_DEPEND(rl, pci, 1, 1, 1);
124115055SmarcelMODULE_DEPEND(rl, ether, 1, 1, 1);
12566458SdfrMODULE_DEPEND(rl, miibus, 1, 1, 1);
126115055Smarcel
12766458Sdfr/* "device miibus" required.  See GENERIC if you get errors here. */
12866458Sdfr#include "miibus_if.h"
12966458Sdfr
13066458Sdfr#include <pci/if_rlreg.h>
13166458Sdfr
13266458Sdfr/*
13366458Sdfr * Various supported device vendors/types and their names.
13466458Sdfr */
13566458Sdfrstatic const struct rl_type rl_devs[] = {
13666458Sdfr	{ RT_VENDORID, RT_DEVICEID_8129, RL_8129,
13766458Sdfr		"RealTek 8129 10/100BaseTX" },
13866458Sdfr	{ RT_VENDORID, RT_DEVICEID_8139, RL_8139,
13966458Sdfr		"RealTek 8139 10/100BaseTX" },
140115055Smarcel	{ RT_VENDORID, RT_DEVICEID_8139D, RL_8139,
14166458Sdfr		"RealTek 8139 10/100BaseTX" },
14266458Sdfr	{ RT_VENDORID, RT_DEVICEID_8138, RL_8139,
14366458Sdfr		"RealTek 8139 10/100BaseTX CardBus" },
14466458Sdfr	{ RT_VENDORID, RT_DEVICEID_8100, RL_8139,
14566458Sdfr		"RealTek 8100 10/100BaseTX" },
14666458Sdfr	{ ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
14766458Sdfr		"Accton MPX 5030/5038 10/100BaseTX" },
14866458Sdfr	{ DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
14966458Sdfr		"Delta Electronics 8139 10/100BaseTX" },
15066458Sdfr	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
15166458Sdfr		"Addtron Technology 8139 10/100BaseTX" },
15266458Sdfr	{ DLINK_VENDORID, DLINK_DEVICEID_520TX_REVC1, RL_8139,
153		"D-Link DFE-520TX (rev. C1) 10/100BaseTX" },
154	{ DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
155		"D-Link DFE-530TX+ 10/100BaseTX" },
156	{ DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
157		"D-Link DFE-690TXD 10/100BaseTX" },
158	{ NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
159		"Nortel Networks 10/100BaseTX" },
160	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
161		"Corega FEther CB-TXD" },
162	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
163		"Corega FEtherII CB-TXD" },
164	{ PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
165		"Peppercon AG ROL-F" },
166	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139,
167		"Planex FNW-3603-TX" },
168	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
169		"Planex FNW-3800-TX" },
170	{ CP_VENDORID, RT_DEVICEID_8139, RL_8139,
171		"Compaq HNE-300" },
172	{ LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
173		"LevelOne FPC-0106TX" },
174	{ EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
175		"Edimax EP-4103DL CardBus" }
176};
177
178static int rl_attach(device_t);
179static int rl_detach(device_t);
180static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int);
181static int rl_dma_alloc(struct rl_softc *);
182static void rl_dma_free(struct rl_softc *);
183static void rl_eeprom_putbyte(struct rl_softc *, int);
184static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *);
185static int rl_encap(struct rl_softc *, struct mbuf **);
186static int rl_list_tx_init(struct rl_softc *);
187static int rl_list_rx_init(struct rl_softc *);
188static int rl_ifmedia_upd(struct ifnet *);
189static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
190static int rl_ioctl(struct ifnet *, u_long, caddr_t);
191static void rl_intr(void *);
192static void rl_init(void *);
193static void rl_init_locked(struct rl_softc *sc);
194static int rl_miibus_readreg(device_t, int, int);
195static void rl_miibus_statchg(device_t);
196static int rl_miibus_writereg(device_t, int, int, int);
197#ifdef DEVICE_POLLING
198static int rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
199static int rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
200#endif
201static int rl_probe(device_t);
202static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
203static void rl_reset(struct rl_softc *);
204static int rl_resume(device_t);
205static int rl_rxeof(struct rl_softc *);
206static void rl_rxfilter(struct rl_softc *);
207static int rl_shutdown(device_t);
208static void rl_start(struct ifnet *);
209static void rl_start_locked(struct ifnet *);
210static void rl_stop(struct rl_softc *);
211static int rl_suspend(device_t);
212static void rl_tick(void *);
213static void rl_txeof(struct rl_softc *);
214static void rl_watchdog(struct rl_softc *);
215static void rl_setwol(struct rl_softc *);
216static void rl_clrwol(struct rl_softc *);
217
218/*
219 * MII bit-bang glue
220 */
221static uint32_t rl_mii_bitbang_read(device_t);
222static void rl_mii_bitbang_write(device_t, uint32_t);
223
224static const struct mii_bitbang_ops rl_mii_bitbang_ops = {
225	rl_mii_bitbang_read,
226	rl_mii_bitbang_write,
227	{
228		RL_MII_DATAOUT,	/* MII_BIT_MDO */
229		RL_MII_DATAIN,	/* MII_BIT_MDI */
230		RL_MII_CLK,	/* MII_BIT_MDC */
231		RL_MII_DIR,	/* MII_BIT_DIR_HOST_PHY */
232		0,		/* MII_BIT_DIR_PHY_HOST */
233	}
234};
235
236static device_method_t rl_methods[] = {
237	/* Device interface */
238	DEVMETHOD(device_probe,		rl_probe),
239	DEVMETHOD(device_attach,	rl_attach),
240	DEVMETHOD(device_detach,	rl_detach),
241	DEVMETHOD(device_suspend,	rl_suspend),
242	DEVMETHOD(device_resume,	rl_resume),
243	DEVMETHOD(device_shutdown,	rl_shutdown),
244
245	/* MII interface */
246	DEVMETHOD(miibus_readreg,	rl_miibus_readreg),
247	DEVMETHOD(miibus_writereg,	rl_miibus_writereg),
248	DEVMETHOD(miibus_statchg,	rl_miibus_statchg),
249
250	DEVMETHOD_END
251};
252
253static driver_t rl_driver = {
254	"rl",
255	rl_methods,
256	sizeof(struct rl_softc)
257};
258
259static devclass_t rl_devclass;
260
261DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0);
262DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0);
263DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0);
264
265#define EE_SET(x)					\
266	CSR_WRITE_1(sc, RL_EECMD,			\
267		CSR_READ_1(sc, RL_EECMD) | x)
268
269#define EE_CLR(x)					\
270	CSR_WRITE_1(sc, RL_EECMD,			\
271		CSR_READ_1(sc, RL_EECMD) & ~x)
272
273/*
274 * Send a read command and address to the EEPROM, check for ACK.
275 */
276static void
277rl_eeprom_putbyte(struct rl_softc *sc, int addr)
278{
279	register int		d, i;
280
281	d = addr | sc->rl_eecmd_read;
282
283	/*
284	 * Feed in each bit and strobe the clock.
285	 */
286	for (i = 0x400; i; i >>= 1) {
287		if (d & i) {
288			EE_SET(RL_EE_DATAIN);
289		} else {
290			EE_CLR(RL_EE_DATAIN);
291		}
292		DELAY(100);
293		EE_SET(RL_EE_CLK);
294		DELAY(150);
295		EE_CLR(RL_EE_CLK);
296		DELAY(100);
297	}
298}
299
300/*
301 * Read a word of data stored in the EEPROM at address 'addr.'
302 */
303static void
304rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
305{
306	register int		i;
307	uint16_t		word = 0;
308
309	/* Enter EEPROM access mode. */
310	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
311
312	/*
313	 * Send address of word we want to read.
314	 */
315	rl_eeprom_putbyte(sc, addr);
316
317	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
318
319	/*
320	 * Start reading bits from EEPROM.
321	 */
322	for (i = 0x8000; i; i >>= 1) {
323		EE_SET(RL_EE_CLK);
324		DELAY(100);
325		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
326			word |= i;
327		EE_CLR(RL_EE_CLK);
328		DELAY(100);
329	}
330
331	/* Turn off EEPROM access mode. */
332	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
333
334	*dest = word;
335}
336
337/*
338 * Read a sequence of words from the EEPROM.
339 */
340static void
341rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
342{
343	int			i;
344	uint16_t		word = 0, *ptr;
345
346	for (i = 0; i < cnt; i++) {
347		rl_eeprom_getword(sc, off + i, &word);
348		ptr = (uint16_t *)(dest + (i * 2));
349		if (swap)
350			*ptr = ntohs(word);
351		else
352			*ptr = word;
353	}
354}
355
356/*
357 * Read the MII serial port for the MII bit-bang module.
358 */
359static uint32_t
360rl_mii_bitbang_read(device_t dev)
361{
362	struct rl_softc *sc;
363	uint32_t val;
364
365	sc = device_get_softc(dev);
366
367	val = CSR_READ_1(sc, RL_MII);
368	CSR_BARRIER(sc, RL_MII, 1,
369	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
370
371	return (val);
372}
373
374/*
375 * Write the MII serial port for the MII bit-bang module.
376 */
377static void
378rl_mii_bitbang_write(device_t dev, uint32_t val)
379{
380	struct rl_softc *sc;
381
382	sc = device_get_softc(dev);
383
384	CSR_WRITE_1(sc, RL_MII, val);
385	CSR_BARRIER(sc, RL_MII, 1,
386	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
387}
388
389static int
390rl_miibus_readreg(device_t dev, int phy, int reg)
391{
392	struct rl_softc		*sc;
393	uint16_t		rl8139_reg;
394
395	sc = device_get_softc(dev);
396
397	if (sc->rl_type == RL_8139) {
398		switch (reg) {
399		case MII_BMCR:
400			rl8139_reg = RL_BMCR;
401			break;
402		case MII_BMSR:
403			rl8139_reg = RL_BMSR;
404			break;
405		case MII_ANAR:
406			rl8139_reg = RL_ANAR;
407			break;
408		case MII_ANER:
409			rl8139_reg = RL_ANER;
410			break;
411		case MII_ANLPAR:
412			rl8139_reg = RL_LPAR;
413			break;
414		case MII_PHYIDR1:
415		case MII_PHYIDR2:
416			return (0);
417		/*
418		 * Allow the rlphy driver to read the media status
419		 * register. If we have a link partner which does not
420		 * support NWAY, this is the register which will tell
421		 * us the results of parallel detection.
422		 */
423		case RL_MEDIASTAT:
424			return (CSR_READ_1(sc, RL_MEDIASTAT));
425		default:
426			device_printf(sc->rl_dev, "bad phy register\n");
427			return (0);
428		}
429		return (CSR_READ_2(sc, rl8139_reg));
430	}
431
432	return (mii_bitbang_readreg(dev, &rl_mii_bitbang_ops, phy, reg));
433}
434
435static int
436rl_miibus_writereg(device_t dev, int phy, int reg, int data)
437{
438	struct rl_softc		*sc;
439	uint16_t		rl8139_reg;
440
441	sc = device_get_softc(dev);
442
443	if (sc->rl_type == RL_8139) {
444		switch (reg) {
445		case MII_BMCR:
446			rl8139_reg = RL_BMCR;
447			break;
448		case MII_BMSR:
449			rl8139_reg = RL_BMSR;
450			break;
451		case MII_ANAR:
452			rl8139_reg = RL_ANAR;
453			break;
454		case MII_ANER:
455			rl8139_reg = RL_ANER;
456			break;
457		case MII_ANLPAR:
458			rl8139_reg = RL_LPAR;
459			break;
460		case MII_PHYIDR1:
461		case MII_PHYIDR2:
462			return (0);
463			break;
464		default:
465			device_printf(sc->rl_dev, "bad phy register\n");
466			return (0);
467		}
468		CSR_WRITE_2(sc, rl8139_reg, data);
469		return (0);
470	}
471
472	mii_bitbang_writereg(dev, &rl_mii_bitbang_ops, phy, reg, data);
473
474	return (0);
475}
476
477static void
478rl_miibus_statchg(device_t dev)
479{
480	struct rl_softc		*sc;
481	struct ifnet		*ifp;
482	struct mii_data		*mii;
483
484	sc = device_get_softc(dev);
485	mii = device_get_softc(sc->rl_miibus);
486	ifp = sc->rl_ifp;
487	if (mii == NULL || ifp == NULL ||
488	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
489		return;
490
491	sc->rl_flags &= ~RL_FLAG_LINK;
492	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
493	    (IFM_ACTIVE | IFM_AVALID)) {
494		switch (IFM_SUBTYPE(mii->mii_media_active)) {
495		case IFM_10_T:
496		case IFM_100_TX:
497			sc->rl_flags |= RL_FLAG_LINK;
498			break;
499		default:
500			break;
501		}
502	}
503	/*
504	 * RealTek controllers do not provide any interface to
505	 * Tx/Rx MACs for resolved speed, duplex and flow-control
506	 * parameters.
507	 */
508}
509
510/*
511 * Program the 64-bit multicast hash filter.
512 */
513static void
514rl_rxfilter(struct rl_softc *sc)
515{
516	struct ifnet		*ifp = sc->rl_ifp;
517	int			h = 0;
518	uint32_t		hashes[2] = { 0, 0 };
519	struct ifmultiaddr	*ifma;
520	uint32_t		rxfilt;
521
522	RL_LOCK_ASSERT(sc);
523
524	rxfilt = CSR_READ_4(sc, RL_RXCFG);
525	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
526	    RL_RXCFG_RX_MULTI);
527	/* Always accept frames destined for this host. */
528	rxfilt |= RL_RXCFG_RX_INDIV;
529	/* Set capture broadcast bit to capture broadcast frames. */
530	if (ifp->if_flags & IFF_BROADCAST)
531		rxfilt |= RL_RXCFG_RX_BROAD;
532	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
533		rxfilt |= RL_RXCFG_RX_MULTI;
534		if (ifp->if_flags & IFF_PROMISC)
535			rxfilt |= RL_RXCFG_RX_ALLPHYS;
536		hashes[0] = 0xFFFFFFFF;
537		hashes[1] = 0xFFFFFFFF;
538	} else {
539		/* Now program new ones. */
540		if_maddr_rlock(ifp);
541		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
542			if (ifma->ifma_addr->sa_family != AF_LINK)
543				continue;
544			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
545			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
546			if (h < 32)
547				hashes[0] |= (1 << h);
548			else
549				hashes[1] |= (1 << (h - 32));
550		}
551		if_maddr_runlock(ifp);
552		if (hashes[0] != 0 || hashes[1] != 0)
553			rxfilt |= RL_RXCFG_RX_MULTI;
554	}
555
556	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
557	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
558	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
559}
560
561static void
562rl_reset(struct rl_softc *sc)
563{
564	register int		i;
565
566	RL_LOCK_ASSERT(sc);
567
568	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
569
570	for (i = 0; i < RL_TIMEOUT; i++) {
571		DELAY(10);
572		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
573			break;
574	}
575	if (i == RL_TIMEOUT)
576		device_printf(sc->rl_dev, "reset never completed!\n");
577}
578
579/*
580 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
581 * IDs against our list and return a device name if we find a match.
582 */
583static int
584rl_probe(device_t dev)
585{
586	const struct rl_type	*t;
587	uint16_t		devid, revid, vendor;
588	int			i;
589
590	vendor = pci_get_vendor(dev);
591	devid = pci_get_device(dev);
592	revid = pci_get_revid(dev);
593
594	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
595		if (revid == 0x20) {
596			/* 8139C+, let re(4) take care of this device. */
597			return (ENXIO);
598		}
599	}
600	t = rl_devs;
601	for (i = 0; i < sizeof(rl_devs) / sizeof(rl_devs[0]); i++, t++) {
602		if (vendor == t->rl_vid && devid == t->rl_did) {
603			device_set_desc(dev, t->rl_name);
604			return (BUS_PROBE_DEFAULT);
605		}
606	}
607
608	return (ENXIO);
609}
610
611struct rl_dmamap_arg {
612	bus_addr_t	rl_busaddr;
613};
614
615static void
616rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
617{
618	struct rl_dmamap_arg	*ctx;
619
620	if (error != 0)
621		return;
622
623	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
624
625        ctx = (struct rl_dmamap_arg *)arg;
626        ctx->rl_busaddr = segs[0].ds_addr;
627}
628
629/*
630 * Attach the interface. Allocate softc structures, do ifmedia
631 * setup and ethernet/BPF attach.
632 */
633static int
634rl_attach(device_t dev)
635{
636	uint8_t			eaddr[ETHER_ADDR_LEN];
637	uint16_t		as[3];
638	struct ifnet		*ifp;
639	struct rl_softc		*sc;
640	const struct rl_type	*t;
641	struct sysctl_ctx_list	*ctx;
642	struct sysctl_oid_list	*children;
643	int			error = 0, hwrev, i, phy, pmc, rid;
644	int			prefer_iomap, unit;
645	uint16_t		rl_did = 0;
646	char			tn[32];
647
648	sc = device_get_softc(dev);
649	unit = device_get_unit(dev);
650	sc->rl_dev = dev;
651
652	sc->rl_twister_enable = 0;
653	snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit);
654	TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable);
655	ctx = device_get_sysctl_ctx(sc->rl_dev);
656	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
657	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD,
658	   &sc->rl_twister_enable, 0, "");
659
660	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
661	    MTX_DEF);
662	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
663
664	pci_enable_busmaster(dev);
665
666
667	/*
668	 * Map control/status registers.
669	 * Default to using PIO access for this driver. On SMP systems,
670	 * there appear to be problems with memory mapped mode: it looks
671	 * like doing too many memory mapped access back to back in rapid
672	 * succession can hang the bus. I'm inclined to blame this on
673	 * crummy design/construction on the part of RealTek. Memory
674	 * mapped mode does appear to work on uniprocessor systems though.
675	 */
676	prefer_iomap = 1;
677	snprintf(tn, sizeof(tn), "dev.rl.%d.prefer_iomap", unit);
678	TUNABLE_INT_FETCH(tn, &prefer_iomap);
679	if (prefer_iomap) {
680		sc->rl_res_id = PCIR_BAR(0);
681		sc->rl_res_type = SYS_RES_IOPORT;
682		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
683		    &sc->rl_res_id, RF_ACTIVE);
684	}
685	if (prefer_iomap == 0 || sc->rl_res == NULL) {
686		sc->rl_res_id = PCIR_BAR(1);
687		sc->rl_res_type = SYS_RES_MEMORY;
688		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
689		    &sc->rl_res_id, RF_ACTIVE);
690	}
691	if (sc->rl_res == NULL) {
692		device_printf(dev, "couldn't map ports/memory\n");
693		error = ENXIO;
694		goto fail;
695	}
696
697#ifdef notdef
698	/*
699	 * Detect the Realtek 8139B. For some reason, this chip is very
700	 * unstable when left to autoselect the media
701	 * The best workaround is to set the device to the required
702	 * media type or to set it to the 10 Meg speed.
703	 */
704	if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF)
705		device_printf(dev,
706"Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n");
707#endif
708
709	sc->rl_btag = rman_get_bustag(sc->rl_res);
710	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
711
712	/* Allocate interrupt */
713	rid = 0;
714	sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
715	    RF_SHAREABLE | RF_ACTIVE);
716
717	if (sc->rl_irq[0] == NULL) {
718		device_printf(dev, "couldn't map interrupt\n");
719		error = ENXIO;
720		goto fail;
721	}
722
723	sc->rl_cfg0 = RL_8139_CFG0;
724	sc->rl_cfg1 = RL_8139_CFG1;
725	sc->rl_cfg2 = 0;
726	sc->rl_cfg3 = RL_8139_CFG3;
727	sc->rl_cfg4 = RL_8139_CFG4;
728	sc->rl_cfg5 = RL_8139_CFG5;
729
730	/*
731	 * Reset the adapter. Only take the lock here as it's needed in
732	 * order to call rl_reset().
733	 */
734	RL_LOCK(sc);
735	rl_reset(sc);
736	RL_UNLOCK(sc);
737
738	sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
739	rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0);
740	if (rl_did != 0x8129)
741		sc->rl_eecmd_read = RL_EECMD_READ_8BIT;
742
743	/*
744	 * Get station address from the EEPROM.
745	 */
746	rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
747	for (i = 0; i < 3; i++) {
748		eaddr[(i * 2) + 0] = as[i] & 0xff;
749		eaddr[(i * 2) + 1] = as[i] >> 8;
750	}
751
752	/*
753	 * Now read the exact device type from the EEPROM to find
754	 * out if it's an 8129 or 8139.
755	 */
756	rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0);
757
758	t = rl_devs;
759	sc->rl_type = 0;
760	while(t->rl_name != NULL) {
761		if (rl_did == t->rl_did) {
762			sc->rl_type = t->rl_basetype;
763			break;
764		}
765		t++;
766	}
767
768	if (sc->rl_type == 0) {
769		device_printf(dev, "unknown device ID: %x assuming 8139\n",
770		    rl_did);
771		sc->rl_type = RL_8139;
772		/*
773		 * Read RL_IDR register to get ethernet address as accessing
774		 * EEPROM may not extract correct address.
775		 */
776		for (i = 0; i < ETHER_ADDR_LEN; i++)
777			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
778	}
779
780	if ((error = rl_dma_alloc(sc)) != 0)
781		goto fail;
782
783	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
784	if (ifp == NULL) {
785		device_printf(dev, "can not if_alloc()\n");
786		error = ENOSPC;
787		goto fail;
788	}
789
790#define	RL_PHYAD_INTERNAL	0
791
792	/* Do MII setup */
793	phy = MII_PHY_ANY;
794	if (sc->rl_type == RL_8139)
795		phy = RL_PHYAD_INTERNAL;
796	error = mii_attach(dev, &sc->rl_miibus, ifp, rl_ifmedia_upd,
797	    rl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
798	if (error != 0) {
799		device_printf(dev, "attaching PHYs failed\n");
800		goto fail;
801	}
802
803	ifp->if_softc = sc;
804	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
805	ifp->if_mtu = ETHERMTU;
806	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
807	ifp->if_ioctl = rl_ioctl;
808	ifp->if_start = rl_start;
809	ifp->if_init = rl_init;
810	ifp->if_capabilities = IFCAP_VLAN_MTU;
811	/* Check WOL for RTL8139B or newer controllers. */
812	if (sc->rl_type == RL_8139 &&
813	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
814		hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
815		switch (hwrev) {
816		case RL_HWREV_8139B:
817		case RL_HWREV_8130:
818		case RL_HWREV_8139C:
819		case RL_HWREV_8139D:
820		case RL_HWREV_8101:
821		case RL_HWREV_8100:
822			ifp->if_capabilities |= IFCAP_WOL;
823			/* Disable WOL. */
824			rl_clrwol(sc);
825			break;
826		default:
827			break;
828		}
829	}
830	ifp->if_capenable = ifp->if_capabilities;
831	ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST);
832#ifdef DEVICE_POLLING
833	ifp->if_capabilities |= IFCAP_POLLING;
834#endif
835	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
836	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
837	IFQ_SET_READY(&ifp->if_snd);
838
839	/*
840	 * Call MI attach routine.
841	 */
842	ether_ifattach(ifp, eaddr);
843
844	/* Hook interrupt last to avoid having to lock softc */
845	error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
846	    NULL, rl_intr, sc, &sc->rl_intrhand[0]);
847	if (error) {
848		device_printf(sc->rl_dev, "couldn't set up irq\n");
849		ether_ifdetach(ifp);
850	}
851
852fail:
853	if (error)
854		rl_detach(dev);
855
856	return (error);
857}
858
859/*
860 * Shutdown hardware and free up resources. This can be called any
861 * time after the mutex has been initialized. It is called in both
862 * the error case in attach and the normal detach case so it needs
863 * to be careful about only freeing resources that have actually been
864 * allocated.
865 */
866static int
867rl_detach(device_t dev)
868{
869	struct rl_softc		*sc;
870	struct ifnet		*ifp;
871
872	sc = device_get_softc(dev);
873	ifp = sc->rl_ifp;
874
875	KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
876
877#ifdef DEVICE_POLLING
878	if (ifp->if_capenable & IFCAP_POLLING)
879		ether_poll_deregister(ifp);
880#endif
881	/* These should only be active if attach succeeded */
882	if (device_is_attached(dev)) {
883		RL_LOCK(sc);
884		rl_stop(sc);
885		RL_UNLOCK(sc);
886		callout_drain(&sc->rl_stat_callout);
887		ether_ifdetach(ifp);
888	}
889#if 0
890	sc->suspended = 1;
891#endif
892	if (sc->rl_miibus)
893		device_delete_child(dev, sc->rl_miibus);
894	bus_generic_detach(dev);
895
896	if (sc->rl_intrhand[0])
897		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
898	if (sc->rl_irq[0])
899		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]);
900	if (sc->rl_res)
901		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
902		    sc->rl_res);
903
904	if (ifp)
905		if_free(ifp);
906
907	rl_dma_free(sc);
908
909	mtx_destroy(&sc->rl_mtx);
910
911	return (0);
912}
913
914static int
915rl_dma_alloc(struct rl_softc *sc)
916{
917	struct rl_dmamap_arg	ctx;
918	int			error, i;
919
920	/*
921	 * Allocate the parent bus DMA tag appropriate for PCI.
922	 */
923	error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev),	/* parent */
924	    1, 0,			/* alignment, boundary */
925	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
926	    BUS_SPACE_MAXADDR,		/* highaddr */
927	    NULL, NULL,			/* filter, filterarg */
928	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
929	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
930	    0,				/* flags */
931	    NULL, NULL,			/* lockfunc, lockarg */
932	    &sc->rl_parent_tag);
933	if (error) {
934                device_printf(sc->rl_dev,
935		    "failed to create parent DMA tag.\n");
936		goto fail;
937	}
938	/* Create DMA tag for Rx memory block. */
939	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
940	    RL_RX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
941	    BUS_SPACE_MAXADDR,		/* lowaddr */
942	    BUS_SPACE_MAXADDR,		/* highaddr */
943	    NULL, NULL,			/* filter, filterarg */
944	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1,	/* maxsize,nsegments */
945	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ,	/* maxsegsize */
946	    0,				/* flags */
947	    NULL, NULL,			/* lockfunc, lockarg */
948	    &sc->rl_cdata.rl_rx_tag);
949	if (error) {
950                device_printf(sc->rl_dev,
951		    "failed to create Rx memory block DMA tag.\n");
952		goto fail;
953	}
954	/* Create DMA tag for Tx buffer. */
955	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
956	    RL_TX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
957	    BUS_SPACE_MAXADDR,		/* lowaddr */
958	    BUS_SPACE_MAXADDR,		/* highaddr */
959	    NULL, NULL,			/* filter, filterarg */
960	    MCLBYTES, 1,		/* maxsize, nsegments */
961	    MCLBYTES,			/* maxsegsize */
962	    0,				/* flags */
963	    NULL, NULL,			/* lockfunc, lockarg */
964	    &sc->rl_cdata.rl_tx_tag);
965	if (error) {
966                device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
967		goto fail;
968	}
969
970	/*
971	 * Allocate DMA'able memory and load DMA map for Rx memory block.
972	 */
973	error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
974	    (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
975	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
976	if (error != 0) {
977		device_printf(sc->rl_dev,
978		    "failed to allocate Rx DMA memory block.\n");
979		goto fail;
980	}
981	ctx.rl_busaddr = 0;
982	error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
983	    sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
984	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
985	    BUS_DMA_NOWAIT);
986	if (error != 0 || ctx.rl_busaddr == 0) {
987		device_printf(sc->rl_dev,
988		    "could not load Rx DMA memory block.\n");
989		goto fail;
990	}
991	sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;
992
993	/* Create DMA maps for Tx buffers. */
994	for (i = 0; i < RL_TX_LIST_CNT; i++) {
995		sc->rl_cdata.rl_tx_chain[i] = NULL;
996		sc->rl_cdata.rl_tx_dmamap[i] = NULL;
997		error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
998		    &sc->rl_cdata.rl_tx_dmamap[i]);
999		if (error != 0) {
1000			device_printf(sc->rl_dev,
1001			    "could not create Tx dmamap.\n");
1002			goto fail;
1003		}
1004	}
1005
1006	/* Leave a few bytes before the start of the RX ring buffer. */
1007	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
1008	sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;
1009
1010fail:
1011	return (error);
1012}
1013
1014static void
1015rl_dma_free(struct rl_softc *sc)
1016{
1017	int			i;
1018
1019	/* Rx memory block. */
1020	if (sc->rl_cdata.rl_rx_tag != NULL) {
1021		if (sc->rl_cdata.rl_rx_dmamap != NULL)
1022			bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
1023			    sc->rl_cdata.rl_rx_dmamap);
1024		if (sc->rl_cdata.rl_rx_dmamap != NULL &&
1025		    sc->rl_cdata.rl_rx_buf_ptr != NULL)
1026			bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
1027			    sc->rl_cdata.rl_rx_buf_ptr,
1028			    sc->rl_cdata.rl_rx_dmamap);
1029		sc->rl_cdata.rl_rx_buf_ptr = NULL;
1030		sc->rl_cdata.rl_rx_buf = NULL;
1031		sc->rl_cdata.rl_rx_dmamap = NULL;
1032		bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
1033		sc->rl_cdata.rl_tx_tag = NULL;
1034	}
1035
1036	/* Tx buffers. */
1037	if (sc->rl_cdata.rl_tx_tag != NULL) {
1038		for (i = 0; i < RL_TX_LIST_CNT; i++) {
1039			if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
1040				bus_dmamap_destroy(
1041				    sc->rl_cdata.rl_tx_tag,
1042				    sc->rl_cdata.rl_tx_dmamap[i]);
1043				sc->rl_cdata.rl_tx_dmamap[i] = NULL;
1044			}
1045		}
1046		bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
1047		sc->rl_cdata.rl_tx_tag = NULL;
1048	}
1049
1050	if (sc->rl_parent_tag != NULL) {
1051		bus_dma_tag_destroy(sc->rl_parent_tag);
1052		sc->rl_parent_tag = NULL;
1053	}
1054}
1055
1056/*
1057 * Initialize the transmit descriptors.
1058 */
1059static int
1060rl_list_tx_init(struct rl_softc *sc)
1061{
1062	struct rl_chain_data	*cd;
1063	int			i;
1064
1065	RL_LOCK_ASSERT(sc);
1066
1067	cd = &sc->rl_cdata;
1068	for (i = 0; i < RL_TX_LIST_CNT; i++) {
1069		cd->rl_tx_chain[i] = NULL;
1070		CSR_WRITE_4(sc,
1071		    RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
1072	}
1073
1074	sc->rl_cdata.cur_tx = 0;
1075	sc->rl_cdata.last_tx = 0;
1076
1077	return (0);
1078}
1079
1080static int
1081rl_list_rx_init(struct rl_softc *sc)
1082{
1083
1084	RL_LOCK_ASSERT(sc);
1085
1086	bzero(sc->rl_cdata.rl_rx_buf_ptr,
1087	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
1088	bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, sc->rl_cdata.rl_rx_dmamap,
1089	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1090
1091	return (0);
1092}
1093
1094/*
1095 * A frame has been uploaded: pass the resulting mbuf chain up to
1096 * the higher level protocols.
1097 *
1098 * You know there's something wrong with a PCI bus-master chip design
1099 * when you have to use m_devget().
1100 *
1101 * The receive operation is badly documented in the datasheet, so I'll
1102 * attempt to document it here. The driver provides a buffer area and
1103 * places its base address in the RX buffer start address register.
1104 * The chip then begins copying frames into the RX buffer. Each frame
1105 * is preceded by a 32-bit RX status word which specifies the length
1106 * of the frame and certain other status bits. Each frame (starting with
1107 * the status word) is also 32-bit aligned. The frame length is in the
1108 * first 16 bits of the status word; the lower 15 bits correspond with
1109 * the 'rx status register' mentioned in the datasheet.
1110 *
1111 * Note: to make the Alpha happy, the frame payload needs to be aligned
1112 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
1113 * as the offset argument to m_devget().
1114 */
1115static int
1116rl_rxeof(struct rl_softc *sc)
1117{
1118	struct mbuf		*m;
1119	struct ifnet		*ifp = sc->rl_ifp;
1120	uint8_t			*rxbufpos;
1121	int			total_len = 0;
1122	int			wrap = 0;
1123	int			rx_npkts = 0;
1124	uint32_t		rxstat;
1125	uint16_t		cur_rx;
1126	uint16_t		limit;
1127	uint16_t		max_bytes, rx_bytes = 0;
1128
1129	RL_LOCK_ASSERT(sc);
1130
1131	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
1132	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1133
1134	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;
1135
1136	/* Do not try to read past this point. */
1137	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;
1138
1139	if (limit < cur_rx)
1140		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
1141	else
1142		max_bytes = limit - cur_rx;
1143
1144	while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
1145#ifdef DEVICE_POLLING
1146		if (ifp->if_capenable & IFCAP_POLLING) {
1147			if (sc->rxcycles <= 0)
1148				break;
1149			sc->rxcycles--;
1150		}
1151#endif
1152		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
1153		rxstat = le32toh(*(uint32_t *)rxbufpos);
1154
1155		/*
1156		 * Here's a totally undocumented fact for you. When the
1157		 * RealTek chip is in the process of copying a packet into
1158		 * RAM for you, the length will be 0xfff0. If you spot a
1159		 * packet header with this value, you need to stop. The
1160		 * datasheet makes absolutely no mention of this and
1161		 * RealTek should be shot for this.
1162		 */
1163		total_len = rxstat >> 16;
1164		if (total_len == RL_RXSTAT_UNFINISHED)
1165			break;
1166
1167		if (!(rxstat & RL_RXSTAT_RXOK) ||
1168		    total_len < ETHER_MIN_LEN ||
1169		    total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
1170			ifp->if_ierrors++;
1171			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1172			rl_init_locked(sc);
1173			return (rx_npkts);
1174		}
1175
1176		/* No errors; receive the packet. */
1177		rx_bytes += total_len + 4;
1178
1179		/*
1180		 * XXX The RealTek chip includes the CRC with every
1181		 * received frame, and there's no way to turn this
1182		 * behavior off (at least, I can't find anything in
1183		 * the manual that explains how to do it) so we have
1184		 * to trim off the CRC manually.
1185		 */
1186		total_len -= ETHER_CRC_LEN;
1187
1188		/*
1189		 * Avoid trying to read more bytes than we know
1190		 * the chip has prepared for us.
1191		 */
1192		if (rx_bytes > max_bytes)
1193			break;
1194
1195		rxbufpos = sc->rl_cdata.rl_rx_buf +
1196			((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN);
1197		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
1198			rxbufpos = sc->rl_cdata.rl_rx_buf;
1199
1200		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
1201		if (total_len > wrap) {
1202			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
1203			    NULL);
1204			if (m != NULL)
1205				m_copyback(m, wrap, total_len - wrap,
1206					sc->rl_cdata.rl_rx_buf);
1207			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
1208		} else {
1209			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
1210			    NULL);
1211			cur_rx += total_len + 4 + ETHER_CRC_LEN;
1212		}
1213
1214		/* Round up to 32-bit boundary. */
1215		cur_rx = (cur_rx + 3) & ~3;
1216		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);
1217
1218		if (m == NULL) {
1219			ifp->if_iqdrops++;
1220			continue;
1221		}
1222
1223		ifp->if_ipackets++;
1224		RL_UNLOCK(sc);
1225		(*ifp->if_input)(ifp, m);
1226		RL_LOCK(sc);
1227		rx_npkts++;
1228	}
1229
1230	/* No need to sync Rx memory block as we didn't modify it. */
1231	return (rx_npkts);
1232}
1233
1234/*
1235 * A frame was downloaded to the chip. It's safe for us to clean up
1236 * the list buffers.
1237 */
1238static void
1239rl_txeof(struct rl_softc *sc)
1240{
1241	struct ifnet		*ifp = sc->rl_ifp;
1242	uint32_t		txstat;
1243
1244	RL_LOCK_ASSERT(sc);
1245
1246	/*
1247	 * Go through our tx list and free mbufs for those
1248	 * frames that have been uploaded.
1249	 */
1250	do {
1251		if (RL_LAST_TXMBUF(sc) == NULL)
1252			break;
1253		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
1254		if (!(txstat & (RL_TXSTAT_TX_OK|
1255		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
1256			break;
1257
1258		ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;
1259
1260		bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
1261		    BUS_DMASYNC_POSTWRITE);
1262		bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc));
1263		m_freem(RL_LAST_TXMBUF(sc));
1264		RL_LAST_TXMBUF(sc) = NULL;
1265		/*
1266		 * If there was a transmit underrun, bump the TX threshold.
1267		 * Make sure not to overflow the 63 * 32byte we can address
1268		 * with the 6 available bit.
1269		 */
1270		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
1271		    (sc->rl_txthresh < 2016))
1272			sc->rl_txthresh += 32;
1273		if (txstat & RL_TXSTAT_TX_OK)
1274			ifp->if_opackets++;
1275		else {
1276			int			oldthresh;
1277			ifp->if_oerrors++;
1278			if ((txstat & RL_TXSTAT_TXABRT) ||
1279			    (txstat & RL_TXSTAT_OUTOFWIN))
1280				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1281			oldthresh = sc->rl_txthresh;
1282			/* error recovery */
1283			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1284			rl_init_locked(sc);
1285			/* restore original threshold */
1286			sc->rl_txthresh = oldthresh;
1287			return;
1288		}
1289		RL_INC(sc->rl_cdata.last_tx);
1290		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1291	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);
1292
1293	if (RL_LAST_TXMBUF(sc) == NULL)
1294		sc->rl_watchdog_timer = 0;
1295}
1296
1297static void
1298rl_twister_update(struct rl_softc *sc)
1299{
1300	uint16_t linktest;
1301	/*
1302	 * Table provided by RealTek (Kinston <shangh@realtek.com.tw>) for
1303	 * Linux driver.  Values undocumented otherwise.
1304	 */
1305	static const uint32_t param[4][4] = {
1306		{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
1307		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
1308		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
1309		{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
1310	};
1311
1312	/*
1313	 * Tune the so-called twister registers of the RTL8139.  These
1314	 * are used to compensate for impedance mismatches.  The
1315	 * method for tuning these registers is undocumented and the
1316	 * following procedure is collected from public sources.
1317	 */
1318	switch (sc->rl_twister)
1319	{
1320	case CHK_LINK:
1321		/*
1322		 * If we have a sufficient link, then we can proceed in
1323		 * the state machine to the next stage.  If not, then
1324		 * disable further tuning after writing sane defaults.
1325		 */
1326		if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) {
1327			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD);
1328			sc->rl_twister = FIND_ROW;
1329		} else {
1330			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD);
1331			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
1332			CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
1333			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
1334			sc->rl_twister = DONE;
1335		}
1336		break;
1337	case FIND_ROW:
1338		/*
1339		 * Read how long it took to see the echo to find the tuning
1340		 * row to use.
1341		 */
1342		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
1343		if (linktest == RL_CSCFG_ROW3)
1344			sc->rl_twist_row = 3;
1345		else if (linktest == RL_CSCFG_ROW2)
1346			sc->rl_twist_row = 2;
1347		else if (linktest == RL_CSCFG_ROW1)
1348			sc->rl_twist_row = 1;
1349		else
1350			sc->rl_twist_row = 0;
1351		sc->rl_twist_col = 0;
1352		sc->rl_twister = SET_PARAM;
1353		break;
1354	case SET_PARAM:
1355		if (sc->rl_twist_col == 0)
1356			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
1357		CSR_WRITE_4(sc, RL_PARA7C,
1358		    param[sc->rl_twist_row][sc->rl_twist_col]);
1359		if (++sc->rl_twist_col == 4) {
1360			if (sc->rl_twist_row == 3)
1361				sc->rl_twister = RECHK_LONG;
1362			else
1363				sc->rl_twister = DONE;
1364		}
1365		break;
1366	case RECHK_LONG:
1367		/*
1368		 * For long cables, we have to double check to make sure we
1369		 * don't mistune.
1370		 */
1371		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
1372		if (linktest == RL_CSCFG_ROW3)
1373			sc->rl_twister = DONE;
1374		else {
1375			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE);
1376			sc->rl_twister = RETUNE;
1377		}
1378		break;
1379	case RETUNE:
1380		/* Retune for a shorter cable (try column 2) */
1381		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
1382		CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
1383		CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
1384		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
1385		sc->rl_twist_row--;
1386		sc->rl_twist_col = 0;
1387		sc->rl_twister = SET_PARAM;
1388		break;
1389
1390	case DONE:
1391		break;
1392	}
1393
1394}
1395
1396static void
1397rl_tick(void *xsc)
1398{
1399	struct rl_softc		*sc = xsc;
1400	struct mii_data		*mii;
1401	int ticks;
1402
1403	RL_LOCK_ASSERT(sc);
1404	/*
1405	 * If we're doing the twister cable calibration, then we need to defer
1406	 * watchdog timeouts.  This is a no-op in normal operations, but
1407	 * can falsely trigger when the cable calibration takes a while and
1408	 * there was traffic ready to go when rl was started.
1409	 *
1410	 * We don't defer mii_tick since that updates the mii status, which
1411	 * helps the twister process, at least according to similar patches
1412	 * for the Linux driver I found online while doing the fixes.  Worst
1413	 * case is a few extra mii reads during calibration.
1414	 */
1415	mii = device_get_softc(sc->rl_miibus);
1416	mii_tick(mii);
1417	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
1418		rl_miibus_statchg(sc->rl_dev);
1419	if (sc->rl_twister_enable) {
1420		if (sc->rl_twister == DONE)
1421			rl_watchdog(sc);
1422		else
1423			rl_twister_update(sc);
1424		if (sc->rl_twister == DONE)
1425			ticks = hz;
1426		else
1427			ticks = hz / 10;
1428	} else {
1429		rl_watchdog(sc);
1430		ticks = hz;
1431	}
1432
1433	callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc);
1434}
1435
1436#ifdef DEVICE_POLLING
1437static int
1438rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1439{
1440	struct rl_softc *sc = ifp->if_softc;
1441	int rx_npkts = 0;
1442
1443	RL_LOCK(sc);
1444	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1445		rx_npkts = rl_poll_locked(ifp, cmd, count);
1446	RL_UNLOCK(sc);
1447	return (rx_npkts);
1448}
1449
1450static int
1451rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1452{
1453	struct rl_softc *sc = ifp->if_softc;
1454	int rx_npkts;
1455
1456	RL_LOCK_ASSERT(sc);
1457
1458	sc->rxcycles = count;
1459	rx_npkts = rl_rxeof(sc);
1460	rl_txeof(sc);
1461
1462	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1463		rl_start_locked(ifp);
1464
1465	if (cmd == POLL_AND_CHECK_STATUS) {
1466		uint16_t	status;
1467
1468		/* We should also check the status register. */
1469		status = CSR_READ_2(sc, RL_ISR);
1470		if (status == 0xffff)
1471			return (rx_npkts);
1472		if (status != 0)
1473			CSR_WRITE_2(sc, RL_ISR, status);
1474
1475		/* XXX We should check behaviour on receiver stalls. */
1476
1477		if (status & RL_ISR_SYSTEM_ERR) {
1478			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1479			rl_init_locked(sc);
1480		}
1481	}
1482	return (rx_npkts);
1483}
1484#endif /* DEVICE_POLLING */
1485
1486static void
1487rl_intr(void *arg)
1488{
1489	struct rl_softc		*sc = arg;
1490	struct ifnet		*ifp = sc->rl_ifp;
1491	uint16_t		status;
1492	int			count;
1493
1494	RL_LOCK(sc);
1495
1496	if (sc->suspended)
1497		goto done_locked;
1498
1499#ifdef DEVICE_POLLING
1500	if  (ifp->if_capenable & IFCAP_POLLING)
1501		goto done_locked;
1502#endif
1503
1504	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1505		goto done_locked2;
1506	status = CSR_READ_2(sc, RL_ISR);
1507	if (status == 0xffff || (status & RL_INTRS) == 0)
1508		goto done_locked;
1509	/*
1510	 * Ours, disable further interrupts.
1511	 */
1512	CSR_WRITE_2(sc, RL_IMR, 0);
1513	for (count = 16; count > 0; count--) {
1514		CSR_WRITE_2(sc, RL_ISR, status);
1515		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1516			if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR))
1517				rl_rxeof(sc);
1518			if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR))
1519				rl_txeof(sc);
1520			if (status & RL_ISR_SYSTEM_ERR) {
1521				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1522				rl_init_locked(sc);
1523				RL_UNLOCK(sc);
1524				return;
1525			}
1526		}
1527		status = CSR_READ_2(sc, RL_ISR);
1528		/* If the card has gone away, the read returns 0xffff. */
1529		if (status == 0xffff || (status & RL_INTRS) == 0)
1530			break;
1531	}
1532
1533	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1534		rl_start_locked(ifp);
1535
1536done_locked2:
1537	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1538		CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1539done_locked:
1540	RL_UNLOCK(sc);
1541}
1542
1543/*
1544 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1545 * pointers to the fragment pointers.
1546 */
1547static int
1548rl_encap(struct rl_softc *sc, struct mbuf **m_head)
1549{
1550	struct mbuf		*m;
1551	bus_dma_segment_t	txsegs[1];
1552	int			error, nsegs, padlen;
1553
1554	RL_LOCK_ASSERT(sc);
1555
1556	m = *m_head;
1557	padlen = 0;
1558	/*
1559	 * Hardware doesn't auto-pad, so we have to make sure
1560	 * pad short frames out to the minimum frame length.
1561	 */
1562	if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
1563		padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
1564	/*
1565	 * The RealTek is brain damaged and wants longword-aligned
1566	 * TX buffers, plus we can only have one fragment buffer
1567	 * per packet. We have to copy pretty much all the time.
1568	 */
1569	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
1570	    (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
1571		m = m_defrag(*m_head, M_NOWAIT);
1572		if (m == NULL) {
1573			m_freem(*m_head);
1574			*m_head = NULL;
1575			return (ENOMEM);
1576		}
1577	}
1578	*m_head = m;
1579
1580	if (padlen > 0) {
1581		/*
1582		 * Make security-conscious people happy: zero out the
1583		 * bytes in the pad area, since we don't know what
1584		 * this mbuf cluster buffer's previous user might
1585		 * have left in it.
1586		 */
1587		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1588		m->m_pkthdr.len += padlen;
1589		m->m_len = m->m_pkthdr.len;
1590	}
1591
1592	error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
1593	    RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
1594	if (error != 0)
1595		return (error);
1596	if (nsegs == 0) {
1597		m_freem(*m_head);
1598		*m_head = NULL;
1599		return (EIO);
1600	}
1601
1602	RL_CUR_TXMBUF(sc) = m;
1603	bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
1604	    BUS_DMASYNC_PREWRITE);
1605	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));
1606
1607	return (0);
1608}
1609
1610/*
1611 * Main transmit routine.
1612 */
1613static void
1614rl_start(struct ifnet *ifp)
1615{
1616	struct rl_softc		*sc = ifp->if_softc;
1617
1618	RL_LOCK(sc);
1619	rl_start_locked(ifp);
1620	RL_UNLOCK(sc);
1621}
1622
1623static void
1624rl_start_locked(struct ifnet *ifp)
1625{
1626	struct rl_softc		*sc = ifp->if_softc;
1627	struct mbuf		*m_head = NULL;
1628
1629	RL_LOCK_ASSERT(sc);
1630
1631	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1632	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
1633		return;
1634
1635	while (RL_CUR_TXMBUF(sc) == NULL) {
1636
1637		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1638
1639		if (m_head == NULL)
1640			break;
1641
1642		if (rl_encap(sc, &m_head)) {
1643			if (m_head == NULL)
1644				break;
1645			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1646			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1647			break;
1648		}
1649
1650		/* Pass a copy of this mbuf chain to the bpf subsystem. */
1651		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));
1652
1653		/* Transmit the frame. */
1654		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
1655		    RL_TXTHRESH(sc->rl_txthresh) |
1656		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);
1657
1658		RL_INC(sc->rl_cdata.cur_tx);
1659
1660		/* Set a timeout in case the chip goes out to lunch. */
1661		sc->rl_watchdog_timer = 5;
1662	}
1663
1664	/*
1665	 * We broke out of the loop because all our TX slots are
1666	 * full. Mark the NIC as busy until it drains some of the
1667	 * packets from the queue.
1668	 */
1669	if (RL_CUR_TXMBUF(sc) != NULL)
1670		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1671}
1672
1673static void
1674rl_init(void *xsc)
1675{
1676	struct rl_softc		*sc = xsc;
1677
1678	RL_LOCK(sc);
1679	rl_init_locked(sc);
1680	RL_UNLOCK(sc);
1681}
1682
1683static void
1684rl_init_locked(struct rl_softc *sc)
1685{
1686	struct ifnet		*ifp = sc->rl_ifp;
1687	struct mii_data		*mii;
1688	uint32_t		eaddr[2];
1689
1690	RL_LOCK_ASSERT(sc);
1691
1692	mii = device_get_softc(sc->rl_miibus);
1693
1694	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1695		return;
1696
1697	/*
1698	 * Cancel pending I/O and free all RX/TX buffers.
1699	 */
1700	rl_stop(sc);
1701
1702	rl_reset(sc);
1703	if (sc->rl_twister_enable) {
1704		/*
1705		 * Reset twister register tuning state.  The twister
1706		 * registers and their tuning are undocumented, but
1707		 * are necessary to cope with bad links.  rl_twister =
1708		 * DONE here will disable this entirely.
1709		 */
1710		sc->rl_twister = CHK_LINK;
1711	}
1712
1713	/*
1714	 * Init our MAC address.  Even though the chipset
1715	 * documentation doesn't mention it, we need to enter "Config
1716	 * register write enable" mode to modify the ID registers.
1717	 */
1718	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
1719	bzero(eaddr, sizeof(eaddr));
1720	bcopy(IF_LLADDR(sc->rl_ifp), eaddr, ETHER_ADDR_LEN);
1721	CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]);
1722	CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]);
1723	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1724
1725	/* Init the RX memory block pointer register. */
1726	CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr +
1727	    RL_RX_8139_BUF_RESERVE);
1728	/* Init TX descriptors. */
1729	rl_list_tx_init(sc);
1730	/* Init Rx memory block. */
1731	rl_list_rx_init(sc);
1732
1733	/*
1734	 * Enable transmit and receive.
1735	 */
1736	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
1737
1738	/*
1739	 * Set the initial TX and RX configuration.
1740	 */
1741	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1742	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
1743
1744	/* Set RX filter. */
1745	rl_rxfilter(sc);
1746
1747#ifdef DEVICE_POLLING
1748	/* Disable interrupts if we are polling. */
1749	if (ifp->if_capenable & IFCAP_POLLING)
1750		CSR_WRITE_2(sc, RL_IMR, 0);
1751	else
1752#endif
1753	/* Enable interrupts. */
1754	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1755
1756	/* Set initial TX threshold */
1757	sc->rl_txthresh = RL_TX_THRESH_INIT;
1758
1759	/* Start RX/TX process. */
1760	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
1761
1762	/* Enable receiver and transmitter. */
1763	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
1764
1765	sc->rl_flags &= ~RL_FLAG_LINK;
1766	mii_mediachg(mii);
1767
1768	CSR_WRITE_1(sc, sc->rl_cfg1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);
1769
1770	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1771	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1772
1773	callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
1774}
1775
1776/*
1777 * Set media options.
1778 */
1779static int
1780rl_ifmedia_upd(struct ifnet *ifp)
1781{
1782	struct rl_softc		*sc = ifp->if_softc;
1783	struct mii_data		*mii;
1784
1785	mii = device_get_softc(sc->rl_miibus);
1786
1787	RL_LOCK(sc);
1788	mii_mediachg(mii);
1789	RL_UNLOCK(sc);
1790
1791	return (0);
1792}
1793
1794/*
1795 * Report current media status.
1796 */
1797static void
1798rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1799{
1800	struct rl_softc		*sc = ifp->if_softc;
1801	struct mii_data		*mii;
1802
1803	mii = device_get_softc(sc->rl_miibus);
1804
1805	RL_LOCK(sc);
1806	mii_pollstat(mii);
1807	ifmr->ifm_active = mii->mii_media_active;
1808	ifmr->ifm_status = mii->mii_media_status;
1809	RL_UNLOCK(sc);
1810}
1811
1812static int
1813rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1814{
1815	struct ifreq		*ifr = (struct ifreq *)data;
1816	struct mii_data		*mii;
1817	struct rl_softc		*sc = ifp->if_softc;
1818	int			error = 0, mask;
1819
1820	switch (command) {
1821	case SIOCSIFFLAGS:
1822		RL_LOCK(sc);
1823		if (ifp->if_flags & IFF_UP) {
1824			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1825			    ((ifp->if_flags ^ sc->rl_if_flags) &
1826                            (IFF_PROMISC | IFF_ALLMULTI)))
1827				rl_rxfilter(sc);
1828                        else
1829				rl_init_locked(sc);
1830                } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1831			rl_stop(sc);
1832		sc->rl_if_flags = ifp->if_flags;
1833		RL_UNLOCK(sc);
1834		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		RL_LOCK(sc);
		rl_rxfilter(sc);
		RL_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->rl_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(rl_poll, ifp);
			if (error)
				return (error);
			RL_LOCK(sc);
			/* Disable interrupts. */
			CSR_WRITE_2(sc, RL_IMR, 0x0000);
			ifp->if_capenable |= IFCAP_POLLING;
			RL_UNLOCK(sc);
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			RL_LOCK(sc);
			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			RL_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
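		/*
		 * Toggle the requested Wake On LAN sub-capabilities; the
		 * new settings are programmed into the chip by rl_setwol()
		 * at suspend/shutdown time.
		 */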
		if ((mask & IFCAP_WOL) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_UCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_UCAST;
			if ((mask & IFCAP_WOL_MCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

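/*
 * Transmit watchdog.  If the armed timer counts down to zero, report a
 * timeout, count an output error and reinitialize the chip.
 */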
static void
rl_watchdog(struct rl_softc *sc)
{

	RL_LOCK_ASSERT(sc);

	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer > 0)
		return;

	device_printf(sc->rl_dev, "watchdog timeout\n");
	sc->rl_ifp->if_oerrors++;

	rl_txeof(sc);
	rl_rxeof(sc);
	sc->rl_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	rl_init_locked(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
rl_stop(struct rl_softc *sc)
{
	register int		i;
	struct ifnet		*ifp = sc->rl_ifp;

	RL_LOCK_ASSERT(sc);

	sc->rl_watchdog_timer = 0;
	callout_stop(&sc->rl_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->rl_flags &= ~RL_FLAG_LINK;

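	/*
	 * Disable the transmitter and receiver, mask all interrupts and
	 * wait for the MAC to go idle.
	 */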
	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_1(sc, RL_COMMAND) &
		    (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0)
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n");

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
			    sc->rl_cdata.rl_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
			    sc->rl_cdata.rl_tx_dmamap[i]);
			m_freem(sc->rl_cdata.rl_tx_chain[i]);
			sc->rl_cdata.rl_tx_chain[i] = NULL;
			CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
			    0x00000000);
		}
	}
}

/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
rl_suspend(device_t dev)
{
	struct rl_softc		*sc;

	sc = device_get_softc(dev);

	RL_LOCK(sc);
	rl_stop(sc);
	rl_setwol(sc);
	sc->suspended = 1;
	RL_UNLOCK(sc);

	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
rl_resume(device_t dev)
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	int			pmc;
	uint16_t		pmstat;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;

	RL_LOCK(sc);

	if ((ifp->if_capabilities & IFCAP_WOL) != 0 &&
	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->rl_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->rl_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
		/*
		 * Clear the WOL matching configuration so that it does
		 * not interfere with normal Rx filtering.
		 */
		rl_clrwol(sc);
	}

	/* Reinitialize the interface if necessary. */
	if (ifp->if_flags & IFF_UP)
		rl_init_locked(sc);

	sc->suspended = 0;

	RL_UNLOCK(sc);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
rl_shutdown(device_t dev)
{
	struct rl_softc		*sc;

	sc = device_get_softc(dev);

	RL_LOCK(sc);
	rl_stop(sc);
	/*
	 * Mark the interface as down; otherwise we would panic if an
	 * interrupt came in later on, which can happen in some cases.
	 */
	sc->rl_ifp->if_flags &= ~IFF_UP;
	rl_setwol(sc);
	RL_UNLOCK(sc);

	return (0);
}

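/*
 * Program the chip's Wake On LAN configuration from the interface's WOL
 * capability bits (e.g. as toggled with "ifconfig rl0 wol_magic") and
 * request PME from the PCI power management registers if any form of
 * wakeup was enabled.
 */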
static void
rl_setwol(struct rl_softc *sc)
{
	struct ifnet		*ifp;
	int			pmc;
	uint16_t		pmstat;
	uint8_t			v;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;
	if ((ifp->if_capabilities & IFCAP_WOL) == 0)
		return;
	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	/* Enable PME. */
	v = CSR_READ_1(sc, sc->rl_cfg1);
	v &= ~RL_CFG1_PME;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		v |= RL_CFG1_PME;
	CSR_WRITE_1(sc, sc->rl_cfg1, v);

	v = CSR_READ_1(sc, sc->rl_cfg3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		v |= RL_CFG3_WOL_MAGIC;
	CSR_WRITE_1(sc, sc->rl_cfg3, v);

	v = CSR_READ_1(sc, sc->rl_cfg5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
		v |= RL_CFG5_WOL_UCAST;
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		v |= RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, sc->rl_cfg5, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

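/*
 * Clear the WOL configuration programmed by rl_setwol() so that it does
 * not interfere with normal receive filtering once the chip is running
 * again.  Called from rl_resume().
 */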
static void
rl_clrwol(struct rl_softc *sc)
{
	struct ifnet		*ifp;
	uint8_t			v;

	ifp = sc->rl_ifp;
	if ((ifp->if_capabilities & IFCAP_WOL) == 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	v = CSR_READ_1(sc, sc->rl_cfg3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	CSR_WRITE_1(sc, sc->rl_cfg3, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	v = CSR_READ_1(sc, sc->rl_cfg5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, sc->rl_cfg5, v);
}
