/*-
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/pci/if_rl.c 226995 2011-11-01 16:13:59Z marius $");

/*
 * RealTek 8129/8139 PCI NIC driver
 *
 * Supports several extremely cheap PCI 10/100 adapters based on
 * the RealTek chipset. Datasheets can be obtained from
 * www.realtek.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
 * probably the worst PCI ethernet controller ever made, with the possible
 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
 * DMA, but it has a terrible interface that nullifies any performance
 * gains that bus-master DMA usually offers.
 *
 * For transmission, the chip offers a series of four TX descriptor
 * registers. Each transmit frame must be in a contiguous buffer, aligned
 * on a longword (32-bit) boundary. This means we almost always have to
 * do mbuf copies in order to transmit a frame, except in the unlikely
 * case where a) the packet fits into a single mbuf, and b) the packet
 * is 32-bit aligned within the mbuf's data area. The presence of only
 * four descriptor registers means that we can never have more than four
 * packets queued for transmission at any one time.
 *
 * Reception is not much better. The driver has to allocate a single large
 * buffer area (up to 64K in size) into which the chip will DMA received
 * frames. Because we don't know where within this region received packets
 * will begin or end, we have no choice but to copy data from the buffer
 * area into mbufs in order to pass the packets up to the higher protocol
 * levels.
 *
 * It's impossible given this rotten design to really achieve decent
 * performance at 100Mbps, unless you happen to have a 400MHz PII or
 * some equally overmuscled CPU to drive it.
 *
 * On the bright side, the 8139 does have a built-in PHY, although
 * rather than using an MDIO serial interface like most other NICs, the
 * PHY registers are directly accessible through the 8139's register
 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
 * filter.
 *
 * The 8129 chip is an older version of the 8139 that uses an external PHY
 * chip. The 8129 has a serial MDIO interface for accessing the MII where
 * the 8139 lets you directly access the on-board PHY registers. We need
 * to select which interface to use depending on the chip type.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(rl, pci, 1, 1, 1);
MODULE_DEPEND(rl, ether, 1, 1, 1);
MODULE_DEPEND(rl, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <pci/if_rlreg.h>

/*
 * Various supported device vendors/types and their names.
 */
static const struct rl_type rl_devs[] = {
	{ RT_VENDORID, RT_DEVICEID_8129, RL_8129,
		"RealTek 8129 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139, RL_8139,
		"RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139D, RL_8139,
		"RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8138, RL_8139,
		"RealTek 8139 10/100BaseTX CardBus" },
	{ RT_VENDORID, RT_DEVICEID_8100, RL_8139,
		"RealTek 8100 10/100BaseTX" },
	{ ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
		"Accton MPX 5030/5038 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
		"Delta Electronics 8139 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
		"Addtron Technology 8139 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
		"D-Link DFE-530TX+ 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
		"D-Link DFE-690TXD 10/100BaseTX" },
	{ NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
		"Nortel Networks 10/100BaseTX" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
		"Corega FEther CB-TXD" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
		"Corega FEtherII CB-TXD" },
	{ PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
		"Peppercon AG ROL-F" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139,
		"Planex FNW-3603-TX" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
		"Planex FNW-3800-TX" },
	{ CP_VENDORID, RT_DEVICEID_8139, RL_8139,
		"Compaq HNE-300" },
	{ LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
		"LevelOne FPC-0106TX" },
	{ EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
		"Edimax EP-4103DL CardBus" }
};

static int rl_attach(device_t);
static int rl_detach(device_t);
static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int rl_dma_alloc(struct rl_softc *);
static void rl_dma_free(struct rl_softc *);
static void rl_eeprom_putbyte(struct rl_softc *, int);
static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *);
static int rl_encap(struct rl_softc *, struct mbuf **);
static int rl_list_tx_init(struct rl_softc *);
static int rl_list_rx_init(struct rl_softc *);
static int rl_ifmedia_upd(struct ifnet *);
static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int rl_ioctl(struct ifnet *, u_long, caddr_t);
static void rl_intr(void *);
static void rl_init(void *);
static void rl_init_locked(struct rl_softc *sc);
static int rl_miibus_readreg(device_t, int, int);
static void rl_miibus_statchg(device_t);
static int rl_miibus_writereg(device_t, int, int, int);
#ifdef DEVICE_POLLING
static int rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
static int rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static int rl_probe(device_t);
static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
static void rl_reset(struct rl_softc *);
static int rl_resume(device_t);
static int rl_rxeof(struct rl_softc *);
static void rl_rxfilter(struct rl_softc *);
static int rl_shutdown(device_t);
static void rl_start(struct ifnet *);
static void rl_start_locked(struct ifnet *);
static void rl_stop(struct rl_softc *);
static int rl_suspend(device_t);
static void rl_tick(void *);
static void rl_txeof(struct rl_softc *);
static void rl_watchdog(struct rl_softc *);
static void rl_setwol(struct rl_softc *);
static void rl_clrwol(struct rl_softc *);

/*
 * MII bit-bang glue
 */
static uint32_t rl_mii_bitbang_read(device_t);
static void rl_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops rl_mii_bitbang_ops = {
	rl_mii_bitbang_read,
	rl_mii_bitbang_write,
	{
		RL_MII_DATAOUT,	/* MII_BIT_MDO */
		RL_MII_DATAIN,	/* MII_BIT_MDI */
		RL_MII_CLK,	/* MII_BIT_MDC */
		RL_MII_DIR,	/* MII_BIT_DIR_HOST_PHY */
		0,		/* MII_BIT_DIR_PHY_HOST */
	}
};

static device_method_t rl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rl_probe),
	DEVMETHOD(device_attach,	rl_attach),
	DEVMETHOD(device_detach,	rl_detach),
	DEVMETHOD(device_suspend,	rl_suspend),
	DEVMETHOD(device_resume,	rl_resume),
	DEVMETHOD(device_shutdown,	rl_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	rl_miibus_readreg),
	DEVMETHOD(miibus_writereg,	rl_miibus_writereg),
	DEVMETHOD(miibus_statchg,	rl_miibus_statchg),

	{ 0, 0 }
};

static driver_t rl_driver = {
	"rl",
	rl_methods,
	sizeof(struct rl_softc)
};

static devclass_t rl_devclass;

DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0);
DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0);
DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0);

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
rl_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	register int		d, i;

	d = addr | sc->rl_eecmd_read;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
{
	register int		i;
	uint16_t		word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	rl_eeprom_putbyte(sc, addr);

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
	int			i;
	uint16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		rl_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

/*
 * Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
rl_mii_bitbang_read(device_t dev)
{
	struct rl_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_1(sc, RL_MII);
	CSR_BARRIER(sc, RL_MII, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (val);
}

/*
 * Write the MII serial port for the MII bit-bang module.
 */
static void
rl_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct rl_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_1(sc, RL_MII, val);
	CSR_BARRIER(sc, RL_MII, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

static int
rl_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc		*sc;
	uint16_t		rl8139_reg;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8139) {
		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		/*
		 * Allow the rlphy driver to read the media status
		 * register. If we have a link partner which does not
		 * support NWAY, this is the register which will tell
		 * us the results of parallel detection.
		 */
		case RL_MEDIASTAT:
			return (CSR_READ_1(sc, RL_MEDIASTAT));
		default:
			device_printf(sc->rl_dev, "bad phy register\n");
			return (0);
		}
		return (CSR_READ_2(sc, rl8139_reg));
	}

	return (mii_bitbang_readreg(dev, &rl_mii_bitbang_ops, phy, reg));
}

static int
rl_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc		*sc;
	uint16_t		rl8139_reg;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8139) {
		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
			break;
		default:
			device_printf(sc->rl_dev, "bad phy register\n");
			return (0);
		}
		CSR_WRITE_2(sc, rl8139_reg, data);
		return (0);
	}

	mii_bitbang_writereg(dev, &rl_mii_bitbang_ops, phy, reg, data);

	return (0);
}

static void
rl_miibus_statchg(device_t dev)
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->rl_miibus);
	ifp = sc->rl_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/*
	 * RealTek controllers do not provide any interface to
	 * Tx/Rx MACs for resolved speed, duplex and flow-control
	 * parameters.
	 */
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
rl_rxfilter(struct rl_softc *sc)
{
	struct ifnet		*ifp = sc->rl_ifp;
	int			h = 0;
	uint32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	uint32_t		rxfilt;

	RL_LOCK_ASSERT(sc);

	rxfilt = CSR_READ_4(sc, RL_RXCFG);
	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
	    RL_RXCFG_RX_MULTI);
	/* Always accept frames destined for this host. */
	rxfilt |= RL_RXCFG_RX_INDIV;
	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		rxfilt |= RL_RXCFG_RX_BROAD;
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= RL_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		/* Now program new ones. */
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
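			/*
			 * Hash on the upper 6 bits of the big-endian CRC
			 * of the multicast address; the result selects
			 * one of the 64 bits in the MAR filter.
			 */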
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
		if_maddr_runlock(ifp);
		if (hashes[0] != 0 || hashes[1] != 0)
			rxfilt |= RL_RXCFG_RX_MULTI;
	}

	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}

static void
rl_reset(struct rl_softc *sc)
{
	register int		i;

	RL_LOCK_ASSERT(sc);

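	/*
	 * Request a software reset and then poll, in 10 us steps for up
	 * to RL_TIMEOUT iterations, until the chip clears the reset bit.
	 */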
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "reset never completed!\n");
}

/*
 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
rl_probe(device_t dev)
{
	const struct rl_type	*t;
	uint16_t		devid, revid, vendor;
	int			i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	revid = pci_get_revid(dev);

	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
		if (revid == 0x20) {
			/* 8139C+, let re(4) take care of this device. */
			return (ENXIO);
		}
	}
	t = rl_devs;
	for (i = 0; i < sizeof(rl_devs) / sizeof(rl_devs[0]); i++, t++) {
		if (vendor == t->rl_vid && devid == t->rl_did) {
			device_set_desc(dev, t->rl_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

struct rl_dmamap_arg {
	bus_addr_t	rl_busaddr;
};

static void
rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct rl_dmamap_arg	*ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct rl_dmamap_arg *)arg;
	ctx->rl_busaddr = segs[0].ds_addr;
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
rl_attach(device_t dev)
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	uint16_t		as[3];
	struct ifnet		*ifp;
	struct rl_softc		*sc;
	const struct rl_type	*t;
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	int			error = 0, hwrev, i, phy, pmc, rid;
	int			prefer_iomap, unit;
	uint16_t		rl_did = 0;
	char			tn[32];

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->rl_dev = dev;

	sc->rl_twister_enable = 0;
	snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit);
	TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable);
	ctx = device_get_sysctl_ctx(sc->rl_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD,
	   &sc->rl_twister_enable, 0, "");

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	pci_enable_busmaster(dev);

	/*
	 * Map control/status registers.
	 * Default to using PIO access for this driver. On SMP systems,
	 * there appear to be problems with memory mapped mode: it looks
	 * like doing too many memory mapped access back to back in rapid
	 * succession can hang the bus. I'm inclined to blame this on
	 * crummy design/construction on the part of RealTek. Memory
	 * mapped mode does appear to work on uniprocessor systems though.
	 */
	prefer_iomap = 1;
	snprintf(tn, sizeof(tn), "dev.rl.%d.prefer_iomap", unit);
	TUNABLE_INT_FETCH(tn, &prefer_iomap);
	if (prefer_iomap) {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (prefer_iomap == 0 || sc->rl_res == NULL) {
		sc->rl_res_id = PCIR_BAR(1);
		sc->rl_res_type = SYS_RES_MEMORY;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

#ifdef notdef
	/*
	 * Detect the Realtek 8139B. For some reason, this chip is very
	 * unstable when left to autoselect the media.
	 * The best workaround is to set the device to the required
	 * media type or to set it to the 10 Meg speed.
	 */
	if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF)
		device_printf(dev,
"Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n");
#endif

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	/* Allocate interrupt */
	rid = 0;
	sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->rl_irq[0] == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Reset the adapter. Only take the lock here as it's needed in
	 * order to call rl_reset().
	 */
	RL_LOCK(sc);
	rl_reset(sc);
	RL_UNLOCK(sc);

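	/*
	 * Determine the EEPROM addressing width: try a 6-bit read of
	 * word 0 first and, if it does not return the expected 0x8129
	 * signature, presumably a larger part is fitted, so fall back
	 * to 8-bit addressing.
	 */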
	sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
	rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0);
	if (rl_did != 0x8129)
		sc->rl_eecmd_read = RL_EECMD_READ_8BIT;

	/*
	 * Get station address from the EEPROM.
	 */
	rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
	for (i = 0; i < 3; i++) {
		eaddr[(i * 2) + 0] = as[i] & 0xff;
		eaddr[(i * 2) + 1] = as[i] >> 8;
	}

	/*
	 * Now read the exact device type from the EEPROM to find
	 * out if it's an 8129 or 8139.
	 */
	rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0);

	t = rl_devs;
	sc->rl_type = 0;
	while (t->rl_name != NULL) {
		if (rl_did == t->rl_did) {
			sc->rl_type = t->rl_basetype;
			break;
		}
		t++;
	}

	if (sc->rl_type == 0) {
		device_printf(dev, "unknown device ID: %x assuming 8139\n",
		    rl_did);
		sc->rl_type = RL_8139;
		/*
		 * Read RL_IDR register to get ethernet address as accessing
		 * EEPROM may not extract correct address.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	}

	if ((error = rl_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

#define	RL_PHYAD_INTERNAL	0

	/* Do MII setup */
	phy = MII_PHY_ANY;
	if (sc->rl_type == RL_8139)
		phy = RL_PHYAD_INTERNAL;
	error = mii_attach(dev, &sc->rl_miibus, ifp, rl_ifmedia_upd,
	    rl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = rl_ioctl;
	ifp->if_start = rl_start;
	ifp->if_init = rl_init;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	/* Check WOL for RTL8139B or newer controllers. */
	if (sc->rl_type == RL_8139 &&
	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
		hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
		switch (hwrev) {
		case RL_HWREV_8139B:
		case RL_HWREV_8130:
		case RL_HWREV_8139C:
		case RL_HWREV_8139D:
		case RL_HWREV_8101:
		case RL_HWREV_8100:
			ifp->if_capabilities |= IFCAP_WOL;
			/* Disable WOL. */
			rl_clrwol(sc);
			break;
		default:
			break;
		}
	}
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, rl_intr, sc, &sc->rl_intrhand[0]);
	if (error) {
		device_printf(sc->rl_dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
	}

fail:
	if (error)
		rl_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
rl_detach(device_t dev)
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;

	KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		RL_LOCK(sc);
		rl_stop(sc);
		RL_UNLOCK(sc);
		callout_drain(&sc->rl_stat_callout);
		ether_ifdetach(ifp);
	}
#if 0
	sc->suspended = 1;
#endif
	if (sc->rl_miibus)
		device_delete_child(dev, sc->rl_miibus);
	bus_generic_detach(dev);

	if (sc->rl_intrhand[0])
		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
	if (sc->rl_irq[0])
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]);
	if (sc->rl_res)
		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
		    sc->rl_res);

	if (ifp)
		if_free(ifp);

	rl_dma_free(sc);

	mtx_destroy(&sc->rl_mtx);

	return (0);
}

static int
rl_dma_alloc(struct rl_softc *sc)
{
	struct rl_dmamap_arg	ctx;
	int			error, i;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_parent_tag);
	if (error) {
		device_printf(sc->rl_dev,
		    "failed to create parent DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Rx memory block. */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
	    RL_RX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1,	/* maxsize,nsegments */
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_cdata.rl_rx_tag);
	if (error) {
		device_printf(sc->rl_dev,
		    "failed to create Rx memory block DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Tx buffer. */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
	    RL_TX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_cdata.rl_tx_tag);
	if (error) {
		device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
		goto fail;
	}

	/*
	 * Allocate DMA'able memory and load DMA map for Rx memory block.
	 */
	error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
	    (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
	if (error != 0) {
		device_printf(sc->rl_dev,
		    "failed to allocate Rx DMA memory block.\n");
		goto fail;
	}
	ctx.rl_busaddr = 0;
	error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
	    sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
	    BUS_DMA_NOWAIT);
	if (error != 0 || ctx.rl_busaddr == 0) {
		device_printf(sc->rl_dev,
		    "could not load Rx DMA memory block.\n");
		goto fail;
	}
	sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		sc->rl_cdata.rl_tx_chain[i] = NULL;
		sc->rl_cdata.rl_tx_dmamap[i] = NULL;
		error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
		    &sc->rl_cdata.rl_tx_dmamap[i]);
		if (error != 0) {
			device_printf(sc->rl_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

	/* Leave a few bytes before the start of the RX ring buffer. */
	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
	sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;

fail:
	return (error);
}

static void
rl_dma_free(struct rl_softc *sc)
{
	int			i;

	/* Rx memory block. */
	if (sc->rl_cdata.rl_rx_tag != NULL) {
		if (sc->rl_cdata.rl_rx_dmamap != NULL)
			bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
			    sc->rl_cdata.rl_rx_dmamap);
		if (sc->rl_cdata.rl_rx_dmamap != NULL &&
		    sc->rl_cdata.rl_rx_buf_ptr != NULL)
			bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
			    sc->rl_cdata.rl_rx_buf_ptr,
			    sc->rl_cdata.rl_rx_dmamap);
		sc->rl_cdata.rl_rx_buf_ptr = NULL;
		sc->rl_cdata.rl_rx_buf = NULL;
		sc->rl_cdata.rl_rx_dmamap = NULL;
		bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
		sc->rl_cdata.rl_rx_tag = NULL;
	}

	/* Tx buffers. */
	if (sc->rl_cdata.rl_tx_tag != NULL) {
		for (i = 0; i < RL_TX_LIST_CNT; i++) {
			if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
				bus_dmamap_destroy(
				    sc->rl_cdata.rl_tx_tag,
				    sc->rl_cdata.rl_tx_dmamap[i]);
				sc->rl_cdata.rl_tx_dmamap[i] = NULL;
			}
		}
		bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
		sc->rl_cdata.rl_tx_tag = NULL;
	}

	if (sc->rl_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->rl_parent_tag);
		sc->rl_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
rl_list_tx_init(struct rl_softc *sc)
{
	struct rl_chain_data	*cd;
	int			i;

	RL_LOCK_ASSERT(sc);

	cd = &sc->rl_cdata;
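	/*
	 * Forget any queued mbufs and clear the four TX start-address
	 * registers.
	 */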
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		cd->rl_tx_chain[i] = NULL;
		CSR_WRITE_4(sc,
		    RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
	}

	sc->rl_cdata.cur_tx = 0;
	sc->rl_cdata.last_tx = 0;

	return (0);
}

static int
rl_list_rx_init(struct rl_softc *sc)
{

	RL_LOCK_ASSERT(sc);

	bzero(sc->rl_cdata.rl_rx_buf_ptr,
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * You know there's something wrong with a PCI bus-master chip design
 * when you have to use m_devget().
 *
 * The receive operation is badly documented in the datasheet, so I'll
 * attempt to document it here. The driver provides a buffer area and
 * places its base address in the RX buffer start address register.
 * The chip then begins copying frames into the RX buffer. Each frame
 * is preceded by a 32-bit RX status word which specifies the length
 * of the frame and certain other status bits. Each frame (starting with
 * the status word) is also 32-bit aligned. The frame length is in the
 * first 16 bits of the status word; the lower 15 bits correspond with
 * the 'rx status register' mentioned in the datasheet.
 *
 * Note: to make the Alpha happy, the frame payload needs to be aligned
 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
 * as the offset argument to m_devget().
 */
static int
rl_rxeof(struct rl_softc *sc)
{
	struct mbuf		*m;
	struct ifnet		*ifp = sc->rl_ifp;
	uint8_t			*rxbufpos;
	int			total_len = 0;
	int			wrap = 0;
	int			rx_npkts = 0;
	uint32_t		rxstat;
	uint16_t		cur_rx;
	uint16_t		limit;
	uint16_t		max_bytes, rx_bytes = 0;

	RL_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

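	/*
	 * The chip's current-read pointer (RL_CURRXADDR) lags the actual
	 * ring offset by 16 bytes, so compensate here and again when the
	 * register is written back below.
	 */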
	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;

	if (limit < cur_rx)
		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;

	while ((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
		rxstat = le32toh(*(uint32_t *)rxbufpos);

		/*
		 * Here's a totally undocumented fact for you. When the
		 * RealTek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * RealTek should be shot for this.
		 */
		total_len = rxstat >> 16;
		if (total_len == RL_RXSTAT_UNFINISHED)
			break;

		if (!(rxstat & RL_RXSTAT_RXOK) ||
		    total_len < ETHER_MIN_LEN ||
		    total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
			ifp->if_ierrors++;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			rl_init_locked(sc);
			return (rx_npkts);
		}

		/* No errors; receive the packet. */
		rx_bytes += total_len + 4;

		/*
		 * XXX The RealTek chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Avoid trying to read more bytes than we know
		 * the chip has prepared for us.
		 */
		if (rx_bytes > max_bytes)
			break;

		rxbufpos = sc->rl_cdata.rl_rx_buf +
			((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN);
		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
			rxbufpos = sc->rl_cdata.rl_rx_buf;

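		/*
		 * If the frame wraps past the end of the ring, copy the
		 * tail of it from the start of the buffer.
		 */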
		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
		if (total_len > wrap) {
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			if (m != NULL)
				m_copyback(m, wrap, total_len - wrap,
					sc->rl_cdata.rl_rx_buf);
			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
		} else {
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			cur_rx += total_len + 4 + ETHER_CRC_LEN;
		}

		/* Round up to 32-bit boundary. */
		cur_rx = (cur_rx + 3) & ~3;
		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);

		if (m == NULL) {
			ifp->if_iqdrops++;
			continue;
		}

		ifp->if_ipackets++;
		RL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		RL_LOCK(sc);
		rx_npkts++;
	}

	/* No need to sync Rx memory block as we didn't modify it. */
	return (rx_npkts);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
rl_txeof(struct rl_softc *sc)
{
	struct ifnet		*ifp = sc->rl_ifp;
	uint32_t		txstat;

	RL_LOCK_ASSERT(sc);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded.
	 */
	do {
		if (RL_LAST_TXMBUF(sc) == NULL)
			break;
		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
		if (!(txstat & (RL_TXSTAT_TX_OK|
		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
			break;

		ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;

		bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc));
		m_freem(RL_LAST_TXMBUF(sc));
		RL_LAST_TXMBUF(sc) = NULL;
		/*
		 * If there was a transmit underrun, bump the TX threshold.
		 * Make sure not to exceed the 2016 bytes (63 * 32 bytes)
		 * that can be addressed with the 6 available bits.
		 */
		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
		    (sc->rl_txthresh < 2016))
			sc->rl_txthresh += 32;
		if (txstat & RL_TXSTAT_TX_OK)
			ifp->if_opackets++;
		else {
			int			oldthresh;
			ifp->if_oerrors++;
			if ((txstat & RL_TXSTAT_TXABRT) ||
			    (txstat & RL_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
			oldthresh = sc->rl_txthresh;
			/* error recovery */
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			rl_init_locked(sc);
			/* restore original threshold */
			sc->rl_txthresh = oldthresh;
			return;
		}
		RL_INC(sc->rl_cdata.last_tx);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);

	if (RL_LAST_TXMBUF(sc) == NULL)
		sc->rl_watchdog_timer = 0;
}

static void
rl_twister_update(struct rl_softc *sc)
{
	uint16_t linktest;
	/*
	 * Table provided by RealTek (Kinston <shangh@realtek.com.tw>) for
	 * Linux driver.  Values undocumented otherwise.
	 */
	static const uint32_t param[4][4] = {
		{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
		{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
	};

	/*
	 * Tune the so-called twister registers of the RTL8139.  These
	 * are used to compensate for impedance mismatches.  The
	 * method for tuning these registers is undocumented and the
	 * following procedure is collected from public sources.
	 */
	switch (sc->rl_twister)
	{
	case CHK_LINK:
		/*
		 * If we have a sufficient link, then we can proceed in
		 * the state machine to the next stage.  If not, then
		 * disable further tuning after writing sane defaults.
		 */
		if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) {
			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD);
			sc->rl_twister = FIND_ROW;
		} else {
			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD);
			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
			CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
			sc->rl_twister = DONE;
		}
		break;
	case FIND_ROW:
		/*
		 * Read how long it took to see the echo to find the tuning
		 * row to use.
		 */
		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
		if (linktest == RL_CSCFG_ROW3)
			sc->rl_twist_row = 3;
		else if (linktest == RL_CSCFG_ROW2)
			sc->rl_twist_row = 2;
		else if (linktest == RL_CSCFG_ROW1)
			sc->rl_twist_row = 1;
		else
			sc->rl_twist_row = 0;
		sc->rl_twist_col = 0;
		sc->rl_twister = SET_PARAM;
		break;
	case SET_PARAM:
		if (sc->rl_twist_col == 0)
			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
		CSR_WRITE_4(sc, RL_PARA7C,
		    param[sc->rl_twist_row][sc->rl_twist_col]);
		if (++sc->rl_twist_col == 4) {
			if (sc->rl_twist_row == 3)
				sc->rl_twister = RECHK_LONG;
			else
				sc->rl_twister = DONE;
		}
		break;
	case RECHK_LONG:
		/*
		 * For long cables, we have to double check to make sure we
		 * don't mistune.
		 */
		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
		if (linktest == RL_CSCFG_ROW3)
			sc->rl_twister = DONE;
		else {
			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE);
			sc->rl_twister = RETUNE;
		}
		break;
	case RETUNE:
		/* Retune for a shorter cable (try column 2) */
		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
		CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
		CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
		sc->rl_twist_row--;
		sc->rl_twist_col = 0;
		sc->rl_twister = SET_PARAM;
		break;

	case DONE:
		break;
	}

}

static void
rl_tick(void *xsc)
{
	struct rl_softc		*sc = xsc;
	struct mii_data		*mii;
	int ticks;

	RL_LOCK_ASSERT(sc);
	/*
	 * If we're doing the twister cable calibration, then we need to defer
	 * watchdog timeouts.  This is a no-op in normal operations, but
	 * can falsely trigger when the cable calibration takes a while and
	 * there was traffic ready to go when rl was started.
	 *
	 * We don't defer mii_tick since that updates the mii status, which
	 * helps the twister process, at least according to similar patches
	 * for the Linux driver I found online while doing the fixes.  Worst
	 * case is a few extra mii reads during calibration.
	 */
	mii = device_get_softc(sc->rl_miibus);
	mii_tick(mii);
	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
		rl_miibus_statchg(sc->rl_dev);
	if (sc->rl_twister_enable) {
		if (sc->rl_twister == DONE)
			rl_watchdog(sc);
		else
			rl_twister_update(sc);
		if (sc->rl_twister == DONE)
			ticks = hz;
		else
			ticks = hz / 10;
	} else {
		rl_watchdog(sc);
		ticks = hz;
	}

	callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc);
}

#ifdef DEVICE_POLLING
static int
rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	RL_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		rx_npkts = rl_poll_locked(ifp, cmd, count);
	RL_UNLOCK(sc);
	return (rx_npkts);
}

static int
rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;
	int rx_npkts;

	RL_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	rx_npkts = rl_rxeof(sc);
	rl_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		rl_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t	status;

		/* We should also check the status register. */
		status = CSR_READ_2(sc, RL_ISR);
		if (status == 0xffff)
			return (rx_npkts);
		if (status != 0)
			CSR_WRITE_2(sc, RL_ISR, status);

		/* XXX We should check behaviour on receiver stalls. */

		if (status & RL_ISR_SYSTEM_ERR) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			rl_init_locked(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static void
rl_intr(void *arg)
{
	struct rl_softc		*sc = arg;
	struct ifnet		*ifp = sc->rl_ifp;
	uint16_t		status;
	int			count;

	RL_LOCK(sc);

	if (sc->suspended)
		goto done_locked;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		goto done_locked;
#endif

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done_locked2;
	status = CSR_READ_2(sc, RL_ISR);
	if (status == 0xffff || (status & RL_INTRS) == 0)
		goto done_locked;
	/*
	 * Ours, disable further interrupts.
	 */
	CSR_WRITE_2(sc, RL_IMR, 0);
	for (count = 16; count > 0; count--) {
		CSR_WRITE_2(sc, RL_ISR, status);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR))
				rl_rxeof(sc);
			if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR))
				rl_txeof(sc);
			if (status & RL_ISR_SYSTEM_ERR) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				rl_init_locked(sc);
				RL_UNLOCK(sc);
				return;
			}
		}
		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away, the read returns 0xffff. */
		if (status == 0xffff || (status & RL_INTRS) == 0)
			break;
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		rl_start_locked(ifp);

done_locked2:
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
done_locked:
	RL_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
rl_encap(struct rl_softc *sc, struct mbuf **m_head)
{
	struct mbuf		*m;
	bus_dma_segment_t	txsegs[1];
	int			error, nsegs, padlen;

	RL_LOCK_ASSERT(sc);

	m = *m_head;
	padlen = 0;
	/*
	 * Hardware doesn't auto-pad, so we have to make sure to
	 * pad short frames out to the minimum frame length.
1554	 */
1555	if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
1556		padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
1557	/*
1558	 * The RealTek is brain damaged and wants longword-aligned
1559	 * TX buffers, plus we can only have one fragment buffer
1560	 * per packet. We have to copy pretty much all the time.
1561	 */
1562	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
1563	    (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
1564		m = m_defrag(*m_head, M_DONTWAIT);
1565		if (m == NULL) {
1566			m_freem(*m_head);
1567			*m_head = NULL;
1568			return (ENOMEM);
1569		}
1570	}
1571	*m_head = m;
1572
1573	if (padlen > 0) {
1574		/*
1575		 * Make security-conscious people happy: zero out the
1576		 * bytes in the pad area, since we don't know what
1577		 * this mbuf cluster buffer's previous user might
1578		 * have left in it.
1579		 */
1580		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1581		m->m_pkthdr.len += padlen;
1582		m->m_len = m->m_pkthdr.len;
1583	}
1584
1585	error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
1586	    RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
1587	if (error != 0)
1588		return (error);
1589	if (nsegs == 0) {
1590		m_freem(*m_head);
1591		*m_head = NULL;
1592		return (EIO);
1593	}
1594
1595	RL_CUR_TXMBUF(sc) = m;
1596	bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
1597	    BUS_DMASYNC_PREWRITE);
1598	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));
1599
1600	return (0);
1601}
1602
1603/*
1604 * Main transmit routine.
1605 */
1606static void
1607rl_start(struct ifnet *ifp)
1608{
1609	struct rl_softc		*sc = ifp->if_softc;
1610
1611	RL_LOCK(sc);
1612	rl_start_locked(ifp);
1613	RL_UNLOCK(sc);
1614}
1615
1616static void
1617rl_start_locked(struct ifnet *ifp)
1618{
1619	struct rl_softc		*sc = ifp->if_softc;
1620	struct mbuf		*m_head = NULL;
1621
1622	RL_LOCK_ASSERT(sc);
1623
1624	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1625	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
1626		return;
1627
1628	while (RL_CUR_TXMBUF(sc) == NULL) {
1629
1630		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1631
1632		if (m_head == NULL)
1633			break;
1634
1635		if (rl_encap(sc, &m_head)) {
1636			if (m_head == NULL)
1637				break;
1638			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1639			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1640			break;
1641		}
1642
1643		/* Pass a copy of this mbuf chain to the bpf subsystem. */
1644		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));
1645
1646		/* Transmit the frame. */
1647		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
1648		    RL_TXTHRESH(sc->rl_txthresh) |
1649		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);
1650
1651		RL_INC(sc->rl_cdata.cur_tx);
1652
1653		/* Set a timeout in case the chip goes out to lunch. */
1654		sc->rl_watchdog_timer = 5;
1655	}
1656
1657	/*
1658	 * We broke out of the loop because all our TX slots are
1659	 * full. Mark the NIC as busy until it drains some of the
1660	 * packets from the queue.
1661	 */
1662	if (RL_CUR_TXMBUF(sc) != NULL)
1663		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1664}
1665
1666static void
1667rl_init(void *xsc)
1668{
1669	struct rl_softc		*sc = xsc;
1670
1671	RL_LOCK(sc);
1672	rl_init_locked(sc);
1673	RL_UNLOCK(sc);
1674}
1675
1676static void
1677rl_init_locked(struct rl_softc *sc)
1678{
1679	struct ifnet		*ifp = sc->rl_ifp;
1680	struct mii_data		*mii;
1681	uint32_t		eaddr[2];
1682
1683	RL_LOCK_ASSERT(sc);
1684
1685	mii = device_get_softc(sc->rl_miibus);
1686
1687	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1688		return;
1689
1690	/*
1691	 * Cancel pending I/O and free all RX/TX buffers.
1692	 */
1693	rl_stop(sc);
1694
1695	rl_reset(sc);
1696	if (sc->rl_twister_enable) {
1697		/*
1698		 * Reset twister register tuning state.  The twister
1699		 * registers and their tuning are undocumented, but
1700		 * are necessary to cope with bad links.  rl_twister =
1701		 * DONE here will disable this entirely.
1702		 */
1703		sc->rl_twister = CHK_LINK;
1704	}
1705
1706	/*
1707	 * Init our MAC address.  Even though the chipset
1708	 * documentation doesn't mention it, we need to enter "Config
1709	 * register write enable" mode to modify the ID registers.
1710	 */
1711	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
1712	bzero(eaddr, sizeof(eaddr));
1713	bcopy(IF_LLADDR(sc->rl_ifp), eaddr, ETHER_ADDR_LEN);
1714	CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]);
1715	CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]);
1716	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1717
1718	/* Init the RX memory block pointer register. */
1719	CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr +
1720	    RL_RX_8139_BUF_RESERVE);
1721	/* Init TX descriptors. */
1722	rl_list_tx_init(sc);
1723	/* Init Rx memory block. */
1724	rl_list_rx_init(sc);
1725
1726	/*
1727	 * Enable transmit and receive.
1728	 */
1729	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
1730
1731	/*
1732	 * Set the initial TX and RX configuration.
1733	 */
1734	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1735	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
1736
1737	/* Set RX filter. */
1738	rl_rxfilter(sc);
1739
1740#ifdef DEVICE_POLLING
1741	/* Disable interrupts if we are polling. */
1742	if (ifp->if_capenable & IFCAP_POLLING)
1743		CSR_WRITE_2(sc, RL_IMR, 0);
1744	else
1745#endif
1746	/* Enable interrupts. */
1747	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1748
1749	/* Set initial TX threshold */
1750	sc->rl_txthresh = RL_TX_THRESH_INIT;
1751
1752	/* Start RX/TX process. */
1753	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
1754
	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	sc->rl_flags &= ~RL_FLAG_LINK;
	mii_mediachg(mii);

	CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
}

/*
 * Set media options.
 */
static int
rl_ifmedia_upd(struct ifnet *ifp)
{
	struct rl_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->rl_miibus);

	RL_LOCK(sc);
	mii_mediachg(mii);
	RL_UNLOCK(sc);

	return (0);
}

/*
 * Report current media status.
 */
static void
rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rl_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->rl_miibus);

	RL_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	RL_UNLOCK(sc);
}

static int
rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq		*ifr = (struct ifreq *)data;
	struct mii_data		*mii;
	struct rl_softc		*sc = ifp->if_softc;
	int			error = 0, mask;

	switch (command) {
	case SIOCSIFFLAGS:
		RL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
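			/*
			 * If the interface is already running and only the
			 * PROMISC or ALLMULTI flags changed, just reprogram
			 * the RX filter; otherwise do a full reinit.
			 */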
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ((ifp->if_flags ^ sc->rl_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)))
				rl_rxfilter(sc);
			else
				rl_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rl_stop(sc);
		sc->rl_if_flags = ifp->if_flags;
		RL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		RL_LOCK(sc);
		rl_rxfilter(sc);
		RL_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->rl_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
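		/*
		 * When polling is switched on, register the poll handler
		 * before masking the chip's interrupts; when it is switched
		 * off, interrupts are re-enabled after deregistering.
		 */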
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(rl_poll, ifp);
			if (error)
				return (error);
			RL_LOCK(sc);
			/* Disable interrupts. */
			CSR_WRITE_2(sc, RL_IMR, 0x0000);
			ifp->if_capenable |= IFCAP_POLLING;
			RL_UNLOCK(sc);
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			RL_LOCK(sc);
			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			RL_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_WOL) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_UCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_UCAST;
			if ((mask & IFCAP_WOL_MCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
rl_watchdog(struct rl_softc *sc)
{

	RL_LOCK_ASSERT(sc);

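	/*
	 * The timer is armed by the transmit path and counted down
	 * once per second from rl_tick(); only a transition to zero
	 * here is treated as a real timeout.
	 */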
	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer > 0)
		return;

	device_printf(sc->rl_dev, "watchdog timeout\n");
	sc->rl_ifp->if_oerrors++;

	rl_txeof(sc);
	rl_rxeof(sc);
	sc->rl_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	rl_init_locked(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
rl_stop(struct rl_softc *sc)
{
	register int		i;
	struct ifnet		*ifp = sc->rl_ifp;

	RL_LOCK_ASSERT(sc);

	sc->rl_watchdog_timer = 0;
	callout_stop(&sc->rl_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->rl_flags &= ~RL_FLAG_LINK;

	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
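	/*
	 * Wait for the receiver and transmitter to report idle; each
	 * iteration below delays 10us, for at most RL_TIMEOUT iterations.
	 */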
	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_1(sc, RL_COMMAND) &
		    (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0)
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n");

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
			    sc->rl_cdata.rl_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
			    sc->rl_cdata.rl_tx_dmamap[i]);
			m_freem(sc->rl_cdata.rl_tx_chain[i]);
			sc->rl_cdata.rl_tx_chain[i] = NULL;
			CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
			    0x00000000);
		}
	}
}

/*
 * Device suspend routine.  Stop the interface and arm the WOL
 * configuration so the chip can wake the system if requested.
 */
static int
rl_suspend(device_t dev)
{
	struct rl_softc		*sc;

	sc = device_get_softc(dev);

	RL_LOCK(sc);
	rl_stop(sc);
	rl_setwol(sc);
	sc->suspended = 1;
	RL_UNLOCK(sc);

	return (0);
}

/*
 * Device resume routine.  Disable PME, clear the WOL configuration,
 * and restart the interface if appropriate.
 */
static int
rl_resume(device_t dev)
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	int			pmc;
	uint16_t		pmstat;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;

	RL_LOCK(sc);

	if ((ifp->if_capabilities & IFCAP_WOL) != 0 &&
	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->rl_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->rl_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
		/*
		 * Clear WOL matching so that normal RX filtering
		 * doesn't interfere with WOL patterns.
		 */
		rl_clrwol(sc);
	}

	/* Reinitialize interface if necessary. */
	if (ifp->if_flags & IFF_UP)
		rl_init_locked(sc);

	sc->suspended = 0;

	RL_UNLOCK(sc);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
rl_shutdown(device_t dev)
{
	struct rl_softc		*sc;

	sc = device_get_softc(dev);

	RL_LOCK(sc);
	rl_stop(sc);
	/*
	 * Mark the interface as down since otherwise we will panic if
	 * an interrupt comes in later on, which can happen in some
	 * cases.
	 */
	sc->rl_ifp->if_flags &= ~IFF_UP;
	rl_setwol(sc);
	RL_UNLOCK(sc);

	return (0);
}

static void
rl_setwol(struct rl_softc *sc)
{
	struct ifnet		*ifp;
	int			pmc;
	uint16_t		pmstat;
	uint8_t			v;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;
	if ((ifp->if_capabilities & IFCAP_WOL) == 0)
		return;
	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	/* Enable PME. */
	v = CSR_READ_1(sc, RL_CFG1);
	v &= ~RL_CFG1_PME;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		v |= RL_CFG1_PME;
	CSR_WRITE_1(sc, RL_CFG1, v);

	v = CSR_READ_1(sc, RL_CFG3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		v |= RL_CFG3_WOL_MAGIC;
	CSR_WRITE_1(sc, RL_CFG3, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	v = CSR_READ_1(sc, RL_CFG5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
		v |= RL_CFG5_WOL_UCAST;
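	/*
	 * The chip has separate multicast and broadcast wakeup bits but
	 * there is only one IFCAP_WOL_MCAST capability, so enable both
	 * of them together.
	 */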
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		v |= RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, RL_CFG5, v);
	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static void
rl_clrwol(struct rl_softc *sc)
{
	struct ifnet		*ifp;
	uint8_t			v;

	ifp = sc->rl_ifp;
	if ((ifp->if_capabilities & IFCAP_WOL) == 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	v = CSR_READ_1(sc, RL_CFG3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	CSR_WRITE_1(sc, RL_CFG3, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	v = CSR_READ_1(sc, RL_CFG5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, RL_CFG5, v);
}