/*-
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/pci/if_rl.c 184245 2008-10-25 03:41:36Z yongari $");

/*
 * RealTek 8129/8139 PCI NIC driver
 *
 * Supports several extremely cheap PCI 10/100 adapters based on
 * the RealTek chipset. Datasheets can be obtained from
 * www.realtek.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
 * probably the worst PCI ethernet controller ever made, with the possible
 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
 * DMA, but it has a terrible interface that nullifies any performance
 * gains that bus-master DMA usually offers.
 *
 * For transmission, the chip offers a series of four TX descriptor
 * registers. Each transmit frame must be in a contiguous buffer, aligned
 * on a longword (32-bit) boundary. This means we almost always have to
 * do mbuf copies in order to transmit a frame, except in the unlikely
 * case where a) the packet fits into a single mbuf, and b) the packet
 * is 32-bit aligned within the mbuf's data area. The presence of only
 * four descriptor registers means that we can never have more than four
 * packets queued for transmission at any one time.
 *
 * Reception is not much better. The driver has to allocate a single large
 * buffer area (up to 64K in size) into which the chip will DMA received
 * frames. Because we don't know where within this region received packets
 * will begin or end, we have no choice but to copy data from the buffer
 * area into mbufs in order to pass the packets up to the higher protocol
 * levels.
 *
 * It's impossible given this rotten design to really achieve decent
 * performance at 100Mbps, unless you happen to have a 400MHz PII or
 * some equally overmuscled CPU to drive it.
 *
 * On the bright side, the 8139 does have a built-in PHY, although
 * rather than using an MDIO serial interface like most other NICs, the
 * PHY registers are directly accessible through the 8139's register
 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
 * filter.
 *
 * The 8129 chip is an older version of the 8139 that uses an external PHY
 * chip. The 8129 has a serial MDIO interface for accessing the MII where
 * the 8139 lets you directly access the on-board PHY registers. We need
 * to select which interface to use depending on the chip type.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(rl, pci, 1, 1, 1);
MODULE_DEPEND(rl, ether, 1, 1, 1);
MODULE_DEPEND(rl, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Default to using PIO access for this driver. On SMP systems,
 * there appear to be problems with memory mapped mode: it looks like
 * doing too many memory mapped accesses back to back in rapid succession
 * can hang the bus. I'm inclined to blame this on crummy design/construction
 * on the part of RealTek. Memory mapped mode does appear to work on
 * uniprocessor systems though.
 */
#define RL_USEIOSPACE

#include <pci/if_rlreg.h>

/*
 * Various supported device vendors/types and their names.
 */
static struct rl_type rl_devs[] = {
	{ RT_VENDORID, RT_DEVICEID_8129, RL_8129,
		"RealTek 8129 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139, RL_8139,
		"RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139D, RL_8139,
		"RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8138, RL_8139,
		"RealTek 8139 10/100BaseTX CardBus" },
	{ RT_VENDORID, RT_DEVICEID_8100, RL_8139,
		"RealTek 8100 10/100BaseTX" },
	{ ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
		"Accton MPX 5030/5038 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
		"Delta Electronics 8139 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
		"Addtron Technology 8139 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
		"D-Link DFE-530TX+ 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
		"D-Link DFE-690TXD 10/100BaseTX" },
	{ NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
		"Nortel Networks 10/100BaseTX" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
		"Corega FEther CB-TXD" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
		"Corega FEtherII CB-TXD" },
	{ PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
		"Peppercon AG ROL-F" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139,
		"Planex FNW-3603-TX" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
		"Planex FNW-3800-TX" },
	{ CP_VENDORID, RT_DEVICEID_8139, RL_8139,
		"Compaq HNE-300" },
	{ LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
		"LevelOne FPC-0106TX" },
	{ EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
		"Edimax EP-4103DL CardBus" }
};

static int rl_attach(device_t);
static int rl_detach(device_t);
static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int rl_dma_alloc(struct rl_softc *);
static void rl_dma_free(struct rl_softc *);
static void rl_eeprom_putbyte(struct rl_softc *, int);
static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *);
static int rl_encap(struct rl_softc *, struct mbuf **);
static int rl_list_tx_init(struct rl_softc *);
static int rl_list_rx_init(struct rl_softc *);
static int rl_ifmedia_upd(struct ifnet *);
static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int rl_ioctl(struct ifnet *, u_long, caddr_t);
static void rl_intr(void *);
static void rl_init(void *);
static void rl_init_locked(struct rl_softc *sc);
static void rl_mii_send(struct rl_softc *, uint32_t, int);
static void rl_mii_sync(struct rl_softc *);
static int rl_mii_readreg(struct rl_softc *, struct rl_mii_frame *);
static int rl_mii_writereg(struct rl_softc *, struct rl_mii_frame *);
static int rl_miibus_readreg(device_t, int, int);
static void rl_miibus_statchg(device_t);
static int rl_miibus_writereg(device_t, int, int, int);
#ifdef DEVICE_POLLING
static void rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
static void rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static int rl_probe(device_t);
static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
static void rl_reset(struct rl_softc *);
static int rl_resume(device_t);
static void rl_rxeof(struct rl_softc *);
static void rl_setmulti(struct rl_softc *);
static int rl_shutdown(device_t);
static void rl_start(struct ifnet *);
static void rl_start_locked(struct ifnet *);
static void rl_stop(struct rl_softc *);
static int rl_suspend(device_t);
static void rl_tick(void *);
static void rl_txeof(struct rl_softc *);
static void rl_watchdog(struct rl_softc *);

#ifdef RL_USEIOSPACE
#define RL_RES			SYS_RES_IOPORT
#define RL_RID			RL_PCI_LOIO
#else
#define RL_RES			SYS_RES_MEMORY
#define RL_RID			RL_PCI_LOMEM
#endif

static device_method_t rl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rl_probe),
	DEVMETHOD(device_attach,	rl_attach),
	DEVMETHOD(device_detach,	rl_detach),
	DEVMETHOD(device_suspend,	rl_suspend),
	DEVMETHOD(device_resume,	rl_resume),
	DEVMETHOD(device_shutdown,	rl_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	rl_miibus_readreg),
	DEVMETHOD(miibus_writereg,	rl_miibus_writereg),
	DEVMETHOD(miibus_statchg,	rl_miibus_statchg),

	{ 0, 0 }
};

static driver_t rl_driver = {
	"rl",
	rl_methods,
	sizeof(struct rl_softc)
};

static devclass_t rl_devclass;

DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0);
DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0);
DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0);

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
rl_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	register int		d, i;

	d = addr | sc->rl_eecmd_read;

	/*
	 * Feed in each bit and strobe the clock.
	 */
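	/* The combined opcode/address word is shifted out MSB-first, 11 bits starting at 0x400. */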
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
{
	register int		i;
	uint16_t		word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	rl_eeprom_putbyte(sc, addr);

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Start reading bits from EEPROM.
	 */
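	/* Clock in 16 data bits, MSB first; DATAOUT is sampled while the clock is high. */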
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
	int			i;
	uint16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		rl_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

/*
 * MII access routines are provided for the 8129, which
 * doesn't have a built-in PHY. For the 8139, we fake things
 * up by diverting rl_phy_readreg()/rl_phy_writereg() to the
 * direct access PHY registers.
 */
#define MII_SET(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) & ~(x))

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
rl_mii_sync(struct rl_softc *sc)
{
	register int		i;

	MII_SET(RL_MII_DIR|RL_MII_DATAOUT);

	for (i = 0; i < 32; i++) {
		MII_SET(RL_MII_CLK);
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
rl_mii_send(struct rl_softc *sc, uint32_t bits, int cnt)
{
	int			i;

	MII_CLR(RL_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(RL_MII_DATAOUT);
		} else {
			MII_CLR(RL_MII_DATAOUT);
		}
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		MII_SET(RL_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
rl_mii_readreg(struct rl_softc *sc, struct rl_mii_frame *frame)
{
	int			i, ack;

	/* Set up frame for RX. */
	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, RL_MII, 0);

	/* Turn on data xmit. */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	/* Send command/address info. */
	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);
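	/* 14 bits sent so far: start delimiter (2) + opcode (2) + PHY address (5) + register (5). */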

	/* Idle bit */
	MII_CLR((RL_MII_CLK|RL_MII_DATAOUT));
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(RL_MII_DIR);

	/* Check for ack */
	MII_CLR(RL_MII_CLK);
	DELAY(1);
	ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN;
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(RL_MII_CLK);
			DELAY(1);
			MII_SET(RL_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(RL_MII_CLK);
		DELAY(1);
	}

fail:
	MII_CLR(RL_MII_CLK);
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	return (ack ? 1 : 0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
rl_mii_writereg(struct rl_softc *sc, struct rl_mii_frame *frame)
{

	/* Set up frame for TX. */
	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_WRITEOP;
	frame->mii_turnaround = RL_MII_TURNAROUND;

	/* Turn on data output. */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);
	rl_mii_send(sc, frame->mii_turnaround, 2);
	rl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(RL_MII_CLK);
	DELAY(1);
	MII_CLR(RL_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(RL_MII_DIR);

	return (0);
}

static int
rl_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc		*sc;
	struct rl_mii_frame	frame;
	uint16_t		rval = 0;
	uint16_t		rl8139_reg = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8139) {
		/* Pretend the internal PHY is only at address 0 */
		if (phy) {
			return (0);
		}
		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		/*
		 * Allow the rlphy driver to read the media status
		 * register. If we have a link partner which does not
		 * support NWAY, this is the register which will tell
		 * us the results of parallel detection.
		 */
		case RL_MEDIASTAT:
			rval = CSR_READ_1(sc, RL_MEDIASTAT);
			return (rval);
		default:
			device_printf(sc->rl_dev, "bad phy register\n");
			return (0);
		}
		rval = CSR_READ_2(sc, rl8139_reg);
		return (rval);
	}

	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	rl_mii_readreg(sc, &frame);

	return (frame.mii_data);
}

static int
rl_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc		*sc;
	struct rl_mii_frame	frame;
	uint16_t		rl8139_reg = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8139) {
		/* Pretend the internal PHY is only at address 0 */
		if (phy) {
			return (0);
		}
		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		default:
			device_printf(sc->rl_dev, "bad phy register\n");
			return (0);
		}
		CSR_WRITE_2(sc, rl8139_reg, data);
		return (0);
	}

	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;
	rl_mii_writereg(sc, &frame);

	return (0);
}

static void
rl_miibus_statchg(device_t dev)
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->rl_miibus);
	ifp = sc->rl_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/*
	 * RealTek controllers do not provide any interface to
	 * Tx/Rx MACs for resolved speed, duplex and flow-control
	 * parameters.
	 */
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
rl_setmulti(struct rl_softc *sc)
{
	struct ifnet		*ifp = sc->rl_ifp;
	int			h = 0;
	uint32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	uint32_t		rxfilt;
	int			mcnt = 0;

	RL_LOCK_ASSERT(sc);

	rxfilt = CSR_READ_4(sc, RL_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= RL_RXCFG_RX_MULTI;
		CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
		CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, RL_MAR0, 0);
	CSR_WRITE_4(sc, RL_MAR4, 0);

	/* now program new ones */
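	/*
	 * The top 6 bits of the big-endian CRC of each address select one
	 * of the 64 filter bits spread across the two 32-bit MAR registers.
	 */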
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	if (mcnt)
		rxfilt |= RL_RXCFG_RX_MULTI;
	else
		rxfilt &= ~RL_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
}

static void
rl_reset(struct rl_softc *sc)
{
	register int		i;

	RL_LOCK_ASSERT(sc);

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "reset never completed!\n");
}

/*
 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
rl_probe(device_t dev)
{
	struct rl_type		*t;
	uint16_t		devid, revid, vendor;
	int			i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	revid = pci_get_revid(dev);

	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
		if (revid == 0x20) {
			/* 8139C+, let re(4) take care of this device. */
			return (ENXIO);
		}
	}
	t = rl_devs;
	for (i = 0; i < sizeof(rl_devs) / sizeof(rl_devs[0]); i++, t++) {
		if (vendor == t->rl_vid && devid == t->rl_did) {
			device_set_desc(dev, t->rl_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

struct rl_dmamap_arg {
	bus_addr_t	rl_busaddr;
};

static void
rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct rl_dmamap_arg	*ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct rl_dmamap_arg *)arg;
	ctx->rl_busaddr = segs[0].ds_addr;
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
rl_attach(device_t dev)
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	uint16_t		as[3];
	struct ifnet		*ifp;
	struct rl_softc		*sc;
	struct rl_type		*t;
	int			error = 0, i, rid;
	int			unit;
	uint16_t		rl_did = 0;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->rl_dev = dev;

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	pci_enable_busmaster(dev);

	/* Map control/status registers. */
	rid = RL_RID;
	sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid, RF_ACTIVE);

	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

#ifdef notdef
	/*
	 * Detect the Realtek 8139B. For some reason, this chip is very
	 * unstable when left to autoselect the media.
	 * The best workaround is to set the device to the required
	 * media type or to set it to the 10 Meg speed.
	 */
	if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF)
		device_printf(dev,
"Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n");
#endif

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	/* Allocate interrupt */
	rid = 0;
	sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->rl_irq[0] == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Reset the adapter. Only take the lock here as it's needed in
	 * order to call rl_reset().
	 */
	RL_LOCK(sc);
	rl_reset(sc);
	RL_UNLOCK(sc);
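
	/*
	 * Probe the EEPROM address width: try a 6-bit read of word 0 first
	 * and fall back to 8-bit addressing if the expected ID value does
	 * not come back.
	 */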
	sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
	rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0);
	if (rl_did != 0x8129)
		sc->rl_eecmd_read = RL_EECMD_READ_8BIT;

	/*
	 * Get station address from the EEPROM.
	 */
	rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
	for (i = 0; i < 3; i++) {
		eaddr[(i * 2) + 0] = as[i] & 0xff;
		eaddr[(i * 2) + 1] = as[i] >> 8;
	}

	/*
	 * Now read the exact device type from the EEPROM to find
	 * out if it's an 8129 or 8139.
	 */
	rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0);

	t = rl_devs;
	sc->rl_type = 0;
	while (t->rl_name != NULL) {
		if (rl_did == t->rl_did) {
			sc->rl_type = t->rl_basetype;
			break;
		}
		t++;
	}

	if (sc->rl_type == 0) {
		device_printf(dev, "unknown device ID: %x\n", rl_did);
		error = ENXIO;
		goto fail;
	}

	if ((error = rl_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->rl_miibus,
	    rl_ifmedia_upd, rl_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = rl_ioctl;
	ifp->if_start = rl_start;
	ifp->if_init = rl_init;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, rl_intr, sc, &sc->rl_intrhand[0]);
	if (error) {
		device_printf(sc->rl_dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
	}

fail:
	if (error)
		rl_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
rl_detach(device_t dev)
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;

	KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		RL_LOCK(sc);
		rl_stop(sc);
		RL_UNLOCK(sc);
		callout_drain(&sc->rl_stat_callout);
		ether_ifdetach(ifp);
	}
#if 0
	sc->suspended = 1;
#endif
	if (sc->rl_miibus)
		device_delete_child(dev, sc->rl_miibus);
	bus_generic_detach(dev);

	if (sc->rl_intrhand[0])
		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
	if (sc->rl_irq[0])
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]);
	if (sc->rl_res)
		bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res);

	if (ifp)
		if_free(ifp);

	rl_dma_free(sc);

	mtx_destroy(&sc->rl_mtx);

	return (0);
}

static int
rl_dma_alloc(struct rl_softc *sc)
{
	struct rl_dmamap_arg	ctx;
	int			error, i;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_parent_tag);
	if (error) {
		device_printf(sc->rl_dev,
		    "failed to create parent DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Rx memory block. */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
	    RL_RX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1,	/* maxsize,nsegments */
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_cdata.rl_rx_tag);
	if (error) {
		device_printf(sc->rl_dev,
		    "failed to create Rx memory block DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Tx buffer. */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
	    RL_TX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_cdata.rl_tx_tag);
	if (error) {
		device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
		goto fail;
	}

	/*
	 * Allocate DMA'able memory and load DMA map for Rx memory block.
	 */
	error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
	    (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
	if (error != 0) {
		device_printf(sc->rl_dev,
		    "failed to allocate Rx DMA memory block.\n");
		goto fail;
	}
	ctx.rl_busaddr = 0;
	error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
	    sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
	    BUS_DMA_NOWAIT);
	if (error != 0 || ctx.rl_busaddr == 0) {
		device_printf(sc->rl_dev,
		    "could not load Rx DMA memory block.\n");
		goto fail;
	}
	sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		sc->rl_cdata.rl_tx_chain[i] = NULL;
		sc->rl_cdata.rl_tx_dmamap[i] = NULL;
		error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
		    &sc->rl_cdata.rl_tx_dmamap[i]);
		if (error != 0) {
			device_printf(sc->rl_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

	/* Leave a few bytes before the start of the RX ring buffer. */
	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
	sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;

fail:
	return (error);
}

static void
rl_dma_free(struct rl_softc *sc)
{
	int			i;

	/* Rx memory block. */
	if (sc->rl_cdata.rl_rx_tag != NULL) {
		if (sc->rl_cdata.rl_rx_dmamap != NULL)
			bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
			    sc->rl_cdata.rl_rx_dmamap);
		if (sc->rl_cdata.rl_rx_dmamap != NULL &&
		    sc->rl_cdata.rl_rx_buf_ptr != NULL)
			bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
			    sc->rl_cdata.rl_rx_buf_ptr,
			    sc->rl_cdata.rl_rx_dmamap);
		sc->rl_cdata.rl_rx_buf_ptr = NULL;
		sc->rl_cdata.rl_rx_buf = NULL;
		sc->rl_cdata.rl_rx_dmamap = NULL;
		bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
		sc->rl_cdata.rl_rx_tag = NULL;
	}

	/* Tx buffers. */
	if (sc->rl_cdata.rl_tx_tag != NULL) {
		for (i = 0; i < RL_TX_LIST_CNT; i++) {
			if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
				bus_dmamap_destroy(
				    sc->rl_cdata.rl_tx_tag,
				    sc->rl_cdata.rl_tx_dmamap[i]);
				sc->rl_cdata.rl_tx_dmamap[i] = NULL;
			}
		}
		bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
		sc->rl_cdata.rl_tx_tag = NULL;
	}

	if (sc->rl_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->rl_parent_tag);
		sc->rl_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
rl_list_tx_init(struct rl_softc *sc)
{
	struct rl_chain_data	*cd;
	int			i;

	RL_LOCK_ASSERT(sc);

	cd = &sc->rl_cdata;
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		cd->rl_tx_chain[i] = NULL;
		CSR_WRITE_4(sc,
		    RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
	}

	sc->rl_cdata.cur_tx = 0;
	sc->rl_cdata.last_tx = 0;

	return (0);
}

static int
rl_list_rx_init(struct rl_softc *sc)
{

	RL_LOCK_ASSERT(sc);

	bzero(sc->rl_cdata.rl_rx_buf_ptr,
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * You know there's something wrong with a PCI bus-master chip design
 * when you have to use m_devget().
 *
 * The receive operation is badly documented in the datasheet, so I'll
 * attempt to document it here. The driver provides a buffer area and
 * places its base address in the RX buffer start address register.
 * The chip then begins copying frames into the RX buffer. Each frame
 * is preceded by a 32-bit RX status word which specifies the length
 * of the frame and certain other status bits. Each frame (starting with
 * the status word) is also 32-bit aligned. The frame length is in the
 * first 16 bits of the status word; the lower 15 bits correspond with
 * the 'rx status register' mentioned in the datasheet.
 *
 * Note: to make the Alpha happy, the frame payload needs to be aligned
 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
 * as the offset argument to m_devget().
 */
static void
rl_rxeof(struct rl_softc *sc)
{
	struct mbuf		*m;
	struct ifnet		*ifp = sc->rl_ifp;
	uint8_t			*rxbufpos;
	int			total_len = 0;
	int			wrap = 0;
	uint32_t		rxstat;
	uint16_t		cur_rx;
	uint16_t		limit;
	uint16_t		max_bytes, rx_bytes = 0;

	RL_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

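	/*
	 * The current-read-address register trails the driver's read
	 * pointer by 16 bytes, hence the +16 here and the matching -16
	 * when the register is rewritten at the bottom of the loop.
	 */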
	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;

	if (limit < cur_rx)
		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;

	while ((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
		rxstat = le32toh(*(uint32_t *)rxbufpos);

		/*
		 * Here's a totally undocumented fact for you. When the
		 * RealTek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * RealTek should be shot for this.
		 */
		total_len = rxstat >> 16;
		if (total_len == RL_RXSTAT_UNFINISHED)
			break;

		if (!(rxstat & RL_RXSTAT_RXOK) ||
		    total_len < ETHER_MIN_LEN ||
		    total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
			ifp->if_ierrors++;
			rl_init_locked(sc);
			return;
		}

		/* No errors; receive the packet. */
		rx_bytes += total_len + 4;

		/*
		 * XXX The RealTek chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Avoid trying to read more bytes than we know
		 * the chip has prepared for us.
		 */
		if (rx_bytes > max_bytes)
			break;

		rxbufpos = sc->rl_cdata.rl_rx_buf +
			((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN);
		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
			rxbufpos = sc->rl_cdata.rl_rx_buf;

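		/*
		 * 'wrap' is the number of bytes between the start of the
		 * packet data and the physical end of the ring; anything
		 * beyond that continues at the start of the buffer.
		 */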
		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
		if (total_len > wrap) {
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			if (m == NULL) {
				ifp->if_ierrors++;
			} else {
				m_copyback(m, wrap, total_len - wrap,
					sc->rl_cdata.rl_rx_buf);
			}
			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
		} else {
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			if (m == NULL)
				ifp->if_ierrors++;
			cur_rx += total_len + 4 + ETHER_CRC_LEN;
		}

		/* Round up to 32-bit boundary. */
		cur_rx = (cur_rx + 3) & ~3;
		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);

		if (m == NULL)
			continue;

		ifp->if_ipackets++;
		RL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		RL_LOCK(sc);
	}

	/* No need to sync Rx memory block as we didn't modify it. */
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
rl_txeof(struct rl_softc *sc)
{
	struct ifnet		*ifp = sc->rl_ifp;
	uint32_t		txstat;

	RL_LOCK_ASSERT(sc);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded.
	 */
	do {
		if (RL_LAST_TXMBUF(sc) == NULL)
			break;
		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
		if (!(txstat & (RL_TXSTAT_TX_OK|
		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
			break;

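		/* The per-packet collision count sits in bits 24 and up of the TX status word. */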
		ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;

		bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc));
		m_freem(RL_LAST_TXMBUF(sc));
		RL_LAST_TXMBUF(sc) = NULL;
		/*
		 * If there was a transmit underrun, bump the TX threshold.
		 * Make sure not to overflow the 63 * 32-byte range we can
		 * address with the 6 available bits.
		 */
		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
		    (sc->rl_txthresh < 2016))
			sc->rl_txthresh += 32;
		if (txstat & RL_TXSTAT_TX_OK)
			ifp->if_opackets++;
		else {
			int			oldthresh;
			ifp->if_oerrors++;
			if ((txstat & RL_TXSTAT_TXABRT) ||
			    (txstat & RL_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
			oldthresh = sc->rl_txthresh;
			/* error recovery */
			rl_init_locked(sc);
			/* restore original threshold */
			sc->rl_txthresh = oldthresh;
			return;
		}
		RL_INC(sc->rl_cdata.last_tx);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);

	if (RL_LAST_TXMBUF(sc) == NULL)
		sc->rl_watchdog_timer = 0;
}

static void
rl_tick(void *xsc)
{
	struct rl_softc		*sc = xsc;
	struct mii_data		*mii;

	RL_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->rl_miibus);
	mii_tick(mii);

	rl_watchdog(sc);

	callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
}

#ifdef DEVICE_POLLING
static void
rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		rl_poll_locked(ifp, cmd, count);
	RL_UNLOCK(sc);
}

static void
rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	rl_rxeof(sc);
	rl_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		rl_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t	status;

		/* We should also check the status register. */
		status = CSR_READ_2(sc, RL_ISR);
		if (status == 0xffff)
			return;
		if (status != 0)
			CSR_WRITE_2(sc, RL_ISR, status);

		/* XXX We should check behaviour on receiver stalls. */

		if (status & RL_ISR_SYSTEM_ERR)
			rl_init_locked(sc);
	}
}
#endif /* DEVICE_POLLING */

static void
rl_intr(void *arg)
{
	struct rl_softc		*sc = arg;
	struct ifnet		*ifp = sc->rl_ifp;
	uint16_t		status;

	RL_LOCK(sc);

	if (sc->suspended)
		goto done_locked;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		goto done_locked;
#endif

	for (;;) {
		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away, the read returns 0xffff. */
		if (status == 0xffff)
			break;
		if (status != 0)
			CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & RL_INTRS) == 0)
			break;
		if (status & RL_ISR_RX_OK)
			rl_rxeof(sc);
		if (status & RL_ISR_RX_ERR)
			rl_rxeof(sc);
		if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR))
			rl_txeof(sc);
		if (status & RL_ISR_SYSTEM_ERR)
			rl_init_locked(sc);
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		rl_start_locked(ifp);

done_locked:
	RL_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
rl_encap(struct rl_softc *sc, struct mbuf **m_head)
{
	struct mbuf		*m;
	bus_dma_segment_t	txsegs[1];
	int			error, nsegs, padlen;

	RL_LOCK_ASSERT(sc);

	m = *m_head;
	padlen = 0;
	/*
	 * Hardware doesn't auto-pad, so we have to make sure to pad
	 * short frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
		padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
	/*
	 * The RealTek is brain damaged and wants longword-aligned
	 * TX buffers, plus we can only have one fragment buffer
	 * per packet. We have to copy pretty much all the time.
	 */
	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
	    (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
	}
	*m_head = m;

	if (padlen > 0) {
		/*
		 * Make security conscious people happy: zero out the
		 * bytes in the pad area, since we don't know what
		 * this mbuf cluster buffer's previous user might
		 * have left in it.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
	    RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
	if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	RL_CUR_TXMBUF(sc) = m;
	bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
	    BUS_DMASYNC_PREWRITE);
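	/*
	 * Only the buffer address is programmed here; the transfer itself
	 * is started later when rl_start_locked() writes the frame length
	 * and TX threshold into the matching TX status register.
	 */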
	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));

	return (0);
}

/*
 * Main transmit routine.
 */
static void
rl_start(struct ifnet *ifp)
{
	struct rl_softc		*sc = ifp->if_softc;

	RL_LOCK(sc);
	rl_start_locked(ifp);
	RL_UNLOCK(sc);
}

static void
rl_start_locked(struct ifnet *ifp)
{
	struct rl_softc		*sc = ifp->if_softc;
	struct mbuf		*m_head = NULL;

	RL_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
		return;

	while (RL_CUR_TXMBUF(sc) == NULL) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL)
			break;

		if (rl_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Pass a copy of this mbuf chain to the bpf subsystem. */
		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));

		/* Transmit the frame. */
		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
		    RL_TXTHRESH(sc->rl_txthresh) |
		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);

		RL_INC(sc->rl_cdata.cur_tx);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->rl_watchdog_timer = 5;
	}

	/*
	 * We broke out of the loop because all our TX slots are
	 * full. Mark the NIC as busy until it drains some of the
	 * packets from the queue.
	 */
	if (RL_CUR_TXMBUF(sc) != NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
rl_init(void *xsc)
{
	struct rl_softc		*sc = xsc;

	RL_LOCK(sc);
	rl_init_locked(sc);
	RL_UNLOCK(sc);
}

static void
rl_init_locked(struct rl_softc *sc)
{
	struct ifnet		*ifp = sc->rl_ifp;
	struct mii_data		*mii;
	uint32_t		rxcfg = 0;
	uint32_t		eaddr[2];

	RL_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->rl_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	rl_stop(sc);

	rl_reset(sc);

	/*
	 * Init our MAC address.  Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	bzero(eaddr, sizeof(eaddr));
	bcopy(IF_LLADDR(sc->rl_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]);
	CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/* Init the RX memory block pointer register. */
	CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr +
	    RL_RX_8139_BUF_RESERVE);
	/* Init TX descriptors. */
	rl_list_tx_init(sc);
	/* Init Rx memory block. */
	rl_list_rx_init(sc);

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 */
	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);

	/* Set the individual bit to receive frames for this host only. */
	rxcfg = CSR_READ_4(sc, RL_RXCFG);
	rxcfg |= RL_RXCFG_RX_INDIV;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		rxcfg |= RL_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	}

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		rxcfg |= RL_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RL_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	}

	/* Program the multicast filter, if necessary. */
	rl_setmulti(sc);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else
#endif
	/* Enable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);

	/* Set initial TX threshold */
	sc->rl_txthresh = RL_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	sc->rl_flags &= ~RL_FLAG_LINK;
	mii_mediachg(mii);

	CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
}

/*
 * Set media options.
 */
static int
rl_ifmedia_upd(struct ifnet *ifp)
{
	struct rl_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->rl_miibus);

	RL_LOCK(sc);
	mii_mediachg(mii);
	RL_UNLOCK(sc);

	return (0);
}

/*
 * Report current media status.
 */
static void
rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rl_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->rl_miibus);

	RL_LOCK(sc);
	mii_pollstat(mii);
	RL_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq		*ifr = (struct ifreq *)data;
	struct mii_data		*mii;
	struct rl_softc		*sc = ifp->if_softc;
	int			error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		RL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			rl_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rl_stop(sc);
		}
		RL_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		RL_LOCK(sc);
		rl_setmulti(sc);
		RL_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->rl_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(rl_poll, ifp);
			if (error)
				return (error);
			RL_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_2(sc, RL_IMR, 0x0000);
			ifp->if_capenable |= IFCAP_POLLING;
			RL_UNLOCK(sc);
			return (error);

		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			RL_LOCK(sc);
			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			RL_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
rl_watchdog(struct rl_softc *sc)
{

	RL_LOCK_ASSERT(sc);

	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer > 0)
		return;

	device_printf(sc->rl_dev, "watchdog timeout\n");
	sc->rl_ifp->if_oerrors++;

	rl_txeof(sc);
	rl_rxeof(sc);
	rl_init_locked(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
rl_stop(struct rl_softc *sc)
{
	register int		i;
	struct ifnet		*ifp = sc->rl_ifp;

	RL_LOCK_ASSERT(sc);

	sc->rl_watchdog_timer = 0;
	callout_stop(&sc->rl_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->rl_flags &= ~RL_FLAG_LINK;

	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_1(sc, RL_COMMAND) &
		    (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0)
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n");

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
			    sc->rl_cdata.rl_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
			    sc->rl_cdata.rl_tx_dmamap[i]);
			m_freem(sc->rl_cdata.rl_tx_chain[i]);
			sc->rl_cdata.rl_tx_chain[i] = NULL;
			CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
			    0x0000000);
		}
	}
}

/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
rl_suspend(device_t dev)
{
	struct rl_softc		*sc;

	sc = device_get_softc(dev);

	RL_LOCK(sc);
	rl_stop(sc);
	sc->suspended = 1;
	RL_UNLOCK(sc);

	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
rl_resume(device_t dev)
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;

	RL_LOCK(sc);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		rl_init_locked(sc);

	sc->suspended = 0;

	RL_UNLOCK(sc);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
rl_shutdown(device_t dev)
{
	struct rl_softc		*sc;

	sc = device_get_softc(dev);

	RL_LOCK(sc);
	rl_stop(sc);
	RL_UNLOCK(sc);

	return (0);
}
1964