/*-
 * Copyright (c) 1997 Semen Ustimenko (semenu@FreeBSD.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/dev/tx/if_tx.c 298955 2016-05-03 03:41:25Z pfg $");

/*
 * EtherPower II 10/100 Fast Ethernet (SMC 9432 series)
 *
 * These cards are based on the SMC83c17x (EPIC) chip and one of various
 * PHYs (QS6612, AC101 and LXT970 were seen). The media support depends on
 * the card model. All cards support 10baseT/UTP and 100baseTX half- and
 * full-duplex (SMC9432TX). The SMC9432BTX also supports 10baseT/BNC. The
 * SMC9432FTX also supports fibre optics.
 *
 * Thanks go to Steve Bauer and Jason Wright.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"

#include <dev/mii/lxtphyreg.h>

#include "miibus_if.h"

#include <dev/tx/if_txreg.h>
#include <dev/tx/if_txvar.h>

MODULE_DEPEND(tx, pci, 1, 1, 1);
MODULE_DEPEND(tx, ether, 1, 1, 1);
MODULE_DEPEND(tx, miibus, 1, 1, 1);

static int epic_ifioctl(struct ifnet *, u_long, caddr_t);
static void epic_intr(void *);
static void epic_tx_underrun(epic_softc_t *);
static void epic_ifstart(struct ifnet *);
static void epic_ifstart_locked(struct ifnet *);
static void epic_timer(void *);
static void epic_init(void *);
static void epic_init_locked(epic_softc_t *);
static void epic_stop(epic_softc_t *);
static void epic_rx_done(epic_softc_t *);
static void epic_tx_done(epic_softc_t *);
static int epic_init_rings(epic_softc_t *);
static void epic_free_rings(epic_softc_t *);
static void epic_stop_activity(epic_softc_t *);
static int epic_queue_last_packet(epic_softc_t *);
static void epic_start_activity(epic_softc_t *);
static void epic_set_rx_mode(epic_softc_t *);
static void epic_set_tx_mode(epic_softc_t *);
static void epic_set_mc_table(epic_softc_t *);
static int epic_read_eeprom(epic_softc_t *, u_int16_t);
static void epic_output_eepromw(epic_softc_t *, u_int16_t);
static u_int16_t epic_input_eepromw(epic_softc_t *);
static u_int8_t epic_eeprom_clock(epic_softc_t *, u_int8_t);
static void epic_write_eepromreg(epic_softc_t *, u_int8_t);
static u_int8_t epic_read_eepromreg(epic_softc_t *);

static int epic_read_phy_reg(epic_softc_t *, int, int);
static void epic_write_phy_reg(epic_softc_t *, int, int, int);

static int epic_miibus_readreg(device_t, int, int);
static int epic_miibus_writereg(device_t, int, int, int);
static void epic_miibus_statchg(device_t);
static void epic_miibus_mediainit(device_t);

static int epic_ifmedia_upd(struct ifnet *);
static int epic_ifmedia_upd_locked(struct ifnet *);
static void epic_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int epic_probe(device_t);
static int epic_attach(device_t);
static int epic_shutdown(device_t);
static int epic_detach(device_t);
static void epic_release(epic_softc_t *);
static struct epic_type *epic_devtype(device_t);

static device_method_t epic_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		epic_probe),
	DEVMETHOD(device_attach,	epic_attach),
	DEVMETHOD(device_detach,	epic_detach),
	DEVMETHOD(device_shutdown,	epic_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	epic_miibus_readreg),
	DEVMETHOD(miibus_writereg,	epic_miibus_writereg),
	DEVMETHOD(miibus_statchg,	epic_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	epic_miibus_mediainit),

	{ 0, 0 }
};

static driver_t epic_driver = {
	"tx",
	epic_methods,
	sizeof(epic_softc_t)
};

static devclass_t epic_devclass;

DRIVER_MODULE(tx, pci, epic_driver, epic_devclass, 0, 0);
DRIVER_MODULE(miibus, tx, miibus_driver, miibus_devclass, 0, 0);

static struct epic_type epic_devs[] = {
	{ SMC_VENDORID, SMC_DEVICEID_83C170, "SMC EtherPower II 10/100" },
	{ 0, 0, NULL }
};

static int
epic_probe(device_t dev)
{
	struct epic_type *t;

	t = epic_devtype(dev);

	if (t != NULL) {
		device_set_desc(dev, t->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static struct epic_type *
epic_devtype(device_t dev)
{
	struct epic_type *t;

	t = epic_devs;

	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->ven_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			return (t);
		}
		t++;
	}
	return (NULL);
}

#ifdef EPIC_USEIOSPACE
#define	EPIC_RES	SYS_RES_IOPORT
#define	EPIC_RID	PCIR_BASEIO
#else
#define	EPIC_RES	SYS_RES_MEMORY
#define	EPIC_RID	PCIR_BASEMEM
#endif

static void
epic_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;
}

/*
 * Attach routine: map registers, allocate softc, rings and descriptors.
 * Reset to known state.
 */
static int
epic_attach(device_t dev)
{
	struct ifnet *ifp;
	epic_softc_t *sc;
	int error;
	int i, rid, tmp;
	u_char eaddr[6];

	sc = device_get_softc(dev);

	/* Preinitialize softc structure. */
	sc->dev = dev;
	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/* Fill ifnet structure. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST;
	ifp->if_ioctl = epic_ifioctl;
	ifp->if_start = epic_ifstart;
	ifp->if_init = epic_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_RING_SIZE - 1);

	/* Enable busmastering. */
	pci_enable_busmaster(dev);

	rid = EPIC_RID;
	sc->res = bus_alloc_resource_any(dev, EPIC_RES, &rid, RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt. */
	rid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate DMA tags. */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * EPIC_MAX_FRAGS, EPIC_MAX_FRAGS, MCLBYTES, 0, NULL, NULL,
	    &sc->mtag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}

	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct epic_rx_desc) * RX_RING_SIZE,
	    1, sizeof(struct epic_rx_desc) * RX_RING_SIZE, 0, NULL,
	    NULL, &sc->rtag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}

	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct epic_tx_desc) * TX_RING_SIZE,
	    1, sizeof(struct epic_tx_desc) * TX_RING_SIZE, 0,
	    NULL, NULL, &sc->ttag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}

	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct epic_frag_list) * TX_RING_SIZE,
	    1, sizeof(struct epic_frag_list) * TX_RING_SIZE, 0,
	    NULL, NULL, &sc->ftag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}

	/* Allocate DMA safe memory and get the DMA addresses. */
	error = bus_dmamem_alloc(sc->ftag, (void **)&sc->tx_flist,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->ftag, sc->fmap, sc->tx_flist,
	    sizeof(struct epic_frag_list) * TX_RING_SIZE, epic_dma_map_addr,
	    &sc->frag_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->tmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc,
	    sizeof(struct epic_tx_desc) * TX_RING_SIZE, epic_dma_map_addr,
	    &sc->tx_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc,
	    sizeof(struct epic_rx_desc) * RX_RING_SIZE, epic_dma_map_addr,
	    &sc->rx_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}

	/* Bring the chip out of low-power mode. */
	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
	DELAY(500);

	/* Workaround for Application Note 7-15. */
	for (i = 0; i < 16; i++)
		CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);

	/* Read MAC address from EEPROM. */
	for (i = 0; i < ETHER_ADDR_LEN / sizeof(u_int16_t); i++)
		((u_int16_t *)eaddr)[i] = epic_read_eeprom(sc, i);

	/* Set Non-Volatile Control Register from EEPROM. */
	CSR_WRITE_4(sc, NVCTL, epic_read_eeprom(sc, EEPROM_NVCTL) & 0x1F);

	/* Set defaults. */
	sc->tx_threshold = TRANSMIT_THRESHOLD;
	sc->txcon = TXCON_DEFAULT;
	sc->miicfg = MIICFG_SMI_ENABLE;
	sc->phyid = EPIC_UNKN_PHY;
	sc->serinst = -1;

	/* Fetch card id. */
	sc->cardvend = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->cardid = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (sc->cardvend != SMC_VENDORID)
		device_printf(dev, "unknown card vendor %04xh\n", sc->cardvend);

	/* Do ifmedia setup. */
	error = mii_attach(dev, &sc->miibus, ifp, epic_ifmedia_upd,
	    epic_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* Print the board type string stored in the EEPROM. */
	printf(" type ");
	for (i = 0x2c; i < 0x32; i++) {
		tmp = epic_read_eeprom(sc, i);
		if (' ' == (u_int8_t)tmp)
			break;
		printf("%c", (u_int8_t)tmp);
		tmp >>= 8;
		if (' ' == (u_int8_t)tmp)
			break;
		printf("%c", (u_int8_t)tmp);
	}
	printf("\n");

	/* Initialize rings. */
	if (epic_init_rings(sc)) {
		device_printf(dev, "failed to init rings\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	callout_init_mtx(&sc->timer, &sc->lock, 0);

	/* Attach to OS's managers. */
	ether_ifattach(ifp, eaddr);

	/* Activate our interrupt handler. */
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, epic_intr, sc, &sc->sc_ih);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return (0);
fail:
	epic_release(sc);
	return (error);
}

/*
 * Free any resources allocated by the driver.
 */
static void
epic_release(epic_softc_t *sc)
{
	if (sc->ifp != NULL)
		if_free(sc->ifp);
	if (sc->irq)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->res)
		bus_release_resource(sc->dev, EPIC_RES, EPIC_RID, sc->res);
	epic_free_rings(sc);
	if (sc->tx_flist) {
		bus_dmamap_unload(sc->ftag, sc->fmap);
		bus_dmamem_free(sc->ftag, sc->tx_flist, sc->fmap);
	}
	if (sc->tx_desc) {
		bus_dmamap_unload(sc->ttag, sc->tmap);
		bus_dmamem_free(sc->ttag, sc->tx_desc, sc->tmap);
	}
	if (sc->rx_desc) {
		bus_dmamap_unload(sc->rtag, sc->rmap);
		bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap);
	}
	if (sc->mtag)
		bus_dma_tag_destroy(sc->mtag);
	if (sc->ftag)
		bus_dma_tag_destroy(sc->ftag);
	if (sc->ttag)
		bus_dma_tag_destroy(sc->ttag);
	if (sc->rtag)
		bus_dma_tag_destroy(sc->rtag);
	mtx_destroy(&sc->lock);
}

/*
 * Detach driver and free resources.
 */
static int
epic_detach(device_t dev)
{
	struct ifnet *ifp;
	epic_softc_t *sc;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	EPIC_LOCK(sc);
	epic_stop(sc);
	EPIC_UNLOCK(sc);
	callout_drain(&sc->timer);
	ether_ifdetach(ifp);
	bus_teardown_intr(dev, sc->irq, sc->sc_ih);

	bus_generic_detach(dev);
	device_delete_child(dev, sc->miibus);

	epic_release(sc);
	return (0);
}

#undef	EPIC_RES
#undef	EPIC_RID

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
epic_shutdown(device_t dev)
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	EPIC_LOCK(sc);
	epic_stop(sc);
	EPIC_UNLOCK(sc);
	return (0);
}

/*
 * The if_ioctl handler.
 */
static int
epic_ifioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	epic_softc_t *sc = ifp->if_softc;
	struct mii_data	*mii;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;

		/* XXX Though the datasheet doesn't imply any
		 * limitations on RX and TX sizes besides the 64KB
		 * max DMA transfer, it seems we can't send more
		 * than 1600 data bytes per Ethernet packet (the
		 * transmitter hangs up if more data is sent).
		 */
		EPIC_LOCK(sc);
		if (ifr->ifr_mtu + ifp->if_hdrlen <= EPIC_MAX_MTU) {
			ifp->if_mtu = ifr->ifr_mtu;
			epic_stop(sc);
			epic_init_locked(sc);
		} else
			error = EINVAL;
		EPIC_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		/*
		 * If the interface is marked up and stopped, then start it.
		 * If it is marked down and running, then stop it.
		 */
		EPIC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
				epic_init_locked(sc);
				EPIC_UNLOCK(sc);
				break;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				epic_stop(sc);
				EPIC_UNLOCK(sc);
				break;
			}
		}

		/* Handle IFF_PROMISC and IFF_ALLMULTI flags. */
		epic_stop_activity(sc);
		epic_set_mc_table(sc);
		epic_set_rx_mode(sc);
		epic_start_activity(sc);
		EPIC_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		EPIC_LOCK(sc);
		epic_set_mc_table(sc);
		EPIC_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
epic_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct epic_frag_list *flist;
	int i;

	if (error)
		return;

	KASSERT(nseg <= EPIC_MAX_FRAGS, ("too many DMA segments"));
	flist = arg;
	/* Fill fragments list. */
	for (i = 0; i < nseg; i++) {
		KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
		flist->frag[i].fraglen = segs[i].ds_len;
		flist->frag[i].fragaddr = segs[i].ds_addr;
	}
	flist->numfrags = nseg;
}

static void
epic_dma_map_rxbuf(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct epic_rx_desc *desc;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments"));
	desc = arg;
	desc->bufaddr = segs->ds_addr;
}

/*
 * The if_start handler. It takes mbufs from the if_snd queue
 * and queues them for transmission, one by one, until the TX ring
 * becomes full or the queue becomes empty.
 */
static void
epic_ifstart(struct ifnet * ifp)
{
	epic_softc_t *sc = ifp->if_softc;

	EPIC_LOCK(sc);
	epic_ifstart_locked(ifp);
	EPIC_UNLOCK(sc);
}

static void
epic_ifstart_locked(struct ifnet * ifp)
{
	epic_softc_t *sc = ifp->if_softc;
	struct epic_tx_buffer *buf;
	struct epic_tx_desc *desc;
	struct epic_frag_list *flist;
	struct mbuf *m0, *m;
	int error;

	while (sc->pending_txs < TX_RING_SIZE) {
		buf = sc->tx_buffer + sc->cur_tx;
		desc = sc->tx_desc + sc->cur_tx;
		flist = sc->tx_flist + sc->cur_tx;

		/* Get next packet to send. */
		IF_DEQUEUE(&ifp->if_snd, m0);

		/* If nothing to send, return. */
		if (m0 == NULL)
			return;

		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
		    epic_dma_map_txbuf, flist, 0);

		if (error && error != EFBIG) {
			m_freem(m0);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			continue;
		}

		/*
		 * If the packet maps to more than EPIC_MAX_FRAGS
		 * segments, recopy it into a newly allocated mbuf cluster.
		 */
		if (error) {
			m = m_defrag(m0, M_NOWAIT);
			if (m == NULL) {
				m_freem(m0);
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				continue;
			}
			m_freem(m0);
			m0 = m;

			error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m,
			    epic_dma_map_txbuf, flist, 0);
			if (error) {
				m_freem(m);
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				continue;
			}
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

		buf->mbuf = m0;
		sc->pending_txs++;
		sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
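		/*
		 * Fill in the descriptor: pad short frames up to the
		 * Ethernet minimum, then set the owner bit (0x8000) in
		 * the status word to hand the descriptor to the chip.
		 */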
		desc->control = 0x01;
		desc->txlength =
		    max(m0->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
		desc->status = 0x8000;
		bus_dmamap_sync(sc->ttag, sc->tmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE);
		CSR_WRITE_4(sc, COMMAND, COMMAND_TXQUEUED);

		/* Set watchdog timer. */
		sc->tx_timeout = 8;

		BPF_MTAP(ifp, m0);
	}

	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

723
724/*
725 * Synopsis: Finish all received frames.
726 */
727static void
728epic_rx_done(epic_softc_t *sc)
729{
730	struct ifnet *ifp = sc->ifp;
731	u_int16_t len;
732	struct epic_rx_buffer *buf;
733	struct epic_rx_desc *desc;
734	struct mbuf *m;
735	bus_dmamap_t map;
736	int error;
737
738	bus_dmamap_sync(sc->rtag, sc->rmap, BUS_DMASYNC_POSTREAD);
739	while ((sc->rx_desc[sc->cur_rx].status & 0x8000) == 0) {
740		buf = sc->rx_buffer + sc->cur_rx;
741		desc = sc->rx_desc + sc->cur_rx;
742
743		/* Switch to next descriptor. */
744		sc->cur_rx = (sc->cur_rx + 1) & RX_RING_MASK;
745
		/*
		 * Check for RX errors. This should only happen if
		 * SAVE_ERRORED_PACKETS is set; RX errors usually
		 * generate an RXE interrupt.
		 */
		if ((desc->status & 1) == 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			desc->status = 0x8000;
			continue;
		}

		/* Save the packet length and the mbuf containing it. */
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
		len = desc->rxlength - ETHER_CRC_LEN;
		m = buf->mbuf;

		/* Try to get an mbuf cluster. */
		buf->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			buf->mbuf = m;
			desc->status = 0x8000;
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
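		/*
		 * ETHER_ALIGN (2 bytes) offsets the 14-byte Ethernet
		 * header so the IP header that follows it lands on a
		 * 32-bit boundary.
		 */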
		m_adj(buf->mbuf, ETHER_ALIGN);

		/* Point to new mbuf, and give descriptor to chip. */
		error = bus_dmamap_load_mbuf(sc->mtag, sc->sparemap, buf->mbuf,
		    epic_dma_map_rxbuf, desc, 0);
		if (error) {
			buf->mbuf = m;
			desc->status = 0x8000;
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		desc->status = 0x8000;
		bus_dmamap_unload(sc->mtag, buf->map);
		map = buf->map;
		buf->map = sc->sparemap;
		sc->sparemap = map;
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

		/* First mbuf in packet holds the ethernet and packet headers. */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/* Give mbuf to OS. */
		EPIC_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		EPIC_LOCK(sc);

		/* Successfully received frame. */
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	}
	bus_dmamap_sync(sc->rtag, sc->rmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Synopsis: Do the last phase of transmission: while descriptors have
 * been transmitted, decrease the pending_txs counter, free the mbuf
 * holding the packet, and advance to the next descriptor; stop when no
 * packets are pending or a descriptor has not yet been transmitted.
 */
static void
epic_tx_done(epic_softc_t *sc)
{
	struct epic_tx_buffer *buf;
	struct epic_tx_desc *desc;
	u_int16_t status;

	bus_dmamap_sync(sc->ttag, sc->tmap, BUS_DMASYNC_POSTREAD);
	while (sc->pending_txs > 0) {
		buf = sc->tx_buffer + sc->dirty_tx;
		desc = sc->tx_desc + sc->dirty_tx;
		status = desc->status;

		/*
		 * If this packet is not yet transmitted, the following
		 * packets are not transmitted either.
		 */
		if (status & 0x8000)
			break;

		/* Packet is transmitted. Switch to next and free mbuf. */
		sc->pending_txs--;
		sc->dirty_tx = (sc->dirty_tx + 1) & TX_RING_MASK;
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mtag, buf->map);
		m_freem(buf->mbuf);
		buf->mbuf = NULL;

		/* Check for errors and collisions. */
		if (status & 0x0001)
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
		else
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
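		/* Bits 8-12 of the TX status word carry the collision count. */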
		if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, (status >> 8) & 0x1F);
#ifdef EPIC_DIAG
		if ((status & 0x1001) == 0x1001)
			device_printf(sc->dev,
			    "Tx ERROR: excessive coll. number\n");
#endif
	}

	if (sc->pending_txs < TX_RING_SIZE)
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	bus_dmamap_sync(sc->ttag, sc->tmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

858
859/*
860 * Interrupt function
861 */
862static void
863epic_intr(void *arg)
864{
865    epic_softc_t *sc;
866    int status, i;
867
868    sc = arg;
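    /* Service at most four rounds of interrupt status per invocation. */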
    i = 4;
    EPIC_LOCK(sc);
    while (i-- && ((status = CSR_READ_4(sc, INTSTAT)) & INTSTAT_INT_ACTV)) {
	CSR_WRITE_4(sc, INTSTAT, status);

	if (status & (INTSTAT_RQE|INTSTAT_RCC|INTSTAT_OVW)) {
	    epic_rx_done(sc);
	    if (status & (INTSTAT_RQE|INTSTAT_OVW)) {
#ifdef EPIC_DIAG
		if (status & INTSTAT_OVW)
		    device_printf(sc->dev, "RX buffer overflow\n");
		if (status & INTSTAT_RQE)
		    device_printf(sc->dev, "RX FIFO overflow\n");
#endif
		if ((CSR_READ_4(sc, COMMAND) & COMMAND_RXQUEUED) == 0)
		    CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED);
		if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
	    }
	}

	if (status & (INTSTAT_TXC|INTSTAT_TCC|INTSTAT_TQE)) {
	    epic_tx_done(sc);
	    if (sc->ifp->if_snd.ifq_head != NULL)
		    epic_ifstart_locked(sc->ifp);
	}

	/* Check for rare errors. */
	if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
		      INTSTAT_APE|INTSTAT_DPE|INTSTAT_TXU|INTSTAT_RXE)) {
	    if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
			  INTSTAT_APE|INTSTAT_DPE)) {
		device_printf(sc->dev, "PCI fatal errors occurred: %s%s%s%s\n",
		    (status & INTSTAT_PMA) ? "PMA " : "",
		    (status & INTSTAT_PTA) ? "PTA " : "",
		    (status & INTSTAT_APE) ? "APE " : "",
		    (status & INTSTAT_DPE) ? "DPE" : "");

		epic_stop(sc);
		epic_init_locked(sc);
		break;
	    }

	    if (status & INTSTAT_RXE) {
#ifdef EPIC_DIAG
		device_printf(sc->dev, "CRC/Alignment error\n");
#endif
		if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
	    }

	    if (status & INTSTAT_TXU) {
		epic_tx_underrun(sc);
		if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
	    }
	}
    }

    /* If no packets are pending, then no timeouts. */
    if (sc->pending_txs == 0)
	    sc->tx_timeout = 0;
    EPIC_UNLOCK(sc);
}

/*
 * Handle the TX underrun error: increase the TX threshold
 * and restart the transmitter.
 */
static void
epic_tx_underrun(epic_softc_t *sc)
{
	if (sc->tx_threshold > TRANSMIT_THRESHOLD_MAX) {
		sc->txcon &= ~TXCON_EARLY_TRANSMIT_ENABLE;
#ifdef EPIC_DIAG
		device_printf(sc->dev, "Tx UNDERRUN: early TX disabled\n");
#endif
	} else {
		sc->tx_threshold += 0x40;
#ifdef EPIC_DIAG
		device_printf(sc->dev,
		    "Tx UNDERRUN: TX threshold increased to %d\n",
		    sc->tx_threshold);
#endif
	}

	/* We must set TXUGO to reset the stuck transmitter. */
	CSR_WRITE_4(sc, COMMAND, COMMAND_TXUGO);

	/* Update the TX threshold. */
	epic_stop_activity(sc);
	epic_set_tx_mode(sc);
	epic_start_activity(sc);
}

/*
 * This function is called once a second when the interface is running
 * and performs two functions.  First, it provides a timer for the mii
 * to help with autonegotiation.  Second, it checks for transmit
 * timeouts.
 */
static void
epic_timer(void *arg)
{
	epic_softc_t *sc = arg;
	struct mii_data *mii;
	struct ifnet *ifp;

	ifp = sc->ifp;
	EPIC_ASSERT_LOCKED(sc);
	if (sc->tx_timeout && --sc->tx_timeout == 0) {
		device_printf(sc->dev, "device timeout %d packets\n",
		    sc->pending_txs);

		/* Try to finish queued packets. */
		epic_tx_done(sc);

		/* If not successful. */
		if (sc->pending_txs > 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, sc->pending_txs);

			/* Reinitialize board. */
			device_printf(sc->dev, "reinitialization\n");
			epic_stop(sc);
			epic_init_locked(sc);
		} else
			device_printf(sc->dev,
			    "seems we can continue normally\n");

		/* Start output. */
		if (ifp->if_snd.ifq_head)
			epic_ifstart_locked(ifp);
	}

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);

	callout_reset(&sc->timer, hz, epic_timer, sc);
}

/*
 * Set media options.
 */
static int
epic_ifmedia_upd(struct ifnet *ifp)
{
	epic_softc_t *sc;
	int error;

	sc = ifp->if_softc;
	EPIC_LOCK(sc);
	error = epic_ifmedia_upd_locked(ifp);
	EPIC_UNLOCK(sc);
	return (error);
}

static int
epic_ifmedia_upd_locked(struct ifnet *ifp)
{
	epic_softc_t *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;
	struct mii_softc *miisc;
	int cfg, media;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->miibus);
	ifm = &mii->mii_media;
	media = ifm->ifm_cur->ifm_media;

	/* Do not do anything if interface is not up. */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (0);

	/*
	 * Look up the currently selected PHY.
	 */
	if (IFM_INST(media) == sc->serinst) {
		sc->phyid = EPIC_SERIAL;
		sc->physc = NULL;
	} else {
		/* If we're not selecting serial interface, select MII mode. */
		sc->miicfg &= ~MIICFG_SERIAL_ENABLE;
		CSR_WRITE_4(sc, MIICFG, sc->miicfg);

		/* Default to unknown PHY. */
		sc->phyid = EPIC_UNKN_PHY;

		/* Lookup selected PHY. */
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
			if (IFM_INST(media) == miisc->mii_inst) {
				sc->physc = miisc;
				break;
			}
		}

		/* Identify selected PHY. */
		if (sc->physc) {
			int id1, id2, model, oui;

			id1 = PHY_READ(sc->physc, MII_PHYIDR1);
			id2 = PHY_READ(sc->physc, MII_PHYIDR2);

			oui = MII_OUI(id1, id2);
			model = MII_MODEL(id2);
			switch (oui) {
			case MII_OUI_xxQUALSEMI:
				if (model == MII_MODEL_xxQUALSEMI_QS6612)
					sc->phyid = EPIC_QS6612_PHY;
				break;
			case MII_OUI_ALTIMA:
				if (model == MII_MODEL_ALTIMA_AC101)
					sc->phyid = EPIC_AC101_PHY;
				break;
			case MII_OUI_xxLEVEL1:
				if (model == MII_MODEL_xxLEVEL1_LXT970)
					sc->phyid = EPIC_LXT970_PHY;
				break;
			}
		}
	}

	/*
	 * Do PHY specific card setup.
	 */

	/*
	 * Call this to isolate all PHYs that are not selected and to
	 * set up the selected one.
	 */
	mii_mediachg(mii);

	/* Do our own setup. */
	switch (sc->phyid) {
	case EPIC_QS6612_PHY:
		break;
	case EPIC_AC101_PHY:
		/* We have to power up the fiber transceivers. */
		if (IFM_SUBTYPE(media) == IFM_100_FX)
			sc->miicfg |= MIICFG_694_ENABLE;
		else
			sc->miicfg &= ~MIICFG_694_ENABLE;
		CSR_WRITE_4(sc, MIICFG, sc->miicfg);

		break;
	case EPIC_LXT970_PHY:
		/* We have to power up the fiber transceivers. */
		cfg = PHY_READ(sc->physc, MII_LXTPHY_CONFIG);
		if (IFM_SUBTYPE(media) == IFM_100_FX)
			cfg |= CONFIG_LEDC1 | CONFIG_LEDC0;
		else
			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
		PHY_WRITE(sc->physc, MII_LXTPHY_CONFIG, cfg);

		break;
	case EPIC_SERIAL:
		/* Select serial PHY (10base2/BNC usually). */
		sc->miicfg |= MIICFG_694_ENABLE | MIICFG_SERIAL_ENABLE;
		CSR_WRITE_4(sc, MIICFG, sc->miicfg);

		/* There is no PHY driver to fill this in. */
		mii->mii_media_active = media;
		mii->mii_media_status = 0;

		/*
		 * We need to call this manually as it wasn't called
		 * in mii_mediachg().
		 */
		epic_miibus_statchg(sc->dev);
		break;
	default:
		device_printf(sc->dev, "ERROR! Unknown PHY selected\n");
		return (EINVAL);
	}

	return (0);
}

/*
 * Report current media status.
 */
static void
epic_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	epic_softc_t *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->miibus);
	EPIC_LOCK(sc);

	/* Nothing should be selected if interface is down. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		ifmr->ifm_active = IFM_NONE;
		ifmr->ifm_status = 0;
		EPIC_UNLOCK(sc);
		return;
	}

	/* Poll the underlying PHY status, unless the serial PHY is selected. */
	if (sc->phyid != EPIC_SERIAL)
		mii_pollstat(mii);

	/* Simply copy media info. */
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	EPIC_UNLOCK(sc);
}

/*
 * Callback routine, called on media change.
 */
static void
epic_miibus_statchg(device_t dev)
{
	epic_softc_t *sc;
	struct mii_data *mii;
	int media;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->miibus);
	media = mii->mii_media_active;

	sc->txcon &= ~(TXCON_LOOPBACK_MODE | TXCON_FULL_DUPLEX);

	/*
	 * If we are in full-duplex mode or loopback operation,
	 * we need to decouple receiver and transmitter.
	 */
	if (IFM_OPTIONS(media) & (IFM_FDX | IFM_LOOP))
		sc->txcon |= TXCON_FULL_DUPLEX;

	/* On some cards we need to manually set the full-duplex LED. */
	if (sc->cardid == SMC9432FTX ||
	    sc->cardid == SMC9432FTX_SC) {
		if (IFM_OPTIONS(media) & IFM_FDX)
			sc->miicfg |= MIICFG_694_ENABLE;
		else
			sc->miicfg &= ~MIICFG_694_ENABLE;

		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
	}

	epic_stop_activity(sc);
	epic_set_tx_mode(sc);
	epic_start_activity(sc);
}

static void
epic_miibus_mediainit(device_t dev)
{
	epic_softc_t *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;
	int media;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->miibus);
	ifm = &mii->mii_media;

	/*
	 * Add the Serial Media Interface if present; this applies to
	 * the SMC9432BTX series.
	 */
	if (CSR_READ_4(sc, MIICFG) & MIICFG_PHY_PRESENT) {
		/* Store its instance. */
		sc->serinst = mii->mii_instance++;

		/* Add as 10base2/BNC media. */
		media = IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->serinst);
		ifmedia_add(ifm, media, 0, NULL);

		/* Report to user. */
		device_printf(sc->dev, "serial PHY detected (10Base2/BNC)\n");
	}
}

/*
 * Reset chip and update media.
 */
static void
epic_init(void *xsc)
{
	epic_softc_t *sc = xsc;

	EPIC_LOCK(sc);
	epic_init_locked(sc);
	EPIC_UNLOCK(sc);
}

static void
epic_init_locked(epic_softc_t *sc)
{
	struct ifnet *ifp = sc->ifp;
	int i;

	/* If interface is already running, then we need not do anything. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		return;
	}

	/* Soft reset the chip (we have to power up card before). */
	CSR_WRITE_4(sc, GENCTL, 0);
	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);

	/*
	 * Reset takes 15 PCI clock ticks, so its duration depends on the
	 * PCI bus speed. Assuming a clock of at least 33 MHz, 15 ticks
	 * take well under a microsecond, so 500 us is ample margin.
	 */
	DELAY(500);

	/* Wake up */
	CSR_WRITE_4(sc, GENCTL, 0);

	/* Workaround for Application Note 7-15 */
	for (i = 0; i < 16; i++)
		CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);

	/* Give rings to EPIC */
	CSR_WRITE_4(sc, PRCDAR, sc->rx_addr);
	CSR_WRITE_4(sc, PTCDAR, sc->tx_addr);

	/* Put node address to EPIC. */
	CSR_WRITE_4(sc, LAN0, ((u_int16_t *)IF_LLADDR(sc->ifp))[0]);
	CSR_WRITE_4(sc, LAN1, ((u_int16_t *)IF_LLADDR(sc->ifp))[1]);
	CSR_WRITE_4(sc, LAN2, ((u_int16_t *)IF_LLADDR(sc->ifp))[2]);

	/* Set TX mode, including the transmit threshold. */
	epic_set_tx_mode(sc);

	/* Compute and set RXCON. */
	epic_set_rx_mode(sc);

	/* Set multicast table. */
	epic_set_mc_table(sc);

	/* Enable interrupts by setting the interrupt mask. */
	CSR_WRITE_4(sc, INTMASK,
		INTSTAT_RCC  | /* INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE | */
		/* INTSTAT_TXC | */ INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU |
		INTSTAT_FATAL);

	/* Acknowledge all pending interrupts. */
	CSR_WRITE_4(sc, INTSTAT, CSR_READ_4(sc, INTSTAT));

	/* Enable interrupts, select PCI memory-read-multiple, etc. */
	CSR_WRITE_4(sc, GENCTL,
		GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE |
		GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64);

	/* Mark interface running ... */
	if (ifp->if_flags & IFF_UP)
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
	else
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/* ... and free */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start Rx process */
	epic_start_activity(sc);

	/* Set appropriate media */
	epic_ifmedia_upd_locked(ifp);

	callout_reset(&sc->timer, hz, epic_timer, sc);
}

/*
 * Synopsis: calculate and set Rx mode. Chip must be in idle state to
 * access RXCON.
 */
static void
epic_set_rx_mode(epic_softc_t *sc)
{
	u_int32_t flags;
	u_int32_t rxcon;

	flags = sc->ifp->if_flags;
	rxcon = RXCON_DEFAULT;

#ifdef EPIC_EARLY_RX
	rxcon |= RXCON_EARLY_RX;
#endif

	rxcon |= (flags & IFF_PROMISC) ? RXCON_PROMISCUOUS_MODE : 0;

	CSR_WRITE_4(sc, RXCON, rxcon);
}

/*
 * Synopsis: Set transmit control register. Chip must be in idle state to
 * access TXCON.
 */
static void
epic_set_tx_mode(epic_softc_t *sc)
{

	if (sc->txcon & TXCON_EARLY_TRANSMIT_ENABLE)
		CSR_WRITE_4(sc, ETXTHR, sc->tx_threshold);

	CSR_WRITE_4(sc, TXCON, sc->txcon);
}

/*
 * Synopsis: Program the multicast filter honoring the IFF_ALLMULTI and
 * IFF_PROMISC flags (note that setting the PROMISC bit in EPIC's RXCON
 * only affects individual frames; the multicast filter must be
 * programmed manually as well).
 *
 * Note: EPIC must be in idle state.
 */
static void
epic_set_mc_table(epic_softc_t *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int16_t filter[4];
	u_int8_t h;

	ifp = sc->ifp;
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		CSR_WRITE_4(sc, MC0, 0xFFFF);
		CSR_WRITE_4(sc, MC1, 0xFFFF);
		CSR_WRITE_4(sc, MC2, 0xFFFF);
		CSR_WRITE_4(sc, MC3, 0xFFFF);
		return;
	}

	filter[0] = 0;
	filter[1] = 0;
	filter[2] = 0;
	filter[3] = 0;

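	/*
	 * The hash filter is 64 bits wide, split across the four 16-bit
	 * MC0-MC3 registers; the top 6 bits of the big-endian CRC of
	 * each multicast address select which of the 64 bits to set.
	 */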
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		filter[h >> 4] |= 1 << (h & 0xF);
	}
	if_maddr_runlock(ifp);

	CSR_WRITE_4(sc, MC0, filter[0]);
	CSR_WRITE_4(sc, MC1, filter[1]);
	CSR_WRITE_4(sc, MC2, filter[2]);
	CSR_WRITE_4(sc, MC3, filter[3]);
}

/*
 * Synopsis: Start the receive process, and the transmit process if
 * packets are pending.
 */
static void
epic_start_activity(epic_softc_t *sc)
{

	/* Start rx process. */
	CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED | COMMAND_START_RX |
	    (sc->pending_txs ? COMMAND_TXQUEUED : 0));
}

/*
 * Synopsis: Completely stop the Rx and Tx processes. If TQE is set, an
 * additional packet needs to be queued to stop Tx DMA.
 */
static void
epic_stop_activity(epic_softc_t *sc)
{
	int status, i;

	/* Stop Tx and Rx DMA. */
	CSR_WRITE_4(sc, COMMAND,
	    COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA);

	/* Wait for Rx and Tx DMA to stop (why 1 ms? XXX). */
	for (i = 0; i < 0x1000; i++) {
		status = CSR_READ_4(sc, INTSTAT) &
		    (INTSTAT_TXIDLE | INTSTAT_RXIDLE);
		if (status == (INTSTAT_TXIDLE | INTSTAT_RXIDLE))
			break;
		DELAY(1);
	}

	/* Catch all finished packets. */
	epic_rx_done(sc);
	epic_tx_done(sc);

	status = CSR_READ_4(sc, INTSTAT);

	if ((status & INTSTAT_RXIDLE) == 0)
		device_printf(sc->dev, "ERROR! Can't stop Rx DMA\n");

	if ((status & INTSTAT_TXIDLE) == 0)
		device_printf(sc->dev, "ERROR! Can't stop Tx DMA\n");

	/*
	 * We may need to queue one more packet if TQE is set; this is a
	 * rare but real case.
	 */
	if ((status & INTSTAT_TQE) && !(status & INTSTAT_TXIDLE))
		(void)epic_queue_last_packet(sc);
}

/*
 * The EPIC transmitter may get stuck in the TQE state. It will not go
 * IDLE until a packet from the current descriptor has been copied to
 * internal RAM. We compose a dummy packet here and queue it for
 * transmission.
 *
 * XXX the packet will then actually be sent over the network...
 */
static int
epic_queue_last_packet(epic_softc_t *sc)
{
	struct epic_tx_desc *desc;
	struct epic_frag_list *flist;
	struct epic_tx_buffer *buf;
	struct mbuf *m0;
	int error, i;

	device_printf(sc->dev, "queue last packet\n");

	desc = sc->tx_desc + sc->cur_tx;
	flist = sc->tx_flist + sc->cur_tx;
	buf = sc->tx_buffer + sc->cur_tx;

	if ((desc->status & 0x8000) || (buf->mbuf != NULL))
		return (EBUSY);

	MGETHDR(m0, M_NOWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	/* Prepare mbuf. */
	m0->m_len = min(MHLEN, ETHER_MIN_LEN - ETHER_CRC_LEN);
	m0->m_pkthdr.len = m0->m_len;
	m0->m_pkthdr.rcvif = sc->ifp;
	bzero(mtod(m0, caddr_t), m0->m_len);

	/* Fill fragments list. */
	error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
	    epic_dma_map_txbuf, flist, 0);
	if (error) {
		m_freem(m0);
		return (error);
	}
	bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

	/* Fill in descriptor. */
	buf->mbuf = m0;
	sc->pending_txs++;
	sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
	desc->control = 0x01;
	desc->txlength = max(m0->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	desc->status = 0x8000;
	bus_dmamap_sync(sc->ttag, sc->tmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE);

	/* Launch transmission. */
	CSR_WRITE_4(sc, COMMAND, COMMAND_STOP_TDMA | COMMAND_TXQUEUED);

	/* Wait for Tx DMA to stop (for how long? XXX). */
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE)
			break;
		DELAY(1);
	}

	if ((CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) == 0)
		device_printf(sc->dev, "ERROR! can't stop Tx DMA (2)\n");
	else
		epic_tx_done(sc);

	return (0);
}

/*
 * Synopsis: Shut down the board and stop all activity.
 */
static void
epic_stop(epic_softc_t *sc)
{

	EPIC_ASSERT_LOCKED(sc);

	sc->tx_timeout = 0;
	callout_stop(&sc->timer);

	/* Disable interrupts */
	CSR_WRITE_4(sc, INTMASK, 0);
	CSR_WRITE_4(sc, GENCTL, 0);

	/* Try to stop Rx and TX processes */
	epic_stop_activity(sc);

	/* Reset chip */
	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
	DELAY(1000);

	/* Make chip go to bed */
	CSR_WRITE_4(sc, GENCTL, GENCTL_POWER_DOWN);

	/* Mark as stopped */
	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

/*
 * Synopsis: Free all memory allocated for the rings.
 */
static void
epic_free_rings(epic_softc_t *sc)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct epic_rx_buffer *buf = sc->rx_buffer + i;
		struct epic_rx_desc *desc = sc->rx_desc + i;

		desc->status = 0;
		desc->buflength = 0;
		desc->bufaddr = 0;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}

	if (sc->sparemap != NULL)
		bus_dmamap_destroy(sc->mtag, sc->sparemap);

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct epic_tx_buffer *buf = sc->tx_buffer + i;
		struct epic_tx_desc *desc = sc->tx_desc + i;

		desc->status = 0;
		desc->buflength = 0;
		desc->bufaddr = 0;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}
}

/*
 * Synopsis: Allocate mbufs for the Rx ring and point the Rx descriptors
 * at them. Point the Tx descriptors at the fragment lists. Check that
 * all descriptors and fragment lists are properly bounded and aligned.
 */
static int
epic_init_rings(epic_softc_t *sc)
{
	int error, i;

	sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;

	/* Initialize the RX descriptor ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct epic_rx_buffer *buf = sc->rx_buffer + i;
		struct epic_rx_desc *desc = sc->rx_desc + i;

		desc->status = 0;		/* Owned by driver */
		desc->next = sc->rx_addr +
		    ((i + 1) & RX_RING_MASK) * sizeof(struct epic_rx_desc);

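		/*
		 * A descriptor must be 32-bit aligned and must not
		 * cross a page boundary.
		 */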
		if ((desc->next & 3) ||
		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
			epic_free_rings(sc);
			return (EFAULT);
		}

		buf->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			epic_free_rings(sc);
			return (ENOBUFS);
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			epic_free_rings(sc);
			return (error);
		}
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    epic_dma_map_rxbuf, desc, 0);
		if (error) {
			epic_free_rings(sc);
			return (error);
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

		desc->buflength = buf->mbuf->m_len; /* Max RX buffer length */
		desc->status = 0x8000;		/* Set owner bit to NIC */
	}
	bus_dmamap_sync(sc->rtag, sc->rmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Create the spare DMA map. */
	error = bus_dmamap_create(sc->mtag, 0, &sc->sparemap);
	if (error) {
		epic_free_rings(sc);
		return (error);
	}

	/* Initialize the TX descriptor ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct epic_tx_buffer *buf = sc->tx_buffer + i;
		struct epic_tx_desc *desc = sc->tx_desc + i;

		desc->status = 0;
		desc->next = sc->tx_addr +
		    ((i + 1) & TX_RING_MASK) * sizeof(struct epic_tx_desc);

		if ((desc->next & 3) ||
		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
			epic_free_rings(sc);
			return (EFAULT);
		}

		buf->mbuf = NULL;
		desc->bufaddr = sc->frag_addr +
		    i * sizeof(struct epic_frag_list);

		if ((desc->bufaddr & 3) ||
		    ((desc->bufaddr & PAGE_MASK) +
		    sizeof(struct epic_frag_list)) > PAGE_SIZE) {
			epic_free_rings(sc);
			return (EFAULT);
		}

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			epic_free_rings(sc);
			return (error);
		}
	}
	bus_dmamap_sync(sc->ttag, sc->tmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * EEPROM operation functions
 */
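/*
 * These routines bit-bang a Microwire-style serial EEPROM through the
 * EECTL register. Judging from the access pattern below (the exact bit
 * names live in if_txreg.h): 0x01/0x02 enable access and assert chip
 * select, 0x04 is the serial clock, 0x08 is data out to the EEPROM,
 * 0x10 is data in from it, and 0x20 reads back as busy.
 */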
static void
epic_write_eepromreg(epic_softc_t *sc, u_int8_t val)
{
	u_int16_t i;

	CSR_WRITE_1(sc, EECTL, val);

	for (i = 0; i < 0xFF; i++) {
		if ((CSR_READ_1(sc, EECTL) & 0x20) == 0)
			break;
	}
}

static u_int8_t
epic_read_eepromreg(epic_softc_t *sc)
{

	return (CSR_READ_1(sc, EECTL));
}

static u_int8_t
epic_eeprom_clock(epic_softc_t *sc, u_int8_t val)
{

	epic_write_eepromreg(sc, val);
	epic_write_eepromreg(sc, (val | 0x4));
	epic_write_eepromreg(sc, val);

	return (epic_read_eepromreg(sc));
}

static void
epic_output_eepromw(epic_softc_t *sc, u_int16_t val)
{
	int i;

	for (i = 0xF; i >= 0; i--) {
		if (val & (1 << i))
			epic_eeprom_clock(sc, 0x0B);
		else
			epic_eeprom_clock(sc, 0x03);
	}
}

static u_int16_t
epic_input_eepromw(epic_softc_t *sc)
{
	u_int16_t retval = 0;
	int i;

	for (i = 0xF; i >= 0; i--) {
		if (epic_eeprom_clock(sc, 0x3) & 0x10)
			retval |= (1 << i);
	}

	return (retval);
}

static int
epic_read_eeprom(epic_softc_t *sc, u_int16_t loc)
{
	u_int16_t dataval;
	u_int16_t read_cmd;

	epic_write_eepromreg(sc, 3);

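	/*
	 * Bit 0x40 in EECTL presumably reports the EEPROM size: when it
	 * is set, a read opcode with a 6-bit address (93C46-style part)
	 * is shifted out; otherwise an 8-bit address is used.
	 */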
	if (epic_read_eepromreg(sc) & 0x40)
		read_cmd = (loc & 0x3F) | 0x180;
	else
		read_cmd = (loc & 0xFF) | 0x600;

	epic_output_eepromw(sc, read_cmd);

	dataval = epic_input_eepromw(sc);

	epic_write_eepromreg(sc, 1);

	return (dataval);
}

/*
 * MII read/write routines.
 */
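/*
 * In the MIICTL encoding used below, the register number goes in bits
 * 4-8 and the PHY address in bits 9-13; bit 0 triggers a read and bit 1
 * a write, and the chip clears the bit when the operation completes.
 */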
static int
epic_read_phy_reg(epic_softc_t *sc, int phy, int reg)
{
	int i;

	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x01));

	for (i = 0; i < 0x100; i++) {
		if ((CSR_READ_4(sc, MIICTL) & 0x01) == 0)
			break;
		DELAY(1);
	}

	return (CSR_READ_4(sc, MIIDATA));
}

static void
epic_write_phy_reg(epic_softc_t *sc, int phy, int reg, int val)
{
	int i;

	CSR_WRITE_4(sc, MIIDATA, val);
	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x02));

	for (i = 0; i < 0x100; i++) {
		if ((CSR_READ_4(sc, MIICTL) & 0x02) == 0)
			break;
		DELAY(1);
	}
}

static int
epic_miibus_readreg(device_t dev, int phy, int reg)
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	return (PHY_READ_2(sc, phy, reg));
}

static int
epic_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	PHY_WRITE_2(sc, phy, reg, data);

	return (0);
}
