/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997 Semen Ustimenko (semenu@FreeBSD.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * EtherPower II 10/100 Fast Ethernet (SMC 9432 series)
 *
 * These cards are based on the SMC83c17x (EPIC) chip and one of several
 * PHYs (QS6612, AC101 and LXT970 have been seen). The media support depends
 * on the card model. All cards support 10baseT/UTP and 100baseTX half- and
 * full-duplex (SMC9432TX). The SMC9432BTX also supports 10baseT/BNC. The
 * SMC9432FTX also supports fibre optics.
 *
 * Thanks go to Steve Bauer and Jason Wright.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"

#include <dev/mii/lxtphyreg.h>

#include "miibus_if.h"

#include <dev/tx/if_txreg.h>
#include <dev/tx/if_txvar.h>

MODULE_DEPEND(tx, pci, 1, 1, 1);
MODULE_DEPEND(tx, ether, 1, 1, 1);
MODULE_DEPEND(tx, miibus, 1, 1, 1);

static int epic_ifioctl(struct ifnet *, u_long, caddr_t);
static void epic_intr(void *);
static void epic_tx_underrun(epic_softc_t *);
static void epic_ifstart(struct ifnet *);
static void epic_ifstart_locked(struct ifnet *);
static void epic_timer(void *);
static void epic_init(void *);
static void epic_init_locked(epic_softc_t *);
static void epic_stop(epic_softc_t *);
static void epic_rx_done(epic_softc_t *);
static void epic_tx_done(epic_softc_t *);
static int epic_init_rings(epic_softc_t *);
static void epic_free_rings(epic_softc_t *);
static void epic_stop_activity(epic_softc_t *);
static int epic_queue_last_packet(epic_softc_t *);
static void epic_start_activity(epic_softc_t *);
static void epic_set_rx_mode(epic_softc_t *);
static void epic_set_tx_mode(epic_softc_t *);
static void epic_set_mc_table(epic_softc_t *);
static int epic_read_eeprom(epic_softc_t *, u_int16_t);
static void epic_output_eepromw(epic_softc_t *, u_int16_t);
static u_int16_t epic_input_eepromw(epic_softc_t *);
static u_int8_t epic_eeprom_clock(epic_softc_t *, u_int8_t);
static void epic_write_eepromreg(epic_softc_t *, u_int8_t);
static u_int8_t epic_read_eepromreg(epic_softc_t *);

static int epic_read_phy_reg(epic_softc_t *, int, int);
static void epic_write_phy_reg(epic_softc_t *, int, int, int);

static int epic_miibus_readreg(device_t, int, int);
static int epic_miibus_writereg(device_t, int, int, int);
static void epic_miibus_statchg(device_t);
static void epic_miibus_mediainit(device_t);

static int epic_ifmedia_upd(struct ifnet *);
static int epic_ifmedia_upd_locked(struct ifnet *);
static void epic_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int epic_probe(device_t);
static int epic_attach(device_t);
static int epic_shutdown(device_t);
static int epic_detach(device_t);
static void epic_release(epic_softc_t *);
static struct epic_type *epic_devtype(device_t);

static device_method_t epic_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		epic_probe),
	DEVMETHOD(device_attach,	epic_attach),
	DEVMETHOD(device_detach,	epic_detach),
	DEVMETHOD(device_shutdown,	epic_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	epic_miibus_readreg),
	DEVMETHOD(miibus_writereg,	epic_miibus_writereg),
	DEVMETHOD(miibus_statchg,	epic_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	epic_miibus_mediainit),

	{ 0, 0 }
};

static driver_t epic_driver = {
	"tx",
	epic_methods,
	sizeof(epic_softc_t)
};

static devclass_t epic_devclass;

DRIVER_MODULE(tx, pci, epic_driver, epic_devclass, 0, 0);
DRIVER_MODULE(miibus, tx, miibus_driver, miibus_devclass, 0, 0);

static struct epic_type epic_devs[] = {
	{ SMC_VENDORID, SMC_DEVICEID_83C170, "SMC EtherPower II 10/100" },
	{ 0, 0, NULL }
};

static int
epic_probe(device_t dev)
{
	struct epic_type *t;

	t = epic_devtype(dev);

	if (t != NULL) {
		device_set_desc(dev, t->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static struct epic_type *
epic_devtype(device_t dev)
{
	struct epic_type *t;

	t = epic_devs;

	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->ven_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			return (t);
		}
		t++;
	}
	return (NULL);
}

#ifdef EPIC_USEIOSPACE
#define	EPIC_RES	SYS_RES_IOPORT
#define	EPIC_RID	PCIR_BASEIO
#else
#define	EPIC_RES	SYS_RES_MEMORY
#define	EPIC_RID	PCIR_BASEMEM
#endif

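/*
 * bus_dmamap_load() callback: record the bus address of the single
 * segment backing a descriptor ring or the fragment list array.
 */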
static void
epic_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;
}

/*
 * Attach routine: map registers, allocate softc, rings and descriptors.
 * Reset to a known state.
 */
static int
epic_attach(device_t dev)
{
	struct ifnet *ifp;
	epic_softc_t *sc;
	int error;
	int i, rid, tmp;
	u_char eaddr[6];

	sc = device_get_softc(dev);

	/* Preinitialize softc structure. */
	sc->dev = dev;
	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/* Fill ifnet structure. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ifioctl;
	ifp->if_start = epic_ifstart;
	ifp->if_init = epic_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_RING_SIZE - 1);

	/* Enable busmastering. */
	pci_enable_busmaster(dev);

	rid = EPIC_RID;
	sc->res = bus_alloc_resource_any(dev, EPIC_RES, &rid, RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt. */
	rid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate DMA tags. */
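	/* The mbuf tag allows EPIC_MAX_FRAGS segments of MCLBYTES each. */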
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * EPIC_MAX_FRAGS, EPIC_MAX_FRAGS, MCLBYTES, 0, NULL, NULL,
	    &sc->mtag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}

	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct epic_rx_desc) * RX_RING_SIZE,
	    1, sizeof(struct epic_rx_desc) * RX_RING_SIZE, 0, NULL,
	    NULL, &sc->rtag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}

	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct epic_tx_desc) * TX_RING_SIZE,
	    1, sizeof(struct epic_tx_desc) * TX_RING_SIZE, 0,
	    NULL, NULL, &sc->ttag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}

	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct epic_frag_list) * TX_RING_SIZE,
	    1, sizeof(struct epic_frag_list) * TX_RING_SIZE, 0,
	    NULL, NULL, &sc->ftag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}

	/* Allocate DMA safe memory and get the DMA addresses. */
	error = bus_dmamem_alloc(sc->ftag, (void **)&sc->tx_flist,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->ftag, sc->fmap, sc->tx_flist,
	    sizeof(struct epic_frag_list) * TX_RING_SIZE, epic_dma_map_addr,
	    &sc->frag_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->tmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc,
	    sizeof(struct epic_tx_desc) * TX_RING_SIZE, epic_dma_map_addr,
	    &sc->tx_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc,
	    sizeof(struct epic_rx_desc) * RX_RING_SIZE, epic_dma_map_addr,
	    &sc->rx_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}

	/* Bring the chip out of low-power mode. */
	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
	DELAY(500);

	/* Workaround for Application Note 7-15. */
	for (i = 0; i < 16; i++)
		CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);

	/* Read MAC address from EEPROM. */
	for (i = 0; i < ETHER_ADDR_LEN / sizeof(u_int16_t); i++)
		((u_int16_t *)eaddr)[i] = epic_read_eeprom(sc, i);

	/* Set Non-Volatile Control Register from EEPROM. */
	CSR_WRITE_4(sc, NVCTL, epic_read_eeprom(sc, EEPROM_NVCTL) & 0x1F);

	/* Set defaults. */
	sc->tx_threshold = TRANSMIT_THRESHOLD;
	sc->txcon = TXCON_DEFAULT;
	sc->miicfg = MIICFG_SMI_ENABLE;
	sc->phyid = EPIC_UNKN_PHY;
	sc->serinst = -1;

	/* Fetch card id. */
	sc->cardvend = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->cardid = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (sc->cardvend != SMC_VENDORID)
		device_printf(dev, "unknown card vendor %04xh\n", sc->cardvend);

	/* Do ifmedia setup. */
	error = mii_attach(dev, &sc->miibus, ifp, epic_ifmedia_upd,
	    epic_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* Print the board type read from the EEPROM. */
	printf(" type ");
	for (i = 0x2c; i < 0x32; i++) {
		tmp = epic_read_eeprom(sc, i);
		if (' ' == (u_int8_t)tmp)
			break;
		printf("%c", (u_int8_t)tmp);
		tmp >>= 8;
		if (' ' == (u_int8_t)tmp)
			break;
		printf("%c", (u_int8_t)tmp);
	}
	printf("\n");

	/* Initialize rings. */
	if (epic_init_rings(sc)) {
		device_printf(dev, "failed to init rings\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	callout_init_mtx(&sc->timer, &sc->lock, 0);

	/* Attach to OS's managers. */
	ether_ifattach(ifp, eaddr);

	/* Activate our interrupt handler. */
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, epic_intr, sc, &sc->sc_ih);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	gone_by_fcp101_dev(dev);

	return (0);
fail:
	epic_release(sc);
	return (error);
}

/*
 * Free any resources allocated by the driver.
 */
static void
epic_release(epic_softc_t *sc)
{
	if (sc->ifp != NULL)
		if_free(sc->ifp);
	if (sc->irq)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->res)
		bus_release_resource(sc->dev, EPIC_RES, EPIC_RID, sc->res);
	epic_free_rings(sc);
	if (sc->tx_flist) {
		bus_dmamap_unload(sc->ftag, sc->fmap);
		bus_dmamem_free(sc->ftag, sc->tx_flist, sc->fmap);
	}
	if (sc->tx_desc) {
		bus_dmamap_unload(sc->ttag, sc->tmap);
		bus_dmamem_free(sc->ttag, sc->tx_desc, sc->tmap);
	}
	if (sc->rx_desc) {
		bus_dmamap_unload(sc->rtag, sc->rmap);
		bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap);
	}
	if (sc->mtag)
		bus_dma_tag_destroy(sc->mtag);
	if (sc->ftag)
		bus_dma_tag_destroy(sc->ftag);
	if (sc->ttag)
		bus_dma_tag_destroy(sc->ttag);
	if (sc->rtag)
		bus_dma_tag_destroy(sc->rtag);
	mtx_destroy(&sc->lock);
}

/*
 * Detach driver and free resources.
 */
static int
epic_detach(device_t dev)
{
	struct ifnet *ifp;
	epic_softc_t *sc;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	EPIC_LOCK(sc);
	epic_stop(sc);
	EPIC_UNLOCK(sc);
	callout_drain(&sc->timer);
	ether_ifdetach(ifp);
	bus_teardown_intr(dev, sc->irq, sc->sc_ih);

	bus_generic_detach(dev);
	device_delete_child(dev, sc->miibus);

	epic_release(sc);
	return (0);
}

#undef	EPIC_RES
#undef	EPIC_RID

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
epic_shutdown(device_t dev)
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	EPIC_LOCK(sc);
	epic_stop(sc);
	EPIC_UNLOCK(sc);
	return (0);
}

/*
 * This is the if_ioctl handler.
 */
static int
epic_ifioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	epic_softc_t *sc = ifp->if_softc;
	struct mii_data	*mii;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;

		/* XXX Though the datasheet doesn't imply any
		 * limitations on RX and TX sizes beside the 64KB max
		 * DMA transfer, it seems we can't send more than 1600
		 * data bytes per Ethernet packet (the transmitter
		 * hangs if more data is sent).
		 */
		EPIC_LOCK(sc);
		if (ifr->ifr_mtu + ifp->if_hdrlen <= EPIC_MAX_MTU) {
			ifp->if_mtu = ifr->ifr_mtu;
			epic_stop(sc);
			epic_init_locked(sc);
		} else
			error = EINVAL;
		EPIC_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		/*
		 * If the interface is marked up and stopped, then start it.
		 * If it is marked down and running, then stop it.
		 */
		EPIC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
				epic_init_locked(sc);
				EPIC_UNLOCK(sc);
				break;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				epic_stop(sc);
				EPIC_UNLOCK(sc);
				break;
			}
		}

		/* Handle IFF_PROMISC and IFF_ALLMULTI flags. */
		epic_stop_activity(sc);
		epic_set_mc_table(sc);
		epic_set_rx_mode(sc);
		epic_start_activity(sc);
		EPIC_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		EPIC_LOCK(sc);
		epic_set_mc_table(sc);
		EPIC_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
epic_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct epic_frag_list *flist;
	int i;

	if (error)
		return;

	KASSERT(nseg <= EPIC_MAX_FRAGS, ("too many DMA segments"));
	flist = arg;
	/* Fill fragments list. */
	for (i = 0; i < nseg; i++) {
		KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
		flist->frag[i].fraglen = segs[i].ds_len;
		flist->frag[i].fragaddr = segs[i].ds_addr;
	}
	flist->numfrags = nseg;
}

static void
epic_dma_map_rxbuf(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct epic_rx_desc *desc;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments"));
	desc = arg;
	desc->bufaddr = segs->ds_addr;
}

/*
 * This is the if_start handler. It takes mbufs from the if_snd queue
 * and queues them for transmission, one by one, until the TX ring
 * becomes full or the queue becomes empty.
 */
static void
epic_ifstart(struct ifnet *ifp)
{
	epic_softc_t *sc = ifp->if_softc;

	EPIC_LOCK(sc);
	epic_ifstart_locked(ifp);
	EPIC_UNLOCK(sc);
}

static void
epic_ifstart_locked(struct ifnet *ifp)
{
	epic_softc_t *sc = ifp->if_softc;
	struct epic_tx_buffer *buf;
	struct epic_tx_desc *desc;
	struct epic_frag_list *flist;
	struct mbuf *m0, *m;
	int error;

	while (sc->pending_txs < TX_RING_SIZE) {
		buf = sc->tx_buffer + sc->cur_tx;
		desc = sc->tx_desc + sc->cur_tx;
		flist = sc->tx_flist + sc->cur_tx;

		/* Get next packet to send. */
		IF_DEQUEUE(&ifp->if_snd, m0);

		/* If nothing to send, return. */
		if (m0 == NULL)
			return;

		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
		    epic_dma_map_txbuf, flist, 0);

		if (error && error != EFBIG) {
			m_freem(m0);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			continue;
		}

		/*
		 * If the packet needed more than EPIC_MAX_FRAGS parts,
		 * copy it into a newly allocated mbuf cluster.
		 */
		if (error) {
			m = m_defrag(m0, M_NOWAIT);
			if (m == NULL) {
				m_freem(m0);
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				continue;
			}
			m_freem(m0);
			m0 = m;

			error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m,
			    epic_dma_map_txbuf, flist, 0);
			if (error) {
				m_freem(m);
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				continue;
			}
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

		buf->mbuf = m0;
		sc->pending_txs++;
		sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
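		/* 0x8000 in the status word hands the descriptor to the NIC. */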
		desc->control = 0x01;
		desc->txlength =
		    max(m0->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
		desc->status = 0x8000;
		bus_dmamap_sync(sc->ttag, sc->tmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE);
		CSR_WRITE_4(sc, COMMAND, COMMAND_TXQUEUED);

		/* Set watchdog timer. */
		sc->tx_timeout = 8;

		BPF_MTAP(ifp, m0);
	}

	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

/*
 * Synopsis: Finish all received frames.
 */
static void
epic_rx_done(epic_softc_t *sc)
{
	struct ifnet *ifp = sc->ifp;
	u_int16_t len;
	struct epic_rx_buffer *buf;
	struct epic_rx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	bus_dmamap_sync(sc->rtag, sc->rmap, BUS_DMASYNC_POSTREAD);
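	/*
	 * A cleared owner bit (0x8000) means the chip has filled in
	 * this descriptor.
	 */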
	while ((sc->rx_desc[sc->cur_rx].status & 0x8000) == 0) {
		buf = sc->rx_buffer + sc->cur_rx;
		desc = sc->rx_desc + sc->cur_rx;

		/* Switch to next descriptor. */
		sc->cur_rx = (sc->cur_rx + 1) & RX_RING_MASK;

		/*
		 * Check for RX errors. This should only happen if
		 * SAVE_ERRORED_PACKETS is set; RX errors usually
		 * generate an RXE interrupt.
		 */
		if ((desc->status & 1) == 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			desc->status = 0x8000;
			continue;
		}

		/* Save the packet length and the mbuf holding it. */
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
		len = desc->rxlength - ETHER_CRC_LEN;
		m = buf->mbuf;

		/* Try to get an mbuf cluster. */
		buf->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			buf->mbuf = m;
			desc->status = 0x8000;
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);

		/* Point to new mbuf, and give descriptor to chip. */
		error = bus_dmamap_load_mbuf(sc->mtag, sc->sparemap, buf->mbuf,
		    epic_dma_map_rxbuf, desc, 0);
		if (error) {
			buf->mbuf = m;
			desc->status = 0x8000;
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		desc->status = 0x8000;
		bus_dmamap_unload(sc->mtag, buf->map);
		map = buf->map;
		buf->map = sc->sparemap;
		sc->sparemap = map;
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

		/* The first mbuf holds the Ethernet and packet headers. */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/* Give mbuf to OS. */
		EPIC_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		EPIC_LOCK(sc);

		/* Successfully received a frame. */
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	}
	bus_dmamap_sync(sc->rtag, sc->rmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Synopsis: Do the last phase of transmission, i.e. if a descriptor
 * has been transmitted, decrease the pending_txs counter, free the
 * mbuf holding the packet, switch to the next descriptor and repeat
 * until no packets are pending or a descriptor has not been
 * transmitted yet.
 */
static void
epic_tx_done(epic_softc_t *sc)
{
	struct epic_tx_buffer *buf;
	struct epic_tx_desc *desc;
	u_int16_t status;

	bus_dmamap_sync(sc->ttag, sc->tmap, BUS_DMASYNC_POSTREAD);
	while (sc->pending_txs > 0) {
		buf = sc->tx_buffer + sc->dirty_tx;
		desc = sc->tx_desc + sc->dirty_tx;
		status = desc->status;

		/*
		 * If this packet has not been transmitted, the
		 * following packets have not been transmitted either.
		 */
		if (status & 0x8000)
			break;

		/* Packet is transmitted. Switch to next and free mbuf. */
		sc->pending_txs--;
		sc->dirty_tx = (sc->dirty_tx + 1) & TX_RING_MASK;
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mtag, buf->map);
		m_freem(buf->mbuf);
		buf->mbuf = NULL;

		/* Check for errors and collisions. */
		if (status & 0x0001)
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
		else
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
		if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS,
		    (status >> 8) & 0x1F);
#ifdef EPIC_DIAG
		if ((status & 0x1001) == 0x1001)
			device_printf(sc->dev,
			    "Tx ERROR: excessive coll. number\n");
#endif
	}

	if (sc->pending_txs < TX_RING_SIZE)
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	bus_dmamap_sync(sc->ttag, sc->tmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Interrupt function.
 */
static void
epic_intr(void *arg)
{
	epic_softc_t *sc;
	int status, i;

	sc = arg;
	i = 4;
	EPIC_LOCK(sc);
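	/* Process at most four rounds of interrupt status. */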
	while (i-- && ((status = CSR_READ_4(sc, INTSTAT)) & INTSTAT_INT_ACTV)) {
		CSR_WRITE_4(sc, INTSTAT, status);

		if (status & (INTSTAT_RQE | INTSTAT_RCC | INTSTAT_OVW)) {
			epic_rx_done(sc);
			if (status & (INTSTAT_RQE | INTSTAT_OVW)) {
#ifdef EPIC_DIAG
				if (status & INTSTAT_OVW)
					device_printf(sc->dev,
					    "RX buffer overflow\n");
				if (status & INTSTAT_RQE)
					device_printf(sc->dev,
					    "RX FIFO overflow\n");
#endif
				if ((CSR_READ_4(sc, COMMAND) &
				    COMMAND_RXQUEUED) == 0)
					CSR_WRITE_4(sc, COMMAND,
					    COMMAND_RXQUEUED);
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			}
		}

		if (status & (INTSTAT_TXC | INTSTAT_TCC | INTSTAT_TQE)) {
			epic_tx_done(sc);
			if (sc->ifp->if_snd.ifq_head != NULL)
				epic_ifstart_locked(sc->ifp);
		}

		/* Check for rare errors. */
		if (status & (INTSTAT_FATAL | INTSTAT_PMA | INTSTAT_PTA |
		    INTSTAT_APE | INTSTAT_DPE | INTSTAT_TXU | INTSTAT_RXE)) {
			if (status & (INTSTAT_FATAL | INTSTAT_PMA |
			    INTSTAT_PTA | INTSTAT_APE | INTSTAT_DPE)) {
				device_printf(sc->dev,
				    "PCI fatal errors occurred: %s%s%s%s\n",
				    (status & INTSTAT_PMA) ? "PMA " : "",
				    (status & INTSTAT_PTA) ? "PTA " : "",
				    (status & INTSTAT_APE) ? "APE " : "",
				    (status & INTSTAT_DPE) ? "DPE" : "");

				epic_stop(sc);
				epic_init_locked(sc);
				break;
			}

			if (status & INTSTAT_RXE) {
#ifdef EPIC_DIAG
				device_printf(sc->dev,
				    "CRC/Alignment error\n");
#endif
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			}

			if (status & INTSTAT_TXU) {
				epic_tx_underrun(sc);
				if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
			}
		}
	}

	/* If no packets are pending, then no timeouts. */
	if (sc->pending_txs == 0)
		sc->tx_timeout = 0;
	EPIC_UNLOCK(sc);
}

/*
 * Handle the TX underrun error: increase the TX threshold
 * and restart the transmitter.
 */
static void
epic_tx_underrun(epic_softc_t *sc)
{
	if (sc->tx_threshold > TRANSMIT_THRESHOLD_MAX) {
		sc->txcon &= ~TXCON_EARLY_TRANSMIT_ENABLE;
#ifdef EPIC_DIAG
		device_printf(sc->dev, "Tx UNDERRUN: early TX disabled\n");
#endif
	} else {
		sc->tx_threshold += 0x40;
#ifdef EPIC_DIAG
		device_printf(sc->dev,
		    "Tx UNDERRUN: TX threshold increased to %d\n",
		    sc->tx_threshold);
#endif
	}

	/* We must set TXUGO to reset the stuck transmitter. */
	CSR_WRITE_4(sc, COMMAND, COMMAND_TXUGO);

	/* Update the TX threshold. */
	epic_stop_activity(sc);
	epic_set_tx_mode(sc);
	epic_start_activity(sc);
}

/*
 * This function is called once a second when the interface is running
 * and performs two functions.  First, it provides a timer for the mii
 * to help with autonegotiation.  Second, it checks for transmit
 * timeouts.
 */
static void
epic_timer(void *arg)
{
	epic_softc_t *sc = arg;
	struct mii_data *mii;
	struct ifnet *ifp;

	ifp = sc->ifp;
	EPIC_ASSERT_LOCKED(sc);
	if (sc->tx_timeout && --sc->tx_timeout == 0) {
		device_printf(sc->dev, "device timeout %d packets\n",
		    sc->pending_txs);

		/* Try to finish queued packets. */
		epic_tx_done(sc);

		/* If not successful. */
		if (sc->pending_txs > 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, sc->pending_txs);

			/* Reinitialize board. */
			device_printf(sc->dev, "reinitialization\n");
			epic_stop(sc);
			epic_init_locked(sc);
		} else
			device_printf(sc->dev,
			    "seems we can continue normally\n");

		/* Start output. */
		if (ifp->if_snd.ifq_head)
			epic_ifstart_locked(ifp);
	}

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);

	callout_reset(&sc->timer, hz, epic_timer, sc);
}

/*
 * Set media options.
 */
static int
epic_ifmedia_upd(struct ifnet *ifp)
{
	epic_softc_t *sc;
	int error;

	sc = ifp->if_softc;
	EPIC_LOCK(sc);
	error = epic_ifmedia_upd_locked(ifp);
	EPIC_UNLOCK(sc);
	return (error);
}

static int
epic_ifmedia_upd_locked(struct ifnet *ifp)
{
	epic_softc_t *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;
	struct mii_softc *miisc;
	int cfg, media;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->miibus);
	ifm = &mii->mii_media;
	media = ifm->ifm_cur->ifm_media;

	/* Do not do anything if interface is not up. */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (0);

	/*
	 * Look up the currently selected PHY.
	 */
	if (IFM_INST(media) == sc->serinst) {
		sc->phyid = EPIC_SERIAL;
		sc->physc = NULL;
	} else {
		/* Not the serial interface, so select MII mode. */
		sc->miicfg &= ~MIICFG_SERIAL_ENABLE;
		CSR_WRITE_4(sc, MIICFG, sc->miicfg);

		/* Default to unknown PHY. */
		sc->phyid = EPIC_UNKN_PHY;

		/* Look up the selected PHY. */
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
			if (IFM_INST(media) == miisc->mii_inst) {
				sc->physc = miisc;
				break;
			}
		}

		/* Identify selected PHY. */
		if (sc->physc) {
			int id1, id2, model, oui;

			id1 = PHY_READ(sc->physc, MII_PHYIDR1);
			id2 = PHY_READ(sc->physc, MII_PHYIDR2);

			oui = MII_OUI(id1, id2);
			model = MII_MODEL(id2);
			switch (oui) {
			case MII_OUI_xxQUALSEMI:
				if (model == MII_MODEL_xxQUALSEMI_QS6612)
					sc->phyid = EPIC_QS6612_PHY;
				break;
			case MII_OUI_ALTIMA:
				if (model == MII_MODEL_ALTIMA_AC101)
					sc->phyid = EPIC_AC101_PHY;
				break;
			case MII_OUI_xxLEVEL1:
				if (model == MII_MODEL_xxLEVEL1_LXT970)
					sc->phyid = EPIC_LXT970_PHY;
				break;
			}
		}
	}

	/*
	 * Do PHY specific card setup.
	 */

	/*
	 * Call this to isolate all PHYs that are not selected and set
	 * up the selected one.
	 */
	mii_mediachg(mii);

	/* Do our own setup. */
	switch (sc->phyid) {
	case EPIC_QS6612_PHY:
		break;
	case EPIC_AC101_PHY:
		/* We have to power up the fiber transceivers. */
		if (IFM_SUBTYPE(media) == IFM_100_FX)
			sc->miicfg |= MIICFG_694_ENABLE;
		else
			sc->miicfg &= ~MIICFG_694_ENABLE;
		CSR_WRITE_4(sc, MIICFG, sc->miicfg);

		break;
	case EPIC_LXT970_PHY:
		/* We have to power up the fiber transceivers. */
		cfg = PHY_READ(sc->physc, MII_LXTPHY_CONFIG);
		if (IFM_SUBTYPE(media) == IFM_100_FX)
			cfg |= CONFIG_LEDC1 | CONFIG_LEDC0;
		else
			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
		PHY_WRITE(sc->physc, MII_LXTPHY_CONFIG, cfg);

		break;
	case EPIC_SERIAL:
		/* Select serial PHY (10base2/BNC usually). */
		sc->miicfg |= MIICFG_694_ENABLE | MIICFG_SERIAL_ENABLE;
		CSR_WRITE_4(sc, MIICFG, sc->miicfg);

		/* There is no PHY driver to fill these in. */
		mii->mii_media_active = media;
		mii->mii_media_status = 0;

		/*
		 * We need to call this manually as it wasn't called
		 * in mii_mediachg().
		 */
		epic_miibus_statchg(sc->dev);
		break;
	default:
		device_printf(sc->dev, "ERROR! Unknown PHY selected\n");
		return (EINVAL);
	}

	return (0);
}

/*
 * Report current media status.
 */
static void
epic_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	epic_softc_t *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->miibus);
	EPIC_LOCK(sc);

	/* Nothing should be selected if interface is down. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		ifmr->ifm_active = IFM_NONE;
		ifmr->ifm_status = 0;
		EPIC_UNLOCK(sc);
		return;
	}

	/* Call underlying pollstat, if not serial PHY. */
	if (sc->phyid != EPIC_SERIAL)
		mii_pollstat(mii);

	/* Simply copy media info. */
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	EPIC_UNLOCK(sc);
}

/*
 * Callback routine, called on media change.
 */
static void
epic_miibus_statchg(device_t dev)
{
	epic_softc_t *sc;
	struct mii_data *mii;
	int media;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->miibus);
	media = mii->mii_media_active;

	sc->txcon &= ~(TXCON_LOOPBACK_MODE | TXCON_FULL_DUPLEX);

	/*
	 * If we are in full-duplex mode or loopback operation,
	 * we need to decouple receiver and transmitter.
	 */
	if (IFM_OPTIONS(media) & (IFM_FDX | IFM_LOOP))
		sc->txcon |= TXCON_FULL_DUPLEX;

	/* On some cards we need to set the full-duplex LED manually. */
	if (sc->cardid == SMC9432FTX ||
	    sc->cardid == SMC9432FTX_SC) {
		if (IFM_OPTIONS(media) & IFM_FDX)
			sc->miicfg |= MIICFG_694_ENABLE;
		else
			sc->miicfg &= ~MIICFG_694_ENABLE;

		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
	}

	epic_stop_activity(sc);
	epic_set_tx_mode(sc);
	epic_start_activity(sc);
}

static void
epic_miibus_mediainit(device_t dev)
{
	epic_softc_t *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;
	int media;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->miibus);
	ifm = &mii->mii_media;

	/*
	 * Add the serial media interface if present; this applies to
	 * the SMC9432BTX series.
	 */
	if (CSR_READ_4(sc, MIICFG) & MIICFG_PHY_PRESENT) {
		/* Store its instance. */
		sc->serinst = mii->mii_instance++;

		/* Add as 10base2/BNC media. */
		media = IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->serinst);
		ifmedia_add(ifm, media, 0, NULL);

		/* Report to user. */
		device_printf(sc->dev, "serial PHY detected (10Base2/BNC)\n");
	}
}

/*
 * Reset chip and update media.
 */
static void
epic_init(void *xsc)
{
	epic_softc_t *sc = xsc;

	EPIC_LOCK(sc);
	epic_init_locked(sc);
	EPIC_UNLOCK(sc);
}

static void
epic_init_locked(epic_softc_t *sc)
{
	struct ifnet *ifp = sc->ifp;
	int i;

	/* If interface is already running, then we need not do anything. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		return;
	}

	/* Soft reset the chip (the card must be powered up first). */
	CSR_WRITE_4(sc, GENCTL, 0);
	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);

	/*
	 * Reset takes 15 PCI ticks, the length of which depends on the
	 * PCI bus speed. Assuming the clock is >= 33000000 Hz, we have
	 * to wait at least 495e-6 s.
	 */
	DELAY(500);

	/* Wake up. */
	CSR_WRITE_4(sc, GENCTL, 0);

	/* Workaround for Application Note 7-15. */
	for (i = 0; i < 16; i++)
		CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);

	/* Give rings to EPIC. */
	CSR_WRITE_4(sc, PRCDAR, sc->rx_addr);
	CSR_WRITE_4(sc, PTCDAR, sc->tx_addr);

	/* Put node address to EPIC. */
	CSR_WRITE_4(sc, LAN0, ((u_int16_t *)IF_LLADDR(sc->ifp))[0]);
	CSR_WRITE_4(sc, LAN1, ((u_int16_t *)IF_LLADDR(sc->ifp))[1]);
	CSR_WRITE_4(sc, LAN2, ((u_int16_t *)IF_LLADDR(sc->ifp))[2]);

	/* Set tx mode, including the transmit threshold. */
	epic_set_tx_mode(sc);

	/* Compute and set RXCON. */
	epic_set_rx_mode(sc);

	/* Set multicast table. */
	epic_set_mc_table(sc);

	/* Enable interrupts by setting the interrupt mask. */
	CSR_WRITE_4(sc, INTMASK,
		INTSTAT_RCC  | /* INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE | */
		/* INTSTAT_TXC | */ INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU |
		INTSTAT_FATAL);

	/* Acknowledge all pending interrupts. */
	CSR_WRITE_4(sc, INTSTAT, CSR_READ_4(sc, INTSTAT));

	/* Enable interrupts, set PCI read multiple, etc. */
	CSR_WRITE_4(sc, GENCTL,
		GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE |
		GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64);

	/* Mark interface running ... */
	if (ifp->if_flags & IFF_UP)
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
	else
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/* ... and free. */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start Rx process. */
	epic_start_activity(sc);

	/* Set appropriate media. */
	epic_ifmedia_upd_locked(ifp);

	callout_reset(&sc->timer, hz, epic_timer, sc);
}

/*
 * Synopsis: Calculate and set Rx mode. The chip must be in the idle
 * state to access RXCON.
 */
static void
epic_set_rx_mode(epic_softc_t *sc)
{
	u_int32_t flags;
	u_int32_t rxcon;

	flags = sc->ifp->if_flags;
	rxcon = RXCON_DEFAULT;

#ifdef EPIC_EARLY_RX
	rxcon |= RXCON_EARLY_RX;
#endif

	rxcon |= (flags & IFF_PROMISC) ? RXCON_PROMISCUOUS_MODE : 0;

	CSR_WRITE_4(sc, RXCON, rxcon);
}

/*
 * Synopsis: Set the transmit control register. The chip must be in the
 * idle state to access TXCON.
 */
static void
epic_set_tx_mode(epic_softc_t *sc)
{

	if (sc->txcon & TXCON_EARLY_TRANSMIT_ENABLE)
		CSR_WRITE_4(sc, ETXTHR, sc->tx_threshold);

	CSR_WRITE_4(sc, TXCON, sc->txcon);
}

/*
 * Synopsis: Program the multicast filter honoring the IFF_ALLMULTI and
 * IFF_PROMISC flags (note that setting the PROMISC bit in the EPIC's
 * RXCON only affects individually addressed frames; the multicast
 * filter must be programmed manually).
 *
 * Note: EPIC must be in idle state.
 */
static void
epic_set_mc_table(epic_softc_t *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int16_t filter[4];
	u_int8_t h;

	ifp = sc->ifp;
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		CSR_WRITE_4(sc, MC0, 0xFFFF);
		CSR_WRITE_4(sc, MC1, 0xFFFF);
		CSR_WRITE_4(sc, MC2, 0xFFFF);
		CSR_WRITE_4(sc, MC3, 0xFFFF);
		return;
	}

	filter[0] = 0;
	filter[1] = 0;
	filter[2] = 0;
	filter[3] = 0;

	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
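		/*
		 * The hash is the top 6 bits of the big-endian CRC-32
		 * of the address: bits 5-4 select one of the four
		 * 16-bit filter words, bits 3-0 the bit within it.
		 */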
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		filter[h >> 4] |= 1 << (h & 0xF);
	}
	if_maddr_runlock(ifp);

	CSR_WRITE_4(sc, MC0, filter[0]);
	CSR_WRITE_4(sc, MC1, filter[1]);
	CSR_WRITE_4(sc, MC2, filter[2]);
	CSR_WRITE_4(sc, MC3, filter[3]);
}

/*
 * Synopsis: Start the receive process, and the transmit process if
 * packets are pending.
 */
static void
epic_start_activity(epic_softc_t *sc)
{

	/* Start rx process. */
	CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED | COMMAND_START_RX |
	    (sc->pending_txs ? COMMAND_TXQUEUED : 0));
}

/*
 * Synopsis: Completely stop the Rx and Tx processes. If TQE is set, an
 * additional packet needs to be queued to stop Tx DMA.
 */
static void
epic_stop_activity(epic_softc_t *sc)
{
	int status, i;

	/* Stop Tx and Rx DMA. */
	CSR_WRITE_4(sc, COMMAND,
	    COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA);

	/* Wait for Rx and Tx DMA to stop (why 1 ms? XXX). */
	for (i = 0; i < 0x1000; i++) {
		status = CSR_READ_4(sc, INTSTAT) &
		    (INTSTAT_TXIDLE | INTSTAT_RXIDLE);
		if (status == (INTSTAT_TXIDLE | INTSTAT_RXIDLE))
			break;
		DELAY(1);
	}

	/* Catch all finished packets. */
	epic_rx_done(sc);
	epic_tx_done(sc);

	status = CSR_READ_4(sc, INTSTAT);

	if ((status & INTSTAT_RXIDLE) == 0)
		device_printf(sc->dev, "ERROR! Can't stop Rx DMA\n");

	if ((status & INTSTAT_TXIDLE) == 0)
		device_printf(sc->dev, "ERROR! Can't stop Tx DMA\n");

	/*
	 * We may need to queue one more packet if TQE is set; this is
	 * a rare but real case.
	 */
	if ((status & INTSTAT_TQE) && !(status & INTSTAT_TXIDLE))
		(void)epic_queue_last_packet(sc);
}

/*
 * The EPIC transmitter may get stuck in the TQE state. It will not go
 * IDLE until the packet from the current descriptor has been copied to
 * internal RAM. We compose a dummy packet here and queue it for
 * transmission.
 *
 * XXX the packet will then actually be sent over the network...
 */
static int
epic_queue_last_packet(epic_softc_t *sc)
{
	struct epic_tx_desc *desc;
	struct epic_frag_list *flist;
	struct epic_tx_buffer *buf;
	struct mbuf *m0;
	int error, i;

	device_printf(sc->dev, "queue last packet\n");

	desc = sc->tx_desc + sc->cur_tx;
	flist = sc->tx_flist + sc->cur_tx;
	buf = sc->tx_buffer + sc->cur_tx;

	if ((desc->status & 0x8000) || (buf->mbuf != NULL))
		return (EBUSY);

	MGETHDR(m0, M_NOWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	/* Prepare the mbuf. */
	m0->m_len = min(MHLEN, ETHER_MIN_LEN - ETHER_CRC_LEN);
	m0->m_pkthdr.len = m0->m_len;
	m0->m_pkthdr.rcvif = sc->ifp;
	bzero(mtod(m0, caddr_t), m0->m_len);

	/* Fill fragments list. */
	error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
	    epic_dma_map_txbuf, flist, 0);
	if (error) {
		m_freem(m0);
		return (error);
	}
	bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

	/* Fill in descriptor. */
	buf->mbuf = m0;
	sc->pending_txs++;
	sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
	desc->control = 0x01;
	desc->txlength = max(m0->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	desc->status = 0x8000;
	bus_dmamap_sync(sc->ttag, sc->tmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE);

	/* Launch transmission. */
	CSR_WRITE_4(sc, COMMAND, COMMAND_STOP_TDMA | COMMAND_TXQUEUED);

	/* Wait for Tx DMA to stop (for how long? XXX). */
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE)
			break;
		DELAY(1);
	}

	if ((CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) == 0)
		device_printf(sc->dev, "ERROR! can't stop Tx DMA (2)\n");
	else
		epic_tx_done(sc);

	return (0);
}

/*
 * Synopsis: Shut down the board and deallocate rings.
 */
static void
epic_stop(epic_softc_t *sc)
{

	EPIC_ASSERT_LOCKED(sc);

	sc->tx_timeout = 0;
	callout_stop(&sc->timer);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, INTMASK, 0);
	CSR_WRITE_4(sc, GENCTL, 0);

	/* Try to stop the Rx and Tx processes. */
	epic_stop_activity(sc);

	/* Reset the chip. */
	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
	DELAY(1000);

	/* Make the chip go to bed. */
	CSR_WRITE_4(sc, GENCTL, GENCTL_POWER_DOWN);

	/* Mark as stopped. */
	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

/*
 * Synopsis: This function should free all memory allocated for rings.
 */
static void
epic_free_rings(epic_softc_t *sc)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct epic_rx_buffer *buf = sc->rx_buffer + i;
		struct epic_rx_desc *desc = sc->rx_desc + i;

		desc->status = 0;
		desc->buflength = 0;
		desc->bufaddr = 0;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}

	if (sc->sparemap != NULL)
		bus_dmamap_destroy(sc->mtag, sc->sparemap);

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct epic_tx_buffer *buf = sc->tx_buffer + i;
		struct epic_tx_desc *desc = sc->tx_desc + i;

		desc->status = 0;
		desc->buflength = 0;
		desc->bufaddr = 0;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}
}

/*
 * Synopsis: Allocate mbufs for the Rx ring and point the Rx
 * descriptors at them. Point the Tx descriptors at the fragment lists.
 * Check that all descriptors and fragment lists are bounded and
 * aligned properly.
 */
static int
epic_init_rings(epic_softc_t *sc)
{
	int error, i;

	sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;

	/* Initialize the RX descriptor ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct epic_rx_buffer *buf = sc->rx_buffer + i;
		struct epic_rx_desc *desc = sc->rx_desc + i;

		desc->status = 0;		/* Owned by driver */
		desc->next = sc->rx_addr +
		    ((i + 1) & RX_RING_MASK) * sizeof(struct epic_rx_desc);

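		/*
		 * Descriptors must be dword-aligned and must not cross
		 * a page boundary.
		 */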
		if ((desc->next & 3) ||
		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
			epic_free_rings(sc);
			return (EFAULT);
		}

		buf->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			epic_free_rings(sc);
			return (ENOBUFS);
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			epic_free_rings(sc);
			return (error);
		}
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    epic_dma_map_rxbuf, desc, 0);
		if (error) {
			epic_free_rings(sc);
			return (error);
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

		desc->buflength = buf->mbuf->m_len; /* Max RX buffer length */
		desc->status = 0x8000;		/* Set owner bit to NIC */
	}
	bus_dmamap_sync(sc->rtag, sc->rmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Create the spare DMA map. */
	error = bus_dmamap_create(sc->mtag, 0, &sc->sparemap);
	if (error) {
		epic_free_rings(sc);
		return (error);
	}

	/* Initialize the TX descriptor ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct epic_tx_buffer *buf = sc->tx_buffer + i;
		struct epic_tx_desc *desc = sc->tx_desc + i;

		desc->status = 0;
		desc->next = sc->tx_addr +
		    ((i + 1) & TX_RING_MASK) * sizeof(struct epic_tx_desc);

		if ((desc->next & 3) ||
		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
			epic_free_rings(sc);
			return (EFAULT);
		}

		buf->mbuf = NULL;
		desc->bufaddr = sc->frag_addr +
		    i * sizeof(struct epic_frag_list);

		if ((desc->bufaddr & 3) ||
		    ((desc->bufaddr & PAGE_MASK) +
		    sizeof(struct epic_frag_list)) > PAGE_SIZE) {
			epic_free_rings(sc);
			return (EFAULT);
		}

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			epic_free_rings(sc);
			return (error);
		}
	}
	bus_dmamap_sync(sc->ttag, sc->tmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * EEPROM operation functions
 */
static void
epic_write_eepromreg(epic_softc_t *sc, u_int8_t val)
{
	u_int16_t i;

	CSR_WRITE_1(sc, EECTL, val);

	for (i = 0; i < 0xFF; i++) {
		if ((CSR_READ_1(sc, EECTL) & 0x20) == 0)
			break;
	}
}

static u_int8_t
epic_read_eepromreg(epic_softc_t *sc)
{

	return (CSR_READ_1(sc, EECTL));
}

static u_int8_t
epic_eeprom_clock(epic_softc_t *sc, u_int8_t val)
{

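	/* Pulse the serial clock (bit 2) while holding the data bits in val. */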
	epic_write_eepromreg(sc, val);
	epic_write_eepromreg(sc, (val | 0x4));
	epic_write_eepromreg(sc, val);

	return (epic_read_eepromreg(sc));
}

static void
epic_output_eepromw(epic_softc_t *sc, u_int16_t val)
{
	int i;

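	/*
	 * Shift the word out MSB first: 0x0B drives the EEPROM data-in
	 * line high, 0x03 drives it low.
	 */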
	for (i = 0xF; i >= 0; i--) {
		if (val & (1 << i))
			epic_eeprom_clock(sc, 0x0B);
		else
			epic_eeprom_clock(sc, 0x03);
	}
}

static u_int16_t
epic_input_eepromw(epic_softc_t *sc)
{
	u_int16_t retval = 0;
	int i;

	for (i = 0xF; i >= 0; i--) {
		if (epic_eeprom_clock(sc, 0x3) & 0x10)
			retval |= (1 << i);
	}

	return (retval);
}

static int
epic_read_eeprom(epic_softc_t *sc, u_int16_t loc)
{
	u_int16_t dataval;
	u_int16_t read_cmd;

	epic_write_eepromreg(sc, 3);

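	/*
	 * EECTL bit 0x40 apparently reflects the EEPROM size: the READ
	 * opcode (110b) is followed by a 6-bit address on small parts
	 * and an 8-bit address on larger ones.
	 */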
	if (epic_read_eepromreg(sc) & 0x40)
		read_cmd = (loc & 0x3F) | 0x180;
	else
		read_cmd = (loc & 0xFF) | 0x600;

	epic_output_eepromw(sc, read_cmd);

	dataval = epic_input_eepromw(sc);

	epic_write_eepromreg(sc, 1);

	return (dataval);
}

/*
 * MII read/write routines.
 */
static int
epic_read_phy_reg(epic_softc_t *sc, int phy, int reg)
{
	int i;

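	/*
	 * MIICTL layout: PHY address in bits 13-9, register number in
	 * bits 8-4; bit 0 is the read strobe, which the chip clears
	 * when the transaction completes.
	 */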
	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x01));

	for (i = 0; i < 0x100; i++) {
		if ((CSR_READ_4(sc, MIICTL) & 0x01) == 0)
			break;
		DELAY(1);
	}

	return (CSR_READ_4(sc, MIIDATA));
}

static void
epic_write_phy_reg(epic_softc_t *sc, int phy, int reg, int val)
{
	int i;

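	/* Same MIICTL layout as the read path; bit 1 is the write strobe. */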
	CSR_WRITE_4(sc, MIIDATA, val);
	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x02));

	for (i = 0; i < 0x100; i++) {
		if ((CSR_READ_4(sc, MIICTL) & 0x02) == 0)
			break;
		DELAY(1);
	}
}

static int
epic_miibus_readreg(device_t dev, int phy, int reg)
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	return (PHY_READ_2(sc, phy, reg));
}

static int
epic_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	PHY_WRITE_2(sc, phy, reg, data);

	return (0);
}
