/* if_gmc.c revision 1.2 */
1/* $NetBSD: if_gmc.c,v 1.2 2008/12/15 04:44:27 matt Exp $ */
2/*-
3 * Copyright (c) 2008 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas <matt@3am-software.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <sys/param.h>
32#include <sys/callout.h>
33#include <sys/device.h>
34#include <sys/ioctl.h>
35#include <sys/kernel.h>
36#include <sys/kmem.h>
37#include <sys/mbuf.h>
38
39#include <machine/bus.h>
40#include <machine/intr.h>
41
42#include <arm/gemini/gemini_reg.h>
43#include <arm/gemini/gemini_gmacreg.h>
44#include <arm/gemini/gemini_gmacvar.h>
45
46#include <net/if.h>
47#include <net/if_ether.h>
48#include <net/if_dl.h>
49
50__KERNEL_RCSID(0, "$NetBSD: if_gmc.c,v 1.2 2008/12/15 04:44:27 matt Exp $");
51
#define	MAX_TXSEG	32

/*
 * Per-port software state.  Each gmc instance is one MAC port of the
 * parent Gemini GMAC controller; resources shared between the two
 * ports (free queues, DMA map caches, interrupt routing) live in the
 * parent's gmac_softc (sc_psc).
 */
struct gmc_softc {
	device_t sc_dev;		/* generic device glue */
	struct gmac_softc *sc_psc;	/* parent controller state */
	struct gmc_softc *sc_sibling;	/* other port (NOTE(review): never
					 * assigned in this file — confirm) */
	bus_dma_tag_t sc_dmat;		/* DMA tag (from parent) */
	bus_space_tag_t sc_iot;		/* register space tag (from parent) */
	bus_space_handle_t sc_ioh;	/* global GMAC register block */
	bus_space_handle_t sc_dma_ioh;	/* this port's DMA registers */
	bus_space_handle_t sc_gmac_ioh;	/* this port's MAC registers */
	struct ethercom sc_ec;		/* ethernet common state (has ifnet) */
	struct mii_data sc_mii;		/* MII/PHY state */
	void *sc_ih;			/* interrupt handle */
	bool sc_port1;			/* true iff this is GMAC port 1 */
	gmac_hwqueue_t *sc_rxq;		/* default receive queue */
	gmac_hwqueue_t *sc_txq[6];	/* software transmit queues */
	callout_t sc_mii_ch;		/* 1 Hz MII tick callout */

	/* Shadow copies of h/w registers, to avoid redundant writes. */
	uint32_t sc_gmac_status;
	uint32_t sc_gmac_sta_add[3];
	uint32_t sc_gmac_mcast_filter[2];
	uint32_t sc_gmac_rx_filter;
	uint32_t sc_gmac_config[2];
	uint32_t sc_dmavr;

	uint32_t sc_int_mask[5];	/* INTn bits belonging to this port */
	uint32_t sc_int_enabled[5];	/* INTn bits currently enabled */
};

/* Shorthand: the ifnet embedded in the ethercom. */
#define	sc_if	sc_ec.ec_if
83
84static bool
85gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
86{
87	bus_dmamap_t map;
88	uint32_t desc1, desc3;
89	struct mbuf *last_m, *m0;
90	size_t count, i;
91	int error;
92	gmac_desc_t *d;
93
94	KASSERT(hwq != NULL);
95
96	map = gmac_mapcache_get(hwq->hwq_hqm->hqm_mc);
97	if (map == NULL)
98		return false;
99
100	for (last_m = NULL, m0 = m, count = 0;
101	     m0 != NULL;
102	     last_m = m0, m0 = m0->m_next) {
103		vaddr_t addr = (uintptr_t)m0->m_data;
104		if (m0->m_len == 0)
105			continue;
106		if (addr & 1) {
107			if (last_m != NULL && M_TRAILINGSPACE(last_m) > 0) {
108				last_m->m_data[last_m->m_len++] = *m->m_data++;
109				m->m_len--;
110			} else if (M_TRAILINGSPACE(m0) > 0) {
111				memmove(m0->m_data + 1, m0->m_data, m0->m_len);
112				m0->m_data++;
113			} else if (M_LEADINGSPACE(m0) > 0) {
114				memmove(m0->m_data - 1, m0->m_data, m0->m_len);
115				m0->m_data--;
116			} else {
117				panic("gmc_txqueue: odd addr %p", m0->m_data);
118			}
119		}
120		count += ((addr & PGOFSET) + m->m_len + PGOFSET) >> PGSHIFT;
121	}
122
123	gmac_hwqueue_sync(hwq);
124	if (hwq->hwq_free <= count) {
125		gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
126		return false;
127	}
128
129	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
130	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
131	if (error) {
132		aprint_error_dev(sc->sc_dev, "ifstart: load failed: %d\n",
133		    error);
134		gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
135		m_freem(m);
136		sc->sc_if.if_oerrors++;
137		return true;
138	}
139	KASSERT(map->dm_nsegs > 0);
140
141	/*
142	 * Sync the mbuf contents to memory/cache.
143	 */
144	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
145		BUS_DMASYNC_PREWRITE);
146
147	/*
148	 * Now we need to load the descriptors...
149	 */
150	desc1 = m->m_pkthdr.len;
151	desc3 = DESC3_SOF;
152	i = 0;
153	d = NULL;
154	do {
155		if (i > 0)
156			aprint_normal_dev(sc->sc_dev,
157			    "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
158			    i-1, d, d->d_desc0, d->d_desc1,
159			    d->d_bufaddr, d->d_desc3);
160		d = gmac_hwqueue_desc(hwq, i);
161		KASSERT(map->dm_segs[i].ds_len > 0);
162		KASSERT((map->dm_segs[i].ds_addr & 1) == 0);
163		d->d_desc0 = htole32(map->dm_segs[i].ds_len);
164		d->d_desc1 = htole32(desc1);
165		d->d_bufaddr = htole32(map->dm_segs[i].ds_addr);
166		d->d_desc3 = htole32(desc3);
167		desc3 = 0;
168	} while (++i < map->dm_nsegs);
169
170	d->d_desc3 |= htole32(DESC3_EOF|DESC3_EOFIE);
171	aprint_normal_dev(sc->sc_dev,
172	    "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
173	    i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
174	M_SETCTX(m, map);
175	IF_ENQUEUE(&hwq->hwq_ifq, m);
176	/*
177	 * Last descriptor has been marked.  Give them to the h/w.
178	 * This will sync for us.
179	 */
180	gmac_hwqueue_produce(hwq, map->dm_nsegs);
181	aprint_normal_dev(sc->sc_dev,
182	    "gmac_txqueue: *%zu@%p=%#x/%#x/%#x/%#x\n",
183	    i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
184	return true;
185}
186
/*
 * Replenish the shared software free queue with cluster mbufs until it
 * holds at least psc->sc_swfree_min buffers.  The queue is shared by
 * both ports through the parent softc.
 */
static void
gmc_rxproduce(struct gmc_softc *sc)
{
	struct gmac_softc * const psc = sc->sc_psc;
	gmac_hwqueue_t * const hwq = psc->sc_swfreeq;
	gmac_hwqmem_t * const hqm = hwq->hwq_hqm;
	size_t i;

	/*
	 * (size - free - 1) is presumably the number of buffers already
	 * queued; keep adding until that, plus what we have added so
	 * far, reaches the low-water mark.
	 */
	for (i = 0;
	     hwq->hwq_size - hwq->hwq_free - 1 + i < psc->sc_swfree_min; i++) {
		bus_dmamap_t map;
		gmac_desc_t *d;
		struct mbuf *m;
		int error;

		/* Stop quietly if we run out of DMA maps ... */
		map = gmac_mapcache_get(hqm->hqm_mc);
		if (map == NULL)
			break;

		KASSERT(map->dm_mapsize == 0);

		/* ... or mbufs ... */
		m = m_get(MT_DATA, M_DONTWAIT);
		if (m == NULL) {
			gmac_mapcache_put(hqm->hqm_mc, map);
			break;
		}

		/* ... or clusters. */
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			gmac_mapcache_put(hqm->hqm_mc, map);
			break;
		}
		error = bus_dmamap_load(hqm->hqm_dmat, map, m->m_data,
		    MCLBYTES, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			m_free(m);
			gmac_mapcache_put(hqm->hqm_mc, map);
			aprint_error_dev(sc->sc_dev,
			    "map %p(%zu): can't map rx mbuf(%p) wptr=%zu: %d\n",
			    map, map->_dm_size, m,
			    (hwq->hwq_wptr + i) & (hwq->hwq_size - 1),
			    error);
			Debugger();
			break;
		}
		bus_dmamap_sync(hqm->hqm_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		m->m_len = 0;
		/* Stash the map in the mbuf — presumably so the rx
		 * completion path can unload it; verify against caller. */
		M_SETCTX(m, map);
		/* Fill the next free-queue descriptor with the buffer. */
		d = gmac_hwqueue_desc(hwq, i);
		d->d_desc0   = htole32(map->dm_segs->ds_len);
		d->d_bufaddr = htole32(map->dm_segs->ds_addr);
		IF_ENQUEUE(&hwq->hwq_ifq, m);
		sc->sc_psc->sc_rxpkts_per_sec++;
	}

	/* Hand whatever we added to the hardware in one go. */
	if (i)
		gmac_hwqueue_produce(hwq, i);
}
247
/*
 * Program the station address, multicast hash filter, and rx filter
 * mode from the current interface state.  Each register is written
 * only when its shadow copy shows an actual change.
 */
static void
gmc_filter_change(struct gmc_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t mhash[2];
	uint32_t new0, new1, new2;
	const char * const eaddr = CLLADDR(sc->sc_if.if_sadl);

	/*
	 * Pack the 6-byte MAC address little-endian into STA_ADD0
	 * (bytes 0-3) and STA_ADD1 (bytes 4-5); STA_ADD2 is unused.
	 */
	new0 = eaddr[0] | ((eaddr[1] | (eaddr[2] | (eaddr[3] << 8)) << 8) << 8);
	new1 = eaddr[4] | (eaddr[5] << 8);
	new2 = 0;
	if (sc->sc_gmac_sta_add[0] != new0
	    || sc->sc_gmac_sta_add[1] != new1
	    || sc->sc_gmac_sta_add[2] != new2) {
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD0,
		    new0);
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD1,
		    new1);
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD2,
		    new2);
		sc->sc_gmac_sta_add[0] = new0;
		sc->sc_gmac_sta_add[1] = new1;
		sc->sc_gmac_sta_add[2] = new2;
	}

	/*
	 * Build a 64-bit multicast hash from the CRC of each address.
	 * A range entry (addrlo != addrhi) cannot be hashed, so accept
	 * all multicast in that case.
	 */
	mhash[0] = 0;
	mhash[1] = 0;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		size_t i;
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			mhash[0] = mhash[1] = 0xffffffff;
			break;
		}
		i = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		mhash[(i >> 5) & 1] |= 1 << (i & 31);
		ETHER_NEXT_MULTI(step, enm);
	}

	if (sc->sc_gmac_mcast_filter[0] != mhash[0]
	    || sc->sc_gmac_mcast_filter[1] != mhash[1]) {
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
		    GMAC_MCAST_FILTER0, mhash[0]);
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
		    GMAC_MCAST_FILTER1, mhash[1]);
		sc->sc_gmac_mcast_filter[0] = mhash[0];
		sc->sc_gmac_mcast_filter[1] = mhash[1];
	}

	/* Always accept broadcast/unicast/multicast; add promiscuous
	 * mode only when the interface asks for it. */
	new0 = sc->sc_gmac_rx_filter & ~RXFILTER_PROMISC;
	new0 |= RXFILTER_BROADCAST | RXFILTER_UNICAST | RXFILTER_MULTICAST;
	if (sc->sc_if.if_flags & IFF_PROMISC)
		new0 |= RXFILTER_PROMISC;

	if (new0 != sc->sc_gmac_rx_filter) {
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_RX_FILTER,
		    new0);
		sc->sc_gmac_rx_filter = new0;
	}
}
309
/*
 * Once-a-second callout: age the receive low-water mark back down and
 * run the MII state machine.  Reschedules itself while the interface
 * is running.
 */
static void
gmc_mii_tick(void *arg)
{
	struct gmc_softc * const sc = arg;
	struct gmac_softc * const psc = sc->sc_psc;
	int s = splnet();

	/*
	 * If we had to increase the number of receive mbufs due to fifo
	 * overflows, we need a way to decrease them.  So every second we
	 * receive less than or equal to MIN_RXMAPS packets, we decrement
	 * swfree_min until it returns to MIN_RXMAPS.
	 */
	if (psc->sc_rxpkts_per_sec <= MIN_RXMAPS
	    && psc->sc_swfree_min > MIN_RXMAPS)
		psc->sc_swfree_min--;
	/*
	 * If only one GMAC is running or this is port0, reset the count.
	 */
	if (psc->sc_running != 3 || !sc->sc_port1)
		psc->sc_rxpkts_per_sec = 0;

	mii_tick(&sc->sc_mii);
	if (sc->sc_if.if_flags & IFF_RUNNING)
		callout_schedule(&sc->sc_mii_ch, hz);

	splx(s);
}
338
339static int
340gmc_mediachange(struct ifnet *ifp)
341{
342	struct gmc_softc * const sc = ifp->if_softc;
343
344	if ((ifp->if_flags & IFF_UP) == 0)
345		return 0;
346
347	return mii_mediachg(&sc->sc_mii);
348}
349
350static void
351gmc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
352{
353	struct gmc_softc * const sc = ifp->if_softc;
354
355	mii_pollstat(&sc->sc_mii);
356	ifmr->ifm_status = sc->sc_mii.mii_media_status;
357	ifmr->ifm_active = sc->sc_mii.mii_media_active;
358}
359
360static void
361gmc_mii_statchg(device_t self)
362{
363	struct gmc_softc * const sc = device_private(self);
364	uint32_t gmac_status;
365
366	gmac_status = sc->sc_gmac_status;
367	gmac_status &= ~STATUS_PHYMODE_MASK;
368	gmac_status |= STATUS_PHYMODE_GMII;
369	gmac_status &= ~STATUS_SPEED_MASK;
370	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T) {
371		gmac_status |= STATUS_SPEED_1000M;
372	} else if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
373		gmac_status |= STATUS_SPEED_100M;
374	} else {
375		gmac_status |= STATUS_SPEED_10M;
376	}
377
378        if (sc->sc_mii.mii_media_active & IFM_FDX)
379		gmac_status |= STATUS_DUPLEX_FULL;
380	else
381		gmac_status &= ~STATUS_DUPLEX_FULL;
382
383        if (sc->sc_mii.mii_media_active & IFM_ACTIVE)
384		gmac_status |= STATUS_LINK_ON;
385	else
386		gmac_status &= ~STATUS_LINK_ON;
387
388	gmac_status |= STATUS_LINK_ON; /* XXX */
389
390	if (sc->sc_gmac_status != gmac_status) {
391		aprint_normal_dev(sc->sc_dev,
392		    "status change old=%#x new=%#x active=%#x\n",
393		    sc->sc_gmac_status, gmac_status,
394		    sc->sc_mii.mii_media_active);
395		sc->sc_gmac_status = gmac_status;
396		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STATUS,
397		    sc->sc_gmac_status);
398	}
399}
400
/*
 * ifnet ioctl handler.  Media ioctls go straight to ifmedia; all
 * others go through ether_ioctl(), with ENETRESET translated into a
 * filter reprogram on a running interface.
 */
static int
gmc_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct gmc_softc * const sc = ifp->if_softc;
	struct ifreq * const ifr = data;
	int s;
	int error;
	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * If the interface is running, we have to
				 * update its multicast filter.
				 */
				gmc_filter_change(sc);
			}
			error = 0;
		}
	}

	splx(s);
	return error;
}
432
433static void
434gmc_ifstart(struct ifnet *ifp)
435{
436	struct gmc_softc * const sc = ifp->if_softc;
437
438	if ((sc->sc_gmac_status & STATUS_LINK_ON) == 0
439	    || (ifp->if_flags & IFF_RUNNING) == 0)
440		return;
441
442	for (;;) {
443		struct mbuf *m;
444		IF_DEQUEUE(&ifp->if_snd, m);
445		if (m == NULL)
446			break;
447		if (!gmc_txqueue(sc, sc->sc_txq[0], m)) {
448			IF_PREPEND(&ifp->if_snd, m);
449			ifp->if_flags |= IFF_OACTIVE;
450			break;
451		}
452	}
453}
454
/*
 * ifnet if_stop: mark this port not-running and withdraw its
 * interrupt enables from the parent's aggregate.  Queue memory and
 * map caches are kept for the next init (teardown is compiled out).
 */
static void
gmc_ifstop(struct ifnet *ifp, int disable)
{
	struct gmc_softc * const sc = ifp->if_softc;
	struct gmac_softc * const psc = sc->sc_psc;

	psc->sc_running &= ~(sc->sc_port1 ? 2 : 1);
	psc->sc_int_enabled[0] &= ~sc->sc_int_enabled[0];
	psc->sc_int_enabled[1] &= ~sc->sc_int_enabled[1];
	psc->sc_int_enabled[2] &= ~sc->sc_int_enabled[2];
	psc->sc_int_enabled[3] &= ~sc->sc_int_enabled[3];
	/*
	 * Clear this port's INT4 bits but keep SW_FREEQ_EMPTY enabled
	 * for now; note "~" binds tighter than "|", so the mask is
	 * (~sc_int_enabled[4]) | INT4_SW_FREEQ_EMPTY.
	 */
	psc->sc_int_enabled[4] &= ~sc->sc_int_enabled[4] | INT4_SW_FREEQ_EMPTY;
	if (psc->sc_running == 0) {
		/* Last port going down: drop the free-queue interrupt too. */
		psc->sc_int_enabled[4] &= ~INT4_SW_FREEQ_EMPTY;
		KASSERT(psc->sc_int_enabled[0] == 0);
		KASSERT(psc->sc_int_enabled[1] == 0);
		KASSERT(psc->sc_int_enabled[2] == 0);
		KASSERT(psc->sc_int_enabled[3] == 0);
		KASSERT(psc->sc_int_enabled[4] == 0);
	} else if (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0)
			== sc->sc_port1) {
		/*
		 * The other port keeps running.  If the free-queue
		 * interrupt was selected for this port, route it to the
		 * other one.  NOTE(review): assumes a set select bit
		 * routes the interrupt to port 1 — confirm against
		 * gemini_gmacreg.h.
		 */
		psc->sc_int_select[4] &= ~INT4_SW_FREEQ_EMPTY;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
		    psc->sc_int_select[4]);
	}
	gmac_intr_update(psc);
	if (disable) {
#if 0
		if (psc->sc_running == 0) {
			gmac_mapcache_destroy(&psc->sc_txmaps);
			gmac_mapcache_destroy(&psc->sc_rxmaps);
		}
#endif
	}
}
490
/*
 * ifnet if_init: bring the port up.  Creates the rx/tx hardware
 * queues on first use, programs the filters, DMA and MAC config
 * registers, primes the receive free queue, and enables this port's
 * interrupts.  Returns 0 on success or ENOMEM after tearing back down.
 */
static int
gmc_ifinit(struct ifnet *ifp)
{
	struct gmc_softc * const sc = ifp->if_softc;
	struct gmac_softc * const psc = sc->sc_psc;
#if 1
	uint32_t new, mask;
#endif

	/* Make sure the shared DMA map caches are primed. */
	gmac_mapcache_fill(psc->sc_rxmaps, MIN_RXMAPS);
	gmac_mapcache_fill(psc->sc_txmaps, MIN_TXMAPS);

	/* Lazily create this port's default receive queue. */
	if (sc->sc_rxq == NULL) {
		gmac_hwqmem_t *hqm;
		hqm = gmac_hwqmem_create(psc->sc_rxmaps, RXQ_NDESCS, 1,
		   HQM_CONSUMER|HQM_RX);
		sc->sc_rxq = gmac_hwqueue_create(hqm, sc->sc_iot,
		    sc->sc_ioh, GMAC_DEF_RXQn_RWPTR(sc->sc_port1),
		    GMAC_DEF_RXQn_BASE(sc->sc_port1), 0);
		if (sc->sc_rxq == NULL) {
			gmac_hwqmem_destroy(hqm);
			goto failed;
		}
		sc->sc_rxq->hwq_ifp = ifp;
		sc->sc_rxq->hwq_producer = psc->sc_swfreeq;
	}

	/*
	 * Lazily create the six software transmit queues, keeping the
	 * hardware free queue's producer list sorted by queue offset.
	 */
	if (sc->sc_txq[0] == NULL) {
		gmac_hwqueue_t *hwq, *last_hwq;
		gmac_hwqmem_t *hqm;
		size_t i;

		hqm = gmac_hwqmem_create(psc->sc_txmaps, TXQ_NDESCS, 6,
		   HQM_PRODUCER|HQM_TX);
		KASSERT(hqm != NULL);
		for (i = 0; i < __arraycount(sc->sc_txq); i++) {
			sc->sc_txq[i] = gmac_hwqueue_create(hqm, sc->sc_iot,
			    sc->sc_dma_ioh, GMAC_SW_TX_Qn_RWPTR(i),
			    GMAC_SW_TX_Q_BASE, i);
			if (sc->sc_txq[i] == NULL) {
				if (i == 0)
					gmac_hwqmem_destroy(hqm);
				goto failed;
			}
			sc->sc_txq[i]->hwq_ifp = ifp;

			/* Insertion sort into the producer list. */
			last_hwq = NULL;
			SLIST_FOREACH(hwq, &psc->sc_hwfreeq->hwq_producers,
			    hwq_link) {
				if (sc->sc_txq[i]->hwq_qoff < hwq->hwq_qoff)
					break;
				last_hwq = hwq;
			}
			if (last_hwq == NULL)
				SLIST_INSERT_HEAD(
				    &psc->sc_hwfreeq->hwq_producers,
				    sc->sc_txq[i], hwq_link);
			else
				SLIST_INSERT_AFTER(last_hwq, sc->sc_txq[i],
				    hwq_link);
		}
	}

	gmc_filter_change(sc);

#if 1
	/* Program the DMA config register (tx enabled, rx left off here). */
	mask = DMAVR_LOOPBACK|DMAVR_DROP_SMALL_ACK|DMAVR_EXTRABYTES_MASK
	    |DMAVR_RXBURSTSIZE_MASK|DMAVR_RXBUSWIDTH_MASK
	    |DMAVR_TXBURSTSIZE_MASK|DMAVR_TXBUSWIDTH_MASK;
	new = /* DMAVR_RXDMA_ENABLE| */ DMAVR_TXDMA_ENABLE
	    |DMAVR_EXTRABYTES(2)
	    |DMAVR_RXBURSTSIZE(DMAVR_BURSTSIZE_32W)
	    |DMAVR_RXBUSWIDTH(DMAVR_BUSWIDTH_32BITS)
	    |DMAVR_TXBURSTSIZE(DMAVR_BURSTSIZE_32W)
	    |DMAVR_TXBUSWIDTH(DMAVR_BUSWIDTH_32BITS);
	new |= sc->sc_dmavr & ~mask;
	if (sc->sc_dmavr != new) {
		sc->sc_dmavr = new;
		bus_space_write_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR,
		    sc->sc_dmavr);
		aprint_normal_dev(sc->sc_dev, "gmc_ifinit: dmavr=%#x/%#x\n",
		    sc->sc_dmavr,
		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR));
	}

	/* Program MAC CONFIG0: max frame length, clear test/disable bits. */
	mask = CONFIG0_MAXLEN_MASK|CONFIG0_TX_DISABLE/*|CONFIG0_RX_DISABLE*/
	    |CONFIG0_LOOPBACK|CONFIG0_SIM_TEST|CONFIG0_INVERSE_RXC_RGMII
	    |CONFIG0_R_LATCHED_MMII|CONFIG0_RGMII_INBAND_STATUS_ENABLE;
	new = CONFIG0_MAXLEN(CONFIG0_MAXLEN_1536);
	new |= (sc->sc_gmac_config[0] & ~mask);
	if (sc->sc_gmac_config[0] != new) {
		sc->sc_gmac_config[0] = new;
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0,
		    sc->sc_gmac_config[0]);
		aprint_normal_dev(sc->sc_dev, "gmc_ifinit: config0=%#x/%#x\n",
		    sc->sc_gmac_config[0],
		    bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0));
	}

	/* Prime the shared receive free queue. */
	gmc_rxproduce(sc);

	/*
	 * If we will be the only active interface, make sure the sw freeq
	 * interrupt gets routed to us.
	 */
	if (psc->sc_running == 0
	    && (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0) != sc->sc_port1)) {
		psc->sc_int_select[4] ^= INT4_SW_FREEQ_EMPTY;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
		    psc->sc_int_select[4]);
	}
	/* Enable this port's tx/rx/error interrupt sources ... */
	sc->sc_int_enabled[0] = sc->sc_int_mask[0]
	    & (INT0_TXDERR|INT0_TXPERR|INT0_RXDERR|INT0_RXPERR|INT0_SWTXQ_EOF);
	sc->sc_int_enabled[1] = sc->sc_int_mask[1] & INT1_DEF_RXQ_EOF;
	sc->sc_int_enabled[4] = INT4_SW_FREEQ_EMPTY | (sc->sc_int_mask[4]
	    & (INT4_TX_FAIL|INT4_MIB_HEMIWRAP|INT4_RX_FIFO_OVRN
	       |INT4_RGMII_STSCHG));

	/* ... and merge them into the parent's aggregate enables. */
	psc->sc_int_enabled[0] |= sc->sc_int_enabled[0];
	psc->sc_int_enabled[1] |= sc->sc_int_enabled[1];
	psc->sc_int_enabled[4] |= sc->sc_int_enabled[4];

	gmac_intr_update(psc);
#endif

	/* Kick the MII state machine once on the transition to running. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		mii_tick(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	psc->sc_running |= (sc->sc_port1 ? 2 : 1);

	callout_schedule(&sc->sc_mii_ch, hz);

	return 0;

failed:
	gmc_ifstop(ifp, true);
	return ENOMEM;
}
630
631static int
632gmc_intr(void *arg)
633{
634	struct gmc_softc * const sc = arg;
635	uint32_t int0_status, int1_status, int4_status;
636	uint32_t status;
637	bool do_ifstart = false;
638	int rv = 0;
639
640	int0_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
641	    GMAC_INT0_STATUS);
642	int1_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
643	    GMAC_INT1_STATUS);
644	int4_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
645	    GMAC_INT4_STATUS);
646
647	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
648	    int0_status & sc->sc_int_enabled[0]);
649	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS,
650	    int1_status & sc->sc_int_enabled[1]);
651	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS,
652	    int4_status & sc->sc_int_enabled[4]);
653
654	aprint_normal_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
655	    int0_status, int1_status,
656	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
657	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
658	    int4_status);
659
660	aprint_normal_dev(sc->sc_dev, "gmac_intr: mask=%#x/%#x/%#x/%#x/%#x\n",
661	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_MASK),
662	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_MASK),
663	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_MASK),
664	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_MASK),
665	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK));
666
667	status = int0_status & sc->sc_int_mask[0];
668	if (status & (INT0_TXDERR|INT0_TXPERR)) {
669		aprint_error_dev(sc->sc_dev,
670		    "transmit%s%s error: %#x %08x bufaddr %#x\n",
671		    status & INT0_TXDERR ? " data" : "",
672		    status & INT0_TXPERR ? " protocol" : "",
673		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
674			GMAC_DMA_TX_CUR_DESC),
675		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
676			GMAC_SW_TX_Q0_RWPTR),
677		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
678			GMAC_DMA_TX_DESC2));
679		    Debugger();
680	}
681	if (status & (INT0_RXDERR|INT0_RXPERR)) {
682		aprint_error_dev(sc->sc_dev,
683		    "receive%s%s error: %#x %#x bufaddr %#x\n",
684		    status & INT0_TXDERR ? " data" : "",
685		    status & INT0_TXPERR ? " protocol" : "",
686		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
687			GMAC_DMA_RX_CUR_DESC),
688		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
689			GMAC_DEF_RXQn_RWPTR(sc->sc_port1)),
690		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
691			GMAC_DMA_RX_DESC2));
692		    Debugger();
693	}
694	if (status & INT0_SWTXQ_EOF) {
695		status &= INT0_SWTXQ_EOF;
696		for (int i = 0; status && i < __arraycount(sc->sc_txq); i++) {
697			if (status & INT0_SWTXQn_EOF(i)) {
698				gmac_hwqueue_sync(sc->sc_txq[i]);
699				status &= ~INT0_SWTXQn_EOF(i);
700			}
701		}
702#if 0
703		/*
704		 * If we got an EOF, that means someting wound up in the
705		 * hardware freeq, so go reclaim it.
706		 */
707//		gmac_hwqueue_consume(sc->sc_psc->sc_hwfreeq);
708#endif
709		do_ifstart = true;
710		rv = 1;
711	}
712
713	status = int1_status & sc->sc_int_mask[1];
714	if (status & INT1_DEF_RXQ_EOF) {
715		gmac_hwqueue_consume(sc->sc_rxq);
716		rv = 1;
717	}
718
719	if (int4_status & INT4_SW_FREEQ_EMPTY) {
720		gmc_rxproduce(sc);
721		rv = 1;
722	}
723
724	status = int4_status & sc->sc_int_enabled[4];
725	if (status & INT4_TX_FAIL) {
726	}
727	if (status & INT4_MIB_HEMIWRAP) {
728	}
729	if (status & INT4_RX_XON) {
730	}
731	if (status & INT4_RX_XOFF) {
732	}
733	if (status & INT4_TX_XON) {
734	}
735	if (status & INT4_TX_XOFF) {
736	}
737	if (status & INT4_RX_FIFO_OVRN) {
738		if (sc->sc_psc->sc_swfree_min < MAX_RXMAPS)
739			sc->sc_psc->sc_swfree_min++;
740		sc->sc_if.if_ierrors++;
741	}
742	if (status & INT4_RGMII_STSCHG) {
743		mii_tick(&sc->sc_mii);
744	}
745
746	if (do_ifstart)
747		gmc_ifstart(&sc->sc_if);
748
749	aprint_normal_dev(sc->sc_dev, "gmac_intr: done\n");
750	return rv;
751}
752
753static int
754gmc_match(device_t parent, cfdata_t cf, void *aux)
755{
756	struct gmac_softc *psc = device_private(parent);
757	struct gmac_attach_args *gma = aux;
758
759	if ((unsigned int)gma->gma_phy > 31)
760		return 0;
761	if ((unsigned int)gma->gma_port > 1)
762		return 0;
763	if (gma->gma_intr < 1 || gma->gma_intr > 2)
764		return 0;
765
766	if (psc->sc_ports & (1 << gma->gma_port))
767		return 0;
768
769	return 1;
770}
771
/*
 * autoconf attach: wire up the ifnet, MII, register subregions, and
 * interrupt for one GMAC port, and snapshot the hardware registers
 * into their shadow copies.
 */
static void
gmc_attach(device_t parent, device_t self, void *aux)
{
	struct gmac_softc * const psc = device_private(parent);
	struct gmc_softc * const sc = device_private(self);
	struct gmac_attach_args *gma = aux;
	struct ifnet * const ifp = &sc->sc_if;
	/*
	 * Hard-coded per-port MAC addresses — NOTE(review): placeholders;
	 * confirm where production addresses should come from.
	 */
	static const char eaddrs[2][6] = {
		"\x0\x52\xc3\x11\x22\x33",
		"\x0\x52\xc3\x44\x55\x66",
	};

	psc->sc_ports |= 1 << gma->gma_port;
	sc->sc_port1 = (gma->gma_port == 1);

	sc->sc_dev = self;
	sc->sc_psc = psc;
	sc->sc_iot = psc->sc_iot;
	sc->sc_ioh = psc->sc_ioh;
	sc->sc_dmat = psc->sc_dmat;

	/* Carve out this port's DMA and MAC register windows. */
	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    GMAC_PORTn_DMA_OFFSET(gma->gma_port), GMAC_PORTn_DMA_SIZE,
	    &sc->sc_dma_ioh);
	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    GMAC_PORTn_GMAC_OFFSET(gma->gma_port), GMAC_PORTn_GMAC_SIZE,
	    &sc->sc_gmac_ioh);
	aprint_normal("\n");
	aprint_naive("\n");

	/* Set up the network interface. */
	strlcpy(ifp->if_xname, device_xname(self), sizeof(ifp->if_xname));
	ifp->if_flags = IFF_SIMPLEX|IFF_MULTICAST|IFF_BROADCAST;
	ifp->if_softc = sc;
	ifp->if_ioctl = gmc_ifioctl;
	ifp->if_stop  = gmc_ifstop;
	ifp->if_start = gmc_ifstart;
	ifp->if_init  = gmc_ifinit;

	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
	sc->sc_ec.ec_mii = &sc->sc_mii;

	/* MII hooks; register accessors come from the parent via gma. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_statchg = gmc_mii_statchg;
	sc->sc_mii.mii_readreg = gma->gma_mii_readreg;
	sc->sc_mii.mii_writereg = gma->gma_mii_writereg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, gmc_mediachange,
	   gmc_mediastatus);

	if_attach(ifp);
	ether_ifattach(ifp, eaddrs[gma->gma_port]);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
	    gma->gma_phy, MII_OFFSET_ANY, 0);

	/* Fall back to "none" if no PHY was found. */
	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX);
	}

	/* Snapshot the current hardware state into the shadow copies. */
	sc->sc_gmac_status = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STATUS);
	sc->sc_gmac_sta_add[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD0);
	sc->sc_gmac_sta_add[1] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD1);
	sc->sc_gmac_sta_add[2] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD2);
	sc->sc_gmac_mcast_filter[0] = bus_space_read_4(sc->sc_iot,
	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER0);
	sc->sc_gmac_mcast_filter[1] = bus_space_read_4(sc->sc_iot,
	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER1);
	sc->sc_gmac_rx_filter = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_RX_FILTER);
	sc->sc_gmac_config[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_CONFIG0);
	sc->sc_dmavr = bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR);

	/* sc->sc_int_enabled is already zeroed */
	sc->sc_int_mask[0] = (sc->sc_port1 ? INT0_GMAC1 : INT0_GMAC0);
	sc->sc_int_mask[1] = (sc->sc_port1 ? INT1_GMAC1 : INT1_GMAC0);
	sc->sc_int_mask[2] = (sc->sc_port1 ? INT2_GMAC1 : INT2_GMAC0);
	sc->sc_int_mask[3] = (sc->sc_port1 ? INT3_GMAC1 : INT3_GMAC0);
	sc->sc_int_mask[4] = (sc->sc_port1 ? INT4_GMAC1 : INT4_GMAC0);

	/*
	 * Only port 0 establishes the interrupt handler — NOTE(review):
	 * presumably port 1 shares port 0's handler; confirm how port 1
	 * interrupts are serviced.
	 */
	if (!sc->sc_port1) {
	sc->sc_ih = intr_establish(gma->gma_intr, IPL_NET, IST_LEVEL_HIGH,
	    gmc_intr, sc);
	KASSERT(sc->sc_ih != NULL);
	}

	callout_init(&sc->sc_mii_ch, 0);
	callout_setfunc(&sc->sc_mii_ch, gmc_mii_tick, sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	     ether_sprintf(CLLADDR(sc->sc_if.if_sadl)));
}
872
/* Glue the gmc driver into autoconf(9); no detach/activate hooks. */
CFATTACH_DECL_NEW(gmc, sizeof(struct gmc_softc),
    gmc_match, gmc_attach, NULL, NULL);
875