/*-
 * Copyright (c) 2012-2013 Thomas Skibo.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* A network interface driver for the Cadence GEM Gigabit Ethernet
 * controller, such as the one used in the Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual,
 * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
 * and register definitions are in appendix B.18.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cadence/if_cgem.c 249997 2013-04-27 22:38:29Z wkoszek $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"

#define IF_CGEM_NAME "cgem"

#define CGEM_NUM_RX_DESCS	256	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS	256	/* size of transmit descriptor ring */

#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))
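/* The descriptor DMA tag created in cgem_setup_descs() is used for both
 * rings, so it is sized for the larger of the two.
 */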


/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS	64	/* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS		4	/* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

struct cgem_softc {
	struct ifnet		*ifp;
	struct mtx		sc_mtx;
	device_t		dev;
	device_t		miibus;
	int			if_old_flags;
	struct resource 	*mem_res;
	struct resource 	*irq_res;
	void			*intrhand;
	struct callout		tick_ch;
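	/* Soft copy of the NET_CTRL register, kept so transient bits
	 * (e.g. CGEM_NET_CTRL_START_TX) can be OR'd in without a
	 * read-modify-write of the hardware register.
	 */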
	uint32_t		net_ctl_shadow;
	u_char			eaddr[6];

	bus_dma_tag_t		desc_dma_tag;
	bus_dma_tag_t		mbuf_dma_tag;

	/* receive descriptor ring */
	struct cgem_rx_desc	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxoverruns;	/* rx ring overruns */

	/* transmit descriptor ring */
	struct cgem_tx_desc	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	bus_dmamap_t		txring_dma_map;
};

#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	\
	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
		 MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

static devclass_t cgem_devclass;

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
	int i;
	uint32_t rnd;

	/* See if boot loader gave us a MAC address already. */
	for (i = 0; i < 4; i++) {
		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
		if (low != 0 || high != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			break;
		}
	}

	/* No MAC from boot loader?  Assign a random one. */
	if (i == 4) {
		rnd = arc4random();

		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;

		device_printf(sc->dev, "no mac address found, assigning "
			      "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
			      eaddr[0], eaddr[1], eaddr[2],
			      eaddr[3], eaddr[4], eaddr[5]);

		WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
		    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
		WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
	}
}

/* cgem_mac_hash():  map 48-bit address to a 6-bit hash.
 * The 6-bit hash corresponds to a bit in a 64-bit hash
 * register.  Setting that bit in the hash register enables
 * reception of all frames with a destination address that hashes
 * to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
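 * For example, the all-ones address XORs eight set bits into each hash
 * bit, so ff:ff:ff:ff:ff:ff hashes to 0.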
 */
static int
cgem_mac_hash(u_char eaddr[])
{
	int hash;
	int i, j;

	hash = 0;
	for (i = 0; i < 6; i++)
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	return hash;
}

/* After any change in rx flags or multi-cast addresses, set up
 * hash registers and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	int index;
	uint32_t hash_hi, hash_lo;
	uint32_t net_cfg;

	hash_hi = 0;
	hash_lo = 0;

	net_cfg = RD4(sc, CGEM_NET_CFG);

	net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
		     CGEM_NET_CFG_NO_BCAST |
		     CGEM_NET_CFG_COPY_ALL);

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		net_cfg |= CGEM_NET_CFG_COPY_ALL;
	else {
		if ((ifp->if_flags & IFF_BROADCAST) == 0)
			net_cfg |= CGEM_NET_CFG_NO_BCAST;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			hash_hi = 0xffffffff;
			hash_lo = 0xffffffff;
		} else {
			if_maddr_rlock(ifp);
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				index = cgem_mac_hash(
					LLADDR((struct sockaddr_dl *)
					       ifma->ifma_addr));
				if (index > 31)
					hash_hi |= (1<<(index-32));
				else
					hash_lo |= (1<<index);
			}
			if_maddr_runlock(ifp);
		}

		if (hash_hi != 0 || hash_lo != 0)
			net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
	}

	WR4(sc, CGEM_HASH_TOP, hash_hi);
	WR4(sc, CGEM_HASH_BOT, hash_lo);
	WR4(sc, CGEM_NET_CFG, net_cfg);
}

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (nsegs != 1 || error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MAX_DESC_RING_SIZE,
				 1,
				 MAX_DESC_RING_SIZE,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MCLBYTES,
				 TX_MAX_DMA_SEGS,
				 MCLBYTES,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/* Allocate DMA memory in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->rxring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
			      (void *)sc->rxring,
			      CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
			      cgem_getaddr, &sc->rxring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors. */
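	/* Each slot starts out software-owned (CGEM_RXDESC_OWN set) so the
	 * controller will not DMA into it until cgem_fill_rqueue() installs
	 * an mbuf and writes the real buffer address.
	 */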
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->rxring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->txring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->txring_dma_map);
	if (err)
		return (err);

	/* Load TX descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
			      (void *)sc->txring,
			      CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
			      cgem_getaddr, &sc->txring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize TX descriptor ring. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->txring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	return (0);
}

/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			      sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
			      segs, &nsegs, BUS_DMA_NOWAIT)) {
			/* XXX: warn? */
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_hd_ptr],
				BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer. */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
				CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}

/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	while (sc->rxring_queued > 0 &&
	       (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {

		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_tl_ptr],
				BUS_DMASYNC_POSTREAD);

		/* Unload dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
			sc->rxring_m_dmamap[sc->rxring_tl_ptr]);

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/* Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
		           (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
			ifp->if_ierrors++;
			continue;
		}

		/* Hand it off to upper layers. */
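		/* The controller stored the frame ETHER_ALIGN bytes into the
		 * buffer (CGEM_NET_CFG_RX_BUF_OFFSET in cgem_config()); skip
		 * that pad so the IP header stays 32-bit aligned.
		 */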
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/* Are we using hardware checksumming?  Check the
		 * status in the receive descriptor.
		 */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
				   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		CGEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		CGEM_LOCK(sc);
	}
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	       ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
		CGEM_TXDESC_USED) != 0) {

		/* Sync cache.  nop? */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_tl_ptr],
				BUS_DMASYNC_POSTWRITE);

		/* Unload DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
				  sc->txring_m_dmamap[sc->txring_tl_ptr]);

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error. log to console. */
			device_printf(sc->dev, "cgem_clean_tx: Whoa! "
				   "AHB error, addr=0x%x\n",
				   sc->txring[sc->txring_tl_ptr].addr);
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
				   CGEM_TXDESC_LATE_COLL)) != 0) {
			sc->ifp->if_oerrors++;
		} else
			sc->ifp->if_opackets++;

		/* If the packet spanned more than one tx descriptor,
		 * skip descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
				ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;
	}
}

/* Start transmits. */
static void
cgem_start_locked(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
		    TX_MAX_DMA_SEGS - 1) {

			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
			    TX_MAX_DMA_SEGS - 1) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				break;
			}
		}

		/* Grab next transmit packet. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Load DMA map. */
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
				      sc->txring_m_dmamap[sc->txring_hd_ptr],
				      m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments!  defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				m_freem(m);
				continue;
			}
			m = m2;
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
				      sc->txring_m_dmamap[sc->txring_hd_ptr],
				      m, segs, &nsegs, BUS_DMA_NOWAIT);
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_hd_ptr],
				BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
			CGEM_NUM_TX_DESCS;

		/* Fill in the TX descriptors back to front so that USED
		 * bit in first descriptor is cleared last.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
				segs[i].ds_addr;

			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);
	}

}

static void
cgem_start(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;

	CGEM_LOCK(sc);
	cgem_start_locked(ifp);
	CGEM_UNLOCK(sc);
}

/* Respond to changes in media. */
static void
cgem_media_update(struct cgem_softc *sc, int active)
{
	uint32_t net_cfg;

	CGEM_ASSERT_LOCKED(sc);

	/* Update hardware to reflect phy status. */
	net_cfg = RD4(sc, CGEM_NET_CFG);
	net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
		     CGEM_NET_CFG_FULL_DUPLEX);

	if (IFM_SUBTYPE(active) == IFM_1000_T)
		net_cfg |= (CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN);
	else if (IFM_SUBTYPE(active) == IFM_100_TX)
		net_cfg |= CGEM_NET_CFG_SPEED100;

	if ((active & IFM_FDX) != 0)
		net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;
	WR4(sc, CGEM_NET_CFG, net_cfg);
}

static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;
	int active;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		active = mii->mii_media_active;
		mii_tick(mii);
		if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
		    (IFM_ACTIVE | IFM_AVALID) &&
		    active != mii->mii_media_active)
			cgem_media_update(sc, mii->mii_media_active);
	}

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	uint32_t istatus;

	CGEM_LOCK(sc);

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus &
	    (CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
	     CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK));

	/* Hresp not ok.  Something very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		printf("cgem_intr: hresp not okay! rx_status=0x%x\n",
		       RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Transmitter has idled.  Free up any spent transmit buffers. */
	if ((istatus & CGEM_INTR_TX_USED_READ) != 0)
		cgem_clean_tx(sc);

	/* Packets received or overflow. */
	if ((istatus & (CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN)) != 0) {
		cgem_recv(sc);
		cgem_fill_rqueue(sc);
		if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
			/* Clear rx status register. */
			sc->rxoverruns++;
			WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
		}
	}

	CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, 0);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	WR4(sc, CGEM_NET_CFG,
	    CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64);

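	/* Only the MDIO management port is left enabled after a reset;
	 * cgem_config() turns the transmitter and receiver back on.
	 */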
	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
	uint32_t net_cfg;
	uint32_t dma_cfg;

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register. */
	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
		CGEM_NET_CFG_MDC_CLK_DIV_64 |
		CGEM_NET_CFG_FCS_REMOVE |
		CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
		CGEM_NET_CFG_GIGE_EN |
		CGEM_NET_CFG_FULL_DUPLEX |
		CGEM_NET_CFG_SPEED100;

	/* Enable receive checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
		net_cfg |=  CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
		CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
		CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
		CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16;

	/* Enable transmit checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN,
	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
	    CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	cgem_media_update(sc, mii->mii_media_active);
	cgem_start_locked(sc->ifp);

	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
	int i;

	CGEM_ASSERT_LOCKED(sc);

	callout_stop(&sc->tick_ch);

	/* Shut down hardware. */
	cgem_reset(sc);

	/* Clear out transmit queue. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring[i].addr = 0;
		if (sc->txring_m[i]) {
			bus_dmamap_unload(sc->mbuf_dma_tag,
					  sc->txring_m_dmamap[i]);
			m_freem(sc->txring_m[i]);
			sc->txring_m[i] = NULL;
		}
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	/* Clear out receive queue. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		if (sc->rxring_m[i]) {
			/* Unload dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
					  sc->rxring_m_dmamap[i]);

			m_freem(sc->rxring_m[i]);
			sc->rxring_m[i] = NULL;
		}
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;
}


static int
cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->if_old_flags) &
				     (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			cgem_stop(sc);
		}
		sc->if_old_flags = ifp->if_flags;
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				ifp->if_capenable |= (IFCAP_TXCSUM |
						      IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist |= CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				     CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				ifp->if_capenable &= ~(IFCAP_TXCSUM |
						       IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				     ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				ifp->if_capenable |= (IFCAP_RXCSUM |
						      IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) |
				     CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			} else {
				/* Turn off RX checksumming. */
				ifp->if_capenable &= ~(IFCAP_RXCSUM |
						       IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) &
				     ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			}
		}

		CGEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/* MII bus support routines.
 */
static void
cgem_child_detached(device_t dev, device_t child)
{
	struct cgem_softc *sc = device_get_softc(dev);
	if (child == sc->miibus)
		sc->miibus = NULL;
}

static int
cgem_ifmedia_upd(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	mii_mediachg(mii);
	CGEM_UNLOCK(sc);
	return (0);
}

static void
cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	CGEM_UNLOCK(sc);
}

static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	return (val);
}

static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries;

	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_WRITE |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
	    (data & CGEM_PHY_MAINT_DATA_MASK));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy write timeout: %d\n", reg);
			return (-1);
		}
	}

	return (0);
}


static int
cgem_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "cadence,gem"))
		return (ENXIO);

	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
	return (0);
}

static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = NULL;
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
					     RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		return (ENOMEM);
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					     RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	CGEM_LOCK(sc);

	/* Reset hardware. */
	cgem_reset(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
			 cgem_ifmedia_upd, cgem_ifmedia_sts,
			 BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err) {
		CGEM_UNLOCK(sc);
		device_printf(dev, "attaching PHYs failed\n");
		cgem_detach(dev);
		return (err);
	}

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		CGEM_UNLOCK(sc);
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	/* Set up ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = cgem_start;
	ifp->if_ioctl = cgem_ioctl;
	ifp->if_init = cgem_init;
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
	/* XXX: disable hw checksumming for now. */
	ifp->if_hwassist = 0;
	ifp->if_capenable = ifp->if_capabilities &
		~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	sc->if_old_flags = ifp->if_flags;
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;

	ether_ifattach(ifp, eaddr);

	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
			     INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		CGEM_UNLOCK(sc);
		device_printf(dev, "could not set interrupt handler.\n");
		ether_ifdetach(ifp);
		cgem_detach(dev);
		return (err);
	}

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		       OID_AUTO, "rxbufs", CTLFLAG_RW,
		       &sc->rxbufs, 0,
		       "Number receive buffers to provide");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		       OID_AUTO, "_rxoverruns", CTLFLAG_RD,
		       &sc->rxoverruns, 0,
		       "Receive ring overrun events");

	CGEM_UNLOCK(sc);

	return (0);
}

static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int i;

	if (sc == NULL)
		return (ENODEV);

	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		sc->ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
				     rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
				     rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Release DMA resources. */
	if (sc->rxring_dma_map != NULL) {
		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
				sc->rxring_dma_map);
		sc->rxring_dma_map = NULL;
		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
						   sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
	}
	if (sc->txring_dma_map != NULL) {
		bus_dmamem_free(sc->desc_dma_tag, sc->txring,
				sc->txring_dma_map);
		sc->txring_dma_map = NULL;
		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
						   sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
	}
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

	bus_generic_detach(dev);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}

static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	cgem_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),

	DEVMETHOD_END
};

static driver_t cgem_driver = {
	"cgem",
	cgem_methods,
	sizeof(struct cgem_softc),
};

DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);
1379