1249997Swkoszek/*-
2250015Swkoszek * Copyright (c) 2012-2013 Thomas Skibo
3249997Swkoszek * All rights reserved.
4249997Swkoszek *
5249997Swkoszek * Redistribution and use in source and binary forms, with or without
6249997Swkoszek * modification, are permitted provided that the following conditions
7249997Swkoszek * are met:
8249997Swkoszek * 1. Redistributions of source code must retain the above copyright
9249997Swkoszek *    notice, this list of conditions and the following disclaimer.
10249997Swkoszek * 2. Redistributions in binary form must reproduce the above copyright
11249997Swkoszek *    notice, this list of conditions and the following disclaimer in the
12249997Swkoszek *    documentation and/or other materials provided with the distribution.
13249997Swkoszek *
14249997Swkoszek * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15249997Swkoszek * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16249997Swkoszek * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17249997Swkoszek * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18249997Swkoszek * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19249997Swkoszek * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20249997Swkoszek * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21249997Swkoszek * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22249997Swkoszek * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23249997Swkoszek * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24249997Swkoszek * SUCH DAMAGE.
25249997Swkoszek */
26249997Swkoszek
27250015Swkoszek/*
28250015Swkoszek * A network interface driver for Cadence GEM Gigabit Ethernet
29249997Swkoszek * interface such as the one used in Xilinx Zynq-7000 SoC.
30249997Swkoszek *
31249997Swkoszek * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
32249997Swkoszek * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
33249997Swkoszek * and register definitions are in appendix B.18.
34249997Swkoszek */
35249997Swkoszek
36249997Swkoszek#include <sys/cdefs.h>
37249997Swkoszek__FBSDID("$FreeBSD$");
38249997Swkoszek
39249997Swkoszek#include <sys/param.h>
40249997Swkoszek#include <sys/systm.h>
41249997Swkoszek#include <sys/bus.h>
42249997Swkoszek#include <sys/kernel.h>
43249997Swkoszek#include <sys/malloc.h>
44249997Swkoszek#include <sys/mbuf.h>
45249997Swkoszek#include <sys/module.h>
46249997Swkoszek#include <sys/rman.h>
47249997Swkoszek#include <sys/socket.h>
48249997Swkoszek#include <sys/sockio.h>
49249997Swkoszek#include <sys/sysctl.h>
50249997Swkoszek
51249997Swkoszek#include <machine/bus.h>
52249997Swkoszek
53249997Swkoszek#include <net/ethernet.h>
54249997Swkoszek#include <net/if.h>
55249997Swkoszek#include <net/if_arp.h>
56249997Swkoszek#include <net/if_dl.h>
57249997Swkoszek#include <net/if_media.h>
58249997Swkoszek#include <net/if_mib.h>
59249997Swkoszek#include <net/if_types.h>
60249997Swkoszek
61249997Swkoszek#ifdef INET
62249997Swkoszek#include <netinet/in.h>
63249997Swkoszek#include <netinet/in_systm.h>
64249997Swkoszek#include <netinet/in_var.h>
65249997Swkoszek#include <netinet/ip.h>
66249997Swkoszek#endif
67249997Swkoszek
68249997Swkoszek#include <net/bpf.h>
69249997Swkoszek#include <net/bpfdesc.h>
70249997Swkoszek
71249997Swkoszek#include <dev/fdt/fdt_common.h>
72249997Swkoszek#include <dev/ofw/ofw_bus.h>
73249997Swkoszek#include <dev/ofw/ofw_bus_subr.h>
74249997Swkoszek
75249997Swkoszek#include <dev/mii/mii.h>
76249997Swkoszek#include <dev/mii/miivar.h>
77249997Swkoszek
78249997Swkoszek#include <dev/cadence/if_cgem_hw.h>
79249997Swkoszek
80249997Swkoszek#include "miibus_if.h"
81249997Swkoszek
82249997Swkoszek#define IF_CGEM_NAME "cgem"
83249997Swkoszek
84249997Swkoszek#define CGEM_NUM_RX_DESCS	256	/* size of receive descriptor ring */
85249997Swkoszek#define CGEM_NUM_TX_DESCS	256	/* size of transmit descriptor ring */
86249997Swkoszek
87249997Swkoszek#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
88249997Swkoszek				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))
89249997Swkoszek
90249997Swkoszek
91249997Swkoszek/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
92249997Swkoszek#define DEFAULT_NUM_RX_BUFS	64	/* number of receive bufs to queue. */
93249997Swkoszek
94249997Swkoszek#define TX_MAX_DMA_SEGS		4	/* maximum segs in a tx mbuf dma */
95249997Swkoszek
96249997Swkoszek#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
97249997Swkoszek				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
98249997Swkoszek
/* Per-interface driver context.  All mutable state is protected by
 * sc_mtx (see the CGEM_LOCK* macros below) unless noted otherwise.
 */
struct cgem_softc {
	struct ifnet		*ifp;		/* network interface */
	struct mtx		sc_mtx;		/* softc and ring lock */
	device_t		dev;		/* this device */
	device_t		miibus;		/* PHY bus; may be NULL */
	int			if_old_flags;	/* previously seen if_flags */
	struct resource 	*mem_res;	/* register memory window */
	struct resource 	*irq_res;	/* interrupt resource */
	void			*intrhand;	/* interrupt handler cookie */
	struct callout		tick_ch;	/* one-second PHY poll callout */
	uint32_t		net_ctl_shadow;	/* cached CGEM_NET_CTRL bits,
						   OR'd into writes (see
						   cgem_start_locked) */
	u_char			eaddr[6];	/* station MAC address */

	bus_dma_tag_t		desc_dma_tag;	/* tag for descriptor rings */
	bus_dma_tag_t		mbuf_dma_tag;	/* tag for rx/tx mbufs */

	/* receive descriptor ring */
	struct cgem_rx_desc	*rxring;	/* ring base (coherent mem) */
	bus_addr_t		rxring_physaddr; /* ring bus address */
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];	/* queued mbufs */
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS]; /* per-slot maps */
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
 	bus_dmamap_t		rxring_dma_map;	/* map for the ring itself */
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxoverruns;	/* rx ring overruns */

	/* transmit descriptor ring */
	struct cgem_tx_desc	*txring;	/* ring base (coherent mem) */
	bus_addr_t		txring_physaddr; /* ring bus address */
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];	/* mbuf at first seg only */
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS]; /* per-slot maps */
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	bus_dmamap_t		txring_dma_map;	/* map for the ring itself */
};
137249997Swkoszek
/* Register access helpers over the memory-mapped register window. */
#define RD4(sc, off) 		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val) 	(bus_write_4((sc)->mem_res, (off), (val)))
/* Bug fix: the expansion was missing its closing parenthesis, so any
 * use of BARRIER() failed to compile.
 */
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

/* Mutex helpers guarding all softc state. */
#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)	mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	\
	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
		 MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)
150249997Swkoszek
151249997Swkoszekstatic devclass_t cgem_devclass;
152249997Swkoszek
153249997Swkoszekstatic int cgem_probe(device_t dev);
154249997Swkoszekstatic int cgem_attach(device_t dev);
155249997Swkoszekstatic int cgem_detach(device_t dev);
156249997Swkoszekstatic void cgem_tick(void *);
157249997Swkoszekstatic void cgem_intr(void *);
158249997Swkoszek
159249997Swkoszekstatic void
160249997Swkoszekcgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
161249997Swkoszek{
162249997Swkoszek	int i;
163249997Swkoszek	uint32_t rnd;
164249997Swkoszek
165249997Swkoszek	/* See if boot loader gave us a MAC address already. */
166249997Swkoszek	for (i = 0; i < 4; i++) {
167249997Swkoszek		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
168249997Swkoszek		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
169249997Swkoszek		if (low != 0 || high != 0) {
170249997Swkoszek			eaddr[0] = low & 0xff;
171249997Swkoszek			eaddr[1] = (low >> 8) & 0xff;
172249997Swkoszek			eaddr[2] = (low >> 16) & 0xff;
173249997Swkoszek			eaddr[3] = (low >> 24) & 0xff;
174249997Swkoszek			eaddr[4] = high & 0xff;
175249997Swkoszek			eaddr[5] = (high >> 8) & 0xff;
176249997Swkoszek			break;
177249997Swkoszek		}
178249997Swkoszek	}
179249997Swkoszek
180249997Swkoszek	/* No MAC from boot loader?  Assign a random one. */
181249997Swkoszek	if (i == 4) {
182249997Swkoszek		rnd = arc4random();
183249997Swkoszek
184249997Swkoszek		eaddr[0] = 'b';
185249997Swkoszek		eaddr[1] = 's';
186249997Swkoszek		eaddr[2] = 'd';
187249997Swkoszek		eaddr[3] = (rnd >> 16) & 0xff;
188249997Swkoszek		eaddr[4] = (rnd >> 8) & 0xff;
189249997Swkoszek		eaddr[5] = rnd & 0xff;
190249997Swkoszek
191249997Swkoszek		device_printf(sc->dev, "no mac address found, assigning "
192249997Swkoszek			      "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
193249997Swkoszek			      eaddr[0], eaddr[1], eaddr[2],
194249997Swkoszek			      eaddr[3], eaddr[4], eaddr[5]);
195249997Swkoszek
196249997Swkoszek		WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
197249997Swkoszek		    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
198249997Swkoszek		WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
199249997Swkoszek	}
200249997Swkoszek}
201249997Swkoszek
/* cgem_mac_hash():  map a 48-bit MAC address to a 6-bit hash value.
 * The 6-bit hash selects one bit in the controller's 64-bit hash
 * register; setting that bit enables reception of all frames whose
 * destination address hashes to that value.
 *
 * Per sec. 16.2.3 of the Zynq-7000 Technical Reference Manual, hash
 * bit i is the exclusive-or of every sixth address bit starting at
 * bit i (i.e. address bit j contributes to hash bit j mod 6).
 */
static int
cgem_mac_hash(u_char eaddr[])
{
	int bitpos, hash;

	hash = 0;
	/* Walk all 48 address bits; fold each set bit into hash
	 * position (bitpos mod 6).
	 */
	for (bitpos = 0; bitpos < 48; bitpos++)
		if (((eaddr[bitpos >> 3] >> (bitpos & 7)) & 1) != 0)
			hash ^= (1 << (bitpos % 6));

	return hash;
}
226249997Swkoszek
227249997Swkoszek/* After any change in rx flags or multi-cast addresses, set up
228249997Swkoszek * hash registers and net config register bits.
229249997Swkoszek */
230249997Swkoszekstatic void
231249997Swkoszekcgem_rx_filter(struct cgem_softc *sc)
232249997Swkoszek{
233249997Swkoszek	struct ifnet *ifp = sc->ifp;
234249997Swkoszek	struct ifmultiaddr *ifma;
235249997Swkoszek	int index;
236249997Swkoszek	uint32_t hash_hi, hash_lo;
237249997Swkoszek	uint32_t net_cfg;
238249997Swkoszek
239249997Swkoszek	hash_hi = 0;
240249997Swkoszek	hash_lo = 0;
241249997Swkoszek
242249997Swkoszek	net_cfg = RD4(sc, CGEM_NET_CFG);
243249997Swkoszek
244249997Swkoszek	net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
245249997Swkoszek		     CGEM_NET_CFG_NO_BCAST |
246249997Swkoszek		     CGEM_NET_CFG_COPY_ALL);
247249997Swkoszek
248249997Swkoszek	if ((ifp->if_flags & IFF_PROMISC) != 0)
249249997Swkoszek		net_cfg |= CGEM_NET_CFG_COPY_ALL;
250249997Swkoszek	else {
251249997Swkoszek		if ((ifp->if_flags & IFF_BROADCAST) == 0)
252249997Swkoszek			net_cfg |= CGEM_NET_CFG_NO_BCAST;
253249997Swkoszek		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
254249997Swkoszek			hash_hi = 0xffffffff;
255249997Swkoszek			hash_lo = 0xffffffff;
256249997Swkoszek		} else {
257249997Swkoszek			if_maddr_rlock(ifp);
258249997Swkoszek			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
259249997Swkoszek				if (ifma->ifma_addr->sa_family != AF_LINK)
260249997Swkoszek					continue;
261249997Swkoszek				index = cgem_mac_hash(
262249997Swkoszek					LLADDR((struct sockaddr_dl *)
263249997Swkoszek					       ifma->ifma_addr));
264249997Swkoszek				if (index > 31)
265249997Swkoszek					hash_hi |= (1<<(index-32));
266249997Swkoszek				else
267249997Swkoszek					hash_lo |= (1<<index);
268249997Swkoszek			}
269249997Swkoszek			if_maddr_runlock(ifp);
270249997Swkoszek		}
271249997Swkoszek
272249997Swkoszek		if (hash_hi != 0 || hash_lo != 0)
273249997Swkoszek			net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
274249997Swkoszek	}
275249997Swkoszek
276249997Swkoszek	WR4(sc, CGEM_HASH_TOP, hash_hi);
277249997Swkoszek	WR4(sc, CGEM_HASH_BOT, hash_lo);
278249997Swkoszek	WR4(sc, CGEM_NET_CFG, net_cfg);
279249997Swkoszek}
280249997Swkoszek
281249997Swkoszek/* For bus_dmamap_load() callback. */
282249997Swkoszekstatic void
283249997Swkoszekcgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
284249997Swkoszek{
285249997Swkoszek
286249997Swkoszek	if (nsegs != 1 || error != 0)
287249997Swkoszek		return;
288249997Swkoszek	*(bus_addr_t *)arg = segs[0].ds_addr;
289249997Swkoszek}
290249997Swkoszek
/* Create DMA'able descriptor rings.
 *
 * Allocates the DMA tags, the RX and TX descriptor rings themselves
 * (in coherent memory), and a per-slot dmamap for every ring entry.
 * Returns 0 on success or a bus_dma error code.
 *
 * NOTE(review): on a mid-function failure the tags/maps created so far
 * are not released here — presumably cgem_detach() performs the
 * teardown; verify before relying on repeated attach attempts.
 */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors.
	 */
	/* Tag for the rings: byte-aligned, single contiguous segment,
	 * 32-bit addressable, serialized under sc_mtx.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MAX_DESC_RING_SIZE,
				 1,
				 MAX_DESC_RING_SIZE,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	/* Up to TX_MAX_DMA_SEGS segments per mbuf chain, cluster-sized. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MCLBYTES,
				 TX_MAX_DMA_SEGS,
				 MCLBYTES,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/* Allocate DMA memory in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->rxring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	/* Obtain the ring's bus address via the cgem_getaddr callback. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
			      (void *)sc->rxring,
			      CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
			      cgem_getaddr, &sc->rxring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors. */
	/* Mark each slot with the OWN bit (owned by software) so the
	 * controller won't DMA into it until cgem_fill_rqueue() queues a
	 * buffer (which writes the buffer address, clearing OWN).
	 */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->rxring_m_dmamap[i]);
		if (err)
			return (err);
	}
	/* Last descriptor wraps the ring back to entry 0. */
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->txring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->txring_dma_map);
	if (err)
		return (err);

	/* Load TX descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
			      (void *)sc->txring,
			      CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
			      cgem_getaddr, &sc->txring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize TX descriptor ring. */
	/* USED set means the slot is free for software to fill. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->txring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	return (0);
}
399249997Swkoszek
/* Fill receive descriptor ring with mbufs.
 *
 * Allocates cluster mbufs and queues them at the ring head until the
 * tunable fill target (sc->rxbufs) is reached.  Stops early and leaves
 * the ring partially filled if mbufs or DMA loads fail.  Caller must
 * hold the driver lock.
 */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		/* A cluster fits in one segment, so only segs[0] is used. */
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			      sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
			      segs, &nsegs, BUS_DMA_NOWAIT)) {
			/* XXX: warn? */
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_hd_ptr],
				BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer. */
		/* Writing the buffer address clears the OWN bit, handing
		 * the descriptor to the controller; the last ring entry
		 * must keep its WRAP flag.
		 */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
				CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}
447249997Swkoszek
/* Pull received packets off of receive descriptor ring.
 *
 * Walks the ring from the tail pointer, handing completed packets to
 * the stack and discarding bad ones.  The controller sets the OWN bit
 * in a descriptor once it has filled it.  Caller must hold the driver
 * lock; the lock is dropped around if_input().
 */
static void
cgem_recv(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	while (sc->rxring_queued > 0 &&
	       (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {

		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_tl_ptr],
				BUS_DMASYNC_POSTREAD);

		/* Unload dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		  	sc->rxring_m_dmamap[sc->rxring_tl_ptr]);

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/* Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
		           (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
			ifp->if_ierrors++;
			continue;
		}

		/* Hand it off to upper layers. */
		/* NOTE(review): advancing m_data by ETHER_ALIGN assumes the
		 * controller deposits packet data at a 2-byte offset in the
		 * buffer (so the IP header is 4-byte aligned) — confirm the
		 * RX buffer-offset field is set accordingly at init time.
		 */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/* Are we using hardware checksumming?  Check the
		 * status in the receive descriptor.
		 */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
				   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		/* Drop the driver lock across if_input() to avoid lock
		 * recursion into the stack; the loop condition re-reads
		 * ring state after reacquiring.
		 */
		CGEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		CGEM_LOCK(sc);
	}
}
529249997Swkoszek
/* Find completed transmits and free their mbufs.
 *
 * The controller sets the USED bit in the first descriptor of a frame
 * when it has finished transmitting it.  Only the first descriptor of
 * a frame carries its mbuf pointer; continuation descriptors are
 * skipped (and re-marked USED) until LAST_BUF is seen.  Caller must
 * hold the driver lock.
 */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	       ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
		CGEM_TXDESC_USED) != 0) {

		/* Sync cache.  nop? */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_tl_ptr],
				BUS_DMASYNC_POSTWRITE);

		/* Unload DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
				  sc->txring_m_dmamap[sc->txring_tl_ptr]);

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error. log to console. */
			device_printf(sc->dev, "cgem_clean_tx: Whoa! "
				   "AHB error, addr=0x%x\n",
				   sc->txring[sc->txring_tl_ptr].addr);
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
				   CGEM_TXDESC_LATE_COLL)) != 0) {
			sc->ifp->if_oerrors++;
		} else
			sc->ifp->if_opackets++;

		/* If the packet spanned more than one tx descriptor,
		 * skip descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			/* WRAP in the current ctl word means this slot is
			 * the last ring entry, so the next slot is 0.
			 */
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			/* Hand the slot back to software by setting USED
			 * (hardware only sets it on the first descriptor
			 * of a frame).
			 */
			sc->txring[sc->txring_tl_ptr].ctl =
				ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;
	}
}
595249997Swkoszek
/* Start transmits.
 *
 * Dequeues packets from the interface send queue, maps them for DMA
 * (defragmenting chains that exceed TX_MAX_DMA_SEGS segments), fills
 * the TX descriptor ring, and kicks the transmitter.  Sets
 * IFF_DRV_OACTIVE and stops when the ring is too full.  Caller must
 * hold the driver lock.
 */
static void
cgem_start_locked(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		/* Reserve TX_MAX_DMA_SEGS + 1 slots so a worst-case
		 * packet always fits without overrunning the tail.
		 */
		if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
		    TX_MAX_DMA_SEGS - 1) {

			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
			    TX_MAX_DMA_SEGS - 1) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				break;
			}
		}

		/* Grab next transmit packet. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Load DMA map. */
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
				      sc->txring_m_dmamap[sc->txring_hd_ptr],
				      m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments!  defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				/* Defrag failed: drop the packet. */
				m_freem(m);
				continue;
			}
			m = m2;
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
				      sc->txring_m_dmamap[sc->txring_hd_ptr],
				      m, segs, &nsegs, BUS_DMA_NOWAIT);
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			continue;
		}
		/* Only the first descriptor of a frame records the mbuf;
		 * cgem_clean_tx() relies on this.
		 */
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_hd_ptr],
				BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
			CGEM_NUM_TX_DESCS;

		/* Fill in the TX descriptors back to front so that USED
		 * bit in first descriptor is cleared last.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
				segs[i].ds_addr;

			/* Descriptor control word. */
			/* Writing ctl without USED hands the slot to the
			 * controller; the final segment also carries
			 * LAST_BUF and, if needed, WRAP.
			 */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);
	}

}
698249997Swkoszek
699249997Swkoszekstatic void
700249997Swkoszekcgem_start(struct ifnet *ifp)
701249997Swkoszek{
702249997Swkoszek	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
703249997Swkoszek
704249997Swkoszek	CGEM_LOCK(sc);
705249997Swkoszek	cgem_start_locked(ifp);
706249997Swkoszek	CGEM_UNLOCK(sc);
707249997Swkoszek}
708249997Swkoszek
709249997Swkoszek/* Respond to changes in media. */
710249997Swkoszekstatic void
711249997Swkoszekcgem_media_update(struct cgem_softc *sc, int active)
712249997Swkoszek{
713249997Swkoszek	uint32_t net_cfg;
714249997Swkoszek
715249997Swkoszek	CGEM_ASSERT_LOCKED(sc);
716249997Swkoszek
717249997Swkoszek	/* Update hardware to reflect phy status. */
718249997Swkoszek	net_cfg = RD4(sc, CGEM_NET_CFG);
719249997Swkoszek	net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
720249997Swkoszek		     CGEM_NET_CFG_FULL_DUPLEX);
721249997Swkoszek
722249997Swkoszek	if (IFM_SUBTYPE(active) == IFM_1000_T)
723249997Swkoszek		net_cfg |= (CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN);
724249997Swkoszek	else if (IFM_SUBTYPE(active) == IFM_100_TX)
725249997Swkoszek		net_cfg |= CGEM_NET_CFG_SPEED100;
726249997Swkoszek
727249997Swkoszek	if ((active & IFM_FDX) != 0)
728249997Swkoszek		net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;
729249997Swkoszek	WR4(sc, CGEM_NET_CFG, net_cfg);
730249997Swkoszek}
731249997Swkoszek
732249997Swkoszekstatic void
733249997Swkoszekcgem_tick(void *arg)
734249997Swkoszek{
735249997Swkoszek	struct cgem_softc *sc = (struct cgem_softc *)arg;
736249997Swkoszek	struct mii_data *mii;
737249997Swkoszek	int active;
738249997Swkoszek
739249997Swkoszek	CGEM_ASSERT_LOCKED(sc);
740249997Swkoszek
741249997Swkoszek	/* Poll the phy. */
742249997Swkoszek	if (sc->miibus != NULL) {
743249997Swkoszek		mii = device_get_softc(sc->miibus);
744249997Swkoszek		active = mii->mii_media_active;
745249997Swkoszek		mii_tick(mii);
746249997Swkoszek		if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
747249997Swkoszek		    (IFM_ACTIVE | IFM_AVALID) &&
748249997Swkoszek		    active != mii->mii_media_active)
749249997Swkoszek			cgem_media_update(sc, mii->mii_media_active);
750249997Swkoszek	}
751249997Swkoszek
752249997Swkoszek	/* Next callout in one second. */
753249997Swkoszek	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
754249997Swkoszek}
755249997Swkoszek
/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	uint32_t istatus;

	CGEM_LOCK(sc);

	/* Spurious or late interrupt: interface not running, just drop it. */
	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	/* Read status, then acknowledge only the causes handled below. */
	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus &
	    (CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
	     CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK));

	/* Hresp not ok.  Something very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		printf("cgem_intr: hresp not okay! rx_status=0x%x\n",
		       RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Transmitter has idled.  Free up any spent transmit buffers. */
	if ((istatus & CGEM_INTR_TX_USED_READ) != 0)
		cgem_clean_tx(sc);

	/* Packets received or overflow. */
	if ((istatus & (CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN)) != 0) {
		cgem_recv(sc);
		cgem_fill_rqueue(sc);
		if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
			/* Count the overrun and clear rx status register. */
			sc->rxoverruns++;
			WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
		}
	}

	CGEM_UNLOCK(sc);
}
799249997Swkoszek
/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	/* Disable controller, clear stat/status regs, mask all interrupts. */
	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, 0);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	WR4(sc, CGEM_NET_CFG,
	    CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64);

	/* Remember control bits in the shadow so later ORs don't lose them. */
	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}
826249997Swkoszek
/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
	uint32_t net_cfg;
	uint32_t dma_cfg;

	CGEM_ASSERT_LOCKED(sc);

	/*
	 * Program Net Config Register.  Speed/duplex bits are set
	 * optimistically here; cgem_init_locked() corrects them from the
	 * phy state via cgem_media_update() right after calling us.
	 */
	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
		CGEM_NET_CFG_MDC_CLK_DIV_64 |
		CGEM_NET_CFG_FCS_REMOVE |
		CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
		CGEM_NET_CFG_GIGE_EN |
		CGEM_NET_CFG_FULL_DUPLEX |
		CGEM_NET_CFG_SPEED100;

	/* Enable receive checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
		net_cfg |=  CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
		CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
		CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
		CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16;

	/* Enable transmit checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);

	/* Enable rx and tx (keeping previously shadowed control bits). */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set up interrupts: only the causes cgem_intr() handles. */
	WR4(sc, CGEM_INTR_EN,
	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
	    CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK);
}
876249997Swkoszek
/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Already running: nothing to do. */
	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Program the hardware and post receive buffers. */
	cgem_config(sc);
	cgem_fill_rqueue(sc);

	/* Mark the interface running and able to accept transmits. */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Sync hardware speed/duplex with the current phy state. */
	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	cgem_media_update(sc, mii->mii_media_active);
	cgem_start_locked(sc->ifp);

	/* Start the once-per-second tick. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}
901249997Swkoszek
/* ifnet init callback: bring the interface up under the driver lock. */
static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}
911249997Swkoszek
912249997Swkoszek/* Turn off interface.  Free up any buffers in transmit or receive queues. */
913249997Swkoszekstatic void
914249997Swkoszekcgem_stop(struct cgem_softc *sc)
915249997Swkoszek{
916249997Swkoszek	int i;
917249997Swkoszek
918249997Swkoszek	CGEM_ASSERT_LOCKED(sc);
919249997Swkoszek
920249997Swkoszek	callout_stop(&sc->tick_ch);
921249997Swkoszek
922249997Swkoszek	/* Shut down hardware. */
923249997Swkoszek	cgem_reset(sc);
924249997Swkoszek
925249997Swkoszek	/* Clear out transmit queue. */
926249997Swkoszek	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
927249997Swkoszek		sc->txring[i].ctl = CGEM_TXDESC_USED;
928249997Swkoszek		sc->txring[i].addr = 0;
929249997Swkoszek		if (sc->txring_m[i]) {
930249997Swkoszek			bus_dmamap_unload(sc->mbuf_dma_tag,
931249997Swkoszek					  sc->txring_m_dmamap[i]);
932249997Swkoszek			m_freem(sc->txring_m[i]);
933249997Swkoszek			sc->txring_m[i] = NULL;
934249997Swkoszek		}
935249997Swkoszek	}
936249997Swkoszek	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
937249997Swkoszek
938249997Swkoszek	sc->txring_hd_ptr = 0;
939249997Swkoszek	sc->txring_tl_ptr = 0;
940249997Swkoszek	sc->txring_queued = 0;
941249997Swkoszek
942249997Swkoszek	/* Clear out receive queue. */
943249997Swkoszek	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
944249997Swkoszek		sc->rxring[i].addr = CGEM_RXDESC_OWN;
945249997Swkoszek		sc->rxring[i].ctl = 0;
946249997Swkoszek		if (sc->rxring_m[i]) {
947249997Swkoszek			/* Unload dmamap. */
948249997Swkoszek			bus_dmamap_unload(sc->mbuf_dma_tag,
949249997Swkoszek				  sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
950249997Swkoszek
951249997Swkoszek			m_freem(sc->rxring_m[i]);
952249997Swkoszek			sc->rxring_m[i] = NULL;
953249997Swkoszek		}
954249997Swkoszek	}
955249997Swkoszek	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
956249997Swkoszek
957249997Swkoszek	sc->rxring_hd_ptr = 0;
958249997Swkoszek	sc->rxring_tl_ptr = 0;
959249997Swkoszek	sc->rxring_queued = 0;
960249997Swkoszek}
961249997Swkoszek
962249997Swkoszek
/*
 * Ioctl handler.  Flag and capability changes are applied to the hardware
 * under the driver lock; media ioctls are handed off to the mii layer.
 */
static int
cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/*
				 * Already running: reprogram the rx filter
				 * only if promisc/allmulti state changed.
				 */
				if (((ifp->if_flags ^ sc->if_old_flags) &
				     (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			/* Interface marked down while running: stop it. */
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			cgem_stop(sc);
		}
		/* Remember flags so the next call can detect changes. */
		sc->if_old_flags = ifp->if_flags;
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media selection and status go to the mii layer. */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		/* mask holds the capability bits being toggled. */
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				ifp->if_capenable |= (IFCAP_TXCSUM |
						      IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist |= CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				     CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				ifp->if_capenable &= ~(IFCAP_TXCSUM |
						       IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				     ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				ifp->if_capenable |= (IFCAP_RXCSUM |
						      IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) |
				     CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			} else {
				/* Turn off RX checksumming. */
				ifp->if_capenable &= ~(IFCAP_RXCSUM |
						       IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) &
				     ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			}
		}

		CGEM_UNLOCK(sc);
		break;
	default:
		/* Everything else goes to the common ethernet handler. */
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
1059249997Swkoszek
1060249997Swkoszek/* MII bus support routines.
1061249997Swkoszek */
1062249997Swkoszekstatic void
1063249997Swkoszekcgem_child_detached(device_t dev, device_t child)
1064249997Swkoszek{
1065249997Swkoszek	struct cgem_softc *sc = device_get_softc(dev);
1066249997Swkoszek	if (child == sc->miibus)
1067249997Swkoszek		sc->miibus = NULL;
1068249997Swkoszek}
1069249997Swkoszek
1070249997Swkoszekstatic int
1071249997Swkoszekcgem_ifmedia_upd(struct ifnet *ifp)
1072249997Swkoszek{
1073249997Swkoszek	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
1074249997Swkoszek	struct mii_data *mii;
1075249997Swkoszek
1076249997Swkoszek	mii = device_get_softc(sc->miibus);
1077249997Swkoszek	CGEM_LOCK(sc);
1078249997Swkoszek	mii_mediachg(mii);
1079249997Swkoszek	CGEM_UNLOCK(sc);
1080249997Swkoszek	return (0);
1081249997Swkoszek}
1082249997Swkoszek
1083249997Swkoszekstatic void
1084249997Swkoszekcgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1085249997Swkoszek{
1086249997Swkoszek	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
1087249997Swkoszek	struct mii_data *mii;
1088249997Swkoszek
1089249997Swkoszek	mii = device_get_softc(sc->miibus);
1090249997Swkoszek	CGEM_LOCK(sc);
1091249997Swkoszek	mii_pollstat(mii);
1092249997Swkoszek	ifmr->ifm_active = mii->mii_media_active;
1093249997Swkoszek	ifmr->ifm_status = mii->mii_media_status;
1094249997Swkoszek	CGEM_UNLOCK(sc);
1095249997Swkoszek}
1096249997Swkoszek
1097249997Swkoszekstatic int
1098249997Swkoszekcgem_miibus_readreg(device_t dev, int phy, int reg)
1099249997Swkoszek{
1100249997Swkoszek	struct cgem_softc *sc = device_get_softc(dev);
1101249997Swkoszek	int tries, val;
1102249997Swkoszek
1103249997Swkoszek	WR4(sc, CGEM_PHY_MAINT,
1104249997Swkoszek	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
1105249997Swkoszek	    CGEM_PHY_MAINT_OP_READ |
1106249997Swkoszek	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
1107249997Swkoszek	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));
1108249997Swkoszek
1109249997Swkoszek	/* Wait for completion. */
1110249997Swkoszek	tries=0;
1111249997Swkoszek	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
1112249997Swkoszek		DELAY(5);
1113249997Swkoszek		if (++tries > 200) {
1114249997Swkoszek			device_printf(dev, "phy read timeout: %d\n", reg);
1115249997Swkoszek			return (-1);
1116249997Swkoszek		}
1117249997Swkoszek	}
1118249997Swkoszek
1119249997Swkoszek	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;
1120249997Swkoszek
1121249997Swkoszek	return (val);
1122249997Swkoszek}
1123249997Swkoszek
1124249997Swkoszekstatic int
1125249997Swkoszekcgem_miibus_writereg(device_t dev, int phy, int reg, int data)
1126249997Swkoszek{
1127249997Swkoszek	struct cgem_softc *sc = device_get_softc(dev);
1128249997Swkoszek	int tries;
1129249997Swkoszek
1130249997Swkoszek	WR4(sc, CGEM_PHY_MAINT,
1131249997Swkoszek	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
1132249997Swkoszek	    CGEM_PHY_MAINT_OP_WRITE |
1133249997Swkoszek	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
1134249997Swkoszek	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
1135249997Swkoszek	    (data & CGEM_PHY_MAINT_DATA_MASK));
1136249997Swkoszek
1137249997Swkoszek	/* Wait for completion. */
1138249997Swkoszek	tries = 0;
1139249997Swkoszek	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
1140249997Swkoszek		DELAY(5);
1141249997Swkoszek		if (++tries > 200) {
1142249997Swkoszek			device_printf(dev, "phy write timeout: %d\n", reg);
1143249997Swkoszek			return (-1);
1144249997Swkoszek		}
1145249997Swkoszek	}
1146249997Swkoszek
1147249997Swkoszek	return (0);
1148249997Swkoszek}
1149249997Swkoszek
1150249997Swkoszek
1151249997Swkoszekstatic int
1152249997Swkoszekcgem_probe(device_t dev)
1153249997Swkoszek{
1154249997Swkoszek
1155249997Swkoszek	if (!ofw_bus_is_compatible(dev, "cadence,gem"))
1156249997Swkoszek		return (ENXIO);
1157249997Swkoszek
1158249997Swkoszek	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
1159249997Swkoszek	return (0);
1160249997Swkoszek}
1161249997Swkoszek
/*
 * Attach: allocate bus resources, reset the hardware, attach the phy,
 * set up descriptor rings and the ifnet, then hook the interrupt.
 * Error paths call cgem_detach() to unwind whatever was allocated.
 */
static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = NULL;
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
					     RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		return (ENOMEM);
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					     RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	CGEM_LOCK(sc);

	/* Reset hardware.  Also enables the phy management port. */
	cgem_reset(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
			 cgem_ifmedia_upd, cgem_ifmedia_sts,
			 BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err) {
		CGEM_UNLOCK(sc);
		device_printf(dev, "attaching PHYs failed\n");
		cgem_detach(dev);
		return (err);
	}

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		CGEM_UNLOCK(sc);
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks.  Callout runs with the driver mutex held. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	/* Set up ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = cgem_start;
	ifp->if_ioctl = cgem_ioctl;
	ifp->if_init = cgem_init;
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
	/* XXX: disable hw checksumming for now. */
	ifp->if_hwassist = 0;
	ifp->if_capenable = ifp->if_capabilities &
		~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	/* Baseline for promisc/allmulti change detection in cgem_ioctl(). */
	sc->if_old_flags = ifp->if_flags;
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;

	ether_ifattach(ifp, eaddr);

	/* Hook up the interrupt handler after the interface exists. */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
			     INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		CGEM_UNLOCK(sc);
		device_printf(dev, "could not set interrupt handler.\n");
		ether_ifdetach(ifp);
		cgem_detach(dev);
		return (err);
	}

	/* Export tunables/counters via sysctl. */
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		       OID_AUTO, "rxbufs", CTLFLAG_RW,
		       &sc->rxbufs, 0,
		       "Number receive buffers to provide");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		       OID_AUTO, "_rxoverruns", CTLFLAG_RD,
		       &sc->rxoverruns, 0,
		       "Receive ring overrun events");

	CGEM_UNLOCK(sc);

	return (0);
}
1277249997Swkoszek
/*
 * Detach: undo everything cgem_attach() did.  Also called by
 * cgem_attach() itself for error-path cleanup, so every step first
 * checks whether its resource was actually allocated.
 */
static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int i;

	if (sc == NULL)
		return (ENODEV);

	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
		/* Drain outside the lock so a running tick can finish. */
		callout_drain(&sc->tick_ch);
		sc->ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
				     rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
				     rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Release DMA resources. */
	if (sc->rxring_dma_map != NULL) {
		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
				sc->rxring_dma_map);
		sc->rxring_dma_map = NULL;
		/* Destroy the per-slot rx mbuf dmamaps. */
		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
						   sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
	}
	if (sc->txring_dma_map != NULL) {
		bus_dmamem_free(sc->desc_dma_tag, sc->txring,
				sc->txring_dma_map);
		sc->txring_dma_map = NULL;
		/* Destroy the per-slot tx mbuf dmamaps. */
		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
						   sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
	}
	/* Tags go last, after all maps and memory using them are gone. */
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

	bus_generic_detach(dev);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}
1353249997Swkoszek
/* Method table wiring this driver into the device, bus and MII interfaces. */
static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	cgem_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),

	DEVMETHOD_END
};
1369249997Swkoszek
/* Driver declaration and module glue. */
static driver_t cgem_driver = {
	"cgem",
	cgem_methods,
	sizeof(struct cgem_softc),
};

/* Attach under simplebus; cgem in turn hosts a miibus for its phy. */
DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);
1380