/*-
 * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A network interface driver for the Cadence GEM Gigabit Ethernet
 * interface, such as the one used in the Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual
 * (v1.4), November 16, 2012, Xilinx doc UG585.  The GEM is covered in
 * Ch. 16 and the register definitions are in appendix B.18.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"

#define IF_CGEM_NAME "cgem"

#define CGEM_NUM_RX_DESCS	512	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS	512	/* size of transmit descriptor ring */

#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))

/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS	256	/* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS		8	/* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
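
/*
 * CGEM_CKSUM_ASSIST is the set of checksum-assist flags advertised to
 * the stack via if_hwassist when IFCAP_TXCSUM is enabled (see
 * cgem_ioctl() below): IP header checksums plus TCP/UDP checksums for
 * both IPv4 and IPv6.
 */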

struct cgem_softc {
	struct ifnet		*ifp;
	struct mtx		sc_mtx;
	device_t		dev;
	device_t		miibus;
	u_int			mii_media_active;	/* last active media */
	int			if_old_flags;
	struct resource		*mem_res;
	struct resource		*irq_res;
	void			*intrhand;
	struct callout		tick_ch;
	uint32_t		net_ctl_shadow;
	int			ref_clk_num;
	u_char			eaddr[6];

	bus_dma_tag_t		desc_dma_tag;
	bus_dma_tag_t		mbuf_dma_tag;

	/* receive descriptor ring */
	struct cgem_rx_desc	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxhangwar;	/* rx hang work-around */
	u_int			rxoverruns;	/* rx overruns */
	u_int			rxnobufs;	/* rx buf ring empty events */
	u_int			rxdmamapfails;	/* rx dmamap failures */
	uint32_t		rx_frames_prev;

	/* transmit descriptor ring */
	struct cgem_tx_desc	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	bus_dmamap_t		txring_dma_map;
	u_int			txfull;		/* tx ring full events */
	u_int			txdefrags;	/* tx calls to m_defrag() */
	u_int			txdefragfails;	/* tx m_defrag() failures */
	u_int			txdmamapfails;	/* tx dmamap failures */

	/* hardware provided statistics */
	struct cgem_hw_stats {
		uint64_t		tx_bytes;
		uint32_t		tx_frames;
		uint32_t		tx_frames_bcast;
		uint32_t		tx_frames_multi;
		uint32_t		tx_frames_pause;
		uint32_t		tx_frames_64b;
		uint32_t		tx_frames_65to127b;
		uint32_t		tx_frames_128to255b;
		uint32_t		tx_frames_256to511b;
		uint32_t		tx_frames_512to1023b;
		uint32_t		tx_frames_1024to1536b;
		uint32_t		tx_under_runs;
		uint32_t		tx_single_collisn;
		uint32_t		tx_multi_collisn;
		uint32_t		tx_excsv_collisn;
		uint32_t		tx_late_collisn;
		uint32_t		tx_deferred_frames;
		uint32_t		tx_carrier_sense_errs;

		uint64_t		rx_bytes;
		uint32_t		rx_frames;
		uint32_t		rx_frames_bcast;
		uint32_t		rx_frames_multi;
		uint32_t		rx_frames_pause;
		uint32_t		rx_frames_64b;
		uint32_t		rx_frames_65to127b;
		uint32_t		rx_frames_128to255b;
		uint32_t		rx_frames_256to511b;
		uint32_t		rx_frames_512to1023b;
		uint32_t		rx_frames_1024to1536b;
		uint32_t		rx_frames_undersize;
		uint32_t		rx_frames_oversize;
		uint32_t		rx_frames_jabber;
		uint32_t		rx_frames_fcs_errs;
		uint32_t		rx_frames_length_errs;
		uint32_t		rx_symbol_errs;
		uint32_t		rx_align_errs;
		uint32_t		rx_resource_errs;
		uint32_t		rx_overrun_errs;
		uint32_t		rx_ip_hdr_csum_errs;
		uint32_t		rx_tcp_csum_errs;
		uint32_t		rx_udp_csum_errs;
	} stats;
};

#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	\
	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
		 MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

/* Allow platforms to optionally provide a way to set the reference clock. */
int cgem_set_ref_clk(int unit, int frequency);

static devclass_t cgem_devclass;

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void cgem_mediachange(struct cgem_softc *, struct mii_data *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
	int i;
	uint32_t rnd;

	/* See if boot loader gave us a MAC address already. */
	for (i = 0; i < 4; i++) {
		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
		if (low != 0 || high != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			break;
		}
	}

	/* No MAC from boot loader?  Assign a random one. */
	if (i == 4) {
		rnd = arc4random();

		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;

		device_printf(sc->dev, "no mac address found, assigning "
			      "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
			      eaddr[0], eaddr[1], eaddr[2],
			      eaddr[3], eaddr[4], eaddr[5]);
	}

	/* Move address to first slot and zero out the rest. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	for (i = 1; i < 4; i++) {
		WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
		WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
	}
}
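
/*
 * Note on the random fallback above: 'b' is 0x62, which has the
 * locally-administered bit (0x02) set and the multicast bit (0x01)
 * clear, so the "bsd"-prefixed address is a valid locally-administered
 * unicast MAC.
 */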

/* cgem_mac_hash():  map 48-bit address to a 6-bit hash.
 * The 6-bit hash corresponds to a bit in a 64-bit hash
 * register.  Setting that bit in the hash register enables
 * reception of all frames with a destination address that hashes
 * to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
 */
static int
cgem_mac_hash(u_char eaddr[])
{
	int hash;
	int i, j;

	hash = 0;
	for (i = 0; i < 6; i++)
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	return hash;
}
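
/*
 * Worked example: for the broadcast address ff:ff:ff:ff:ff:ff, each of
 * the six hash bits is the XOR of eight 1 bits and is therefore 0, so
 * broadcast frames hash to index 0 (bit 0 of CGEM_HASH_BOT).  Broadcast
 * reception, however, is controlled by CGEM_NET_CFG_NO_BCAST rather
 * than by the hash filter (see cgem_rx_filter() below).
 */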

/* After any change in rx flags or multi-cast addresses, set up
 * hash registers and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	int index;
	uint32_t hash_hi, hash_lo;
	uint32_t net_cfg;

	hash_hi = 0;
	hash_lo = 0;

	net_cfg = RD4(sc, CGEM_NET_CFG);

	net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
		     CGEM_NET_CFG_NO_BCAST |
		     CGEM_NET_CFG_COPY_ALL);

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		net_cfg |= CGEM_NET_CFG_COPY_ALL;
	else {
		if ((ifp->if_flags & IFF_BROADCAST) == 0)
			net_cfg |= CGEM_NET_CFG_NO_BCAST;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			hash_hi = 0xffffffff;
			hash_lo = 0xffffffff;
		} else {
			if_maddr_rlock(ifp);
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				index = cgem_mac_hash(
					LLADDR((struct sockaddr_dl *)
					       ifma->ifma_addr));
				if (index > 31)
					hash_hi |= (1 << (index - 32));
				else
					hash_lo |= (1 << index);
			}
			if_maddr_runlock(ifp);
		}

		if (hash_hi != 0 || hash_lo != 0)
			net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
	}

	WR4(sc, CGEM_HASH_TOP, hash_hi);
	WR4(sc, CGEM_HASH_BOT, hash_lo);
	WR4(sc, CGEM_NET_CFG, net_cfg);
}

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (nsegs != 1 || error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MAX_DESC_RING_SIZE,
				 1,
				 MAX_DESC_RING_SIZE,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MCLBYTES,
				 TX_MAX_DMA_SEGS,
				 MCLBYTES,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/* Allocate DMA memory in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->rxring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
			      (void *)sc->rxring,
			      CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
			      cgem_getaddr, &sc->rxring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->rxring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->txring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->txring_dma_map);
	if (err)
		return (err);

	/* Load TX descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
			      (void *)sc->txring,
			      CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
			      cgem_getaddr, &sc->txring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize TX descriptor ring. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->txring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	return (0);
}
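
/*
 * Sizing sketch: assuming the two-word (8-byte) GEM descriptor layout
 * declared in if_cgem_hw.h, each 512-entry ring above occupies
 * 512 * 8 = 4096 bytes of the non-cached DMA memory, which is also
 * what MAX_DESC_RING_SIZE evaluates to under that assumption, i.e. a
 * single 4KB page per ring.
 */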

/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			      sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
			      segs, &nsegs, BUS_DMA_NOWAIT)) {
			sc->rxdmamapfails++;
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_hd_ptr],
				BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer. */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
				CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}
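
/*
 * The ring bookkeeping above follows a simple producer/consumer scheme:
 * rxring_hd_ptr is where cgem_fill_rqueue() queues the next empty
 * buffer, rxring_tl_ptr is where cgem_recv() picks up the next filled
 * one, and rxring_queued counts the buffers outstanding so the two
 * indices never pass each other.
 */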

/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mbuf *m, *m_hd, **m_tl;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	m_hd = NULL;
	m_tl = &m_hd;
	while (sc->rxring_queued > 0 &&
	       (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {

		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_tl_ptr],
				BUS_DMASYNC_POSTREAD);

		/* Unload dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
				  sc->rxring_m_dmamap[sc->rxring_tl_ptr]);

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/* Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
		           (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
			ifp->if_ierrors++;
			continue;
		}

		/* Ready it to hand off to upper layers. */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/* Are we using hardware checksumming?  Check the
		 * status in the receive descriptor.
		 */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
				   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Queue it up for delivery below. */
		*m_tl = m;
		m_tl = &m->m_next;
	}

	/* Replenish receive buffers. */
	cgem_fill_rqueue(sc);

	/* Unlock and send up packets. */
	CGEM_UNLOCK(sc);
	while (m_hd != NULL) {
		m = m_hd;
		m_hd = m_hd->m_next;
		m->m_next = NULL;
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);
	}
	CGEM_LOCK(sc);
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	       ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
		CGEM_TXDESC_USED) != 0) {

		/* Sync cache.  nop? */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_tl_ptr],
				BUS_DMASYNC_POSTWRITE);

		/* Unload DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
				  sc->txring_m_dmamap[sc->txring_tl_ptr]);

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error.  Log to console. */
			device_printf(sc->dev, "cgem_clean_tx: Whoa! "
				      "AHB error, addr=0x%x\n",
				      sc->txring[sc->txring_tl_ptr].addr);
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
				   CGEM_TXDESC_LATE_COLL)) != 0) {
			sc->ifp->if_oerrors++;
		} else
			sc->ifp->if_opackets++;

		/* If the packet spanned more than one tx descriptor,
		 * skip descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
				ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;

		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
}

/* Start transmits. */
static void
cgem_start_locked(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		if (sc->txring_queued >=
		    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {

			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >=
			    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				sc->txfull++;
				break;
			}
		}

		/* Grab next transmit packet. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Load DMA map. */
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
				      sc->txring_m_dmamap[sc->txring_hd_ptr],
				      m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments!  Defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				sc->txdefragfails++;
				m_freem(m);
				continue;
			}
			m = m2;
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
				      sc->txring_m_dmamap[sc->txring_hd_ptr],
				      m, segs, &nsegs, BUS_DMA_NOWAIT);
			sc->txdefrags++;
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			sc->txdmamapfails++;
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_hd_ptr],
				BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
			CGEM_NUM_TX_DESCS;

		/* Fill in the TX descriptors back to front so that USED
		 * bit in first descriptor is cleared last.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
				segs[i].ds_addr;

			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);

		/* If there is a BPF listener, bounce a copy to him. */
		ETHER_BPF_MTAP(ifp, m);
	}
}
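
/*
 * The ring-full watermark in cgem_start_locked() keeps at least
 * TX_MAX_DMA_SEGS * 2 descriptors free: up to TX_MAX_DMA_SEGS for the
 * packet about to be queued, plus an equal margin so the wrap test can
 * always place a maximally fragmented packet without running onto
 * unfinished transmits at the end of the ring.
 */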

static void
cgem_start(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;

	CGEM_LOCK(sc);
	cgem_start_locked(ifp);
	CGEM_UNLOCK(sc);
}

static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
	uint32_t n;

	CGEM_ASSERT_LOCKED(sc);

	sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
	sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

	sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
	sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
	sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
	sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
	sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
	sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
	sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
	sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
	sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
	sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
	sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

	n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
	sc->stats.tx_single_collisn += n;
	sc->ifp->if_collisions += n;
	n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
	sc->stats.tx_multi_collisn += n;
	sc->ifp->if_collisions += n;
	n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
	sc->stats.tx_excsv_collisn += n;
	sc->ifp->if_collisions += n;
	n = RD4(sc, CGEM_LATE_COLL);
	sc->stats.tx_late_collisn += n;
	sc->ifp->if_collisions += n;

	sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
	sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

	sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
	sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

	sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
	sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
	sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
	sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
	sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
	sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
	sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
	sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
	sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
	sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
	sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
	sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
	sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
	sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
	sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
	sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
	sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
	sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
	sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
	sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
	sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
	sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}

static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_tick(mii);
	}

	/* Poll statistics registers. */
	cgem_poll_hw_stats(sc);

	/* Check for receiver hang. */
	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
		/*
		 * Reset receiver logic by toggling RX_EN bit.  A 1 usec
		 * delay is necessary, especially when operating at
		 * 100 Mb/s and 10 Mb/s speeds.
		 */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
		    ~CGEM_NET_CTRL_RX_EN);
		DELAY(1);
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
	}
	sc->rx_frames_prev = sc->stats.rx_frames;

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}
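
/*
 * The receiver-hang check above is heuristic: if no frames have been
 * counted since the previous one-second tick, the RX_EN toggle forces
 * the receive logic through a reset.  It can be disabled at runtime by
 * clearing the rxhangwar sysctl added in cgem_add_sysctls().
 */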

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	uint32_t istatus;

	CGEM_LOCK(sc);

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	/* Read interrupt status and immediately clear the bits. */
	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus);

	/* Packets received. */
	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
		cgem_recv(sc);

	/* Free up any completed transmit buffers. */
	cgem_clean_tx(sc);

	/* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		device_printf(sc->dev, "cgem_intr: hresp not okay! "
			      "rx_status=0x%x\n", RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Receiver overrun. */
	if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
		/* Clear status bit. */
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
		sc->rxoverruns++;
	}

	/* Receiver ran out of bufs. */
	if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
		cgem_fill_rqueue(sc);
		sc->rxnobufs++;
	}

	/* Restart transmitter if needed. */
	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
		cgem_start_locked(sc->ifp);

	CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, 0);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	WR4(sc, CGEM_NET_CFG,
	    CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64);

	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
	uint32_t net_cfg;
	uint32_t dma_cfg;
	u_char *eaddr = IF_LLADDR(sc->ifp);

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register. */
	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
		CGEM_NET_CFG_MDC_CLK_DIV_64 |
		CGEM_NET_CFG_FCS_REMOVE |
		CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
		CGEM_NET_CFG_GIGE_EN |
		CGEM_NET_CFG_1536RXEN |
		CGEM_NET_CFG_FULL_DUPLEX |
		CGEM_NET_CFG_SPEED100;

	/* Enable receive checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
		net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
		CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
		CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
		CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
		CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

	/* Enable transmit checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set receive address in case it changed. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN,
	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
	    CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
	    CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_mediachg(mii);

	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
	int i;

	CGEM_ASSERT_LOCKED(sc);

	callout_stop(&sc->tick_ch);

	/* Shut down hardware. */
	cgem_reset(sc);

	/* Clear out transmit queue. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring[i].addr = 0;
		if (sc->txring_m[i]) {
			bus_dmamap_unload(sc->mbuf_dma_tag,
					  sc->txring_m_dmamap[i]);
			m_freem(sc->txring_m[i]);
			sc->txring_m[i] = NULL;
		}
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	/* Clear out receive queue. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		if (sc->rxring_m[i]) {
			/* Unload dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
					  sc->rxring_m_dmamap[i]);

			m_freem(sc->rxring_m[i]);
			sc->rxring_m[i] = NULL;
		}
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Force next statchg or linkchg to program net config register. */
	sc->mii_media_active = 0;
}

static int
cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->if_old_flags) &
				     (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			cgem_stop(sc);
		}
		sc->if_old_flags = ifp->if_flags;
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				ifp->if_capenable |= (IFCAP_TXCSUM |
						      IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist |= CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				ifp->if_capenable &= ~(IFCAP_TXCSUM |
						       IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				ifp->if_capenable |= (IFCAP_RXCSUM |
						      IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) |
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			} else {
				/* Turn off RX checksumming. */
				ifp->if_capenable &= ~(IFCAP_RXCSUM |
						       IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) &
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			}
		}
		if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
			ifp->if_capenable |= IFCAP_VLAN_HWCSUM;
		else
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;

		CGEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/* MII bus support routines. */
static void
cgem_child_detached(device_t dev, device_t child)
{
	struct cgem_softc *sc = device_get_softc(dev);

	if (child == sc->miibus)
		sc->miibus = NULL;
}

static int
cgem_ifmedia_upd(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			PHY_RESET(miisc);
		error = mii_mediachg(mii);
	}
	CGEM_UNLOCK(sc);

	return (error);
}

static void
cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	CGEM_UNLOCK(sc);
}

static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	if (reg == MII_EXTSR)
		/*
		 * MAC does not support half-duplex at gig speeds.
		 * Let mii(4) exclude the capability.
		 */
		val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);

	return (val);
}
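
/*
 * The PHY maintenance word written above forms an IEEE 802.3 Clause 22
 * MDIO frame: CGEM_PHY_MAINT_MUST_10 supplies the bits the GEM requires
 * to be written as binary 10 (the Clause 22 turnaround field), and the
 * 5-bit PHY and register addresses plus the 16-bit data field occupy
 * their usual positions.
 */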

static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries;

	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_WRITE |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
	    (data & CGEM_PHY_MAINT_DATA_MASK));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy write timeout: %d\n", reg);
			return (-1);
		}
	}

	return (0);
}

static void
cgem_miibus_statchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

static void
cgem_miibus_linkchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

/*
 * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
 * provide a function to set the cgem's reference clock.
 */
static int __used
cgem_default_set_ref_clk(int unit, int frequency)
{

	return 0;
}
__weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
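
/*
 * Sketch of a platform-side override (hypothetical; a real version
 * would live in the platform's clock-control code, e.g. the Zynq SLCR
 * driver, not in this file).  A strong definition like the following
 * takes precedence over the weak default above:
 *
 *	int
 *	cgem_set_ref_clk(int unit, int frequency)
 *	{
 *		(program the SoC's GEM reference-clock divisors so that
 *		 GEM "unit" receives "frequency" Hz; return 0 on success
 *		 or non-zero on failure)
 *	}
 */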

/* Call to set reference clock and network config bits according to media. */
static void
cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
{
	uint32_t net_cfg;
	int ref_clk_freq;

	CGEM_ASSERT_LOCKED(sc);

	/* Update hardware to reflect media. */
	net_cfg = RD4(sc, CGEM_NET_CFG);
	net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
		     CGEM_NET_CFG_FULL_DUPLEX);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		net_cfg |= (CGEM_NET_CFG_SPEED100 |
			    CGEM_NET_CFG_GIGE_EN);
		ref_clk_freq = 125000000;
		break;
	case IFM_100_TX:
		net_cfg |= CGEM_NET_CFG_SPEED100;
		ref_clk_freq = 25000000;
		break;
	default:
		ref_clk_freq = 2500000;
	}

	if ((mii->mii_media_active & IFM_FDX) != 0)
		net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Set the reference clock if necessary. */
	if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq))
		device_printf(sc->dev, "cgem_mediachange: "
			      "could not set ref clk%d to %d.\n",
			      sc->ref_clk_num, ref_clk_freq);

	sc->mii_media_active = mii->mii_media_active;
}

static void
cgem_add_sysctls(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
		       &sc->rxbufs, 0,
		       "Number receive buffers to provide");

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW,
		       &sc->rxhangwar, 0,
		       "Enable receive hang work-around");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD,
			&sc->rxoverruns, 0,
			"Receive overrun events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD,
			&sc->rxnobufs, 0,
			"Receive buf queue empty events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD,
			&sc->rxdmamapfails, 0,
			"Receive DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD,
			&sc->txfull, 0,
			"Transmit ring full events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD,
			&sc->txdmamapfails, 0,
			"Transmit DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD,
			&sc->txdefrags, 0,
			"Transmit m_defrag() calls");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD,
			&sc->txdefragfails, 0,
			"Transmit m_defrag() failures");

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
			       NULL, "GEM statistics");
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
			 &sc->stats.tx_bytes, "Total bytes transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD,
			&sc->stats.tx_frames, 0, "Total frames transmitted");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD,
			&sc->stats.tx_frames_bcast, 0,
			"Number broadcast frames transmitted");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD,
			&sc->stats.tx_frames_multi, 0,
			"Number multicast frames transmitted");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause",
			CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
			"Number pause frames transmitted");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD,
			&sc->stats.tx_frames_64b, 0,
			"Number frames transmitted of size 64 bytes or less");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD,
			&sc->stats.tx_frames_65to127b, 0,
			"Number frames transmitted of size 65-127 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b",
			CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
			"Number frames transmitted of size 128-255 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b",
			CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
			"Number frames transmitted of size 256-511 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b",
			CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
			"Number frames transmitted of size 512-1023 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b",
			CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
			"Number frames transmitted of size 1024-1536 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs",
			CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
			"Number transmit under-run events");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn",
			CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
			"Number single-collision transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn",
			CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
			"Number multi-collision transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn",
			CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
			"Number excessive collision transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn",
			CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
			"Number late-collision transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames",
			CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
			"Number deferred transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs",
			CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
			"Number carrier sense errors on transmit");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD,
			 &sc->stats.rx_bytes, "Total bytes received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD,
			&sc->stats.rx_frames, 0, "Total frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast",
			CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
			"Number broadcast frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi",
			CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
			"Number multicast frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause",
			CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
			"Number pause frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b",
			CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
			"Number frames received of size 64 bytes or less");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b",
			CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
			"Number frames received of size 65-127 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b",
			CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
			"Number frames received of size 128-255 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b",
			CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
			"Number frames received of size 256-511 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b",
			CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
			"Number frames received of size 512-1023 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b",
			CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
			"Number frames received of size 1024-1536 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize",
			CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
			"Number undersize frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize",
			CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
			"Number oversize frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber",
			CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
			"Number jabber frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs",
			CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
			"Number frames received with FCS errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs",
			CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
			"Number frames received with length errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs",
			CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
			"Number receive symbol errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs",
			CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
			"Number receive alignment errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs",
			CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
			"Number frames received when no rx buffer available");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs",
			CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
			"Number frames received but not copied due to "
			"receive overrun");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs",
			CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
			"Number frames received with IP header checksum "
			"errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs",
			CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
			"Number frames received with TCP checksum errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs",
			CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
			"Number frames received with UDP checksum errors");
}
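
/*
 * Example (assuming the interface attaches as cgem0): the knobs and
 * counters above appear under the device's sysctl tree and can be
 * inspected or set with, e.g.:
 *
 *	sysctl dev.cgem.0.rxbufs=128
 *	sysctl dev.cgem.0._rxoverruns
 *	sysctl dev.cgem.0.stats
 */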

static int
cgem_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "cadence,gem"))
		return (ENXIO);

	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
	return (0);
}

static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = NULL;
	phandle_t node;
	pcell_t cell;
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

	/* Get reference clock number and base divider from fdt. */
	node = ofw_bus_get_node(dev);
	sc->ref_clk_num = 0;
	if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0)
		sc->ref_clk_num = fdt32_to_cpu(cell);

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
					     RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		return (ENOMEM);
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					     RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Set up ifnet structure. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure\n");
		cgem_detach(dev);
		return (ENOMEM);
	}
	ifp->if_softc = sc;
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = cgem_start;
	ifp->if_ioctl = cgem_ioctl;
	ifp->if_init = cgem_init;
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
		IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
	/* Disable hardware checksumming by default. */
	ifp->if_hwassist = 0;
	ifp->if_capenable = ifp->if_capabilities &
		~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM);
	ifp->if_snd.ifq_drv_maxlen = CGEM_NUM_TX_DESCS;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	sc->if_old_flags = ifp->if_flags;
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;
	sc->rxhangwar = 1;

	/* Reset hardware. */
	CGEM_LOCK(sc);
	cgem_reset(sc);
	CGEM_UNLOCK(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
			 cgem_ifmedia_upd, cgem_ifmedia_sts,
			 BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err) {
		device_printf(dev, "attaching PHYs failed\n");
		cgem_detach(dev);
		return (err);
	}

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	ether_ifattach(ifp, eaddr);

	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
			     INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not set interrupt handler.\n");
		ether_ifdetach(ifp);
		cgem_detach(dev);
		return (err);
	}

	cgem_add_sysctls(dev);

	return (0);
}

static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int i;

	if (sc == NULL)
		return (ENODEV);

	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		sc->ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
				     rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
				     rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Release DMA resources. */
	if (sc->rxring_dma_map != NULL) {
		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
				sc->rxring_dma_map);
		sc->rxring_dma_map = NULL;
		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
						   sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
	}
	if (sc->txring_dma_map != NULL) {
		bus_dmamem_free(sc->desc_dma_tag, sc->txring,
				sc->txring_dma_map);
		sc->txring_dma_map = NULL;
		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
						   sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
	}
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

	bus_generic_detach(dev);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}

static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	cgem_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cgem_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	cgem_miibus_linkchg),

	DEVMETHOD_END
};

static driver_t cgem_driver = {
	"cgem",
	cgem_methods,
	sizeof(struct cgem_softc),
};

DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);