/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mge/if_mgevar.h>
#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>

#include "miibus_if.h"

static int mge_probe(device_t dev);
static int mge_attach(device_t dev);
static int mge_detach(device_t dev);
static int mge_shutdown(device_t dev);
static int mge_suspend(device_t dev);
static int mge_resume(device_t dev);

static int mge_miibus_readreg(device_t dev, int phy, int reg);
static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);

static int mge_ifmedia_upd(struct ifnet *ifp);
static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

static void mge_init(void *arg);
static void mge_init_locked(void *arg);
static void mge_start(struct ifnet *ifp);
static void mge_start_locked(struct ifnet *ifp);
static void mge_watchdog(struct mge_softc *sc);
static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);

static uint32_t mge_tfut_ipg(uint32_t val, int ver);
static uint32_t mge_rx_ipg(uint32_t val, int ver);
static void mge_ver_params(struct mge_softc *sc);

static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
static void mge_intr_rxtx(void *arg);
static void mge_intr_rx(void *arg);
static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
    uint32_t int_cause_ext);
static int mge_intr_rx_locked(struct mge_softc *sc, int count);
static void mge_intr_tx(void *arg);
static void mge_intr_tx_locked(struct mge_softc *sc);
static void mge_intr_misc(void *arg);
static void mge_intr_sum(void *arg);
static void mge_intr_err(void *arg);
static void mge_stop(struct mge_softc *sc);
static void mge_tick(void *msc);
static uint32_t mge_set_port_serial_control(uint32_t media);
static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
static void mge_set_mac_address(struct mge_softc *sc);
static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
    uint8_t queue);
static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
static int mge_allocate_dma(struct mge_softc *sc);
static int mge_alloc_desc_dma(struct mge_softc *sc,
    struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, bus_addr_t *paddr);
static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void mge_free_dma(struct mge_softc *sc);
static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize);
static void mge_offload_setup_descriptor(struct mge_softc *sc,
    struct mge_desc_wrapper *dw);
static uint8_t mge_crc8(uint8_t *data, int size);
static void mge_setup_multicast(struct mge_softc *sc);
static void mge_set_rxic(struct mge_softc *sc);
static void mge_set_txic(struct mge_softc *sc);
static void mge_add_sysctls(struct mge_softc *sc);
static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);

static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mge_probe),
	DEVMETHOD(device_attach,	mge_attach),
	DEVMETHOD(device_detach,	mge_detach),
	DEVMETHOD(device_shutdown,	mge_shutdown),
	DEVMETHOD(device_suspend,	mge_suspend),
	DEVMETHOD(device_resume,	mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
	{ 0, 0 }
};

static driver_t mge_driver = {
	"mge",
	mge_methods,
	sizeof(struct mge_softc),
};

static devclass_t mge_devclass;

DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

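/*
 * Per-interrupt handler table.  Entry 0 is the aggregated RX/TX handler,
 * used on SoCs that expose a single GbE interrupt (mge_intr_cnt == 1);
 * otherwise mge_attach() wires entries 1..mge_intr_cnt to the IRQ
 * resources declared above.
 */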
static struct {
	driver_intr_t *handler;
	char *description;
} mge_intrs[MGE_INTR_COUNT + 1] = {
	{ mge_intr_rxtx, "GbE aggregated interrupt" },
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc, "GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};

static void
mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;
	uint8_t lmac[6];
	int i, valid;

	/*
	 * Retrieve hw address from the device tree.
	 */
	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
	if (i == 6) {
		valid = 0;
		for (i = 0; i < 6; i++)
			if (lmac[i] != 0) {
				valid = 1;
				break;
			}

		if (valid) {
			bcopy(lmac, addr, 6);
			return;
		}
	}

	/*
	 * Fall back -- use the currently programmed address.
	 */
	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
}

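/*
 * Encode a TX FIFO urgent threshold (IPG) value for the
 * MGE_TX_FIFO_URGENT_TRSH register; the field is 14 bits wide on v1
 * controllers and 16 bits on v2.
 */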
static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 4);
	case 2:
	default:
		return ((val & 0xffff) << 4);
	}
}

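/*
 * Encode an RX interrupt coalescing (IPG) value for MGE_SDMA_CONFIG.
 * On v1 controllers this is a contiguous 14-bit field; on v2 the value
 * is split, with bit 15 stored apart from the low 15 bits.
 */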
static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 8);
	case 2:
	default:
		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
	}
}

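/*
 * Fill in controller-version-dependent parameters (register field widths,
 * MTU register offset, TX token/arbiter defaults, interrupt line count
 * and checksum offload capability) based on the SoC ID.
 */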
static void
mge_ver_params(struct mge_softc *sc)
{
	uint32_t d, r;

	soc_id(&d, &r);
	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
	    d == MV_DEV_88F6282 ||
	    d == MV_DEV_MV78100 ||
	    d == MV_DEV_MV78100_Z0 ||
	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
		sc->mge_ver = 2;
		sc->mge_mtu = 0x4e8;
		sc->mge_tfut_ipg_max = 0xFFFF;
		sc->mge_rx_ipg_max = 0xFFFF;
		sc->mge_tx_arb_cfg = 0xFC0000FF;
		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	} else {
		sc->mge_ver = 1;
		sc->mge_mtu = 0x458;
		sc->mge_tfut_ipg_max = 0x3FFF;
		sc->mge_rx_ipg_max = 0x3FFF;
		sc->mge_tx_arb_cfg = 0x000000FF;
		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	}
	if (d == MV_DEV_88RC8180)
		sc->mge_intr_cnt = 1;
	else
		sc->mge_intr_cnt = 2;

	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
		sc->mge_hw_csum = 0;
	else
		sc->mge_hw_csum = 1;
}

static void
mge_set_mac_address(struct mge_softc *sc)
{
	uint8_t *if_mac;
	uint32_t mac_l, mac_h;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	if_mac = (uint8_t *)IF_LLADDR(sc->ifp);

	mac_l = (if_mac[4] << 8) | (if_mac[5]);
	mac_h = (if_mac[0] << 24) | (if_mac[1] << 16) |
	    (if_mac[2] << 8) | (if_mac[3] << 0);

	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);

	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
}

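/*
 * Program the unicast DA filter table: one byte-wide entry per value of
 * the low nibble of the last MAC address byte, with bit 0 enabling
 * reception and bits 1-3 selecting the RX queue.  Only the entry
 * matching our own address is enabled.
 */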
static void
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
{
	uint32_t reg_idx, reg_off, reg_val, i;

	last_byte &= 0xf;
	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
	reg_val = (1 | (queue << 1)) << reg_off;

	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
		if (i == reg_idx)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
		else
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
	}
}

static void
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
{
	uint32_t port_config;
	uint32_t reg_val, i;

	/* Enable or disable promiscuous mode as needed */
	if (sc->ifp->if_flags & IFF_PROMISC) {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config |= PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
		}

		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);

	} else {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config &= ~PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
		}

		mge_set_mac_address(sc);
	}
}

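/* Busdma callback: store the physical address of a single-segment load. */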
static void
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	uint32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;

	*paddr = segs->ds_addr;
}

static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

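/*
 * Allocate and chain the TX or RX descriptors into a circular ring.
 * The table is walked backwards so each descriptor can be pointed at its
 * already-loaded successor; the final assignment closes the ring by
 * linking the last descriptor back to the first.
 */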
static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	desc_paddr = 0;
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));

		if (error) {
			if_printf(sc->ifp, "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);

		if (error) {
			if_printf(sc->ifp, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create busdma maps for the mbufs (used for both TX and RX rings) */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			if_printf(sc->ifp, "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}

static int
mge_allocate_dma(struct mge_softc *sc)
{
	int error;
	struct mge_desc_wrapper *dw;
	int i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->mge_desc_dtag);		/* dmat */
	if (error) {
		/* Note: sc->ifp is not allocated yet, so use device_printf */
		device_printf(sc->dev,
		    "failed to create busdma tag for descriptors\n");
		return (ENXIO);
	}

	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;

	return (0);
}

static void
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
{
	struct mge_desc_wrapper *dw;
	int i;

	for (i = 0; i < size; i++) {
		/* Free RX mbuf */
		dw = &(tab[i]);

		if (dw->buffer_dmap) {
			if (free_mbufs) {
				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
			}
			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
			if (free_mbufs)
				m_freem(dw->buffer);
		}
		/* Free RX descriptors */
		if (dw->desc_dmap) {
			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
		}
	}
}

static void
mge_free_dma(struct mge_softc *sc)
{
	/* Free descriptors and mbufs */
	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);

	/* Destroy mbuf dma tag */
	bus_dma_tag_destroy(sc->mge_tx_dtag);
	bus_dma_tag_destroy(sc->mge_rx_dtag);
	/* Destroy descriptors tag */
	bus_dma_tag_destroy(sc->mge_desc_dtag);
}

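/*
 * Recover from an RX resource error: tear down and rebuild the whole RX
 * ring, then re-enable the receive queue.
 */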
static void
mge_reinit_rx(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
	sc->rx_desc_curr = 0;

	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
}

#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

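/*
 * Polling entry point.  Drain TX completions and up to 'count' received
 * frames; on POLL_AND_CHECK_STATUS also acknowledge the interrupt cause
 * registers and recover from RX resource errors.
 */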
static int
mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct mge_softc *sc = ifp->if_softc;
	uint32_t int_cause, int_cause_ext;
	int rx_npkts = 0;

	MGE_GLOBAL_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		MGE_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

		/* Check for resource error */
		if (int_cause & MGE_PORT_INT_RXERRQ0)
			mge_reinit_rx(sc);

		if (int_cause || int_cause_ext) {
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		}
	}

	mge_intr_tx_locked(sc);
	rx_npkts = mge_intr_rx_locked(sc, count);

	MGE_GLOBAL_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct mii_softc *miisc;
	struct ifnet *ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error, phy;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Get the PHY address and the softc of the MDIO-owning instance */
	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0)
		return (ENXIO);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
	    MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Configure defaults for interrupt coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		mge_detach(dev);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->mge_hw_csum) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
	}
	ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ifp->if_init = mge_init;
	ifp->if_start = mge_start;
	ifp->if_ioctl = mge_ioctl;

	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 0);

	/* Attach PHY(s) */
	error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
	    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		mge_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Tell the MAC where to find the PHY so autoneg works */
	miisc = LIST_FIRST(&sc->mii->mii_phys);
	MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);

	/* Attach interrupt handlers */
	/* TODO: review flags; in particular, mark RX as INTR_ENTROPY? */
	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
		error = bus_setup_intr(dev, sc->res[i],
		    INTR_TYPE_NET | INTR_MPSAFE,
		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
		    sc, &sc->ih_cookie[i - 1]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
			mge_detach(dev);
			return (error);
		}
	}

	return (0);
}

static int
mge_detach(device_t dev)
{
	struct mge_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Stop controller and free TX queue */
	if (sc->ifp)
		mge_shutdown(dev);

	/* Wait for the tick callout to finish */
	callout_drain(&sc->wd_callout);

	/* Stop and release all interrupts */
	for (i = 0; i < sc->mge_intr_cnt; ++i) {
		if (!sc->ih_cookie[i])
			continue;

		error = bus_teardown_intr(dev, sc->res[1 + i],
		    sc->ih_cookie[i]);
		if (error)
			device_printf(dev, "could not release %s\n",
			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
	}

	/* Detach network interface */
	if (sc->ifp) {
		ether_ifdetach(sc->ifp);
		if_free(sc->ifp);
	}

	/* Free DMA resources */
	mge_free_dma(sc);

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);

	return (0);
}

static void
mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	MGE_TRANSMIT_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	MGE_TRANSMIT_UNLOCK(sc);
}

static uint32_t
mge_set_port_serial_control(uint32_t media)
{
	uint32_t port_config;

	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);

	if (IFM_TYPE(media) == IFM_ETHER) {
		switch (IFM_SUBTYPE(media)) {
		case IFM_AUTO:
			break;
		case IFM_1000_T:
			port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		case IFM_100_TX:
			port_config |= (PORT_SERIAL_MII_SPEED_100 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		case IFM_10_T:
			port_config |= (PORT_SERIAL_AUTONEG |
			    PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		}
		if (media & IFM_FDX)
			port_config |= PORT_SERIAL_FULL_DUPLEX;
	}
	return (port_config);
}

static int
mge_ifmedia_upd(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP) {
		MGE_GLOBAL_LOCK(sc);

		sc->mge_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);
		mge_init_locked(sc);

		MGE_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
mge_init(void *arg)
{
	struct mge_softc *sc = arg;

	MGE_GLOBAL_LOCK(sc);

	mge_init_locked(arg);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/* Set default MTU */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);

	/* Setup port configuration */
	reg_val = mge_set_port_serial_control(sc->mge_media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Enable RX descriptors */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Setup interrupt coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on.  Disable interrupts
	 * explicitly if polling is enabled.
	 */
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

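/*
 * Aggregated interrupt handler, used on SoCs with a single GbE interrupt
 * line: services TX completions and the RX path from one vector.
 */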
static void
mge_intr_rxtx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause, int_cause_ext;

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	/* Check for Transmit interrupt */
	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
	    MGE_PORT_INT_EXT_TXUR)) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
		mge_intr_tx_locked(sc);
	}

	MGE_TRANSMIT_UNLOCK(sc);

	/* Check for Receive interrupt */
	mge_intr_rx_check(sc, int_cause, int_cause_ext);

	MGE_RECEIVE_UNLOCK(sc);
}

static void
mge_intr_err(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_misc(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_rx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause, int_cause_ext;

	MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	mge_intr_rx_check(sc, int_cause, int_cause_ext);

	MGE_RECEIVE_UNLOCK(sc);
}

static void
mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
    uint32_t int_cause_ext)
{
	/* Check for resource error */
	if (int_cause & MGE_PORT_INT_RXERRQ0) {
		mge_reinit_rx(sc);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
	}

	int_cause &= MGE_PORT_INT_RXQ0;
	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;

	if (int_cause || int_cause_ext) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		mge_intr_rx_locked(sc, -1);
	}
}

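/*
 * Receive processing loop.  A negative 'count' (interrupt context) means
 * no budget limit; the polling path passes the remaining poll budget.
 * Returns the number of packets passed up the stack.
 */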
static int
mge_intr_rx_locked(struct mge_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t status;
	uint16_t bufsize;
	struct mge_desc_wrapper* dw;
	struct mbuf *mb;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	while (count != 0) {
		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get status */
		status = dw->mge_desc->cmd_status;
		bufsize = dw->mge_desc->buff_size;
		if ((status & MGE_DMA_OWNED) != 0)
			break;

		if (dw->mge_desc->byte_count &&
		    !(status & MGE_ERR_SUMMARY)) {
			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
			    BUS_DMASYNC_POSTREAD);

			mb = m_devget(dw->buffer->m_data,
			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
			    0, ifp, NULL);

			if (mb == NULL)
				/* Give up if no mbufs */
				break;

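			/*
			 * The controller prepends two bytes of padding to
			 * each received frame (presumably so the IP header
			 * lands 4-byte aligned); strip them before handing
			 * the frame up.
			 */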
			mb->m_len -= 2;
			mb->m_pkthdr.len -= 2;
			mb->m_data += 2;

			mge_offload_process_frame(ifp, mb, status,
			    bufsize);

			MGE_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, mb);
			MGE_RECEIVE_LOCK(sc);
			rx_npkts++;
		}

		dw->mge_desc->byte_count = 0;
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (count > 0)
			count -= 1;
	}

	return (rx_npkts);
}

static void
mge_intr_sum(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_tx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause_ext;

	MGE_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Ack the interrupt */
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));

	mge_intr_tx_locked(sc);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_intr_tx_locked(struct mge_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	uint32_t status;
	int send = 0;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	/* Disable watchdog */
	sc->wd_timer = 0;

	while (sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx =
		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		/* Update collision statistics */
		if (status & MGE_ERR_SUMMARY) {
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
				ifp->if_collisions++;
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
				ifp->if_collisions += 16;
		}

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
		send++;

		ifp->if_opackets++;
	}

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		mge_start_locked(ifp);
	}
}

static int
mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MGE_GLOBAL_LOCK(sc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = ifp->if_flags;
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return (error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
		    !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseT half-duplex unsupported\n");
			return (0);
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

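/*
 * PHY access goes through the SMI register of the controller instance
 * that owns the MDIO bus (sc->phy_sc), busy-waiting for completion.
 */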
static int
mge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mge_softc *sc;
	uint32_t retries;

	sc = device_get_softc(dev);

	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	retries = MGE_SMI_READ_RETRIES;
	while (--retries &&
	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
		DELAY(MGE_SMI_READ_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while reading from PHY\n");

	return (MGE_READ(sc->phy_sc, MGE_REG_SMI) & 0xffff);
}

static int
mge_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct mge_softc *sc;
	uint32_t retries;

	sc = device_get_softc(dev);

	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));

	retries = MGE_SMI_WRITE_RETRIES;
	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
		DELAY(MGE_SMI_WRITE_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while writing to PHY\n");
	return (0);
}

static int
mge_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
		return (ENXIO);

	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
	return (BUS_PROBE_DEFAULT);
}

static int
mge_resume(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static int
mge_shutdown(device_t dev)
{
	struct mge_softc *sc = device_get_softc(dev);

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	mge_stop(sc);

	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

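/*
 * Enqueue one mbuf for transmission: load it into DMA-able memory (a
 * single segment; callers defragment first), fill in the TX descriptor
 * and hand it to the hardware.  Returns non-zero if no descriptor is
 * available or the DMA load fails.
 */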
static int
mge_encap(struct mge_softc *sc, struct mbuf *m0)
{
	struct mge_desc_wrapper *dw = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
	bus_dmamap_t mapp;
	int error;
	int seg, nsegs;
	int desc_no;

	ifp = sc->ifp;

	/* Check for free descriptors */
	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	desc_no = sc->tx_desc_curr;
	dw = &sc->mge_tx_desc[desc_no];
	mapp = dw->buffer_dmap;

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || nsegs != 1) {
		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
		return ((error != 0) ? error : -1);
	}

	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		dw->mge_desc->byte_count = segs[seg].ds_len;
		dw->mge_desc->buffer = segs[seg].ds_addr;
		dw->buffer = m0;
		dw->mge_desc->cmd_status = 0;
		if (seg == 0)
			mge_offload_setup_descriptor(sc, dw);
		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
		    MGE_DMA_OWNED;
	}

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
	sc->tx_desc_used_count++;
	return (0);
}

static void
mge_tick(void *msc)
{
	struct mge_softc *sc = msc;

	/* Check for TX timeout */
	mge_watchdog(sc);

	mii_tick(sc->mii);

	/* Check for media type change */
	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
		mge_ifmedia_upd(sc->ifp);

	/* Schedule another timeout one second from now */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_watchdog(struct mge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	MGE_GLOBAL_LOCK(sc);

	if (sc->wd_timer == 0 || --sc->wd_timer) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	mge_stop(sc);
	mge_init_locked(sc);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_start(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK(sc);

	mge_start_locked(ifp);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_start_locked(struct ifnet *ifp)
{
	struct mge_softc *sc;
	struct mbuf *m0, *mtmp;
	uint32_t reg_val, queued = 0;

	sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

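		/*
		 * The TX path uses one descriptor per packet, and
		 * mge_encap() rejects multi-segment loads, so try to
		 * defragment the chain into a single cluster first.
		 */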
		mtmp = m_defrag(m0, M_NOWAIT);
		if (mtmp)
			m0 = mtmp;

		if (mge_encap(sc, m0)) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}

	if (queued) {
		/* Enable transmitter and watchdog timer */
		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
		sc->wd_timer = 5;
	}
}

static void
mge_stop(struct mge_softc *sc)
{
	struct ifnet *ifp;
	volatile uint32_t reg_val, status;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	int count;

	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Stop tick engine */
	callout_stop(&sc->wd_callout);

	/* Disable interface */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->wd_timer = 0;

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Disable Rx and Tx */
	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);

	/* Remove pending data from TX queue */
	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
	    sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);

		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
	}

	/* Wait for end of transmission */
	count = 0x100000;
	while (count--) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
			break;
		DELAY(100);
	}

	if (!count)
		if_printf(ifp, "%s: timeout while waiting for end of "
		    "transmission\n", __FUNCTION__);

	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val &= ~(PORT_SERIAL_ENABLE);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
}

static int
mge_suspend(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

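/*
 * Translate RX descriptor status into mbuf checksum flags: mark the IP
 * header as verified and, for non-fragmented TCP/UDP with a good L4
 * checksum, report a valid pseudo-header checksum.
 */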
static void
mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize)
{
	int csum_flags = 0;

	if (ifp->if_capenable & IFCAP_RXCSUM) {
		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
		    (status & MGE_RX_L4_CSUM_OK)) {
			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			frame->m_pkthdr.csum_data = 0xFFFF;
		}

		frame->m_pkthdr.csum_flags = csum_flags;
	}
}

static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags) {
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			cmd_status |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP Offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
		cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}

static void
mge_intrs_ctrl(struct mge_softc *sc, int enable)
{

	if (enable) {
		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
		    MGE_PORT_INT_EXT_TXBUF0);
	} else {
		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_INT_MASK, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
	}
}

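/*
 * Table-driven CRC-8 (polynomial 0x07), used to hash multicast addresses
 * into the "other multicast" filter table.
 */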
static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
	static const uint8_t ct[256] = {
		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
	};

	while (size--)
		crc = ct[crc ^ *(data++)];

	return (crc);
}

static void
mge_setup_multicast(struct mge_softc *sc)
{
	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	uint32_t smt[MGE_MCAST_REG_NUMBER];
	uint32_t omt[MGE_MCAST_REG_NUMBER];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	uint8_t *mac;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(smt, 0, sizeof(smt));
		memset(omt, 0, sizeof(omt));

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			if (memcmp(mac, special, sizeof(special)) == 0) {
				i = mac[5];
				smt[i >> 2] |= v << ((i & 0x03) << 3);
			} else {
				i = mge_crc8(mac, ETHER_ADDR_LEN);
				omt[i >> 2] |= v << ((i & 0x03) << 3);
			}
		}
		if_maddr_runlock(ifp);
	}

	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
	}
}

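/*
 * Program interrupt coalescing: the RX threshold lives in a field of
 * MGE_SDMA_CONFIG and the TX threshold in MGE_TX_FIFO_URGENT_TRSH, with
 * version-dependent encodings (see mge_rx_ipg()/mge_tfut_ipg()).
 */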
static void
mge_set_rxic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
		sc->rx_ic_time = sc->mge_rx_ipg_max;

	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
}

static void
mge_set_txic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
		sc->tx_ic_time = sc->mge_tfut_ipg_max;

	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
}

static int
mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
{
	struct mge_softc *sc = (struct mge_softc *)arg1;
	uint32_t time;
	int error;

	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	MGE_GLOBAL_LOCK(sc);
	if (arg2 == MGE_IC_RX) {
		sc->rx_ic_time = time;
		mge_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		mge_set_txic(sc);
	}
	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static void
mge_add_sysctls(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "MGE interrupt coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
	    "I", "IC RX time threshold");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
	    "I", "IC TX time threshold");
}