1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski
5 * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
22 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/*
30 * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
31 */
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD$");
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#endif
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/bus.h>
42#include <sys/endian.h>
43#include <sys/mbuf.h>
44#include <sys/kernel.h>
45#include <sys/module.h>
46#include <sys/socket.h>
47#include <sys/sockio.h>
48#include <sys/sysctl.h>
49
50#include <net/bpf.h>
51#include <net/ethernet.h>
52#include <net/if.h>
53#include <net/if_var.h>
54#include <net/if_arp.h>
55#include <net/if_dl.h>
56#include <net/if_media.h>
57#include <net/if_types.h>
58#include <net/if_vlan_var.h>
59
60#include <netinet/in_systm.h>
61#include <netinet/in.h>
62#include <netinet/ip.h>
63
64#include <machine/bus.h>
65
66#include <dev/mii/mii.h>
67#include <dev/mii/miivar.h>
68
69#include <dev/tsec/if_tsec.h>
70#include <dev/tsec/if_tsecreg.h>
71
72static int	tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
73    bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
74    const char *dname);
75static void	tsec_dma_ctl(struct tsec_softc *sc, int state);
static void	tsec_encap(struct ifnet *ifp, struct tsec_softc *sc,
77    struct mbuf *m0, uint16_t fcb_flags, int *start_tx);
78static void	tsec_free_dma(struct tsec_softc *sc);
static void	tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap,
    void *vaddr);
80static int	tsec_ifmedia_upd(struct ifnet *ifp);
81static void	tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
82static int	tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
83    struct mbuf **mbufp, uint32_t *paddr);
84static void	tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
85    int nseg, int error);
86static void	tsec_intrs_ctl(struct tsec_softc *sc, int state);
87static void	tsec_init(void *xsc);
88static void	tsec_init_locked(struct tsec_softc *sc);
89static int	tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
90static void	tsec_reset_mac(struct tsec_softc *sc);
91static void	tsec_setfilter(struct tsec_softc *sc);
92static void	tsec_set_mac_address(struct tsec_softc *sc);
93static void	tsec_start(struct ifnet *ifp);
94static void	tsec_start_locked(struct ifnet *ifp);
95static void	tsec_stop(struct tsec_softc *sc);
96static void	tsec_tick(void *arg);
97static void	tsec_watchdog(struct tsec_softc *sc);
98static void	tsec_add_sysctls(struct tsec_softc *sc);
99static int	tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS);
100static int	tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS);
101static void	tsec_set_rxic(struct tsec_softc *sc);
102static void	tsec_set_txic(struct tsec_softc *sc);
103static int	tsec_receive_intr_locked(struct tsec_softc *sc, int count);
104static void	tsec_transmit_intr_locked(struct tsec_softc *sc);
105static void	tsec_error_intr_locked(struct tsec_softc *sc, int count);
106static void	tsec_offload_setup(struct tsec_softc *sc);
107static void	tsec_offload_process_frame(struct tsec_softc *sc,
108    struct mbuf *m);
109static void	tsec_setup_multicast(struct tsec_softc *sc);
110static int	tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu);
111
112devclass_t tsec_devclass;
113DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0);
114MODULE_DEPEND(tsec, ether, 1, 1, 1);
115MODULE_DEPEND(tsec, miibus, 1, 1, 1);
116
117struct mtx tsec_phy_mtx;
118
119int
120tsec_attach(struct tsec_softc *sc)
121{
122	uint8_t hwaddr[ETHER_ADDR_LEN];
123	struct ifnet *ifp;
124	int error = 0;
125	int i;
126
127	/* Initialize global (because potentially shared) MII lock */
128	if (!mtx_initialized(&tsec_phy_mtx))
129		mtx_init(&tsec_phy_mtx, "tsec mii", NULL, MTX_DEF);
130
131	/* Reset all TSEC counters */
132	TSEC_TX_RX_COUNTERS_INIT(sc);
133
134	/* Stop DMA engine if enabled by firmware */
135	tsec_dma_ctl(sc, 0);
136
137	/* Reset MAC */
138	tsec_reset_mac(sc);
139
140	/* Disable interrupts for now */
141	tsec_intrs_ctl(sc, 0);
142
	/* Configure defaults for interrupt coalescing */
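	/*
	 * Time thresholds are expressed in units of 64 interface clocks and
	 * count thresholds in frames per interrupt; see the interrupt
	 * coalescing sysctl description further down in this file.
	 */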
144	sc->rx_ic_time = 768;
145	sc->rx_ic_count = 16;
146	sc->tx_ic_time = 768;
147	sc->tx_ic_count = 16;
148	tsec_set_rxic(sc);
149	tsec_set_txic(sc);
150	tsec_add_sysctls(sc);
151
152	/* Allocate a busdma tag and DMA safe memory for TX descriptors. */
153	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag,
154	    &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
155	    (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");
156
157	if (error) {
158		tsec_detach(sc);
159		return (ENXIO);
160	}
161
162	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
163	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag,
164	    &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
165	    (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
166	if (error) {
167		tsec_detach(sc);
168		return (ENXIO);
169	}
170
171	/* Allocate a busdma tag for TX mbufs. */
172	error = bus_dma_tag_create(NULL,	/* parent */
173	    TSEC_TXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
174	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
175	    BUS_SPACE_MAXADDR,			/* highaddr */
176	    NULL, NULL,				/* filtfunc, filtfuncarg */
177	    MCLBYTES * (TSEC_TX_NUM_DESC - 1),	/* maxsize */
178	    TSEC_TX_MAX_DMA_SEGS,		/* nsegments */
179	    MCLBYTES, 0,			/* maxsegsz, flags */
180	    NULL, NULL,				/* lockfunc, lockfuncarg */
181	    &sc->tsec_tx_mtag);			/* dmat */
182	if (error) {
183		device_printf(sc->dev, "failed to allocate busdma tag "
184		    "(tx mbufs)\n");
185		tsec_detach(sc);
186		return (ENXIO);
187	}
188
189	/* Allocate a busdma tag for RX mbufs. */
190	error = bus_dma_tag_create(NULL,	/* parent */
191	    TSEC_RXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
192	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
193	    BUS_SPACE_MAXADDR,			/* highaddr */
194	    NULL, NULL,				/* filtfunc, filtfuncarg */
195	    MCLBYTES,				/* maxsize */
196	    1,					/* nsegments */
197	    MCLBYTES, 0,			/* maxsegsz, flags */
198	    NULL, NULL,				/* lockfunc, lockfuncarg */
199	    &sc->tsec_rx_mtag);			/* dmat */
200	if (error) {
201		device_printf(sc->dev, "failed to allocate busdma tag "
202		    "(rx mbufs)\n");
203		tsec_detach(sc);
204		return (ENXIO);
205	}
206
207	/* Create TX busdma maps */
208	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
209		error = bus_dmamap_create(sc->tsec_tx_mtag, 0,
210		   &sc->tx_bufmap[i].map);
211		if (error) {
212			device_printf(sc->dev, "failed to init TX ring\n");
213			tsec_detach(sc);
214			return (ENXIO);
215		}
216		sc->tx_bufmap[i].map_initialized = 1;
217	}
218
219	/* Create RX busdma maps and zero mbuf handlers */
220	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
221		error = bus_dmamap_create(sc->tsec_rx_mtag, 0,
222		    &sc->rx_data[i].map);
223		if (error) {
224			device_printf(sc->dev, "failed to init RX ring\n");
225			tsec_detach(sc);
226			return (ENXIO);
227		}
228		sc->rx_data[i].mbuf = NULL;
229	}
230
231	/* Create mbufs for RX buffers */
232	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
233		error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
234		    &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
235		if (error) {
236			device_printf(sc->dev, "can't load rx DMA map %d, "
237			    "error = %d\n", i, error);
238			tsec_detach(sc);
239			return (error);
240		}
241	}
242
243	/* Create network interface for upper layers */
244	ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
245	if (ifp == NULL) {
246		device_printf(sc->dev, "if_alloc() failed\n");
247		tsec_detach(sc);
248		return (ENOMEM);
249	}
250
251	ifp->if_softc = sc;
252	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
253	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
254	ifp->if_init = tsec_init;
255	ifp->if_start = tsec_start;
256	ifp->if_ioctl = tsec_ioctl;
257
258	IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
259	ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
260	IFQ_SET_READY(&ifp->if_snd);
261
262	ifp->if_capabilities = IFCAP_VLAN_MTU;
263	if (sc->is_etsec)
264		ifp->if_capabilities |= IFCAP_HWCSUM;
265
266	ifp->if_capenable = ifp->if_capabilities;
267
268#ifdef DEVICE_POLLING
269	/* Advertise that polling is supported */
270	ifp->if_capabilities |= IFCAP_POLLING;
271#endif
272
273	/* Attach PHY(s) */
274	error = mii_attach(sc->dev, &sc->tsec_miibus, ifp, tsec_ifmedia_upd,
275	    tsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->phyaddr, MII_OFFSET_ANY,
276	    0);
277	if (error) {
278		device_printf(sc->dev, "attaching PHYs failed\n");
279		if_free(ifp);
280		sc->tsec_ifp = NULL;
281		tsec_detach(sc);
282		return (error);
283	}
284	sc->tsec_mii = device_get_softc(sc->tsec_miibus);
285
286	/* Set MAC address */
287	tsec_get_hwaddr(sc, hwaddr);
288	ether_ifattach(ifp, hwaddr);
289
290	return (0);
291}
292
293int
294tsec_detach(struct tsec_softc *sc)
295{
296
297	if (sc->tsec_ifp != NULL) {
298#ifdef DEVICE_POLLING
299		if (sc->tsec_ifp->if_capenable & IFCAP_POLLING)
300			ether_poll_deregister(sc->tsec_ifp);
301#endif
302
303		/* Stop TSEC controller and free TX queue */
304		if (sc->sc_rres)
305			tsec_shutdown(sc->dev);
306
307		/* Detach network interface */
308		ether_ifdetach(sc->tsec_ifp);
309		if_free(sc->tsec_ifp);
310		sc->tsec_ifp = NULL;
311	}
312
313	/* Free DMA resources */
314	tsec_free_dma(sc);
315
316	return (0);
317}
318
319int
320tsec_shutdown(device_t dev)
321{
322	struct tsec_softc *sc;
323
324	sc = device_get_softc(dev);
325
326	TSEC_GLOBAL_LOCK(sc);
327	tsec_stop(sc);
328	TSEC_GLOBAL_UNLOCK(sc);
329	return (0);
330}
331
332int
333tsec_suspend(device_t dev)
334{
335
336	/* TODO not implemented! */
337	return (0);
338}
339
340int
341tsec_resume(device_t dev)
342{
343
344	/* TODO not implemented! */
345	return (0);
346}
347
348static void
349tsec_init(void *xsc)
350{
351	struct tsec_softc *sc = xsc;
352
353	TSEC_GLOBAL_LOCK(sc);
354	tsec_init_locked(sc);
355	TSEC_GLOBAL_UNLOCK(sc);
356}
357
358static int
359tsec_mii_wait(struct tsec_softc *sc, uint32_t flags)
360{
361	int timeout;
362
363	/*
	 * The status indicators are not set immediately after a command.
365	 * Discard the first value.
366	 */
367	TSEC_PHY_READ(sc, TSEC_REG_MIIMIND);
368
369	timeout = TSEC_READ_RETRY;
370	while ((TSEC_PHY_READ(sc, TSEC_REG_MIIMIND) & flags) && --timeout)
371		DELAY(TSEC_READ_DELAY);
372
373	return (timeout == 0);
374}
375
377static void
378tsec_init_locked(struct tsec_softc *sc)
379{
380	struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
381	struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
382	struct ifnet *ifp = sc->tsec_ifp;
383	uint32_t val, i;
384	int timeout;
385
386	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
387		return;
388
389	TSEC_GLOBAL_LOCK_ASSERT(sc);
390	tsec_stop(sc);
391
392	/*
393	 * These steps are according to the MPC8555E PowerQUICCIII RM:
394	 * 14.7 Initialization/Application Information
395	 */
396
397	/* Step 1: soft reset MAC */
398	tsec_reset_mac(sc);
399
400	/* Step 2: Initialize MACCFG2 */
401	TSEC_WRITE(sc, TSEC_REG_MACCFG2,
402	    TSEC_MACCFG2_FULLDUPLEX |	/* Full Duplex = 1 */
403	    TSEC_MACCFG2_PADCRC |	/* PAD/CRC append */
404	    TSEC_MACCFG2_GMII |		/* I/F Mode bit */
405	    TSEC_MACCFG2_PRECNT		/* Preamble count = 7 */
406	);
407
408	/* Step 3: Initialize ECNTRL
409	 * While the documentation states that R100M is ignored if RPM is
410	 * not set, it does seem to be needed to get the orange boxes to
411	 * work (which have a Marvell 88E1111 PHY). Go figure.
412	 */
413
414	/*
	 * XXX kludge - use circumstantial evidence to program ECNTRL
416	 * correctly. Ideally we need some board information to guide
417	 * us here.
418	 */
419	i = TSEC_READ(sc, TSEC_REG_ID2);
420	val = (i & 0xffff)
421	    ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM)	/* Sumatra */
422	    : TSEC_ECNTRL_R100M;			/* Orange + CDS */
423	TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);
424
425	/* Step 4: Initialize MAC station address */
426	tsec_set_mac_address(sc);
427
428	/*
429	 * Step 5: Assign a Physical address to the TBI so as to not conflict
430	 * with the external PHY physical address
431	 */
432	TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);
433
434	TSEC_PHY_LOCK(sc);
435
436	/* Step 6: Reset the management interface */
437	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);
438
439	/* Step 7: Setup the MII Mgmt clock speed */
440	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);
441
442	/* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
443	timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY);
444
445	TSEC_PHY_UNLOCK(sc);
446	if (timeout) {
447		if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
448		return;
449	}
450
451	/* Step 9: Setup the MII Mgmt */
452	mii_mediachg(sc->tsec_mii);
453
454	/* Step 10: Clear IEVENT register */
455	TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);
456
457	/* Step 11: Enable interrupts */
458#ifdef DEVICE_POLLING
459	/*
460	 * ...only if polling is not turned on. Disable interrupts explicitly
461	 * if polling is enabled.
462	 */
	if (ifp->if_capenable & IFCAP_POLLING)
464		tsec_intrs_ctl(sc, 0);
465	else
466#endif /* DEVICE_POLLING */
467	tsec_intrs_ctl(sc, 1);
468
469	/* Step 12: Initialize IADDRn */
470	TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
471	TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
472	TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
473	TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
474	TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
475	TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
476	TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
477	TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);
478
479	/* Step 13: Initialize GADDRn */
480	TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
481	TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
482	TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
483	TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
484	TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
485	TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
486	TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
487	TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);
488
489	/* Step 14: Initialize RCTRL */
490	TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);
491
492	/* Step 15: Initialize DMACTRL */
493	tsec_dma_ctl(sc, 1);
494
495	/* Step 16: Initialize FIFO_PAUSE_CTRL */
496	TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);
497
498	/*
499	 * Step 17: Initialize transmit/receive descriptor rings.
500	 * Initialize TBASE and RBASE.
501	 */
502	TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
503	TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);
504
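	/*
	 * The W (wrap) bit on the final descriptor of each ring makes the
	 * controller loop back to TBASE/RBASE instead of running past the
	 * end of the ring.
	 */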
505	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
506		tx_desc[i].bufptr = 0;
507		tx_desc[i].length = 0;
508		tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ?
509		    TSEC_TXBD_W : 0);
510	}
511	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
512	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
513
514	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
515		rx_desc[i].bufptr = sc->rx_data[i].paddr;
516		rx_desc[i].length = 0;
517		rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
518		    ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0);
519	}
520	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
521	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
522
523	/* Step 18: Initialize the maximum receive buffer length */
524	TSEC_WRITE(sc, TSEC_REG_MRBLR, MCLBYTES);
525
526	/* Step 19: Configure ethernet frame sizes */
527	TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_MIN_FRAME_SIZE);
528	tsec_set_mtu(sc, ifp->if_mtu);
529
	/* Step 20: Enable Rx data and RxBD snooping */
531	TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
532	TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);
533
534	/* Step 21: Reset collision counters in hardware */
535	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
536	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
537	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
538	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
539	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);
540
541	/* Step 22: Mask all CAM interrupts */
542	TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
543	TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);
544
545	/* Step 23: Enable Rx and Tx */
546	val = TSEC_READ(sc, TSEC_REG_MACCFG1);
547	val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
548	TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);
549
550	/* Step 24: Reset TSEC counters for Tx and Rx rings */
551	TSEC_TX_RX_COUNTERS_INIT(sc);
552
553	/* Step 25: Setup TCP/IP Off-Load engine */
554	if (sc->is_etsec)
555		tsec_offload_setup(sc);
556
557	/* Step 26: Setup multicast filters */
558	tsec_setup_multicast(sc);
559
560	/* Step 27: Activate network interface */
561	ifp->if_drv_flags |= IFF_DRV_RUNNING;
562	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
563	sc->tsec_if_flags = ifp->if_flags;
564	sc->tsec_watchdog = 0;
565
566	/* Schedule watchdog timeout */
567	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);
568}
569
570static void
571tsec_set_mac_address(struct tsec_softc *sc)
572{
573	uint32_t macbuf[2] = { 0, 0 };
574	char *macbufp, *curmac;
575	int i;
576
577	TSEC_GLOBAL_LOCK_ASSERT(sc);
578
579	KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)),
	    ("tsec_set_mac_address: (%d <= %zd)", ETHER_ADDR_LEN,
581	    sizeof(macbuf)));
582
583	macbufp = (char *)macbuf;
584	curmac = (char *)IF_LLADDR(sc->tsec_ifp);
585
586	/* Correct order of MAC address bytes */
587	for (i = 1; i <= ETHER_ADDR_LEN; i++)
588		macbufp[ETHER_ADDR_LEN-i] = curmac[i-1];
589
590	/* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */
591	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]);
592	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]);
593}
594
595/*
596 * DMA control function, if argument state is:
597 * 0 - DMA engine will be disabled
598 * 1 - DMA engine will be enabled
599 */
600static void
601tsec_dma_ctl(struct tsec_softc *sc, int state)
602{
603	device_t dev;
604	uint32_t dma_flags, timeout;
605
606	dev = sc->dev;
607
608	dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL);
609
610	switch (state) {
611	case 0:
		/* Temporarily clear the graceful stop bits. */
613		tsec_dma_ctl(sc, 1000);
614
		/* Set them again */
616		dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
617		break;
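	/*
	 * State 1000 is only used internally by state 0 above: it programs
	 * the DMA control bits exactly like state 1, but the second switch
	 * below has no matching case, so the transmitter is not restarted.
	 */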
618	case 1000:
619	case 1:
620		/* Set write with response (WWR), wait (WOP) and snoop bits */
621		dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN |
622		    DMACTRL_WWR | DMACTRL_WOP);
623
624		/* Clear graceful stop bits */
625		dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
626		break;
627	default:
628		device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n",
629		    state);
630	}
631
632	TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags);
633
634	switch (state) {
635	case 0:
636		/* Wait for DMA stop */
637		timeout = TSEC_READ_RETRY;
638		while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) &
639		    (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC))))
640			DELAY(TSEC_READ_DELAY);
641
642		if (timeout == 0)
643			device_printf(dev, "tsec_dma_ctl(): timeout!\n");
644		break;
645	case 1:
646		/* Restart transmission function */
647		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
648	}
649}
650
651/*
652 * Interrupts control function, if argument state is:
653 * 0 - all TSEC interrupts will be masked
654 * 1 - all TSEC interrupts will be unmasked
655 */
656static void
657tsec_intrs_ctl(struct tsec_softc *sc, int state)
658{
659	device_t dev;
660
661	dev = sc->dev;
662
663	switch (state) {
664	case 0:
665		TSEC_WRITE(sc, TSEC_REG_IMASK, 0);
666		break;
667	case 1:
668		TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN |
669		    TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN |
670		    TSEC_IMASK_BTEN | TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN |
671		    TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN);
672		break;
673	default:
674		device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n",
675		    state);
676	}
677}
678
679static void
680tsec_reset_mac(struct tsec_softc *sc)
681{
682	uint32_t maccfg1_flags;
683
684	/* Set soft reset bit */
685	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
686	maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET;
687	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
688
689	/* Clear soft reset bit */
690	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
691	maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET;
692	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
693}
694
695static void
696tsec_watchdog(struct tsec_softc *sc)
697{
698	struct ifnet *ifp;
699
700	TSEC_GLOBAL_LOCK_ASSERT(sc);
701
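	/*
	 * The counter is armed in tsec_start_locked() and decremented once
	 * per tick here; act only when it actually reaches zero.
	 */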
702	if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0)
703		return;
704
705	ifp = sc->tsec_ifp;
706	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
707	if_printf(ifp, "watchdog timeout\n");
708
709	tsec_stop(sc);
710	tsec_init_locked(sc);
711}
712
713static void
714tsec_start(struct ifnet *ifp)
715{
716	struct tsec_softc *sc = ifp->if_softc;
717
718	TSEC_TRANSMIT_LOCK(sc);
719	tsec_start_locked(ifp);
720	TSEC_TRANSMIT_UNLOCK(sc);
721}
722
723static void
724tsec_start_locked(struct ifnet *ifp)
725{
726	struct tsec_softc *sc;
727	struct mbuf *m0;
728	struct tsec_tx_fcb *tx_fcb;
729	int csum_flags;
730	int start_tx;
731	uint16_t fcb_flags;
732
733	sc = ifp->if_softc;
734	start_tx = 0;
735
736	TSEC_TRANSMIT_LOCK_ASSERT(sc);
737
738	if (sc->tsec_link == 0)
739		return;
740
741	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
742	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
743
744	for (;;) {
746		if (TSEC_FREE_TX_DESC(sc) < TSEC_TX_MAX_DMA_SEGS) {
747			/* No free descriptors */
748			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
749			break;
750		}
751
752		/* Get packet from the queue */
753		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
754		if (m0 == NULL)
755			break;
756
757		/* Insert TCP/IP Off-load frame control block */
758		fcb_flags = 0;
759		csum_flags = m0->m_pkthdr.csum_flags;
760		if (csum_flags) {
761			M_PREPEND(m0, sizeof(struct tsec_tx_fcb), M_NOWAIT);
762			if (m0 == NULL)
763				break;
764
765			if (csum_flags & CSUM_IP)
766				fcb_flags |= TSEC_TX_FCB_IP4 |
767				    TSEC_TX_FCB_CSUM_IP;
768
769			if (csum_flags & CSUM_TCP)
770				fcb_flags |= TSEC_TX_FCB_TCP |
771				    TSEC_TX_FCB_CSUM_TCP_UDP;
772
773			if (csum_flags & CSUM_UDP)
774				fcb_flags |= TSEC_TX_FCB_UDP |
775				    TSEC_TX_FCB_CSUM_TCP_UDP;
776
777			tx_fcb = mtod(m0, struct tsec_tx_fcb *);
778			tx_fcb->flags = fcb_flags;
779			tx_fcb->l3_offset = ETHER_HDR_LEN;
780			tx_fcb->l4_offset = sizeof(struct ip);
781		}
782
783		tsec_encap(ifp, sc, m0, fcb_flags, &start_tx);
784	}
785	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
786	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
787
788	if (start_tx) {
789		/* Enable transmitter and watchdog timer */
790		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
791		sc->tsec_watchdog = 5;
792	}
793}
794
795static void
796tsec_encap(struct ifnet *ifp, struct tsec_softc *sc, struct mbuf *m0,
797    uint16_t fcb_flags, int *start_tx)
798{
799	bus_dma_segment_t segs[TSEC_TX_MAX_DMA_SEGS];
800	int error, i, nsegs;
801	struct tsec_bufmap *tx_bufmap;
802	uint32_t tx_idx;
803	uint16_t flags;
804
805	TSEC_TRANSMIT_LOCK_ASSERT(sc);
806
807	tx_idx = sc->tx_idx_head;
808	tx_bufmap = &sc->tx_bufmap[tx_idx];
809
810	/* Create mapping in DMA memory */
811	error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, tx_bufmap->map, m0,
812	    segs, &nsegs, BUS_DMA_NOWAIT);
813	if (error == EFBIG) {
814		/* Too many segments!  Defrag and try again. */
815		struct mbuf *m = m_defrag(m0, M_NOWAIT);
816
817		if (m == NULL) {
818			m_freem(m0);
819			return;
820		}
821		m0 = m;
822		error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
823		    tx_bufmap->map, m0, segs, &nsegs, BUS_DMA_NOWAIT);
824	}
825	if (error != 0) {
826		/* Give up. */
827		m_freem(m0);
828		return;
829	}
830
831	bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map,
832	    BUS_DMASYNC_PREWRITE);
833	tx_bufmap->mbuf = m0;
834
835	/*
	 * Fill in the TX descriptors back to front so that the READY bit in
	 * the first descriptor is set last.
838	 */
839	tx_idx = (tx_idx + (uint32_t)nsegs) & (TSEC_TX_NUM_DESC - 1);
840	sc->tx_idx_head = tx_idx;
841	flags = TSEC_TXBD_L | TSEC_TXBD_I | TSEC_TXBD_R | TSEC_TXBD_TC;
842	for (i = nsegs - 1; i >= 0; i--) {
843		struct tsec_desc *tx_desc;
844
845		tx_idx = (tx_idx - 1) & (TSEC_TX_NUM_DESC - 1);
846		tx_desc = &sc->tsec_tx_vaddr[tx_idx];
847		tx_desc->length = segs[i].ds_len;
848		tx_desc->bufptr = segs[i].ds_addr;
849
850		if (i == 0) {
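			/*
			 * Ensure all earlier descriptor stores are visible
			 * before the READY bit of this first descriptor
			 * (written below) is set.
			 */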
851			wmb();
852
853			if (fcb_flags != 0)
854				flags |= TSEC_TXBD_TOE;
855		}
856
857		/*
858		 * Set flags:
859		 *   - wrap
860		 *   - checksum
861		 *   - ready to send
862		 *   - transmit the CRC sequence after the last data byte
863		 *   - interrupt after the last buffer
864		 */
865		tx_desc->flags = (tx_idx == (TSEC_TX_NUM_DESC - 1) ?
866		    TSEC_TXBD_W : 0) | flags;
867
868		flags &= ~(TSEC_TXBD_L | TSEC_TXBD_I);
869	}
870
871	BPF_MTAP(ifp, m0);
872	*start_tx = 1;
873}
874
875static void
876tsec_setfilter(struct tsec_softc *sc)
877{
878	struct ifnet *ifp;
879	uint32_t flags;
880
881	ifp = sc->tsec_ifp;
882	flags = TSEC_READ(sc, TSEC_REG_RCTRL);
883
884	/* Promiscuous mode */
885	if (ifp->if_flags & IFF_PROMISC)
886		flags |= TSEC_RCTRL_PROM;
887	else
888		flags &= ~TSEC_RCTRL_PROM;
889
890	TSEC_WRITE(sc, TSEC_REG_RCTRL, flags);
891}
892
893#ifdef DEVICE_POLLING
894static poll_handler_t tsec_poll;
895
896static int
897tsec_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
898{
899	uint32_t ie;
900	struct tsec_softc *sc = ifp->if_softc;
901	int rx_npkts;
902
903	rx_npkts = 0;
904
905	TSEC_GLOBAL_LOCK(sc);
906	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
907		TSEC_GLOBAL_UNLOCK(sc);
908		return (rx_npkts);
909	}
910
911	if (cmd == POLL_AND_CHECK_STATUS) {
912		tsec_error_intr_locked(sc, count);
913
914		/* Clear all events reported */
915		ie = TSEC_READ(sc, TSEC_REG_IEVENT);
916		TSEC_WRITE(sc, TSEC_REG_IEVENT, ie);
917	}
918
919	tsec_transmit_intr_locked(sc);
920
921	TSEC_GLOBAL_TO_RECEIVE_LOCK(sc);
922
923	rx_npkts = tsec_receive_intr_locked(sc, count);
924
925	TSEC_RECEIVE_UNLOCK(sc);
926
927	return (rx_npkts);
928}
929#endif /* DEVICE_POLLING */
930
931static int
932tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
933{
934	struct tsec_softc *sc = ifp->if_softc;
935	struct ifreq *ifr = (struct ifreq *)data;
936	int mask, error = 0;
937
938	switch (command) {
939	case SIOCSIFMTU:
940		TSEC_GLOBAL_LOCK(sc);
941		if (tsec_set_mtu(sc, ifr->ifr_mtu))
942			ifp->if_mtu = ifr->ifr_mtu;
943		else
944			error = EINVAL;
945		TSEC_GLOBAL_UNLOCK(sc);
946		break;
947	case SIOCSIFFLAGS:
948		TSEC_GLOBAL_LOCK(sc);
949		if (ifp->if_flags & IFF_UP) {
950			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
951				if ((sc->tsec_if_flags ^ ifp->if_flags) &
952				    IFF_PROMISC)
953					tsec_setfilter(sc);
954
955				if ((sc->tsec_if_flags ^ ifp->if_flags) &
956				    IFF_ALLMULTI)
957					tsec_setup_multicast(sc);
958			} else
959				tsec_init_locked(sc);
960		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
961			tsec_stop(sc);
962
963		sc->tsec_if_flags = ifp->if_flags;
964		TSEC_GLOBAL_UNLOCK(sc);
965		break;
966	case SIOCADDMULTI:
967	case SIOCDELMULTI:
968		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
969			TSEC_GLOBAL_LOCK(sc);
970			tsec_setup_multicast(sc);
971			TSEC_GLOBAL_UNLOCK(sc);
		}
		break;
973	case SIOCGIFMEDIA:
974	case SIOCSIFMEDIA:
975		error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media,
976		    command);
977		break;
978	case SIOCSIFCAP:
979		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
980		if ((mask & IFCAP_HWCSUM) && sc->is_etsec) {
981			TSEC_GLOBAL_LOCK(sc);
982			ifp->if_capenable &= ~IFCAP_HWCSUM;
983			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
984			tsec_offload_setup(sc);
985			TSEC_GLOBAL_UNLOCK(sc);
986		}
987#ifdef DEVICE_POLLING
988		if (mask & IFCAP_POLLING) {
989			if (ifr->ifr_reqcap & IFCAP_POLLING) {
990				error = ether_poll_register(tsec_poll, ifp);
991				if (error)
992					return (error);
993
994				TSEC_GLOBAL_LOCK(sc);
995				/* Disable interrupts */
996				tsec_intrs_ctl(sc, 0);
997				ifp->if_capenable |= IFCAP_POLLING;
998				TSEC_GLOBAL_UNLOCK(sc);
999			} else {
1000				error = ether_poll_deregister(ifp);
1001				TSEC_GLOBAL_LOCK(sc);
1002				/* Enable interrupts */
1003				tsec_intrs_ctl(sc, 1);
1004				ifp->if_capenable &= ~IFCAP_POLLING;
1005				TSEC_GLOBAL_UNLOCK(sc);
1006			}
1007		}
1008#endif
1009		break;
1010
1011	default:
1012		error = ether_ioctl(ifp, command, data);
1013	}
1014
1015	/* Flush buffers if not empty */
1016	if (ifp->if_flags & IFF_UP)
1017		tsec_start(ifp);
1018	return (error);
1019}
1020
1021static int
1022tsec_ifmedia_upd(struct ifnet *ifp)
1023{
1024	struct tsec_softc *sc = ifp->if_softc;
1025	struct mii_data *mii;
1026
1027	TSEC_TRANSMIT_LOCK(sc);
1028
1029	mii = sc->tsec_mii;
1030	mii_mediachg(mii);
1031
1032	TSEC_TRANSMIT_UNLOCK(sc);
1033	return (0);
1034}
1035
1036static void
1037tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1038{
1039	struct tsec_softc *sc = ifp->if_softc;
1040	struct mii_data *mii;
1041
1042	TSEC_TRANSMIT_LOCK(sc);
1043
1044	mii = sc->tsec_mii;
1045	mii_pollstat(mii);
1046
1047	ifmr->ifm_active = mii->mii_media_active;
1048	ifmr->ifm_status = mii->mii_media_status;
1049
1050	TSEC_TRANSMIT_UNLOCK(sc);
1051}
1052
1053static int
1054tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
1055    uint32_t *paddr)
1056{
1057	struct mbuf *new_mbuf;
1058	bus_dma_segment_t seg[1];
1059	int error, nsegs;
1060
1061	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
1062
1063	new_mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
1064	if (new_mbuf == NULL)
1065		return (ENOBUFS);
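	/*
	 * Advertise the whole cluster: the controller may fill it with up to
	 * MRBLR (MCLBYTES) bytes, see step 18 in tsec_init_locked().
	 */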
1066	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
1067
1068	if (*mbufp) {
1069		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
1070		bus_dmamap_unload(tag, map);
1071	}
1072
1073	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
1074	    BUS_DMA_NOWAIT);
1075	KASSERT(nsegs == 1, ("Too many segments returned!"));
1076	if (nsegs != 1 || error)
1077		panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
1078
1079#if 0
1080	if (error) {
1081		printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
1082			error);
1083		m_freem(new_mbuf);
1084		return (ENOBUFS);
1085	}
1086#endif
1087
1088#if 0
1089	KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
1090		("Wrong alignment of RX buffer!"));
1091#endif
1092	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
1093
1094	(*mbufp) = new_mbuf;
1095	(*paddr) = seg->ds_addr;
1096	return (0);
1097}
1098
1099static void
1100tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1101{
1102	u_int32_t *paddr;
1103
1104	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
1105	paddr = arg;
1106	*paddr = segs->ds_addr;
1107}
1108
1109static int
1110tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
1111    bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
1112{
1113	int error;
1114
1115	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
1116	error = bus_dma_tag_create(NULL,	/* parent */
1117	    PAGE_SIZE, 0,			/* alignment, boundary */
1118	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
1119	    BUS_SPACE_MAXADDR,			/* highaddr */
1120	    NULL, NULL,				/* filtfunc, filtfuncarg */
1121	    dsize, 1,				/* maxsize, nsegments */
1122	    dsize, 0,				/* maxsegsz, flags */
1123	    NULL, NULL,				/* lockfunc, lockfuncarg */
1124	    dtag);				/* dmat */
1125
1126	if (error) {
1127		device_printf(dev, "failed to allocate busdma %s tag\n",
1128		    dname);
1129		(*vaddr) = NULL;
1130		return (ENXIO);
1131	}
1132
1133	error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1134	    dmap);
1135	if (error) {
1136		device_printf(dev, "failed to allocate %s DMA safe memory\n",
1137		    dname);
1138		bus_dma_tag_destroy(*dtag);
1139		(*vaddr) = NULL;
1140		return (ENXIO);
1141	}
1142
1143	error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize,
1144	    tsec_map_dma_addr, raddr, BUS_DMA_NOWAIT);
1145	if (error) {
1146		device_printf(dev, "cannot get address of the %s "
1147		    "descriptors\n", dname);
1148		bus_dmamem_free(*dtag, *vaddr, *dmap);
1149		bus_dma_tag_destroy(*dtag);
1150		(*vaddr) = NULL;
1151		return (ENXIO);
1152	}
1153
1154	return (0);
1155}
1156
1157static void
1158tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr)
1159{
1160
1161	if (vaddr == NULL)
1162		return;
1163
1164	/* Unmap descriptors from DMA memory */
1165	bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD |
1166	    BUS_DMASYNC_POSTWRITE);
1167	bus_dmamap_unload(dtag, dmap);
1168
1169	/* Free descriptors memory */
1170	bus_dmamem_free(dtag, vaddr, dmap);
1171
1172	/* Destroy descriptors tag */
1173	bus_dma_tag_destroy(dtag);
1174}
1175
1176static void
1177tsec_free_dma(struct tsec_softc *sc)
1178{
1179	int i;
1180
1181	/* Free TX maps */
1182	for (i = 0; i < TSEC_TX_NUM_DESC; i++)
1183		if (sc->tx_bufmap[i].map_initialized)
1184			bus_dmamap_destroy(sc->tsec_tx_mtag,
1185			    sc->tx_bufmap[i].map);
1186	/* Destroy tag for TX mbufs */
1187	bus_dma_tag_destroy(sc->tsec_tx_mtag);
1188
1189	/* Free RX mbufs and maps */
1190	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
1191		if (sc->rx_data[i].mbuf) {
1192			/* Unload buffer from DMA */
1193			bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map,
1194			    BUS_DMASYNC_POSTREAD);
1195			bus_dmamap_unload(sc->tsec_rx_mtag,
1196			    sc->rx_data[i].map);
1197
1198			/* Free buffer */
1199			m_freem(sc->rx_data[i].mbuf);
1200		}
1201		/* Destroy map for this buffer */
1202		if (sc->rx_data[i].map != NULL)
1203			bus_dmamap_destroy(sc->tsec_rx_mtag,
1204			    sc->rx_data[i].map);
1205	}
1206	/* Destroy tag for RX mbufs */
1207	bus_dma_tag_destroy(sc->tsec_rx_mtag);
1208
1209	/* Unload TX/RX descriptors */
1210	tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
1211	    sc->tsec_tx_vaddr);
1212	tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
1213	    sc->tsec_rx_vaddr);
1214}
1215
1216static void
1217tsec_stop(struct tsec_softc *sc)
1218{
1219	struct ifnet *ifp;
1220	uint32_t tmpval;
1221
1222	TSEC_GLOBAL_LOCK_ASSERT(sc);
1223
1224	ifp = sc->tsec_ifp;
1225
1226	/* Disable interface and watchdog timer */
1227	callout_stop(&sc->tsec_callout);
1228	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1229	sc->tsec_watchdog = 0;
1230
1231	/* Disable all interrupts and stop DMA */
1232	tsec_intrs_ctl(sc, 0);
1233	tsec_dma_ctl(sc, 0);
1234
1235	/* Remove pending data from TX queue */
1236	while (sc->tx_idx_tail != sc->tx_idx_head) {
1237		bus_dmamap_sync(sc->tsec_tx_mtag,
1238		    sc->tx_bufmap[sc->tx_idx_tail].map,
1239		    BUS_DMASYNC_POSTWRITE);
1240		bus_dmamap_unload(sc->tsec_tx_mtag,
1241		    sc->tx_bufmap[sc->tx_idx_tail].map);
1242		m_freem(sc->tx_bufmap[sc->tx_idx_tail].mbuf);
1243		sc->tx_idx_tail = (sc->tx_idx_tail + 1)
1244		    & (TSEC_TX_NUM_DESC - 1);
1245	}
1246
1247	/* Disable RX and TX */
1248	tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1);
1249	tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
1250	TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval);
1251	DELAY(10);
1252}
1253
1254static void
1255tsec_tick(void *arg)
1256{
1257	struct tsec_softc *sc = arg;
1258	struct ifnet *ifp;
1259	int link;
1260
1261	TSEC_GLOBAL_LOCK(sc);
1262
1263	tsec_watchdog(sc);
1264
1265	ifp = sc->tsec_ifp;
1266	link = sc->tsec_link;
1267
1268	mii_tick(sc->tsec_mii);
1269
1270	if (link == 0 && sc->tsec_link == 1 &&
1271	    (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)))
1272		tsec_start_locked(ifp);
1273
1274	/* Schedule another timeout one second from now. */
1275	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);
1276
1277	TSEC_GLOBAL_UNLOCK(sc);
1278}
1279
1280/*
 *  This is the core RX routine. It replenishes mbufs in the descriptor ring
 *  and passes frames that have been DMA'ed into host memory to the upper layer.
1283 *
1284 *  Loops at most count times if count is > 0, or until done if count < 0.
1285 */
1286static int
1287tsec_receive_intr_locked(struct tsec_softc *sc, int count)
1288{
1289	struct tsec_desc *rx_desc;
1290	struct ifnet *ifp;
1291	struct rx_data_type *rx_data;
1292	struct mbuf *m;
1293	uint32_t i;
1294	int c, rx_npkts;
1295	uint16_t flags;
1296
1297	TSEC_RECEIVE_LOCK_ASSERT(sc);
1298
1299	ifp = sc->tsec_ifp;
1300	rx_data = sc->rx_data;
1301	rx_npkts = 0;
1302
1303	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
1304	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1305
1306	for (c = 0; ; c++) {
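		/*
		 * A negative count (interrupt path) means run until the ring
		 * is drained; a non-negative count (polling path) bounds the
		 * amount of work done here.
		 */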
1307		if (count >= 0 && count-- == 0)
1308			break;
1309
1310		rx_desc = TSEC_GET_CUR_RX_DESC(sc);
1311		flags = rx_desc->flags;
1312
1313		/* Check if there is anything to receive */
1314		if ((flags & TSEC_RXBD_E) || (c >= TSEC_RX_NUM_DESC)) {
1315			/*
1316			 * Avoid generating another interrupt
1317			 */
1318			if (flags & TSEC_RXBD_E)
1319				TSEC_WRITE(sc, TSEC_REG_IEVENT,
1320				    TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
1321			/*
1322			 * We didn't consume current descriptor and have to
1323			 * return it to the queue
1324			 */
1325			TSEC_BACK_CUR_RX_DESC(sc);
1326			break;
1327		}
1328
1329		if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO |
1330		    TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) {
1331
1332			rx_desc->length = 0;
1333			rx_desc->flags = (rx_desc->flags &
1334			    ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I;
1335
1336			if (sc->frame != NULL) {
1337				m_free(sc->frame);
1338				sc->frame = NULL;
1339			}
1340
1341			continue;
1342		}
1343
1344		/* Ok... process frame */
1345		i = TSEC_GET_CUR_RX_DESC_CNT(sc);
1346		m = rx_data[i].mbuf;
1347		m->m_len = rx_desc->length;
1348
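		/*
		 * A frame larger than one RX buffer spans several
		 * descriptors; chain the pieces on sc->frame until the
		 * descriptor flagged as last (TSEC_RXBD_L) arrives.
		 */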
1349		if (sc->frame != NULL) {
1350			if ((flags & TSEC_RXBD_L) != 0)
1351				m->m_len -= m_length(sc->frame, NULL);
1352
1353			m->m_flags &= ~M_PKTHDR;
1354			m_cat(sc->frame, m);
1355		} else {
1356			sc->frame = m;
1357		}
1358
1359		m = NULL;
1360
1361		if ((flags & TSEC_RXBD_L) != 0) {
1362			m = sc->frame;
1363			sc->frame = NULL;
1364		}
1365
1366		if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map,
1367		    &rx_data[i].mbuf, &rx_data[i].paddr)) {
1368			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1369			/*
1370			 * We ran out of mbufs; didn't consume current
1371			 * descriptor and have to return it to the queue.
1372			 */
1373			TSEC_BACK_CUR_RX_DESC(sc);
1374			break;
1375		}
1376
1377		/* Attach new buffer to descriptor and clear flags */
1378		rx_desc->bufptr = rx_data[i].paddr;
1379		rx_desc->length = 0;
1380		rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
1381		    TSEC_RXBD_E | TSEC_RXBD_I;
1382
1383		if (m != NULL) {
1384			m->m_pkthdr.rcvif = ifp;
1385
1386			m_fixhdr(m);
1387			m_adj(m, -ETHER_CRC_LEN);
1388
1389			if (sc->is_etsec)
1390				tsec_offload_process_frame(sc, m);
1391
1392			TSEC_RECEIVE_UNLOCK(sc);
1393			(*ifp->if_input)(ifp, m);
1394			TSEC_RECEIVE_LOCK(sc);
1395			rx_npkts++;
1396		}
1397	}
1398
1399	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
1400	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1401
1402	/*
1403	 * Make sure TSEC receiver is not halted.
1404	 *
1405	 * Various conditions can stop the TSEC receiver, but not all are
1406	 * signaled and handled by error interrupt, so make sure the receiver
1407	 * is running. Writing to TSEC_REG_RSTAT restarts the receiver when
1408	 * halted, and is harmless if already running.
1409	 */
1410	TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT);
1411	return (rx_npkts);
1412}
1413
1414void
1415tsec_receive_intr(void *arg)
1416{
1417	struct tsec_softc *sc = arg;
1418
1419	TSEC_RECEIVE_LOCK(sc);
1420
1421#ifdef DEVICE_POLLING
1422	if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) {
1423		TSEC_RECEIVE_UNLOCK(sc);
1424		return;
1425	}
1426#endif
1427
1428	/* Confirm the interrupt was received by driver */
1429	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
1430	tsec_receive_intr_locked(sc, -1);
1431
1432	TSEC_RECEIVE_UNLOCK(sc);
1433}
1434
1435static void
1436tsec_transmit_intr_locked(struct tsec_softc *sc)
1437{
1438	struct ifnet *ifp;
1439	uint32_t tx_idx;
1440
1441	TSEC_TRANSMIT_LOCK_ASSERT(sc);
1442
1443	ifp = sc->tsec_ifp;
1444
1445	/* Update collision statistics */
1446	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, TSEC_READ(sc, TSEC_REG_MON_TNCL));
1447
1448	/* Reset collision counters in hardware */
1449	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
1450	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
1451	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
1452	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
1453	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);
1454
1455	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
1456	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1457
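	/*
	 * Walk the ring from tail towards head, reclaiming descriptors the
	 * controller has finished with (READY bit cleared) and freeing the
	 * associated mbufs.
	 */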
1458	tx_idx = sc->tx_idx_tail;
1459	while (tx_idx != sc->tx_idx_head) {
1460		struct tsec_desc *tx_desc;
1461		struct tsec_bufmap *tx_bufmap;
1462
1463		tx_desc = &sc->tsec_tx_vaddr[tx_idx];
1464		if (tx_desc->flags & TSEC_TXBD_R) {
1465			break;
1466		}
1467
1468		tx_bufmap = &sc->tx_bufmap[tx_idx];
1469		tx_idx = (tx_idx + 1) & (TSEC_TX_NUM_DESC - 1);
1470		if (tx_bufmap->mbuf == NULL)
1471			continue;
1472
1473		/*
1474		 * This is the last buf in this packet, so unmap and free it.
1475		 */
1476		bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map,
1477		    BUS_DMASYNC_POSTWRITE);
1478		bus_dmamap_unload(sc->tsec_tx_mtag, tx_bufmap->map);
1479		m_freem(tx_bufmap->mbuf);
1480		tx_bufmap->mbuf = NULL;
1481
1482		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1483	}
1484	sc->tx_idx_tail = tx_idx;
1485	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
1486	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1487
1488	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1489	tsec_start_locked(ifp);
1490
1491	if (sc->tx_idx_tail == sc->tx_idx_head)
1492		sc->tsec_watchdog = 0;
1493}
1494
1495void
1496tsec_transmit_intr(void *arg)
1497{
1498	struct tsec_softc *sc = arg;
1499
1500	TSEC_TRANSMIT_LOCK(sc);
1501
1502#ifdef DEVICE_POLLING
1503	if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) {
1504		TSEC_TRANSMIT_UNLOCK(sc);
1505		return;
1506	}
1507#endif
1508	/* Confirm the interrupt was received by driver */
1509	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF);
1510	tsec_transmit_intr_locked(sc);
1511
1512	TSEC_TRANSMIT_UNLOCK(sc);
1513}
1514
1515static void
1516tsec_error_intr_locked(struct tsec_softc *sc, int count)
1517{
1518	struct ifnet *ifp;
1519	uint32_t eflags;
1520
1521	TSEC_GLOBAL_LOCK_ASSERT(sc);
1522
1523	ifp = sc->tsec_ifp;
1524
1525	eflags = TSEC_READ(sc, TSEC_REG_IEVENT);
1526
1527	/* Clear events bits in hardware */
1528	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY |
1529	    TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT |
1530	    TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC |
1531	    TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN);
1532
1533	/* Check transmitter errors */
1534	if (eflags & TSEC_IEVENT_TXE) {
1535		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1536
1537		if (eflags & TSEC_IEVENT_LC)
1538			if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1539
1540		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
1541	}
1542
1543	/* Check for discarded frame due to a lack of buffers */
1544	if (eflags & TSEC_IEVENT_BSY) {
1545		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1546	}
1547
1548	if (ifp->if_flags & IFF_DEBUG)
1549		if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n",
1550		    eflags);
1551
1552	if (eflags & TSEC_IEVENT_EBERR) {
		if_printf(ifp, "System bus error occurred during "
1554		    "DMA transaction (flags: 0x%x)\n", eflags);
1555		tsec_init_locked(sc);
1556	}
1557
1558	if (eflags & TSEC_IEVENT_BABT)
1559		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1560
1561	if (eflags & TSEC_IEVENT_BABR)
1562		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1563}
1564
1565void
1566tsec_error_intr(void *arg)
1567{
1568	struct tsec_softc *sc = arg;
1569
1570	TSEC_GLOBAL_LOCK(sc);
1571	tsec_error_intr_locked(sc, -1);
1572	TSEC_GLOBAL_UNLOCK(sc);
1573}
1574
1575int
1576tsec_miibus_readreg(device_t dev, int phy, int reg)
1577{
1578	struct tsec_softc *sc;
1579	int timeout;
1580	int rv;
1581
1582	sc = device_get_softc(dev);
1583
1584	TSEC_PHY_LOCK();
1585	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
1586	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, 0);
1587	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE);
1588
1589	timeout = tsec_mii_wait(sc, TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY);
1590	rv = TSEC_PHY_READ(sc, TSEC_REG_MIIMSTAT);
1591	TSEC_PHY_UNLOCK();
1592
1593	if (timeout)
1594		device_printf(dev, "Timeout while reading from PHY!\n");
1595
1596	return (rv);
1597}
1598
1599int
1600tsec_miibus_writereg(device_t dev, int phy, int reg, int value)
1601{
1602	struct tsec_softc *sc;
1603	int timeout;
1604
1605	sc = device_get_softc(dev);
1606
1607	TSEC_PHY_LOCK();
1608	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
1609	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCON, value);
1610	timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY);
1611	TSEC_PHY_UNLOCK();
1612
1613	if (timeout)
1614		device_printf(dev, "Timeout while writing to PHY!\n");
1615
1616	return (0);
1617}
1618
1619void
1620tsec_miibus_statchg(device_t dev)
1621{
1622	struct tsec_softc *sc;
1623	struct mii_data *mii;
1624	uint32_t ecntrl, id, tmp;
1625	int link;
1626
1627	sc = device_get_softc(dev);
1628	mii = sc->tsec_mii;
1629	link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0);
1630
1631	tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF;
1632
1633	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
1634		tmp |= TSEC_MACCFG2_FULLDUPLEX;
1635	else
1636		tmp &= ~TSEC_MACCFG2_FULLDUPLEX;
1637
1638	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1639	case IFM_1000_T:
1640	case IFM_1000_SX:
1641		tmp |= TSEC_MACCFG2_GMII;
1642		sc->tsec_link = link;
1643		break;
1644	case IFM_100_TX:
1645	case IFM_10_T:
1646		tmp |= TSEC_MACCFG2_MII;
1647		sc->tsec_link = link;
1648		break;
1649	case IFM_NONE:
1650		if (link)
1651			device_printf(dev, "No speed selected but link "
1652			    "active!\n");
1653		sc->tsec_link = 0;
1654		return;
1655	default:
1656		sc->tsec_link = 0;
1657		device_printf(dev, "Unknown speed (%d), link %s!\n",
1658		    IFM_SUBTYPE(mii->mii_media_active),
1659		        ((link) ? "up" : "down"));
1660		return;
1661	}
1662	TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp);
1663
1664	/* XXX kludge - use circumstantial evidence for reduced mode. */
1665	id = TSEC_READ(sc, TSEC_REG_ID2);
1666	if (id & 0xffff) {
1667		ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M;
1668		ecntrl |= (tmp & TSEC_MACCFG2_MII) ? TSEC_ECNTRL_R100M : 0;
1669		TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl);
1670	}
1671}
1672
1673static void
1674tsec_add_sysctls(struct tsec_softc *sc)
1675{
1676	struct sysctl_ctx_list *ctx;
1677	struct sysctl_oid_list *children;
1678	struct sysctl_oid *tree;
1679
1680	ctx = device_get_sysctl_ctx(sc->dev);
1681	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1682	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "TSEC interrupt coalescing");
1684	children = SYSCTL_CHILDREN(tree);
1685
1686	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1687	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_time,
1688	    "I", "IC RX time threshold (0-65535)");
1689	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_count",
1690	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_count,
1691	    "I", "IC RX frame count threshold (0-255)");
1692
1693	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1694	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_time,
1695	    "I", "IC TX time threshold (0-65535)");
1696	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_count",
1697	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_count,
1698	    "I", "IC TX frame count threshold (0-255)");
1699}
1700
1701/*
1702 * With Interrupt Coalescing (IC) active, a transmit/receive frame
 * interrupt is raised when either:
 *
 * - a threshold-defined period of time has elapsed, or
 * - a threshold-defined number of frames has been received/transmitted,
 *   whichever occurs first.
1708 *
1709 * The following sysctls regulate IC behaviour (for TX/RX separately):
1710 *
1711 * dev.tsec.<unit>.int_coal.rx_time
1712 * dev.tsec.<unit>.int_coal.rx_count
1713 * dev.tsec.<unit>.int_coal.tx_time
1714 * dev.tsec.<unit>.int_coal.tx_count
1715 *
1716 * Values:
1717 *
1718 * - 0 for either time or count disables IC on the given TX/RX path
1719 *
 * - count: 1-255 (expresses the frame count; note that a value of 1 is
 *   effectively IC off)
1722 *
1723 * - time: 1-65535 (value corresponds to a real time period and is
1724 *   expressed in units equivalent to 64 TSEC interface clocks, i.e. one timer
1725 *   threshold unit is 26.5 us, 2.56 us, or 512 ns, corresponding to 10 Mbps,
 *   100 Mbps, or 1 Gbps, respectively). For a detailed discussion consult
 *   the TSEC reference manual.
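 *
 * For example (unit number depends on the system):
 *
 *   sysctl dev.tsec.0.int_coal.rx_time=768
 *   sysctl dev.tsec.0.int_coal.rx_count=16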
1728 */
1729static int
1730tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS)
1731{
1732	int error;
1733	uint32_t time;
1734	struct tsec_softc *sc = (struct tsec_softc *)arg1;
1735
1736	time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1737
1738	error = sysctl_handle_int(oidp, &time, 0, req);
1739	if (error != 0)
1740		return (error);
1741
1742	if (time > 65535)
1743		return (EINVAL);
1744
1745	TSEC_IC_LOCK(sc);
1746	if (arg2 == TSEC_IC_RX) {
1747		sc->rx_ic_time = time;
1748		tsec_set_rxic(sc);
1749	} else {
1750		sc->tx_ic_time = time;
1751		tsec_set_txic(sc);
1752	}
1753	TSEC_IC_UNLOCK(sc);
1754
1755	return (0);
1756}
1757
1758static int
1759tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS)
1760{
1761	int error;
1762	uint32_t count;
1763	struct tsec_softc *sc = (struct tsec_softc *)arg1;
1764
1765	count = (arg2 == TSEC_IC_RX) ? sc->rx_ic_count : sc->tx_ic_count;
1766
1767	error = sysctl_handle_int(oidp, &count, 0, req);
1768	if (error != 0)
1769		return (error);
1770
1771	if (count > 255)
1772		return (EINVAL);
1773
1774	TSEC_IC_LOCK(sc);
1775	if (arg2 == TSEC_IC_RX) {
1776		sc->rx_ic_count = count;
1777		tsec_set_rxic(sc);
1778	} else {
1779		sc->tx_ic_count = count;
1780		tsec_set_txic(sc);
1781	}
1782	TSEC_IC_UNLOCK(sc);
1783
1784	return (0);
1785}
1786
1787static void
1788tsec_set_rxic(struct tsec_softc *sc)
1789{
1790	uint32_t rxic_val;
1791
1792	if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0)
1793		/* Disable RX IC */
1794		rxic_val = 0;
1795	else {
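		/*
		 * The high bit enables coalescing; the frame-count threshold
		 * is programmed at bits 28:21 and the timer threshold in the
		 * low 16 bits.
		 */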
1796		rxic_val = 0x80000000;
1797		rxic_val |= (sc->rx_ic_count << 21);
1798		rxic_val |= sc->rx_ic_time;
1799	}
1800
1801	TSEC_WRITE(sc, TSEC_REG_RXIC, rxic_val);
1802}
1803
1804static void
1805tsec_set_txic(struct tsec_softc *sc)
1806{
1807	uint32_t txic_val;
1808
1809	if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0)
1810		/* Disable TX IC */
1811		txic_val = 0;
1812	else {
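		/* Same register layout as RXIC, see tsec_set_rxic() above. */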
1813		txic_val = 0x80000000;
1814		txic_val |= (sc->tx_ic_count << 21);
1815		txic_val |= sc->tx_ic_time;
1816	}
1817
1818	TSEC_WRITE(sc, TSEC_REG_TXIC, txic_val);
1819}
1820
1821static void
1822tsec_offload_setup(struct tsec_softc *sc)
1823{
1824	struct ifnet *ifp = sc->tsec_ifp;
1825	uint32_t reg;
1826
1827	TSEC_GLOBAL_LOCK_ASSERT(sc);
1828
1829	reg = TSEC_READ(sc, TSEC_REG_TCTRL);
1830	reg |= TSEC_TCTRL_IPCSEN | TSEC_TCTRL_TUCSEN;
1831
1832	if (ifp->if_capenable & IFCAP_TXCSUM)
1833		ifp->if_hwassist = TSEC_CHECKSUM_FEATURES;
1834	else
1835		ifp->if_hwassist = 0;
1836
1837	TSEC_WRITE(sc, TSEC_REG_TCTRL, reg);
1838
1839	reg = TSEC_READ(sc, TSEC_REG_RCTRL);
1840	reg &= ~(TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | TSEC_RCTRL_PRSDEP);
1841	reg |= TSEC_RCTRL_PRSDEP_PARSE_L2 | TSEC_RCTRL_VLEX;
1842
1843	if (ifp->if_capenable & IFCAP_RXCSUM)
1844		reg |= TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN |
1845		    TSEC_RCTRL_PRSDEP_PARSE_L234;
1846
1847	TSEC_WRITE(sc, TSEC_REG_RCTRL, reg);
1848}
1849
1851static void
1852tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m)
1853{
1854	struct tsec_rx_fcb rx_fcb;
1855	int csum_flags = 0;
1856	int protocol, flags;
1857
1858	TSEC_RECEIVE_LOCK_ASSERT(sc);
1859
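	/*
	 * The controller prepends a frame control block (FCB) carrying
	 * checksum and VLAN results to each received frame; copy it out,
	 * translate it into mbuf header flags and strip it from the data.
	 */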
1860	m_copydata(m, 0, sizeof(struct tsec_rx_fcb), (caddr_t)(&rx_fcb));
1861	flags = rx_fcb.flags;
1862	protocol = rx_fcb.protocol;
1863
1864	if (TSEC_RX_FCB_IP_CSUM_CHECKED(flags)) {
1865		csum_flags |= CSUM_IP_CHECKED;
1866
1867		if ((flags & TSEC_RX_FCB_IP_CSUM_ERROR) == 0)
1868			csum_flags |= CSUM_IP_VALID;
1869	}
1870
1871	if ((protocol == IPPROTO_TCP || protocol == IPPROTO_UDP) &&
1872	    TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) &&
1873	    (flags & TSEC_RX_FCB_TCP_UDP_CSUM_ERROR) == 0) {
1874
1875		csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1876		m->m_pkthdr.csum_data = 0xFFFF;
1877	}
1878
1879	m->m_pkthdr.csum_flags = csum_flags;
1880
1881	if (flags & TSEC_RX_FCB_VLAN) {
1882		m->m_pkthdr.ether_vtag = rx_fcb.vlan;
1883		m->m_flags |= M_VLANTAG;
1884	}
1885
1886	m_adj(m, sizeof(struct tsec_rx_fcb));
1887}
1888
1889static void
1890tsec_setup_multicast(struct tsec_softc *sc)
1891{
1892	uint32_t hashtable[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
1893	struct ifnet *ifp = sc->tsec_ifp;
1894	struct ifmultiaddr *ifma;
1895	uint32_t h;
1896	int i;
1897
1898	TSEC_GLOBAL_LOCK_ASSERT(sc);
1899
1900	if (ifp->if_flags & IFF_ALLMULTI) {
1901		for (i = 0; i < 8; i++)
1902			TSEC_WRITE(sc, TSEC_REG_GADDR(i), 0xFFFFFFFF);
1903
1904		return;
1905	}
1906
1907	if_maddr_rlock(ifp);
1908	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1910		if (ifma->ifma_addr->sa_family != AF_LINK)
1911			continue;
1912
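		/*
		 * The top 8 bits of the big-endian CRC32 of the link-level
		 * address select one of 256 group hash bits spread across
		 * the eight GADDR registers.
		 */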
1913		h = (ether_crc32_be(LLADDR((struct sockaddr_dl *)
1914		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 24) & 0xFF;
1915
1916		hashtable[(h >> 5)] |= 1 << (0x1F - (h & 0x1F));
1917	}
1918	if_maddr_runlock(ifp);
1919
1920	for (i = 0; i < 8; i++)
1921		TSEC_WRITE(sc, TSEC_REG_GADDR(i), hashtable[i]);
1922}
1923
1924static int
1925tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu)
1926{
1927
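	/*
	 * Convert the interface MTU into the full frame length (Ethernet
	 * header, optional VLAN tag and CRC included) that MAXFRM expects.
	 */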
1928	mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
1929
1930	TSEC_GLOBAL_LOCK_ASSERT(sc);
1931
1932	if (mtu >= TSEC_MIN_FRAME_SIZE && mtu <= TSEC_MAX_FRAME_SIZE) {
1933		TSEC_WRITE(sc, TSEC_REG_MAXFRM, mtu);
1934		return (mtu);
1935	}
1936
1937	return (0);
1938}
1939