1/*-
2 * Copyright (c) 2012,2013 Bjoern A. Zeeb
3 * Copyright (c) 2014 Robert N. M. Watson
4 * All rights reserved.
5 *
6 * This software was developed by SRI International and the University of
7 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-11-C-0249)
8 * ("MRC2"), as part of the DARPA MRC research programme.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31/*
32 * Altera Triple-Speed Ethernet MegaCore, Function User Guide
33 * UG-01008-3.0, Software Version: 12.0, June 2012.
34 * Available at the time of writing at:
35 * http://www.altera.com/literature/ug/ug_ethernet.pdf
36 *
37 * We are using a Marvell E1111 (Alaska) PHY on the DE4.  See mii/e1000phy.c.
38 */
39/*
40 * XXX-BZ NOTES:
41 * - ifOutBroadcastPkts are only counted if both ether dst and src are all-1s;
42 *   this seems to be an IP core bug: ether broadcasts are counted as multicast.
43 *   Is this still the case?
44 * - figure out why the TX FIFO fill status and intr did not work as expected.
45 * - test 100Mbit/s and 10Mbit/s
46 * - blacklist the one special factory-programmed Ethernet address (for now
47 *   hardcoded, later from loader?)
48 * - resolve all XXX, left as reminders to shake out details later
49 * - Jumbo frame support
50 */
51
52#include <sys/cdefs.h>
53__FBSDID("$FreeBSD$");
54
55#include "opt_device_polling.h"
56
57#include <sys/param.h>
58#include <sys/systm.h>
59#include <sys/kernel.h>
60#include <sys/bus.h>
61#include <sys/endian.h>
62#include <sys/jail.h>
63#include <sys/lock.h>
64#include <sys/module.h>
65#include <sys/mutex.h>
66#include <sys/proc.h>
67#include <sys/socket.h>
68#include <sys/sockio.h>
69#include <sys/types.h>
70
71#include <net/ethernet.h>
72#include <net/if.h>
73#include <net/if_var.h>
74#include <net/if_dl.h>
75#include <net/if_media.h>
76#include <net/if_types.h>
77#include <net/if_vlan_var.h>
78
79#include <net/bpf.h>
80
81#include <machine/bus.h>
82#include <machine/resource.h>
83#include <sys/rman.h>
84
85#include <dev/mii/mii.h>
86#include <dev/mii/miivar.h>
87
88#include <dev/altera/atse/if_atsereg.h>
89#include <dev/altera/atse/a_api.h>
90
91MODULE_DEPEND(atse, ether, 1, 1, 1);
92MODULE_DEPEND(atse, miibus, 1, 1, 1);
93
94
95#define	ATSE_WATCHDOG_TIME	5
96
97#ifdef DEVICE_POLLING
98static poll_handler_t atse_poll;
99#endif
100
101/* XXX once we do parallel attach, we will need a global lock for this. */
102#define	ATSE_ETHERNET_OPTION_BITS_UNDEF	0
103#define	ATSE_ETHERNET_OPTION_BITS_READ	1
104static int atse_ethernet_option_bits_flag = ATSE_ETHERNET_OPTION_BITS_UNDEF;
105static uint8_t atse_ethernet_option_bits[ALTERA_ETHERNET_OPTION_BITS_LEN];
106
107static int	atse_intr_debug_enable = 0;
108SYSCTL_INT(_debug, OID_AUTO, atse_intr_debug_enable, CTLFLAG_RW,
109    &atse_intr_debug_enable, 0,
110    "Extra debugging output for atse interrupts");
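/* Runtime-tunable, e.g.: sysctl debug.atse_intr_debug_enable=1 */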
111
112/*
113 * Softc and critical resource locking.
114 */
115#define	ATSE_LOCK(_sc)		mtx_lock(&(_sc)->atse_mtx)
116#define	ATSE_UNLOCK(_sc)	mtx_unlock(&(_sc)->atse_mtx)
117#define	ATSE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->atse_mtx, MA_OWNED)
118
119#define	ATSE_TX_PENDING(sc)	(sc->atse_tx_m != NULL ||		\
120				    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
121
122#ifdef DEBUG
123#define	DPRINTF(format, ...)	printf(format, __VA_ARGS__)
124#else
125#define	DPRINTF(format, ...)
126#endif
127
128/* a_api.c functions; factor out? */
129static inline void
130a_onchip_fifo_mem_core_write(struct resource *res, uint32_t off,
131    uint32_t val4, const char *desc, const char *f, const int l)
132{
133
134	val4 = htole32(val4);
135	DPRINTF("[%s:%d] FIFOW %s 0x%08x = 0x%08x\n", f, l, desc, off, val4);
136	bus_write_4(res, off, val4);
137}
138static inline uint32_t
139a_onchip_fifo_mem_core_read(struct resource *res, uint32_t off,
140    const char *desc, const char *f, const int l)
141{
142	uint32_t val4;
143
144	val4 = le32toh(bus_read_4(res, off));
145	DPRINTF("[%s:%d] FIFOR %s 0x%08x = 0x%08x\n", f, l, desc, off, val4);
146	return (val4);
147}
148
149/* The FIFO does an endian conversion, so we must not do it as well. */
150/* XXX-BZ in fact we should do an htobe32 so LE would be fine as well? */
151#define	ATSE_TX_DATA_WRITE(sc, val4)					\
152	bus_write_4((sc)->atse_tx_mem_res, A_ONCHIP_FIFO_MEM_CORE_DATA, val4)
153
154#define	ATSE_TX_META_WRITE(sc, val4)					\
155	a_onchip_fifo_mem_core_write((sc)->atse_tx_mem_res,		\
156	    A_ONCHIP_FIFO_MEM_CORE_METADATA,				\
157	    (val4), "TXM", __func__, __LINE__)
158#define	ATSE_TX_META_READ(sc)						\
159	a_onchip_fifo_mem_core_read((sc)->atse_tx_mem_res,		\
160	    A_ONCHIP_FIFO_MEM_CORE_METADATA,				\
161	    "TXM", __func__, __LINE__)
162
163#define	ATSE_TX_READ_FILL_LEVEL(sc)					\
164	a_onchip_fifo_mem_core_read((sc)->atse_txc_mem_res,		\
165	    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_FILL_LEVEL,		\
166	    "TX_FILL", __func__, __LINE__)
167#define	ATSE_RX_READ_FILL_LEVEL(sc)					\
168	a_onchip_fifo_mem_core_read((sc)->atse_rxc_mem_res,		\
169	    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_FILL_LEVEL,		\
170	    "RX_FILL", __func__, __LINE__)
171
172/* The FIFO does an endian conversion, so we must not do it as well. */
173/* XXX-BZ in fact we should do an htobe32 so LE would be fine as well? */
174#define	ATSE_RX_DATA_READ(sc)						\
175	bus_read_4((sc)->atse_rx_mem_res, A_ONCHIP_FIFO_MEM_CORE_DATA)
176#define	ATSE_RX_META_READ(sc)						\
177	a_onchip_fifo_mem_core_read((sc)->atse_rx_mem_res,		\
178	    A_ONCHIP_FIFO_MEM_CORE_METADATA,				\
179	    "RXM", __func__, __LINE__)
180
181#define	ATSE_RX_STATUS_READ(sc)						\
182	a_onchip_fifo_mem_core_read((sc)->atse_rxc_mem_res,		\
183	    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_I_STATUS,			\
184	    "RX_EVENT", __func__, __LINE__)
185
186#define	ATSE_TX_STATUS_READ(sc)						\
187	a_onchip_fifo_mem_core_read((sc)->atse_txc_mem_res,		\
188	    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_I_STATUS,			\
189	    "TX_EVENT", __func__, __LINE__)
190
191#define	ATSE_RX_EVENT_READ(sc)						\
192	a_onchip_fifo_mem_core_read((sc)->atse_rxc_mem_res,		\
193	    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT,			\
194	    "RX_EVENT", __func__, __LINE__)
195
196#define	ATSE_TX_EVENT_READ(sc)						\
197	a_onchip_fifo_mem_core_read((sc)->atse_txc_mem_res,		\
198	    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT,			\
199	    "TX_EVENT", __func__, __LINE__)
200
201#define	ATSE_RX_EVENT_CLEAR(sc)						\
202	do {								\
203		uint32_t val4;						\
204									\
205		val4 = a_onchip_fifo_mem_core_read(			\
206		    (sc)->atse_rxc_mem_res,				\
207		    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT,		\
208		    "RX_EVENT", __func__, __LINE__);			\
209		if (val4 != 0x00)					\
210			a_onchip_fifo_mem_core_write(			\
211			    (sc)->atse_rxc_mem_res,			\
212			    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT,	\
213			    val4, "RX_EVENT", __func__, __LINE__);	\
214	} while(0)
215#define	ATSE_TX_EVENT_CLEAR(sc)						\
216	do {								\
217		uint32_t val4;						\
218									\
219		val4 = a_onchip_fifo_mem_core_read(			\
220		    (sc)->atse_txc_mem_res,				\
221		    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT,		\
222		    "TX_EVENT", __func__, __LINE__);			\
223		if (val4 != 0x00)					\
224			a_onchip_fifo_mem_core_write(			\
225			    (sc)->atse_txc_mem_res,			\
226			    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT,	\
227			    val4, "TX_EVENT", __func__, __LINE__);	\
228	} while(0)
229
230#define	ATSE_RX_EVENTS	(A_ONCHIP_FIFO_MEM_CORE_INTR_FULL |	\
231			    A_ONCHIP_FIFO_MEM_CORE_INTR_OVERFLOW |	\
232			    A_ONCHIP_FIFO_MEM_CORE_INTR_UNDERFLOW)
233#define	ATSE_RX_INTR_ENABLE(sc)						\
234	a_onchip_fifo_mem_core_write((sc)->atse_rxc_mem_res,		\
235	    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE,		\
236	    ATSE_RX_EVENTS,						\
237	    "RX_INTR", __func__, __LINE__)	/* XXX-BZ review later. */
238#define	ATSE_RX_INTR_DISABLE(sc)					\
239	a_onchip_fifo_mem_core_write((sc)->atse_rxc_mem_res,		\
240	    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, 0,		\
241	    "RX_INTR", __func__, __LINE__)
242#define	ATSE_RX_INTR_READ(sc)						\
243	a_onchip_fifo_mem_core_read((sc)->atse_rxc_mem_res,		\
244	    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE,		\
245	    "RX_INTR", __func__, __LINE__)
246
247#define	ATSE_TX_EVENTS	(A_ONCHIP_FIFO_MEM_CORE_INTR_EMPTY |		\
248			    A_ONCHIP_FIFO_MEM_CORE_INTR_OVERFLOW |	\
249			    A_ONCHIP_FIFO_MEM_CORE_INTR_UNDERFLOW)
250#define	ATSE_TX_INTR_ENABLE(sc)						\
251	a_onchip_fifo_mem_core_write((sc)->atse_txc_mem_res,		\
252	    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE,		\
253	    ATSE_TX_EVENTS,						\
254	    "TX_INTR", __func__, __LINE__)	/* XXX-BZ review later. */
255#define	ATSE_TX_INTR_DISABLE(sc)					\
256	a_onchip_fifo_mem_core_write((sc)->atse_txc_mem_res,		\
257	    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, 0,		\
258	    "TX_INTR", __func__, __LINE__)
259#define	ATSE_TX_INTR_READ(sc)						\
260	a_onchip_fifo_mem_core_read((sc)->atse_txc_mem_res,		\
261	    A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE,		\
262	    "TX_INTR", __func__, __LINE__)
263
264static int	atse_rx_locked(struct atse_softc *sc);
265
266/*
267 * Register space access macros.
268 */
269static inline void
270csr_write_4(struct atse_softc *sc, uint32_t reg, uint32_t val4,
271    const char *f, const int l)
272{
273
274	val4 = htole32(val4);
275	DPRINTF("[%s:%d] CSR W %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
276	    "atse_mem_res", reg, reg * 4, val4);
277	bus_write_4(sc->atse_mem_res, reg * 4, val4);
278}
279
280static inline uint32_t
281csr_read_4(struct atse_softc *sc, uint32_t reg, const char *f, const int l)
282{
283	uint32_t val4;
284
285	val4 = le32toh(bus_read_4(sc->atse_mem_res, reg * 4));
286	DPRINTF("[%s:%d] CSR R %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
287	    "atse_mem_res", reg, reg * 4, val4);
288	return (val4);
289}
290
291/*
292 * See page 5-2: these are all dword offsets and the most significant 16 bits
293 * must be zero on write and are ignored on read.
294 */
295static inline void
296pxx_write_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, uint16_t val,
297    const char *f, const int l, const char *s)
298{
299	uint32_t val4;
300
301	val4 = htole32(val & 0x0000ffff);
302	DPRINTF("[%s:%d] %s W %s 0x%08x (0x%08jx) = 0x%08x\n", f, l, s,
303	    "atse_mem_res", reg, (bmcr + reg) * 4, val4);
304	bus_write_4(sc->atse_mem_res, (bmcr + reg) * 4, val4);
305}
306
307static inline uint16_t
308pxx_read_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, const char *f,
309    const int l, const char *s)
310{
311	uint32_t val4;
312	uint16_t val;
313
314	val4 = bus_read_4(sc->atse_mem_res, (bmcr + reg) * 4);
315	val = le32toh(val4) & 0x0000ffff;
316	DPRINTF("[%s:%d] %s R %s 0x%08x (0x%08jx) = 0x%04x\n", f, l, s,
317	    "atse_mem_res", reg, (bmcr + reg) * 4, val);
318	return (val);
319}
320
321#define	CSR_WRITE_4(sc, reg, val)	\
322	csr_write_4((sc), (reg), (val), __func__, __LINE__)
323#define	CSR_READ_4(sc, reg)		\
324	csr_read_4((sc), (reg), __func__, __LINE__)
325#define	PCS_WRITE_2(sc, reg, val)	\
326	pxx_write_2((sc), sc->atse_bmcr0, (reg), (val), __func__, __LINE__, \
327	    "PCS")
328#define	PCS_READ_2(sc, reg)		\
329	pxx_read_2((sc), sc->atse_bmcr0, (reg), __func__, __LINE__, "PCS")
330#define	PHY_WRITE_2(sc, reg, val)	\
331	pxx_write_2((sc), sc->atse_bmcr1, (reg), (val), __func__, __LINE__, \
332	    "PHY")
333#define	PHY_READ_2(sc, reg)		\
334	pxx_read_2((sc), sc->atse_bmcr1, (reg), __func__, __LINE__, "PHY")
335
336static void atse_tick(void *);
337static int atse_detach(device_t);
338
339devclass_t atse_devclass;
340
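/*
 * Feed the pending TX mbuf into the Avalon-ST TX FIFO.  The frame is first
 * copied into a bounce buffer so it can be pushed out as aligned 32-bit
 * words; SOP metadata is written before the first word and EOP metadata
 * (carrying the "empty" byte count) before the last one.  Returns 0 once the
 * whole frame has been handed to the FIFO, or EBUSY if the FIFO filled up
 * and the remainder must be sent later (progress is kept in atse_tx_m_offset).
 */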
341static int
342atse_tx_locked(struct atse_softc *sc, int *sent)
343{
344	struct mbuf *m;
345	uint32_t val4, fill_level;
346	int c;
347
348	ATSE_LOCK_ASSERT(sc);
349
350	m = sc->atse_tx_m;
351	KASSERT(m != NULL, ("%s: m is null: sc=%p", __func__, sc));
352	KASSERT(m->m_flags & M_PKTHDR, ("%s: not a pkthdr: m=%p", __func__, m));
353
354	/*
355	 * Copy to a buffer to minimize our pain, as we can only store
356	 * double words, which get out of alignment quite quickly after
357	 * the first mbuf.
358	 */
359	if (sc->atse_tx_m_offset == 0) {
360		m_copydata(m, 0, m->m_pkthdr.len, sc->atse_tx_buf);
361		sc->atse_tx_buf_len = m->m_pkthdr.len;
362	}
363
364	fill_level = ATSE_TX_READ_FILL_LEVEL(sc);
365#if 0	/* Returns 0xdeadc0de. */
366	val4 = ATSE_TX_META_READ(sc);
367#endif
368	if (sc->atse_tx_m_offset == 0) {
369		/* Write start of packet. */
370		val4 = A_ONCHIP_FIFO_MEM_CORE_SOP;
371		val4 &= ~A_ONCHIP_FIFO_MEM_CORE_EOP;
372		ATSE_TX_META_WRITE(sc, val4);
373	}
374
375	/* TX FIFO is single clock mode, so we have the full FIFO. */
376	c = 0;
377	while ((sc->atse_tx_buf_len - sc->atse_tx_m_offset) > 4 &&
378	     fill_level < AVALON_FIFO_TX_BASIC_OPTS_DEPTH) {
379
380		bcopy(&sc->atse_tx_buf[sc->atse_tx_m_offset], &val4,
381		    sizeof(val4));
382		ATSE_TX_DATA_WRITE(sc, val4);
383		sc->atse_tx_m_offset += sizeof(val4);
384		c += sizeof(val4);
385
386		fill_level++;
387		if (fill_level == AVALON_FIFO_TX_BASIC_OPTS_DEPTH)
388			fill_level = ATSE_TX_READ_FILL_LEVEL(sc);
389	}
390	if (sent != NULL)
391		*sent += c;
392
393	/* Set EOP *before* writing the last symbol. */
394	if (sc->atse_tx_m_offset >= (sc->atse_tx_buf_len - 4) &&
395	    fill_level < AVALON_FIFO_TX_BASIC_OPTS_DEPTH) {
396		int leftm;
398
399		/* Set EndOfPacket. */
400		val4 = A_ONCHIP_FIFO_MEM_CORE_EOP;
401		/* Set EMPTY. */
402		leftm = sc->atse_tx_buf_len - sc->atse_tx_m_offset;
403		val4 |= ((4 - leftm) << A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT);
405		ATSE_TX_META_WRITE(sc, val4);
406
407		/* Write last symbol. */
408		val4 = 0;
409		bcopy(sc->atse_tx_buf + sc->atse_tx_m_offset, &val4, leftm);
410		ATSE_TX_DATA_WRITE(sc, val4);
411
412		if (sent != NULL)
413			*sent += leftm;
414
415		/* OK, the packet is gone. */
416		sc->atse_tx_m = NULL;
417		sc->atse_tx_m_offset = 0;
418
419		/* If anyone is interested give them a copy. */
420		BPF_MTAP(sc->atse_ifp, m);
421
422		m_freem(m);
423		return (0);
424	}
425
426	return (EBUSY);
427}
428
429static void
430atse_start_locked(struct ifnet *ifp)
431{
432	struct atse_softc *sc;
433	int error, sent;
434
435	sc = ifp->if_softc;
436	ATSE_LOCK_ASSERT(sc);
	sent = 0;		/* Nothing sent yet. */
437
438	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
439	    IFF_DRV_RUNNING || (sc->atse_flags & ATSE_FLAGS_LINK) == 0)
440		return;
441
442#if 1
443	/*
444	 * Disable the watchdog while sending; we are batching packets.
445	 * We should never reach 5 seconds while holding the lock, but
446	 * who knows.
447	 */
448	sc->atse_watchdog_timer = 0;
449#endif
450
451	if (sc->atse_tx_m != NULL) {
452		error = atse_tx_locked(sc, &sent);
453		if (error != 0)
454			goto done;
455	}
456	/* We have more space to send so continue ... */
457	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
458
459		IFQ_DRV_DEQUEUE(&ifp->if_snd, sc->atse_tx_m);
460		sc->atse_tx_m_offset = 0;
461		if (sc->atse_tx_m == NULL)
462			break;
463		error = atse_tx_locked(sc, &sent);
464		if (error != 0)
465			goto done;
466	}
467
468done:
469	/* If the IP core walks into Nekromanteion try to bail out. */
470	if (sent > 0)
471		sc->atse_watchdog_timer = ATSE_WATCHDOG_TIME;
472}
473
474static void
475atse_start(struct ifnet *ifp)
476{
477	struct atse_softc *sc;
478
479	sc = ifp->if_softc;
480	ATSE_LOCK(sc);
481	atse_start_locked(ifp);
482	ATSE_UNLOCK(sc);
483}
484
485static int
486atse_stop_locked(struct atse_softc *sc)
487{
488	struct ifnet *ifp;
489	uint32_t mask, val4;
490	int i;
491
492	ATSE_LOCK_ASSERT(sc);
493
494	sc->atse_watchdog_timer = 0;
495	callout_stop(&sc->atse_tick);
496
497	ifp = sc->atse_ifp;
498	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
499	ATSE_RX_INTR_DISABLE(sc);
500	ATSE_TX_INTR_DISABLE(sc);
501	ATSE_RX_EVENT_CLEAR(sc);
502	ATSE_TX_EVENT_CLEAR(sc);
503
504	/* Disable MAC transmit and receive datapath. */
505	mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
506	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
507	val4 &= ~mask;
508	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
509	/* Wait for bits to be cleared; i=100 is excessive. */
510	for (i = 0; i < 100; i++) {
511		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
512		if ((val4 & mask) == 0)
513			break;
514		DELAY(10);
515	}
516	if ((val4 & mask) != 0)
517		device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
518		/* Punt. */
519
520	sc->atse_flags &= ~ATSE_FLAGS_LINK;
521
522	/* XXX-BZ free the RX/TX rings. */
523
524	return (0);
525}
526
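/*
 * Compute the 6-bit multicast hash used to index the MAC's hash table:
 * bit i of the result is the XOR (parity) of the eight bits of address
 * byte i, for each of the six address bytes.
 */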
527static uint8_t
528atse_mchash(struct atse_softc *sc __unused, const uint8_t *addr)
529{
530	int i, j;
531	uint8_t x, y;
532
533	x = 0;
534	for (i = 0; i < ETHER_ADDR_LEN; i++) {
535		y = addr[i] & 0x01;
536		for (j = 1; j < 8; j++)
537			y ^= (addr[i] >> j) & 0x01;
538		x |= (y << i);
539	}
540	return (x);
541}
542
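/*
 * Program the receive filter: hash over the full 48-bit address, set
 * promiscuous mode according to IFF_PROMISC, and fill the multicast hash
 * table either with all ones (IFF_ALLMULTI) or from the current multicast
 * membership.
 */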
543static int
544atse_rxfilter_locked(struct atse_softc *sc)
545{
546	struct ifnet *ifp;
547	struct ifmultiaddr *ifma;
548	uint32_t val4;
549	int i;
550
551	/* XXX-BZ can we find out if we have the MHASH synthesized? */
552	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
553	/* For simplicity always hash full 48 bits of addresses. */
554	if ((val4 & BASE_CFG_COMMAND_CONFIG_MHASH_SEL) != 0)
555		val4 &= ~BASE_CFG_COMMAND_CONFIG_MHASH_SEL;
556
557	ifp = sc->atse_ifp;
558	if (ifp->if_flags & IFF_PROMISC)
559		val4 |= BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
560	else
561		val4 &= ~BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
562
563	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
564
565	if (ifp->if_flags & IFF_ALLMULTI) {
566		/* Accept all multicast addresses. */
567		for (i = 0; i < MHASH_LEN; i++)
568			CSR_WRITE_4(sc, MHASH_START + i, 0x1);
569	} else {
570		/*
571		 * Can hold MHASH_LEN entries.
572		 * XXX-BZ bitstring.h would be more general.
573		 */
574		uint64_t h;
575
576		h = 0;
577		/*
578		 * Re-build and re-program hash table.  First build the
579		 * bit-field "yes" or "no" for each slot per address, then
580		 * do all the programming afterwards.
581		 */
582		if_maddr_rlock(ifp);
583		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
584			if (ifma->ifma_addr->sa_family != AF_LINK)
585				continue;
586
587			h |= (1ULL << atse_mchash(sc,
588			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr)));
589		}
590		if_maddr_runlock(ifp);
591		for (i = 0; i < MHASH_LEN; i++)
592			CSR_WRITE_4(sc, MHASH_START + i,
593			    (h & (1ULL << i)) ? 0x01 : 0x00);
594	}
595
596	return (0);
597}
598
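/*
 * Read the Altera Ethernet "option bits" (carrying the factory-programmed
 * MAC address) from the CFI flash device that sits next to us on the parent
 * bus.  The result is cached globally so the flash is only read once even
 * with multiple MACs.
 */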
599static int
600atse_ethernet_option_bits_read_fdt(device_t dev)
601{
602	struct resource *res;
603	device_t fdev;
604	int i, rid;
605
606	if (atse_ethernet_option_bits_flag & ATSE_ETHERNET_OPTION_BITS_READ)
607		return (0);
608
609	fdev = device_find_child(device_get_parent(dev), "cfi", 0);
610	if (fdev == NULL)
611		return (ENOENT);
612
613	rid = 0;
614	res = bus_alloc_resource_any(fdev, SYS_RES_MEMORY, &rid,
615	    RF_ACTIVE | RF_SHAREABLE);
616	if (res == NULL)
617		return (ENXIO);
618
619	for (i = 0; i < ALTERA_ETHERNET_OPTION_BITS_LEN; i++)
620		atse_ethernet_option_bits[i] = bus_read_1(res,
621		    ALTERA_ETHERNET_OPTION_BITS_OFF + i);
622
623	bus_release_resource(fdev, SYS_RES_MEMORY, rid, res);
624	atse_ethernet_option_bits_flag |= ATSE_ETHERNET_OPTION_BITS_READ;
625
626	return (0);
627}
628
629static int
630atse_ethernet_option_bits_read(device_t dev)
631{
632	int error;
633
634	error = atse_ethernet_option_bits_read_fdt(dev);
635	if (error == 0)
636		return (0);
637
638	device_printf(dev, "Cannot read Ethernet addresses from flash.\n");
639	return (error);
640}
641
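/*
 * Derive our Ethernet address: if the flash option bits carry the "5afe"
 * magic and a usable, non-blacklisted address, use it (adjusting the low
 * nibble by the device unit for multi-MAC configurations); otherwise fall
 * back to a locally administered address derived from the hostid, or to a
 * random one.
 */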
642static int
643atse_get_eth_address(struct atse_softc *sc)
644{
645	unsigned long hostid;
646	uint32_t val4;
647	int unit;
648
649	/*
650	 * Make sure to only ever do this once.  Otherwise a reset would
651	 * possibly change our ethernet address, which is not good at all.
652	 */
653	if (sc->atse_eth_addr[0] != 0x00 || sc->atse_eth_addr[1] != 0x00 ||
654	    sc->atse_eth_addr[2] != 0x00)
655		return (0);
656
657	if ((atse_ethernet_option_bits_flag &
658	    ATSE_ETHERNET_OPTION_BITS_READ) == 0)
659		goto get_random;
660
661	val4 = atse_ethernet_option_bits[0] << 24;
662	val4 |= atse_ethernet_option_bits[1] << 16;
663	val4 |= atse_ethernet_option_bits[2] << 8;
664	val4 |= atse_ethernet_option_bits[3];
665	/* They chose "safe". */
666	if (val4 != le32toh(0x00005afe)) {
667		device_printf(sc->atse_dev, "Magic '5afe' is not safe: 0x%08x. "
668		    "Falling back to random numbers for hardware address.\n",
669		     val4);
670		goto get_random;
671	}
672
673	sc->atse_eth_addr[0] = atse_ethernet_option_bits[4];
674	sc->atse_eth_addr[1] = atse_ethernet_option_bits[5];
675	sc->atse_eth_addr[2] = atse_ethernet_option_bits[6];
676	sc->atse_eth_addr[3] = atse_ethernet_option_bits[7];
677	sc->atse_eth_addr[4] = atse_ethernet_option_bits[8];
678	sc->atse_eth_addr[5] = atse_ethernet_option_bits[9];
679
680	/* Handle factory default Ethernet address: 00:07:ed:ff:ed:15 */
681	if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x07 &&
682	    sc->atse_eth_addr[2] == 0xed && sc->atse_eth_addr[3] == 0xff &&
683	    sc->atse_eth_addr[4] == 0xed && sc->atse_eth_addr[5] == 0x15) {
684
685		device_printf(sc->atse_dev, "Factory programmed Ethernet "
686		    "hardware address blacklisted.  Falling back to random "
687		    "address to avoid collisions.\n");
688		device_printf(sc->atse_dev, "Please re-program your flash.\n");
689		goto get_random;
690	}
691
692	if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x00 &&
693	    sc->atse_eth_addr[2] == 0x00 && sc->atse_eth_addr[3] == 0x00 &&
694	    sc->atse_eth_addr[4] == 0x00 && sc->atse_eth_addr[5] == 0x00) {
695		device_printf(sc->atse_dev, "All-zeros Ethernet hardware "
696		    "address blacklisted.  Falling back to random address.\n");
697		device_printf(sc->atse_dev, "Please re-program your flash.\n");
698		goto get_random;
699	}
700
701	if (ETHER_IS_MULTICAST(sc->atse_eth_addr)) {
702		device_printf(sc->atse_dev, "Multicast Ethernet hardware "
703		    "address blacklisted.  Falling back to random address.\n");
704		device_printf(sc->atse_dev, "Please re-program your flash.\n");
705		goto get_random;
706	}
707
708	/*
709	 * If we find an Altera-prefixed address with a 0x0 ending,
710	 * adjust it by the device unit.  If not, and this is not the
711	 * first Ethernet interface, fall back to a random address.
712	 */
713	unit = device_get_unit(sc->atse_dev);
714	if (unit == 0x00)
715		return (0);
716
717	if (unit > 0x0f) {
718		device_printf(sc->atse_dev, "We do not support Ethernet "
719		    "addresses for more than 16 MACs. Falling back to "
720		    "random hardware address.\n");
721		goto get_random;
722	}
723	if ((sc->atse_eth_addr[0] & ~0x2) != 0 ||
724	    sc->atse_eth_addr[1] != 0x07 || sc->atse_eth_addr[2] != 0xed ||
725	    (sc->atse_eth_addr[5] & 0x0f) != 0x0) {
726		device_printf(sc->atse_dev, "Ethernet address not meeting our "
727		    "multi-MAC standards. Falling back to random hardware "
728		    "address.\n");
729		goto get_random;
730	}
731	sc->atse_eth_addr[5] |= (unit & 0x0f);
732
733	return (0);
734
735get_random:
736	/*
737	 * Fall back to the random-address code we also use in bridge(4).
738	 */
739	getcredhostid(curthread->td_ucred, &hostid);
740	if (hostid == 0) {
741		arc4rand(sc->atse_eth_addr, ETHER_ADDR_LEN, 1);
742		sc->atse_eth_addr[0] &= ~1; /* clear multicast bit */
743		sc->atse_eth_addr[0] |= 2; /* set the LAA bit */
744	} else {
745		sc->atse_eth_addr[0] = 0x2;
746		sc->atse_eth_addr[1] = (hostid >> 24)	& 0xff;
747		sc->atse_eth_addr[2] = (hostid >> 16)	& 0xff;
748		sc->atse_eth_addr[3] = (hostid >> 8 )	& 0xff;
749		sc->atse_eth_addr[4] = hostid		& 0xff;
750		sc->atse_eth_addr[5] = sc->atse_unit	& 0xff;
751	}
752
753	return (0);
754}
755
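/*
 * Program the primary and/or supplementary MAC address registers, selected
 * by the bitmask in 'n', from the address cached in the softc.
 */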
756static int
757atse_set_eth_address(struct atse_softc *sc, int n)
758{
759	uint32_t v0, v1;
760
761	v0 = (sc->atse_eth_addr[3] << 24) | (sc->atse_eth_addr[2] << 16) |
762	    (sc->atse_eth_addr[1] << 8) | sc->atse_eth_addr[0];
763	v1 = (sc->atse_eth_addr[5] << 8) | sc->atse_eth_addr[4];
764
765	if (n & ATSE_ETH_ADDR_DEF) {
766		CSR_WRITE_4(sc, BASE_CFG_MAC_0, v0);
767		CSR_WRITE_4(sc, BASE_CFG_MAC_1, v1);
768	}
769	if (n & ATSE_ETH_ADDR_SUPP1) {
770		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_0, v0);
771		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_1, v1);
772	}
773	if (n & ATSE_ETH_ADDR_SUPP2) {
774		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_0, v0);
775		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_1, v1);
776	}
777	if (n & ATSE_ETH_ADDR_SUPP3) {
778		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_0, v0);
779		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_1, v1);
780	}
781	if (n & ATSE_ETH_ADDR_SUPP4) {
782		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_0, v0);
783		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_1, v1);
784	}
785
786	return (0);
787}
788
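/*
 * Bring the PCS and MAC into a known good state, following the
 * initialization sequence from the TSE User Guide (see the numbered steps
 * below): configure and reset the SGMII/PCS, then set up the MAC FIFOs,
 * addresses and frame options before resetting and re-enabling the MAC
 * TX/RX datapath.
 */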
789static int
790atse_reset(struct atse_softc *sc)
791{
792	int i;
793	uint32_t val4, mask;
794	uint16_t val;
795
796	/* 1. External PHY Initialization using MDIO. */
797	/*
798	 * We select the right MDIO space in atse_attach() and let MII do
799	 * anything else.
800	 */
801
802	/* 2. PCS Configuration Register Initialization. */
803	/* a. Set auto negotiation link timer to 1.6ms for SGMII. */
804	PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_0, 0x0D40);
805	PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_1, 0x0003);
806
807	/* b. Configure SGMII. */
808	val = PCS_EXT_IF_MODE_SGMII_ENA|PCS_EXT_IF_MODE_USE_SGMII_AN;
809	PCS_WRITE_2(sc, PCS_EXT_IF_MODE, val);
810
811	/* c. Enable auto negotiation. */
812	/* Ignore Bits 6,8,13; should be set,set,unset. */
813	val = PCS_READ_2(sc, PCS_CONTROL);
814	val &= ~(PCS_CONTROL_ISOLATE|PCS_CONTROL_POWERDOWN);
815	val &= ~PCS_CONTROL_LOOPBACK;		/* Make this a -link1 option? */
816	val |= PCS_CONTROL_AUTO_NEGOTIATION_ENABLE;
817	PCS_WRITE_2(sc, PCS_CONTROL, val);
818
819	/* d. PCS reset. */
820	val = PCS_READ_2(sc, PCS_CONTROL);
821	val |= PCS_CONTROL_RESET;
822	PCS_WRITE_2(sc, PCS_CONTROL, val);
823	/* Wait for reset bit to clear; i=100 is excessive. */
824	for (i = 0; i < 100; i++) {
825		val = PCS_READ_2(sc, PCS_CONTROL);
826		if ((val & PCS_CONTROL_RESET) == 0)
827			break;
828		DELAY(10);
829	}
830	if ((val & PCS_CONTROL_RESET) != 0) {
831		device_printf(sc->atse_dev, "PCS reset timed out.\n");
832		return (ENXIO);
833	}
834
835	/* 3. MAC Configuration Register Initialization. */
836	/* a. Disable MAC transmit and receive datapath. */
837	mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
838	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
839	val4 &= ~mask;
840	/* Samples in the manual do have the SW_RESET bit set here, why? */
841	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
842	/* Wait for bits to be cleared; i=100 is excessive. */
843	for (i = 0; i < 100; i++) {
844		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
845		if ((val4 & mask) == 0)
846			break;
847		DELAY(10);
848	}
849	if ((val4 & mask) != 0) {
850		device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
851		return (ENXIO);
852	}
853	/* b. MAC FIFO configuration. */
854	CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_EMPTY, FIFO_DEPTH_TX - 16);
855	CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_FULL, 3);
856	CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_EMPTY, 8);
857	CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_EMPTY, FIFO_DEPTH_RX - 16);
858	CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_FULL, 8);
859	CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_EMPTY, 8);
860#if 0
861	CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 16);
862	CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 16);
863#else
864	/* For store-and-forward mode, set this threshold to 0. */
865	CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 0);
866	CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 0);
867#endif
868	/* c. MAC address configuration. */
869	/* Also initialize supplementary addresses to our primary one. */
870	/* XXX-BZ FreeBSD really needs to grow an API for using these. */
871	atse_get_eth_address(sc);
872	atse_set_eth_address(sc, ATSE_ETH_ADDR_ALL);
873
874	/* d. MAC function configuration. */
875	CSR_WRITE_4(sc, BASE_CFG_FRM_LENGTH, 1518);	/* Default. */
876	CSR_WRITE_4(sc, BASE_CFG_TX_IPG_LENGTH, 12);
877	CSR_WRITE_4(sc, BASE_CFG_PAUSE_QUANT, 0xFFFF);
878
879	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
880	/*
881	 * If 1000BASE-X/SGMII PCS is initialized, set the ETH_SPEED (bit 3)
882	 * and ENA_10 (bit 25) in command_config register to 0.  If half duplex
883	 * is reported in the PHY/PCS status register, set the HD_ENA (bit 10)
884	 * to 1 in command_config register.
885	 * BZ: We shoot for 1000 instead.
886	 */
887#if 0
888	val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
889#else
890	val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
891#endif
892	val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
893#if 0
894	/*
895	 * We do not want to set this, otherwise, we could not even send
896	 * random raw ethernet frames for various other research.  By default
897	 * FreeBSD will use the right ether source address.
898	 */
899	val4 |= BASE_CFG_COMMAND_CONFIG_TX_ADDR_INS;
900#endif
901	val4 |= BASE_CFG_COMMAND_CONFIG_PAD_EN;
902	val4 &= ~BASE_CFG_COMMAND_CONFIG_CRC_FWD;
903#if 0
904	val4 |= BASE_CFG_COMMAND_CONFIG_CNTL_FRM_ENA;
905#endif
906#if 1
907	val4 |= BASE_CFG_COMMAND_CONFIG_RX_ERR_DISC;
908#endif
909	val4 &= ~BASE_CFG_COMMAND_CONFIG_LOOP_ENA;		/* link0? */
910	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
911
912	/*
913	 * Make sure we do not enable 32-bit alignment; FreeBSD cannot
914	 * cope with the additional padding (though we should!?).
915	 * Also make sure we get the CRC appended.
916	 */
917	val4 = CSR_READ_4(sc, TX_CMD_STAT);
918	val4 &= ~(TX_CMD_STAT_OMIT_CRC|TX_CMD_STAT_TX_SHIFT16);
919	CSR_WRITE_4(sc, TX_CMD_STAT, val4);
920	val4 = CSR_READ_4(sc, RX_CMD_STAT);
921	val4 &= ~RX_CMD_STAT_RX_SHIFT16;
922	CSR_WRITE_4(sc, RX_CMD_STAT, val4);
923
924	/* e. Reset MAC. */
925	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
926	val4 |= BASE_CFG_COMMAND_CONFIG_SW_RESET;
927	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
928	/* Wait for bits to be cleared; i=100 is excessive. */
929	for (i = 0; i < 100; i++) {
930		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
931		if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) == 0)
932			break;
933		DELAY(10);
934	}
935	if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) != 0) {
936		device_printf(sc->atse_dev, "MAC reset timed out.\n");
937		return (ENXIO);
938	}
939
940	/* f. Enable MAC transmit and receive datapath. */
941	mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
942	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
943	val4 |= mask;
944	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
945	/* Wait for bits to be cleared; i=100 is excessive. */
946	for (i = 0; i < 100; i++) {
947		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
948		if ((val4 & mask) == mask)
949			break;
950		DELAY(10);
951	}
952	if ((val4 & mask) != mask) {
953		device_printf(sc->atse_dev, "Enabling MAC TX/RX timed out.\n");
954		return (ENXIO);
955	}
956
957	return (0);
958}
959
960static void
961atse_init_locked(struct atse_softc *sc)
962{
963	struct ifnet *ifp;
964	struct mii_data *mii;
965	uint8_t *eaddr;
966
967	ATSE_LOCK_ASSERT(sc);
968	ifp = sc->atse_ifp;
969
970	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
971		return;
972
973	/*
974	 * Must update the ether address if it changed.  Given we do not
975	 * handle it in atse_ioctl() but it is handled in the general
976	 * framework, just always do it here before atse_reset().
977	 */
978	eaddr = IF_LLADDR(sc->atse_ifp);
979	bcopy(eaddr, &sc->atse_eth_addr, ETHER_ADDR_LEN);
980
981	/* Bring things to a halt, clean up, ... */
982	atse_stop_locked(sc);
983	/* ... reset, ... */
984	atse_reset(sc);
985
986	/* ... and fire up the engine again. */
987	atse_rxfilter_locked(sc);
988
989	/* Memory rings?  DMA engine? */
990
991	sc->atse_rx_buf_len = 0;
992	sc->atse_flags &= ATSE_FLAGS_LINK;	/* Preserve. */
993
994#ifdef DEVICE_POLLING
995	/* Only enable interrupts if we are not polling. */
996	if (ifp->if_capenable & IFCAP_POLLING) {
997		ATSE_RX_INTR_DISABLE(sc);
998		ATSE_TX_INTR_DISABLE(sc);
999		ATSE_RX_EVENT_CLEAR(sc);
1000		ATSE_TX_EVENT_CLEAR(sc);
1001	} else
1002#endif
1003	{
1004		ATSE_RX_INTR_ENABLE(sc);
1005		ATSE_TX_INTR_ENABLE(sc);
1006	}
1007
1008	mii = device_get_softc(sc->atse_miibus);
1009
1010	sc->atse_flags &= ~ATSE_FLAGS_LINK;
1011	mii_mediachg(mii);
1012
1013	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1014	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1015
1016	callout_reset(&sc->atse_tick, hz, atse_tick, sc);
1017}
1018
1019static void
1020atse_init(void *xsc)
1021{
1022	struct atse_softc *sc;
1023
1024	/*
1025	 * XXXRW: There is some argument that we should immediately do RX
1026	 * processing after enabling interrupts, or one may not fire if there
1027	 * are buffered packets.
1028	 */
1029	sc = (struct atse_softc *)xsc;
1030	ATSE_LOCK(sc);
1031	atse_init_locked(sc);
1032	ATSE_UNLOCK(sc);
1033}
1034
1035static int
1036atse_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1037{
1038	struct atse_softc *sc;
1039	struct ifreq *ifr;
1040	int error, mask;
1041
1043	error = 0;
1044	sc = ifp->if_softc;
1045	ifr = (struct ifreq *)data;
1046
1047	switch (command) {
1048	case SIOCSIFFLAGS:
1049		ATSE_LOCK(sc);
1050		if (ifp->if_flags & IFF_UP) {
1051			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1052			    ((ifp->if_flags ^ sc->atse_if_flags) &
1053			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1054				atse_rxfilter_locked(sc);
1055			else
1056				atse_init_locked(sc);
1057		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1058			atse_stop_locked(sc);
1059		sc->atse_if_flags = ifp->if_flags;
1060		ATSE_UNLOCK(sc);
1061		break;
1062	case SIOCSIFCAP:
1063		ATSE_LOCK(sc);
1064		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1065#ifdef DEVICE_POLLING
1066		if ((mask & IFCAP_POLLING) != 0 &&
1067		    (IFCAP_POLLING & ifp->if_capabilities) != 0) {
1068			ifp->if_capenable ^= IFCAP_POLLING;
1069			if ((IFCAP_POLLING & ifp->if_capenable) != 0) {
1070
1071				error = ether_poll_register(atse_poll, ifp);
1072				if (error != 0) {
1073					ATSE_UNLOCK(sc);
1074					break;
1075				}
1076				/* Disable interrupts. */
1077				ATSE_RX_INTR_DISABLE(sc);
1078				ATSE_TX_INTR_DISABLE(sc);
1079				ATSE_RX_EVENT_CLEAR(sc);
1080				ATSE_TX_EVENT_CLEAR(sc);
1081
1082			/*
1083			 * Do not allow disabling of polling if we do
1084			 * not have interrupts.
1085			 */
1086			} else if (sc->atse_rx_irq_res != NULL ||
1087			    sc->atse_tx_irq_res != NULL) {
1088				error = ether_poll_deregister(ifp);
1089				/* Enable interrupts. */
1090				ATSE_RX_INTR_ENABLE(sc);
1091				ATSE_TX_INTR_ENABLE(sc);
1092			} else {
1093				ifp->if_capenable ^= IFCAP_POLLING;
1094				error = EINVAL;
1095			}
1096		}
1097#endif /* DEVICE_POLLING */
1098		ATSE_UNLOCK(sc);
1099		break;
1100	case SIOCADDMULTI:
1101	case SIOCDELMULTI:
1102		ATSE_LOCK(sc);
1103		atse_rxfilter_locked(sc);
1104		ATSE_UNLOCK(sc);
1105		break;
1106	case SIOCGIFMEDIA:
1107	case SIOCSIFMEDIA:
1108	{
1109		struct mii_data *mii;
1110		struct ifreq *ifr;
1111
1112		mii = device_get_softc(sc->atse_miibus);
1113		ifr = (struct ifreq *)data;
1114		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1115		break;
1116	}
1117	default:
1118		error = ether_ioctl(ifp, command, data);
1119		break;
1120	}
1121
1122	return (error);
1123}
1124
1125static void
1126atse_intr_debug(struct atse_softc *sc, const char *intrname)
1127{
1128	uint32_t rxs, rxe, rxi, rxf, txs, txe, txi, txf;
1129
1130	if (!atse_intr_debug_enable)
1131		return;
1132
1133	rxs = ATSE_RX_STATUS_READ(sc);
1134	rxe = ATSE_RX_EVENT_READ(sc);
1135	rxi = ATSE_RX_INTR_READ(sc);
1136	rxf = ATSE_RX_READ_FILL_LEVEL(sc);
1137
1138	txs = ATSE_TX_STATUS_READ(sc);
1139	txe = ATSE_TX_EVENT_READ(sc);
1140	txi = ATSE_TX_INTR_READ(sc);
1141	txf = ATSE_TX_READ_FILL_LEVEL(sc);
1142
1143	printf(
1144	    "%s - %s: "
1145	    "rxs 0x%x rxe 0x%x rxi 0x%x rxf 0x%x "
1146	    "txs 0x%x txe 0x%x txi 0x%x txf 0x%x\n",
1147	    __func__, intrname,
1148	    rxs, rxe, rxi, rxf,
1149	    txs, txe, txi, txf);
1150}
1151
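/*
 * TX watchdog: if the timer armed in atse_start_locked() expires without
 * further transmit progress, report it, re-initialize the interface and
 * kick RX and TX again.
 */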
1152static void
1153atse_watchdog(struct atse_softc *sc)
1154{
1155
1156	ATSE_LOCK_ASSERT(sc);
1157
1158	if (sc->atse_watchdog_timer == 0 || --sc->atse_watchdog_timer > 0)
1159		return;
1160
1161	device_printf(sc->atse_dev, "watchdog timeout\n");
1162	sc->atse_ifp->if_oerrors++;
1163
1164	atse_intr_debug(sc, "poll");
1165
1166	sc->atse_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1167	atse_init_locked(sc);
1168
1169	atse_rx_locked(sc);
1170	if (!IFQ_DRV_IS_EMPTY(&sc->atse_ifp->if_snd))
1171		atse_start_locked(sc->atse_ifp);
1172}
1173
1174static void
1175atse_tick(void *xsc)
1176{
1177	struct atse_softc *sc;
1178	struct mii_data *mii;
1179	struct ifnet *ifp;
1180
1181	sc = (struct atse_softc *)xsc;
1182	ATSE_LOCK_ASSERT(sc);
1183	ifp = sc->atse_ifp;
1184
1185	mii = device_get_softc(sc->atse_miibus);
1186	mii_tick(mii);
1187	atse_watchdog(sc);
1188	if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0)
1189		atse_miibus_statchg(sc->atse_dev);
1190	callout_reset(&sc->atse_tick, hz, atse_tick, sc);
1191}
1192
1193/*
1194 * Set media options.
1195 */
1196static int
1197atse_ifmedia_upd(struct ifnet *ifp)
1198{
1199	struct atse_softc *sc;
1200	struct mii_data *mii;
1201	struct mii_softc *miisc;
1202	int error;
1203
1204	sc = ifp->if_softc;
1205
1206	ATSE_LOCK(sc);
1207	mii = device_get_softc(sc->atse_miibus);
1208	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1209		PHY_RESET(miisc);
1210	error = mii_mediachg(mii);
1211	ATSE_UNLOCK(sc);
1212
1213	return (error);
1214}
1215
1216static void
1217atse_update_rx_err(struct atse_softc *sc, uint32_t mask)
1218{
1219	int i;
1220
1221	/* RX errors are 6 bits; we only know 4 of them. */
1222	for (i = 0; i < ATSE_RX_ERR_MAX; i++)
1223		if ((mask & (1 << i)) != 0)
1224			sc->atse_rx_err[i]++;
1225}
1226
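/*
 * Drain the Avalon-ST RX FIFO: read 32-bit data words together with their
 * metadata and reassemble each frame into a single mbuf cluster between the
 * SOP and EOP markers (the EOP "empty" field gives the number of unused
 * bytes in the last word), then pass completed frames up the stack.
 * Returns the number of packets received, used by the polling path.
 */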
1227static int
1228atse_rx_locked(struct atse_softc *sc)
1229{
1230	struct ifnet *ifp;
1231	struct mbuf *m;
1232	uint32_t fill, i, j;
1233	uint32_t data, meta;
1234	int rx_npkts = 0;
1235
1236	ATSE_LOCK_ASSERT(sc);
1237
1238	ifp = sc->atse_ifp;
1239	j = 0;
1240	meta = 0;
1241	do {
1242outer:
1243		if (sc->atse_rx_m == NULL) {
1244			m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1245			if (m == NULL)
1246				return (rx_npkts);
1247			m->m_len = m->m_pkthdr.len = MCLBYTES;
1248			/* Make sure upper layers will be aligned. */
1249			m_adj(m, ETHER_ALIGN);
1250			sc->atse_rx_m = m;
1251		}
1252
1253		fill = ATSE_RX_READ_FILL_LEVEL(sc);
1254		for (i = 0; i < fill; i++) {
1255			/*
1256			 * XXX-BZ for whatever reason the FIFO requires the
1257			 * data to be read before we can access the metadata.
1258			 */
1259			data = ATSE_RX_DATA_READ(sc);
1260			meta = ATSE_RX_META_READ(sc);
1261			if (meta & A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) {
1262				/* XXX-BZ evaluate error. */
1263				atse_update_rx_err(sc, ((meta &
1264				    A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) >>
1265				    A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT) & 0xff);
1266				ifp->if_ierrors++;
1267				sc->atse_rx_buf_len = 0;
1268				/*
1269				 * Should still read till EOP or next SOP.
1270				 *
1271				 * XXX-BZ might also depend on
1272				 * BASE_CFG_COMMAND_CONFIG_RX_ERR_DISC
1273				 */
1274				sc->atse_flags |= ATSE_FLAGS_ERROR;
1275				return (rx_npkts);
1276			}
1277			if ((meta & A_ONCHIP_FIFO_MEM_CORE_CHANNEL_MASK) != 0)
1278				device_printf(sc->atse_dev, "%s: unexpected "
1279				    "channel %u\n", __func__, (meta &
1280				    A_ONCHIP_FIFO_MEM_CORE_CHANNEL_MASK) >>
1281				    A_ONCHIP_FIFO_MEM_CORE_CHANNEL_SHIFT);
1282
1283			if (meta & A_ONCHIP_FIFO_MEM_CORE_SOP) {
1284				/*
1285				 * There is no need to clear SOP between 1st
1286				 * and subsequent packet data chunks.
1287				 */
1288				if (sc->atse_rx_buf_len != 0 &&
1289				    (sc->atse_flags & ATSE_FLAGS_SOP_SEEN) == 0)
1290				{
1291					device_printf(sc->atse_dev, "%s: SOP "
1292					    "without empty buffer: %u\n",
1293					    __func__, sc->atse_rx_buf_len);
1294					/* XXX-BZ any better counter? */
1295					ifp->if_ierrors++;
1296				}
1297
1298				if ((sc->atse_flags & ATSE_FLAGS_SOP_SEEN) == 0)
1299				{
1300					sc->atse_flags |= ATSE_FLAGS_SOP_SEEN;
1301					sc->atse_rx_buf_len = 0;
1302				}
1303			}
1304#if 0 /* We had to read the data before we could access the metadata.  See above. */
1305			data = ATSE_RX_DATA_READ(sc);
1306#endif
1307			/* Make sure to not overflow the mbuf data size. */
1308			if (sc->atse_rx_buf_len >= sc->atse_rx_m->m_len -
1309			    sizeof(data)) {
1310				/*
1311				 * XXX-BZ Error.  We need more mbufs and are
1312				 * not setup for this yet.
1313				 */
1314				ifp->if_ierrors++;
1315				sc->atse_flags |= ATSE_FLAGS_ERROR;
1316			}
1317			if ((sc->atse_flags & ATSE_FLAGS_ERROR) == 0)
1318				/*
1319				 * MUST keep this bcopy, as m_data after m_adj
1320				 * for IP header alignment is on half-word
1321				 * and not word alignment.
1322				 */
1323				bcopy(&data, (uint8_t *)(sc->atse_rx_m->m_data +
1324				    sc->atse_rx_buf_len), sizeof(data));
1325			if (meta & A_ONCHIP_FIFO_MEM_CORE_EOP) {
1326				uint8_t empty;
1327
1328				empty = (meta &
1329				    A_ONCHIP_FIFO_MEM_CORE_EMPTY_MASK) >>
1330				    A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT;
1331				sc->atse_rx_buf_len += (4 - empty);
1332
1333				ifp->if_ipackets++;
1334				rx_npkts++;
1335
1336				m = sc->atse_rx_m;
1337				m->m_pkthdr.len = m->m_len =
1338				    sc->atse_rx_buf_len;
1339				sc->atse_rx_m = NULL;
1340
1341				sc->atse_rx_buf_len = 0;
1342				sc->atse_flags &= ~ATSE_FLAGS_SOP_SEEN;
1343				if (sc->atse_flags & ATSE_FLAGS_ERROR) {
1344					sc->atse_flags &= ~ATSE_FLAGS_ERROR;
1345					m_freem(m);
1346				} else {
1347					m->m_pkthdr.rcvif = ifp;
1348					ATSE_UNLOCK(sc);
1349					(*ifp->if_input)(ifp, m);
1350					ATSE_LOCK(sc);
1351				}
1352#ifdef DEVICE_POLLING
1353				if (ifp->if_capenable & IFCAP_POLLING) {
1354					if (sc->atse_rx_cycles <= 0)
1355						return (rx_npkts);
1356					sc->atse_rx_cycles--;
1357				}
1358#endif
1359				goto outer;	/* Need a new mbuf. */
1360			} else {
1361				sc->atse_rx_buf_len += sizeof(data);
1362			}
1363		} /* for */
1364
1365	/* XXX-BZ could optimize in case of another packet waiting. */
1366	} while (fill > 0);
1367
1368	return (rx_npkts);
1369}
1370
1371
1372/*
1373 * Report current media status.
1374 */
1375static void
1376atse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1377{
1378	struct atse_softc *sc;
1379	struct mii_data *mii;
1380
1381	sc = ifp->if_softc;
1382
1383	ATSE_LOCK(sc);
1384	mii = device_get_softc(sc->atse_miibus);
1385	mii_pollstat(mii);
1386	ifmr->ifm_active = mii->mii_media_active;
1387	ifmr->ifm_status = mii->mii_media_status;
1388	ATSE_UNLOCK(sc);
1389}
1390
1391static void
1392atse_rx_intr(void *arg)
1393{
1394	struct atse_softc *sc;
1395	struct ifnet *ifp;
1396	uint32_t rxe;
1397
1398	sc = (struct atse_softc *)arg;
1399	ifp = sc->atse_ifp;
1400
1401	ATSE_LOCK(sc);
1402#ifdef DEVICE_POLLING
1403	if (ifp->if_capenable & IFCAP_POLLING) {
1404		ATSE_UNLOCK(sc);
1405		return;
1406	}
1407#endif
1408
1409	atse_intr_debug(sc, "rx");
1410	rxe = ATSE_RX_EVENT_READ(sc);
1411	if (rxe & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW|
1412	    A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
1413		/* XXX-BZ ERROR HANDLING. */
1414		atse_update_rx_err(sc, ((rxe &
1415		    A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) >>
1416		    A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT) & 0xff);
1417		ifp->if_ierrors++;
1418	}
1419
1420	/*
1421	 * There is considerable subtlety in the race-free handling of rx
1422	 * interrupts: we must disable interrupts whenever we manipulate the
1423	 * FIFO to prevent further interrupts from firing before we are done;
1424	 * we must clear the event after processing to prevent the event from
1425	 * being immediately reposted due to data remaining; we must clear the
1426	 * event mask before reenabling interrupts or risk missing a positive
1427	 * edge; and we must recheck everything after completing in case the
1428	 * event posted between clearing events and reenabling interrupts.  If
1429	 * a race is experienced, we must restart the whole mechanism.
1430	 */
1431	do {
1432		ATSE_RX_INTR_DISABLE(sc);
1433#if 0
1434		sc->atse_rx_cycles = RX_CYCLES_IN_INTR;
1435#endif
1436		atse_rx_locked(sc);
1437		ATSE_RX_EVENT_CLEAR(sc);
1438
1439		/* Disable interrupts if interface is down. */
1440		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1441			ATSE_RX_INTR_ENABLE(sc);
1442	} while (!(ATSE_RX_STATUS_READ(sc) &
1443	    A_ONCHIP_FIFO_MEM_CORE_STATUS_EMPTY));
1444	ATSE_UNLOCK(sc);
1446}
1447
1448static void
1449atse_tx_intr(void *arg)
1450{
1451	struct atse_softc *sc;
1452	struct ifnet *ifp;
1453	uint32_t txe;
1454
1455	sc = (struct atse_softc *)arg;
1456	ifp = sc->atse_ifp;
1457
1458	ATSE_LOCK(sc);
1459#ifdef DEVICE_POLLING
1460	if (ifp->if_capenable & IFCAP_POLLING) {
1461		ATSE_UNLOCK(sc);
1462		return;
1463	}
1464#endif
1465
1466	/* XXX-BZ build histogram. */
1467	atse_intr_debug(sc, "tx");
1468	txe = ATSE_TX_EVENT_READ(sc);
1469	if (txe & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW|
1470	    A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
1471		/* XXX-BZ ERROR HANDLING. */
1472		ifp->if_oerrors++;
1473	}
1474
1475	/*
1476	 * There is also considerable subtlety in the race-free handling of
1477	 * tx interrupts: all processing occurs with interrupts disabled to
1478	 * prevent spurious refiring while transmit is in progress (which
1479	 * could occur if the FIFO drains while sending -- quite likely); we
1480	 * must not clear the event mask until after we've sent, also to
1481	 * prevent spurious refiring; once we've cleared the event mask we can
1482	 * reenable interrupts, but there is a possible race between clear and
1483	 * enable, so we must recheck and potentially repeat the whole process
1484	 * if it is detected.
1485	 */
1486	do {
1487		ATSE_TX_INTR_DISABLE(sc);
1488		sc->atse_watchdog_timer = 0;
1489		atse_start_locked(ifp);
1490		ATSE_TX_EVENT_CLEAR(sc);
1491
1492		/* Disable interrupts if interface is down. */
1493		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1494			ATSE_TX_INTR_ENABLE(sc);
1495	} while (ATSE_TX_PENDING(sc) &&
1496	    !(ATSE_TX_STATUS_READ(sc) & A_ONCHIP_FIFO_MEM_CORE_STATUS_FULL));
1497	ATSE_UNLOCK(sc);
1498}
1499
1500#ifdef DEVICE_POLLING
1501static int
1502atse_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1503{
1504	struct atse_softc *sc;
1505	int rx_npkts = 0;
1506
1507	sc = ifp->if_softc;
1508	ATSE_LOCK(sc);
1509	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1510		ATSE_UNLOCK(sc);
1511		return (rx_npkts);
1512	}
1513
1514	sc->atse_rx_cycles = count;
1515	rx_npkts = atse_rx_locked(sc);
1516	atse_start_locked(ifp);
1517
1518	if (sc->atse_rx_cycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1519		uint32_t rx, tx;
1520
1521		rx = ATSE_RX_EVENT_READ(sc);
1522		tx = ATSE_TX_EVENT_READ(sc);
1523
1524		if (rx & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW|
1525		    A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
1526			/* XXX-BZ ERROR HANDLING. */
1527			atse_update_rx_err(sc, ((rx &
1528			    A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) >>
1529			    A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT) & 0xff);
1530			ifp->if_ierrors++;
1531		}
1532		if (tx & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW|
1533		    A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
1534			/* XXX-BZ ERROR HANDLING. */
1535			ifp->if_oerrors++;
1536		}
1537		if (ATSE_TX_READ_FILL_LEVEL(sc) == 0)
1538			sc->atse_watchdog_timer = 0;
1539
1540#if 0
1541		if (/* Severe error; if only we could find out. */) {
1542			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1543			atse_init_locked(sc);
1544		}
1545#endif
1546	}
1547
1548	ATSE_UNLOCK(sc);
1549	return (rx_npkts);
1550}
1551#endif /* DEVICE_POLLING */
1552
1553static struct atse_mac_stats_regs {
1554	const char *name;
1555	const char *descr;	/* Mostly copied from Altera datasheet. */
1556} atse_mac_stats_regs[] = {
1557	[0x1a] =
1558	{ "aFramesTransmittedOK",
1559	    "The number of frames that are successfully transmitted including "
1560	    "the pause frames." },
1561	{ "aFramesReceivedOK",
1562	    "The number of frames that are successfully received including the "
1563	    "pause frames." },
1564	{ "aFrameCheckSequenceErrors",
1565	    "The number of receive frames with CRC error." },
1566	{ "aAlignmentErrors",
1567	    "The number of receive frames with alignment error." },
1568	{ "aOctetsTransmittedOK",
1569	    "The lower 32 bits of the number of data and padding octets that "
1570	    "are successfully transmitted." },
1571	{ "aOctetsReceivedOK",
1572	    "The lower 32 bits of the number of data and padding octets that "
1573	    "are successfully received." },
1574	{ "aTxPAUSEMACCtrlFrames",
1575	    "The number of pause frames transmitted." },
1576	{ "aRxPAUSEMACCtrlFrames",
1577	    "The number of pause frames received." },
1578	{ "ifInErrors",
1579	    "The number of errored frames received." },
1580	{ "ifOutErrors",
1581	    "The number of transmit frames with either a FIFO overflow error, "
1582	    "a FIFO underflow error, or an error defined by the user "
1583	    "application." },
1584	{ "ifInUcastPkts",
1585	    "The number of valid unicast frames received." },
1586	{ "ifInMulticastPkts",
1587	    "The number of valid multicast frames received. The count does "
1588	    "not include pause frames." },
1589	{ "ifInBroadcastPkts",
1590	    "The number of valid broadcast frames received." },
1591	{ "ifOutDiscards",
1592	    "This statistics counter is not in use.  The MAC function does not "
1593	    "discard frames that are written to the FIFO buffer by the user "
1594	    "application." },
1595	{ "ifOutUcastPkts",
1596	    "The number of valid unicast frames transmitted." },
1597	{ "ifOutMulticastPkts",
1598	    "The number of valid multicast frames transmitted, excluding pause "
1599	    "frames." },
1600	{ "ifOutBroadcastPkts",
1601	    "The number of valid broadcast frames transmitted." },
1602	{ "etherStatsDropEvents",
1603	    "The number of frames that are dropped due to MAC internal errors "
1604	    "when FIFO buffer overflow persists." },
1605	{ "etherStatsOctets",
1606	    "The lower 32 bits of the total number of octets received. This "
1607	    "count includes both good and errored frames." },
1608	{ "etherStatsPkts",
1609	    "The total number of good and errored frames received." },
1610	{ "etherStatsUndersizePkts",
1611	    "The number of frames received with length less than 64 bytes. "
1612	    "This count does not include errored frames." },
1613	{ "etherStatsOversizePkts",
1614	    "The number of frames received that are longer than the value "
1615	    "configured in the frm_length register. This count does not "
1616	    "include errored frames." },
1617	{ "etherStatsPkts64Octets",
1618	    "The number of 64-byte frames received. This count includes good "
1619	    "and errored frames." },
1620	{ "etherStatsPkts65to127Octets",
1621	    "The number of received good and errored frames between the length "
1622	    "of 65 and 127 bytes." },
1623	{ "etherStatsPkts128to255Octets",
1624	    "The number of received good and errored frames between the length "
1625	    "of 128 and 255 bytes." },
1626	{ "etherStatsPkts256to511Octets",
1627	    "The number of received good and errored frames between the length "
1628	    "of 256 and 511 bytes." },
1629	{ "etherStatsPkts512to1023Octets",
1630	    "The number of received good and errored frames between the length "
1631	    "of 512 and 1023 bytes." },
1632	{ "etherStatsPkts1024to1518Octets",
1633	    "The number of received good and errored frames between the length "
1634	    "of 1024 and 1518 bytes." },
1635	{ "etherStatsPkts1519toXOctets",
1636	    "The number of received good and errored frames between the length "
1637	    "of 1519 and the maximum frame length configured in the frm_length "
1638	    "register." },
1639	{ "etherStatsJabbers",
1640	    "Too long frames with CRC error." },
1641	{ "etherStatsFragments",
1642	    "Too short frames with CRC error." },
1643	/* 0x39 unused, 0x3a/b non-stats. */
1644	[0x3c] =
1645	/* Extended Statistics Counters */
1646	{ "msb_aOctetsTransmittedOK",
1647	    "Upper 32 bits of the number of data and padding octets that are "
1648	    "successfully transmitted." },
1649	{ "msb_aOctetsReceivedOK",
1650	    "Upper 32 bits of the number of data and padding octets that are "
1651	    "successfully received." },
1652	{ "msb_etherStatsOctets",
1653	    "Upper 32 bits of the total number of octets received. This count "
1654	    "includes both good and errored frames." }
1655};
1656
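/*
 * Sysctl handler exporting a single (read-only) MAC statistics register;
 * arg2 is the register's dword offset, which equals its index in
 * atse_mac_stats_regs[].
 */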
1657static int
1658sysctl_atse_mac_stats_proc(SYSCTL_HANDLER_ARGS)
1659{
1660	struct atse_softc *sc;
1661	int error, offset, s;
1662
1663	sc = arg1;
1664	offset = arg2;
1665
1666	s = CSR_READ_4(sc, offset);
1667	error = sysctl_handle_int(oidp, &s, 0, req);
1668	if (error || !req->newptr)
1669		return (error);
1670
1671	return (0);
1672}
1673
1674static struct atse_rx_err_stats_regs {
1675	const char *name;
1676	const char *descr;
1677} atse_rx_err_stats_regs[] = {
1678
1679#define ATSE_RX_ERR_FIFO_THRES_EOP      0 /* FIFO threshold reached, on EOP. */
1680#define ATSE_RX_ERR_ELEN                1 /* Frame/payload length not valid. */
1681#define ATSE_RX_ERR_CRC32               2 /* CRC-32 error. */
1682#define ATSE_RX_ERR_FIFO_THRES_TRUNC    3 /* FIFO thresh., truncated frame. */
1683#define ATSE_RX_ERR_4                   4 /* ? */
1684#define ATSE_RX_ERR_5                   5 /* / */
1685
1686	{ "rx_err_fifo_thres_eop",
1687	    "FIFO threshold reached, reported on EOP." },
1688	{ "rx_err_fifo_elen",
1689	    "Frame or payload length not valid." },
1690	{ "rx_err_fifo_crc32",
1691	    "CRC-32 error." },
1692	{ "rx_err_fifo_thres_trunc",
1693	    "FIFO threshold reached, truncated frame." },
1694	{ "rx_err_4",
1695	    "?" },
1696	{ "rx_err_5",
1697	    "?" },
1698};
1699
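/*
 * Sysctl handler exporting one of the software-maintained RX error
 * counters; arg2 is the index into sc->atse_rx_err[].
 */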
1700static int
1701sysctl_atse_rx_err_stats_proc(SYSCTL_HANDLER_ARGS)
1702{
1703	struct atse_softc *sc;
1704	int error, offset, s;
1705
1706	sc = arg1;
1707	offset = arg2;
1708
1709	s = sc->atse_rx_err[offset];
1710	error = sysctl_handle_int(oidp, &s, 0, req);
1711	if (error || !req->newptr)
1712		return (error);
1713
1714	return (0);
1715}
1716
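/*
 * Attach a read-only sysctl node for every documented MAC statistics
 * register and for every RX error counter under the device's sysctl tree.
 */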
1717static void
1718atse_sysctl_stats_attach(device_t dev)
1719{
1720	struct sysctl_ctx_list *sctx;
1721	struct sysctl_oid *soid;
1722	struct atse_softc *sc;
1723	int i;
1724
1725	sc = device_get_softc(dev);
1726	sctx = device_get_sysctl_ctx(dev);
1727	soid = device_get_sysctl_tree(dev);
1728
1729	/* MAC statistics. */
1730	for (i = 0; i < sizeof(atse_mac_stats_regs) /
1731	    sizeof(*atse_mac_stats_regs); i++) {
1732		if (atse_mac_stats_regs[i].name == NULL ||
1733		    atse_mac_stats_regs[i].descr == NULL)
1734			continue;
1735
1736		SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
1737		    atse_mac_stats_regs[i].name, CTLTYPE_UINT|CTLFLAG_RD,
1738		    sc, i, sysctl_atse_mac_stats_proc, "IU",
1739		    atse_mac_stats_regs[i].descr);
1740	}
1741
1742	/* rx_err[]. */
1743	for (i = 0; i < ATSE_RX_ERR_MAX; i++) {
1744		if (atse_rx_err_stats_regs[i].name == NULL ||
1745		    atse_rx_err_stats_regs[i].descr == NULL)
1746			continue;
1747
1748		SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
1749		    atse_rx_err_stats_regs[i].name, CTLTYPE_UINT|CTLFLAG_RD,
1750		    sc, i, sysctl_atse_rx_err_stats_proc, "IU",
1751		    atse_rx_err_stats_regs[i].descr);
1752	}
1753}

/*
 * Generic device handling routines.
 */
int
atse_attach(device_t dev)
{
	struct atse_softc *sc;
	struct ifnet *ifp;
	int error;

	sc = device_get_softc(dev);

	atse_ethernet_option_bits_read(dev);

	mtx_init(&sc->atse_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	callout_init_mtx(&sc->atse_tick, &sc->atse_mtx, 0);

	sc->atse_tx_buf = malloc(ETHER_MAX_LEN_JUMBO, M_DEVBUF, M_WAITOK);

	/*
	 * This driver currently supports only a single PHY.  The reset
	 * defaults would be fine, with BASE_CFG_MDIO_ADDR0 pointing at the
	 * first PHY address (0), except that BMCR0 always maps to the PCS,
	 * so we always use BMCR1 instead.  See Table 5-1, 0xA0-0xBF.
	 */
#if 0	/* Always PCS. */
	sc->atse_bmcr0 = MDIO_0_START;
	CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR0, 0x00);
#endif
	/* Always use the matching PHY for atse[0..]. */
	sc->atse_phy_addr = device_get_unit(dev);
	sc->atse_bmcr1 = MDIO_1_START;
	CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR1, sc->atse_phy_addr);

	/* Reset the adapter. */
	atse_reset(sc);

	/* Setup interface. */
	ifp = sc->atse_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		error = ENOSPC;
		goto err;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = atse_ioctl;
	ifp->if_start = atse_start;
	ifp->if_init = atse_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ATSE_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = ATSE_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/* MII setup. */
	error = mii_attach(dev, &sc->atse_miibus, ifp, atse_ifmedia_upd,
	    atse_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHY failed: %d\n", error);
		goto err;
	}

	/* Call the media-independent attach routine. */
	ether_ifattach(ifp, sc->atse_eth_addr);

	/* Tell the upper layer(s) about VLAN MTU support. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	/* Enable polling by default if no IRQs are available; see below. */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Hook up interrupts. */
	if (sc->atse_rx_irq_res != NULL) {
		error = bus_setup_intr(dev, sc->atse_rx_irq_res, INTR_TYPE_NET |
		    INTR_MPSAFE, NULL, atse_rx_intr, sc, &sc->atse_rx_intrhand);
		if (error != 0) {
			device_printf(dev, "enabling RX IRQ failed\n");
			ether_ifdetach(ifp);
			goto err;
		}
	}

	if (sc->atse_tx_irq_res != NULL) {
		error = bus_setup_intr(dev, sc->atse_tx_irq_res, INTR_TYPE_NET |
		    INTR_MPSAFE, NULL, atse_tx_intr, sc, &sc->atse_tx_intrhand);
		if (error != 0) {
			bus_teardown_intr(dev, sc->atse_rx_irq_res,
			    sc->atse_rx_intrhand);
			device_printf(dev, "enabling TX IRQ failed\n");
			ether_ifdetach(ifp);
			goto err;
		}
	}

	if ((ifp->if_capenable & IFCAP_POLLING) != 0 ||
	    (sc->atse_rx_irq_res == NULL && sc->atse_tx_irq_res == NULL)) {
#ifdef DEVICE_POLLING
		/* If polling is not on and we have no IRQs, force it on. */
		if (sc->atse_rx_irq_res == NULL && sc->atse_tx_irq_res == NULL) {
			ifp->if_capenable |= IFCAP_POLLING;
			device_printf(dev, "forcing polling due to no "
			    "interrupts\n");
		}
		error = ether_poll_register(atse_poll, ifp);
		if (error != 0)
			goto err;
#else
		device_printf(dev, "no DEVICE_POLLING in kernel and no IRQs\n");
		error = ENXIO;
#endif
	} else {
		ATSE_RX_INTR_ENABLE(sc);
		ATSE_TX_INTR_ENABLE(sc);
	}

err:
	if (error != 0)
		atse_detach(dev);

	if (error == 0)
		atse_sysctl_stats_attach(dev);

	return (error);
}
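
/*
 * A minimal sketch of the expected calling order from a (hypothetical) bus
 * front-end attach routine: the front-end maps the MAC, FIFO and IRQ
 * resources into the softc first and only then hands over to the shared
 * code above; the function name is illustrative only:
 *
 *	static int
 *	atse_attach_fdt(device_t dev)
 *	{
 *
 *		... allocate atse_mem_res, the FIFO memory and the RX/TX
 *		    IRQ resources into the softc ...
 *
 *		return (atse_attach(dev));
 *	}
 */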

static int
atse_detach(device_t dev)
{
	struct atse_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->atse_mtx), ("%s: mutex not initialized",
	    device_get_nameunit(dev)));
	ifp = sc->atse_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* Only clean up if attach succeeded. */
	if (device_is_attached(dev)) {
		ATSE_LOCK(sc);
		atse_stop_locked(sc);
		ATSE_UNLOCK(sc);
		callout_drain(&sc->atse_tick);
		ether_ifdetach(ifp);
	}
	if (sc->atse_miibus != NULL)
		device_delete_child(dev, sc->atse_miibus);

	if (sc->atse_tx_intrhand)
		bus_teardown_intr(dev, sc->atse_tx_irq_res,
		    sc->atse_tx_intrhand);
	if (sc->atse_rx_intrhand)
		bus_teardown_intr(dev, sc->atse_rx_irq_res,
		    sc->atse_rx_intrhand);

	if (ifp != NULL)
		if_free(ifp);

	if (sc->atse_tx_buf != NULL)
		free(sc->atse_tx_buf, M_DEVBUF);

	mtx_destroy(&sc->atse_mtx);

	return (0);
}

/* Shared between the nexus and fdt implementations. */
void
atse_detach_resources(device_t dev)
{
	struct atse_softc *sc;

	sc = device_get_softc(dev);

	if (sc->atse_txc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_txc_mem_rid,
		    sc->atse_txc_mem_res);
		sc->atse_txc_mem_res = NULL;
	}
	if (sc->atse_tx_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_tx_mem_rid,
		    sc->atse_tx_mem_res);
		sc->atse_tx_mem_res = NULL;
	}
	if (sc->atse_tx_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->atse_tx_irq_rid,
		    sc->atse_tx_irq_res);
		sc->atse_tx_irq_res = NULL;
	}
	if (sc->atse_rxc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_rxc_mem_rid,
		    sc->atse_rxc_mem_res);
		sc->atse_rxc_mem_res = NULL;
	}
	if (sc->atse_rx_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_rx_mem_rid,
		    sc->atse_rx_mem_res);
		sc->atse_rx_mem_res = NULL;
	}
	if (sc->atse_rx_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->atse_rx_irq_rid,
		    sc->atse_rx_irq_res);
		sc->atse_rx_irq_res = NULL;
	}
	if (sc->atse_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_mem_rid,
		    sc->atse_mem_res);
		sc->atse_mem_res = NULL;
	}
}

int
atse_detach_dev(device_t dev)
{
	int error;

	error = atse_detach(dev);
	if (error) {
		/* We are basically in an undefined state now. */
		device_printf(dev, "atse_detach() failed: %d\n", error);
		return (error);
	}

	atse_detach_resources(dev);

	return (0);
}
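
/*
 * The matching (hypothetical) front-end detach method would simply wrap the
 * shared routines above; a minimal sketch, assuming the front-end made no
 * additional allocations of its own:
 *
 *	static int
 *	atse_detach_fdt(device_t dev)
 *	{
 *
 *		return (atse_detach_dev(dev));
 *	}
 */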

int
atse_miibus_readreg(device_t dev, int phy, int reg)
{
	struct atse_softc *sc;

	sc = device_get_softc(dev);

	/*
	 * We currently do not support re-mapping of the MDIO space on the
	 * fly but effectively hard-code the PHY address.
	 */
	if (phy != sc->atse_phy_addr)
		return (0);

	return (PHY_READ_2(sc, reg));
}

int
atse_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct atse_softc *sc;

	sc = device_get_softc(dev);

	/*
	 * We currently do not support re-mapping of the MDIO space on the
	 * fly but effectively hard-code the PHY address.
	 */
	if (phy != sc->atse_phy_addr)
		return (0);

	PHY_WRITE_2(sc, reg, data);
	return (0);
}

void
atse_miibus_statchg(device_t dev)
{
	struct atse_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t val4;

	sc = device_get_softc(dev);
	ATSE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->atse_miibus);
	ifp = sc->atse_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);

	/* Assume no link. */
	sc->atse_flags &= ~ATSE_FLAGS_LINK;

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {

		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
			val4 |= BASE_CFG_COMMAND_CONFIG_ENA_10;
			val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
			sc->atse_flags |= ATSE_FLAGS_LINK;
			break;
		case IFM_100_TX:
			val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
			val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
			sc->atse_flags |= ATSE_FLAGS_LINK;
			break;
		case IFM_1000_T:
			val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
			val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
			sc->atse_flags |= ATSE_FLAGS_LINK;
			break;
		default:
			break;
		}
	}

	if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
		/* XXX-BZ need to stop the MAC? */
		return;
	}

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val4 &= ~BASE_CFG_COMMAND_CONFIG_HD_ENA;
	else
		val4 |= BASE_CFG_COMMAND_CONFIG_HD_ENA;
	/* XXX-BZ flow control? */

	/* Make sure the MAC is activated. */
	val4 |= BASE_CFG_COMMAND_CONFIG_TX_ENA;
	val4 |= BASE_CFG_COMMAND_CONFIG_RX_ENA;

	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
}
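
/*
 * The MII accessors above are hooked up through the miibus interface in the
 * bus front-end's method table.  A minimal sketch, assuming the usual
 * DEVMETHOD() glue; the table name is illustrative only:
 *
 *	static device_method_t atse_fdt_methods[] = {
 *		...
 *		DEVMETHOD(miibus_readreg,	atse_miibus_readreg),
 *		DEVMETHOD(miibus_writereg,	atse_miibus_writereg),
 *		DEVMETHOD(miibus_statchg,	atse_miibus_statchg),
 *		DEVMETHOD_END
 *	};
 */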

/* end */
