1/*	$NetBSD: pq3etsec.c,v 1.60 2024/06/29 12:11:11 riastradh Exp $	*/
2/*-
3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
8 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
9 *
10 * This material is based upon work supported by the Defense Advanced Research
11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
12 * Contract No. N66001-09-C-2073.
13 * Approved for Public Release, Distribution Unlimited
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 *    notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 *    notice, this list of conditions and the following disclaimer in the
22 *    documentation and/or other materials provided with the distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <sys/cdefs.h>
38__KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.60 2024/06/29 12:11:11 riastradh Exp $");
39
40#ifdef _KERNEL_OPT
41#include "opt_inet.h"
42#include "opt_mpc85xx.h"
43#include "opt_multiprocessor.h"
44#include "opt_net_mpsafe.h"
45#endif
46
47#include <sys/param.h>
48#include <sys/cpu.h>
49#include <sys/device.h>
50#include <sys/mbuf.h>
51#include <sys/ioctl.h>
52#include <sys/intr.h>
53#include <sys/bus.h>
54#include <sys/kernel.h>
55#include <sys/kmem.h>
56#include <sys/proc.h>
57#include <sys/atomic.h>
58#include <sys/callout.h>
59#include <sys/sysctl.h>
60
61#include <sys/rndsource.h>
62
63#include <net/if.h>
64#include <net/if_dl.h>
65#include <net/if_ether.h>
66#include <net/if_media.h>
67#include <net/bpf.h>
68
69#include <dev/mii/miivar.h>
70
71#ifdef INET
72#include <netinet/in.h>
73#include <netinet/in_systm.h>
74#include <netinet/ip.h>
75#include <netinet/in_offload.h>
76#endif /* INET */
77#ifdef INET6
78#include <netinet6/in6.h>
79#include <netinet/ip6.h>
80#endif
81#include <netinet6/in6_offload.h>
82
83#include <powerpc/spr.h>
84#include <powerpc/booke/spr.h>
85#include <powerpc/booke/cpuvar.h>
86#include <powerpc/booke/e500var.h>
87#include <powerpc/booke/e500reg.h>
88#include <powerpc/booke/etsecreg.h>
89
90#define	M_HASFCB		M_LINK2	/* tx packet has FCB prepended */
91
92#define	ETSEC_MAXTXMBUFS	30
93#define	ETSEC_NTXSEGS		30
94#define	ETSEC_MAXRXMBUFS	511
95#define	ETSEC_MINRXMBUFS	32
96#define	ETSEC_NRXSEGS		1
97
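/*
 * Map the interface checksum capabilities (IFCAP_CSUM_*) onto the RCTRL
 * and TCTRL checksum-enable bits they correspond to.
 */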
98#define	IFCAP_RCTRL_IPCSEN	IFCAP_CSUM_IPv4_Rx
99#define	IFCAP_RCTRL_TUCSEN	(IFCAP_CSUM_TCPv4_Rx	\
100				 | IFCAP_CSUM_UDPv4_Rx	\
101				 | IFCAP_CSUM_TCPv6_Rx	\
102				 | IFCAP_CSUM_UDPv6_Rx)
103
104#define	IFCAP_TCTRL_IPCSEN	IFCAP_CSUM_IPv4_Tx
105#define	IFCAP_TCTRL_TUCSEN	(IFCAP_CSUM_TCPv4_Tx	\
106				 | IFCAP_CSUM_UDPv4_Tx	\
107				 | IFCAP_CSUM_TCPv6_Tx	\
108				 | IFCAP_CSUM_UDPv6_Tx)
109
110#define	IFCAP_ETSEC		(IFCAP_RCTRL_IPCSEN | IFCAP_RCTRL_TUCSEN      \
111				 | IFCAP_TCTRL_IPCSEN | IFCAP_TCTRL_TUCSEN)
112
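/*
 * Local shorthand for groups of mbuf-level checksum flags used by the
 * checksum-offload code below.
 */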
113#define	M_CSUM_IP   (M_CSUM_CIP | M_CSUM_CTU)
114#define	M_CSUM_IP6  (M_CSUM_TCPv6 | M_CSUM_UDPv6)
115#define	M_CSUM_TUP  (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6)
116#define	M_CSUM_UDP  (M_CSUM_UDPv4 | M_CSUM_UDPv6)
117#define	M_CSUM_IP4  (M_CSUM_IPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)
118#define	M_CSUM_CIP  (M_CSUM_IPv4)
119#define	M_CSUM_CTU  (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6)
120
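/*
 * Per-queue transmit state: a ring of tx buffer descriptors shared with
 * the controller (the producer/consumer pointers wrap at txq_last) plus
 * the mbufs currently queued on it.
 */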
121struct pq3etsec_txqueue {
122	bus_dmamap_t txq_descmap;
123	volatile struct txbd *txq_consumer;
124	volatile struct txbd *txq_producer;
125	volatile struct txbd *txq_first;
126	volatile struct txbd *txq_last;
127	struct ifqueue txq_mbufs;
128	struct mbuf *txq_next;
129#ifdef ETSEC_DEBUG
130	struct mbuf *txq_lmbufs[512];
131#endif
132	uint32_t txq_qmask;
133	uint32_t txq_free;
134	uint32_t txq_threshold;
135	uint32_t txq_lastintr;
136	bus_size_t txq_reg_tbase;
137	bus_dma_segment_t txq_descmap_seg;
138};
139
140struct pq3etsec_rxqueue {
141	bus_dmamap_t rxq_descmap;
142	volatile struct rxbd *rxq_consumer;
143	volatile struct rxbd *rxq_producer;
144	volatile struct rxbd *rxq_first;
145	volatile struct rxbd *rxq_last;
146	struct mbuf *rxq_mhead;
147	struct mbuf **rxq_mtail;
148	struct mbuf *rxq_mconsumer;
149#ifdef ETSEC_DEBUG
150	struct mbuf *rxq_mbufs[512];
151#endif
152	uint32_t rxq_qmask;
153	uint32_t rxq_inuse;
154	uint32_t rxq_threshold;
155	bus_size_t rxq_reg_rbase;
156	bus_size_t rxq_reg_rbptr;
157	bus_dma_segment_t rxq_descmap_seg;
158};
159
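/*
 * A simple LIFO cache of preallocated DMA maps; dmc_maps[] is sized at
 * allocation time (see pq3etsec_mapcache_create) and maps are pushed
 * and popped with pq3etsec_mapcache_put/get.
 */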
160struct pq3etsec_mapcache {
161	u_int dmc_nmaps;
162	u_int dmc_maxseg;
163	u_int dmc_maxmaps;
164	u_int dmc_maxmapsize;
165	bus_dmamap_t dmc_maps[0];
166};
167
168struct pq3etsec_softc {
169	device_t sc_dev;
170	device_t sc_mdio_dev;
171	struct ethercom sc_ec;
172#define sc_if		sc_ec.ec_if
173	struct mii_data sc_mii;
174	bus_space_tag_t sc_bst;
175	bus_space_handle_t sc_bsh;
176	bus_space_handle_t sc_mdio_bsh;
177	bus_dma_tag_t sc_dmat;
178	int sc_phy_addr;
179	prop_dictionary_t sc_intrmap;
180	uint32_t sc_intrmask;
181
182	uint32_t sc_soft_flags;
183#define	SOFT_RESET		0x0001
184#define	SOFT_RXINTR		0x0010
185#define	SOFT_RXBSY		0x0020
186#define	SOFT_TXINTR		0x0100
187#define	SOFT_TXERROR		0x0200
188
189	struct pq3etsec_txqueue sc_txq;
190	struct pq3etsec_rxqueue sc_rxq;
191	uint32_t sc_txerrors;
192	uint32_t sc_rxerrors;
193
194	size_t sc_rx_adjlen;
195
196	/*
197	 * Copies of various ETSEC registers.
198	 */
199	uint32_t sc_imask;
200	uint32_t sc_maccfg1;
201	uint32_t sc_maccfg2;
202	uint32_t sc_maxfrm;
203	uint32_t sc_ecntrl;
204	uint32_t sc_dmactrl;
205	uint32_t sc_macstnaddr1;
206	uint32_t sc_macstnaddr2;
207	uint32_t sc_tctrl;
208	uint32_t sc_rctrl;
209	uint32_t sc_gaddr[16];
210	uint64_t sc_macaddrs[15];
211
212	void *sc_tx_ih;
213	void *sc_rx_ih;
214	void *sc_error_ih;
215	void *sc_soft_ih;
216
217	kmutex_t *sc_lock;
218	kmutex_t *sc_hwlock;
219
220	struct evcnt sc_ev_tx_stall;
221	struct evcnt sc_ev_tx_intr;
222	struct evcnt sc_ev_rx_stall;
223	struct evcnt sc_ev_rx_intr;
224	struct evcnt sc_ev_error_intr;
225	struct evcnt sc_ev_soft_intr;
226	struct evcnt sc_ev_tx_pause;
227	struct evcnt sc_ev_rx_pause;
228	struct evcnt sc_ev_mii_ticks;
229
230	struct callout sc_mii_callout;
231	uint64_t sc_mii_last_tick;
232
233	struct ifqueue sc_rx_bufcache;
234	struct pq3etsec_mapcache *sc_rx_mapcache;
235	struct pq3etsec_mapcache *sc_tx_mapcache;
236
237	/* Interrupt Coalescing parameters */
238	int sc_ic_rx_time;
239	int sc_ic_rx_count;
240	int sc_ic_tx_time;
241	int sc_ic_tx_count;
242
243	krndsource_t rnd_source;
244};
245
246#define	ETSEC_IC_RX_ENABLED(sc)						\
247	((sc)->sc_ic_rx_time != 0 && (sc)->sc_ic_rx_count != 0)
248#define	ETSEC_IC_TX_ENABLED(sc)						\
249	((sc)->sc_ic_tx_time != 0 && (sc)->sc_ic_tx_count != 0)
250
251struct pq3mdio_softc {
252	device_t mdio_dev;
253
254	bus_space_tag_t mdio_bst;
255	bus_space_handle_t mdio_bsh;
256};
257
258static int pq3etsec_match(device_t, cfdata_t, void *);
259static void pq3etsec_attach(device_t, device_t, void *);
260
261static int pq3mdio_match(device_t, cfdata_t, void *);
262static void pq3mdio_attach(device_t, device_t, void *);
263
264static void pq3etsec_ifstart(struct ifnet *);
265static void pq3etsec_ifwatchdog(struct ifnet *);
266static int pq3etsec_ifinit(struct ifnet *);
267static void pq3etsec_ifstop(struct ifnet *, int);
268static int pq3etsec_ifioctl(struct ifnet *, u_long, void *);
269
270static int pq3etsec_mapcache_create(struct pq3etsec_softc *,
271    struct pq3etsec_mapcache **, size_t, size_t, size_t);
272static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *,
273    struct pq3etsec_mapcache *);
274static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *,
275    struct pq3etsec_mapcache *);
276static void pq3etsec_mapcache_put(struct pq3etsec_softc *,
277    struct pq3etsec_mapcache *, bus_dmamap_t);
278
279static int pq3etsec_txq_attach(struct pq3etsec_softc *,
280    struct pq3etsec_txqueue *, u_int);
281static void pq3etsec_txq_purge(struct pq3etsec_softc *,
282    struct pq3etsec_txqueue *);
283static void pq3etsec_txq_reset(struct pq3etsec_softc *,
284    struct pq3etsec_txqueue *);
285static bool pq3etsec_txq_consume(struct pq3etsec_softc *,
286    struct pq3etsec_txqueue *);
287static bool pq3etsec_txq_produce(struct pq3etsec_softc *,
288    struct pq3etsec_txqueue *, struct mbuf *m);
289static bool pq3etsec_txq_active_p(struct pq3etsec_softc *,
290    struct pq3etsec_txqueue *);
291
292static int pq3etsec_rxq_attach(struct pq3etsec_softc *,
293    struct pq3etsec_rxqueue *, u_int);
294static bool pq3etsec_rxq_produce(struct pq3etsec_softc *,
295    struct pq3etsec_rxqueue *);
296static void pq3etsec_rxq_purge(struct pq3etsec_softc *,
297    struct pq3etsec_rxqueue *, bool);
298static void pq3etsec_rxq_reset(struct pq3etsec_softc *,
299    struct pq3etsec_rxqueue *);
300
301static void pq3etsec_mc_setup(struct pq3etsec_softc *);
302
303static void pq3etsec_mii_tick(void *);
304static int pq3etsec_rx_intr(void *);
305static int pq3etsec_tx_intr(void *);
306static int pq3etsec_error_intr(void *);
307static void pq3etsec_soft_intr(void *);
308
309static void pq3etsec_set_ic_rx(struct pq3etsec_softc *);
310static void pq3etsec_set_ic_tx(struct pq3etsec_softc *);
311
312static void pq3etsec_sysctl_setup(struct sysctllog **, struct pq3etsec_softc *);
313
314CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc),
315    pq3etsec_match, pq3etsec_attach, NULL, NULL);
316
317CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc),
318    pq3mdio_match, pq3mdio_attach, NULL, NULL);
319
320CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc),
321    pq3mdio_match, pq3mdio_attach, NULL, NULL);
322
323static inline uint32_t
324etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off)
325{
326	return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off);
327}
328
329static inline void
330etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data)
331{
332	bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data);
333}
334
335static inline uint32_t
336etsec_read(struct pq3etsec_softc *sc, bus_size_t off)
337{
338	return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
339}
340
341static int
342pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
343{
344	return strcmp(cf->cf_name, "mdio") == 0;
345}
346
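/*
 * Where the MDIO registers live depends on the SoC: on the P1025/P1016
 * they are a separate cpunode resource, while on the other supported
 * parts they are reached through the parent tsec's register mapping
 * (see pq3mdio_attach below).
 */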
347static int
348pq3mdio_match(device_t parent, cfdata_t cf, void *aux)
349{
350	const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16;
351	const bool p1025_p = (svr == (SVR_P1025v1 >> 16)
352	    || svr == (SVR_P1016v1 >> 16));
353
354	if (device_is_a(parent, "cpunode")) {
355		if (!p1025_p
356		    || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
357			return 0;
358
359		return 1;
360	}
361
362	if (device_is_a(parent, "tsec")) {
363		if (p1025_p
364		    || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
365			return 0;
366
367		return 1;
368	}
369
370	return 0;
371}
372
373static void
374pq3mdio_attach(device_t parent, device_t self, void *aux)
375{
376	struct pq3mdio_softc * const mdio = device_private(self);
377	struct cpunode_attach_args * const cna = aux;
378	struct cpunode_locators * const cnl = &cna->cna_locs;
379
380	mdio->mdio_dev = self;
381
382	if (device_is_a(parent, "cpunode")) {
383		struct cpunode_softc * const psc = device_private(parent);
384		psc->sc_children |= cna->cna_childmask;
385
386		mdio->mdio_bst = cna->cna_memt;
387		if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr,
388				cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) {
389			aprint_error(": error mapping registers @ %#x\n",
390			    cnl->cnl_addr);
391			return;
392		}
393	} else {
394		struct pq3etsec_softc * const sc = device_private(parent);
395
396		KASSERT(device_is_a(parent, "tsec"));
397		KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE
398		    || cnl->cnl_addr == ETSEC2_BASE
399		    || cnl->cnl_addr == ETSEC3_BASE
400		    || cnl->cnl_addr == ETSEC4_BASE,
401		    "unknown tsec addr %x", cnl->cnl_addr);
402
403		mdio->mdio_bst = sc->sc_bst;
404		mdio->mdio_bsh = sc->sc_bsh;
405	}
406
407	aprint_normal("\n");
408}
409
410static int
411pq3mdio_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
412{
413	struct pq3mdio_softc * const mdio = device_private(self);
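	/*
	 * Save MIIMCOM so that continuous scan mode, if it was active,
	 * can be restored once the read completes.
	 */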
414	uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);
415
416	etsec_mdio_write(mdio, MIIMADD,
417	    __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
418
419	etsec_mdio_write(mdio, MIIMCOM, 0);	/* clear any past bits */
420	etsec_mdio_write(mdio, MIIMCOM, MIIMCOM_READ);
421
	while (etsec_mdio_read(mdio, MIIMIND) != 0) {
		delay(1);
	}
	*val = etsec_mdio_read(mdio, MIIMSTAT) & 0xffff;
426
427	if (miimcom == MIIMCOM_SCAN)
428		etsec_mdio_write(mdio, MIIMCOM, miimcom);
429
430#if 0
431	aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
	    __func__, phy, reg, *val);
433#endif
434	return 0;
435}
436
437static int
438pq3mdio_mii_writereg(device_t self, int phy, int reg, uint16_t data)
439{
440	struct pq3mdio_softc * const mdio = device_private(self);
441	uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);
442
443#if 0
444	aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
445	    __func__, phy, reg, data);
446#endif
447
448	etsec_mdio_write(mdio, MIIMADD,
449	    __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
450	etsec_mdio_write(mdio, MIIMCOM, 0);	/* clear any past bits */
451	etsec_mdio_write(mdio, MIIMCON, data);
452
453	int timo = 1000;	/* 1ms */
	while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) {
		delay(1);
	}
457
458	if (miimcom == MIIMCOM_SCAN)
459		etsec_mdio_write(mdio, MIIMCOM, miimcom);
460
461	return 0;
462}
463
464static inline void
465etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data)
466{
467	bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data);
468}
469
470static void
471pq3etsec_mii_statchg(struct ifnet *ifp)
472{
473	struct pq3etsec_softc * const sc = ifp->if_softc;
474	struct mii_data * const mii = &sc->sc_mii;
475
476	uint32_t maccfg1 = sc->sc_maccfg1;
477	uint32_t maccfg2 = sc->sc_maccfg2;
478	uint32_t ecntrl = sc->sc_ecntrl;
479
480	maccfg1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
481	maccfg2 &= ~(MACCFG2_IFMODE | MACCFG2_FD);
482
483	if (sc->sc_mii.mii_media_active & IFM_FDX) {
484		maccfg2 |= MACCFG2_FD;
485	}
486
487	/*
488	 * Now deal with the flow control bits.
489	 */
490	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO
491	    && (mii->mii_media_active & IFM_ETH_FMASK)) {
492		if (mii->mii_media_active & IFM_ETH_RXPAUSE)
493			maccfg1 |= MACCFG1_RX_FLOW;
494		if (mii->mii_media_active & IFM_ETH_TXPAUSE)
495			maccfg1 |= MACCFG1_TX_FLOW;
496	}
497
498	/*
499	 * Now deal with the speed.
500	 */
501	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
502		maccfg2 |= MACCFG2_IFMODE_GMII;
503	} else {
504		maccfg2 |= MACCFG2_IFMODE_MII;
505		ecntrl &= ~ECNTRL_R100M;
506		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) {
507			ecntrl |= ECNTRL_R100M;
508		}
509	}
510
511	/*
512	 * If things are different, re-init things.
513	 */
514	if (maccfg1 != sc->sc_maccfg1
515	    || maccfg2 != sc->sc_maccfg2
516	    || ecntrl != sc->sc_ecntrl) {
517		if (sc->sc_if.if_flags & IFF_RUNNING)
518			atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET);
519		sc->sc_maccfg1 = maccfg1;
520		sc->sc_maccfg2 = maccfg2;
521		sc->sc_ecntrl = ecntrl;
522	}
523}
524
525#if 0
526static void
527pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
528{
529	struct pq3etsec_softc * const sc = ifp->if_softc;
530
531	mii_pollstat(&sc->sc_mii);
532	ether_mediastatus(ifp, ifmr);
533	ifmr->ifm_status = sc->sc_mii.mii_media_status;
534	ifmr->ifm_active = sc->sc_mii.mii_media_active;
535}
536
537static int
538pq3etsec_mediachange(struct ifnet *ifp)
539{
540	struct pq3etsec_softc * const sc = ifp->if_softc;
541
542	if ((ifp->if_flags & IFF_UP) == 0)
543		return 0;
544
545	int rv = mii_mediachg(&sc->sc_mii);
546	return (rv == ENXIO) ? 0 : rv;
547}
548#endif
549
550static int
551pq3etsec_match(device_t parent, cfdata_t cf, void *aux)
552{
553
554	if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
555		return 0;
556
557	return 1;
558}
559
560static void
561pq3etsec_attach(device_t parent, device_t self, void *aux)
562{
563	struct cpunode_softc * const psc = device_private(parent);
564	struct pq3etsec_softc * const sc = device_private(self);
565	struct mii_data * const mii = &sc->sc_mii;
566	struct cpunode_attach_args * const cna = aux;
567	struct cpunode_locators * const cnl = &cna->cna_locs;
568	cfdata_t cf = device_cfdata(self);
569	int error;
570
571	psc->sc_children |= cna->cna_childmask;
572	sc->sc_dev = self;
573	sc->sc_bst = cna->cna_memt;
574	sc->sc_dmat = &booke_bus_dma_tag;
575
576	/*
577	 * Pull out the mdio bus and phy we are supposed to use.
578	 */
579	const int mdio = cf->cf_loc[CPUNODECF_MDIO];
580	const int phy = cf->cf_loc[CPUNODECF_PHY];
581	if (mdio != CPUNODECF_MDIO_DEFAULT)
582		aprint_normal(" mdio %d", mdio);
583
584	/*
585	 * See if the phy is in the config file...
586	 */
587	if (phy != CPUNODECF_PHY_DEFAULT) {
588		sc->sc_phy_addr = phy;
589	} else {
		char prop_name[20];
591		snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr",
592		    cnl->cnl_instance);
593		sc->sc_phy_addr = board_info_get_number(prop_name);
594	}
595	if (sc->sc_phy_addr != MII_PHY_ANY)
596		aprint_normal(" phy %d", sc->sc_phy_addr);
597
598	error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0,
599	    &sc->sc_bsh);
600	if (error) {
601		aprint_error(": error mapping registers: %d\n", error);
602		return;
603	}
604
605	/*
	 * Assume the firmware has already set the MAC address and fetch it
607	 * before we reinit it.
608	 */
609	sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2);
610	sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1);
611	sc->sc_rctrl = RCTRL_DEFAULT;
612	sc->sc_ecntrl = etsec_read(sc, ECNTRL);
613	sc->sc_maccfg1 = etsec_read(sc, MACCFG1);
614	sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT;
615
616	if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) {
617		size_t len;
618		const uint8_t *mac_addr =
619		    board_info_get_data("tsec-mac-addr-base", &len);
620		KASSERT(len == ETHER_ADDR_LEN);
621		sc->sc_macstnaddr2 =
622		    (mac_addr[1] << 24)
623		    | (mac_addr[0] << 16);
624		sc->sc_macstnaddr1 =
625		    ((mac_addr[5] + cnl->cnl_instance - 1) << 24)
626		    | (mac_addr[4] << 16)
627		    | (mac_addr[3] << 8)
628		    | (mac_addr[2] << 0);
629#if 0
630		aprint_error(": mac-address unknown\n");
631		return;
632#endif
633	}
634
635	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
636	sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);
637
638	callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE);
639	callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc);
640
641	/* Disable interrupts */
642	etsec_write(sc, IMASK, 0);
643
644	error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0);
645	if (error) {
646		aprint_error(": failed to init rxq: %d\n", error);
647		goto fail_1;
648	}
649
650	error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0);
651	if (error) {
652		aprint_error(": failed to init txq: %d\n", error);
653		goto fail_2;
654	}
655
656	error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache,
657	    ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS);
658	if (error) {
659		aprint_error(": failed to allocate rx dmamaps: %d\n", error);
660		goto fail_3;
661	}
662
663	error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
664	    ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS);
665	if (error) {
666		aprint_error(": failed to allocate tx dmamaps: %d\n", error);
667		goto fail_4;
668	}
669
670	sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP,
671	    pq3etsec_tx_intr, sc);
672	if (sc->sc_tx_ih == NULL) {
673		aprint_error(": failed to establish tx interrupt: %d\n",
674		    cnl->cnl_intrs[0]);
675		goto fail_5;
676	}
677
678	sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP,
679	    pq3etsec_rx_intr, sc);
680	if (sc->sc_rx_ih == NULL) {
681		aprint_error(": failed to establish rx interrupt: %d\n",
682		    cnl->cnl_intrs[1]);
683		goto fail_6;
684	}
685
686	sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP,
687	    pq3etsec_error_intr, sc);
688	if (sc->sc_error_ih == NULL) {
689		aprint_error(": failed to establish error interrupt: %d\n",
690		    cnl->cnl_intrs[2]);
691		goto fail_7;
692	}
693
694	int softint_flags = SOFTINT_NET;
695#if !defined(MULTIPROCESSOR) || defined(NET_MPSAFE)
696	softint_flags |= SOFTINT_MPSAFE;
697#endif	/* !MULTIPROCESSOR || NET_MPSAFE */
698	sc->sc_soft_ih = softint_establish(softint_flags,
699	    pq3etsec_soft_intr, sc);
700	if (sc->sc_soft_ih == NULL) {
701		aprint_error(": failed to establish soft interrupt\n");
702		goto fail_8;
703	}
704
	/*
	 * If no mdio locator was specified, search for and attach a child
	 * mdio device; otherwise look up the already-attached mdio device
	 * by its unit number.
	 */
708	if (mdio == CPUNODECF_MDIO_DEFAULT) {
709		aprint_normal("\n");
710		cfdata_t mdio_cf = config_search(self, cna,
711		    CFARGS(.submatch = pq3mdio_find));
712		if (mdio_cf != NULL) {
713			sc->sc_mdio_dev =
714			    config_attach(self, mdio_cf, cna, NULL, CFARGS_NONE);
715		}
716	} else {
717		sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio);
718		if (sc->sc_mdio_dev == NULL) {
719			aprint_error(": failed to locate mdio device\n");
720			goto fail_9;
721		}
722		aprint_normal("\n");
723	}
724
725	etsec_write(sc, ATTR, ATTR_DEFAULT);
726	etsec_write(sc, ATTRELI, ATTRELI_DEFAULT);
727
728	/* Enable interrupt coalescing */
729	sc->sc_ic_rx_time = 768;
730	sc->sc_ic_rx_count = 16;
731	sc->sc_ic_tx_time = 768;
732	sc->sc_ic_tx_count = 16;
733	pq3etsec_set_ic_rx(sc);
734	pq3etsec_set_ic_tx(sc);
735
736	char enaddr[ETHER_ADDR_LEN] = {
737	    [0] = sc->sc_macstnaddr2 >> 16,
738	    [1] = sc->sc_macstnaddr2 >> 24,
739	    [2] = sc->sc_macstnaddr1 >>	 0,
740	    [3] = sc->sc_macstnaddr1 >>	 8,
741	    [4] = sc->sc_macstnaddr1 >> 16,
742	    [5] = sc->sc_macstnaddr1 >> 24,
743	};
744	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
745	   ether_sprintf(enaddr));
746
747	const char * const xname = device_xname(sc->sc_dev);
748	struct ethercom * const ec = &sc->sc_ec;
749	struct ifnet * const ifp = &ec->ec_if;
750
751	ec->ec_mii = mii;
752
753	mii->mii_ifp = ifp;
754	mii->mii_readreg = pq3mdio_mii_readreg;
755	mii->mii_writereg = pq3mdio_mii_writereg;
756	mii->mii_statchg = pq3etsec_mii_statchg;
757
758	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
759
760	if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) {
761		mii_attach(sc->sc_mdio_dev, mii, 0xffffffff,
762		    sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
763
764		if (LIST_FIRST(&mii->mii_phys) == NULL) {
765			ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE,
766			    0, NULL);
767			ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
768		} else {
769			callout_schedule(&sc->sc_mii_callout, hz);
770			ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
771		}
772	} else {
773		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX,
774		    0, NULL);
775		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX);
776	}
777
778	ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
779	    | ETHERCAP_JUMBO_MTU;
780	ec->ec_capenable = ETHERCAP_VLAN_HWTAGGING;
781
782	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
783	ifp->if_softc = sc;
784	ifp->if_capabilities = IFCAP_ETSEC;
785	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
786	ifp->if_ioctl = pq3etsec_ifioctl;
787	ifp->if_start = pq3etsec_ifstart;
788	ifp->if_watchdog = pq3etsec_ifwatchdog;
789	ifp->if_init = pq3etsec_ifinit;
790	ifp->if_stop = pq3etsec_ifstop;
791	IFQ_SET_READY(&ifp->if_snd);
792
793	/*
794	 * Attach the interface.
795	 */
796	if_initialize(ifp);
797	pq3etsec_sysctl_setup(NULL, sc);
798	if_attach(ifp);
799	if_deferred_start_init(ifp, NULL);
800	ether_ifattach(ifp, enaddr);
801
802	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
803	    RND_FLAG_DEFAULT);
804
805	pq3etsec_ifstop(ifp, true);
806
807	evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC,
808	    NULL, xname, "rx stall");
809	evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
810	    NULL, xname, "tx stall");
811	evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR,
812	    NULL, xname, "tx intr");
813	evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR,
814	    NULL, xname, "rx intr");
815	evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR,
816	    NULL, xname, "error intr");
817	evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
818	    NULL, xname, "soft intr");
819	evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC,
820	    NULL, xname, "tx pause");
821	evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC,
822	    NULL, xname, "rx pause");
823	evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC,
824	    NULL, xname, "mii ticks");
825	return;
826
827fail_9:
828	softint_disestablish(sc->sc_soft_ih);
829fail_8:
830	intr_disestablish(sc->sc_error_ih);
831fail_7:
832	intr_disestablish(sc->sc_rx_ih);
833fail_6:
834	intr_disestablish(sc->sc_tx_ih);
835fail_5:
836	pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
837fail_4:
838	pq3etsec_mapcache_destroy(sc, sc->sc_rx_mapcache);
839fail_3:
840#if 0 /* notyet */
841	pq3etsec_txq_detach(sc);
842#endif
843fail_2:
844#if 0 /* notyet */
845	pq3etsec_rxq_detach(sc);
846#endif
847fail_1:
848	callout_destroy(&sc->sc_mii_callout);
849	mutex_obj_free(sc->sc_lock);
850	mutex_obj_free(sc->sc_hwlock);
851	bus_space_unmap(sc->sc_bst, sc->sc_bsh, cnl->cnl_size);
852}
853
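/*
 * Pack the 6 octets into a 64-bit value laid out the way the eTSEC
 * station address registers expect: shifted into bits 63..16 with the
 * octets in reverse order (lladdr[5] in the most significant byte), so
 * that the upper word is MACSTNADDR1 and the lower word is MACSTNADDR2.
 */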
854static uint64_t
855pq3etsec_macaddr_create(const uint8_t *lladdr)
856{
857	uint64_t macaddr = 0;
858
859	lladdr += ETHER_ADDR_LEN;
860	for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) {
861		macaddr = (macaddr << 8) | *--lladdr;
862	}
863	return macaddr << 16;
864}
865
866static int
867pq3etsec_ifinit(struct ifnet *ifp)
868{
869	struct pq3etsec_softc * const sc = ifp->if_softc;
870	int error = 0;
871
872	sc->sc_maxfrm = uimax(ifp->if_mtu + 32, MCLBYTES);
873	if (ifp->if_mtu > ETHERMTU_JUMBO)
874		return error;
875
876	KASSERT(ifp->if_flags & IFF_UP);
877
878	/*
879	 * Stop the interface (steps 1 to 4 in the Soft Reset and
	 * Reconfiguring Procedure).
881	 */
882	pq3etsec_ifstop(ifp, 0);
883
884	/*
885	 * If our frame size has changed (or it's our first time through)
886	 * destroy the existing transmit mapcache.
887	 */
888	if (sc->sc_tx_mapcache != NULL
889	    && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
890		pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
891		sc->sc_tx_mapcache = NULL;
892	}
893
894	if (sc->sc_tx_mapcache == NULL) {
895		error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
896		    ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS);
897		if (error)
898			return error;
899	}
900
901	sc->sc_ev_mii_ticks.ev_count++;
902	mii_tick(&sc->sc_mii);
903
904	if (ifp->if_flags & IFF_PROMISC) {
905		sc->sc_rctrl |= RCTRL_PROM;
906	} else {
907		sc->sc_rctrl &= ~RCTRL_PROM;
908	}
909
910	uint32_t rctrl_prsdep = 0;
911	sc->sc_rctrl &=
912	    ~(RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_VLEX | RCTRL_PRSDEP);
913	if (VLAN_ATTACHED(&sc->sc_ec)) {
914		sc->sc_rctrl |= RCTRL_VLEX;
915		rctrl_prsdep = RCTRL_PRSDEP_L2;
916	}
917	if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) {
918		sc->sc_rctrl |= RCTRL_IPCSEN;
919		rctrl_prsdep = RCTRL_PRSDEP_L3;
920	}
921	if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) {
922		sc->sc_rctrl |= RCTRL_TUCSEN;
923		rctrl_prsdep = RCTRL_PRSDEP_L4;
924	}
925	sc->sc_rctrl |= rctrl_prsdep;
926#if 0
927	if (sc->sc_rctrl
928	    & (RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_VLEX | RCTRL_PRSDEP))
929		aprint_normal_dev(sc->sc_dev,
930		    "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n",
931		    sc->sc_rctrl,
932		    __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN),
933		    __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN),
934		    __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX),
935		    __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP));
936#endif
937
938	sc->sc_tctrl &= ~(TCTRL_IPCSEN | TCTRL_TUCSEN | TCTRL_VLINS);
939	if (VLAN_ATTACHED(&sc->sc_ec))		/* is this really true */
940		sc->sc_tctrl |= TCTRL_VLINS;
941	if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN)
942		sc->sc_tctrl |= TCTRL_IPCSEN;
943	if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN)
944		sc->sc_tctrl |= TCTRL_TUCSEN;
945#if 0
946	if (sc->sc_tctrl & (TCTRL_IPCSEN | TCTRL_TUCSEN | TCTRL_VLINS))
947		aprint_normal_dev(sc->sc_dev,
948		    "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n",
949		    sc->sc_tctrl,
950		    __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN),
951		    __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN),
952		    __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS));
953#endif
954
955	sc->sc_maccfg1 &= ~(MACCFG1_TX_EN | MACCFG1_RX_EN);
956
957	const uint64_t macstnaddr =
958	    pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl));
959
960	sc->sc_imask = IEVENT_DPE;
961
962	/* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */
963	pq3etsec_rxq_reset(sc, &sc->sc_rxq);
964	pq3etsec_rxq_produce(sc, &sc->sc_rxq);	/* fill with rx buffers */
965
966	/* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */
967	pq3etsec_txq_reset(sc, &sc->sc_txq);
968
969	/* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */
970	KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2);
971	etsec_write(sc, MAXFRM, sc->sc_maxfrm);
972	etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32));
973	etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >>  0));
974	etsec_write(sc, MACCFG1, sc->sc_maccfg1);
975	etsec_write(sc, MACCFG2, sc->sc_maccfg2);
976	etsec_write(sc, ECNTRL, sc->sc_ecntrl);
977
978	/* 8. Setup group address hash table (GADDR0-GADDR15) */
979	pq3etsec_mc_setup(sc);
980
981	/* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */
982	etsec_write(sc, MRBLR, MCLBYTES);
983
984	/* 10. Setup WWR, WOP, TOD bits in DMACTRL register */
985	sc->sc_dmactrl |= DMACTRL_DEFAULT;
986	etsec_write(sc, DMACTRL, sc->sc_dmactrl);
987
	/*
	 * 11. Enable transmit queues in TQUEUE, and ensure that the
	 *     transmit scheduling mode is correctly set in TCTRL.
	 */
989	etsec_write(sc, TQUEUE, TQUEUE_EN0);
990	sc->sc_imask |= IEVENT_TXF | IEVENT_TXE | IEVENT_TXC;
991
992	etsec_write(sc, TCTRL, sc->sc_tctrl);	/* for TOE stuff */
993
994	/* 12. Enable receive queues in RQUEUE, */
995	etsec_write(sc, RQUEUE, RQUEUE_EN0 | RQUEUE_EX0);
996	sc->sc_imask |= IEVENT_RXF | IEVENT_BSY | IEVENT_RXC;
997
998	/*     and optionally set TOE functionality in RCTRL. */
999	etsec_write(sc, RCTRL, sc->sc_rctrl);
1000	sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL);
1001	if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF)
1002		sc->sc_rx_adjlen += sizeof(struct rxfcb);
1003
1004	/* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */
1005	etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF);
1006
1007	/* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/
1008	etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF);
1009
1010	/* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */
1011	sc->sc_dmactrl &= ~(DMACTRL_GRS | DMACTRL_GTS);
1012	etsec_write(sc, DMACTRL, sc->sc_dmactrl);
1013
1014	/* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */
1015	etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
1016	etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
1017
1018	sc->sc_soft_flags = 0;
1019
1020	etsec_write(sc, IMASK, sc->sc_imask);
1021
1022	ifp->if_flags |= IFF_RUNNING;
1023
1024	return error;
1025}
1026
1027static void
1028pq3etsec_ifstop(struct ifnet *ifp, int disable)
1029{
1030	struct pq3etsec_softc * const sc = ifp->if_softc;
1031
1032	KASSERT(!cpu_intr_p());
1033	const uint32_t imask_gsc_mask = IEVENT_GTSC | IEVENT_GRSC;
1034	/*
1035	 * Clear the GTSC and GRSC from the interrupt mask until
1036	 * we are ready for them.  Then clear them from IEVENT,
1037	 * request the graceful shutdown, and then enable the
1038	 * GTSC and GRSC bits in the mask.  This should cause the
1039	 * error interrupt to fire which will issue a wakeup to
1040	 * allow us to resume.
1041	 */
1042
1043	/*
1044	 * 1. Set GRS/GTS bits in DMACTRL register
1045	 */
1046	sc->sc_dmactrl |= DMACTRL_GRS | DMACTRL_GTS;
1047	etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask);
1048	etsec_write(sc, IEVENT, imask_gsc_mask);
1049	etsec_write(sc, DMACTRL, sc->sc_dmactrl);
1050
1051	if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN | MACCFG1_RX_EN)) {
1052		/*
1053		 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set
1054		 */
1055		etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask);
1056
1057		u_int timo = 1000;
1058		uint32_t ievent = etsec_read(sc, IEVENT);
1059		while ((ievent & imask_gsc_mask) != imask_gsc_mask) {
1060			if (--timo == 0) {
1061				aprint_error_dev(sc->sc_dev,
1062				    "WARNING: "
1063				    "request to stop failed (IEVENT=%#x)\n",
1064				    ievent);
1065				break;
1066			}
1067			delay(10);
1068			ievent = etsec_read(sc, IEVENT);
1069		}
1070	}
1071
1072	/*
1073	 * Now reset the controller.
1074	 *
1075	 * 3. Set SOFT_RESET bit in MACCFG1 register
1076	 * 4. Clear SOFT_RESET bit in MACCFG1 register
1077	 */
1078	etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET);
1079	etsec_write(sc, MACCFG1, 0);
1080	etsec_write(sc, IMASK, 0);
1081	etsec_write(sc, IEVENT, ~0);
1082	sc->sc_imask = 0;
1083	ifp->if_flags &= ~IFF_RUNNING;
1084
1085	uint32_t tbipa = etsec_read(sc, TBIPA);
1086	if (tbipa == sc->sc_phy_addr) {
1087		aprint_normal_dev(sc->sc_dev, "relocating TBI\n");
1088		etsec_write(sc, TBIPA, 0x1f);
1089	}
1090	uint32_t miimcfg = etsec_read(sc, MIIMCFG);
1091	etsec_write(sc, MIIMCFG, MIIMCFG_RESET);
1092	etsec_write(sc, MIIMCFG, miimcfg);
1093
1094	/*
	 * Let's consume any remaining transmitted packets.  And if we are
1096	 * disabling the interface, purge ourselves of any untransmitted
1097	 * packets.  But don't consume any received packets, just drop them.
1098	 * If we aren't disabling the interface, save the mbufs in the
1099	 * receive queue for reuse.
1100	 */
1101	pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable);
1102	pq3etsec_txq_consume(sc, &sc->sc_txq);
1103	if (disable) {
1104		pq3etsec_txq_purge(sc, &sc->sc_txq);
1105		IFQ_PURGE(&ifp->if_snd);
1106	}
1107}
1108
1109static void
1110pq3etsec_ifwatchdog(struct ifnet *ifp)
1111{
1112}
1113
1114static void
1115pq3etsec_mc_setup(
1116	struct pq3etsec_softc *sc)
1117{
1118	struct ethercom * const ec = &sc->sc_ec;
1119	struct ifnet * const ifp = &sc->sc_if;
1120	struct ether_multi *enm;
1121	struct ether_multistep step;
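	/*
	 * With RCTRL_GHTX set the controller hashes into all 16 group
	 * address registers (a 512-bit table, 9 hash bits); otherwise
	 * only the 8 GADDR registers are used (256 bits, 8 hash bits).
	 */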
1122	uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8);
1123	const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8);
1124
1125	memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr));
1126	memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
1127
1128	ifp->if_flags &= ~IFF_ALLMULTI;
1129
1130	ETHER_LOCK(ec);
1131	ETHER_FIRST_MULTI(step, ec, enm);
1132	for (u_int i = 0; enm != NULL; ) {
1133		const char *addr = enm->enm_addrlo;
1134		if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
1135			ifp->if_flags |= IFF_ALLMULTI;
1136			memset(gaddr, 0xff, 32 << (crc_shift & 1));
1137			memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
1138			break;
1139		}
1140		if ((sc->sc_rctrl & RCTRL_EMEN)
1141		    && i < __arraycount(sc->sc_macaddrs)) {
1142			sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr);
1143		} else {
1144			uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
1145#if 0
1146			printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__,
1147			    ether_sprintf(addr), crc,
1148			    crc >> crc_shift,
1149			    crc >> (crc_shift + 5),
1150			    (crc >> crc_shift) & 31,
1151			    1 << (((crc >> crc_shift) & 31) ^ 31));
1152#endif
1153			/*
1154			 * The documentation doesn't completely follow PowerPC
1155			 * bit order.  The BE crc32 (H) for 01:00:5E:00:00:01
1156			 * is 0x7fa32d9b.  By empirical testing, the
1157			 * corresponding hash bit is word 3, bit 31 (ppc bit
1158			 * order).  Since 3 << 31 | 31 is 0x7f, we deduce
1159			 * H[0:2] selects the register while H[3:7] selects
1160			 * the bit (ppc bit order).
1161			 */
1162			crc >>= crc_shift;
1163			gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31);
1164		}
1165		ETHER_NEXT_MULTI(step, enm);
1166	}
1167	ETHER_UNLOCK(ec);
1168	for (u_int i = 0; i < 8; i++) {
1169		etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]);
1170		etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]);
1171#if 0
1172		if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8])
1173		printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__,
1174		    i, IGADDR(i), etsec_read(sc, IGADDR(i)),
1175		    i, GADDR(i), etsec_read(sc, GADDR(i)));
1176#endif
1177	}
1178	for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) {
1179		uint64_t macaddr = sc->sc_macaddrs[i];
1180		etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32));
1181		etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >>  0));
1182#if 0
1183		if (macaddr)
		printf("%s: MAC%02uADDR1(%#x)=%#x MAC%02uADDR2(%#x)=%#x\n", __func__,
1185		    i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)),
1186		    i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i)));
1187#endif
1188	}
1189}
1190
1191static int
1192pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
1193{
1194	struct pq3etsec_softc *sc  = ifp->if_softc;
1195	struct ifreq * const ifr = data;
1196	const int s = splnet();
1197	int error;
1198
1199	switch (cmd) {
1200	case SIOCSIFMEDIA:
1201		/* Flow control requires full-duplex mode. */
1202		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
1203		    (ifr->ifr_media & IFM_FDX) == 0)
1204			ifr->ifr_media &= ~IFM_ETH_FMASK;
1205		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1206			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
1207				/* We can do both TXPAUSE and RXPAUSE. */
1208				ifr->ifr_media |=
1209				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1210			}
1211		}
1212		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1213		break;
1214
1215	default:
1216		error = ether_ioctl(ifp, cmd, data);
1217		if (error != ENETRESET)
1218			break;
1219
1220		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
1221			error = 0;
1222			if (ifp->if_flags & IFF_RUNNING)
1223				pq3etsec_mc_setup(sc);
1224			break;
1225		}
1226		error = pq3etsec_ifinit(ifp);
1227		break;
1228	}
1229
1230	splx(s);
1231	return error;
1232}
1233
1234static void
1235pq3etsec_rxq_desc_presync(
1236	struct pq3etsec_softc *sc,
1237	struct pq3etsec_rxqueue *rxq,
1238	volatile struct rxbd *rxbd,
1239	size_t count)
1240{
1241	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
1242	    (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
1243	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1244}
1245
1246static void
1247pq3etsec_rxq_desc_postsync(
1248	struct pq3etsec_softc *sc,
1249	struct pq3etsec_rxqueue *rxq,
1250	volatile struct rxbd *rxbd,
1251	size_t count)
1252{
1253	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
1254	    (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
1255	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1256}
1257
1258static void
1259pq3etsec_txq_desc_presync(
1260	struct pq3etsec_softc *sc,
1261	struct pq3etsec_txqueue *txq,
1262	volatile struct txbd *txbd,
1263	size_t count)
1264{
1265	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
1266	    (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
1267	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1268}
1269
1270static void
1271pq3etsec_txq_desc_postsync(
1272	struct pq3etsec_softc *sc,
1273	struct pq3etsec_txqueue *txq,
1274	volatile struct txbd *txbd,
1275	size_t count)
1276{
1277	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
1278	    (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
1279	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1280}
1281
1282static bus_dmamap_t
1283pq3etsec_mapcache_get(
1284	struct pq3etsec_softc *sc,
1285	struct pq3etsec_mapcache *dmc)
1286{
1287	KASSERT(dmc->dmc_nmaps > 0);
1288	KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
1289	return dmc->dmc_maps[--dmc->dmc_nmaps];
1290}
1291
1292static void
1293pq3etsec_mapcache_put(
1294	struct pq3etsec_softc *sc,
1295	struct pq3etsec_mapcache *dmc,
1296	bus_dmamap_t map)
1297{
1298	KASSERT(map != NULL);
1299	KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
1300	dmc->dmc_maps[dmc->dmc_nmaps++] = map;
1301}
1302
1303static void
1304pq3etsec_mapcache_destroy(
1305	struct pq3etsec_softc *sc,
1306	struct pq3etsec_mapcache *dmc)
1307{
1308	const size_t dmc_size =
1309	    offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]);
1310
1311	for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
1312		bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
1313	}
1314	kmem_intr_free(dmc, dmc_size);
1315}
1316
1317static int
1318pq3etsec_mapcache_create(
1319	struct pq3etsec_softc *sc,
1320	struct pq3etsec_mapcache **dmc_p,
1321	size_t maxmaps,
1322	size_t maxmapsize,
1323	size_t maxseg)
1324{
1325	const size_t dmc_size =
1326	    offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]);
	struct pq3etsec_mapcache * const dmc =
		kmem_intr_zalloc(dmc_size, KM_NOSLEEP);
	if (dmc == NULL)
		return ENOMEM;

1330	dmc->dmc_maxmaps = maxmaps;
1331	dmc->dmc_nmaps = maxmaps;
1332	dmc->dmc_maxmapsize = maxmapsize;
1333	dmc->dmc_maxseg = maxseg;
1334
1335	for (u_int i = 0; i < maxmaps; i++) {
1336		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
1337		     dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
1338		     BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
1339		if (error) {
1340			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
1342			    "entry %u of %zu: %d\n",
1343			    i, maxmaps, error);
1344			while (i-- > 0) {
1345				bus_dmamap_destroy(sc->sc_dmat,
1346				    dmc->dmc_maps[i]);
1347			}
1348			kmem_intr_free(dmc, dmc_size);
1349			return error;
1350		}
1351		KASSERT(dmc->dmc_maps[i] != NULL);
1352	}
1353
1354	*dmc_p = dmc;
1355
1356	return 0;
1357}
1358
1359#if 0
1360static void
1361pq3etsec_dmamem_free(
1362	bus_dma_tag_t dmat,
1363	size_t map_size,
1364	bus_dma_segment_t *seg,
1365	bus_dmamap_t map,
1366	void *kvap)
1367{
1368	bus_dmamap_destroy(dmat, map);
1369	bus_dmamem_unmap(dmat, kvap, map_size);
1370	bus_dmamem_free(dmat, seg, 1);
1371}
1372#endif
1373
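/*
 * Allocate a single physically contiguous, page-aligned DMA segment,
 * map it into kernel virtual space, and load a DMA map for it.  Used
 * for the transmit and receive buffer descriptor rings.
 */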
1374static int
1375pq3etsec_dmamem_alloc(
1376	bus_dma_tag_t dmat,
1377	size_t map_size,
1378	bus_dma_segment_t *seg,
1379	bus_dmamap_t *map,
1380	void **kvap)
1381{
1382	int error;
1383	int nseg;
1384
1385	*kvap = NULL;
1386	*map = NULL;
1387
1388	error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
1389	   seg, 1, &nseg, 0);
1390	if (error)
1391		return error;
1392
1393	KASSERT(nseg == 1);
1394
1395	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
1396	    BUS_DMA_COHERENT);
1397	if (error == 0) {
1398		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
1399		    map);
1400		if (error == 0) {
1401			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
1402			    NULL, 0);
1403			if (error == 0)
1404				return 0;
1405			bus_dmamap_destroy(dmat, *map);
1406			*map = NULL;
1407		}
1408		bus_dmamem_unmap(dmat, *kvap, map_size);
1409		*kvap = NULL;
1410	}
1411	bus_dmamem_free(dmat, seg, nseg);
	return error;
1413}
1414
1415static struct mbuf *
1416pq3etsec_rx_buf_alloc(
1417	struct pq3etsec_softc *sc)
1418{
1419	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
1420	if (m == NULL) {
1421		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
1422		return NULL;
1423	}
1424	MCLGET(m, M_DONTWAIT);
1425	if ((m->m_flags & M_EXT) == 0) {
1426		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
1427		m_freem(m);
1428		return NULL;
1429	}
1430	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1431
1432	bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
1433	if (map == NULL) {
1434		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
1435		m_freem(m);
1436		return NULL;
1437	}
1438	M_SETCTX(m, map);
1439	m->m_len = m->m_pkthdr.len = MCLBYTES;
1440	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1441	    BUS_DMA_READ | BUS_DMA_NOWAIT);
1442	if (error) {
		aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n",
1444		    error);
1445		M_SETCTX(m, NULL);
1446		m_freem(m);
1447		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
1448		return NULL;
1449	}
1450	KASSERT(map->dm_mapsize == MCLBYTES);
1451	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1452	    BUS_DMASYNC_PREREAD);
1453
1454	return m;
1455}
1456
1457static void
1458pq3etsec_rx_map_unload(
1459	struct pq3etsec_softc *sc,
1460	struct mbuf *m)
1461{
1462	KASSERT(m);
1463	for (; m != NULL; m = m->m_next) {
1464		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1465		KASSERT(map);
1466		KASSERT(map->dm_mapsize == MCLBYTES);
1467		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
1468		    BUS_DMASYNC_POSTREAD);
1469		bus_dmamap_unload(sc->sc_dmat, map);
1470		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
1471		M_SETCTX(m, NULL);
1472	}
1473}
1474
1475static bool
1476pq3etsec_rxq_produce(
1477	struct pq3etsec_softc *sc,
1478	struct pq3etsec_rxqueue *rxq)
1479{
1480	volatile struct rxbd *producer = rxq->rxq_producer;
1481#if 0
1482	size_t inuse = rxq->rxq_inuse;
1483#endif
1484	while (rxq->rxq_inuse < rxq->rxq_threshold) {
1485		struct mbuf *m;
1486		IF_DEQUEUE(&sc->sc_rx_bufcache, m);
1487		if (m == NULL) {
1488			m = pq3etsec_rx_buf_alloc(sc);
1489			if (m == NULL) {
1490				printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__);
1491				break;
1492			}
1493		}
1494		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1495		KASSERT(map);
1496
1497#ifdef ETSEC_DEBUG
1498		KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL);
1499		rxq->rxq_mbufs[producer-rxq->rxq_first] = m;
1500#endif
1501
1502		/* rxbd_len is write-only by the ETSEC */
1503		producer->rxbd_bufptr = map->dm_segs[0].ds_addr;
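		/*
		 * Make sure the new buffer pointer is visible before the
		 * descriptor is handed back to the controller via RXBD_E.
		 */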
1504		membar_producer();
1505		producer->rxbd_flags |= RXBD_E;
1506		if (__predict_false(rxq->rxq_mhead == NULL)) {
1507			KASSERT(producer == rxq->rxq_consumer);
1508			rxq->rxq_mconsumer = m;
1509		}
1510		*rxq->rxq_mtail = m;
1511		rxq->rxq_mtail = &m->m_next;
1512		m->m_len = MCLBYTES;
1513		m->m_next = NULL;
1514		rxq->rxq_inuse++;
1515		if (++producer == rxq->rxq_last) {
1516			membar_producer();
1517			pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1518			    rxq->rxq_last - rxq->rxq_producer);
1519			producer = rxq->rxq_producer = rxq->rxq_first;
1520		}
1521	}
1522	if (producer != rxq->rxq_producer) {
1523		membar_producer();
1524		pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1525		    producer - rxq->rxq_producer);
1526		rxq->rxq_producer = producer;
1527	}
1528	uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT;
1529	if (qhlt) {
1530		KASSERT(qhlt & rxq->rxq_qmask);
1531		sc->sc_ev_rx_stall.ev_count++;
1532		etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask);
1533	}
1534#if 0
1535	aprint_normal_dev(sc->sc_dev,
1536	    "%s: buffers inuse went from %zu to %zu\n",
1537	    __func__, inuse, rxq->rxq_inuse);
1538#endif
1539	return true;
1540}
1541
1542static bool
1543pq3etsec_rx_offload(
1544	struct pq3etsec_softc *sc,
1545	struct mbuf *m,
1546	const struct rxfcb *fcb)
1547{
1548	if (fcb->rxfcb_flags & RXFCB_VLN) {
1549		vlan_set_tag(m, fcb->rxfcb_vlctl);
1550	}
1551	if ((fcb->rxfcb_flags & RXFCB_IP) == 0
1552	    || (fcb->rxfcb_flags & (RXFCB_CIP | RXFCB_CTU)) == 0)
1553		return true;
1554	int csum_flags = 0;
1555	if ((fcb->rxfcb_flags & (RXFCB_IP6 | RXFCB_CIP)) == RXFCB_CIP) {
1556		csum_flags |= M_CSUM_IPv4;
1557		if (fcb->rxfcb_flags & RXFCB_EIP)
1558			csum_flags |= M_CSUM_IPv4_BAD;
1559	}
1560	if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) {
1561		int ipv_flags;
1562		if (fcb->rxfcb_flags & RXFCB_IP6)
1563			ipv_flags = M_CSUM_TCPv6 | M_CSUM_UDPv6;
1564		else
1565			ipv_flags = M_CSUM_TCPv4 | M_CSUM_UDPv4;
1566		if (fcb->rxfcb_pro == IPPROTO_TCP) {
1567			csum_flags |= (M_CSUM_TCPv4 |M_CSUM_TCPv6) & ipv_flags;
1568		} else {
1569			csum_flags |= (M_CSUM_UDPv4 |M_CSUM_UDPv6) & ipv_flags;
1570		}
1571		if (fcb->rxfcb_flags & RXFCB_ETU)
1572			csum_flags |= M_CSUM_TCP_UDP_BAD;
1573	}
1574
1575	m->m_pkthdr.csum_flags = csum_flags;
1576	return true;
1577}
1578
1579static void
1580pq3etsec_rx_input(
1581	struct pq3etsec_softc *sc,
1582	struct mbuf *m,
1583	uint16_t rxbd_flags)
1584{
1585	struct ifnet * const ifp = &sc->sc_if;
1586
1587	pq3etsec_rx_map_unload(sc, m);
1588
1589	if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) {
1590		struct rxfcb fcb = *mtod(m, struct rxfcb *);
1591		if (!pq3etsec_rx_offload(sc, m, &fcb))
1592			return;
1593	}
1594	m_adj(m, sc->sc_rx_adjlen);
1595
1596	if (rxbd_flags & RXBD_M)
1597		m->m_flags |= M_PROMISC;
1598	if (rxbd_flags & RXBD_BC)
1599		m->m_flags |= M_BCAST;
1600	if (rxbd_flags & RXBD_MC)
1601		m->m_flags |= M_MCAST;
1602	m->m_flags |= M_HASFCS;
1603	m_set_rcvif(m, &sc->sc_if);
1604
1605	/*
	 * Let's give it to the network subsystem to deal with.
1607	 */
1608	if_percpuq_enqueue(ifp->if_percpuq, m);
1609}
1610
1611static void
1612pq3etsec_rxq_consume(
1613	struct pq3etsec_softc *sc,
1614	struct pq3etsec_rxqueue *rxq)
1615{
1616	struct ifnet * const ifp = &sc->sc_if;
1617	volatile struct rxbd *consumer = rxq->rxq_consumer;
1618	size_t rxconsumed = 0;
1619
1620	etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask);
1621
1622	for (;;) {
1623		if (consumer == rxq->rxq_producer) {
1624			rxq->rxq_consumer = consumer;
1625			rxq->rxq_inuse -= rxconsumed;
1626			KASSERT(rxq->rxq_inuse == 0);
1627			break;
1628		}
1629		pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1);
1630		const uint16_t rxbd_flags = consumer->rxbd_flags;
1631		if (rxbd_flags & RXBD_E) {
1632			rxq->rxq_consumer = consumer;
1633			rxq->rxq_inuse -= rxconsumed;
1634			break;
1635		}
1636		KASSERT(rxq->rxq_mconsumer != NULL);
1637#ifdef ETSEC_DEBUG
1638		KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
1639#endif
1640#if 0
1641		printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n",
1642		    __func__,
1643		    consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len,
1644		    mtod(rxq->rxq_mconsumer, int *)[0],
1645		    mtod(rxq->rxq_mconsumer, int *)[1],
1646		    mtod(rxq->rxq_mconsumer, int *)[2],
1647		    mtod(rxq->rxq_mconsumer, int *)[3]);
1648#endif
1649		/*
1650		 * We own this packet again.  Clear all flags except wrap.
1651		 */
1652		rxconsumed++;
1653		consumer->rxbd_flags = rxbd_flags & (RXBD_W | RXBD_I);
1654
1655		/*
1656		 * If this descriptor has the LAST bit set and no errors,
1657		 * it's a valid input packet.
1658		 */
1659		if ((rxbd_flags & (RXBD_L | RXBD_ERRORS)) == RXBD_L) {
1660			size_t rxbd_len = consumer->rxbd_len;
1661			struct mbuf *m = rxq->rxq_mhead;
1662			struct mbuf *m_last = rxq->rxq_mconsumer;
1663			if ((rxq->rxq_mhead = m_last->m_next) == NULL)
1664				rxq->rxq_mtail = &rxq->rxq_mhead;
1665			rxq->rxq_mconsumer = rxq->rxq_mhead;
1666			m_last->m_next = NULL;
1667			m_last->m_len = rxbd_len & (MCLBYTES - 1);
1668			m->m_pkthdr.len = rxbd_len;
1669			pq3etsec_rx_input(sc, m, rxbd_flags);
1670		} else if (rxbd_flags & RXBD_L) {
1671			KASSERT(rxbd_flags & RXBD_ERRORS);
1672			struct mbuf *m;
1673			/*
			 * We encountered an error, so take the mbufs and add
			 * them to the rx bufcache so we can reuse them.
1676			 */
1677			if_statinc(ifp, if_ierrors);
1678			for (m = rxq->rxq_mhead;
1679			     m != rxq->rxq_mconsumer;
1680			     m = m->m_next) {
1681				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1682			}
1683			m = rxq->rxq_mconsumer;
1684			if ((rxq->rxq_mhead = m->m_next) == NULL)
1685				rxq->rxq_mtail = &rxq->rxq_mhead;
1686			rxq->rxq_mconsumer = m->m_next;
1687			IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1688		} else {
1689			rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next;
1690		}
1691#ifdef ETSEC_DEBUG
1692		rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL;
1693#endif
1694
1695		/*
1696		 * Wrap at the last entry!
1697		 */
1698		if (rxbd_flags & RXBD_W) {
1699			KASSERT(consumer + 1 == rxq->rxq_last);
1700			consumer = rxq->rxq_first;
1701		} else {
1702			consumer++;
1703		}
1704#ifdef ETSEC_DEBUG
1705		KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
1706#endif
1707	}
1708
1709	if (rxconsumed != 0)
1710		rnd_add_uint32(&sc->rnd_source, rxconsumed);
1711}
1712
1713static void
1714pq3etsec_rxq_purge(
1715	struct pq3etsec_softc *sc,
1716	struct pq3etsec_rxqueue *rxq,
1717	bool discard)
1718{
1719	struct mbuf *m;
1720
1721	if ((m = rxq->rxq_mhead) != NULL) {
1722#ifdef ETSEC_DEBUG
1723		memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs));
1724#endif
1725
1726		if (discard) {
1727			pq3etsec_rx_map_unload(sc, m);
1728			m_freem(m);
1729		} else {
1730			while (m != NULL) {
1731				struct mbuf *m0 = m->m_next;
1732				m->m_next = NULL;
1733				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1734				m = m0;
1735			}
1736		}
1737	}
1738
1739	rxq->rxq_mconsumer = NULL;
1740	rxq->rxq_mhead = NULL;
1741	rxq->rxq_mtail = &rxq->rxq_mhead;
1742	rxq->rxq_inuse = 0;
1743}
1744
1745static void
1746pq3etsec_rxq_reset(
1747	struct pq3etsec_softc *sc,
1748	struct pq3etsec_rxqueue *rxq)
1749{
1750	/*
1751	 * sync all the descriptors
1752	 */
1753	pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
1754	    rxq->rxq_last - rxq->rxq_first);
1755
1756	/*
1757	 * Make sure we own all descriptors in the ring.
1758	 */
1759	volatile struct rxbd *rxbd;
1760	for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) {
1761		rxbd->rxbd_flags = RXBD_I;
1762	}
1763
1764	/*
1765	 * Last descriptor has the wrap flag.
1766	 */
1767	rxbd->rxbd_flags = RXBD_W | RXBD_I;
1768
1769	/*
1770	 * Reset the producer consumer indexes.
1771	 */
1772	rxq->rxq_consumer = rxq->rxq_first;
1773	rxq->rxq_producer = rxq->rxq_first;
1774	rxq->rxq_inuse = 0;
1775	if (rxq->rxq_threshold < ETSEC_MINRXMBUFS)
1776		rxq->rxq_threshold = ETSEC_MINRXMBUFS;
1777
1778	sc->sc_imask |= IEVENT_RXF | IEVENT_BSY;
1779
1780	/*
	 * Restart the receiver at the first descriptor.
1782	 */
1783	etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr);
1784}
1785
1786static int
1787pq3etsec_rxq_attach(
1788	struct pq3etsec_softc *sc,
1789	struct pq3etsec_rxqueue *rxq,
1790	u_int qno)
1791{
1792	size_t map_size = PAGE_SIZE;
1793	size_t desc_count = map_size / sizeof(struct rxbd);
1794	int error;
1795	void *descs;
1796
1797	error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
1798	   &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
1799	if (error)
1800		return error;
1801
1802	memset(descs, 0, map_size);
1803	rxq->rxq_first = descs;
1804	rxq->rxq_last = rxq->rxq_first + desc_count;
1805	rxq->rxq_consumer = descs;
1806	rxq->rxq_producer = descs;
1807
1808	pq3etsec_rxq_purge(sc, rxq, true);
1809	pq3etsec_rxq_reset(sc, rxq);
1810
1811	rxq->rxq_reg_rbase = RBASEn(qno);
1812	rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno);
1813
1814	return 0;
1815}
1816
1817static bool
1818pq3etsec_txq_active_p(
1819	struct pq3etsec_softc * const sc,
1820	struct pq3etsec_txqueue *txq)
1821{
1822	return !IF_IS_EMPTY(&txq->txq_mbufs);
1823}
1824
1825static bool
1826pq3etsec_txq_fillable_p(
1827	struct pq3etsec_softc * const sc,
1828	struct pq3etsec_txqueue *txq)
1829{
1830	return txq->txq_free >= txq->txq_threshold;
1831}
1832
1833static int
1834pq3etsec_txq_attach(
1835	struct pq3etsec_softc *sc,
1836	struct pq3etsec_txqueue *txq,
1837	u_int qno)
1838{
1839	size_t map_size = PAGE_SIZE;
1840	size_t desc_count = map_size / sizeof(struct txbd);
1841	int error;
1842	void *descs;
1843
1844	error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
1845	   &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
1846	if (error)
1847		return error;
1848
1849	memset(descs, 0, map_size);
1850	txq->txq_first = descs;
1851	txq->txq_last = txq->txq_first + desc_count;
1852	txq->txq_consumer = descs;
1853	txq->txq_producer = descs;
1854
1855	IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS);
1856
1857	txq->txq_reg_tbase = TBASEn(qno);
1858	txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno);
1859
1860	pq3etsec_txq_reset(sc, txq);
1861
1862	return 0;
1863}
1864
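/*
 * Make sure the mbuf has a DMA map (taking one from the transmit map
 * cache if needed), then load and sync it for transmission.
 */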
1865static int
1866pq3etsec_txq_map_load(
1867	struct pq3etsec_softc *sc,
1868	struct pq3etsec_txqueue *txq,
1869	struct mbuf *m)
1870{
1871	bus_dmamap_t map;
1872	int error;
1873
1874	map = M_GETCTX(m, bus_dmamap_t);
1875	if (map != NULL)
1876		return 0;
1877
1878	map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache);
1879	if (map == NULL)
1880		return ENOMEM;
1881
1882	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1883	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1884	if (error)
1885		return error;
1886
1887	bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
1888	    BUS_DMASYNC_PREWRITE);
1889	M_SETCTX(m, map);
1890	return 0;
1891}
1892
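/*
 * Sync and unload the mbuf's DMA map and return it to the transmit map
 * cache.
 */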
1893static void
1894pq3etsec_txq_map_unload(
1895	struct pq3etsec_softc *sc,
1896	struct pq3etsec_txqueue *txq,
1897	struct mbuf *m)
1898{
1899	KASSERT(m);
1900	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1901	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1902	    BUS_DMASYNC_POSTWRITE);
1903	bus_dmamap_unload(sc->sc_dmat, map);
1904	pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map);
1905}
1906
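/*
 * Turn a loaded mbuf chain into transmit descriptors.  The first
 * descriptor is marked ready (TXBD_R) last, after a producer barrier,
 * so the hardware never sees a partially built frame.  Returns false
 * if the ring does not have enough free descriptors.
 */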
1907static bool
1908pq3etsec_txq_produce(
1909	struct pq3etsec_softc *sc,
1910	struct pq3etsec_txqueue *txq,
1911	struct mbuf *m)
1912{
1913	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1914
1915	if (map->dm_nsegs > txq->txq_free)
1916		return false;
1917
1918	/*
1919	 * The TOE (TCP/IP offload) flag must be set in the first descriptor.
1920	 */
1921	volatile struct txbd *producer = txq->txq_producer;
1922	uint16_t last_flags = TXBD_L;
1923	uint16_t first_flags = TXBD_R
1924	    | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0);
1925
1926	/*
1927	 * If we've produced enough descriptors without consuming any
1928	 * we need to ask for an interrupt to reclaim some.
1929	 */
1930	txq->txq_lastintr += map->dm_nsegs;
1931	if (ETSEC_IC_TX_ENABLED(sc)
1932	    || txq->txq_lastintr >= txq->txq_threshold
1933	    || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
1934		txq->txq_lastintr = 0;
1935		last_flags |= TXBD_I;
1936	}
1937
1938#ifdef ETSEC_DEBUG
1939	KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
1940#endif
1941	KASSERT(producer != txq->txq_last);
1942	producer->txbd_bufptr = map->dm_segs[0].ds_addr;
1943	producer->txbd_len = map->dm_segs[0].ds_len;
1944
1945	if (map->dm_nsegs > 1) {
1946		volatile struct txbd *start = producer + 1;
1947		size_t count = map->dm_nsegs - 1;
1948		for (u_int i = 1; i < map->dm_nsegs; i++) {
1949			if (__predict_false(++producer == txq->txq_last)) {
1950				producer = txq->txq_first;
1951				if (start < txq->txq_last) {
1952					pq3etsec_txq_desc_presync(sc, txq,
1953					    start, txq->txq_last - start);
1954					count -= txq->txq_last - start;
1955				}
1956				start = txq->txq_first;
1957			}
1958#ifdef ETSEC_DEBUG
1959			KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
1960#endif
1961			producer->txbd_bufptr = map->dm_segs[i].ds_addr;
1962			producer->txbd_len = map->dm_segs[i].ds_len;
1963			producer->txbd_flags = TXBD_R
1964			    | (producer->txbd_flags & TXBD_W)
1965			    | (i == map->dm_nsegs - 1 ? last_flags : 0);
1966#if 0
1967			printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first,
1968			    producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr);
1969#endif
1970		}
1971		pq3etsec_txq_desc_presync(sc, txq, start, count);
1972	} else {
1973		first_flags |= last_flags;
1974	}
1975
1976	membar_producer();
1977	txq->txq_producer->txbd_flags =
1978	    first_flags | (txq->txq_producer->txbd_flags & TXBD_W);
1979#if 0
1980	printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__,
1981	    txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags,
1982	    txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr);
1983#endif
1984	pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1);
1985
1986	/*
1987	 * Reduce free count by the number of segments we consumed.
1988	 */
1989	txq->txq_free -= map->dm_nsegs;
1990	KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
1991	KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0);
1992	KASSERT(producer->txbd_flags & TXBD_L);
1993#ifdef ETSEC_DEBUG
1994	txq->txq_lmbufs[producer - txq->txq_first] = m;
1995#endif
1996
1997#if 0
1998	printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n",
1999	    __func__, m, m->m_pkthdr.len, map->dm_nsegs,
2000	    txq->txq_producer - txq->txq_first, producer - txq->txq_first);
2001#endif
2002
2003	if (++producer == txq->txq_last)
2004		txq->txq_producer = txq->txq_first;
2005	else
2006		txq->txq_producer = producer;
2007	IF_ENQUEUE(&txq->txq_mbufs, m);
2008
2009	/*
2010	 * Restart the transmitter.
2011	 */
2012	etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT);	/* W1C */
2013
2014	return true;
2015}
2016
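/*
 * If the packet requests checksum offload or VLAN tag insertion,
 * prepend a frame control block (FCB) describing that work and mark
 * the mbuf M_HASFCB; otherwise clear M_HASFCB.  If no room can be
 * found for the FCB, fall back to software checksumming.
 */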
2017static void
2018pq3etsec_tx_offload(
2019	struct pq3etsec_softc *sc,
2020	struct pq3etsec_txqueue *txq,
2021	struct mbuf **mp)
2022{
2023	struct mbuf *m = *mp;
2024	u_int csum_flags = m->m_pkthdr.csum_flags;
2025	bool have_vtag;
2026	uint16_t vtag;
2027
2028	KASSERT(m->m_flags & M_PKTHDR);
2029
2030	have_vtag = vlan_has_tag(m);
2031	vtag = (have_vtag) ? vlan_get_tag(m) : 0;
2032
2033	/*
2034	 * Let's see if we are doing any offload first.
2035	 */
2036	if (csum_flags == 0 && !have_vtag) {
2037		m->m_flags &= ~M_HASFCB;
2038		return;
2039	}
2040
2041	uint16_t flags = 0;
2042	if (csum_flags & M_CSUM_IP) {
2043		flags |= TXFCB_IP
2044		    | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0)
2045		    | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0)
2046		    | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0)
2047		    | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0)
2048		    | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0);
2049	}
2050	if (have_vtag) {
2051		flags |= TXFCB_VLN;
2052	}
2053	if (flags == 0) {
2054		m->m_flags &= ~M_HASFCB;
2055		return;
2056	}
2057
2058	struct txfcb fcb;
2059	fcb.txfcb_flags = flags;
2060	if (csum_flags & M_CSUM_IPv4)
2061		fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2062	else
2063		fcb.txfcb_l4os = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2064	fcb.txfcb_l3os = ETHER_HDR_LEN;
2065	fcb.txfcb_phcs = 0;
2066	fcb.txfcb_vlctl = vtag;
2067
2068#if 0
2069	printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n",
2070	    __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os,
2071	    fcb.txfcb_phcs, fcb.txfcb_vlctl);
2072#endif
2073
2074	if (M_LEADINGSPACE(m) >= sizeof(fcb)) {
2075		m->m_data -= sizeof(fcb);
2076		m->m_len += sizeof(fcb);
2077	} else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) {
2078		memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len);
2079		m->m_data = m->m_pktdat;
2080		m->m_len += sizeof(fcb);
2081	} else {
2082		struct mbuf *mn;
2083		MGET(mn, M_DONTWAIT, m->m_type);
2084		if (mn == NULL) {
2085			if (csum_flags & M_CSUM_IP4) {
2086#ifdef INET
2087				in_undefer_cksum(m, ETHER_HDR_LEN,
2088				    csum_flags & M_CSUM_IP4);
2089#else
2090				panic("%s: impossible M_CSUM flags %#x",
2091				    device_xname(sc->sc_dev), csum_flags);
2092#endif
2093			} else if (csum_flags & M_CSUM_IP6) {
2094#ifdef INET6
2095				in6_undefer_cksum(m, ETHER_HDR_LEN,
2096				    csum_flags & M_CSUM_IP6);
2097#else
2098				panic("%s: impossible M_CSUM flags %#x",
2099				    device_xname(sc->sc_dev), csum_flags);
2100#endif
2101			}
2102
2103			m->m_flags &= ~M_HASFCB;
2104			return;
2105		}
2106
2107		m_move_pkthdr(mn, m);
2108		mn->m_next = m;
2109		m = mn;
2110		m_align(m, sizeof(fcb));
2111		m->m_len = sizeof(fcb);
2112		*mp = m;
2113	}
2114	m->m_pkthdr.len += sizeof(fcb);
2115	m->m_flags |= M_HASFCB;
2116	*mtod(m, struct txfcb *) = fcb;
2117	return;
2118}
2119
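/*
 * Drain the interface send queue (and any deferred txq_next packet)
 * onto the transmit ring, applying offload and DMA-loading each packet.
 * Returns true once the send queue is empty, false if the ring filled
 * up first.
 */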
2120static bool
2121pq3etsec_txq_enqueue(
2122	struct pq3etsec_softc *sc,
2123	struct pq3etsec_txqueue *txq)
2124{
2125	for (;;) {
2126		if (IF_QFULL(&txq->txq_mbufs))
2127			return false;
2128		struct mbuf *m = txq->txq_next;
2129		if (m == NULL) {
2130			int s = splnet();
2131			IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
2132			splx(s);
2133			if (m == NULL)
2134				return true;
2135			M_SETCTX(m, NULL);
2136			pq3etsec_tx_offload(sc, txq, &m);
2137		} else {
2138			txq->txq_next = NULL;
2139		}
2140		int error = pq3etsec_txq_map_load(sc, txq, m);
2141		if (error) {
2142			aprint_error_dev(sc->sc_dev,
2143			    "discarded packet due to "
2144			    "dmamap load failure: %d\n", error);
2145			m_freem(m);
2146			continue;
2147		}
2148		KASSERT(txq->txq_next == NULL);
2149		if (!pq3etsec_txq_produce(sc, txq, m)) {
2150			txq->txq_next = m;
2151			return false;
2152		}
2153		KASSERT(txq->txq_next == NULL);
2154	}
2155}
2156
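/*
 * Reclaim descriptors the hardware has finished with, unloading and
 * freeing the completed mbufs and updating interface statistics.
 * Returns true when the ring is empty or fillable again.
 */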
2157static bool
2158pq3etsec_txq_consume(
2159	struct pq3etsec_softc *sc,
2160	struct pq3etsec_txqueue *txq)
2161{
2162	struct ifnet * const ifp = &sc->sc_if;
2163	volatile struct txbd *consumer = txq->txq_consumer;
2164	size_t txfree = 0;
2165	bool ret;
2166
2167#if 0
2168	printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
2169#endif
2170	etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask);
2171
2172	for (;;) {
2173		if (consumer == txq->txq_producer) {
2174			txq->txq_consumer = consumer;
2175			txq->txq_free += txfree;
2176			txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
2177			KASSERT(txq->txq_lastintr == 0);
2178			KASSERT(txq->txq_free ==
2179			    txq->txq_last - txq->txq_first - 1);
2180			ret = true;
2181			break;
2182		}
2183		pq3etsec_txq_desc_postsync(sc, txq, consumer, 1);
2184		const uint16_t txbd_flags = consumer->txbd_flags;
2185		if (txbd_flags & TXBD_R) {
2186			txq->txq_consumer = consumer;
2187			txq->txq_free += txfree;
2188			txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
2189			ret = pq3etsec_txq_fillable_p(sc, txq);
2190			break;
2191		}
2192
2193		/*
2194		 * If this is the last descriptor in the chain, get the
2195		 * mbuf, free its dmamap, and free the mbuf chain itself.
2196		 */
2197		if (txbd_flags & TXBD_L) {
2198			struct mbuf *m;
2199
2200			IF_DEQUEUE(&txq->txq_mbufs, m);
2201#ifdef ETSEC_DEBUG
2202			KASSERTMSG(
2203			    m == txq->txq_lmbufs[consumer-txq->txq_first],
2204			    "%s: %p [%u]: flags %#x m (%p) != %p (%p)",
2205			    __func__, consumer, consumer - txq->txq_first,
2206			    txbd_flags, m,
2207			    &txq->txq_lmbufs[consumer-txq->txq_first],
2208			    txq->txq_lmbufs[consumer-txq->txq_first]);
2209#endif
2210			KASSERT(m);
2211			pq3etsec_txq_map_unload(sc, txq, m);
2212#if 0
2213			printf("%s: mbuf %p: consumed a %u byte packet\n",
2214			    __func__, m, m->m_pkthdr.len);
2215#endif
2216			if (m->m_flags & M_HASFCB)
2217				m_adj(m, sizeof(struct txfcb));
2218			bpf_mtap(ifp, m, BPF_D_OUT);
2219			net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2220			if_statinc_ref(ifp, nsr, if_opackets);
2221			if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len);
2222			if (m->m_flags & M_MCAST)
2223				if_statinc_ref(ifp, nsr, if_omcasts);
2224			if (txbd_flags & TXBD_ERRORS)
2225				if_statinc_ref(ifp, nsr, if_oerrors);
2226			IF_STAT_PUTREF(ifp);
2227			m_freem(m);
2228#ifdef ETSEC_DEBUG
2229			txq->txq_lmbufs[consumer - txq->txq_first] = NULL;
2230#endif
2231		} else {
2232#ifdef ETSEC_DEBUG
2233			KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL);
2234#endif
2235		}
2236
2237		/*
2238		 * We own this descriptor again.  Clear all flags except wrap.
2239		 */
2240		txfree++;
2241		//consumer->txbd_flags = txbd_flags & TXBD_W;
2242
2243		/*
2244		 * Wrap at the last entry!
2245		 */
2246		if (txbd_flags & TXBD_W) {
2247			KASSERT(consumer + 1 == txq->txq_last);
2248			consumer = txq->txq_first;
2249		} else {
2250			consumer++;
2251			KASSERT(consumer < txq->txq_last);
2252		}
2253	}
2254
2255	if (txfree != 0)
2256		rnd_add_uint32(&sc->rnd_source, txfree);
2257	return ret;
2258}
2259
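/*
 * Discard every pending transmit mbuf; the transmitter must already be
 * disabled.
 */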
2260static void
2261pq3etsec_txq_purge(
2262	struct pq3etsec_softc *sc,
2263	struct pq3etsec_txqueue *txq)
2264{
2265	struct mbuf *m;
2266	KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);
2267
2268	for (;;) {
2269		IF_DEQUEUE(&txq->txq_mbufs, m);
2270		if (m == NULL)
2271			break;
2272		pq3etsec_txq_map_unload(sc, txq, m);
2273		m_freem(m);
2274	}
2275	if ((m = txq->txq_next) != NULL) {
2276		txq->txq_next = NULL;
2277		pq3etsec_txq_map_unload(sc, txq, m);
2278		m_freem(m);
2279	}
2280#ifdef ETSEC_DEBUG
2281	memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
2282#endif
2283}
2284
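/*
 * Reinitialize a transmit ring: clear every descriptor (wrap on the
 * last one), reset the producer and consumer pointers and counters,
 * and rewrite the ring base register.
 */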
2285static void
2286pq3etsec_txq_reset(
2287	struct pq3etsec_softc *sc,
2288	struct pq3etsec_txqueue *txq)
2289{
2290	/*
2291	 * sync all the descriptors
2292	 */
2293	pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
2294	    txq->txq_last - txq->txq_first);
2295
2296	/*
2297	 * Make sure we own all descriptors in the ring.
2298	 */
2299	volatile struct txbd *txbd;
2300	for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
2301		txbd->txbd_flags = 0;
2302	}
2303
2304	/*
2305	 * Last descriptor has the wrap flag.
2306	 */
2307	txbd->txbd_flags = TXBD_W;
2308
2309	/*
2310	 * Reset the producer and consumer indexes.
2311	 */
2312	txq->txq_consumer = txq->txq_first;
2313	txq->txq_producer = txq->txq_first;
2314	txq->txq_free = txq->txq_last - txq->txq_first - 1;
2315	txq->txq_threshold = txq->txq_free / 2;
2316	txq->txq_lastintr = 0;
2317
2318	/*
2319	 * What do we want to get interrupted on?
2320	 */
2321	sc->sc_imask |= IEVENT_TXF | IEVENT_TXE;
2322
2323	/*
2324	 * Restart the transmit at the first descriptor
2325	 */
2326	etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
2327}
2328
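/*
 * if_start handler: all transmit work is done from the soft interrupt,
 * so just flag it and schedule the softint.
 */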
2329static void
2330pq3etsec_ifstart(struct ifnet *ifp)
2331{
2332	struct pq3etsec_softc * const sc = ifp->if_softc;
2333
2334	if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) {
2335		return;
2336	}
2337
2338	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2339	softint_schedule(sc->sc_soft_ih);
2340}
2341
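/*
 * Recover from a transmit error: reclaim completed descriptors, clear
 * a halted queue so it restarts, and try to queue more work.
 */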
2342static void
2343pq3etsec_tx_error(
2344	struct pq3etsec_softc * const sc)
2345{
2346	struct pq3etsec_txqueue * const txq = &sc->sc_txq;
2347
2348	pq3etsec_txq_consume(sc, txq);
2349
2350	if (sc->sc_txerrors
2351	    & (IEVENT_LC | IEVENT_CRL | IEVENT_XFUN | IEVENT_BABT)) {
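		/* No error-specific handling here beyond the restart below. */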
2352	} else if (sc->sc_txerrors & IEVENT_EBERR) {
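		/* Likewise, no specific handling for system bus errors. */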
2353	}
2354
2355	if (pq3etsec_txq_active_p(sc, txq))
2356		etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask);
2357	if (!pq3etsec_txq_enqueue(sc, txq)) {
2358		sc->sc_ev_tx_stall.ev_count++;
2359	}
2360
2361	sc->sc_txerrors = 0;
2362}
2363
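/*
 * Hardware transmit interrupt: acknowledge and mask TXF/TXB, then hand
 * the actual completion processing to the soft interrupt.
 */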
2364int
2365pq3etsec_tx_intr(void *arg)
2366{
2367	struct pq3etsec_softc * const sc = arg;
2368
2369	mutex_enter(sc->sc_hwlock);
2370
2371	sc->sc_ev_tx_intr.ev_count++;
2372
2373	uint32_t ievent = etsec_read(sc, IEVENT);
2374	ievent &= IEVENT_TXF | IEVENT_TXB;
2375	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
2376
2377#if 0
2378	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2379	    __func__, ievent, etsec_read(sc, IMASK));
2380#endif
2381
2382	if (ievent == 0) {
2383		mutex_exit(sc->sc_hwlock);
2384		return 0;
2385	}
2386
2387	sc->sc_imask &= ~(IEVENT_TXF | IEVENT_TXB);
2388	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2389	etsec_write(sc, IMASK, sc->sc_imask);
2390	softint_schedule(sc->sc_soft_ih);
2391
2392	mutex_exit(sc->sc_hwlock);
2393
2394	return 1;
2395}
2396
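/*
 * Hardware receive interrupt: acknowledge and mask RXF/RXB, then hand
 * the actual receive processing to the soft interrupt.
 */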
2397int
2398pq3etsec_rx_intr(void *arg)
2399{
2400	struct pq3etsec_softc * const sc = arg;
2401
2402	mutex_enter(sc->sc_hwlock);
2403
2404	sc->sc_ev_rx_intr.ev_count++;
2405
2406	uint32_t ievent = etsec_read(sc, IEVENT);
2407	ievent &= IEVENT_RXF | IEVENT_RXB;
2408	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
2409	if (ievent == 0) {
2410		mutex_exit(sc->sc_hwlock);
2411		return 0;
2412	}
2413
2414#if 0
2415	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent);
2416#endif
2417
2418	sc->sc_imask &= ~(IEVENT_RXF | IEVENT_RXB);
2419	atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR);
2420	etsec_write(sc, IMASK, sc->sc_imask);
2421	softint_schedule(sc->sc_soft_ih);
2422
2423	mutex_exit(sc->sc_hwlock);
2424
2425	return 1;
2426}
2427
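/*
 * Hardware error/miscellaneous interrupt: graceful-stop and MII events
 * are completed here with wakeup(); busy, transmit-error and parity
 * events are masked and deferred to the soft interrupt.
 */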
2428int
2429pq3etsec_error_intr(void *arg)
2430{
2431	struct pq3etsec_softc * const sc = arg;
2432
2433	mutex_enter(sc->sc_hwlock);
2434
2435	sc->sc_ev_error_intr.ev_count++;
2436
2437	for (int rv = 0, soft_flags = 0;; rv = 1) {
2438		uint32_t ievent = etsec_read(sc, IEVENT);
2439		ievent &= ~(IEVENT_RXF | IEVENT_RXB | IEVENT_TXF | IEVENT_TXB);
2440		etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
2441		if (ievent == 0) {
2442			if (soft_flags) {
2443				atomic_or_uint(&sc->sc_soft_flags, soft_flags);
2444				softint_schedule(sc->sc_soft_ih);
2445			}
2446			mutex_exit(sc->sc_hwlock);
2447			return rv;
2448		}
2449#if 0
2450		aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2451		    __func__, ievent, etsec_read(sc, IMASK));
2452#endif
2453
2454		if (ievent & (IEVENT_GRSC | IEVENT_GTSC)) {
2455			sc->sc_imask &= ~(IEVENT_GRSC | IEVENT_GTSC);
2456			etsec_write(sc, IMASK, sc->sc_imask);
2457			wakeup(sc);
2458		}
2459		if (ievent & (IEVENT_MMRD | IEVENT_MMWR)) {
2460			sc->sc_imask &= ~(IEVENT_MMRD | IEVENT_MMWR);
2461			etsec_write(sc, IMASK, sc->sc_imask);
2462			wakeup(&sc->sc_mii);
2463		}
2464		if (ievent & IEVENT_BSY) {
2465			soft_flags |= SOFT_RXBSY;
2466			sc->sc_imask &= ~IEVENT_BSY;
2467			etsec_write(sc, IMASK, sc->sc_imask);
2468		}
2469		if (ievent & IEVENT_TXE) {
2470			soft_flags |= SOFT_TXERROR;
2471			sc->sc_imask &= ~IEVENT_TXE;
2472			sc->sc_txerrors |= ievent;
2473		}
2474		if (ievent & IEVENT_TXC) {
2475			sc->sc_ev_tx_pause.ev_count++;
2476		}
2477		if (ievent & IEVENT_RXC) {
2478			sc->sc_ev_rx_pause.ev_count++;
2479		}
2480		if (ievent & IEVENT_DPE) {
2481			soft_flags |= SOFT_RESET;
2482			sc->sc_imask &= ~IEVENT_DPE;
2483			etsec_write(sc, IMASK, sc->sc_imask);
2484		}
2485	}
2486}
2487
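/*
 * Soft interrupt: performs the work deferred by the hardware interrupt
 * handlers (reinit on reset, receive-busy threshold bumping, transmit
 * reclaim/refill, receive processing, transmit error recovery) and
 * then re-enables the masked interrupt sources.
 */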
2488void
2489pq3etsec_soft_intr(void *arg)
2490{
2491	struct pq3etsec_softc * const sc = arg;
2492	struct ifnet * const ifp = &sc->sc_if;
2493	uint32_t imask = 0;
2494
2495	mutex_enter(sc->sc_lock);
2496
2497	u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
2498
2499	sc->sc_ev_soft_intr.ev_count++;
2500
2501	if (soft_flags & SOFT_RESET) {
2502		int s = splnet();
2503		pq3etsec_ifinit(ifp);
2504		splx(s);
2505		soft_flags = 0;
2506	}
2507
2508	if (soft_flags & SOFT_RXBSY) {
2509		struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq;
2510		size_t threshold = 5 * rxq->rxq_threshold / 4;
2511		if (threshold >= rxq->rxq_last - rxq->rxq_first) {
2512			threshold = rxq->rxq_last - rxq->rxq_first - 1;
2513		} else {
2514			imask |= IEVENT_BSY;
2515		}
2516		aprint_normal_dev(sc->sc_dev,
2517		    "increasing receive buffers from %zu to %zu\n",
2518		    rxq->rxq_threshold, threshold);
2519		rxq->rxq_threshold = threshold;
2520	}
2521
2522	if ((soft_flags & SOFT_TXINTR)
2523	    || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
2524		/*
2525		 * Let's do what we came here for.  Consume transmitted
2526		 * packets off the transmit ring.
2527		 */
2528		if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
2529		    || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
2530			sc->sc_ev_tx_stall.ev_count++;
2531		}
2532		imask |= IEVENT_TXF;
2533	}
2534
2535	if (soft_flags & (SOFT_RXINTR | SOFT_RXBSY)) {
2536		/* Consume received packets off the receive ring. */
2537		pq3etsec_rxq_consume(sc, &sc->sc_rxq);
2538		imask |= IEVENT_RXF;
2539	}
2540
2541	if (soft_flags & SOFT_TXERROR) {
2542		pq3etsec_tx_error(sc);
2543		imask |= IEVENT_TXE;
2544	}
2545
2546	if (ifp->if_flags & IFF_RUNNING) {
2547		pq3etsec_rxq_produce(sc, &sc->sc_rxq);
2548		mutex_spin_enter(sc->sc_hwlock);
2549		sc->sc_imask |= imask;
2550		etsec_write(sc, IMASK, sc->sc_imask);
2551		mutex_spin_exit(sc->sc_hwlock);
2552	} else {
2553		KASSERT((soft_flags & SOFT_RXBSY) == 0);
2554	}
2555
2556	mutex_exit(sc->sc_lock);
2557}
2558
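/*
 * Once-a-second callout: run mii_tick() to poll the PHY and kick the
 * soft interrupt if a reset has been requested.
 */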
2559static void
2560pq3etsec_mii_tick(void *arg)
2561{
2562	struct pq3etsec_softc * const sc = arg;
2563	mutex_enter(sc->sc_lock);
2564	callout_ack(&sc->sc_mii_callout);
2565	sc->sc_ev_mii_ticks.ev_count++;
2566#ifdef DEBUG
2567	uint64_t now = mftb();
2568	if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) {
2569		aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n",
2570		    __func__, now - sc->sc_mii_last_tick);
2571		callout_stop(&sc->sc_mii_callout);
2572	}
2573#endif
2574	mii_tick(&sc->sc_mii);
2575	int s = splnet();
2576	if (sc->sc_soft_flags & SOFT_RESET)
2577		softint_schedule(sc->sc_soft_ih);
2578	splx(s);
2579	callout_schedule(&sc->sc_mii_callout, hz);
2580#ifdef DEBUG
2581	sc->sc_mii_last_tick = now;
2582#endif
2583	mutex_exit(sc->sc_lock);
2584}
2585
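/*
 * Program (or disable) receive interrupt coalescing from the current
 * frame-count and timer thresholds.
 */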
2586static void
2587pq3etsec_set_ic_rx(struct pq3etsec_softc *sc)
2588{
2589	uint32_t reg;
2590
2591	if (ETSEC_IC_RX_ENABLED(sc)) {
2592		reg = RXIC_ICEN;
2593		reg |= RXIC_ICFT_SET(sc->sc_ic_rx_count);
2594		reg |= RXIC_ICTT_SET(sc->sc_ic_rx_time);
2595	} else {
2596		/* Disable RX interrupt coalescing */
2597		reg = 0;
2598	}
2599
2600	etsec_write(sc, RXIC, reg);
2601}
2602
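/*
 * Program (or disable) transmit interrupt coalescing from the current
 * frame-count and timer thresholds.
 */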
2603static void
2604pq3etsec_set_ic_tx(struct pq3etsec_softc *sc)
2605{
2606	uint32_t reg;
2607
2608	if (ETSEC_IC_TX_ENABLED(sc)) {
2609		reg = TXIC_ICEN;
2610		reg |= TXIC_ICFT_SET(sc->sc_ic_tx_count);
2611		reg |= TXIC_ICTT_SET(sc->sc_ic_tx_time);
2612	} else {
2613		/* Disable TX interrupt coalescing */
2614		reg = 0;
2615	}
2616
2617	etsec_write(sc, TXIC, reg);
2618}
2619
2620/*
2621 * sysctl
2622 */
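/*
 * Common handler for the coalescing timer sysctls: validate the new
 * value (0-65535) and reprogram the matching RXIC/TXIC register.
 */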
2623static int
2624pq3etsec_sysctl_ic_time_helper(SYSCTLFN_ARGS, int *valuep)
2625{
2626	struct sysctlnode node = *rnode;
2627	struct pq3etsec_softc *sc = rnode->sysctl_data;
2628	int value = *valuep;
2629	int error;
2630
2631	node.sysctl_data = &value;
2632	error = sysctl_lookup(SYSCTLFN_CALL(&node));
2633	if (error != 0 || newp == NULL)
2634		return error;
2635
2636	if (value < 0 || value > 65535)
2637		return EINVAL;
2638
2639	mutex_enter(sc->sc_lock);
2640	*valuep = value;
2641	if (valuep == &sc->sc_ic_rx_time)
2642		pq3etsec_set_ic_rx(sc);
2643	else
2644		pq3etsec_set_ic_tx(sc);
2645	mutex_exit(sc->sc_lock);
2646
2647	return 0;
2648}
2649
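/*
 * Common handler for the coalescing frame-count sysctls: validate the
 * new value (0-255) and reprogram the matching RXIC/TXIC register.
 */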
2650static int
2651pq3etsec_sysctl_ic_count_helper(SYSCTLFN_ARGS, int *valuep)
2652{
2653	struct sysctlnode node = *rnode;
2654	struct pq3etsec_softc *sc = rnode->sysctl_data;
2655	int value = *valuep;
2656	int error;
2657
2658	node.sysctl_data = &value;
2659	error = sysctl_lookup(SYSCTLFN_CALL(&node));
2660	if (error != 0 || newp == NULL)
2661		return error;
2662
2663	if (value < 0 || value > 255)
2664		return EINVAL;
2665
2666	mutex_enter(sc->sc_lock);
2667	*valuep = value;
2668	if (valuep == &sc->sc_ic_rx_count)
2669		pq3etsec_set_ic_rx(sc);
2670	else
2671		pq3etsec_set_ic_tx(sc);
2672	mutex_exit(sc->sc_lock);
2673
2674	return 0;
2675}
2676
2677static int
2678pq3etsec_sysctl_ic_rx_time_helper(SYSCTLFN_ARGS)
2679{
2680	struct pq3etsec_softc *sc = rnode->sysctl_data;
2681
2682	return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode),
2683	    &sc->sc_ic_rx_time);
2684}
2685
2686static int
2687pq3etsec_sysctl_ic_rx_count_helper(SYSCTLFN_ARGS)
2688{
2689	struct pq3etsec_softc *sc = rnode->sysctl_data;
2690
2691	return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode),
2692	    &sc->sc_ic_rx_count);
2693}
2694
2695static int
2696pq3etsec_sysctl_ic_tx_time_helper(SYSCTLFN_ARGS)
2697{
2698	struct pq3etsec_softc *sc = rnode->sysctl_data;
2699
2700	return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode),
2701	    &sc->sc_ic_tx_time);
2702}
2703
2704static int
2705pq3etsec_sysctl_ic_tx_count_helper(SYSCTLFN_ARGS)
2706{
2707	struct pq3etsec_softc *sc = rnode->sysctl_data;
2708
2709	return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode),
2710	    &sc->sc_ic_tx_count);
2711}
2712
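/*
 * Create the hw.<device>.int_coal sysctl subtree with read/write knobs
 * for the RX/TX coalescing timer and frame-count thresholds.
 */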
2713static void
2714pq3etsec_sysctl_setup(struct sysctllog **clog, struct pq3etsec_softc *sc)
2715{
2716	const struct sysctlnode *cnode, *rnode;
2717
2718	if (sysctl_createv(clog, 0, NULL, &rnode,
2719	    CTLFLAG_PERMANENT,
2720	    CTLTYPE_NODE, device_xname(sc->sc_dev),
2721	    SYSCTL_DESCR("TSEC interface"),
2722	    NULL, 0, NULL, 0,
2723	    CTL_HW, CTL_CREATE, CTL_EOL) != 0)
2724		goto bad;
2725
2726	if (sysctl_createv(clog, 0, &rnode, &rnode,
2727	    CTLFLAG_PERMANENT,
2728	    CTLTYPE_NODE, "int_coal",
2729	    SYSCTL_DESCR("Interrupt coalescing"),
2730	    NULL, 0, NULL, 0,
2731	    CTL_CREATE, CTL_EOL) != 0)
2732		goto bad;
2733
2734	if (sysctl_createv(clog, 0, &rnode, &cnode,
2735	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
2736	    CTLTYPE_INT, "rx_time",
2737	    SYSCTL_DESCR("RX time threshold (0-65535)"),
2738	    pq3etsec_sysctl_ic_rx_time_helper, 0, (void *)sc, 0,
2739	    CTL_CREATE, CTL_EOL) != 0)
2740		goto bad;
2741
2742	if (sysctl_createv(clog, 0, &rnode, &cnode,
2743	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
2744	    CTLTYPE_INT, "rx_count",
2745	    SYSCTL_DESCR("RX frame count threshold (0-255)"),
2746	    pq3etsec_sysctl_ic_rx_count_helper, 0, (void *)sc, 0,
2747	    CTL_CREATE, CTL_EOL) != 0)
2748		goto bad;
2749
2750	if (sysctl_createv(clog, 0, &rnode, &cnode,
2751	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
2752	    CTLTYPE_INT, "tx_time",
2753	    SYSCTL_DESCR("TX time threshold (0-65535)"),
2754	    pq3etsec_sysctl_ic_tx_time_helper, 0, (void *)sc, 0,
2755	    CTL_CREATE, CTL_EOL) != 0)
2756		goto bad;
2757
2758	if (sysctl_createv(clog, 0, &rnode, &cnode,
2759	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
2760	    CTLTYPE_INT, "tx_count",
2761	    SYSCTL_DESCR("TX frame count threshold (0-255)"),
2762	    pq3etsec_sysctl_ic_tx_count_helper, 0, (void *)sc, 0,
2763	    CTL_CREATE, CTL_EOL) != 0)
2764		goto bad;
2765
2766	return;
2767
2768 bad:
2769	aprint_error_dev(sc->sc_dev, "could not attach sysctl nodes\n");
2770}
2771