/* if_myx.c revision 1.101 */
1/*	$OpenBSD: if_myx.c,v 1.101 2017/01/24 03:57:35 dlg Exp $	*/
2
3/*
4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/*
20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21 */
22
23#include "bpfilter.h"
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/sockio.h>
28#include <sys/mbuf.h>
29#include <sys/kernel.h>
30#include <sys/socket.h>
31#include <sys/malloc.h>
32#include <sys/pool.h>
33#include <sys/timeout.h>
34#include <sys/device.h>
35#include <sys/proc.h>
36#include <sys/queue.h>
37
38#include <machine/bus.h>
39#include <machine/intr.h>
40
41#include <net/if.h>
42#include <net/if_dl.h>
43#include <net/if_media.h>
44
45#if NBPFILTER > 0
46#include <net/bpf.h>
47#endif
48
49#include <netinet/in.h>
50#include <netinet/if_ether.h>
51
52#include <dev/pci/pcireg.h>
53#include <dev/pci/pcivar.h>
54#include <dev/pci/pcidevs.h>
55
56#include <dev/pci/if_myxreg.h>
57
#ifdef MYX_DEBUG
/*
 * Debug categories tested against the myx_debug mask by DPRINTF().
 * NOTE(review): MYXDBG_CMD (2<<0 == 2) and MYXDBG_INTR (3<<0 == 3)
 * overlap as bit masks (3 == 1|2), so they are not independent flags;
 * harmless while myx_debug stays MYXDBG_ALL -- confirm before using
 * a selective mask.
 */
#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
#define MYXDBG_CMD	(2<<0)	/* commands */
#define MYXDBG_INTR	(3<<0)	/* interrupts */
#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
int myx_debug = MYXDBG_ALL;
/* printf() gated on the debug mask; compiled away without MYX_DEBUG */
#define DPRINTF(_lvl, _arg...)	do {					\
	if (myx_debug & (_lvl))						\
		printf(_arg);						\
} while (0)
#else
#define DPRINTF(_lvl, arg...)
#endif
71
72#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
73
/*
 * Bookkeeping for one contiguous DMA region (map + segment + kva),
 * managed by myx_dmamem_alloc()/myx_dmamem_free().
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* loaded DMA map */
	bus_dma_segment_t	 mxm_seg;	/* backing segment */
	int			 mxm_nsegs;	/* segment count (allocated as 1) */
	size_t			 mxm_size;	/* region size in bytes */
	caddr_t			 mxm_kva;	/* kernel virtual mapping */
};

/* system-wide cluster pool for big rx buffers, created once in myx_attach() */
struct pool *myx_mcl_pool;

/* one descriptor slot: the mbuf currently loaded and its DMA map */
struct myx_slot {
	bus_dmamap_t		 ms_map;
	struct mbuf		*ms_m;
};

struct myx_rx_ring {
	struct myx_softc	*mrr_softc;	/* backpointer for the timeout */
	struct timeout		 mrr_refill;	/* deferred refill via myx_refill() */
	struct if_rxring	 mrr_rxr;	/* rx ring accounting */
	struct myx_slot		*mrr_slots;	/* per-descriptor slots */
	u_int32_t		 mrr_offset;	/* ring offset in NIC memory */
	u_int			 mrr_running;
	u_int			 mrr_prod;	/* producer index */
	u_int			 mrr_cons;	/* consumer index */
	struct mbuf		*(*mrr_mclget)(void);	/* mbuf allocator (small or big) */
};
100
/* interface run state, advanced by myx_up()/myx_down() */
enum myx_state {
	MYX_S_OFF = 0,
	MYX_S_RUNNING,
	MYX_S_DOWN
};

struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;		/* ethernet common glue */

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;	/* BAR0 register window */
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	struct myx_dmamem	 sc_zerodma;	/* zero pad memory */
	struct myx_dmamem	 sc_cmddma;	/* command/response buffer */
	struct myx_dmamem	 sc_paddma;	/* dummy RDMA target */

	/* status block the NIC DMAs link/completion state into */
	struct myx_dmamem	 sc_sts_dma;
	volatile struct myx_status	*sc_sts;

	int			 sc_intx;	/* 1 if legacy INTx, 0 if MSI */
	void			*sc_irqh;
	u_int32_t		 sc_irqcoaloff;		/* NIC irq register offsets */
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	struct myx_dmamem	 sc_intrq_dma;	/* interrupt (completion) queue */
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct myx_rx_ring	 sc_rx_ring[2];

	/* 4096 with aligned firmware, 2048 with unaligned (myx_probe_firmware) */
	bus_size_t		 sc_tx_boundary;
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_ring_prod;
	u_int			 sc_tx_ring_cons;

	u_int			 sc_tx_prod;
	u_int			 sc_tx_cons;
	struct myx_slot		*sc_tx_slots;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;
};
160
/*
 * rx buffer sizes: the small ring uses single mbuf clusters, the big
 * ring uses jumbo buffers.  MYX_RXBIG_SIZE is also installed as
 * if_hardmtu in myx_attachhook().
 */
#define MYX_RXSMALL_SIZE	MCLBYTES
#define MYX_RXBIG_SIZE		(MYX_MTU - \
    (ETHER_ALIGN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
164
165int	 myx_match(struct device *, void *, void *);
166void	 myx_attach(struct device *, struct device *, void *);
167int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
168int	 myx_query(struct myx_softc *sc, char *, size_t);
169u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
170void	 myx_attachhook(struct device *);
171int	 myx_loadfirmware(struct myx_softc *, const char *);
172int	 myx_probe_firmware(struct myx_softc *);
173
174void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
175void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
176
/*
 * Writes to device memory use the widest raw bus access available:
 * 8-byte accesses on LP64 platforms, 4-byte accesses elsewhere.
 */
#if defined(__LP64__)
#define _myx_bus_space_write bus_space_write_raw_region_8
typedef u_int64_t myx_bus_t;
#else
#define _myx_bus_space_write bus_space_write_raw_region_4
typedef u_int32_t myx_bus_t;
#endif
#define myx_bus_space_write(_sc, _o, _a, _l) \
    _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))
186
187int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
188int	 myx_boot(struct myx_softc *, u_int32_t);
189
190int	 myx_rdma(struct myx_softc *, u_int);
191int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
192	    bus_size_t, u_int align);
193void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
194int	 myx_media_change(struct ifnet *);
195void	 myx_media_status(struct ifnet *, struct ifmediareq *);
196void	 myx_link_state(struct myx_softc *, u_int32_t);
197void	 myx_watchdog(struct ifnet *);
198int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
199int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
200void	 myx_up(struct myx_softc *);
201void	 myx_iff(struct myx_softc *);
202void	 myx_down(struct myx_softc *);
203
204void	 myx_start(struct ifqueue *);
205void	 myx_write_txd_tail(struct myx_softc *, struct myx_slot *, u_int8_t,
206	    u_int32_t, u_int);
207int	 myx_load_mbuf(struct myx_softc *, struct myx_slot *, struct mbuf *);
208int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
209int	 myx_intr(void *);
210void	 myx_rxeof(struct myx_softc *);
211void	 myx_txeof(struct myx_softc *, u_int32_t);
212
213int			myx_buf_fill(struct myx_softc *, struct myx_slot *,
214			    struct mbuf *(*)(void));
215struct mbuf *		myx_mcl_small(void);
216struct mbuf *		myx_mcl_big(void);
217
218int			myx_rx_init(struct myx_softc *, int, bus_size_t);
219int			myx_rx_fill(struct myx_softc *, struct myx_rx_ring *);
220void			myx_rx_empty(struct myx_softc *, struct myx_rx_ring *);
221void			myx_rx_free(struct myx_softc *, struct myx_rx_ring *);
222
223int			myx_tx_init(struct myx_softc *, bus_size_t);
224void			myx_tx_empty(struct myx_softc *);
225void			myx_tx_free(struct myx_softc *);
226
227void			myx_refill(void *);
228
/* autoconf(9) glue */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

/* supported devices: Myri-10G Lanai-Z8E and Z8E-9 */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
240
/*
 * Match supported Myricom devices by PCI vendor/product id.
 */
int
myx_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
}
246
/*
 * Attach: map BAR0, read the board's MAC address and part number out
 * of its spec strings, map the (preferably MSI) interrupt, create the
 * shared big-rx cluster pool, program the PCIe device control
 * register and defer the rest of bring-up to myx_attachhook() so the
 * firmware file can be loaded from the mounted root filesystem.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* each rx ring refills itself from its own timeout */
	sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small;
	timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXSMALL]);
	sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big;
	timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXBIG]);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt; fall back from MSI to legacy INTx */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* this is sort of racy */
	if (myx_mcl_pool == NULL) {
		extern struct kmem_pa_mode kp_dma_contig;

		/*
		 * NOTE(review): with M_WAITOK this malloc should sleep
		 * rather than fail, so the NULL check below looks
		 * unreachable -- confirm against malloc(9).
		 */
		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);
		if (myx_mcl_pool == NULL) {
			printf("%s: unable to allocate mcl pool\n",
			    DEVNAME(sc));
			goto unmap;
		}
		pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY, IPL_NET,
		    0, "myxmcl", NULL);
		/* big rx clusters must come from DMA-contiguous pages */
		pool_set_constraints(myx_mcl_pool, &kp_dma_contig);
	}

	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	/* finish attachment once root is mounted (firmware is on disk) */
	config_mountroot(self, myx_attachhook);

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
322
/*
 * Program the PCIe Device Control register: set the PCI_PCIE_DCSR_MPS
 * field to the encoding for 4096 bytes (fls(4096) - 8 == 5) and turn
 * on relaxed ordering.  The register is rewritten only if it does not
 * already hold the wanted values.  Returns -1 if the device has no
 * PCIe capability, 0 otherwise.
 */
int
myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t dcsr;
	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
	int reg;

	if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &reg, NULL) == 0)
		return (-1);

	reg += PCI_PCIE_DCSR;
	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
	if ((dcsr & mask) != dc) {
		/* clear the whole field, then set the wanted bits */
		CLR(dcsr, mask);
		SET(dcsr, dc);
		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
	}

	return (0);
}
345
346u_int
347myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
348{
349	u_int		i, j;
350	u_int8_t	digit;
351
352	memset(lladdr, 0, ETHER_ADDR_LEN);
353	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
354		if (mac[i] >= '0' && mac[i] <= '9')
355			digit = mac[i] - '0';
356		else if (mac[i] >= 'A' && mac[i] <= 'F')
357			digit = mac[i] - 'A' + 10;
358		else if (mac[i] >= 'a' && mac[i] <= 'f')
359			digit = mac[i] - 'a' + 10;
360		else
361			continue;
362		if ((j & 1) == 0)
363			digit <<= 4;
364		lladdr[j++/2] |= digit;
365	}
366
367	return (i);
368}
369
/*
 * Read the firmware spec strings out of the NIC's SRAM and extract
 * the station address ("MAC=...") and part number ("PC=...").  The
 * spec area's location comes from the header whose offset is stored
 * at MYX_HEADER_POS.  Returns 1 only if that header lies outside the
 * mapped register window, 0 otherwise.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	/* spec area: sequence of NUL-terminated "KEY=value" strings */
	for (i = 0; i < len; i++) {
		maxlen = len - i;
		/* an empty string terminates the list */
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip the remainder of the current string */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
411
/*
 * Load a firmware image with loadfirmware(9), validate its size and
 * its header (type and version), copy it into the NIC's SRAM in
 * 256-byte chunks and boot it via myx_boot().  Returns 0 on success,
 * 1 on any failure; the image buffer is always freed.
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* the image stores the offset of its own header at MYX_HEADER_POS */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF, fwlen);
	return (ret);
}
466
/*
 * Deferred attachment, run once the root filesystem is mounted:
 * allocate the command buffer, load and boot the aligned firmware,
 * reset the NIC, let myx_probe_firmware() pick the final firmware,
 * establish the interrupt and attach the network interface.
 */
void
myx_attachhook(struct device *self)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* dropped to 2048 by myx_probe_firmware() if unaligned fw is used */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_qstart = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = MYX_RXBIG_SIZE;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* real send queue depth is set in myx_up() once the ring size is known */
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	/* fixed 10G media; change/status callbacks are mostly no-ops */
	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
538
/*
 * Decide which firmware the NIC should run.  On PCIe links 4 lanes
 * wide or narrower the already-loaded aligned firmware is kept.
 * Otherwise the firmware's unaligned DMA tests are run against a 4k
 * scratch buffer; if any test fails, the unaligned firmware is
 * loaded instead and the tx boundary is reduced to 2048.  Returns 0
 * once a usable firmware is in place, 1 on fatal error.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		/* negotiated link width: bits 25:20 of the link status half */
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * NOTE(review): mc_data2 appears to encode the 4k test length
	 * plus a read/write selector in the upper half -- confirm
	 * against the Myri-10G firmware interface documentation.
	 */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	/* aligned firmware cannot be used; switch to the unaligned one */
	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
619
/*
 * Copy len bytes of NIC memory at off into ptr.  A read barrier is
 * issued first so the raw 4-byte region reads observe device writes
 * that happened before the call.
 */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}
627
/*
 * Copy len bytes from ptr into NIC memory at off, followed by a
 * write barrier to push the raw 4-byte region writes out to the
 * device.
 */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}
635
/*
 * Allocate a single-segment DMA region: create a map, allocate one
 * zeroed segment with the requested alignment, map it into kernel
 * virtual memory and load it into the map.  On failure the steps
 * already completed are undone in reverse order.  Returns 0 on
 * success, 1 on failure.
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
666
/*
 * Tear down a region created by myx_dmamem_alloc(): unload the map,
 * unmap the kva, free the segment and destroy the map, in the
 * reverse order of allocation.
 */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
675
/*
 * Issue a command through the MYX_CMD register window and busy-wait
 * for the response the firmware DMAs back into sc_cmddma.  The result
 * slot is primed with 0xffffffff, which the firmware overwrites on
 * completion; we poll up to 20 times with 1ms delays (~20ms total).
 * Returns 0 on success (response data stored in *r when r != NULL),
 * -1 on a non-zero result or on timeout.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	struct myx_response	*mr;
	u_int			 i;
	u_int32_t		 result, data;
#ifdef MYX_DEBUG
	/* command names indexed by command number, for DPRINTF only */
	static const char *cmds[MYXCMD_MAX] = {
		"CMD_NONE",
		"CMD_RESET",
		"CMD_GET_VERSION",
		"CMD_SET_INTRQDMA",
		"CMD_SET_BIGBUFSZ",
		"CMD_SET_SMALLBUFSZ",
		"CMD_GET_TXRINGOFF",
		"CMD_GET_RXSMALLRINGOFF",
		"CMD_GET_RXBIGRINGOFF",
		"CMD_GET_INTRACKOFF",
		"CMD_GET_INTRDEASSERTOFF",
		"CMD_GET_TXRINGSZ",
		"CMD_GET_RXRINGSZ",
		"CMD_SET_INTRQSZ",
		"CMD_SET_IFUP",
		"CMD_SET_IFDOWN",
		"CMD_SET_MTU",
		"CMD_GET_INTRCOALDELAYOFF",
		"CMD_SET_STATSINTVL",
		"CMD_SET_STATSDMA_OLD",
		"CMD_SET_PROMISC",
		"CMD_UNSET_PROMISC",
		"CMD_SET_LLADDR",
		"CMD_SET_FC",
		"CMD_UNSET_FC",
		"CMD_DMA_TEST",
		"CMD_SET_ALLMULTI",
		"CMD_UNSET_ALLMULTI",
		"CMD_SET_MCASTGROUP",
		"CMD_UNSET_MCASTGROUP",
		"CMD_UNSET_MCAST",
		"CMD_SET_STATSDMA",
		"CMD_UNALIGNED_DMA_TEST",
		"CMD_GET_UNALIGNED_STATUS"
	};
#endif

	mc->mc_cmd = htobe32(cmd);
	/* tell the firmware where to DMA its response */
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		/* anything but the primed pattern means the fw answered */
		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmds[cmd], i, result, data, data);

	/* a timeout leaves result at 0xffffffff, which also fails here */
	if (result != 0)
		return (-1);

	if (r != NULL)
		*r = data;
	return (0);
}
759
/*
 * Hand the uploaded firmware image to the bootloader: copy it from
 * MYX_FW into place and jump to it.  The NIC acknowledges by DMAing
 * 0xffffffff into the command buffer, which is polled for up to
 * ~200ms.  Returns 0 on success, 1 on timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	bc.bc_length = htobe32(length - 8);	/* image minus 8-byte header (see bc_copyto) */
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		/* 0xffffffff is byte-order neutral, so no swap is needed */
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
803
/*
 * Point the NIC at a dummy RDMA target and switch the RDMA feature
 * on or off through the MYX_RDMA window.  Completion is signalled by
 * the NIC DMAing 0xffffffff into the command buffer, polled for up
 * to ~20ms.  Returns 0 on success, 1 on timeout.
 *
 * NOTE(review): rc is not memset; this relies on every field of
 * struct myx_rdmacmd being assigned below -- confirm against
 * if_myxreg.h.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/* 0xffffffff is byte-order neutral, so no swap is needed */
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
854
/*
 * Media change callback.  The only media is fixed 10G autoselect,
 * so there is nothing to reconfigure; always succeed.
 */
int
myx_media_change(struct ifnet *ifp)
{
	return (0);
}
861
/*
 * Media status callback: always reports 10G autoselect.  When the
 * interface is running, the link word is snapshotted from the status
 * block the NIC DMAs into host memory and pushed through
 * myx_link_state() before the status flags are filled in.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	/* bracket the read of the DMA'd status block with syncs */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sts = sc->sc_sts->ms_linkstate;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	/* link up is reported as full-duplex with rx/tx flow control */
	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
891
/*
 * Fold the big-endian link word from the NIC status block into the
 * ifnet link state.  The link is either down or 10G full-duplex; the
 * new state is stored before if_link_state_change() announces it,
 * and the baudrate is updated to match.
 */
void
myx_link_state(struct myx_softc *sc, u_int32_t sts)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	int			 link_state = LINK_STATE_DOWN;

	if (betoh32(sts) == MYXSTS_LINKUP)
		link_state = LINK_STATE_FULL_DUPLEX;
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
		    IF_Gbps(10) : 0;
	}
}
907
/*
 * Watchdog callback: intentionally a no-op for this hardware.
 */
void
myx_watchdog(struct ifnet *ifp)
{
}
913
/*
 * Interface ioctl handler, run at splnet().  Flag or filter changes
 * on an already-running interface collapse into ENETRESET, which is
 * handled by reprogramming the rx filter via myx_iff() rather than a
 * full restart.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
963
964int
965myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
966{
967	struct if_rxring_info ifr[2];
968
969	memset(ifr, 0, sizeof(ifr));
970
971	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
972	ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;
973
974	ifr[1].ifr_size = MYX_RXBIG_SIZE;
975	ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;
976
977	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
978}
979
980void
981myx_up(struct myx_softc *sc)
982{
983	struct ifnet		*ifp = &sc->sc_ac.ac_if;
984	struct myx_cmd		mc;
985	bus_dmamap_t		map;
986	size_t			size;
987	u_int			maxpkt;
988	u_int32_t		r;
989
990	memset(&mc, 0, sizeof(mc));
991	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
992		printf("%s: failed to reset the device\n", DEVNAME(sc));
993		return;
994	}
995
996	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
997	    64, MYXALIGN_CMD) != 0) {
998		printf("%s: failed to allocate zero pad memory\n",
999		    DEVNAME(sc));
1000		return;
1001	}
1002	memset(sc->sc_zerodma.mxm_kva, 0, 64);
1003	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1004	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1005
1006	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
1007	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
1008		printf("%s: failed to allocate pad DMA memory\n",
1009		    DEVNAME(sc));
1010		goto free_zero;
1011	}
1012	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1013	    sc->sc_paddma.mxm_map->dm_mapsize,
1014	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1015
1016	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
1017		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
1018		goto free_pad;
1019	}
1020
1021	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1022		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1023		goto free_pad;
1024	}
1025	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1026
1027	memset(&mc, 0, sizeof(mc));
1028	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1029		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1030		goto free_pad;
1031	}
1032	sc->sc_tx_ring_prod = 0;
1033	sc->sc_tx_ring_cons = 0;
1034	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1035	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1036	sc->sc_tx_count = 0;
1037	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1038
1039	/* Allocate Interrupt Queue */
1040
1041	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1042	sc->sc_intrq_idx = 0;
1043
1044	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1045	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1046	    size, MYXALIGN_DATA) != 0) {
1047		goto free_pad;
1048	}
1049	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1050	map = sc->sc_intrq_dma.mxm_map;
1051	memset(sc->sc_intrq, 0, size);
1052	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1053	    BUS_DMASYNC_PREREAD);
1054
1055	memset(&mc, 0, sizeof(mc));
1056	mc.mc_data0 = htobe32(size);
1057	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1058		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1059		goto free_intrq;
1060	}
1061
1062	memset(&mc, 0, sizeof(mc));
1063	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1064	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1065	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1066		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1067		goto free_intrq;
1068	}
1069
1070	/*
1071	 * get interrupt offsets
1072	 */
1073
1074	memset(&mc, 0, sizeof(mc));
1075	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1076	    &sc->sc_irqclaimoff) != 0) {
1077		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1078		goto free_intrq;
1079	}
1080
1081	memset(&mc, 0, sizeof(mc));
1082	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1083	    &sc->sc_irqdeassertoff) != 0) {
1084		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1085		goto free_intrq;
1086	}
1087
1088	memset(&mc, 0, sizeof(mc));
1089	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1090	    &sc->sc_irqcoaloff) != 0) {
1091		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1092		goto free_intrq;
1093	}
1094
1095	/* Set an appropriate interrupt coalescing period */
1096	r = htobe32(MYX_IRQCOALDELAY);
1097	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1098
1099	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1100		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1101		goto free_intrq;
1102	}
1103
1104	memset(&mc, 0, sizeof(mc));
1105	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1106		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1107		goto free_intrq;
1108	}
1109
1110	memset(&mc, 0, sizeof(mc));
1111	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1112		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1113		goto free_intrq;
1114	}
1115
1116	memset(&mc, 0, sizeof(mc));
1117	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1118	    &sc->sc_tx_ring_offset) != 0) {
1119		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1120		goto free_intrq;
1121	}
1122
1123	memset(&mc, 0, sizeof(mc));
1124	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1125	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
1126		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1127		goto free_intrq;
1128	}
1129
1130	memset(&mc, 0, sizeof(mc));
1131	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1132	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
1133		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1134		goto free_intrq;
1135	}
1136
1137	/* Allocate Interrupt Data */
1138	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1139	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1140		printf("%s: failed to allocate status DMA memory\n",
1141		    DEVNAME(sc));
1142		goto free_intrq;
1143	}
1144	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1145	map = sc->sc_sts_dma.mxm_map;
1146	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1147	    BUS_DMASYNC_PREREAD);
1148
1149	memset(&mc, 0, sizeof(mc));
1150	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1151	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1152	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1153	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1154		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1155		goto free_sts;
1156	}
1157
1158	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1159
1160	memset(&mc, 0, sizeof(mc));
1161	mc.mc_data0 = htobe32(maxpkt);
1162	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1163		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1164		goto free_sts;
1165	}
1166
1167	if (myx_tx_init(sc, maxpkt) != 0)
1168		goto free_sts;
1169
1170	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
1171		goto free_tx_ring;
1172
1173	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0)
1174		goto free_rx_ring_small;
1175
1176	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
1177		goto empty_rx_ring_small;
1178
1179	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0)
1180		goto free_rx_ring_big;
1181
1182	memset(&mc, 0, sizeof(mc));
1183	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
1184	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1185		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1186		goto empty_rx_ring_big;
1187	}
1188
1189	memset(&mc, 0, sizeof(mc));
1190	mc.mc_data0 = htobe32(16384);
1191	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1192		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1193		goto empty_rx_ring_big;
1194	}
1195
1196	sc->sc_state = MYX_S_RUNNING;
1197
1198	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1199		printf("%s: failed to start the device\n", DEVNAME(sc));
1200		goto empty_rx_ring_big;
1201	}
1202
1203	myx_iff(sc);
1204	SET(ifp->if_flags, IFF_RUNNING);
1205	ifq_restart(&ifp->if_snd);
1206
1207	return;
1208
1209empty_rx_ring_big:
1210	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1211free_rx_ring_big:
1212	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1213empty_rx_ring_small:
1214	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1215free_rx_ring_small:
1216	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1217free_tx_ring:
1218	myx_tx_free(sc);
1219free_sts:
1220	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1221	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1222	myx_dmamem_free(sc, &sc->sc_sts_dma);
1223free_intrq:
1224	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1225	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1226	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1227free_pad:
1228	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1229	    sc->sc_paddma.mxm_map->dm_mapsize,
1230	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1231	myx_dmamem_free(sc, &sc->sc_paddma);
1232
1233	memset(&mc, 0, sizeof(mc));
1234	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1235		printf("%s: failed to reset the device\n", DEVNAME(sc));
1236	}
1237free_zero:
1238	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1239	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1240	myx_dmamem_free(sc, &sc->sc_zerodma);
1241}
1242
1243int
1244myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1245{
1246	struct myx_cmd		 mc;
1247
1248	memset(&mc, 0, sizeof(mc));
1249	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1250	    addr[2] << 8 | addr[3]);
1251	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1252
1253	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1254		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1255		return (-1);
1256	}
1257	return (0);
1258}
1259
1260void
1261myx_iff(struct myx_softc *sc)
1262{
1263	struct myx_cmd		mc;
1264	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1265	struct ether_multi	*enm;
1266	struct ether_multistep	step;
1267	u_int8_t *addr;
1268
1269	CLR(ifp->if_flags, IFF_ALLMULTI);
1270
1271	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1272	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1273		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1274		return;
1275	}
1276
1277	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1278		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1279		return;
1280	}
1281
1282	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1283		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1284		return;
1285	}
1286
1287	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1288	    sc->sc_ac.ac_multirangecnt > 0) {
1289		SET(ifp->if_flags, IFF_ALLMULTI);
1290		return;
1291	}
1292
1293	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1294	while (enm != NULL) {
1295		addr = enm->enm_addrlo;
1296
1297		memset(&mc, 0, sizeof(mc));
1298		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1299		    addr[2] << 8 | addr[3]);
1300		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1301		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1302			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1303			return;
1304		}
1305
1306		ETHER_NEXT_MULTI(step, enm);
1307	}
1308
1309	memset(&mc, 0, sizeof(mc));
1310	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1311		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1312		return;
1313	}
1314}
1315
/*
 * Bring the interface down: tell the firmware to stop, wait for the
 * interrupt handler to confirm the stop, reset the chip, and release
 * all ring and DMA resources allocated by myx_up().
 */
void
myx_down(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	struct sleep_state	 sls;
	struct myx_cmd		 mc;
	int			 s;
	int			 ring;

	CLR(ifp->if_flags, IFF_RUNNING);

	/*
	 * Snapshot ms_linkdown so myx_intr() can detect the link-down
	 * status update the firmware posts in response to IFDOWN below.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sc->sc_linkdown = sts->ms_linkdown;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Publish MYX_S_DOWN before issuing IFDOWN; myx_intr() moves the
	 * state to MYX_S_OFF and wakes us when the firmware acknowledges.
	 */
	sc->sc_state = MYX_S_DOWN;
	membar_producer();

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	/* sleep on the status block until the interrupt handler flips us off */
	while (sc->sc_state != MYX_S_OFF) {
		sleep_setup(&sls, sts, PWAIT, "myxdown");
		membar_consumer();
		sleep_finish(&sls, sc->sc_state != MYX_S_OFF);
	}

	/* report the link as unknown now that the port is stopped */
	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	/* drain any transmit work still referencing the rings */
	ifq_clr_oactive(&ifp->if_snd);
	ifq_barrier(&ifp->if_snd);

	for (ring = 0; ring < 2; ring++) {
		struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];

		timeout_del(&mrr->mrr_refill);
		myx_rx_empty(sc, mrr);
		myx_rx_free(sc, mrr);
	}

	myx_tx_empty(sc);
	myx_tx_free(sc);

	/* the sleep shizz above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1389
/*
 * Post the tail descriptors of a packet: one descriptor per DMA segment
 * after the first (the caller writes the first descriptor itself), plus
 * an optional pad descriptor pointing at the zero buffer for frames
 * shorter than the 60-byte ethernet minimum.
 */
void
myx_write_txd_tail(struct myx_softc *sc, struct myx_slot *ms, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc		txd;
	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
	bus_dmamap_t			map = ms->ms_map;
	int				i;

	for (i = 1; i < map->dm_nsegs; i++) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}

	/* pad runt frames */
	if (map->dm_mapsize < 60) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(60 - map->dm_mapsize);
		txd.tx_flags = flags;

		/*
		 * NOTE: i == map->dm_nsegs here (carried out of the loop
		 * above), so the pad lands in the next ring slot.
		 */
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}
}
1422
/*
 * Transmit start routine.  Works in two passes: first dequeue and
 * DMA-load as many mbufs as the ring has room for, then walk the loaded
 * slots and write their descriptors to the NIC.  The first packet's
 * first descriptor is written last (split so its final word goes out
 * after a write barrier) so the firmware only sees a complete batch.
 */
void
myx_start(struct ifqueue *ifq)
{
	struct ifnet			*ifp = ifq->ifq_if;
	struct myx_tx_desc		txd;
	struct myx_softc		*sc = ifp->if_softc;
	struct myx_slot			*ms;
	bus_dmamap_t			map;
	struct mbuf			*m;
	u_int32_t			offset = sc->sc_tx_ring_offset;
	u_int				idx, cons, prod;
	u_int				free, used;
	u_int8_t			flags;

	idx = sc->sc_tx_ring_prod;

	/* figure out space */
	free = sc->sc_tx_ring_cons;
	if (free <= idx)
		free += sc->sc_tx_ring_count;
	free -= idx;

	cons = prod = sc->sc_tx_prod;

	used = 0;

	/* pass 1: dequeue and DMA-load packets into tx slots */
	for (;;) {
		/* worst case: all segments plus a runt pad descriptor */
		if (used + sc->sc_tx_nsegs + 1 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		ms = &sc->sc_tx_slots[prod];

		if (myx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* runts consume one extra descriptor for padding */
		used += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		if (++prod >= sc->sc_tx_ring_count)
			prod = 0;
	}

	/* nothing was loaded */
	if (cons == prod)
		return;

	ms = &sc->sc_tx_slots[cons];

	/* pass 2: post descriptors for every packet except the first */
	for (;;) {
		idx += ms->ms_map->dm_nsegs +
		    (ms->ms_map->dm_mapsize < 60 ? 1 : 0);
		if (idx >= sc->sc_tx_ring_count)
			idx -= sc->sc_tx_ring_count;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;

		if (cons == prod)
			break;

		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, ms, flags, offset, idx);
	}

	/* go back and post first packet */
	ms = &sc->sc_tx_slots[sc->sc_tx_prod];
	map = ms->ms_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	memset(&txd, 0, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, ms, flags, offset, sc->sc_tx_ring_prod);

	/* write all of the first descriptor except its last bus word */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* the final word makes the descriptor (and the batch) visible */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (sc->sc_tx_ring_prod + 1) -
	    sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);

	/* commit */
	sc->sc_tx_ring_prod = idx;
	sc->sc_tx_prod = prod;
}
1555
/*
 * DMA-load an mbuf chain into the slot's map.  If the chain has more
 * segments than the map allows (EFBIG), defragment it once and retry.
 * Returns 0 on success with ms_m set; 1 on failure (caller frees m).
 */
int
myx_load_mbuf(struct myx_softc *sc, struct myx_slot *ms, struct mbuf *m)
{
	bus_dma_tag_t			dmat = sc->sc_dmat;
	bus_dmamap_t			dmap = ms->ms_map;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(dmat, dmap, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH: defrag or reload failed */
	default:
		return (1);
	}

	ms->ms_m = m;
	return (0);
}
1579
/*
 * Interrupt handler.  Reads the DMA'd status block, completes tx and rx
 * work, acknowledges the interrupt, and handles firmware status updates
 * (link state changes, or the stop acknowledgement myx_down() waits on).
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
myx_intr(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state		 state;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 data;
	u_int8_t		 valid = 0;

	state = sc->sc_state;
	if (state == MYX_S_OFF)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/* ms_isvalid is set by the firmware when the status block is fresh */
	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		return (0);
	}

	/* legacy INTx needs an explicit deassert write */
	if (sc->sc_intx) {
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/* re-read txdonecnt until the firmware stops updating the block */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	/* claim the interrupt; NOTE(review): the value 3 appears to be a
	 * firmware-defined claim token — confirm against the Myri-10G spec */
	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (sts->ms_statusupdated) {
		/*
		 * If myx_down() is waiting and the linkdown counter moved,
		 * the firmware has stopped: flip to OFF and wake the sleeper.
		 */
		if (state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown) {
			sc->sc_state = MYX_S_OFF;
			membar_producer();
			wakeup(sts);
		} else {
			data = sts->ms_linkstate;
			if (data != 0xffffffff) {
				KERNEL_LOCK();
				myx_link_state(sc, data);
				KERNEL_UNLOCK();
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (1);
}
1654
1655void
1656myx_refill(void *xmrr)
1657{
1658	struct myx_rx_ring *mrr = xmrr;
1659	struct myx_softc *sc = mrr->mrr_softc;
1660
1661	myx_rx_fill(sc, mrr);
1662
1663	if (mrr->mrr_prod == mrr->mrr_cons)
1664		timeout_add(&mrr->mrr_refill, 1);
1665}
1666
/*
 * Reclaim transmitted packets.  done_count is the firmware's running
 * completion counter; slots are freed until sc_tx_count catches up to
 * it.  The ring consumer index advances by each packet's descriptor
 * footprint (segments plus a pad descriptor for runts).
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_slot *ms;
	bus_dmamap_t map;
	u_int idx, cons;

	idx = sc->sc_tx_ring_cons;
	cons = sc->sc_tx_cons;

	do {
		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		/* runts used one extra descriptor for padding */
		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(ms->ms_m);

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	} while (++sc->sc_tx_count != done_count);
	/* ^ advances our counter per packet until it matches the firmware */

	if (idx >= sc->sc_tx_ring_count)
		idx -= sc->sc_tx_ring_count;

	sc->sc_tx_ring_cons = idx;
	sc->sc_tx_cons = cons;

	/* ring space was freed; let the stack transmit again */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
1702
/*
 * Receive completion.  Walks the interrupt queue of length entries
 * written by the firmware, pulls the corresponding mbufs off the small
 * or big rx ring (chosen by packet length), replenishes the rings, and
 * hands the packets to the network stack in one batch.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_slot *ms;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/* a zero length terminates the queue; clear entries as we consume */
	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* the firmware used the small ring for short packets */
		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

		m = ms->ms_m;
		/* skip the alignment slack the hardware wrote in front */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* return consumed slots to the rxr accounting and refill */
	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		myx_rx_fill(sc, mrr);
		/* could not refill at all: retry from a timeout */
		if (mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&mrr->mrr_refill, 0);
	}

	if_input(ifp, &ml);
}
1764
/*
 * Fill up to "slots" rx ring entries with fresh mbufs and post their
 * DMA addresses to the NIC.  The first slot's descriptor is written
 * last (after a write barrier) so the firmware does not see a partially
 * posted batch.  Returns the number of slots NOT filled.
 */
static int
myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	/* fill the first slot but defer posting its descriptor */
	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		ms = &mrr->mrr_slots[p];

		/* mbuf shortage: stop early, caller returns the rest */
		if (myx_buf_fill(sc, ms, mrr->mrr_mclget) != 0)
			break;

		rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	/* now publish the first slot, making the whole batch visible */
	ms = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}
1810
1811int
1812myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
1813{
1814	struct myx_rx_desc rxd;
1815	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
1816	struct myx_slot *ms;
1817	u_int32_t offset = mrr->mrr_offset;
1818	int rv;
1819	int i;
1820
1821	mrr->mrr_slots = mallocarray(sizeof(*ms), sc->sc_rx_ring_count,
1822	    M_DEVBUF, M_WAITOK);
1823	if (mrr->mrr_slots == NULL)
1824		return (ENOMEM);
1825
1826	memset(&rxd, 0xff, sizeof(rxd));
1827	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1828		ms = &mrr->mrr_slots[i];
1829		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1830		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
1831		if (rv != 0)
1832			goto destroy;
1833
1834		myx_bus_space_write(sc, offset + i * sizeof(rxd),
1835		    &rxd, sizeof(rxd));
1836	}
1837
1838	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1839	mrr->mrr_prod = mrr->mrr_cons = 0;
1840
1841	return (0);
1842
1843destroy:
1844	while (i-- > 0) {
1845		ms = &mrr->mrr_slots[i];
1846		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1847	}
1848	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1849	return (rv);
1850}
1851
1852int
1853myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
1854{
1855	u_int slots;
1856
1857	slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
1858	if (slots == 0)
1859		return (1);
1860
1861	slots = myx_rx_fill_slots(sc, mrr, slots);
1862	if (slots > 0)
1863		if_rxr_put(&mrr->mrr_rxr, slots);
1864
1865	return (0);
1866}
1867
1868void
1869myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
1870{
1871	struct myx_slot *ms;
1872
1873	while (mrr->mrr_cons != mrr->mrr_prod) {
1874		ms = &mrr->mrr_slots[mrr->mrr_cons];
1875
1876		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
1877			mrr->mrr_cons = 0;
1878
1879		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1880		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1881		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
1882		m_freem(ms->ms_m);
1883	}
1884
1885	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1886}
1887
1888void
1889myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
1890{
1891	struct myx_slot *ms;
1892	int i;
1893
1894	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1895		ms = &mrr->mrr_slots[i];
1896		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1897	}
1898
1899	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1900}
1901
1902struct mbuf *
1903myx_mcl_small(void)
1904{
1905	struct mbuf *m;
1906
1907	m = MCLGETI(NULL, M_DONTWAIT, NULL, MYX_RXSMALL_SIZE);
1908	if (m == NULL)
1909		return (NULL);
1910
1911	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;
1912
1913	return (m);
1914}
1915
1916struct mbuf *
1917myx_mcl_big(void)
1918{
1919	struct mbuf *m;
1920	void *mcl;
1921
1922	MGETHDR(m, M_DONTWAIT, MT_DATA);
1923	if (m == NULL)
1924		return (NULL);
1925
1926	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
1927	if (mcl == NULL) {
1928		m_free(m);
1929		return (NULL);
1930	}
1931
1932	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, MEXTFREE_POOL, myx_mcl_pool);
1933	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;
1934
1935	return (m);
1936}
1937
1938int
1939myx_buf_fill(struct myx_softc *sc, struct myx_slot *ms,
1940    struct mbuf *(*mclget)(void))
1941{
1942	struct mbuf *m;
1943	int rv;
1944
1945	m = (*mclget)();
1946	if (m == NULL)
1947		return (ENOMEM);
1948
1949	rv = bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m, BUS_DMA_NOWAIT);
1950	if (rv != 0) {
1951		m_freem(m);
1952		return (rv);
1953	}
1954
1955	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1956	    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1957
1958	ms->ms_m = m;
1959
1960	return (0);
1961}
1962
1963int
1964myx_tx_init(struct myx_softc *sc, bus_size_t size)
1965{
1966	struct myx_slot *ms;
1967	int rv;
1968	int i;
1969
1970	sc->sc_tx_slots = mallocarray(sizeof(*ms), sc->sc_tx_ring_count,
1971	    M_DEVBUF, M_WAITOK);
1972	if (sc->sc_tx_slots == NULL)
1973		return (ENOMEM);
1974
1975	for (i = 0; i < sc->sc_tx_ring_count; i++) {
1976		ms = &sc->sc_tx_slots[i];
1977		rv = bus_dmamap_create(sc->sc_dmat, size, sc->sc_tx_nsegs,
1978		    sc->sc_tx_boundary, sc->sc_tx_boundary,
1979		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
1980		if (rv != 0)
1981			goto destroy;
1982	}
1983
1984	sc->sc_tx_prod = sc->sc_tx_cons = 0;
1985
1986	return (0);
1987
1988destroy:
1989	while (i-- > 0) {
1990		ms = &sc->sc_tx_slots[i];
1991		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1992	}
1993	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
1994	return (rv);
1995}
1996
1997void
1998myx_tx_empty(struct myx_softc *sc)
1999{
2000	struct myx_slot *ms;
2001	u_int cons = sc->sc_tx_cons;
2002	u_int prod = sc->sc_tx_prod;
2003
2004	while (cons != prod) {
2005		ms = &sc->sc_tx_slots[cons];
2006
2007		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
2008		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2009		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
2010		m_freem(ms->ms_m);
2011
2012		if (++cons >= sc->sc_tx_ring_count)
2013			cons = 0;
2014	}
2015
2016	sc->sc_tx_cons = cons;
2017}
2018
2019void
2020myx_tx_free(struct myx_softc *sc)
2021{
2022	struct myx_slot *ms;
2023	int i;
2024
2025	for (i = 0; i < sc->sc_tx_ring_count; i++) {
2026		ms = &sc->sc_tx_slots[i];
2027		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2028	}
2029
2030	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2031}
2032