/* if_myx.c revision 1.106 */
1/*	$OpenBSD: if_myx.c,v 1.106 2019/04/16 09:40:21 dlg Exp $	*/
2
3/*
4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/*
20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21 */
22
23#include "bpfilter.h"
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/sockio.h>
28#include <sys/mbuf.h>
29#include <sys/kernel.h>
30#include <sys/socket.h>
31#include <sys/malloc.h>
32#include <sys/pool.h>
33#include <sys/timeout.h>
34#include <sys/device.h>
35#include <sys/proc.h>
36#include <sys/queue.h>
37#include <sys/rwlock.h>
38
39#include <machine/bus.h>
40#include <machine/intr.h>
41
42#include <net/if.h>
43#include <net/if_dl.h>
44#include <net/if_media.h>
45
46#if NBPFILTER > 0
47#include <net/bpf.h>
48#endif
49
50#include <netinet/in.h>
51#include <netinet/if_ether.h>
52
53#include <dev/pci/pcireg.h>
54#include <dev/pci/pcivar.h>
55#include <dev/pci/pcidevs.h>
56
57#include <dev/pci/if_myxreg.h>
58
59#ifdef MYX_DEBUG
60#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
61#define MYXDBG_CMD	(2<<0)	/* commands */
62#define MYXDBG_INTR	(3<<0)	/* interrupts */
63#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
64int myx_debug = MYXDBG_ALL;
65#define DPRINTF(_lvl, _arg...)	do {					\
66	if (myx_debug & (_lvl))						\
67		printf(_arg);						\
68} while (0)
69#else
70#define DPRINTF(_lvl, arg...)
71#endif
72
73#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
74
/* A single contiguous DMA allocation: map, backing segment and KVA. */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* handle used for load/sync */
	bus_dma_segment_t	 mxm_seg;	/* single physical segment */
	int			 mxm_nsegs;	/* always 1, see myx_dmamem_alloc() */
	size_t			 mxm_size;	/* allocation size in bytes */
	caddr_t			 mxm_kva;	/* kernel virtual mapping */
};
82
83struct pool *myx_mcl_pool;
84
/* One ring entry: the DMA map and the mbuf currently loaded into it. */
struct myx_slot {
	bus_dmamap_t		 ms_map;
	struct mbuf		*ms_m;
};
89
/* State for one receive ring (small or big buffers). */
struct myx_rx_ring {
	struct myx_softc	*mrr_softc;
	struct timeout		 mrr_refill;	/* retry mbuf allocation later */
	struct if_rxring	 mrr_rxr;
	struct myx_slot		*mrr_slots;
	u_int32_t		 mrr_offset;	/* ring offset in device memory */
	u_int			 mrr_running;
	u_int			 mrr_prod;
	u_int			 mrr_cons;
	struct mbuf		*(*mrr_mclget)(void);	/* cluster allocator */
};
101
/* Interface run state. */
enum myx_state {
	MYX_S_OFF = 0,
	MYX_S_RUNNING,
	MYX_S_DOWN
};
107
/* Per-device softc; one instance per attached Myri-10G board. */
struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;		/* ethernet common data */

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	struct myx_dmamem	 sc_zerodma;	/* zeroed pad buffer */
	struct myx_dmamem	 sc_cmddma;	/* command response buffer */
	struct myx_dmamem	 sc_paddma;

	struct myx_dmamem	 sc_sts_dma;	/* backs sc_sts below */
	volatile struct myx_status	*sc_sts;

	int			 sc_intx;	/* legacy INTx, not MSI */
	void			*sc_irqh;
	u_int32_t		 sc_irqcoaloff;
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	struct myx_dmamem	 sc_intrq_dma;
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct myx_rx_ring	 sc_rx_ring[2];

	bus_size_t		 sc_tx_boundary;
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_ring_prod;
	u_int			 sc_tx_ring_cons;

	u_int			 sc_tx_prod;
	u_int			 sc_tx_cons;
	struct myx_slot		*sc_tx_slots;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;

	struct rwlock		 sc_sff_lock;	/* serializes SFF page reads */
};
163
164#define MYX_RXSMALL_SIZE	MCLBYTES
165#define MYX_RXBIG_SIZE		(MYX_MTU - \
166    (ETHER_ALIGN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
167
168int	 myx_match(struct device *, void *, void *);
169void	 myx_attach(struct device *, struct device *, void *);
170int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
171int	 myx_query(struct myx_softc *sc, char *, size_t);
172u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
173void	 myx_attachhook(struct device *);
174int	 myx_loadfirmware(struct myx_softc *, const char *);
175int	 myx_probe_firmware(struct myx_softc *);
176
177void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
178void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
179
180#if defined(__LP64__)
181#define _myx_bus_space_write bus_space_write_raw_region_8
182typedef u_int64_t myx_bus_t;
183#else
184#define _myx_bus_space_write bus_space_write_raw_region_4
185typedef u_int32_t myx_bus_t;
186#endif
187#define myx_bus_space_write(_sc, _o, _a, _l) \
188    _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))
189
190int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
191int	 myx_boot(struct myx_softc *, u_int32_t);
192
193int	 myx_rdma(struct myx_softc *, u_int);
194int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
195	    bus_size_t, u_int align);
196void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
197int	 myx_media_change(struct ifnet *);
198void	 myx_media_status(struct ifnet *, struct ifmediareq *);
199void	 myx_link_state(struct myx_softc *, u_int32_t);
200void	 myx_watchdog(struct ifnet *);
201int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
202int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
203void	 myx_up(struct myx_softc *);
204void	 myx_iff(struct myx_softc *);
205void	 myx_down(struct myx_softc *);
206int	 myx_get_sffpage(struct myx_softc *, struct if_sffpage *);
207
208void	 myx_start(struct ifqueue *);
209void	 myx_write_txd_tail(struct myx_softc *, struct myx_slot *, u_int8_t,
210	    u_int32_t, u_int);
211int	 myx_load_mbuf(struct myx_softc *, struct myx_slot *, struct mbuf *);
212int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
213int	 myx_intr(void *);
214void	 myx_rxeof(struct myx_softc *);
215void	 myx_txeof(struct myx_softc *, u_int32_t);
216
217int			myx_buf_fill(struct myx_softc *, struct myx_slot *,
218			    struct mbuf *(*)(void));
219struct mbuf *		myx_mcl_small(void);
220struct mbuf *		myx_mcl_big(void);
221
222int			myx_rx_init(struct myx_softc *, int, bus_size_t);
223int			myx_rx_fill(struct myx_softc *, struct myx_rx_ring *);
224void			myx_rx_empty(struct myx_softc *, struct myx_rx_ring *);
225void			myx_rx_free(struct myx_softc *, struct myx_rx_ring *);
226
227int			myx_tx_init(struct myx_softc *, bus_size_t);
228void			myx_tx_empty(struct myx_softc *);
229void			myx_tx_free(struct myx_softc *);
230
231void			myx_refill(void *);
232
/* Autoconf glue. */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

/* PCI IDs of the supported Myri-10G variants. */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
244
245int
246myx_match(struct device *parent, void *match, void *aux)
247{
248	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
249}
250
/*
 * Attach: map BAR0, read the board's MAC address and part number out of
 * the card, map the (preferably MSI) interrupt and defer the rest of
 * the bring-up to myx_attachhook() once the root filesystem is mounted,
 * since the firmware is loaded from disk.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* Both rx rings refill themselves from a timeout via myx_refill(). */
	sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small;
	timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXSMALL]);
	sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big;
	timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXBIG]);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt; fall back from MSI to legacy INTx. */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* Non-fatal: the device may still work with default PCIe settings. */
	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	config_mountroot(self, myx_attachhook);

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
310
/*
 * Program the PCIe Device Control register: request a 4096 byte max
 * payload size and enable relaxed ordering.  Returns -1 if the device
 * has no PCIe capability, 0 otherwise.
 */
int
myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t dcsr;
	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
	/* MPS field value for 4096 bytes: fls(4096) - 8 == 5 */
	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
	int reg;

	if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &reg, NULL) == 0)
		return (-1);

	reg += PCI_PCIE_DCSR;
	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
	/* only rewrite the register if the current settings differ */
	if ((dcsr & mask) != dc) {
		CLR(dcsr, mask);
		SET(dcsr, dc);
		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
	}

	return (0);
}
333
334u_int
335myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
336{
337	u_int		i, j;
338	u_int8_t	digit;
339
340	memset(lladdr, 0, ETHER_ADDR_LEN);
341	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
342		if (mac[i] >= '0' && mac[i] <= '9')
343			digit = mac[i] - '0';
344		else if (mac[i] >= 'A' && mac[i] <= 'F')
345			digit = mac[i] - 'A' + 10;
346		else if (mac[i] >= 'a' && mac[i] <= 'f')
347			digit = mac[i] - 'a' + 10;
348		else
349			continue;
350		if ((j & 1) == 0)
351			digit <<= 4;
352		lladdr[j++/2] |= digit;
353	}
354
355	return (i);
356}
357
/*
 * Read the firmware "string specs" block out of the card's SRAM and
 * parse the MAC= (ethernet address) and PC= (part number) key/value
 * strings from it.  Returns non-zero only if the header pointer falls
 * outside the mapped register window.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	/* MYX_HEADER_POS holds a big-endian pointer to the fw header */
	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	/* walk the sequence of NUL-separated "KEY=value" strings */
	for (i = 0; i < len; i++) {
		maxlen = len - i;
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip to the terminator of the current string */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
399
/*
 * Load a firmware image from disk, sanity-check its embedded header,
 * copy it into the card's SRAM in 256 byte chunks and boot it.
 * Returns 0 on success; errors are reported to the console.
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* the image embeds the offset of its own generic header */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	/* only accept ethernet firmware of the expected version */
	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF, fwlen);
	return (ret);
}
454
/*
 * Deferred attach, run from config_mountroot() so that the firmware can
 * be loaded from the root filesystem.  Sets up the shared mbuf cluster
 * pool, command DMA memory, firmware, interrupt handler, and finally
 * attaches the network interface.
 */
void
myx_attachhook(struct device *self)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* this is sort of racy */
	if (myx_mcl_pool == NULL) {
		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);

		m_pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY,
		    "myxmcl");
		pool_cache_init(myx_mcl_pool);
	}

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* aligned firmware default; myx_probe_firmware() may lower this */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_qstart = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = MYX_RXBIG_SIZE;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* real send queue length is set once the tx ring size is known */
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
536
/*
 * Decide whether the already-loaded aligned firmware can be used.
 * Links that are x4 or narrower always can; on wider links, run the
 * firmware's unaligned DMA read/write/read-write self-tests and, if
 * any fail, fall back to the unaligned firmware and the smaller 2048
 * byte tx boundary.  Returns 0 on success.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		/* negotiated link width lives in LCSR bits 25:20 */
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	/* scratch page for the firmware's DMA self-tests */
	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	/* the self-tests failed; switch to the unaligned firmware */
	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
617
/*
 * Copy len bytes from device memory at off into ptr.  The read barrier
 * is issued first so the copy observes up-to-date device data.
 */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}
625
/*
 * Copy len bytes from ptr into device memory at off, then issue a
 * write barrier so the data is pushed out before subsequent accesses.
 */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}
633
/*
 * Allocate size bytes of DMA-safe memory in one physical segment with
 * the requested alignment, map it into kernel VA and load it into a
 * DMA map.  Returns 0 on success, 1 on failure with nothing left
 * allocated (classic goto-unwind cleanup).
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
664
/*
 * Tear down an allocation made by myx_dmamem_alloc(), in the reverse
 * order of its setup steps.
 */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
673
/*
 * Issue a command to the firmware through the MYX_CMD window and poll
 * the DMA response buffer (up to ~20ms) for completion.  On MYXCMD_OK
 * the 32-bit response payload is stored via r (if non-NULL).  Returns
 * the firmware result code; 0xffffffff means the command timed out.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	struct myx_response	*mr;
	u_int			 i;
	u_int32_t		 result, data;

	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* the firmware DMAs its response into the command buffer */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		/* 0xffffffff is the sentinel for "not yet completed" */
		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): cmd %u completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmd, i, result, data, data);

	if (result == MYXCMD_OK) {
		if (r != NULL)
			*r = data;
	}

	return (result);
}
719
/*
 * Start the firmware previously written to SRAM: hand the boot command
 * to the MYX_BOOT window and poll (up to ~200ms) for the completion
 * token the firmware writes into the command DMA buffer.  Returns 0 on
 * success, 1 on timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	/* skip the first 8 bytes of the image, as the firmware expects */
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		/* the firmware writes 0xffffffff when the boot is done */
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
763
/*
 * Enable or disable the firmware's dummy RDMA.  Polls (up to ~20ms)
 * for the completion token in the command DMA buffer.  Returns 0 on
 * success, 1 on timeout.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/* the firmware writes 0xffffffff on completion */
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
814
/*
 * Media change callback.  The hardware has no user-selectable media,
 * so there is nothing to do.
 */
int
myx_media_change(struct ifnet *ifp)
{
	return (0);
}
821
/*
 * Media status callback.  Snapshot the link state the firmware keeps
 * updated in the status DMA block (bracketed by bus_dmamap_sync) and
 * report it through ifmedia.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	/* no valid status block unless the interface is running */
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sts = sc->sc_sts->ms_linkstate;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	/* 10GbE is always full duplex with flow control */
	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
851
852void
853myx_link_state(struct myx_softc *sc, u_int32_t sts)
854{
855	struct ifnet		*ifp = &sc->sc_ac.ac_if;
856	int			 link_state = LINK_STATE_DOWN;
857
858	if (betoh32(sts) == MYXSTS_LINKUP)
859		link_state = LINK_STATE_FULL_DUPLEX;
860	if (ifp->if_link_state != link_state) {
861		ifp->if_link_state = link_state;
862		if_link_state_change(ifp);
863		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
864		    IF_Gbps(10) : 0;
865	}
866}
867
/*
 * Watchdog callback; intentionally a no-op.
 */
void
myx_watchdog(struct ifnet *ifp)
{
}
873
/*
 * Interface ioctl handler, run at splnet.  An ENETRESET from a
 * subcommand is translated into a call to myx_iff() when the interface
 * is both up and running.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		/* serialize slow i2c page reads; interruptible by signals */
		error = rw_enter(&sc->sc_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = myx_get_sffpage(sc, (struct if_sffpage *)data);
		rw_exit(&sc->sc_sff_lock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
932
933int
934myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
935{
936	struct if_rxring_info ifr[2];
937
938	memset(ifr, 0, sizeof(ifr));
939
940	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
941	ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;
942
943	ifr[1].ifr_size = MYX_RXBIG_SIZE;
944	ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;
945
946	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
947}
948
/*
 * Fetch one byte at offset off of the i2c transfer previously kicked
 * off with MYXCMD_I2C_READ.  The firmware answers MYXCMD_ERR_BUSY
 * while the transfer is still in flight, so retry for up to ~600ms.
 * Returns 0 on success, EIO on a firmware error, EBUSY on timeout.
 */
static int
myx_i2c_byte(struct myx_softc *sc, uint8_t off, uint8_t *byte)
{
	struct myx_cmd		mc;
	int			result;
	uint32_t		r;
	unsigned int		ms;

	for (ms = 0; ms < 600; ms++) {
		memset(&mc, 0, sizeof(mc));
		mc.mc_data0 = htobe32(off);
		result = myx_cmd(sc, MYXCMD_I2C_BYTE, &mc, &r);
		switch (result) {
		case MYXCMD_OK:
			*byte = r;
			return (0);
		case MYXCMD_ERR_BUSY:
			break;
		default:
			return (EIO);
		}

		delay(1000);
	}

	return (EBUSY);
}
976
/*
 * Read a full 256 byte SFF module page over i2c.  The firmware is
 * first told to latch the whole page; for the EEPROM i2c address the
 * currently selected page (register 127) is checked against the
 * requested one before the data is copied out byte by byte.
 * Returns 0 on success or an errno.
 */
int
myx_get_sffpage(struct myx_softc *sc, struct if_sffpage *sff)
{
	struct myx_cmd		mc;
	unsigned int		i;
	int			result;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(1); /* get all 256 bytes */
	mc.mc_data1 = htobe32(sff->sff_addr << 8);
	result = myx_cmd(sc, MYXCMD_I2C_READ, &mc, NULL);
	if (result != 0)
		return (EIO);

	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
		uint8_t page;

		result = myx_i2c_byte(sc, 127, &page);
		if (result != 0)
			return (result);

		/* the module is on a different page than requested */
		if (page != sff->sff_page)
			return (ENXIO);
	}

	for (i = 0; i < sizeof(sff->sff_data); i++) {
		result = myx_i2c_byte(sc, i, &sff->sff_data[i]);
		if (result != 0)
			return (result);
	}

	return (0);
}
1010
1011void
1012myx_up(struct myx_softc *sc)
1013{
1014	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1015	struct myx_cmd		mc;
1016	bus_dmamap_t		map;
1017	size_t			size;
1018	u_int			maxpkt;
1019	u_int32_t		r;
1020
1021	memset(&mc, 0, sizeof(mc));
1022	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1023		printf("%s: failed to reset the device\n", DEVNAME(sc));
1024		return;
1025	}
1026
1027	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
1028	    64, MYXALIGN_CMD) != 0) {
1029		printf("%s: failed to allocate zero pad memory\n",
1030		    DEVNAME(sc));
1031		return;
1032	}
1033	memset(sc->sc_zerodma.mxm_kva, 0, 64);
1034	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1035	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1036
1037	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
1038	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
1039		printf("%s: failed to allocate pad DMA memory\n",
1040		    DEVNAME(sc));
1041		goto free_zero;
1042	}
1043	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1044	    sc->sc_paddma.mxm_map->dm_mapsize,
1045	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1046
1047	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
1048		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
1049		goto free_pad;
1050	}
1051
1052	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1053		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1054		goto free_pad;
1055	}
1056	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1057
1058	memset(&mc, 0, sizeof(mc));
1059	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1060		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1061		goto free_pad;
1062	}
1063	sc->sc_tx_ring_prod = 0;
1064	sc->sc_tx_ring_cons = 0;
1065	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1066	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1067	sc->sc_tx_count = 0;
1068	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1069
1070	/* Allocate Interrupt Queue */
1071
1072	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1073	sc->sc_intrq_idx = 0;
1074
1075	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1076	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1077	    size, MYXALIGN_DATA) != 0) {
1078		goto free_pad;
1079	}
1080	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1081	map = sc->sc_intrq_dma.mxm_map;
1082	memset(sc->sc_intrq, 0, size);
1083	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1084	    BUS_DMASYNC_PREREAD);
1085
1086	memset(&mc, 0, sizeof(mc));
1087	mc.mc_data0 = htobe32(size);
1088	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1089		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1090		goto free_intrq;
1091	}
1092
1093	memset(&mc, 0, sizeof(mc));
1094	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1095	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1096	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1097		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1098		goto free_intrq;
1099	}
1100
1101	/*
1102	 * get interrupt offsets
1103	 */
1104
1105	memset(&mc, 0, sizeof(mc));
1106	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1107	    &sc->sc_irqclaimoff) != 0) {
1108		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1109		goto free_intrq;
1110	}
1111
1112	memset(&mc, 0, sizeof(mc));
1113	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1114	    &sc->sc_irqdeassertoff) != 0) {
1115		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1116		goto free_intrq;
1117	}
1118
1119	memset(&mc, 0, sizeof(mc));
1120	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1121	    &sc->sc_irqcoaloff) != 0) {
1122		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1123		goto free_intrq;
1124	}
1125
1126	/* Set an appropriate interrupt coalescing period */
1127	r = htobe32(MYX_IRQCOALDELAY);
1128	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1129
1130	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1131		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1132		goto free_intrq;
1133	}
1134
1135	memset(&mc, 0, sizeof(mc));
1136	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1137		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1138		goto free_intrq;
1139	}
1140
1141	memset(&mc, 0, sizeof(mc));
1142	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1143		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1144		goto free_intrq;
1145	}
1146
1147	memset(&mc, 0, sizeof(mc));
1148	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1149	    &sc->sc_tx_ring_offset) != 0) {
1150		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1151		goto free_intrq;
1152	}
1153
1154	memset(&mc, 0, sizeof(mc));
1155	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1156	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
1157		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1158		goto free_intrq;
1159	}
1160
1161	memset(&mc, 0, sizeof(mc));
1162	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1163	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
1164		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1165		goto free_intrq;
1166	}
1167
1168	/* Allocate Interrupt Data */
1169	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1170	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1171		printf("%s: failed to allocate status DMA memory\n",
1172		    DEVNAME(sc));
1173		goto free_intrq;
1174	}
1175	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1176	map = sc->sc_sts_dma.mxm_map;
1177	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1178	    BUS_DMASYNC_PREREAD);
1179
1180	memset(&mc, 0, sizeof(mc));
1181	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1182	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1183	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1184	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1185		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1186		goto free_sts;
1187	}
1188
1189	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1190
1191	memset(&mc, 0, sizeof(mc));
1192	mc.mc_data0 = htobe32(maxpkt);
1193	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1194		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1195		goto free_sts;
1196	}
1197
1198	if (myx_tx_init(sc, maxpkt) != 0)
1199		goto free_sts;
1200
1201	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
1202		goto free_tx_ring;
1203
1204	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0)
1205		goto free_rx_ring_small;
1206
1207	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
1208		goto empty_rx_ring_small;
1209
1210	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0)
1211		goto free_rx_ring_big;
1212
1213	memset(&mc, 0, sizeof(mc));
1214	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
1215	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1216		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1217		goto empty_rx_ring_big;
1218	}
1219
1220	memset(&mc, 0, sizeof(mc));
1221	mc.mc_data0 = htobe32(16384);
1222	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1223		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1224		goto empty_rx_ring_big;
1225	}
1226
1227	sc->sc_state = MYX_S_RUNNING;
1228
1229	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1230		printf("%s: failed to start the device\n", DEVNAME(sc));
1231		goto empty_rx_ring_big;
1232	}
1233
1234	myx_iff(sc);
1235	SET(ifp->if_flags, IFF_RUNNING);
1236	ifq_restart(&ifp->if_snd);
1237
1238	return;
1239
1240empty_rx_ring_big:
1241	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1242free_rx_ring_big:
1243	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1244empty_rx_ring_small:
1245	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1246free_rx_ring_small:
1247	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1248free_tx_ring:
1249	myx_tx_free(sc);
1250free_sts:
1251	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1252	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1253	myx_dmamem_free(sc, &sc->sc_sts_dma);
1254free_intrq:
1255	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1256	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1257	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1258free_pad:
1259	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1260	    sc->sc_paddma.mxm_map->dm_mapsize,
1261	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1262	myx_dmamem_free(sc, &sc->sc_paddma);
1263
1264	memset(&mc, 0, sizeof(mc));
1265	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1266		printf("%s: failed to reset the device\n", DEVNAME(sc));
1267	}
1268free_zero:
1269	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1270	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1271	myx_dmamem_free(sc, &sc->sc_zerodma);
1272}
1273
1274int
1275myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1276{
1277	struct myx_cmd		 mc;
1278
1279	memset(&mc, 0, sizeof(mc));
1280	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1281	    addr[2] << 8 | addr[3]);
1282	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1283
1284	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1285		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1286		return (-1);
1287	}
1288	return (0);
1289}
1290
1291void
1292myx_iff(struct myx_softc *sc)
1293{
1294	struct myx_cmd		mc;
1295	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1296	struct ether_multi	*enm;
1297	struct ether_multistep	step;
1298	u_int8_t *addr;
1299
1300	CLR(ifp->if_flags, IFF_ALLMULTI);
1301
1302	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1303	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1304		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1305		return;
1306	}
1307
1308	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1309		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1310		return;
1311	}
1312
1313	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1314		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1315		return;
1316	}
1317
1318	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1319	    sc->sc_ac.ac_multirangecnt > 0) {
1320		SET(ifp->if_flags, IFF_ALLMULTI);
1321		return;
1322	}
1323
1324	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1325	while (enm != NULL) {
1326		addr = enm->enm_addrlo;
1327
1328		memset(&mc, 0, sizeof(mc));
1329		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1330		    addr[2] << 8 | addr[3]);
1331		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1332		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1333			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1334			return;
1335		}
1336
1337		ETHER_NEXT_MULTI(step, enm);
1338	}
1339
1340	memset(&mc, 0, sizeof(mc));
1341	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1342		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1343		return;
1344	}
1345}
1346
1347void
1348myx_down(struct myx_softc *sc)
1349{
1350	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1351	volatile struct myx_status *sts = sc->sc_sts;
1352	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
1353	struct sleep_state	 sls;
1354	struct myx_cmd		 mc;
1355	int			 s;
1356	int			 ring;
1357
1358	CLR(ifp->if_flags, IFF_RUNNING);
1359
1360	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1361	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1362	sc->sc_linkdown = sts->ms_linkdown;
1363	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1364	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1365
1366	sc->sc_state = MYX_S_DOWN;
1367	membar_producer();
1368
1369	memset(&mc, 0, sizeof(mc));
1370	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);
1371
1372	while (sc->sc_state != MYX_S_OFF) {
1373		sleep_setup(&sls, sts, PWAIT, "myxdown");
1374		membar_consumer();
1375		sleep_finish(&sls, sc->sc_state != MYX_S_OFF);
1376	}
1377
1378	s = splnet();
1379	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
1380		ifp->if_link_state = LINK_STATE_UNKNOWN;
1381		ifp->if_baudrate = 0;
1382		if_link_state_change(ifp);
1383	}
1384	splx(s);
1385
1386	memset(&mc, 0, sizeof(mc));
1387	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1388		printf("%s: failed to reset the device\n", DEVNAME(sc));
1389	}
1390
1391	ifq_clr_oactive(&ifp->if_snd);
1392	ifq_barrier(&ifp->if_snd);
1393
1394	for (ring = 0; ring < 2; ring++) {
1395		struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
1396
1397		timeout_del(&mrr->mrr_refill);
1398		myx_rx_empty(sc, mrr);
1399		myx_rx_free(sc, mrr);
1400	}
1401
1402	myx_tx_empty(sc);
1403	myx_tx_free(sc);
1404
1405	/* the sleep shizz above already synced this dmamem */
1406	myx_dmamem_free(sc, &sc->sc_sts_dma);
1407
1408	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1409	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1410	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1411
1412	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1413	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1414	myx_dmamem_free(sc, &sc->sc_paddma);
1415
1416	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1417	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1418	myx_dmamem_free(sc, &sc->sc_zerodma);
1419}
1420
/*
 * Write the second and subsequent tx descriptors for the packet in
 * slot "ms" into the device tx ring at "offset", where "idx" is the
 * ring index of the packet's first descriptor.  Frames shorter than
 * 60 bytes get one extra descriptor pointing at the zero-filled pad
 * buffer so the frame on the wire reaches the ethernet minimum.
 */
void
myx_write_txd_tail(struct myx_softc *sc, struct myx_slot *ms, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc		txd;
	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
	bus_dmamap_t			map = ms->ms_map;
	int				i;

	/* segment 0 is written by the caller; emit descriptors 1..n-1 */
	for (i = 1; i < map->dm_nsegs; i++) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}

	/* pad runt frames */
	if (map->dm_mapsize < 60) {
		/* i == dm_nsegs here, so the pad lands after the last seg */
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(60 - map->dm_mapsize);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}
}
1453
/*
 * Transmit start routine.  Dequeues packets, DMA-loads them into tx
 * slots, then writes descriptors into the device ring.  The first
 * descriptor of the whole batch is deliberately written last (and its
 * final word last of all, under barriers) so the firmware cannot see
 * a partially posted batch.
 */
void
myx_start(struct ifqueue *ifq)
{
	struct ifnet			*ifp = ifq->ifq_if;
	struct myx_tx_desc		txd;
	struct myx_softc		*sc = ifp->if_softc;
	struct myx_slot			*ms;
	bus_dmamap_t			map;
	struct mbuf			*m;
	u_int32_t			offset = sc->sc_tx_ring_offset;
	u_int				idx, cons, prod;
	u_int				free, used;
	u_int8_t			flags;

	idx = sc->sc_tx_ring_prod;

	/* figure out space */
	free = sc->sc_tx_ring_cons;
	if (free <= idx)
		free += sc->sc_tx_ring_count;
	free -= idx;

	cons = prod = sc->sc_tx_prod;

	used = 0;

	/* phase 1: dequeue and DMA-load as many packets as will fit */
	for (;;) {
		/* worst case: nsegs descriptors plus one pad descriptor */
		if (used + sc->sc_tx_nsegs + 1 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		ms = &sc->sc_tx_slots[prod];

		if (myx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* runts consume an extra descriptor for the pad buffer */
		used += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		if (++prod >= sc->sc_tx_ring_count)
			prod = 0;
	}

	/* nothing was loaded */
	if (cons == prod)
		return;

	ms = &sc->sc_tx_slots[cons];

	/*
	 * phase 2: post descriptors for every packet except the first;
	 * idx tracks the ring position of each packet's first descriptor.
	 */
	for (;;) {
		idx += ms->ms_map->dm_nsegs +
		    (ms->ms_map->dm_mapsize < 60 ? 1 : 0);
		if (idx >= sc->sc_tx_ring_count)
			idx -= sc->sc_tx_ring_count;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;

		if (cons == prod)
			break;

		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, ms, flags, offset, idx);
	}

	/* go back and post first packet */
	ms = &sc->sc_tx_slots[sc->sc_tx_prod];
	map = ms->ms_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	memset(&txd, 0, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, ms, flags, offset, sc->sc_tx_ring_prod);

	/* write all but the last word of the first descriptor */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* the final word completes the descriptor and kicks the firmware */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (sc->sc_tx_ring_prod + 1) -
	    sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);

	/* commit */
	sc->sc_tx_ring_prod = idx;
	sc->sc_tx_prod = prod;
}
1586
/*
 * Load mbuf chain "m" into the slot's DMA map.  If the chain is more
 * fragmented than the map allows (EFBIG), compact it with m_defrag()
 * and retry once.  Returns 0 on success (slot takes ownership of m via
 * ms_m), 1 on failure (caller frees the mbuf).
 */
int
myx_load_mbuf(struct myx_softc *sc, struct myx_slot *ms, struct mbuf *m)
{
	bus_dma_tag_t			dmat = sc->sc_dmat;
	bus_dmamap_t			dmap = ms->ms_map;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(dmat, dmap, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (1);
	}

	ms->ms_m = m;
	return (0);
}
1610
/*
 * Interrupt handler.  Consumes the DMA'd status block: acknowledges
 * the interrupt, reclaims completed tx slots, drains the rx interrupt
 * queue, and handles link/state changes (including the IFDOWN
 * handshake with myx_down()).  Returns 1 if the interrupt was ours.
 */
int
myx_intr(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state		 state;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 data;
	u_int8_t		 valid = 0;

	state = sc->sc_state;
	if (state == MYX_S_OFF)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/* firmware sets ms_isvalid when it has posted a status update */
	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		return (0);
	}

	/* legacy INTx needs an explicit deassert write */
	if (sc->sc_intx) {
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/* re-read until the firmware stops updating the status block */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	/* claim the interrupt: two writes of 3, one per claim register */
	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (sts->ms_statusupdated) {
		/*
		 * While going down, a change in ms_linkdown signals the
		 * firmware has processed IFDOWN: complete the handshake
		 * and wake the sleeper in myx_down().
		 */
		if (state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown) {
			sc->sc_state = MYX_S_OFF;
			membar_producer();
			wakeup(sts);
		} else {
			data = sts->ms_linkstate;
			if (data != 0xffffffff) {
				KERNEL_LOCK();
				myx_link_state(sc, data);
				KERNEL_UNLOCK();
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (1);
}
1685
1686void
1687myx_refill(void *xmrr)
1688{
1689	struct myx_rx_ring *mrr = xmrr;
1690	struct myx_softc *sc = mrr->mrr_softc;
1691
1692	myx_rx_fill(sc, mrr);
1693
1694	if (mrr->mrr_prod == mrr->mrr_cons)
1695		timeout_add(&mrr->mrr_refill, 1);
1696}
1697
/*
 * Reclaim transmitted packets.  done_count is the firmware's running
 * count of completed sends; slots are freed until the driver's own
 * counter (sc_tx_count) catches up to it.
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_slot *ms;
	bus_dmamap_t map;
	u_int idx, cons;

	idx = sc->sc_tx_ring_cons;
	cons = sc->sc_tx_cons;

	do {
		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		/* each packet used nsegs descriptors, plus one if padded */
		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(ms->ms_m);

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	} while (++sc->sc_tx_count != done_count);

	/* idx advanced by at most one full ring; a single wrap suffices */
	if (idx >= sc->sc_tx_ring_count)
		idx -= sc->sc_tx_ring_count;

	sc->sc_tx_ring_cons = idx;
	sc->sc_tx_cons = cons;

	/* descriptors were freed; let the stack resume transmitting */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
1733
/*
 * Receive completion.  Walks the interrupt queue the firmware fills
 * with (length) entries, pulls the matching mbufs off the small or big
 * rx ring, hands them to the stack, and refills the rings.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_slot *ms;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/* a zero length terminates the queue; clear entries as we go */
	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* the length tells us which ring the firmware used */
		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

		m = ms->ms_m;
		/* skip the alignment pad so the IP header is aligned */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* return consumed slots to the rxr accounting and refill */
	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		myx_rx_fill(sc, mrr);
		/* ring completely empty: lean on the refill timeout */
		if (mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&mrr->mrr_refill, 0);
	}

	if_input(ifp, &ml);
}
1795
/*
 * Fill up to "slots" rx slots with fresh buffers and post their
 * descriptors to the device.  The descriptor for the first filled slot
 * is written last, after a barrier, so the firmware never sees it
 * before the rest of the batch is visible.  Returns the number of
 * slots that could NOT be filled.
 */
static int
myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		ms = &mrr->mrr_slots[p];

		if (myx_buf_fill(sc, ms, mrr->mrr_mclget) != 0)
			break;

		rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	/* now publish the first slot's descriptor */
	ms = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}
1841
1842int
1843myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
1844{
1845	struct myx_rx_desc rxd;
1846	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
1847	struct myx_slot *ms;
1848	u_int32_t offset = mrr->mrr_offset;
1849	int rv;
1850	int i;
1851
1852	mrr->mrr_slots = mallocarray(sizeof(*ms), sc->sc_rx_ring_count,
1853	    M_DEVBUF, M_WAITOK);
1854	if (mrr->mrr_slots == NULL)
1855		return (ENOMEM);
1856
1857	memset(&rxd, 0xff, sizeof(rxd));
1858	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1859		ms = &mrr->mrr_slots[i];
1860		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1861		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
1862		if (rv != 0)
1863			goto destroy;
1864
1865		myx_bus_space_write(sc, offset + i * sizeof(rxd),
1866		    &rxd, sizeof(rxd));
1867	}
1868
1869	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1870	mrr->mrr_prod = mrr->mrr_cons = 0;
1871
1872	return (0);
1873
1874destroy:
1875	while (i-- > 0) {
1876		ms = &mrr->mrr_slots[i];
1877		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1878	}
1879	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1880	return (rv);
1881}
1882
1883int
1884myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
1885{
1886	u_int slots;
1887
1888	slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
1889	if (slots == 0)
1890		return (1);
1891
1892	slots = myx_rx_fill_slots(sc, mrr, slots);
1893	if (slots > 0)
1894		if_rxr_put(&mrr->mrr_rxr, slots);
1895
1896	return (0);
1897}
1898
1899void
1900myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
1901{
1902	struct myx_slot *ms;
1903
1904	while (mrr->mrr_cons != mrr->mrr_prod) {
1905		ms = &mrr->mrr_slots[mrr->mrr_cons];
1906
1907		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
1908			mrr->mrr_cons = 0;
1909
1910		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1911		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1912		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
1913		m_freem(ms->ms_m);
1914	}
1915
1916	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1917}
1918
1919void
1920myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
1921{
1922	struct myx_slot *ms;
1923	int i;
1924
1925	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1926		ms = &mrr->mrr_slots[i];
1927		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1928	}
1929
1930	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1931}
1932
1933struct mbuf *
1934myx_mcl_small(void)
1935{
1936	struct mbuf *m;
1937
1938	m = MCLGETI(NULL, M_DONTWAIT, NULL, MYX_RXSMALL_SIZE);
1939	if (m == NULL)
1940		return (NULL);
1941
1942	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;
1943
1944	return (m);
1945}
1946
1947struct mbuf *
1948myx_mcl_big(void)
1949{
1950	struct mbuf *m;
1951	void *mcl;
1952
1953	MGETHDR(m, M_DONTWAIT, MT_DATA);
1954	if (m == NULL)
1955		return (NULL);
1956
1957	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
1958	if (mcl == NULL) {
1959		m_free(m);
1960		return (NULL);
1961	}
1962
1963	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, MEXTFREE_POOL, myx_mcl_pool);
1964	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;
1965
1966	return (m);
1967}
1968
1969int
1970myx_buf_fill(struct myx_softc *sc, struct myx_slot *ms,
1971    struct mbuf *(*mclget)(void))
1972{
1973	struct mbuf *m;
1974	int rv;
1975
1976	m = (*mclget)();
1977	if (m == NULL)
1978		return (ENOMEM);
1979
1980	rv = bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m, BUS_DMA_NOWAIT);
1981	if (rv != 0) {
1982		m_freem(m);
1983		return (rv);
1984	}
1985
1986	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1987	    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1988
1989	ms->ms_m = m;
1990
1991	return (0);
1992}
1993
1994int
1995myx_tx_init(struct myx_softc *sc, bus_size_t size)
1996{
1997	struct myx_slot *ms;
1998	int rv;
1999	int i;
2000
2001	sc->sc_tx_slots = mallocarray(sizeof(*ms), sc->sc_tx_ring_count,
2002	    M_DEVBUF, M_WAITOK);
2003	if (sc->sc_tx_slots == NULL)
2004		return (ENOMEM);
2005
2006	for (i = 0; i < sc->sc_tx_ring_count; i++) {
2007		ms = &sc->sc_tx_slots[i];
2008		rv = bus_dmamap_create(sc->sc_dmat, size, sc->sc_tx_nsegs,
2009		    sc->sc_tx_boundary, sc->sc_tx_boundary,
2010		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
2011		if (rv != 0)
2012			goto destroy;
2013	}
2014
2015	sc->sc_tx_prod = sc->sc_tx_cons = 0;
2016
2017	return (0);
2018
2019destroy:
2020	while (i-- > 0) {
2021		ms = &sc->sc_tx_slots[i];
2022		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2023	}
2024	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2025	return (rv);
2026}
2027
2028void
2029myx_tx_empty(struct myx_softc *sc)
2030{
2031	struct myx_slot *ms;
2032	u_int cons = sc->sc_tx_cons;
2033	u_int prod = sc->sc_tx_prod;
2034
2035	while (cons != prod) {
2036		ms = &sc->sc_tx_slots[cons];
2037
2038		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
2039		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2040		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
2041		m_freem(ms->ms_m);
2042
2043		if (++cons >= sc->sc_tx_ring_count)
2044			cons = 0;
2045	}
2046
2047	sc->sc_tx_cons = cons;
2048}
2049
2050void
2051myx_tx_free(struct myx_softc *sc)
2052{
2053	struct myx_slot *ms;
2054	int i;
2055
2056	for (i = 0; i < sc->sc_tx_ring_count; i++) {
2057		ms = &sc->sc_tx_slots[i];
2058		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2059	}
2060
2061	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2062}
2063