/* if_myx.c revision 1.88 */
1/*	$OpenBSD: if_myx.c,v 1.88 2015/11/25 03:09:59 dlg Exp $	*/
2
3/*
4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/*
20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21 */
22
23#include "bpfilter.h"
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/sockio.h>
28#include <sys/mbuf.h>
29#include <sys/kernel.h>
30#include <sys/socket.h>
31#include <sys/malloc.h>
32#include <sys/pool.h>
33#include <sys/timeout.h>
34#include <sys/device.h>
35#include <sys/proc.h>
36#include <sys/queue.h>
37#include <sys/atomic.h>
38
39#include <machine/bus.h>
40#include <machine/intr.h>
41
42#include <net/if.h>
43#include <net/if_dl.h>
44#include <net/if_media.h>
45
46#if NBPFILTER > 0
47#include <net/bpf.h>
48#endif
49
50#include <netinet/in.h>
51#include <netinet/if_ether.h>
52
53#include <dev/pci/pcireg.h>
54#include <dev/pci/pcivar.h>
55#include <dev/pci/pcidevs.h>
56
57#include <dev/pci/if_myxreg.h>
58
#ifdef MYX_DEBUG
/*
 * Debug categories are independent bits so that myx_debug can select
 * any combination.  The previous values (2<<0 == 2, 3<<0 == 3) were not
 * distinct bits: INTR (3) matched whenever INIT or CMD was enabled.
 */
#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
#define MYXDBG_CMD	(1<<1)	/* commands */
#define MYXDBG_INTR	(1<<2)	/* interrupts */
#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
int myx_debug = MYXDBG_ALL;
#define DPRINTF(_lvl, _arg...)	do {					\
	if (myx_debug & (_lvl))						\
		printf(_arg);						\
} while (0)
#else
#define DPRINTF(_lvl, arg...)
#endif
72
73#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
74
/*
 * A single contiguous chunk of DMA-able memory: one segment, mapped into
 * kernel virtual address space with a loaded dmamap.  Allocated and torn
 * down by myx_dmamem_alloc()/myx_dmamem_free().
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* loaded DMA map */
	bus_dma_segment_t	 mxm_seg;	/* the single backing segment */
	int			 mxm_nsegs;	/* segment count (always 1) */
	size_t			 mxm_size;	/* size of the allocation */
	caddr_t			 mxm_kva;	/* kernel mapping of the segment */
};
82
/* Shared cluster pool for the big (9k) rx buffers; created once, lazily. */
struct pool *myx_mcl_pool;

/* One descriptor slot: the mbuf currently posted and its DMA map. */
struct myx_slot {
	bus_dmamap_t		 ms_map;
	struct mbuf		*ms_m;
};

/*
 * Per-size receive ring state.  There are two of these per device
 * (MYX_RXSMALL and MYX_RXBIG), each with its own refill timeout and
 * cluster allocator.
 */
struct myx_rx_ring {
	struct myx_softc	*mrr_softc;	/* back pointer to the device */
	struct timeout		 mrr_refill;	/* retry refill when mbufs run out */
	struct if_rxring	 mrr_rxr;	/* rx ring accounting */
	struct myx_slot		*mrr_slots;	/* posted buffers */
	u_int32_t		 mrr_offset;	/* ring offset in chip SRAM */
	u_int			 mrr_running;
	u_int			 mrr_prod;	/* producer index */
	u_int			 mrr_cons;	/* consumer index */
	struct mbuf		*(*mrr_mclget)(void); /* cluster allocator for this ring */
};

/* Interface state machine driven by myx_up()/myx_down(). */
enum myx_state {
	MYX_S_OFF = 0,
	MYX_S_RUNNING,
	MYX_S_DOWN
};
107
/* Per-device software state. */
struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;		/* ethernet common */

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;	/* BAR0 register window */
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;	/* size of the mapped window */

	struct myx_dmamem	 sc_zerodma;	/* zero pad for short tx frames */
	struct myx_dmamem	 sc_cmddma;	/* command/response mailbox */
	struct myx_dmamem	 sc_paddma;	/* dummy RDMA target */

	struct myx_dmamem	 sc_sts_dma;	/* device status block */
	volatile struct myx_status	*sc_sts;

	int			 sc_intx;	/* 1 if using INTx, 0 if MSI */
	void			*sc_irqh;
	/* register offsets reported by the firmware at myx_up() time */
	u_int32_t		 sc_irqcoaloff;
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	struct myx_dmamem	 sc_intrq_dma;	/* interrupt/completion queue */
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct myx_rx_ring	 sc_rx_ring[2];

	/* segments of a tx packet may not cross this boundary (fw dependent) */
	bus_size_t		 sc_tx_boundary;
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_ring_prod;
	u_int			 sc_tx_ring_cons;

	u_int			 sc_tx_prod;
	u_int			 sc_tx_cons;
	struct myx_slot		*sc_tx_slots;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;
};
161
162#define MYX_RXSMALL_SIZE	MCLBYTES
163#define MYX_RXBIG_SIZE		(9 * 1024)
164
165int	 myx_match(struct device *, void *, void *);
166void	 myx_attach(struct device *, struct device *, void *);
167int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
168int	 myx_query(struct myx_softc *sc, char *, size_t);
169u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
170void	 myx_attachhook(void *);
171int	 myx_loadfirmware(struct myx_softc *, const char *);
172int	 myx_probe_firmware(struct myx_softc *);
173
174void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
175void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
176
177#if defined(__LP64__)
178#define _myx_bus_space_write bus_space_write_raw_region_8
179typedef u_int64_t myx_bus_t;
180#else
181#define _myx_bus_space_write bus_space_write_raw_region_4
182typedef u_int32_t myx_bus_t;
183#endif
184#define myx_bus_space_write(_sc, _o, _a, _l) \
185    _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))
186
187int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
188int	 myx_boot(struct myx_softc *, u_int32_t);
189
190int	 myx_rdma(struct myx_softc *, u_int);
191int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
192	    bus_size_t, u_int align);
193void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
194int	 myx_media_change(struct ifnet *);
195void	 myx_media_status(struct ifnet *, struct ifmediareq *);
196void	 myx_link_state(struct myx_softc *, u_int32_t);
197void	 myx_watchdog(struct ifnet *);
198int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
199int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
200void	 myx_up(struct myx_softc *);
201void	 myx_iff(struct myx_softc *);
202void	 myx_down(struct myx_softc *);
203
204void	 myx_start(struct ifnet *);
205void	 myx_write_txd_tail(struct myx_softc *, struct myx_slot *, u_int8_t,
206	    u_int32_t, u_int);
207int	 myx_load_mbuf(struct myx_softc *, struct myx_slot *, struct mbuf *);
208int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
209int	 myx_intr(void *);
210void	 myx_rxeof(struct myx_softc *);
211void	 myx_txeof(struct myx_softc *, u_int32_t);
212
213int			myx_buf_fill(struct myx_softc *, struct myx_slot *,
214			    struct mbuf *(*)(void));
215struct mbuf *		myx_mcl_small(void);
216struct mbuf *		myx_mcl_big(void);
217
218int			myx_rx_init(struct myx_softc *, int, bus_size_t);
219int			myx_rx_fill(struct myx_softc *, struct myx_rx_ring *);
220void			myx_rx_empty(struct myx_softc *, struct myx_rx_ring *);
221void			myx_rx_free(struct myx_softc *, struct myx_rx_ring *);
222
223int			myx_tx_init(struct myx_softc *, bus_size_t);
224void			myx_tx_empty(struct myx_softc *);
225void			myx_tx_free(struct myx_softc *);
226
227void			myx_refill(void *);
228
/* autoconf(9) glue */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

/* PCI ids this driver attaches to */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
240
241int
242myx_match(struct device *parent, void *match, void *aux)
243{
244	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
245}
246
/*
 * Attach: map BAR0, read the board's MAC/part strings out of SRAM, map
 * the interrupt (MSI preferred, INTx fallback), create the shared big-rx
 * cluster pool, tune the PCIe device control register, and defer the
 * rest of bringup (firmware load) to a mountroot hook so the firmware
 * file is available.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* Both rx rings share the refill handler; distinguished by argument. */
	sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small;
	timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXSMALL]);
	sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big;
	timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXBIG]);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt; prefer MSI, fall back to INTx. */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* this is sort of racy */
	if (myx_mcl_pool == NULL) {
		extern struct kmem_pa_mode kp_dma_contig;

		/* NOTE(review): M_WAITOK malloc should not return NULL;
		 * the check below is defensive. */
		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);
		if (myx_mcl_pool == NULL) {
			printf("%s: unable to allocate mcl pool\n",
			    DEVNAME(sc));
			goto unmap;
		}
		pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY, 0,
		    0, "myxmcl", NULL);
		pool_setipl(myx_mcl_pool, IPL_NET);
		pool_set_constraints(myx_mcl_pool, &kp_dma_contig);
	}

	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	/* Finish bringup (firmware load) once the root fs is mounted. */
	if (mountroothook_establish(myx_attachhook, sc) == NULL) {
		printf("%s: unable to establish mountroot hook\n", DEVNAME(sc));
		goto unmap;
	}

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
326
/*
 * Program the PCIe Device Control register: set the max payload size
 * field to 4096 bytes and enable relaxed ordering, if they are not
 * already set that way.  Returns -1 if the device has no PCIe
 * capability, 0 otherwise.
 */
int
myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t dcsr;
	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
	/* fls(4096) - 8 == 5, the MPS encoding for 4096 bytes */
	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
	int reg;

	if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &reg, NULL) == 0)
		return (-1);

	reg += PCI_PCIE_DCSR;
	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
	if ((dcsr & mask) != dc) {
		/* read-modify-write only the MPS and ERO fields */
		CLR(dcsr, mask);
		SET(dcsr, dc);
		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
	}

	return (0);
}
349
350u_int
351myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
352{
353	u_int		i, j;
354	u_int8_t	digit;
355
356	memset(lladdr, 0, ETHER_ADDR_LEN);
357	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
358		if (mac[i] >= '0' && mac[i] <= '9')
359			digit = mac[i] - '0';
360		else if (mac[i] >= 'A' && mac[i] <= 'F')
361			digit = mac[i] - 'A' + 10;
362		else if (mac[i] >= 'a' && mac[i] <= 'f')
363			digit = mac[i] - 'a' + 10;
364		else
365			continue;
366		if ((j & 1) == 0)
367			digit <<= 4;
368		lladdr[j++/2] |= digit;
369	}
370
371	return (i);
372}
373
/*
 * Read the board's specification strings out of SRAM and extract the
 * MAC address (into sc_ac.ac_enaddr) and the part number (into part).
 * The strings area is a sequence of NUL-terminated "KEY=value" entries,
 * terminated by an empty string.  Returns 1 if the firmware header lies
 * outside the mapped register window, 0 otherwise.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	/* the header's location is stored (big endian) at a fixed offset */
	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	for (i = 0; i < len; i++) {
		maxlen = len - i;	/* bytes remaining in the buffer */
		if (strings[i] == '\0')
			break;		/* empty string ends the list */
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			/* strlcpy returns the source length, advancing i
			 * toward the entry's terminating NUL */
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip to the end of the current entry */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
415
/*
 * Load a firmware image from the filesystem, validate its header
 * (size, type, version), copy it into the chip's SRAM in 256-byte
 * chunks and boot it.  Returns 0 on success, 1 on any failure (an
 * error message is printed before returning).
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* header offset is stored (big endian) at MYX_HEADER_POS */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	/* only accept ethernet firmware of the version we were built for */
	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF, fwlen);
	return (ret);
}
470
/*
 * Deferred attach, run from the mountroot hook once the root filesystem
 * (and thus the firmware file) is available: allocate the command
 * mailbox, load and boot the aligned firmware, reset the chip, pick the
 * correct firmware for the PCIe link, establish the interrupt and
 * attach the network interface.
 */
void
myx_attachhook(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* aligned firmware default; myx_probe_firmware() may lower this */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_start = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = 9000;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* real sendq depth is set in myx_up() once the ring size is known */
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
542
/*
 * Decide whether the already-loaded aligned firmware can be kept, or
 * whether the unaligned variant must be loaded instead.  Narrow links
 * (width <= 4) can always use the aligned firmware; otherwise run the
 * chip's unaligned DMA self-tests against a scratch buffer and fall
 * back to the unaligned firmware (with a smaller tx boundary) if any
 * test fails.  Returns 0 on success, 1 on failure.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		/* negotiated link width lives in bits 25:20 of the LCSR */
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* mc_data2 = test length (4096) in the high 16 bits, mode in low:
	 * 0x10000 = read, 0x1 = write, 0x10001 = read/write */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	/* aligned firmware failed the tests: switch to the unaligned one */
	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
623
/*
 * Read len bytes from chip SRAM at off into ptr.  The read barrier
 * before the access orders it against earlier register accesses.
 */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}
631
/*
 * Write len bytes from ptr into chip SRAM at off, followed by a write
 * barrier so the data is pushed out before any subsequent access.
 */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}
639
/*
 * Allocate a single-segment, zeroed chunk of DMA memory of the given
 * size and alignment, map it into kva and load it into a dmamap.
 * Returns 0 on success, 1 on failure; on failure everything acquired
 * so far is released via the goto chain below.
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
670
/*
 * Release DMA memory obtained with myx_dmamem_alloc(), undoing each
 * step in reverse order of acquisition.
 */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
679
/*
 * Issue a firmware command.  The command block (with the DMA address of
 * the shared response buffer filled in) is written to the chip's
 * command register; the firmware answers by DMAing a myx_response into
 * the buffer.  Poll the response's result word (pre-set to ~0) for up
 * to ~20ms.  On success the optional out-parameter r receives the
 * response data word.  Returns 0 on success, -1 on command failure or
 * timeout.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	struct myx_response	*mr;
	u_int			 i;
	u_int32_t		 result, data;
#ifdef MYX_DEBUG
	/* command names for DPRINTF, indexed by command number */
	static const char *cmds[MYXCMD_MAX] = {
		"CMD_NONE",
		"CMD_RESET",
		"CMD_GET_VERSION",
		"CMD_SET_INTRQDMA",
		"CMD_SET_BIGBUFSZ",
		"CMD_SET_SMALLBUFSZ",
		"CMD_GET_TXRINGOFF",
		"CMD_GET_RXSMALLRINGOFF",
		"CMD_GET_RXBIGRINGOFF",
		"CMD_GET_INTRACKOFF",
		"CMD_GET_INTRDEASSERTOFF",
		"CMD_GET_TXRINGSZ",
		"CMD_GET_RXRINGSZ",
		"CMD_SET_INTRQSZ",
		"CMD_SET_IFUP",
		"CMD_SET_IFDOWN",
		"CMD_SET_MTU",
		"CMD_GET_INTRCOALDELAYOFF",
		"CMD_SET_STATSINTVL",
		"CMD_SET_STATSDMA_OLD",
		"CMD_SET_PROMISC",
		"CMD_UNSET_PROMISC",
		"CMD_SET_LLADDR",
		"CMD_SET_FC",
		"CMD_UNSET_FC",
		"CMD_DMA_TEST",
		"CMD_SET_ALLMULTI",
		"CMD_UNSET_ALLMULTI",
		"CMD_SET_MCASTGROUP",
		"CMD_UNSET_MCASTGROUP",
		"CMD_UNSET_MCAST",
		"CMD_SET_STATSDMA",
		"CMD_UNALIGNED_DMA_TEST",
		"CMD_GET_UNALIGNED_STATUS"
	};
#endif

	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* pre-set the result so we can detect when the firmware writes it */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* poll for completion: up to 20 iterations of 1ms */
	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmds[cmd], i, result, data, data);

	/* nonzero result covers both firmware errors and poll timeout */
	if (result != 0)
		return (-1);

	if (r != NULL)
		*r = data;
	return (0);
}
763
/*
 * Boot the firmware previously written to SRAM: hand the chip a boot
 * command describing where the image lives and where to jump, then
 * poll the shared status word (which the firmware sets to ~0 via DMA
 * on success) for up to ~200ms.  Returns 0 on success, 1 on timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	/* the first 8 bytes of the image (the jump vector) are skipped */
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* NOTE(review): *status is not volatile-qualified; this relies on
	 * the bus_dmamap_sync calls acting as compiler barriers — confirm */
	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
807
/*
 * Enable or disable the chip's dummy RDMA engine, pointing it at the
 * pad buffer.  Completion is signalled the same way as myx_boot(): the
 * firmware DMAs ~0 into the shared status word, which we poll for up
 * to ~20ms.  Returns 0 on success, 1 on timeout.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	/* poll for completion: up to 20 iterations of 1ms */
	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
858
int
myx_media_change(struct ifnet *ifp)
{
	/* Fixed media; there is nothing to reconfigure. */
	return (0);
}
865
/*
 * ifmedia status callback: report link state from the device's status
 * block.  The link, when up, is always 10G full duplex with flow
 * control.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		/* interface is down: no status block to look at */
		imr->ifm_status = 0;
		return;
	}

	/* snapshot the link state word out of the DMA'd status block */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sts = sc->sc_sts->ms_linkstate;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
895
896void
897myx_link_state(struct myx_softc *sc, u_int32_t sts)
898{
899	struct ifnet		*ifp = &sc->sc_ac.ac_if;
900	int			 link_state = LINK_STATE_DOWN;
901
902	if (betoh32(sts) == MYXSTS_LINKUP)
903		link_state = LINK_STATE_FULL_DUPLEX;
904	if (ifp->if_link_state != link_state) {
905		ifp->if_link_state = link_state;
906		if_link_state_change(ifp);
907		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
908		    IF_Gbps(10) : 0;
909	}
910}
911
/* Watchdog callback: intentionally a no-op. */
void
myx_watchdog(struct ifnet *ifp)
{
}
917
/*
 * Interface ioctl handler.  Runs at splnet.  ENETRESET from any of the
 * cases below is translated into a filter reprogram (myx_iff) when the
 * interface is up and running.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;	/* just reprogram */
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
967
968int
969myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
970{
971	struct if_rxring_info ifr[2];
972
973	memset(ifr, 0, sizeof(ifr));
974
975	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
976	ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;
977
978	ifr[1].ifr_size = MYX_RXBIG_SIZE;
979	ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;
980
981	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
982}
983
984void
985myx_up(struct myx_softc *sc)
986{
987	struct ifnet		*ifp = &sc->sc_ac.ac_if;
988	struct myx_cmd		mc;
989	bus_dmamap_t		map;
990	size_t			size;
991	u_int			maxpkt;
992	u_int32_t		r;
993
994	memset(&mc, 0, sizeof(mc));
995	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
996		printf("%s: failed to reset the device\n", DEVNAME(sc));
997		return;
998	}
999
1000	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
1001	    64, MYXALIGN_CMD) != 0) {
1002		printf("%s: failed to allocate zero pad memory\n",
1003		    DEVNAME(sc));
1004		return;
1005	}
1006	memset(sc->sc_zerodma.mxm_kva, 0, 64);
1007	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1008	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1009
1010	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
1011	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
1012		printf("%s: failed to allocate pad DMA memory\n",
1013		    DEVNAME(sc));
1014		goto free_zero;
1015	}
1016	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1017	    sc->sc_paddma.mxm_map->dm_mapsize,
1018	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1019
1020	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
1021		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
1022		goto free_pad;
1023	}
1024
1025	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1026		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1027		goto free_pad;
1028	}
1029	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1030
1031	memset(&mc, 0, sizeof(mc));
1032	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1033		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1034		goto free_pad;
1035	}
1036	sc->sc_tx_ring_prod = 0;
1037	sc->sc_tx_ring_cons = 0;
1038	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1039	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1040	sc->sc_tx_count = 0;
1041	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1042	IFQ_SET_READY(&ifp->if_snd);
1043
1044	/* Allocate Interrupt Queue */
1045
1046	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1047	sc->sc_intrq_idx = 0;
1048
1049	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1050	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1051	    size, MYXALIGN_DATA) != 0) {
1052		goto free_pad;
1053	}
1054	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1055	map = sc->sc_intrq_dma.mxm_map;
1056	memset(sc->sc_intrq, 0, size);
1057	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1058	    BUS_DMASYNC_PREREAD);
1059
1060	memset(&mc, 0, sizeof(mc));
1061	mc.mc_data0 = htobe32(size);
1062	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1063		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1064		goto free_intrq;
1065	}
1066
1067	memset(&mc, 0, sizeof(mc));
1068	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1069	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1070	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1071		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1072		goto free_intrq;
1073	}
1074
1075	/*
1076	 * get interrupt offsets
1077	 */
1078
1079	memset(&mc, 0, sizeof(mc));
1080	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1081	    &sc->sc_irqclaimoff) != 0) {
1082		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1083		goto free_intrq;
1084	}
1085
1086	memset(&mc, 0, sizeof(mc));
1087	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1088	    &sc->sc_irqdeassertoff) != 0) {
1089		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1090		goto free_intrq;
1091	}
1092
1093	memset(&mc, 0, sizeof(mc));
1094	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1095	    &sc->sc_irqcoaloff) != 0) {
1096		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1097		goto free_intrq;
1098	}
1099
1100	/* Set an appropriate interrupt coalescing period */
1101	r = htobe32(MYX_IRQCOALDELAY);
1102	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1103
1104	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1105		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1106		goto free_intrq;
1107	}
1108
1109	memset(&mc, 0, sizeof(mc));
1110	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1111		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1112		goto free_intrq;
1113	}
1114
1115	memset(&mc, 0, sizeof(mc));
1116	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1117		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1118		goto free_intrq;
1119	}
1120
1121	memset(&mc, 0, sizeof(mc));
1122	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1123	    &sc->sc_tx_ring_offset) != 0) {
1124		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1125		goto free_intrq;
1126	}
1127
1128	memset(&mc, 0, sizeof(mc));
1129	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1130	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
1131		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1132		goto free_intrq;
1133	}
1134
1135	memset(&mc, 0, sizeof(mc));
1136	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1137	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
1138		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1139		goto free_intrq;
1140	}
1141
1142	/* Allocate Interrupt Data */
1143	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1144	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1145		printf("%s: failed to allocate status DMA memory\n",
1146		    DEVNAME(sc));
1147		goto free_intrq;
1148	}
1149	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1150	map = sc->sc_sts_dma.mxm_map;
1151	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1152	    BUS_DMASYNC_PREREAD);
1153
1154	memset(&mc, 0, sizeof(mc));
1155	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1156	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1157	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1158	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1159		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1160		goto free_sts;
1161	}
1162
1163	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1164
1165	memset(&mc, 0, sizeof(mc));
1166	mc.mc_data0 = htobe32(maxpkt);
1167	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1168		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1169		goto free_sts;
1170	}
1171
1172	if (myx_tx_init(sc, maxpkt) != 0)
1173		goto free_sts;
1174
1175	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
1176		goto free_tx_ring;
1177
1178	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0)
1179		goto free_rx_ring_small;
1180
1181	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
1182		goto empty_rx_ring_small;
1183
1184	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0)
1185		goto free_rx_ring_big;
1186
1187	memset(&mc, 0, sizeof(mc));
1188	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
1189	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1190		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1191		goto empty_rx_ring_big;
1192	}
1193
1194	memset(&mc, 0, sizeof(mc));
1195	mc.mc_data0 = htobe32(16384);
1196	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1197		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1198		goto empty_rx_ring_big;
1199	}
1200
1201	sc->sc_state = MYX_S_RUNNING;
1202
1203	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1204		printf("%s: failed to start the device\n", DEVNAME(sc));
1205		goto empty_rx_ring_big;
1206	}
1207
1208	ifq_clr_oactive(&ifp->if_snd);
1209	SET(ifp->if_flags, IFF_RUNNING);
1210	myx_iff(sc);
1211	myx_start(ifp);
1212
1213	return;
1214
1215empty_rx_ring_big:
1216	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1217free_rx_ring_big:
1218	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1219empty_rx_ring_small:
1220	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1221free_rx_ring_small:
1222	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1223free_tx_ring:
1224	myx_tx_free(sc);
1225free_sts:
1226	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1227	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1228	myx_dmamem_free(sc, &sc->sc_sts_dma);
1229free_intrq:
1230	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1231	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1232	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1233free_pad:
1234	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1235	    sc->sc_paddma.mxm_map->dm_mapsize,
1236	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1237	myx_dmamem_free(sc, &sc->sc_paddma);
1238
1239	memset(&mc, 0, sizeof(mc));
1240	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1241		printf("%s: failed to reset the device\n", DEVNAME(sc));
1242	}
1243free_zero:
1244	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1245	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1246	myx_dmamem_free(sc, &sc->sc_zerodma);
1247}
1248
1249int
1250myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1251{
1252	struct myx_cmd		 mc;
1253
1254	memset(&mc, 0, sizeof(mc));
1255	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1256	    addr[2] << 8 | addr[3]);
1257	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1258
1259	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1260		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1261		return (-1);
1262	}
1263	return (0);
1264}
1265
1266void
1267myx_iff(struct myx_softc *sc)
1268{
1269	struct myx_cmd		mc;
1270	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1271	struct ether_multi	*enm;
1272	struct ether_multistep	step;
1273	u_int8_t *addr;
1274
1275	CLR(ifp->if_flags, IFF_ALLMULTI);
1276
1277	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1278	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1279		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1280		return;
1281	}
1282
1283	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1284		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1285		return;
1286	}
1287
1288	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1289		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1290		return;
1291	}
1292
1293	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1294	    sc->sc_ac.ac_multirangecnt > 0) {
1295		SET(ifp->if_flags, IFF_ALLMULTI);
1296		return;
1297	}
1298
1299	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1300	while (enm != NULL) {
1301		addr = enm->enm_addrlo;
1302
1303		memset(&mc, 0, sizeof(mc));
1304		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1305		    addr[2] << 8 | addr[3]);
1306		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1307		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1308			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1309			return;
1310		}
1311
1312		ETHER_NEXT_MULTI(step, enm);
1313	}
1314
1315	memset(&mc, 0, sizeof(mc));
1316	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1317		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1318		return;
1319	}
1320}
1321
/*
 * Bring the interface down: tell the firmware to stop, wait for the
 * interrupt handler to observe the resulting status update and move
 * the state machine to MYX_S_OFF, then reset the chip and release
 * all tx/rx slots and DMA memory allocated by myx_up().
 */
void
myx_down(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	struct sleep_state	 sls;
	struct myx_cmd		 mc;
	int			 s;
	int			 ring;

	/*
	 * Snapshot the current linkdown counter so myx_intr() can tell
	 * when the IFDOWN command below has actually taken effect.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sc->sc_linkdown = sts->ms_linkdown;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* publish the DOWN state before issuing the command */
	sc->sc_state = MYX_S_DOWN;
	membar_producer();

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	/* wait for myx_intr() to flip the state to MYX_S_OFF */
	while (sc->sc_state != MYX_S_OFF) {
		sleep_setup(&sls, sts, PWAIT, "myxdown");
		membar_consumer();
		sleep_finish(&sls, sc->sc_state != MYX_S_OFF);
	}

	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	CLR(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);

	/* tear down both rx rings (small and big) */
	for (ring = 0; ring < 2; ring++) {
		struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];

		timeout_del(&mrr->mrr_refill);
		myx_rx_empty(sc, mrr);
		myx_rx_free(sc, mrr);
	}

	myx_tx_empty(sc);
	myx_tx_free(sc);

	/* the sleep shizz above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1393
1394void
1395myx_write_txd_tail(struct myx_softc *sc, struct myx_slot *ms, u_int8_t flags,
1396    u_int32_t offset, u_int idx)
1397{
1398	struct myx_tx_desc		txd;
1399	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
1400	bus_dmamap_t			map = ms->ms_map;
1401	int				i;
1402
1403	for (i = 1; i < map->dm_nsegs; i++) {
1404		memset(&txd, 0, sizeof(txd));
1405		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
1406		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
1407		txd.tx_flags = flags;
1408
1409		myx_bus_space_write(sc,
1410		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1411		    &txd, sizeof(txd));
1412	}
1413
1414	/* pad runt frames */
1415	if (map->dm_mapsize < 60) {
1416		memset(&txd, 0, sizeof(txd));
1417		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
1418		txd.tx_length = htobe16(60 - map->dm_mapsize);
1419		txd.tx_flags = flags;
1420
1421		myx_bus_space_write(sc,
1422		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1423		    &txd, sizeof(txd));
1424	}
1425}
1426
/*
 * Transmit start routine.  Dequeues packets, loads them into tx
 * slots, then writes their descriptors to the NIC.  The first
 * descriptor of the first packet is deliberately posted last (and
 * its final word written separately after a write barrier) so the
 * firmware never sees a valid first descriptor before the rest of
 * the batch is in place.
 */
void
myx_start(struct ifnet *ifp)
{
	struct myx_tx_desc		txd;
	struct myx_softc		*sc = ifp->if_softc;
	struct myx_slot			*ms;
	bus_dmamap_t			map;
	struct mbuf			*m;
	u_int32_t			offset = sc->sc_tx_ring_offset;
	u_int				idx, cons, prod;
	u_int				free, used;
	u_int8_t			flags;

	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
	    ifq_is_oactive(&ifp->if_snd) ||
	    IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	idx = sc->sc_tx_ring_prod;

	/* figure out space */
	free = sc->sc_tx_ring_cons;
	if (free <= idx)
		free += sc->sc_tx_ring_count;
	free -= idx;

	cons = prod = sc->sc_tx_prod;

	used = 0;

	/*
	 * First pass: dequeue and DMA-load packets into slots, without
	 * telling the hardware about them yet.
	 */
	for (;;) {
		/* +1 leaves room for a possible runt-pad descriptor */
		if (used + sc->sc_tx_nsegs + 1 > free) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		ms = &sc->sc_tx_slots[prod];

		if (myx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		used += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		if (++prod >= sc->sc_tx_ring_count)
			prod = 0;
	}

	/* nothing was loaded */
	if (cons == prod)
		return;

	ms = &sc->sc_tx_slots[cons];

	/*
	 * Second pass: post descriptors for every packet except the
	 * first one dequeued above.
	 */
	for (;;) {
		idx += ms->ms_map->dm_nsegs +
		    (ms->ms_map->dm_mapsize < 60 ? 1 : 0);
		if (idx >= sc->sc_tx_ring_count)
			idx -= sc->sc_tx_ring_count;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;

		if (cons == prod)
			break;

		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, ms, flags, offset, idx);
	}

	/* go back and post first packet */
	ms = &sc->sc_tx_slots[sc->sc_tx_prod];
	map = ms->ms_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	memset(&txd, 0, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, ms, flags, offset, sc->sc_tx_ring_prod);

	/* write all of the first descriptor except its last word */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* the final word makes the descriptor visible to the firmware */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (sc->sc_tx_ring_prod + 1) -
	    sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);

	/* commit */
	sc->sc_tx_ring_prod = idx;
	sc->sc_tx_prod = prod;
}
1563
1564int
1565myx_load_mbuf(struct myx_softc *sc, struct myx_slot *ms, struct mbuf *m)
1566{
1567	bus_dma_tag_t			dmat = sc->sc_dmat;
1568	bus_dmamap_t			dmap = ms->ms_map;
1569
1570	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
1571	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
1572	case 0:
1573		break;
1574
1575	case EFBIG: /* mbuf chain is too fragmented */
1576		if (m_defrag(m, M_DONTWAIT) == 0 &&
1577		    bus_dmamap_load_mbuf(dmat, dmap, m,
1578		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
1579			break;
1580	default:
1581		return (1);
1582	}
1583
1584	ms->ms_m = m;
1585	return (0);
1586}
1587
/*
 * Interrupt handler.  The firmware DMAs a myx_status block into host
 * memory and sets ms_isvalid to announce new events; this routine
 * deasserts/claims the interrupt, reaps completed transmits, drains
 * the rx interrupt queue, and handles the DOWN -> OFF state handoff
 * for myx_down().
 */
int
myx_intr(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state		 state;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 data, start;
	u_int8_t		 valid = 0;

	state = sc->sc_state;
	if (state == MYX_S_OFF)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		/* not our interrupt */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		return (0);
	}

	if (sc->sc_intx) {
		/* legacy INTx requires an explicit deassert write */
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/* re-read until the firmware stops updating the status block */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	/* reap transmits the firmware reports as done */
	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	/* claim the interrupt so the firmware can raise the next one */
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	start = ifq_is_oactive(&ifp->if_snd);

	if (sts->ms_statusupdated) {
		if (state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown) {
			/* IFDOWN has taken effect; wake up myx_down() */
			sc->sc_state = MYX_S_OFF;
			membar_producer();
			wakeup(sts);
			start = 0;
		} else {
			data = sts->ms_linkstate;
			if (data != 0xffffffff) {
				KERNEL_LOCK();
				myx_link_state(sc, data);
				KERNEL_UNLOCK();
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	if (start) {
		/* tx descriptors were freed; restart the send queue */
		KERNEL_LOCK();
		ifq_clr_oactive(&ifp->if_snd);
		myx_start(ifp);
		KERNEL_UNLOCK();
	}

	return (1);
}
1673
1674void
1675myx_refill(void *xmrr)
1676{
1677	struct myx_rx_ring *mrr = xmrr;
1678	struct myx_softc *sc = mrr->mrr_softc;
1679
1680	myx_rx_fill(sc, mrr);
1681
1682	if (mrr->mrr_prod == mrr->mrr_cons)
1683		timeout_add(&mrr->mrr_refill, 1);
1684}
1685
1686void
1687myx_txeof(struct myx_softc *sc, u_int32_t done_count)
1688{
1689	struct ifnet *ifp = &sc->sc_ac.ac_if;
1690	struct myx_slot *ms;
1691	bus_dmamap_t map;
1692	u_int idx, cons;
1693
1694	idx = sc->sc_tx_ring_cons;
1695	cons = sc->sc_tx_cons;
1696
1697	do {
1698		ms = &sc->sc_tx_slots[cons];
1699		map = ms->ms_map;
1700
1701		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1702
1703		bus_dmamap_sync(sc->sc_dmat, map, 0,
1704		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1705		bus_dmamap_unload(sc->sc_dmat, map);
1706		m_freem(ms->ms_m);
1707
1708		ifp->if_opackets++;
1709
1710		if (++cons >= sc->sc_tx_ring_count)
1711			cons = 0;
1712	} while (++sc->sc_tx_count != done_count);
1713
1714	if (idx >= sc->sc_tx_ring_count)
1715		idx -= sc->sc_tx_ring_count;
1716
1717	sc->sc_tx_ring_cons = idx;
1718	sc->sc_tx_cons = cons;
1719}
1720
/*
 * Drain the rx interrupt queue.  Each non-zero-length entry names a
 * received frame; the frame's length tells us which ring (small or
 * big) it came from.  Received mbufs are collected on a local list
 * and handed to the stack in one go, and each ring that gave up
 * buffers is refilled afterwards.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_slot *ms;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		/* clear the entry so it reads as empty next time around */
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* the frame length determines which ring it landed in */
		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

		m = ms->ms_m;
		/* the chip wrote past the alignment pad; skip it */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* return the consumed slots and try to refill each ring */
	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		myx_rx_fill(sc, mrr);
		/* ring completely empty: fall back to the timeout */
		if (mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&mrr->mrr_refill, 0);
	}

	if_input(ifp, &ml);
}
1782
/*
 * Fill up to "slots" rx slots with fresh mbufs and post their
 * descriptors.  The first slot's descriptor is deliberately written
 * last, after a write barrier, so the firmware cannot consume the
 * batch before every descriptor behind it is in place.  Returns the
 * number of requested slots that could NOT be filled.
 */
static int
myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	/* fill the first slot, but hold back its descriptor for now */
	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		ms = &mrr->mrr_slots[p];

		if (myx_buf_fill(sc, ms, mrr->mrr_mclget) != 0)
			break;

		rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	ms = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}
1828
1829int
1830myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
1831{
1832	struct myx_rx_desc rxd;
1833	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
1834	struct myx_slot *ms;
1835	u_int32_t offset = mrr->mrr_offset;
1836	int rv;
1837	int i;
1838
1839	mrr->mrr_slots = mallocarray(sizeof(*ms), sc->sc_rx_ring_count,
1840	    M_DEVBUF, M_WAITOK);
1841	if (mrr->mrr_slots == NULL)
1842		return (ENOMEM);
1843
1844	memset(&rxd, 0xff, sizeof(rxd));
1845	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1846		ms = &mrr->mrr_slots[i];
1847		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1848		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
1849		if (rv != 0)
1850			goto destroy;
1851
1852		myx_bus_space_write(sc, offset + i * sizeof(rxd),
1853		    &rxd, sizeof(rxd));
1854	}
1855
1856	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1857	mrr->mrr_prod = mrr->mrr_cons = 0;
1858
1859	return (0);
1860
1861destroy:
1862	while (i-- > 0) {
1863		ms = &mrr->mrr_slots[i];
1864		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1865	}
1866	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1867	return (rv);
1868}
1869
1870static inline int
1871myx_rx_ring_enter(struct myx_rx_ring *mrr)
1872{
1873	return (atomic_inc_int_nv(&mrr->mrr_running) == 1);
1874}
1875
1876static inline int
1877myx_rx_ring_leave(struct myx_rx_ring *mrr)
1878{
1879	if (atomic_cas_uint(&mrr->mrr_running, 1, 0) == 1)
1880		return (1);
1881
1882	mrr->mrr_running = 1;
1883
1884	return (0);
1885}
1886
1887int
1888myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
1889{
1890	u_int slots;
1891
1892	slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
1893	if (slots == 0)
1894		return (1);
1895
1896	slots = myx_rx_fill_slots(sc, mrr, slots);
1897	if (slots > 0)
1898		if_rxr_put(&mrr->mrr_rxr, slots);
1899
1900	return (0);
1901}
1902
1903void
1904myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
1905{
1906	struct myx_slot *ms;
1907
1908	while (mrr->mrr_cons != mrr->mrr_prod) {
1909		ms = &mrr->mrr_slots[mrr->mrr_cons];
1910
1911		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
1912			mrr->mrr_cons = 0;
1913
1914		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1915		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1916		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
1917		m_freem(ms->ms_m);
1918	}
1919
1920	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1921}
1922
1923void
1924myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
1925{
1926	struct myx_slot *ms;
1927	int i;
1928
1929	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1930		ms = &mrr->mrr_slots[i];
1931		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1932	}
1933
1934	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1935}
1936
1937struct mbuf *
1938myx_mcl_small(void)
1939{
1940	struct mbuf *m;
1941
1942	m = MCLGETI(NULL, M_DONTWAIT, NULL, MYX_RXSMALL_SIZE);
1943	if (m == NULL)
1944		return (NULL);
1945
1946	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;
1947
1948	return (m);
1949}
1950
1951struct mbuf *
1952myx_mcl_big(void)
1953{
1954	struct mbuf *m;
1955	void *mcl;
1956
1957	MGETHDR(m, M_DONTWAIT, MT_DATA);
1958	if (m == NULL)
1959		return (NULL);
1960
1961	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
1962	if (mcl == NULL) {
1963		m_free(m);
1964		return (NULL);
1965	}
1966
1967	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, m_extfree_pool, myx_mcl_pool);
1968	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;
1969
1970	return (m);
1971}
1972
1973int
1974myx_buf_fill(struct myx_softc *sc, struct myx_slot *ms,
1975    struct mbuf *(*mclget)(void))
1976{
1977	struct mbuf *m;
1978	int rv;
1979
1980	m = (*mclget)();
1981	if (m == NULL)
1982		return (ENOMEM);
1983
1984	rv = bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m, BUS_DMA_NOWAIT);
1985	if (rv != 0) {
1986		m_freem(m);
1987		return (rv);
1988	}
1989
1990	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1991	    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1992
1993	ms->ms_m = m;
1994
1995	return (0);
1996}
1997
1998int
1999myx_tx_init(struct myx_softc *sc, bus_size_t size)
2000{
2001	struct myx_slot *ms;
2002	int rv;
2003	int i;
2004
2005	sc->sc_tx_slots = mallocarray(sizeof(*ms), sc->sc_tx_ring_count,
2006	    M_DEVBUF, M_WAITOK);
2007	if (sc->sc_tx_slots == NULL)
2008		return (ENOMEM);
2009
2010	for (i = 0; i < sc->sc_tx_ring_count; i++) {
2011		ms = &sc->sc_tx_slots[i];
2012		rv = bus_dmamap_create(sc->sc_dmat, size, sc->sc_tx_nsegs,
2013		    sc->sc_tx_boundary, sc->sc_tx_boundary,
2014		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
2015		if (rv != 0)
2016			goto destroy;
2017	}
2018
2019	sc->sc_tx_prod = sc->sc_tx_cons = 0;
2020
2021	return (0);
2022
2023destroy:
2024	while (i-- > 0) {
2025		ms = &sc->sc_tx_slots[i];
2026		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2027	}
2028	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2029	return (rv);
2030}
2031
2032void
2033myx_tx_empty(struct myx_softc *sc)
2034{
2035	struct myx_slot *ms;
2036	u_int cons = sc->sc_tx_cons;
2037	u_int prod = sc->sc_tx_prod;
2038
2039	while (cons != prod) {
2040		ms = &sc->sc_tx_slots[cons];
2041
2042		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
2043		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2044		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
2045		m_freem(ms->ms_m);
2046
2047		if (++cons >= sc->sc_tx_ring_count)
2048			cons = 0;
2049	}
2050
2051	sc->sc_tx_cons = cons;
2052}
2053
2054void
2055myx_tx_free(struct myx_softc *sc)
2056{
2057	struct myx_slot *ms;
2058	int i;
2059
2060	for (i = 0; i < sc->sc_tx_ring_count; i++) {
2061		ms = &sc->sc_tx_slots[i];
2062		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2063	}
2064
2065	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2066}
2067