/* if_myx.c revision 1.79 */
1/*	$OpenBSD: if_myx.c,v 1.79 2015/08/14 07:24:18 dlg Exp $	*/
2
3/*
4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/*
20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21 */
22
23#include "bpfilter.h"
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/sockio.h>
28#include <sys/mbuf.h>
29#include <sys/kernel.h>
30#include <sys/socket.h>
31#include <sys/malloc.h>
32#include <sys/pool.h>
33#include <sys/timeout.h>
34#include <sys/device.h>
35#include <sys/queue.h>
36#include <sys/atomic.h>
37
38#include <machine/bus.h>
39#include <machine/intr.h>
40
41#include <net/if.h>
42#include <net/if_dl.h>
43#include <net/if_media.h>
44
45#if NBPFILTER > 0
46#include <net/bpf.h>
47#endif
48
49#include <netinet/in.h>
50#include <netinet/if_ether.h>
51
52#include <dev/pci/pcireg.h>
53#include <dev/pci/pcivar.h>
54#include <dev/pci/pcidevs.h>
55
56#include <dev/pci/if_myxreg.h>
57
#ifdef MYX_DEBUG
/*
 * Debug categories are independent bits so they can be OR'd into
 * myx_debug and tested individually.  The old values (2<<0, 3<<0)
 * overlapped: MYXDBG_INTR was 0b11 and also matched INIT and CMD.
 */
#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
#define MYXDBG_CMD	(1<<1)	/* commands */
#define MYXDBG_INTR	(1<<2)	/* interrupts */
#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
int myx_debug = MYXDBG_ALL;
#define DPRINTF(_lvl, _arg...)	do {					\
	if (myx_debug & (_lvl))						\
		printf(_arg);						\
} while (0)
#else
#define DPRINTF(_lvl, arg...)
#endif

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
73
/*
 * A single contiguous DMA allocation: the dmamap, its backing
 * segment, and the kernel mapping of the memory.
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;
	bus_dma_segment_t	 mxm_seg;
	int			 mxm_nsegs;	/* segment count (always 1, see myx_dmamem_alloc) */
	size_t			 mxm_size;	/* allocation size in bytes */
	caddr_t			 mxm_kva;	/* kernel va of the mapped memory */
};

/* An mbuf and the dmamap it is loaded into, linkable on a myx_buf_list. */
struct myx_buf {
	SIMPLEQ_ENTRY(myx_buf)	 mb_entry;
	bus_dmamap_t		 mb_map;
	struct mbuf		*mb_m;
};

/* Mutex-protected queue of myx_bufs (tx free list and tx in-flight list). */
struct myx_buf_list {
	SIMPLEQ_HEAD(, myx_buf)	mbl_q;
	struct mutex		mbl_mtx;
};

/* Driver-global pools, allocated once by the first device to attach. */
struct pool *myx_buf_pool;
struct pool *myx_mcl_pool;

/* NOTE(review): empty struct, referenced nowhere in this chunk — looks like a leftover. */
struct myx_ring_lock {
};
98
/* One rx ring slot: the dmamap and the mbuf currently loaded into it. */
struct myx_rx_slot {
	bus_dmamap_t		 mrs_map;
	struct mbuf		*mrs_m;
};

/* Software state for one of the two hardware rx rings (small/big). */
struct myx_rx_ring {
	struct mutex		 mrr_rxr_mtx;	/* protects mrr_rxr */
	struct if_rxring	 mrr_rxr;
	u_int32_t		 mrr_offset;	/* ring offset in chip memory */
	u_int			 mrr_running;
	u_int			 mrr_prod;	/* producer index */
	u_int			 mrr_cons;	/* consumer index */
	struct myx_rx_slot	*mrr_slots;
};

/* Interface life cycle: off -> running -> down (being torn down). */
enum myx_state {
	MYX_S_OFF = 0,
	MYX_S_RUNNING,
	MYX_S_DOWN
};
119
/* Per-device driver state. */
struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;

	/* PCI attachment */
	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	/* register window (BAR0) */
	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	struct myx_dmamem	 sc_zerodma;	/* zero pad for short frames */
	struct myx_dmamem	 sc_cmddma;	/* command/boot response buffer */
	struct myx_dmamem	 sc_paddma;

	/* firmware status block, DMA'd by the chip */
	struct myx_dmamem	 sc_sts_dma;
	volatile struct myx_status	*sc_sts;
	struct mutex		 sc_sts_mtx;

	/* interrupt state; sc_intx set when MSI mapping failed */
	int			 sc_intx;
	void			*sc_irqh;
	u_int32_t		 sc_irqcoaloff;
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	/* interrupt (event) queue written by the chip */
	struct myx_dmamem	 sc_intrq_dma;
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct myx_rx_ring	 sc_rx_ring[2];
	struct timeout		 sc_refill;

	/* tx ring state */
	bus_size_t		 sc_tx_boundary;	/* 4096, or 2048 for unaligned fw */
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_free;
	struct myx_buf_list	 sc_tx_buf_free;
	struct myx_buf_list	 sc_tx_buf_list;
	u_int			 sc_tx_ring_idx;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;
};

/* payload sizes for the two rx rings */
#define MYX_RXSMALL_SIZE	MCLBYTES
#define MYX_RXBIG_SIZE		(9 * 1024)
176
177int	 myx_match(struct device *, void *, void *);
178void	 myx_attach(struct device *, struct device *, void *);
179int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
180int	 myx_query(struct myx_softc *sc, char *, size_t);
181u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
182void	 myx_attachhook(void *);
183int	 myx_loadfirmware(struct myx_softc *, const char *);
184int	 myx_probe_firmware(struct myx_softc *);
185
186void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
187void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
188
189#if defined(__LP64__)
190#define _myx_bus_space_write bus_space_write_raw_region_8
191typedef u_int64_t myx_bus_t;
192#else
193#define _myx_bus_space_write bus_space_write_raw_region_4
194typedef u_int32_t myx_bus_t;
195#endif
196#define myx_bus_space_write(_sc, _o, _a, _l) \
197    _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))
198
199int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
200int	 myx_boot(struct myx_softc *, u_int32_t);
201
202int	 myx_rdma(struct myx_softc *, u_int);
203int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
204	    bus_size_t, u_int align);
205void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
206int	 myx_media_change(struct ifnet *);
207void	 myx_media_status(struct ifnet *, struct ifmediareq *);
208void	 myx_link_state(struct myx_softc *, u_int32_t);
209void	 myx_watchdog(struct ifnet *);
210int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
211int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
212void	 myx_up(struct myx_softc *);
213void	 myx_iff(struct myx_softc *);
214void	 myx_down(struct myx_softc *);
215
216void	 myx_start(struct ifnet *);
217void	 myx_write_txd_tail(struct myx_softc *, struct myx_buf *, u_int8_t,
218	    u_int32_t, u_int);
219int	 myx_load_buf(struct myx_softc *, struct myx_buf *, struct mbuf *);
220int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
221int	 myx_intr(void *);
222void	 myx_rxeof(struct myx_softc *);
223void	 myx_txeof(struct myx_softc *, u_int32_t);
224
225struct myx_buf *	myx_buf_alloc(struct myx_softc *, bus_size_t, int,
226			    bus_size_t, bus_size_t);
227void			myx_buf_free(struct myx_softc *, struct myx_buf *);
228void			myx_bufs_init(struct myx_buf_list *);
229int			myx_bufs_empty(struct myx_buf_list *);
230struct myx_buf *	myx_buf_get(struct myx_buf_list *);
231void			myx_buf_put(struct myx_buf_list *, struct myx_buf *);
232int			myx_buf_fill(struct myx_softc *, int,
233			    struct myx_rx_slot *);
234struct mbuf *		myx_mcl_small(void);
235struct mbuf *		myx_mcl_big(void);
236
237int			myx_rx_init(struct myx_softc *, int, bus_size_t);
238int			myx_rx_fill(struct myx_softc *, int);
239void			myx_rx_empty(struct myx_softc *, int);
240void			myx_rx_free(struct myx_softc *, int);
241
242void			myx_refill(void *);
243
244static inline void
245myx_sts_enter(struct myx_softc *sc)
246{
247	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
248
249	mtx_enter(&sc->sc_sts_mtx);
250	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
251	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
252}
253
254static inline void
255myx_sts_leave(struct myx_softc *sc)
256{
257	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
258
259	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
260	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
261	mtx_leave(&sc->sc_sts_mtx);
262}
263
/* autoconf(9) glue */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

/* PCI IDs of the supported Myri-10G variants. */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
275
/* Match against the vendor/product pairs in myx_devices. */
int
myx_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
}
281
/*
 * Attach: map BAR0, read the MAC/part number strings from the board,
 * map the interrupt (MSI preferred, INTx fallback), create the shared
 * pools on first attach, tune PCIe, and defer the rest of bring-up
 * (firmware load needs the filesystem) to a mountroot hook.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	mtx_init(&sc->sc_rx_ring[MYX_RXSMALL].mrr_rxr_mtx, IPL_NET);
	mtx_init(&sc->sc_rx_ring[MYX_RXBIG].mrr_rxr_mtx, IPL_NET);

	myx_bufs_init(&sc->sc_tx_buf_free);
	myx_bufs_init(&sc->sc_tx_buf_list);

	timeout_set(&sc->sc_refill, myx_refill, sc);

	mtx_init(&sc->sc_sts_mtx, IPL_NET);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		/* legacy INTx requires explicit deassert, see interrupt path */
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* this is sort of racy */
	if (myx_buf_pool == NULL) {
		extern struct kmem_pa_mode kp_dma_contig;

		/* NOTE(review): M_WAITOK malloc should not return NULL; checks are defensive */
		myx_buf_pool = malloc(sizeof(*myx_buf_pool), M_DEVBUF,
		    M_WAITOK);
		if (myx_buf_pool == NULL) {
			printf("%s: unable to allocate buf pool\n",
			    DEVNAME(sc));
			goto unmap;
		}
		pool_init(myx_buf_pool, sizeof(struct myx_buf),
		    0, 0, 0, "myxbufs", &pool_allocator_nointr);
		pool_setipl(myx_buf_pool, IPL_NONE);

		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);
		if (myx_mcl_pool == NULL) {
			printf("%s: unable to allocate mcl pool\n",
			    DEVNAME(sc));
			goto unmap;
		}
		/* big rx clusters must be physically contiguous for DMA */
		pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY, 0,
		    0, "myxmcl", NULL);
		pool_setipl(myx_mcl_pool, IPL_NET);
		pool_set_constraints(myx_mcl_pool, &kp_dma_contig);
	}

	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	/* firmware lives on disk; finish bring-up once root is mounted */
	if (mountroothook_establish(myx_attachhook, sc) == NULL) {
		printf("%s: unable to establish mountroot hook\n", DEVNAME(sc));
		goto unmap;
	}

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
373
/*
 * Program the PCIe Device Control register: set the max payload size
 * field from 4096 bytes and enable relaxed ordering, if not already
 * configured that way.  Returns -1 if the device exposes no PCIe
 * capability, 0 otherwise.
 */
int
myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t dcsr;
	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
	/* fls(4096)-8 = 5 encodes a 4096-byte MPS in bits 14:12 */
	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
	int reg;

	if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &reg, NULL) == 0)
		return (-1);

	reg += PCI_PCIE_DCSR;
	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
	if ((dcsr & mask) != dc) {
		/* read-modify-write only the MPS and ERO fields */
		CLR(dcsr, mask);
		SET(dcsr, dc);
		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
	}

	return (0);
}
396
397u_int
398myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
399{
400	u_int		i, j;
401	u_int8_t	digit;
402
403	memset(lladdr, 0, ETHER_ADDR_LEN);
404	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
405		if (mac[i] >= '0' && mac[i] <= '9')
406			digit = mac[i] - '0';
407		else if (mac[i] >= 'A' && mac[i] <= 'F')
408			digit = mac[i] - 'A' + 10;
409		else if (mac[i] >= 'a' && mac[i] <= 'f')
410			digit = mac[i] - 'a' + 10;
411		else
412			continue;
413		if ((j & 1) == 0)
414			digit <<= 4;
415		lladdr[j++/2] |= digit;
416	}
417
418	return (i);
419}
420
/*
 * Read the specification strings published by the firmware image in
 * chip memory ("MAC=xx:...", "PC=partnumber", NUL-separated) to learn
 * the station address and board part number.  Returns 0 on success,
 * 1 if the header pointer falls outside the register window.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	/* the header location is published at a fixed offset, big-endian */
	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	/* NOTE(review): fw_specs offset itself is not range-checked against sc_mems */
	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	for (i = 0; i < len; i++) {
		maxlen = len - i;
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip to the NUL terminating the current string */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
462
/*
 * Load a firmware image from disk with loadfirmware(9), validate its
 * size, embedded header, type and version string, copy it into the
 * chip's SRAM in 256-byte chunks, and start it via myx_boot().
 * Returns 0 on success, 1 on any failure (message already printed).
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* image stores the header offset at the same fixed position as the chip */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	/* NOTE(review): size 0 means "unknown" to free(9); could pass fwlen */
	free(fw, M_DEVBUF, 0);
	return (ret);
}
517
/*
 * Deferred attach, run from the mountroot hook once the filesystem is
 * available: allocate the command buffer, load and select firmware,
 * establish the interrupt, and attach the network interface.
 */
void
myx_attachhook(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* aligned firmware default; myx_probe_firmware may lower this to 2048 */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_start = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = 9000;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* real sendq depth is set in myx_up() once the tx ring size is known */
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
589
/*
 * Decide whether the already-loaded aligned firmware is usable.  A
 * PCIe link of 4 lanes or fewer can always use it; otherwise run the
 * firmware's unaligned DMA read/write/read-write self tests against a
 * scratch buffer, and on any failure fall back to the unaligned
 * firmware image (which restricts the tx boundary to 2048 bytes).
 * Returns 0 when a usable firmware is in place, 1 on failure.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		/* negotiated link width field of the Link Control/Status reg */
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* mc_data2 high half selects the test mode, low half the length */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	/* aligned firmware unsafe on this chipset; switch images */
	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
670
671void
672myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
673{
674	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
675	    BUS_SPACE_BARRIER_READ);
676	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
677}
678
679void
680myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
681{
682	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
683	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
684	    BUS_SPACE_BARRIER_WRITE);
685}
686
/*
 * Allocate, map and load a zeroed single-segment DMA buffer of the
 * given size and alignment, filling in mxm.  Returns 0 on success,
 * 1 on failure with all intermediate resources unwound.
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
717
/*
 * Release a buffer set up by myx_dmamem_alloc(), in the reverse
 * order of its construction.
 */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
726
/*
 * Issue a command to the firmware through the MYX_CMD window and poll
 * the DMA response buffer (sc_cmddma) for up to ~20ms.  The response
 * result word is 0 on success; the data word is passed back through r
 * when non-NULL.  Returns 0 on success, -1 on firmware error or
 * timeout.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	struct myx_response	*mr;
	u_int			 i;
	u_int32_t		 result, data;
#ifdef MYX_DEBUG
	/* command names, indexed by command number, for DPRINTF only */
	/* NOTE(review): cmds[cmd] is not range-checked against MYXCMD_MAX */
	static const char *cmds[MYXCMD_MAX] = {
		"CMD_NONE",
		"CMD_RESET",
		"CMD_GET_VERSION",
		"CMD_SET_INTRQDMA",
		"CMD_SET_BIGBUFSZ",
		"CMD_SET_SMALLBUFSZ",
		"CMD_GET_TXRINGOFF",
		"CMD_GET_RXSMALLRINGOFF",
		"CMD_GET_RXBIGRINGOFF",
		"CMD_GET_INTRACKOFF",
		"CMD_GET_INTRDEASSERTOFF",
		"CMD_GET_TXRINGSZ",
		"CMD_GET_RXRINGSZ",
		"CMD_SET_INTRQSZ",
		"CMD_SET_IFUP",
		"CMD_SET_IFDOWN",
		"CMD_SET_MTU",
		"CMD_GET_INTRCOALDELAYOFF",
		"CMD_SET_STATSINTVL",
		"CMD_SET_STATSDMA_OLD",
		"CMD_SET_PROMISC",
		"CMD_UNSET_PROMISC",
		"CMD_SET_LLADDR",
		"CMD_SET_FC",
		"CMD_UNSET_FC",
		"CMD_DMA_TEST",
		"CMD_SET_ALLMULTI",
		"CMD_UNSET_ALLMULTI",
		"CMD_SET_MCASTGROUP",
		"CMD_UNSET_MCASTGROUP",
		"CMD_UNSET_MCAST",
		"CMD_SET_STATSDMA",
		"CMD_UNALIGNED_DMA_TEST",
		"CMD_GET_UNALIGNED_STATUS"
	};
#endif

	/* tell the firmware where to DMA its response */
	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* 0xffffffff is the "no answer yet" sentinel we poll against */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmds[cmd], i, result, data, data);

	/* a timeout leaves result at the sentinel, which is also != 0 */
	if (result != 0)
		return (-1);

	if (r != NULL)
		*r = data;
	return (0);
}
810
/*
 * Start the firmware image previously written to SRAM: post a boot
 * command through the MYX_BOOT window and poll the status word in
 * sc_cmddma, which the chip sets to 0xffffffff on success, for up to
 * ~200ms.  Returns 0 on success, 1 on timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	/* NOTE(review): length-8/copyto 8 appears to skip an 8-byte image header — confirm */
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
854
/*
 * Enable or disable the firmware's dummy RDMA through the MYX_RDMA
 * window, polling the status word in sc_cmddma for completion for up
 * to ~20ms.  Returns 0 on success, 1 on timeout.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	/* zero the status word; the chip sets it to 0xffffffff when done */
	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
905
/*
 * Media change request: only one fixed medium is registered
 * (IFM_ETHER|IFM_AUTO), so there is nothing to reconfigure.
 */
int
myx_media_change(struct ifnet *ifp)
{
	/* ignore */
	return (0);
}
912
/*
 * Report media status.  Refreshes the link state from the firmware
 * status block first; a live link is reported as full duplex with
 * rx/tx flow control.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	u_int32_t		 sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		/* not running: no status block to consult */
		imr->ifm_status = 0;
		return;
	}

	/* snapshot the chip-updated link state under the status mutex */
	myx_sts_enter(sc);
	sts = sc->sc_sts->ms_linkstate;
	myx_sts_leave(sc);

	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
939
940void
941myx_link_state(struct myx_softc *sc, u_int32_t sts)
942{
943	struct ifnet		*ifp = &sc->sc_ac.ac_if;
944	int			 link_state = LINK_STATE_DOWN;
945
946	if (betoh32(sts) == MYXSTS_LINKUP)
947		link_state = LINK_STATE_FULL_DUPLEX;
948	if (ifp->if_link_state != link_state) {
949		ifp->if_link_state = link_state;
950		if_link_state_change(ifp);
951		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
952		    IF_Gbps(10) : 0;
953	}
954}
955
/* Watchdog handler: intentionally a no-op for this device. */
void
myx_watchdog(struct ifnet *ifp)
{
}
961
/*
 * Interface ioctl handler.  Runs at splnet; bring-up/teardown is
 * driven by IFF_UP vs IFF_RUNNING, and ENETRESET from any path is
 * translated into a filter reprogram (myx_iff) on a running
 * interface.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *)data;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;	/* already up: just reprogram */
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1014
1015int
1016myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
1017{
1018	struct if_rxring_info ifr[2];
1019
1020	memset(ifr, 0, sizeof(ifr));
1021
1022	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
1023	mtx_enter(&sc->sc_rx_ring[MYX_RXSMALL].mrr_rxr_mtx);
1024	ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;
1025	mtx_leave(&sc->sc_rx_ring[MYX_RXSMALL].mrr_rxr_mtx);
1026
1027	ifr[1].ifr_size = MYX_RXBIG_SIZE;
1028	mtx_enter(&sc->sc_rx_ring[MYX_RXBIG].mrr_rxr_mtx);
1029	ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;
1030	mtx_leave(&sc->sc_rx_ring[MYX_RXBIG].mrr_rxr_mtx);
1031
1032	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
1033}
1034
1035void
1036myx_up(struct myx_softc *sc)
1037{
1038	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1039	struct myx_buf		*mb;
1040	struct myx_cmd		mc;
1041	bus_dmamap_t		map;
1042	size_t			size;
1043	u_int			maxpkt;
1044	u_int32_t		r;
1045	int			i;
1046
1047	memset(&mc, 0, sizeof(mc));
1048	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1049		printf("%s: failed to reset the device\n", DEVNAME(sc));
1050		return;
1051	}
1052
1053	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
1054	    64, MYXALIGN_CMD) != 0) {
1055		printf("%s: failed to allocate zero pad memory\n",
1056		    DEVNAME(sc));
1057		return;
1058	}
1059	memset(sc->sc_zerodma.mxm_kva, 0, 64);
1060	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1061	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1062
1063	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
1064	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
1065		printf("%s: failed to allocate pad DMA memory\n",
1066		    DEVNAME(sc));
1067		goto free_zero;
1068	}
1069	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1070	    sc->sc_paddma.mxm_map->dm_mapsize,
1071	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1072
1073	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
1074		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
1075		goto free_pad;
1076	}
1077
1078	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1079		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1080		goto free_pad;
1081	}
1082	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1083
1084	memset(&mc, 0, sizeof(mc));
1085	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1086		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1087		goto free_pad;
1088	}
1089	sc->sc_tx_ring_idx = 0;
1090	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1091	sc->sc_tx_free = sc->sc_tx_ring_count - 1;
1092	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1093	sc->sc_tx_count = 0;
1094	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1095	IFQ_SET_READY(&ifp->if_snd);
1096
1097	/* Allocate Interrupt Queue */
1098
1099	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1100	sc->sc_intrq_idx = 0;
1101
1102	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1103	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1104	    size, MYXALIGN_DATA) != 0) {
1105		goto free_pad;
1106	}
1107	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1108	map = sc->sc_intrq_dma.mxm_map;
1109	memset(sc->sc_intrq, 0, size);
1110	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1111	    BUS_DMASYNC_PREREAD);
1112
1113	memset(&mc, 0, sizeof(mc));
1114	mc.mc_data0 = htobe32(size);
1115	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1116		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1117		goto free_intrq;
1118	}
1119
1120	memset(&mc, 0, sizeof(mc));
1121	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1122	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1123	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1124		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1125		goto free_intrq;
1126	}
1127
1128	/*
1129	 * get interrupt offsets
1130	 */
1131
1132	memset(&mc, 0, sizeof(mc));
1133	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1134	    &sc->sc_irqclaimoff) != 0) {
1135		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1136		goto free_intrq;
1137	}
1138
1139	memset(&mc, 0, sizeof(mc));
1140	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1141	    &sc->sc_irqdeassertoff) != 0) {
1142		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1143		goto free_intrq;
1144	}
1145
1146	memset(&mc, 0, sizeof(mc));
1147	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1148	    &sc->sc_irqcoaloff) != 0) {
1149		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1150		goto free_intrq;
1151	}
1152
1153	/* Set an appropriate interrupt coalescing period */
1154	r = htobe32(MYX_IRQCOALDELAY);
1155	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1156
1157	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1158		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1159		goto free_intrq;
1160	}
1161
1162	memset(&mc, 0, sizeof(mc));
1163	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1164		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1165		goto free_intrq;
1166	}
1167
1168	memset(&mc, 0, sizeof(mc));
1169	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1170		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1171		goto free_intrq;
1172	}
1173
1174	memset(&mc, 0, sizeof(mc));
1175	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1176	    &sc->sc_tx_ring_offset) != 0) {
1177		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1178		goto free_intrq;
1179	}
1180
1181	memset(&mc, 0, sizeof(mc));
1182	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1183	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
1184		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1185		goto free_intrq;
1186	}
1187
1188	memset(&mc, 0, sizeof(mc));
1189	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1190	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
1191		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1192		goto free_intrq;
1193	}
1194
1195	/* Allocate Interrupt Data */
1196	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1197	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1198		printf("%s: failed to allocate status DMA memory\n",
1199		    DEVNAME(sc));
1200		goto free_intrq;
1201	}
1202	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1203	map = sc->sc_sts_dma.mxm_map;
1204	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1205	    BUS_DMASYNC_PREREAD);
1206
1207	memset(&mc, 0, sizeof(mc));
1208	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1209	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1210	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1211	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1212		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1213		goto free_sts;
1214	}
1215
1216	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1217
1218	memset(&mc, 0, sizeof(mc));
1219	mc.mc_data0 = htobe32(maxpkt);
1220	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1221		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1222		goto free_sts;
1223	}
1224
1225	for (i = 0; i < sc->sc_tx_ring_count; i++) {
1226		mb = myx_buf_alloc(sc, maxpkt, sc->sc_tx_nsegs,
1227		    sc->sc_tx_boundary, sc->sc_tx_boundary);
1228		if (mb == NULL)
1229			goto free_tx_bufs;
1230
1231		myx_buf_put(&sc->sc_tx_buf_free, mb);
1232	}
1233
1234	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
1235		goto free_tx_bufs;
1236
1237	if (myx_rx_fill(sc, MYX_RXSMALL) != 0)
1238		goto free_rx_ring_small;
1239
1240	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
1241		goto empty_rx_ring_small;
1242
1243	if (myx_rx_fill(sc, MYX_RXBIG) != 0)
1244		goto free_rx_ring_big;
1245
1246	memset(&mc, 0, sizeof(mc));
1247	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
1248	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1249		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1250		goto empty_rx_ring_big;
1251	}
1252
1253	memset(&mc, 0, sizeof(mc));
1254	mc.mc_data0 = htobe32(16384);
1255	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1256		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1257		goto empty_rx_ring_big;
1258	}
1259
1260	mtx_enter(&sc->sc_sts_mtx);
1261	sc->sc_state = MYX_S_RUNNING;
1262	mtx_leave(&sc->sc_sts_mtx);
1263
1264	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1265		printf("%s: failed to start the device\n", DEVNAME(sc));
1266		goto empty_rx_ring_big;
1267	}
1268
1269	CLR(ifp->if_flags, IFF_OACTIVE);
1270	SET(ifp->if_flags, IFF_RUNNING);
1271	myx_iff(sc);
1272	myx_start(ifp);
1273
1274	return;
1275
1276empty_rx_ring_big:
1277	myx_rx_empty(sc, MYX_RXBIG);
1278free_rx_ring_big:
1279	myx_rx_free(sc, MYX_RXBIG);
1280empty_rx_ring_small:
1281	myx_rx_empty(sc, MYX_RXSMALL);
1282free_rx_ring_small:
1283	myx_rx_free(sc, MYX_RXSMALL);
1284free_tx_bufs:
1285	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
1286		myx_buf_free(sc, mb);
1287free_sts:
1288	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1289	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1290	myx_dmamem_free(sc, &sc->sc_sts_dma);
1291free_intrq:
1292	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1293	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1294	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1295free_pad:
1296	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1297	    sc->sc_paddma.mxm_map->dm_mapsize,
1298	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1299	myx_dmamem_free(sc, &sc->sc_paddma);
1300
1301	memset(&mc, 0, sizeof(mc));
1302	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1303		printf("%s: failed to reset the device\n", DEVNAME(sc));
1304	}
1305free_zero:
1306	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1307	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1308	myx_dmamem_free(sc, &sc->sc_zerodma);
1309}
1310
1311int
1312myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1313{
1314	struct myx_cmd		 mc;
1315
1316	memset(&mc, 0, sizeof(mc));
1317	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1318	    addr[2] << 8 | addr[3]);
1319	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1320
1321	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1322		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1323		return (-1);
1324	}
1325	return (0);
1326}
1327
1328void
1329myx_iff(struct myx_softc *sc)
1330{
1331	struct myx_cmd		mc;
1332	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1333	struct ether_multi	*enm;
1334	struct ether_multistep	step;
1335	u_int8_t *addr;
1336
1337	CLR(ifp->if_flags, IFF_ALLMULTI);
1338
1339	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1340	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1341		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1342		return;
1343	}
1344
1345	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1346		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1347		return;
1348	}
1349
1350	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1351		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1352		return;
1353	}
1354
1355	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1356	    sc->sc_ac.ac_multirangecnt > 0) {
1357		SET(ifp->if_flags, IFF_ALLMULTI);
1358		return;
1359	}
1360
1361	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1362	while (enm != NULL) {
1363		addr = enm->enm_addrlo;
1364
1365		memset(&mc, 0, sizeof(mc));
1366		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1367		    addr[2] << 8 | addr[3]);
1368		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1369		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1370			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1371			return;
1372		}
1373
1374		ETHER_NEXT_MULTI(step, enm);
1375	}
1376
1377	memset(&mc, 0, sizeof(mc));
1378	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1379		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1380		return;
1381	}
1382}
1383
void
myx_down(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	struct myx_buf		*mb;
	struct myx_cmd		 mc;
	int			 s;
	int			 ring;

	/*
	 * Move the state machine to MYX_S_DOWN and tell the firmware to
	 * take the interface down.  The interrupt handler advances the
	 * state to MYX_S_OFF and wakes us on sts once the chip has
	 * acknowledged, so sleep until that happens.
	 */
	myx_sts_enter(sc);
	sc->sc_linkdown = sts->ms_linkdown;
	sc->sc_state = MYX_S_DOWN;

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	while (sc->sc_state != MYX_S_OFF)
		msleep(sts, &sc->sc_sts_mtx, 0, "myxdown", 0);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mtx_leave(&sc->sc_sts_mtx);

	/* no more rx refilling once the chip is quiesced */
	timeout_del(&sc->sc_refill);

	/* report the link as unknown while the interface is down */
	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);

	/* tear down both rx rings (small and big) */
	for (ring = 0; ring < 2; ring++) {
		myx_rx_empty(sc, ring);
		myx_rx_free(sc, ring);
	}

	/* free tx buffers still posted to the chip, mbufs included */
	while ((mb = myx_buf_get(&sc->sc_tx_buf_list)) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	/* and the idle tx buffers, which carry no mbufs */
	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
		myx_buf_free(sc, mb);

	/* the msleep loop above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1458
/*
 * Write the second and subsequent tx descriptors of a packet into the
 * ring at the device, then append a padding descriptor for runt frames.
 * The first descriptor (at idx) is written separately by the caller so
 * it can be made visible to the chip last.
 */
void
myx_write_txd_tail(struct myx_softc *sc, struct myx_buf *mb, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc		txd;
	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
	bus_dmamap_t			map = mb->mb_map;
	int				i;

	/* descriptors for DMA segments 1..n-1; slot 0 is the caller's */
	for (i = 1; i < map->dm_nsegs; i++) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}

	/* pad runt frames up to 60 bytes using the shared zero buffer */
	if (map->dm_mapsize < 60) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(60 - map->dm_mapsize);
		txd.tx_flags = flags;

		/* i is one past the last segment slot written above */
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}
}
1491
/*
 * Transmit start routine.  Dequeues packets, DMA-loads them, and posts
 * tx descriptors to the device.  The first descriptor of the batch is
 * written last (and its final word last of all) so the chip does not
 * start on a partially written batch.
 */
void
myx_start(struct ifnet *ifp)
{
	struct myx_tx_desc		txd;
	SIMPLEQ_HEAD(, myx_buf)		list = SIMPLEQ_HEAD_INITIALIZER(list);
	struct myx_softc		*sc = ifp->if_softc;
	bus_dmamap_t			map;
	struct myx_buf			*mb, *firstmb;
	struct mbuf			*m;
	u_int32_t			offset = sc->sc_tx_ring_offset;
	u_int				idx, firstidx;
	u_int8_t			flags;

	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
	    IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/*
	 * Phase 1: dequeue and DMA-load packets onto a local list.
	 * Stop when the ring cannot take a worst-case packet
	 * (sc_tx_nsegs segments) or we run out of free tx buffers.
	 */
	for (;;) {
		if (sc->sc_tx_free <= sc->sc_tx_nsegs ||
		    (mb = myx_buf_get(&sc->sc_tx_buf_free)) == NULL) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			myx_buf_put(&sc->sc_tx_buf_free, mb);
			break;
		}

		if (myx_load_buf(sc, mb, m) != 0) {
			m_freem(m);
			myx_buf_put(&sc->sc_tx_buf_free, mb);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		mb->mb_m = m;

		map = mb->mb_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

		SIMPLEQ_INSERT_TAIL(&list, mb, mb_entry);

		/* runts consume one extra descriptor for the pad segment */
		atomic_sub_int(&sc->sc_tx_free, map->dm_nsegs +
		    (map->dm_mapsize < 60 ? 1 : 0));
	}

	/* post the first descriptor last */
	firstmb = SIMPLEQ_FIRST(&list);
	if (firstmb == NULL)
		return;

	SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
	myx_buf_put(&sc->sc_tx_buf_list, firstmb);

	/* reserve ring space for the deferred first packet */
	idx = firstidx = sc->sc_tx_ring_idx;
	idx += firstmb->mb_map->dm_nsegs +
	    (firstmb->mb_map->dm_mapsize < 60 ? 1 : 0);
	idx %= sc->sc_tx_ring_count;

	/* Phase 2: write descriptors for every packet except the first */
	while ((mb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
		myx_buf_put(&sc->sc_tx_buf_list, mb);

		map = mb->mb_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, mb, flags, offset, idx);

		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		idx %= sc->sc_tx_ring_count;
	}
	sc->sc_tx_ring_idx = idx;

	/* go back and post first mb */
	map = firstmb->mb_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, firstmb, flags, offset, firstidx);

	/* write all of the first descriptor except its final word... */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * firstidx, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* ...then the final word, which hands the batch to the chip */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (firstidx + 1) - sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * firstidx, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);
}
1616
/*
 * DMA-load an mbuf chain onto a tx buffer's map.  If the chain is too
 * fragmented for the map, defragment it once and retry.  Returns 0 on
 * success, 1 on failure (the caller frees the mbuf).
 */
int
myx_load_buf(struct myx_softc *sc, struct myx_buf *mb, struct mbuf *m)
{
	bus_dma_tag_t			dmat = sc->sc_dmat;
	bus_dmamap_t			dmap = mb->mb_map;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(dmat, dmap, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (1);
	}

	mb->mb_m = m;
	return (0);
}
1640
/*
 * Interrupt handler.  Consumes the DMA'd status block, completes tx,
 * harvests rx, acknowledges the interrupt at the chip, and completes
 * the handshake with myx_down() when the interface is being stopped.
 */
int
myx_intr(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state		 state = MYX_S_RUNNING;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 data, link = 0xffffffff;
	u_int8_t		 valid = 0;

	mtx_enter(&sc->sc_sts_mtx);
	if (sc->sc_state == MYX_S_OFF) {
		/* chip is quiesced; this interrupt is not for us */
		mtx_leave(&sc->sc_sts_mtx);
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		/* no new status: shared interrupt, nothing to do */
		myx_sts_leave(sc);
		return (0);
	}

	/* legacy INTx needs an explicit deassert write */
	if (sc->sc_intx) {
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/*
	 * Re-read until the status block stops being rewritten under us,
	 * picking up the latest tx completion count each pass.
	 */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	if (sts->ms_statusupdated) {
		link = sts->ms_linkstate;

		/* myx_down() is waiting for the link to drop */
		if (sc->sc_state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown)
			state = MYX_S_DOWN;
	}
	myx_sts_leave(sc);

	/* complete transmitted packets up to the chip's count */
	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	/* claim the interrupt at the chip (two claim words) */
	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (state == MYX_S_DOWN) {
		/* myx_down is waiting for us */
		mtx_enter(&sc->sc_sts_mtx);
		sc->sc_state = MYX_S_OFF;
		wakeup(sts);
		mtx_leave(&sc->sc_sts_mtx);

		return (1);
	}

	if (link != 0xffffffff) {
		KERNEL_LOCK();
		myx_link_state(sc, link);
		KERNEL_UNLOCK();
	}

	/* tx space may have freed up; restart output */
	if (ISSET(ifp->if_flags, IFF_OACTIVE)) {
		KERNEL_LOCK();
		CLR(ifp->if_flags, IFF_OACTIVE);
		myx_start(ifp);
		KERNEL_UNLOCK();
	}

	return (1);
}
1730
1731void
1732myx_refill(void *xsc)
1733{
1734	struct myx_softc *sc = xsc;
1735	struct myx_rx_ring *mrr;
1736	int ring;
1737
1738	for (ring = 0; ring < 2; ring++) {
1739		mrr = &sc->sc_rx_ring[ring];
1740
1741		if (myx_rx_fill(sc, ring) >= 0 &&
1742		    mrr->mrr_prod == mrr->mrr_cons)
1743			timeout_add(&sc->sc_refill, 1);
1744	}
1745}
1746
1747void
1748myx_txeof(struct myx_softc *sc, u_int32_t done_count)
1749{
1750	struct ifnet *ifp = &sc->sc_ac.ac_if;
1751	struct myx_buf *mb;
1752	struct mbuf *m;
1753	bus_dmamap_t map;
1754	u_int free = 0;
1755
1756	do {
1757		mb = myx_buf_get(&sc->sc_tx_buf_list);
1758		if (mb == NULL) {
1759			printf("oh noes, no mb!\n");
1760			break;
1761		}
1762
1763		m = mb->mb_m;
1764		map = mb->mb_map;
1765
1766		free += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1767
1768		bus_dmamap_sync(sc->sc_dmat, map, 0,
1769		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1770
1771		bus_dmamap_unload(sc->sc_dmat, map);
1772		ifp->if_opackets++;
1773
1774		m_freem(m);
1775		myx_buf_put(&sc->sc_tx_buf_free, mb);
1776	} while (++sc->sc_tx_count != done_count);
1777
1778	if (free)
1779		atomic_add_int(&sc->sc_tx_free, free);
1780}
1781
/*
 * Receive completion.  Drains the interrupt queue of length entries,
 * pulls the corresponding mbufs off the small/big rx rings, and hands
 * them to the network stack.  Freed ring slots are returned to the
 * rxr accounting and the rings are refilled.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_rx_slot *mrs;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/* a zero length terminates the queue of completed packets */
	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		/* clear the entry so it reads as empty next pass */
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* the packet length tells us which ring it came off */
		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		mrs = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, mrs->mrs_map, 0,
		    mrs->mrs_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mrs->mrs_map);

		m = mrs->mrs_m;
		/* chip DMA'd past the alignment pad; skip it */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		/* give the consumed slots back to the rxr accounting */
		mtx_enter(&mrr->mrr_rxr_mtx);
		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		mtx_leave(&mrr->mrr_rxr_mtx);

		/* refill; if the ring is still empty, retry via timeout */
		if (myx_rx_fill(sc, ring) >= 0 &&
		    mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&sc->sc_refill, 0);
	}

	if_input(ifp, &ml);
}
1846
/*
 * Fill up to "slots" rx ring slots with fresh mbufs and post their
 * descriptors to the device.  The first slot's descriptor is written
 * last so the chip never sees it before the rest of the batch.
 * Returns the number of slots that could not be filled.
 */
static int
myx_rx_fill_slots(struct myx_softc *sc, int ring, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
	struct myx_rx_slot *mrs;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	/* fill the first slot but defer posting its descriptor */
	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, ring, &mrr->mrr_slots[first]) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		mrs = &mrr->mrr_slots[p];

		if (myx_buf_fill(sc, ring, mrs) != 0)
			break;

		rxd.rx_addr = htobe64(mrs->mrs_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	mrs = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(mrs->mrs_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}
1893
/*
 * Allocate the slot array and per-slot DMA maps for an rx ring, and
 * initialize every on-chip descriptor to all-ones (no buffer posted).
 * Returns 0 on success or an errno; on failure everything created so
 * far is torn down.
 */
int
myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
{
	struct myx_rx_desc rxd;
	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
	struct myx_rx_slot *mrs;
	u_int32_t offset = mrr->mrr_offset;
	int rv;
	int i;

	/* NOTE(review): args are (size, nmemb) — mallocarray's overflow
	 * check is symmetric so this is safe, if unconventional */
	mrr->mrr_slots = mallocarray(sizeof(*mrs), sc->sc_rx_ring_count,
	    M_DEVBUF, M_WAITOK);
	if (mrr->mrr_slots == NULL)
		return (ENOMEM);

	/* 0xff... marks a descriptor with no buffer behind it */
	memset(&rxd, 0xff, sizeof(rxd));
	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		mrs = &mrr->mrr_slots[i];
		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mrs->mrs_map);
		if (rv != 0)
			goto destroy;

		myx_bus_space_write(sc, offset + i * sizeof(rxd),
		    &rxd, sizeof(rxd));
	}

	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
	mrr->mrr_prod = mrr->mrr_cons = 0;

	return (0);

destroy:
	/* unwind the maps created before the failure */
	while (i-- > 0) {
		mrs = &mrr->mrr_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, mrs->mrs_map);
	}
	free(mrr->mrr_slots, M_DEVBUF, sizeof(*mrs) * sc->sc_rx_ring_count);
	return (rv);
}
1934
/*
 * Try to become the (single) filler of this ring.  Returns non-zero if
 * we won; otherwise another context is already filling and will notice
 * our increment via myx_rx_ring_leave().
 */
static inline int
myx_rx_ring_enter(struct myx_rx_ring *mrr)
{
	return (atomic_inc_int_nv(&mrr->mrr_running) == 1);
}
1940
/*
 * Give up ownership of the ring fill.  Returns non-zero if nobody else
 * asked for a fill while we held it; otherwise collapse the pending
 * requests back to 1 and return zero so the caller loops again.
 */
static inline int
myx_rx_ring_leave(struct myx_rx_ring *mrr)
{
	if (atomic_cas_uint(&mrr->mrr_running, 1, 0) == 1)
		return (1);

	/* other contexts bumped mrr_running; absorb them into one pass */
	mrr->mrr_running = 1;

	return (0);
}
1951
1952int
1953myx_rx_fill(struct myx_softc *sc, int ring)
1954{
1955	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
1956	u_int slots;
1957	int rv = 1;
1958
1959	if (!myx_rx_ring_enter(mrr))
1960		return (-1);
1961
1962	do {
1963		mtx_enter(&sc->sc_rx_ring[ring].mrr_rxr_mtx);
1964		slots = if_rxr_get(&sc->sc_rx_ring[ring].mrr_rxr,
1965		    sc->sc_rx_ring_count);
1966		mtx_leave(&sc->sc_rx_ring[ring].mrr_rxr_mtx);
1967
1968		if (slots == 0)
1969			continue;
1970
1971		slots = myx_rx_fill_slots(sc, ring, slots);
1972		rv = 0;
1973
1974		if (slots > 0) {
1975			mtx_enter(&sc->sc_rx_ring[ring].mrr_rxr_mtx);
1976			if_rxr_put(&sc->sc_rx_ring[ring].mrr_rxr, slots);
1977			mtx_leave(&sc->sc_rx_ring[ring].mrr_rxr_mtx);
1978		}
1979	} while (!myx_rx_ring_leave(mrr));
1980
1981	return (rv);
1982}
1983
1984void
1985myx_rx_empty(struct myx_softc *sc, int ring)
1986{
1987	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
1988	struct myx_rx_slot *mrs;
1989
1990	while (mrr->mrr_cons != mrr->mrr_prod) {
1991		mrs = &mrr->mrr_slots[mrr->mrr_cons];
1992
1993		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
1994			mrr->mrr_cons = 0;
1995
1996		bus_dmamap_sync(sc->sc_dmat, mrs->mrs_map, 0,
1997		    mrs->mrs_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1998		bus_dmamap_unload(sc->sc_dmat, mrs->mrs_map);
1999		m_freem(mrs->mrs_m);
2000	}
2001
2002	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
2003}
2004
2005void
2006myx_rx_free(struct myx_softc *sc, int ring)
2007{
2008	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
2009	struct myx_rx_slot *mrs;
2010	int i;
2011
2012	for (i = 0; i < sc->sc_rx_ring_count; i++) {
2013		mrs = &mrr->mrr_slots[i];
2014		bus_dmamap_destroy(sc->sc_dmat, mrs->mrs_map);
2015	}
2016
2017	free(mrr->mrr_slots, M_DEVBUF, sizeof(*mrs) * sc->sc_rx_ring_count);
2018}
2019
2020struct mbuf *
2021myx_mcl_small(void)
2022{
2023	struct mbuf *m;
2024
2025	m = MCLGETI(NULL, M_DONTWAIT, NULL, MYX_RXSMALL_SIZE);
2026	if (m == NULL)
2027		return (NULL);
2028
2029	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;
2030
2031	return (m);
2032}
2033
2034struct mbuf *
2035myx_mcl_big(void)
2036{
2037	struct mbuf *m;
2038	void *mcl;
2039
2040	MGETHDR(m, M_DONTWAIT, MT_DATA);
2041	if (m == NULL)
2042		return (NULL);
2043
2044	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
2045	if (mcl == NULL) {
2046		m_free(m);
2047		return (NULL);
2048	}
2049
2050	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, m_extfree_pool, myx_mcl_pool);
2051	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;
2052
2053	return (m);
2054}
2055
2056int
2057myx_buf_fill(struct myx_softc *sc, int ring, struct myx_rx_slot *mrs)
2058{
2059	struct mbuf *(*mclget[2])(void) = { myx_mcl_small, myx_mcl_big };
2060	struct mbuf *m;
2061	int rv;
2062
2063	m = (*mclget[ring])();
2064	if (m == NULL)
2065		return (ENOMEM);
2066
2067	rv = bus_dmamap_load_mbuf(sc->sc_dmat, mrs->mrs_map, m, BUS_DMA_NOWAIT);
2068	if (rv != 0) {
2069		m_freem(m);
2070		return (rv);
2071	}
2072
2073	bus_dmamap_sync(sc->sc_dmat, mrs->mrs_map, 0,
2074	    mrs->mrs_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2075
2076	mrs->mrs_m = m;
2077
2078	return (0);
2079}
2080
2081struct myx_buf *
2082myx_buf_alloc(struct myx_softc *sc, bus_size_t size, int nsegs,
2083    bus_size_t maxsegsz, bus_size_t boundary)
2084{
2085	struct myx_buf *mb;
2086
2087	mb = pool_get(myx_buf_pool, PR_WAITOK);
2088	if (mb == NULL)
2089		return (NULL);
2090
2091	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, boundary,
2092	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mb->mb_map) != 0) {
2093		pool_put(myx_buf_pool, mb);
2094		return (NULL);
2095	}
2096
2097	return (mb);
2098}
2099
/*
 * Release a buffer wrapper: destroy its DMA map and return the wrapper
 * to the pool.  Any mbuf must already have been freed by the caller.
 */
void
myx_buf_free(struct myx_softc *sc, struct myx_buf *mb)
{
	bus_dmamap_destroy(sc->sc_dmat, mb->mb_map);
	pool_put(myx_buf_pool, mb);
}
2106
2107struct myx_buf *
2108myx_buf_get(struct myx_buf_list *mbl)
2109{
2110	struct myx_buf *mb;
2111
2112	mtx_enter(&mbl->mbl_mtx);
2113	mb = SIMPLEQ_FIRST(&mbl->mbl_q);
2114	if (mb != NULL)
2115		SIMPLEQ_REMOVE_HEAD(&mbl->mbl_q, mb_entry);
2116	mtx_leave(&mbl->mbl_mtx);
2117
2118	return (mb);
2119}
2120
2121int
2122myx_bufs_empty(struct myx_buf_list *mbl)
2123{
2124	int rv;
2125
2126	mtx_enter(&mbl->mbl_mtx);
2127	rv = SIMPLEQ_EMPTY(&mbl->mbl_q);
2128	mtx_leave(&mbl->mbl_mtx);
2129
2130	return (rv);
2131}
2132
/*
 * Append a buffer to the tail of a mutex-protected buffer list.
 */
void
myx_buf_put(struct myx_buf_list *mbl, struct myx_buf *mb)
{
	mtx_enter(&mbl->mbl_mtx);
	SIMPLEQ_INSERT_TAIL(&mbl->mbl_q, mb, mb_entry);
	mtx_leave(&mbl->mbl_mtx);
}
2140
/*
 * Initialize an empty buffer list and its mutex (at IPL_NET, since the
 * lists are accessed from the network interrupt path).
 */
void
myx_bufs_init(struct myx_buf_list *mbl)
{
	SIMPLEQ_INIT(&mbl->mbl_q);
	mtx_init(&mbl->mbl_mtx, IPL_NET);
}
2147