if_myx.c revision 1.80
1/*	$OpenBSD: if_myx.c,v 1.80 2015/08/14 10:42:25 dlg Exp $	*/
2
3/*
4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/*
20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21 */
22
23#include "bpfilter.h"
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/sockio.h>
28#include <sys/mbuf.h>
29#include <sys/kernel.h>
30#include <sys/socket.h>
31#include <sys/malloc.h>
32#include <sys/pool.h>
33#include <sys/timeout.h>
34#include <sys/device.h>
35#include <sys/queue.h>
36#include <sys/atomic.h>
37
38#include <machine/bus.h>
39#include <machine/intr.h>
40
41#include <net/if.h>
42#include <net/if_dl.h>
43#include <net/if_media.h>
44
45#if NBPFILTER > 0
46#include <net/bpf.h>
47#endif
48
49#include <netinet/in.h>
50#include <netinet/if_ether.h>
51
52#include <dev/pci/pcireg.h>
53#include <dev/pci/pcivar.h>
54#include <dev/pci/pcidevs.h>
55
56#include <dev/pci/if_myxreg.h>
57
58#ifdef MYX_DEBUG
59#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
60#define MYXDBG_CMD	(2<<0)	/* commands */
61#define MYXDBG_INTR	(3<<0)	/* interrupts */
62#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
63int myx_debug = MYXDBG_ALL;
64#define DPRINTF(_lvl, _arg...)	do {					\
65	if (myx_debug & (_lvl))						\
66		printf(_arg);						\
67} while (0)
68#else
69#define DPRINTF(_lvl, arg...)
70#endif
71
72#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
73
74struct myx_dmamem {
75	bus_dmamap_t		 mxm_map;
76	bus_dma_segment_t	 mxm_seg;
77	int			 mxm_nsegs;
78	size_t			 mxm_size;
79	caddr_t			 mxm_kva;
80};
81
82struct myx_buf {
83	SIMPLEQ_ENTRY(myx_buf)	 mb_entry;
84	bus_dmamap_t		 mb_map;
85	struct mbuf		*mb_m;
86};
87
88struct myx_buf_list {
89	SIMPLEQ_HEAD(, myx_buf)	mbl_q;
90	struct mutex		mbl_mtx;
91};
92
93struct pool *myx_buf_pool;
94struct pool *myx_mcl_pool;
95
96struct myx_rx_slot {
97	bus_dmamap_t		 mrs_map;
98	struct mbuf		*mrs_m;
99};
100
101struct myx_rx_ring {
102	struct myx_softc	*mrr_softc;
103	struct timeout		 mrr_refill;
104	struct if_rxring	 mrr_rxr;
105	struct myx_rx_slot	*mrr_slots;
106	u_int32_t		 mrr_offset;
107	u_int			 mrr_running;
108	u_int			 mrr_prod;
109	u_int			 mrr_cons;
110	struct mbuf		*(*mrr_mclget)(void);
111};
112
113enum myx_state {
114	MYX_S_OFF = 0,
115	MYX_S_RUNNING,
116	MYX_S_DOWN
117};
118
119struct myx_softc {
120	struct device		 sc_dev;
121	struct arpcom		 sc_ac;
122
123	pci_chipset_tag_t	 sc_pc;
124	pci_intr_handle_t	 sc_ih;
125	pcitag_t		 sc_tag;
126
127	bus_dma_tag_t		 sc_dmat;
128	bus_space_tag_t		 sc_memt;
129	bus_space_handle_t	 sc_memh;
130	bus_size_t		 sc_mems;
131
132	struct myx_dmamem	 sc_zerodma;
133	struct myx_dmamem	 sc_cmddma;
134	struct myx_dmamem	 sc_paddma;
135
136	struct myx_dmamem	 sc_sts_dma;
137	volatile struct myx_status	*sc_sts;
138	struct mutex		 sc_sts_mtx;
139
140	int			 sc_intx;
141	void			*sc_irqh;
142	u_int32_t		 sc_irqcoaloff;
143	u_int32_t		 sc_irqclaimoff;
144	u_int32_t		 sc_irqdeassertoff;
145
146	struct myx_dmamem	 sc_intrq_dma;
147	struct myx_intrq_desc	*sc_intrq;
148	u_int			 sc_intrq_count;
149	u_int			 sc_intrq_idx;
150
151	u_int			 sc_rx_ring_count;
152#define  MYX_RXSMALL		 0
153#define  MYX_RXBIG		 1
154	struct myx_rx_ring	 sc_rx_ring[2];
155
156	bus_size_t		 sc_tx_boundary;
157	u_int			 sc_tx_ring_count;
158	u_int32_t		 sc_tx_ring_offset;
159	u_int			 sc_tx_nsegs;
160	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
161	u_int			 sc_tx_free;
162	struct myx_buf_list	 sc_tx_buf_free;
163	struct myx_buf_list	 sc_tx_buf_list;
164	u_int			 sc_tx_ring_idx;
165
166	struct ifmedia		 sc_media;
167
168	volatile enum myx_state	 sc_state;
169	volatile u_int8_t	 sc_linkdown;
170};
171
172#define MYX_RXSMALL_SIZE	MCLBYTES
173#define MYX_RXBIG_SIZE		(9 * 1024)
174
175int	 myx_match(struct device *, void *, void *);
176void	 myx_attach(struct device *, struct device *, void *);
177int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
178int	 myx_query(struct myx_softc *sc, char *, size_t);
179u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
180void	 myx_attachhook(void *);
181int	 myx_loadfirmware(struct myx_softc *, const char *);
182int	 myx_probe_firmware(struct myx_softc *);
183
184void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
185void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
186
187#if defined(__LP64__)
188#define _myx_bus_space_write bus_space_write_raw_region_8
189typedef u_int64_t myx_bus_t;
190#else
191#define _myx_bus_space_write bus_space_write_raw_region_4
192typedef u_int32_t myx_bus_t;
193#endif
194#define myx_bus_space_write(_sc, _o, _a, _l) \
195    _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))
196
197int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
198int	 myx_boot(struct myx_softc *, u_int32_t);
199
200int	 myx_rdma(struct myx_softc *, u_int);
201int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
202	    bus_size_t, u_int align);
203void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
204int	 myx_media_change(struct ifnet *);
205void	 myx_media_status(struct ifnet *, struct ifmediareq *);
206void	 myx_link_state(struct myx_softc *, u_int32_t);
207void	 myx_watchdog(struct ifnet *);
208int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
209int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
210void	 myx_up(struct myx_softc *);
211void	 myx_iff(struct myx_softc *);
212void	 myx_down(struct myx_softc *);
213
214void	 myx_start(struct ifnet *);
215void	 myx_write_txd_tail(struct myx_softc *, struct myx_buf *, u_int8_t,
216	    u_int32_t, u_int);
217int	 myx_load_buf(struct myx_softc *, struct myx_buf *, struct mbuf *);
218int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
219int	 myx_intr(void *);
220void	 myx_rxeof(struct myx_softc *);
221void	 myx_txeof(struct myx_softc *, u_int32_t);
222
223struct myx_buf *	myx_buf_alloc(struct myx_softc *, bus_size_t, int,
224			    bus_size_t, bus_size_t);
225void			myx_buf_free(struct myx_softc *, struct myx_buf *);
226void			myx_bufs_init(struct myx_buf_list *);
227int			myx_bufs_empty(struct myx_buf_list *);
228struct myx_buf *	myx_buf_get(struct myx_buf_list *);
229void			myx_buf_put(struct myx_buf_list *, struct myx_buf *);
230int			myx_buf_fill(struct myx_softc *, struct myx_rx_slot *,
231			    struct mbuf *(*)(void));
232struct mbuf *		myx_mcl_small(void);
233struct mbuf *		myx_mcl_big(void);
234
235int			myx_rx_init(struct myx_softc *, int, bus_size_t);
236int			myx_rx_fill(struct myx_softc *, struct myx_rx_ring *);
237void			myx_rx_empty(struct myx_softc *, struct myx_rx_ring *);
238void			myx_rx_free(struct myx_softc *, struct myx_rx_ring *);
239
240void			myx_refill(void *);
241
242static inline void
243myx_sts_enter(struct myx_softc *sc)
244{
245	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
246
247	mtx_enter(&sc->sc_sts_mtx);
248	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
249	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
250}
251
252static inline void
253myx_sts_leave(struct myx_softc *sc)
254{
255	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
256
257	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
258	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
259	mtx_leave(&sc->sc_sts_mtx);
260}
261
262struct cfdriver myx_cd = {
263	NULL, "myx", DV_IFNET
264};
265struct cfattach myx_ca = {
266	sizeof(struct myx_softc), myx_match, myx_attach
267};
268
269const struct pci_matchid myx_devices[] = {
270	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
271	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
272};
273
274int
275myx_match(struct device *parent, void *match, void *aux)
276{
277	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
278}
279
280void
281myx_attach(struct device *parent, struct device *self, void *aux)
282{
283	struct myx_softc	*sc = (struct myx_softc *)self;
284	struct pci_attach_args	*pa = aux;
285	char			 part[32];
286	pcireg_t		 memtype;
287
288	sc->sc_pc = pa->pa_pc;
289	sc->sc_tag = pa->pa_tag;
290	sc->sc_dmat = pa->pa_dmat;
291
292	sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc;
293	sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small;
294	timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill,
295	    &sc->sc_rx_ring[MYX_RXSMALL]);
296	sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc;
297	sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big;
298	timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill,
299	    &sc->sc_rx_ring[MYX_RXBIG]);
300
301	myx_bufs_init(&sc->sc_tx_buf_free);
302	myx_bufs_init(&sc->sc_tx_buf_list);
303
304	mtx_init(&sc->sc_sts_mtx, IPL_NET);
305
306	/* Map the PCI memory space */
307	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
308	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
309	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
310		printf(": unable to map register memory\n");
311		return;
312	}
313
314	/* Get board details (mac/part) */
315	memset(part, 0, sizeof(part));
316	if (myx_query(sc, part, sizeof(part)) != 0)
317		goto unmap;
318
319	/* Map the interrupt */
320	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
321		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
322			printf(": unable to map interrupt\n");
323			goto unmap;
324		}
325		sc->sc_intx = 1;
326	}
327
328	printf(": %s, model %s, address %s\n",
329	    pci_intr_string(pa->pa_pc, sc->sc_ih),
330	    part[0] == '\0' ? "(unknown)" : part,
331	    ether_sprintf(sc->sc_ac.ac_enaddr));
332
333	/* this is sort of racy */
334	if (myx_buf_pool == NULL) {
335		extern struct kmem_pa_mode kp_dma_contig;
336
337		myx_buf_pool = malloc(sizeof(*myx_buf_pool), M_DEVBUF,
338		    M_WAITOK);
339		if (myx_buf_pool == NULL) {
340			printf("%s: unable to allocate buf pool\n",
341			    DEVNAME(sc));
342			goto unmap;
343		}
344		pool_init(myx_buf_pool, sizeof(struct myx_buf),
345		    0, 0, 0, "myxbufs", &pool_allocator_nointr);
346		pool_setipl(myx_buf_pool, IPL_NONE);
347
348		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
349		    M_WAITOK);
350		if (myx_mcl_pool == NULL) {
351			printf("%s: unable to allocate mcl pool\n",
352			    DEVNAME(sc));
353			goto unmap;
354		}
355		pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY, 0,
356		    0, "myxmcl", NULL);
357		pool_setipl(myx_mcl_pool, IPL_NET);
358		pool_set_constraints(myx_mcl_pool, &kp_dma_contig);
359	}
360
361	if (myx_pcie_dc(sc, pa) != 0)
362		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));
363
364	if (mountroothook_establish(myx_attachhook, sc) == NULL) {
365		printf("%s: unable to establish mountroot hook\n", DEVNAME(sc));
366		goto unmap;
367	}
368
369	return;
370
371 unmap:
372	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
373	sc->sc_mems = 0;
374}
375
376int
377myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
378{
379	pcireg_t dcsr;
380	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
381	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
382	int reg;
383
384	if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
385	    &reg, NULL) == 0)
386		return (-1);
387
388	reg += PCI_PCIE_DCSR;
389	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
390	if ((dcsr & mask) != dc) {
391		CLR(dcsr, mask);
392		SET(dcsr, dc);
393		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
394	}
395
396	return (0);
397}
398
399u_int
400myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
401{
402	u_int		i, j;
403	u_int8_t	digit;
404
405	memset(lladdr, 0, ETHER_ADDR_LEN);
406	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
407		if (mac[i] >= '0' && mac[i] <= '9')
408			digit = mac[i] - '0';
409		else if (mac[i] >= 'A' && mac[i] <= 'F')
410			digit = mac[i] - 'A' + 10;
411		else if (mac[i] >= 'a' && mac[i] <= 'f')
412			digit = mac[i] - 'a' + 10;
413		else
414			continue;
415		if ((j & 1) == 0)
416			digit <<= 4;
417		lladdr[j++/2] |= digit;
418	}
419
420	return (i);
421}
422
423int
424myx_query(struct myx_softc *sc, char *part, size_t partlen)
425{
426	struct myx_gen_hdr hdr;
427	u_int32_t	offset;
428	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
429	u_int		i, len, maxlen;
430
431	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
432	offset = betoh32(offset);
433	if (offset + sizeof(hdr) > sc->sc_mems) {
434		printf(": header is outside register window\n");
435		return (1);
436	}
437
438	myx_read(sc, offset, &hdr, sizeof(hdr));
439	offset = betoh32(hdr.fw_specs);
440	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));
441
442	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);
443
444	for (i = 0; i < len; i++) {
445		maxlen = len - i;
446		if (strings[i] == '\0')
447			break;
448		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
449			i += 4;
450			i += myx_ether_aton(&strings[i],
451			    sc->sc_ac.ac_enaddr, maxlen);
452		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
453			i += 3;
454			i += strlcpy(part, &strings[i], min(maxlen, partlen));
455		}
456		for (; i < len; i++) {
457			if (strings[i] == '\0')
458				break;
459		}
460	}
461
462	return (0);
463}
464
465int
466myx_loadfirmware(struct myx_softc *sc, const char *filename)
467{
468	struct myx_gen_hdr	hdr;
469	u_int8_t		*fw;
470	size_t			fwlen;
471	u_int32_t		offset;
472	u_int			i, ret = 1;
473
474	if (loadfirmware(filename, &fw, &fwlen) != 0) {
475		printf("%s: could not load firmware %s\n", DEVNAME(sc),
476		    filename);
477		return (1);
478	}
479	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
480		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
481		goto err;
482	}
483
484	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
485	offset = betoh32(offset);
486	if ((offset + sizeof(hdr)) > fwlen) {
487		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
488		goto err;
489	}
490
491	memcpy(&hdr, fw + offset, sizeof(hdr));
492	DPRINTF(MYXDBG_INIT, "%s: "
493	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
494	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
495	    betoh32(hdr.fw_type), hdr.fw_version);
496
497	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
498	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
499		printf("%s: invalid firmware type 0x%x version %s\n",
500		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
501		goto err;
502	}
503
504	/* Write the firmware to the card's SRAM */
505	for (i = 0; i < fwlen; i += 256)
506		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));
507
508	if (myx_boot(sc, fwlen) != 0) {
509		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
510		goto err;
511	}
512
513	ret = 0;
514
515err:
516	free(fw, M_DEVBUF, 0);
517	return (ret);
518}
519
520void
521myx_attachhook(void *arg)
522{
523	struct myx_softc	*sc = (struct myx_softc *)arg;
524	struct ifnet		*ifp = &sc->sc_ac.ac_if;
525	struct myx_cmd		 mc;
526
527	/* Allocate command DMA memory */
528	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
529	    MYXALIGN_CMD) != 0) {
530		printf("%s: failed to allocate command DMA memory\n",
531		    DEVNAME(sc));
532		return;
533	}
534
535	/* Try the firmware stored on disk */
536	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
537		/* error printed by myx_loadfirmware */
538		goto freecmd;
539	}
540
541	memset(&mc, 0, sizeof(mc));
542
543	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
544		printf("%s: failed to reset the device\n", DEVNAME(sc));
545		goto freecmd;
546	}
547
548	sc->sc_tx_boundary = 4096;
549
550	if (myx_probe_firmware(sc) != 0) {
551		printf("%s: error while selecting firmware\n", DEVNAME(sc));
552		goto freecmd;
553	}
554
555	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
556	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
557	if (sc->sc_irqh == NULL) {
558		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
559		goto freecmd;
560	}
561
562	ifp->if_softc = sc;
563	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
564	ifp->if_ioctl = myx_ioctl;
565	ifp->if_start = myx_start;
566	ifp->if_watchdog = myx_watchdog;
567	ifp->if_hardmtu = 9000;
568	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
569	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
570	IFQ_SET_READY(&ifp->if_snd);
571
572	ifp->if_capabilities = IFCAP_VLAN_MTU;
573#if 0
574	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
575	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
576	    IFCAP_CSUM_UDPv4;
577#endif
578
579	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
580	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
581	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
582
583	if_attach(ifp);
584	ether_ifattach(ifp);
585
586	return;
587
588freecmd:
589	myx_dmamem_free(sc, &sc->sc_cmddma);
590}
591
592int
593myx_probe_firmware(struct myx_softc *sc)
594{
595	struct myx_dmamem test;
596	bus_dmamap_t map;
597	struct myx_cmd mc;
598	pcireg_t csr;
599	int offset;
600	int width = 0;
601
602	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
603	    &offset, NULL)) {
604		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
605		    offset + PCI_PCIE_LCSR);
606		width = (csr >> 20) & 0x3f;
607
608		if (width <= 4) {
609			/*
610			 * if the link width is 4 or less we can use the
611			 * aligned firmware.
612			 */
613			return (0);
614		}
615	}
616
617	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
618		return (1);
619	map = test.mxm_map;
620
621	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
622	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
623
624	memset(&mc, 0, sizeof(mc));
625	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
626	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
627	mc.mc_data2 = htobe32(4096 * 0x10000);
628	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
629		printf("%s: DMA read test failed\n", DEVNAME(sc));
630		goto fail;
631	}
632
633	memset(&mc, 0, sizeof(mc));
634	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
635	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
636	mc.mc_data2 = htobe32(4096 * 0x1);
637	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
638		printf("%s: DMA write test failed\n", DEVNAME(sc));
639		goto fail;
640	}
641
642	memset(&mc, 0, sizeof(mc));
643	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
644	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
645	mc.mc_data2 = htobe32(4096 * 0x10001);
646	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
647		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
648		goto fail;
649	}
650
651	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
652	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
653	myx_dmamem_free(sc, &test);
654	return (0);
655
656fail:
657	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
658	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
659	myx_dmamem_free(sc, &test);
660
661	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
662		printf("%s: unable to load %s\n", DEVNAME(sc),
663		    MYXFW_UNALIGNED);
664		return (1);
665	}
666
667	sc->sc_tx_boundary = 2048;
668
669	printf("%s: using unaligned firmware\n", DEVNAME(sc));
670	return (0);
671}
672
673void
674myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
675{
676	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
677	    BUS_SPACE_BARRIER_READ);
678	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
679}
680
681void
682myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
683{
684	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
685	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
686	    BUS_SPACE_BARRIER_WRITE);
687}
688
689int
690myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
691    bus_size_t size, u_int align)
692{
693	mxm->mxm_size = size;
694
695	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
696	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
697	    &mxm->mxm_map) != 0)
698		return (1);
699	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
700	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
701	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
702		goto destroy;
703	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
704	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
705		goto free;
706	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
707	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
708		goto unmap;
709
710	return (0);
711 unmap:
712	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
713 free:
714	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
715 destroy:
716	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
717	return (1);
718}
719
720void
721myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
722{
723	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
724	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
725	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
726	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
727}
728
729int
730myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
731{
732	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
733	struct myx_response	*mr;
734	u_int			 i;
735	u_int32_t		 result, data;
736#ifdef MYX_DEBUG
737	static const char *cmds[MYXCMD_MAX] = {
738		"CMD_NONE",
739		"CMD_RESET",
740		"CMD_GET_VERSION",
741		"CMD_SET_INTRQDMA",
742		"CMD_SET_BIGBUFSZ",
743		"CMD_SET_SMALLBUFSZ",
744		"CMD_GET_TXRINGOFF",
745		"CMD_GET_RXSMALLRINGOFF",
746		"CMD_GET_RXBIGRINGOFF",
747		"CMD_GET_INTRACKOFF",
748		"CMD_GET_INTRDEASSERTOFF",
749		"CMD_GET_TXRINGSZ",
750		"CMD_GET_RXRINGSZ",
751		"CMD_SET_INTRQSZ",
752		"CMD_SET_IFUP",
753		"CMD_SET_IFDOWN",
754		"CMD_SET_MTU",
755		"CMD_GET_INTRCOALDELAYOFF",
756		"CMD_SET_STATSINTVL",
757		"CMD_SET_STATSDMA_OLD",
758		"CMD_SET_PROMISC",
759		"CMD_UNSET_PROMISC",
760		"CMD_SET_LLADDR",
761		"CMD_SET_FC",
762		"CMD_UNSET_FC",
763		"CMD_DMA_TEST",
764		"CMD_SET_ALLMULTI",
765		"CMD_UNSET_ALLMULTI",
766		"CMD_SET_MCASTGROUP",
767		"CMD_UNSET_MCASTGROUP",
768		"CMD_UNSET_MCAST",
769		"CMD_SET_STATSDMA",
770		"CMD_UNALIGNED_DMA_TEST",
771		"CMD_GET_UNALIGNED_STATUS"
772	};
773#endif
774
775	mc->mc_cmd = htobe32(cmd);
776	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
777	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
778
779	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
780	mr->mr_result = 0xffffffff;
781
782	/* Send command */
783	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
784	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
785	    BUS_DMASYNC_PREREAD);
786
787	for (i = 0; i < 20; i++) {
788		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
789		    BUS_DMASYNC_POSTREAD);
790		result = betoh32(mr->mr_result);
791		data = betoh32(mr->mr_data);
792
793		if (result != 0xffffffff)
794			break;
795
796		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
797		    BUS_DMASYNC_PREREAD);
798		delay(1000);
799	}
800
801	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
802	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
803	    cmds[cmd], i, result, data, data);
804
805	if (result != 0)
806		return (-1);
807
808	if (r != NULL)
809		*r = data;
810	return (0);
811}
812
813int
814myx_boot(struct myx_softc *sc, u_int32_t length)
815{
816	struct myx_bootcmd	 bc;
817	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
818	u_int32_t		*status;
819	u_int			 i, ret = 1;
820
821	memset(&bc, 0, sizeof(bc));
822	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
823	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
824	bc.bc_result = 0xffffffff;
825	bc.bc_offset = htobe32(MYX_FW_BOOT);
826	bc.bc_length = htobe32(length - 8);
827	bc.bc_copyto = htobe32(8);
828	bc.bc_jumpto = htobe32(0);
829
830	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
831	*status = 0;
832
833	/* Send command */
834	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
835	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
836	    BUS_DMASYNC_PREREAD);
837
838	for (i = 0; i < 200; i++) {
839		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
840		    BUS_DMASYNC_POSTREAD);
841		if (*status == 0xffffffff) {
842			ret = 0;
843			break;
844		}
845
846		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
847		    BUS_DMASYNC_PREREAD);
848		delay(1000);
849	}
850
851	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
852	    DEVNAME(sc), i, ret);
853
854	return (ret);
855}
856
857int
858myx_rdma(struct myx_softc *sc, u_int do_enable)
859{
860	struct myx_rdmacmd	 rc;
861	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
862	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
863	u_int32_t		*status;
864	int			 ret = 1;
865	u_int			 i;
866
867	/*
868	 * It is required to setup a _dummy_ RDMA address. It also makes
869	 * some PCI-E chipsets resend dropped messages.
870	 */
871	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
872	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
873	rc.rc_result = 0xffffffff;
874	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
875	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
876	rc.rc_enable = htobe32(do_enable);
877
878	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
879	*status = 0;
880
881	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
882	    BUS_DMASYNC_PREREAD);
883
884	/* Send command */
885	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));
886
887	for (i = 0; i < 20; i++) {
888		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
889		    BUS_DMASYNC_POSTREAD);
890
891		if (*status == 0xffffffff) {
892			ret = 0;
893			break;
894		}
895
896		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
897		    BUS_DMASYNC_PREREAD);
898		delay(1000);
899	}
900
901	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
902	    DEVNAME(sc), __func__,
903	    do_enable ? "enabled" : "disabled", i, betoh32(*status));
904
905	return (ret);
906}
907
908int
909myx_media_change(struct ifnet *ifp)
910{
911	/* ignore */
912	return (0);
913}
914
915void
916myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
917{
918	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
919	u_int32_t		 sts;
920
921	imr->ifm_active = IFM_ETHER | IFM_AUTO;
922	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
923		imr->ifm_status = 0;
924		return;
925	}
926
927	myx_sts_enter(sc);
928	sts = sc->sc_sts->ms_linkstate;
929	myx_sts_leave(sc);
930
931	myx_link_state(sc, sts);
932
933	imr->ifm_status = IFM_AVALID;
934	if (!LINK_STATE_IS_UP(ifp->if_link_state))
935		return;
936
937	imr->ifm_active |= IFM_FDX | IFM_FLOW |
938	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
939	imr->ifm_status |= IFM_ACTIVE;
940}
941
942void
943myx_link_state(struct myx_softc *sc, u_int32_t sts)
944{
945	struct ifnet		*ifp = &sc->sc_ac.ac_if;
946	int			 link_state = LINK_STATE_DOWN;
947
948	if (betoh32(sts) == MYXSTS_LINKUP)
949		link_state = LINK_STATE_FULL_DUPLEX;
950	if (ifp->if_link_state != link_state) {
951		ifp->if_link_state = link_state;
952		if_link_state_change(ifp);
953		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
954		    IF_Gbps(10) : 0;
955	}
956}
957
958void
959myx_watchdog(struct ifnet *ifp)
960{
961	return;
962}
963
964int
965myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
966{
967	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
968	struct ifaddr		*ifa = (struct ifaddr *)data;
969	struct ifreq		*ifr = (struct ifreq *)data;
970	int			 s, error = 0;
971
972	s = splnet();
973
974	switch (cmd) {
975	case SIOCSIFADDR:
976		ifp->if_flags |= IFF_UP;
977		if (ifa->ifa_addr->sa_family == AF_INET)
978			arp_ifinit(&sc->sc_ac, ifa);
979		/* FALLTHROUGH */
980
981	case SIOCSIFFLAGS:
982		if (ISSET(ifp->if_flags, IFF_UP)) {
983			if (ISSET(ifp->if_flags, IFF_RUNNING))
984				error = ENETRESET;
985			else
986				myx_up(sc);
987		} else {
988			if (ISSET(ifp->if_flags, IFF_RUNNING))
989				myx_down(sc);
990		}
991		break;
992
993	case SIOCGIFMEDIA:
994	case SIOCSIFMEDIA:
995		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
996		break;
997
998	case SIOCGIFRXR:
999		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1000		break;
1001
1002	default:
1003		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1004	}
1005
1006	if (error == ENETRESET) {
1007		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1008		    (IFF_UP | IFF_RUNNING))
1009			myx_iff(sc);
1010		error = 0;
1011	}
1012
1013	splx(s);
1014	return (error);
1015}
1016
1017int
1018myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
1019{
1020	struct if_rxring_info ifr[2];
1021
1022	memset(ifr, 0, sizeof(ifr));
1023
1024	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
1025	ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;
1026
1027	ifr[1].ifr_size = MYX_RXBIG_SIZE;
1028	ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;
1029
1030	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
1031}
1032
1033void
1034myx_up(struct myx_softc *sc)
1035{
1036	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1037	struct myx_buf		*mb;
1038	struct myx_cmd		mc;
1039	bus_dmamap_t		map;
1040	size_t			size;
1041	u_int			maxpkt;
1042	u_int32_t		r;
1043	int			i;
1044
1045	memset(&mc, 0, sizeof(mc));
1046	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1047		printf("%s: failed to reset the device\n", DEVNAME(sc));
1048		return;
1049	}
1050
1051	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
1052	    64, MYXALIGN_CMD) != 0) {
1053		printf("%s: failed to allocate zero pad memory\n",
1054		    DEVNAME(sc));
1055		return;
1056	}
1057	memset(sc->sc_zerodma.mxm_kva, 0, 64);
1058	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1059	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1060
1061	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
1062	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
1063		printf("%s: failed to allocate pad DMA memory\n",
1064		    DEVNAME(sc));
1065		goto free_zero;
1066	}
1067	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1068	    sc->sc_paddma.mxm_map->dm_mapsize,
1069	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1070
1071	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
1072		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
1073		goto free_pad;
1074	}
1075
1076	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1077		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1078		goto free_pad;
1079	}
1080	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1081
1082	memset(&mc, 0, sizeof(mc));
1083	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1084		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1085		goto free_pad;
1086	}
1087	sc->sc_tx_ring_idx = 0;
1088	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1089	sc->sc_tx_free = sc->sc_tx_ring_count - 1;
1090	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1091	sc->sc_tx_count = 0;
1092	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1093	IFQ_SET_READY(&ifp->if_snd);
1094
1095	/* Allocate Interrupt Queue */
1096
1097	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1098	sc->sc_intrq_idx = 0;
1099
1100	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1101	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1102	    size, MYXALIGN_DATA) != 0) {
1103		goto free_pad;
1104	}
1105	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1106	map = sc->sc_intrq_dma.mxm_map;
1107	memset(sc->sc_intrq, 0, size);
1108	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1109	    BUS_DMASYNC_PREREAD);
1110
1111	memset(&mc, 0, sizeof(mc));
1112	mc.mc_data0 = htobe32(size);
1113	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1114		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1115		goto free_intrq;
1116	}
1117
1118	memset(&mc, 0, sizeof(mc));
1119	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1120	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1121	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1122		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1123		goto free_intrq;
1124	}
1125
1126	/*
1127	 * get interrupt offsets
1128	 */
1129
1130	memset(&mc, 0, sizeof(mc));
1131	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1132	    &sc->sc_irqclaimoff) != 0) {
1133		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1134		goto free_intrq;
1135	}
1136
1137	memset(&mc, 0, sizeof(mc));
1138	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1139	    &sc->sc_irqdeassertoff) != 0) {
1140		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1141		goto free_intrq;
1142	}
1143
1144	memset(&mc, 0, sizeof(mc));
1145	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1146	    &sc->sc_irqcoaloff) != 0) {
1147		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1148		goto free_intrq;
1149	}
1150
1151	/* Set an appropriate interrupt coalescing period */
1152	r = htobe32(MYX_IRQCOALDELAY);
1153	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1154
1155	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1156		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1157		goto free_intrq;
1158	}
1159
1160	memset(&mc, 0, sizeof(mc));
1161	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1162		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1163		goto free_intrq;
1164	}
1165
1166	memset(&mc, 0, sizeof(mc));
1167	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1168		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1169		goto free_intrq;
1170	}
1171
1172	memset(&mc, 0, sizeof(mc));
1173	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1174	    &sc->sc_tx_ring_offset) != 0) {
1175		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1176		goto free_intrq;
1177	}
1178
1179	memset(&mc, 0, sizeof(mc));
1180	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1181	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
1182		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1183		goto free_intrq;
1184	}
1185
1186	memset(&mc, 0, sizeof(mc));
1187	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1188	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
1189		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1190		goto free_intrq;
1191	}
1192
1193	/* Allocate Interrupt Data */
1194	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1195	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1196		printf("%s: failed to allocate status DMA memory\n",
1197		    DEVNAME(sc));
1198		goto free_intrq;
1199	}
1200	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1201	map = sc->sc_sts_dma.mxm_map;
1202	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1203	    BUS_DMASYNC_PREREAD);
1204
1205	memset(&mc, 0, sizeof(mc));
1206	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1207	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1208	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1209	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1210		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1211		goto free_sts;
1212	}
1213
1214	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1215
1216	memset(&mc, 0, sizeof(mc));
1217	mc.mc_data0 = htobe32(maxpkt);
1218	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1219		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1220		goto free_sts;
1221	}
1222
1223	for (i = 0; i < sc->sc_tx_ring_count; i++) {
1224		mb = myx_buf_alloc(sc, maxpkt, sc->sc_tx_nsegs,
1225		    sc->sc_tx_boundary, sc->sc_tx_boundary);
1226		if (mb == NULL)
1227			goto free_tx_bufs;
1228
1229		myx_buf_put(&sc->sc_tx_buf_free, mb);
1230	}
1231
1232	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
1233		goto free_tx_bufs;
1234
1235	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0)
1236		goto free_rx_ring_small;
1237
1238	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
1239		goto empty_rx_ring_small;
1240
1241	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0)
1242		goto free_rx_ring_big;
1243
1244	memset(&mc, 0, sizeof(mc));
1245	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
1246	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1247		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1248		goto empty_rx_ring_big;
1249	}
1250
1251	memset(&mc, 0, sizeof(mc));
1252	mc.mc_data0 = htobe32(16384);
1253	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1254		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1255		goto empty_rx_ring_big;
1256	}
1257
1258	mtx_enter(&sc->sc_sts_mtx);
1259	sc->sc_state = MYX_S_RUNNING;
1260	mtx_leave(&sc->sc_sts_mtx);
1261
1262	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1263		printf("%s: failed to start the device\n", DEVNAME(sc));
1264		goto empty_rx_ring_big;
1265	}
1266
1267	CLR(ifp->if_flags, IFF_OACTIVE);
1268	SET(ifp->if_flags, IFF_RUNNING);
1269	myx_iff(sc);
1270	myx_start(ifp);
1271
1272	return;
1273
1274empty_rx_ring_big:
1275	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1276free_rx_ring_big:
1277	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1278empty_rx_ring_small:
1279	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1280free_rx_ring_small:
1281	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1282free_tx_bufs:
1283	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
1284		myx_buf_free(sc, mb);
1285free_sts:
1286	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1287	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1288	myx_dmamem_free(sc, &sc->sc_sts_dma);
1289free_intrq:
1290	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1291	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1292	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1293free_pad:
1294	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1295	    sc->sc_paddma.mxm_map->dm_mapsize,
1296	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1297	myx_dmamem_free(sc, &sc->sc_paddma);
1298
1299	memset(&mc, 0, sizeof(mc));
1300	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1301		printf("%s: failed to reset the device\n", DEVNAME(sc));
1302	}
1303free_zero:
1304	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1305	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1306	myx_dmamem_free(sc, &sc->sc_zerodma);
1307}
1308
1309int
1310myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1311{
1312	struct myx_cmd		 mc;
1313
1314	memset(&mc, 0, sizeof(mc));
1315	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1316	    addr[2] << 8 | addr[3]);
1317	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1318
1319	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1320		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1321		return (-1);
1322	}
1323	return (0);
1324}
1325
1326void
1327myx_iff(struct myx_softc *sc)
1328{
1329	struct myx_cmd		mc;
1330	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1331	struct ether_multi	*enm;
1332	struct ether_multistep	step;
1333	u_int8_t *addr;
1334
1335	CLR(ifp->if_flags, IFF_ALLMULTI);
1336
1337	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1338	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1339		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1340		return;
1341	}
1342
1343	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1344		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1345		return;
1346	}
1347
1348	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1349		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1350		return;
1351	}
1352
1353	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1354	    sc->sc_ac.ac_multirangecnt > 0) {
1355		SET(ifp->if_flags, IFF_ALLMULTI);
1356		return;
1357	}
1358
1359	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1360	while (enm != NULL) {
1361		addr = enm->enm_addrlo;
1362
1363		memset(&mc, 0, sizeof(mc));
1364		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1365		    addr[2] << 8 | addr[3]);
1366		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1367		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1368			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1369			return;
1370		}
1371
1372		ETHER_NEXT_MULTI(step, enm);
1373	}
1374
1375	memset(&mc, 0, sizeof(mc));
1376	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1377		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1378		return;
1379	}
1380}
1381
1382void
1383myx_down(struct myx_softc *sc)
1384{
1385	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1386	volatile struct myx_status *sts = sc->sc_sts;
1387	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
1388	struct myx_buf		*mb;
1389	struct myx_cmd		 mc;
1390	int			 s;
1391	int			 ring;
1392
1393	myx_sts_enter(sc);
1394	sc->sc_linkdown = sts->ms_linkdown;
1395	sc->sc_state = MYX_S_DOWN;
1396
1397	memset(&mc, 0, sizeof(mc));
1398	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);
1399
1400	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1401	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1402	while (sc->sc_state != MYX_S_OFF)
1403		msleep(sts, &sc->sc_sts_mtx, 0, "myxdown", 0);
1404	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1405	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1406	mtx_leave(&sc->sc_sts_mtx);
1407
1408	s = splnet();
1409	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
1410		ifp->if_link_state = LINK_STATE_UNKNOWN;
1411		ifp->if_baudrate = 0;
1412		if_link_state_change(ifp);
1413	}
1414	splx(s);
1415
1416	memset(&mc, 0, sizeof(mc));
1417	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1418		printf("%s: failed to reset the device\n", DEVNAME(sc));
1419	}
1420
1421	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
1422
1423	for (ring = 0; ring < 2; ring++) {
1424		struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
1425
1426		timeout_del(&mrr->mrr_refill);
1427		myx_rx_empty(sc, mrr);
1428		myx_rx_free(sc, mrr);
1429	}
1430
1431	while ((mb = myx_buf_get(&sc->sc_tx_buf_list)) != NULL) {
1432		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1433		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1434		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1435		m_freem(mb->mb_m);
1436		myx_buf_free(sc, mb);
1437	}
1438
1439	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
1440		myx_buf_free(sc, mb);
1441
1442	/* the sleep shizz above already synced this dmamem */
1443	myx_dmamem_free(sc, &sc->sc_sts_dma);
1444
1445	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1446	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1447	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1448
1449	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1450	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1451	myx_dmamem_free(sc, &sc->sc_paddma);
1452
1453	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1454	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1455	myx_dmamem_free(sc, &sc->sc_zerodma);
1456}
1457
1458void
1459myx_write_txd_tail(struct myx_softc *sc, struct myx_buf *mb, u_int8_t flags,
1460    u_int32_t offset, u_int idx)
1461{
1462	struct myx_tx_desc		txd;
1463	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
1464	bus_dmamap_t			map = mb->mb_map;
1465	int				i;
1466
1467	for (i = 1; i < map->dm_nsegs; i++) {
1468		memset(&txd, 0, sizeof(txd));
1469		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
1470		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
1471		txd.tx_flags = flags;
1472
1473		myx_bus_space_write(sc,
1474		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1475		    &txd, sizeof(txd));
1476	}
1477
1478	/* pad runt frames */
1479	if (map->dm_mapsize < 60) {
1480		memset(&txd, 0, sizeof(txd));
1481		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
1482		txd.tx_length = htobe16(60 - map->dm_mapsize);
1483		txd.tx_flags = flags;
1484
1485		myx_bus_space_write(sc,
1486		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1487		    &txd, sizeof(txd));
1488	}
1489}
1490
1491void
1492myx_start(struct ifnet *ifp)
1493{
1494	struct myx_tx_desc		txd;
1495	SIMPLEQ_HEAD(, myx_buf)		list = SIMPLEQ_HEAD_INITIALIZER(list);
1496	struct myx_softc		*sc = ifp->if_softc;
1497	bus_dmamap_t			map;
1498	struct myx_buf			*mb, *firstmb;
1499	struct mbuf			*m;
1500	u_int32_t			offset = sc->sc_tx_ring_offset;
1501	u_int				idx, firstidx;
1502	u_int8_t			flags;
1503
1504	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
1505	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
1506	    IFQ_IS_EMPTY(&ifp->if_snd))
1507		return;
1508
1509	for (;;) {
1510		if (sc->sc_tx_free <= sc->sc_tx_nsegs ||
1511		    (mb = myx_buf_get(&sc->sc_tx_buf_free)) == NULL) {
1512			SET(ifp->if_flags, IFF_OACTIVE);
1513			break;
1514		}
1515
1516		IFQ_DEQUEUE(&ifp->if_snd, m);
1517		if (m == NULL) {
1518			myx_buf_put(&sc->sc_tx_buf_free, mb);
1519			break;
1520		}
1521
1522		if (myx_load_buf(sc, mb, m) != 0) {
1523			m_freem(m);
1524			myx_buf_put(&sc->sc_tx_buf_free, mb);
1525			ifp->if_oerrors++;
1526			continue;
1527		}
1528
1529#if NBPFILTER > 0
1530		if (ifp->if_bpf)
1531			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1532#endif
1533
1534		mb->mb_m = m;
1535
1536		map = mb->mb_map;
1537		bus_dmamap_sync(sc->sc_dmat, map, 0,
1538		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1539
1540		SIMPLEQ_INSERT_TAIL(&list, mb, mb_entry);
1541
1542		atomic_sub_int(&sc->sc_tx_free, map->dm_nsegs +
1543		    (map->dm_mapsize < 60 ? 1 : 0));
1544	}
1545
1546	/* post the first descriptor last */
1547	firstmb = SIMPLEQ_FIRST(&list);
1548	if (firstmb == NULL)
1549		return;
1550
1551	SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
1552	myx_buf_put(&sc->sc_tx_buf_list, firstmb);
1553
1554	idx = firstidx = sc->sc_tx_ring_idx;
1555	idx += firstmb->mb_map->dm_nsegs +
1556	    (firstmb->mb_map->dm_mapsize < 60 ? 1 : 0);
1557	idx %= sc->sc_tx_ring_count;
1558
1559	while ((mb = SIMPLEQ_FIRST(&list)) != NULL) {
1560		SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
1561		myx_buf_put(&sc->sc_tx_buf_list, mb);
1562
1563		map = mb->mb_map;
1564
1565		flags = MYXTXD_FLAGS_NO_TSO;
1566		if (map->dm_mapsize < 1520)
1567			flags |= MYXTXD_FLAGS_SMALL;
1568
1569		memset(&txd, 0, sizeof(txd));
1570		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
1571		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
1572		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1573		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
1574		myx_bus_space_write(sc,
1575		    offset + sizeof(txd) * idx, &txd, sizeof(txd));
1576
1577		myx_write_txd_tail(sc, mb, flags, offset, idx);
1578
1579		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1580		idx %= sc->sc_tx_ring_count;
1581	}
1582	sc->sc_tx_ring_idx = idx;
1583
1584	/* go back and post first mb */
1585	map = firstmb->mb_map;
1586
1587	flags = MYXTXD_FLAGS_NO_TSO;
1588	if (map->dm_mapsize < 1520)
1589		flags |= MYXTXD_FLAGS_SMALL;
1590
1591	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
1592	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
1593	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1594	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
1595
1596	/* make sure the first descriptor is seen after the others */
1597	myx_write_txd_tail(sc, firstmb, flags, offset, firstidx);
1598
1599	myx_bus_space_write(sc,
1600	    offset + sizeof(txd) * firstidx, &txd,
1601	    sizeof(txd) - sizeof(myx_bus_t));
1602
1603	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
1604	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);
1605
1606	myx_bus_space_write(sc,
1607	    offset + sizeof(txd) * (firstidx + 1) - sizeof(myx_bus_t),
1608	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
1609	    sizeof(myx_bus_t));
1610
1611	bus_space_barrier(sc->sc_memt, sc->sc_memh,
1612	    offset + sizeof(txd) * firstidx, sizeof(txd),
1613	    BUS_SPACE_BARRIER_WRITE);
1614}
1615
1616int
1617myx_load_buf(struct myx_softc *sc, struct myx_buf *mb, struct mbuf *m)
1618{
1619	bus_dma_tag_t			dmat = sc->sc_dmat;
1620	bus_dmamap_t			dmap = mb->mb_map;
1621
1622	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
1623	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
1624	case 0:
1625		break;
1626
1627	case EFBIG: /* mbuf chain is too fragmented */
1628		if (m_defrag(m, M_DONTWAIT) == 0 &&
1629		    bus_dmamap_load_mbuf(dmat, dmap, m,
1630		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
1631			break;
1632	default:
1633		return (1);
1634	}
1635
1636	mb->mb_m = m;
1637	return (0);
1638}
1639
1640int
1641myx_intr(void *arg)
1642{
1643	struct myx_softc	*sc = (struct myx_softc *)arg;
1644	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1645	volatile struct myx_status *sts = sc->sc_sts;
1646	enum myx_state		 state = MYX_S_RUNNING;
1647	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
1648	u_int32_t		 data, link = 0xffffffff;
1649	u_int8_t		 valid = 0;
1650
1651	mtx_enter(&sc->sc_sts_mtx);
1652	if (sc->sc_state == MYX_S_OFF) {
1653		mtx_leave(&sc->sc_sts_mtx);
1654		return (0);
1655	}
1656
1657	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1658	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1659
1660	valid = sts->ms_isvalid;
1661	if (valid == 0x0) {
1662		myx_sts_leave(sc);
1663		return (0);
1664	}
1665
1666	if (sc->sc_intx) {
1667		data = htobe32(0);
1668		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
1669		    sc->sc_irqdeassertoff, &data, sizeof(data));
1670	}
1671	sts->ms_isvalid = 0;
1672
1673	do {
1674		data = sts->ms_txdonecnt;
1675
1676		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1677		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
1678		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1679	} while (sts->ms_isvalid);
1680
1681	if (sts->ms_statusupdated) {
1682		link = sts->ms_linkstate;
1683
1684		if (sc->sc_state == MYX_S_DOWN &&
1685		    sc->sc_linkdown != sts->ms_linkdown)
1686			state = MYX_S_DOWN;
1687	}
1688	myx_sts_leave(sc);
1689
1690	data = betoh32(data);
1691	if (data != sc->sc_tx_count)
1692		myx_txeof(sc, data);
1693
1694	data = htobe32(3);
1695	if (valid & 0x1) {
1696		myx_rxeof(sc);
1697
1698		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
1699		    sc->sc_irqclaimoff, &data, sizeof(data));
1700	}
1701	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
1702	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));
1703
1704	if (state == MYX_S_DOWN) {
1705		/* myx_down is waiting for us */
1706		mtx_enter(&sc->sc_sts_mtx);
1707		sc->sc_state = MYX_S_OFF;
1708		wakeup(sts);
1709		mtx_leave(&sc->sc_sts_mtx);
1710
1711		return (1);
1712	}
1713
1714	if (link != 0xffffffff) {
1715		KERNEL_LOCK();
1716		myx_link_state(sc, link);
1717		KERNEL_UNLOCK();
1718	}
1719
1720	if (ISSET(ifp->if_flags, IFF_OACTIVE)) {
1721		KERNEL_LOCK();
1722		CLR(ifp->if_flags, IFF_OACTIVE);
1723		myx_start(ifp);
1724		KERNEL_UNLOCK();
1725	}
1726
1727	return (1);
1728}
1729
1730void
1731myx_refill(void *xmrr)
1732{
1733	struct myx_rx_ring *mrr = xmrr;
1734	struct myx_softc *sc = mrr->mrr_softc;
1735
1736	myx_rx_fill(sc, mrr);
1737
1738	if (mrr->mrr_prod == mrr->mrr_cons)
1739		timeout_add(&mrr->mrr_refill, 1);
1740}
1741
1742void
1743myx_txeof(struct myx_softc *sc, u_int32_t done_count)
1744{
1745	struct ifnet *ifp = &sc->sc_ac.ac_if;
1746	struct myx_buf *mb;
1747	struct mbuf *m;
1748	bus_dmamap_t map;
1749	u_int free = 0;
1750
1751	do {
1752		mb = myx_buf_get(&sc->sc_tx_buf_list);
1753		if (mb == NULL) {
1754			printf("oh noes, no mb!\n");
1755			break;
1756		}
1757
1758		m = mb->mb_m;
1759		map = mb->mb_map;
1760
1761		free += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1762
1763		bus_dmamap_sync(sc->sc_dmat, map, 0,
1764		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1765
1766		bus_dmamap_unload(sc->sc_dmat, map);
1767		ifp->if_opackets++;
1768
1769		m_freem(m);
1770		myx_buf_put(&sc->sc_tx_buf_free, mb);
1771	} while (++sc->sc_tx_count != done_count);
1772
1773	if (free)
1774		atomic_add_int(&sc->sc_tx_free, free);
1775}
1776
1777void
1778myx_rxeof(struct myx_softc *sc)
1779{
1780	static const struct myx_intrq_desc zerodesc = { 0, 0 };
1781	struct ifnet *ifp = &sc->sc_ac.ac_if;
1782	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1783	struct myx_rx_ring *mrr;
1784	struct myx_rx_slot *mrs;
1785	struct mbuf *m;
1786	int ring;
1787	u_int rxfree[2] = { 0 , 0 };
1788	u_int len;
1789
1790	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1791	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1792
1793	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
1794		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;
1795
1796		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
1797			sc->sc_intrq_idx = 0;
1798
1799		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
1800		    MYX_RXSMALL : MYX_RXBIG;
1801
1802		mrr = &sc->sc_rx_ring[ring];
1803		mrs = &mrr->mrr_slots[mrr->mrr_cons];
1804
1805		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
1806			mrr->mrr_cons = 0;
1807
1808		bus_dmamap_sync(sc->sc_dmat, mrs->mrs_map, 0,
1809		    mrs->mrs_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1810		bus_dmamap_unload(sc->sc_dmat, mrs->mrs_map);
1811
1812		m = mrs->mrs_m;
1813		m->m_data += ETHER_ALIGN;
1814		m->m_pkthdr.len = m->m_len = len;
1815
1816		ml_enqueue(&ml, m);
1817
1818		rxfree[ring]++;
1819	}
1820
1821	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1822	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1823
1824	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
1825		if (rxfree[ring] == 0)
1826			continue;
1827
1828		mrr = &sc->sc_rx_ring[ring];
1829
1830		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
1831		myx_rx_fill(sc, mrr);
1832		if (mrr->mrr_prod == mrr->mrr_cons)
1833			timeout_add(&mrr->mrr_refill, 0);
1834	}
1835
1836	if_input(ifp, &ml);
1837}
1838
1839static int
1840myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
1841{
1842	struct myx_rx_desc rxd;
1843	struct myx_rx_slot *mrs;
1844	u_int32_t offset = mrr->mrr_offset;
1845	u_int p, first, fills;
1846
1847	first = p = mrr->mrr_prod;
1848	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
1849		return (slots);
1850
1851	if (++p >= sc->sc_rx_ring_count)
1852		p = 0;
1853
1854	for (fills = 1; fills < slots; fills++) {
1855		mrs = &mrr->mrr_slots[p];
1856
1857		if (myx_buf_fill(sc, mrs, mrr->mrr_mclget) != 0)
1858			break;
1859
1860		rxd.rx_addr = htobe64(mrs->mrs_map->dm_segs[0].ds_addr);
1861		myx_bus_space_write(sc, offset + p * sizeof(rxd),
1862		    &rxd, sizeof(rxd));
1863
1864		if (++p >= sc->sc_rx_ring_count)
1865			p = 0;
1866	}
1867
1868	mrr->mrr_prod = p;
1869
1870	/* make sure the first descriptor is seen after the others */
1871	if (fills > 1) {
1872		bus_space_barrier(sc->sc_memt, sc->sc_memh,
1873		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
1874		    BUS_SPACE_BARRIER_WRITE);
1875	}
1876
1877	mrs = &mrr->mrr_slots[first];
1878	rxd.rx_addr = htobe64(mrs->mrs_map->dm_segs[0].ds_addr);
1879	myx_bus_space_write(sc, offset + first * sizeof(rxd),
1880	    &rxd, sizeof(rxd));
1881
1882	return (slots - fills);
1883}
1884
1885int
1886myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
1887{
1888	struct myx_rx_desc rxd;
1889	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
1890	struct myx_rx_slot *mrs;
1891	u_int32_t offset = mrr->mrr_offset;
1892	int rv;
1893	int i;
1894
1895	mrr->mrr_slots = mallocarray(sizeof(*mrs), sc->sc_rx_ring_count,
1896	    M_DEVBUF, M_WAITOK);
1897	if (mrr->mrr_slots == NULL)
1898		return (ENOMEM);
1899
1900	memset(&rxd, 0xff, sizeof(rxd));
1901	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1902		mrs = &mrr->mrr_slots[i];
1903		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1904		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mrs->mrs_map);
1905		if (rv != 0)
1906			goto destroy;
1907
1908		myx_bus_space_write(sc, offset + i * sizeof(rxd),
1909		    &rxd, sizeof(rxd));
1910	}
1911
1912	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1913	mrr->mrr_prod = mrr->mrr_cons = 0;
1914
1915	return (0);
1916
1917destroy:
1918	while (i-- > 0) {
1919		mrs = &mrr->mrr_slots[i];
1920		bus_dmamap_destroy(sc->sc_dmat, mrs->mrs_map);
1921	}
1922	free(mrr->mrr_slots, M_DEVBUF, sizeof(*mrs) * sc->sc_rx_ring_count);
1923	return (rv);
1924}
1925
1926static inline int
1927myx_rx_ring_enter(struct myx_rx_ring *mrr)
1928{
1929	return (atomic_inc_int_nv(&mrr->mrr_running) == 1);
1930}
1931
1932static inline int
1933myx_rx_ring_leave(struct myx_rx_ring *mrr)
1934{
1935	if (atomic_cas_uint(&mrr->mrr_running, 1, 0) == 1)
1936		return (1);
1937
1938	mrr->mrr_running = 1;
1939
1940	return (0);
1941}
1942
1943int
1944myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
1945{
1946	u_int slots;
1947
1948	slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
1949	if (slots == 0)
1950		return (1);
1951
1952	slots = myx_rx_fill_slots(sc, mrr, slots);
1953	if (slots > 0)
1954		if_rxr_put(&mrr->mrr_rxr, slots);
1955
1956	return (0);
1957}
1958
1959void
1960myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
1961{
1962	struct myx_rx_slot *mrs;
1963
1964	while (mrr->mrr_cons != mrr->mrr_prod) {
1965		mrs = &mrr->mrr_slots[mrr->mrr_cons];
1966
1967		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
1968			mrr->mrr_cons = 0;
1969
1970		bus_dmamap_sync(sc->sc_dmat, mrs->mrs_map, 0,
1971		    mrs->mrs_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1972		bus_dmamap_unload(sc->sc_dmat, mrs->mrs_map);
1973		m_freem(mrs->mrs_m);
1974	}
1975
1976	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1977}
1978
1979void
1980myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
1981{
1982	struct myx_rx_slot *mrs;
1983	int i;
1984
1985	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1986		mrs = &mrr->mrr_slots[i];
1987		bus_dmamap_destroy(sc->sc_dmat, mrs->mrs_map);
1988	}
1989
1990	free(mrr->mrr_slots, M_DEVBUF, sizeof(*mrs) * sc->sc_rx_ring_count);
1991}
1992
1993struct mbuf *
1994myx_mcl_small(void)
1995{
1996	struct mbuf *m;
1997
1998	m = MCLGETI(NULL, M_DONTWAIT, NULL, MYX_RXSMALL_SIZE);
1999	if (m == NULL)
2000		return (NULL);
2001
2002	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;
2003
2004	return (m);
2005}
2006
2007struct mbuf *
2008myx_mcl_big(void)
2009{
2010	struct mbuf *m;
2011	void *mcl;
2012
2013	MGETHDR(m, M_DONTWAIT, MT_DATA);
2014	if (m == NULL)
2015		return (NULL);
2016
2017	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
2018	if (mcl == NULL) {
2019		m_free(m);
2020		return (NULL);
2021	}
2022
2023	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, m_extfree_pool, myx_mcl_pool);
2024	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;
2025
2026	return (m);
2027}
2028
2029int
2030myx_buf_fill(struct myx_softc *sc, struct myx_rx_slot *mrs,
2031    struct mbuf *(*mclget)(void))
2032{
2033	struct mbuf *m;
2034	int rv;
2035
2036	m = (*mclget)();
2037	if (m == NULL)
2038		return (ENOMEM);
2039
2040	rv = bus_dmamap_load_mbuf(sc->sc_dmat, mrs->mrs_map, m, BUS_DMA_NOWAIT);
2041	if (rv != 0) {
2042		m_freem(m);
2043		return (rv);
2044	}
2045
2046	bus_dmamap_sync(sc->sc_dmat, mrs->mrs_map, 0,
2047	    mrs->mrs_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2048
2049	mrs->mrs_m = m;
2050
2051	return (0);
2052}
2053
2054struct myx_buf *
2055myx_buf_alloc(struct myx_softc *sc, bus_size_t size, int nsegs,
2056    bus_size_t maxsegsz, bus_size_t boundary)
2057{
2058	struct myx_buf *mb;
2059
2060	mb = pool_get(myx_buf_pool, PR_WAITOK);
2061	if (mb == NULL)
2062		return (NULL);
2063
2064	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, boundary,
2065	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mb->mb_map) != 0) {
2066		pool_put(myx_buf_pool, mb);
2067		return (NULL);
2068	}
2069
2070	return (mb);
2071}
2072
2073void
2074myx_buf_free(struct myx_softc *sc, struct myx_buf *mb)
2075{
2076	bus_dmamap_destroy(sc->sc_dmat, mb->mb_map);
2077	pool_put(myx_buf_pool, mb);
2078}
2079
2080struct myx_buf *
2081myx_buf_get(struct myx_buf_list *mbl)
2082{
2083	struct myx_buf *mb;
2084
2085	mtx_enter(&mbl->mbl_mtx);
2086	mb = SIMPLEQ_FIRST(&mbl->mbl_q);
2087	if (mb != NULL)
2088		SIMPLEQ_REMOVE_HEAD(&mbl->mbl_q, mb_entry);
2089	mtx_leave(&mbl->mbl_mtx);
2090
2091	return (mb);
2092}
2093
2094int
2095myx_bufs_empty(struct myx_buf_list *mbl)
2096{
2097	int rv;
2098
2099	mtx_enter(&mbl->mbl_mtx);
2100	rv = SIMPLEQ_EMPTY(&mbl->mbl_q);
2101	mtx_leave(&mbl->mbl_mtx);
2102
2103	return (rv);
2104}
2105
2106void
2107myx_buf_put(struct myx_buf_list *mbl, struct myx_buf *mb)
2108{
2109	mtx_enter(&mbl->mbl_mtx);
2110	SIMPLEQ_INSERT_TAIL(&mbl->mbl_q, mb, mb_entry);
2111	mtx_leave(&mbl->mbl_mtx);
2112}
2113
2114void
2115myx_bufs_init(struct myx_buf_list *mbl)
2116{
2117	SIMPLEQ_INIT(&mbl->mbl_q);
2118	mtx_init(&mbl->mbl_mtx, IPL_NET);
2119}
2120