/* if_myx.c revision 1.60 */
1/*	$OpenBSD: if_myx.c,v 1.60 2014/07/10 07:02:50 dlg Exp $	*/
2
3/*
4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/*
20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21 */
22
23#include "bpfilter.h"
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/sockio.h>
28#include <sys/mbuf.h>
29#include <sys/kernel.h>
30#include <sys/socket.h>
31#include <sys/malloc.h>
32#include <sys/timeout.h>
33#include <sys/proc.h>
34#include <sys/device.h>
35#include <sys/queue.h>
36
37#include <machine/bus.h>
38#include <machine/intr.h>
39
40#include <net/if.h>
41#include <net/if_dl.h>
42#include <net/if_media.h>
43#include <net/if_types.h>
44
45#if NBPFILTER > 0
46#include <net/bpf.h>
47#endif
48
49#ifdef INET
50#include <netinet/in.h>
51#include <netinet/if_ether.h>
52#endif
53
54#include <dev/pci/pcireg.h>
55#include <dev/pci/pcivar.h>
56#include <dev/pci/pcidevs.h>
57
58#include <dev/pci/if_myxreg.h>
59
#ifdef MYX_DEBUG
/*
 * Debug categories are a bitmask tested by DPRINTF() against myx_debug.
 * They must be distinct bits: the previous (2<<0)/(3<<0) values made
 * MYXDBG_INTR (3) overlap MYXDBG_INIT|MYXDBG_CMD, so masking a single
 * category in myx_debug could not work.
 */
#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
#define MYXDBG_CMD	(1<<1)	/* commands */
#define MYXDBG_INTR	(1<<2)	/* interrupts */
#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
int myx_debug = MYXDBG_ALL;
#define DPRINTF(_lvl, _arg...)	do {					\
	if (myx_debug & (_lvl))						\
		printf(_arg);						\
} while (0)
#else
#define DPRINTF(_lvl, arg...)
#endif

/* shorthand for the autoconf device name, e.g. "myx0" */
#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
75
/*
 * A chunk of DMA-safe memory: the dmamap it is loaded into, its single
 * backing segment, and the kernel virtual address it is mapped at.
 * Created and destroyed with myx_dmamem_alloc()/myx_dmamem_free().
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* map the memory is loaded into */
	bus_dma_segment_t	 mxm_seg;	/* backing DMA segment */
	int			 mxm_nsegs;	/* segment count (always 1 here) */
	size_t			 mxm_size;	/* allocation size in bytes */
	caddr_t			 mxm_kva;	/* kernel virtual address */
};
83
/* A packet buffer: an mbuf and the dmamap its data is loaded into. */
struct myx_buf {
	SIMPLEQ_ENTRY(myx_buf)	 mb_entry;	/* myx_buf_list linkage */
	bus_dmamap_t		 mb_map;	/* map for mb_m's data */
	struct mbuf		*mb_m;		/* the packet itself */
};

/* A mutex-protected queue of myx_bufs. */
struct myx_buf_list {
	SIMPLEQ_HEAD(, myx_buf)	mbl_q;
	struct mutex		mbl_mtx;	/* protects mbl_q */
};

/* Pool backing myx_buf allocations, created by the first attach. */
struct pool *myx_buf_pool;

/*
 * Per-ring serialization; mrl_running appears to be an activity count
 * used by myx_ring_enter()/myx_ring_leave() (not visible in this chunk).
 */
struct myx_ring_lock {
	struct mutex		mrl_mtx;
	u_int			mrl_running;
};
101
/* Interface lifecycle; presumably driven by myx_up()/myx_down(). */
enum myx_state {
	MYX_S_OFF = 0,		/* not brought up yet */
	MYX_S_RUNNING,		/* interface is up and running */
	MYX_S_DOWN		/* being taken down */
};
107
/*
 * Per-device softc.  The receive side is doubled: index 0 (MYX_RXSMALL)
 * tracks MCLBYTES-sized buffers, index 1 (MYX_RXBIG) the jumbo buffers;
 * all the [2] arrays below are indexed the same way.
 */
struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;		/* ethernet interface state */

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;	/* BAR0 register window */
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;	/* size of the mapped window */

	struct myx_dmamem	 sc_zerodma;	/* 64 bytes of zeroes for the chip */
	struct myx_dmamem	 sc_cmddma;	/* command/boot response mailbox */
	struct myx_dmamem	 sc_paddma;	/* dummy RDMA target */

	struct myx_dmamem	 sc_sts_dma;	/* status block written by the chip */
	volatile struct myx_status	*sc_sts;
	struct mutex		 sc_sts_mtx;	/* guards sc_sts accesses */

	int			 sc_intx;	/* 1 if legacy INTx, 0 if MSI */
	void			*sc_irqh;
	/* register offsets queried from the firmware in myx_up() */
	u_int32_t		 sc_irqcoaloff;
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	struct myx_dmamem	 sc_intrq_dma;	/* interrupt/event queue */
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
	struct myx_ring_lock	 sc_rx_ring_lock[2];
	u_int32_t		 sc_rx_ring_offset[2];
	struct myx_buf_list	 sc_rx_buf_free[2];
	struct myx_buf_list	 sc_rx_buf_list[2];
	u_int			 sc_rx_ring_idx[2];
	struct if_rxring	 sc_rx_ring[2];
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct timeout		 sc_refill;	/* drives myx_refill() */

	/* 4096 with the aligned firmware, 2048 with the unaligned one */
	bus_size_t		 sc_tx_boundary;
	u_int			 sc_tx_ring_count;
	struct myx_ring_lock	 sc_tx_ring_lock;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_free;
	struct myx_buf_list	 sc_tx_buf_free;
	struct myx_buf_list	 sc_tx_buf_list;
	u_int			 sc_tx_ring_idx;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;
};
167
/* autoconf glue */
int	 myx_match(struct device *, void *, void *);
void	 myx_attach(struct device *, struct device *, void *);
/* board identification and firmware bring-up */
int	 myx_query(struct myx_softc *sc, char *, size_t);
u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
void	 myx_attachhook(void *);
int	 myx_loadfirmware(struct myx_softc *, const char *);
int	 myx_probe_firmware(struct myx_softc *);

/* raw access to the register window */
void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);

/* widest raw bus write the platform supports in one access */
#if defined(__LP64__)
#define myx_bus_space_write bus_space_write_raw_region_8
typedef u_int64_t myx_bus_t;
#else
#define myx_bus_space_write bus_space_write_raw_region_4
typedef u_int32_t myx_bus_t;
#endif

/* firmware mailbox commands and boot */
int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
int	 myx_boot(struct myx_softc *, u_int32_t);

int	 myx_rdma(struct myx_softc *, u_int);
int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
	    bus_size_t, u_int align);
void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
/* ifnet/ifmedia entry points */
int	 myx_media_change(struct ifnet *);
void	 myx_media_status(struct ifnet *, struct ifmediareq *);
void	 myx_link_state(struct myx_softc *, u_int32_t);
void	 myx_watchdog(struct ifnet *);
int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
void	 myx_up(struct myx_softc *);
void	 myx_iff(struct myx_softc *);
void	 myx_down(struct myx_softc *);

/* datapath */
void	 myx_start(struct ifnet *);
void	 myx_write_txd_tail(struct myx_softc *, struct myx_buf *, u_int8_t,
	    u_int32_t, u_int);
int	 myx_load_buf(struct myx_softc *, struct myx_buf *, struct mbuf *);
int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
int	 myx_intr(void *);
int	 myx_rxeof(struct myx_softc *);
void	 myx_txeof(struct myx_softc *, u_int32_t);

/* buffer management */
struct myx_buf *	myx_buf_alloc(struct myx_softc *, bus_size_t, int,
			    bus_size_t, bus_size_t);
void			myx_buf_free(struct myx_softc *, struct myx_buf *);
void			myx_bufs_init(struct myx_buf_list *);
int			myx_bufs_empty(struct myx_buf_list *);
struct myx_buf *	myx_buf_get(struct myx_buf_list *);
void			myx_buf_put(struct myx_buf_list *, struct myx_buf *);
struct myx_buf *	myx_buf_fill(struct myx_softc *, int);

void			myx_rx_zero(struct myx_softc *, int);
int			myx_rx_fill(struct myx_softc *, int);
void			myx_refill(void *);

/* ring serialization */
void			myx_ring_lock_init(struct myx_ring_lock *);
int			myx_ring_enter(struct myx_ring_lock *);
int			myx_ring_leave(struct myx_ring_lock *);
229
230static inline void
231myx_sts_enter(struct myx_softc *sc)
232{
233	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
234
235	mtx_enter(&sc->sc_sts_mtx);
236	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
237	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
238}
239
240static inline void
241myx_sts_leave(struct myx_softc *sc)
242{
243	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
244
245	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
246	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
247	mtx_leave(&sc->sc_sts_mtx);
248}
249
/* autoconf(9) glue */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

/* PCI IDs of the supported Myri-10G board variants */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
261
262int
263myx_match(struct device *parent, void *match, void *aux)
264{
265	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
266}
267
268void
269myx_attach(struct device *parent, struct device *self, void *aux)
270{
271	struct myx_softc	*sc = (struct myx_softc *)self;
272	struct pci_attach_args	*pa = aux;
273	char			 part[32];
274	pcireg_t		 memtype;
275
276	sc->sc_pc = pa->pa_pc;
277	sc->sc_tag = pa->pa_tag;
278	sc->sc_dmat = pa->pa_dmat;
279
280	myx_ring_lock_init(&sc->sc_rx_ring_lock[MYX_RXSMALL]);
281	myx_bufs_init(&sc->sc_rx_buf_free[MYX_RXSMALL]);
282	myx_bufs_init(&sc->sc_rx_buf_list[MYX_RXSMALL]);
283	myx_ring_lock_init(&sc->sc_rx_ring_lock[MYX_RXBIG]);
284	myx_bufs_init(&sc->sc_rx_buf_free[MYX_RXBIG]);
285	myx_bufs_init(&sc->sc_rx_buf_list[MYX_RXBIG]);
286
287	myx_ring_lock_init(&sc->sc_tx_ring_lock);
288	myx_bufs_init(&sc->sc_tx_buf_free);
289	myx_bufs_init(&sc->sc_tx_buf_list);
290
291	timeout_set(&sc->sc_refill, myx_refill, sc);
292
293	mtx_init(&sc->sc_sts_mtx, IPL_NET);
294
295
296	/* Map the PCI memory space */
297	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
298	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
299	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
300		printf(": unable to map register memory\n");
301		return;
302	}
303
304	/* Get board details (mac/part) */
305	memset(part, 0, sizeof(part));
306	if (myx_query(sc, part, sizeof(part)) != 0)
307		goto unmap;
308
309	/* Map the interrupt */
310	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
311		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
312			printf(": unable to map interrupt\n");
313			goto unmap;
314		}
315		sc->sc_intx = 1;
316	}
317
318	printf(": %s, model %s, address %s\n",
319	    pci_intr_string(pa->pa_pc, sc->sc_ih),
320	    part[0] == '\0' ? "(unknown)" : part,
321	    ether_sprintf(sc->sc_ac.ac_enaddr));
322
323	/* this is sort of racy */
324	if (myx_buf_pool == NULL) {
325		myx_buf_pool = malloc(sizeof(*myx_buf_pool), M_DEVBUF,
326		    M_WAITOK);
327		if (myx_buf_pool == NULL) {
328			printf("%s: unable to allocate buf pool\n",
329			    DEVNAME(sc));
330			goto unmap;
331		}
332		pool_init(myx_buf_pool, sizeof(struct myx_buf),
333		    0, 0, 0, "myxbufs", &pool_allocator_nointr);
334	}
335
336	if (mountroothook_establish(myx_attachhook, sc) == NULL) {
337		printf("%s: unable to establish mountroot hook\n", DEVNAME(sc));
338		goto unmap;
339	}
340
341	return;
342
343 unmap:
344	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
345	sc->sc_mems = 0;
346}
347
348u_int
349myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
350{
351	u_int		i, j;
352	u_int8_t	digit;
353
354	memset(lladdr, 0, ETHER_ADDR_LEN);
355	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
356		if (mac[i] >= '0' && mac[i] <= '9')
357			digit = mac[i] - '0';
358		else if (mac[i] >= 'A' && mac[i] <= 'F')
359			digit = mac[i] - 'A' + 10;
360		else if (mac[i] >= 'a' && mac[i] <= 'f')
361			digit = mac[i] - 'a' + 10;
362		else
363			continue;
364		if ((j & 1) == 0)
365			digit <<= 4;
366		lladdr[j++/2] |= digit;
367	}
368
369	return (i);
370}
371
/*
 * Read the specs strings out of the card's register window to recover
 * the MAC address ("MAC=...") and part number ("PC=...").  Returns 0
 * on success, 1 if the firmware header lies outside the mapped BAR.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	/* the header's own offset is stored at a fixed position */
	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	/* walk the NUL-separated "KEY=value" strings */
	for (i = 0; i < len; i++) {
		maxlen = len - i;
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip to the end of the current string; the outer
		 * loop's i++ then steps over its NUL terminator */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
413
/*
 * Load the named firmware image from the filesystem, validate it
 * against the generic header embedded in the image, copy it into the
 * card's SRAM and boot it.  Returns 0 on success, 1 on failure (the
 * error has already been printed).
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* the image stores the offset of its header at a fixed spot */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	/* accept only ethernet firmware of the version we were built for */
	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM, 256 bytes at a time */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF);
	return (ret);
}
468
/*
 * Deferred attach, run once root is mounted so loadfirmware(9) can
 * read the image from disk.  Boots the card, selects the
 * aligned/unaligned firmware variant, establishes the interrupt and
 * attaches the network interface.
 */
void
myx_attachhook(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* default for the aligned firmware; myx_probe_firmware() lowers
	 * this to 2048 if it has to fall back to the unaligned image */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_start = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = 9000;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* real queue length is set in myx_up() once the ring size is known */
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
540
/*
 * Decide whether the already-loaded aligned firmware is usable.
 * Narrow PCIe links (negotiated width <= 4) always are; otherwise run
 * the firmware's unaligned DMA self-tests and, if any fail, fall back
 * to the unaligned firmware image.  Returns 0 once a working firmware
 * is in place, 1 on failure.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		/* negotiated link width from the link status half */
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * NOTE(review): mc_data2 multiplies the 4096-byte buffer size by
	 * a per-test selector constant (0x10000 read, 0x1 write, 0x10001
	 * read/write); exact encoding is defined by the firmware ABI.
	 */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	/* the self-tests failed: switch to the unaligned firmware */
	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
621
/*
 * Copy len bytes out of the register window at off into ptr.  The read
 * barrier orders this access against earlier accesses to the window.
 */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}
629
/*
 * Copy len bytes from ptr into the register window at off, then issue
 * a write barrier so the data is pushed out before later accesses.
 */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}
637
/*
 * Allocate, map and load a single contiguous chunk of zeroed DMA
 * memory.  On success fills in *mxm and returns 0; on failure unwinds
 * whatever was set up and returns 1.
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
668
/*
 * Tear down everything myx_dmamem_alloc() set up, in reverse order.
 */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
677
/*
 * Issue a mailbox command to the firmware and busy-wait (up to ~20ms)
 * for the response, which the chip DMAs into sc_cmddma.  Returns 0 on
 * success and stores the response data in *r when r is non-NULL;
 * returns -1 on a firmware error or timeout.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	struct myx_response	*mr;
	u_int			 i;
	u_int32_t		 result, data;
#ifdef MYX_DEBUG
	/*
	 * Command names for DPRINTF only.
	 * NOTE(review): cmds[cmd] is indexed without a bounds check, so
	 * a cmd >= MYXCMD_MAX would read past the table in debug builds.
	 */
	static const char *cmds[MYXCMD_MAX] = {
		"CMD_NONE",
		"CMD_RESET",
		"CMD_GET_VERSION",
		"CMD_SET_INTRQDMA",
		"CMD_SET_BIGBUFSZ",
		"CMD_SET_SMALLBUFSZ",
		"CMD_GET_TXRINGOFF",
		"CMD_GET_RXSMALLRINGOFF",
		"CMD_GET_RXBIGRINGOFF",
		"CMD_GET_INTRACKOFF",
		"CMD_GET_INTRDEASSERTOFF",
		"CMD_GET_TXRINGSZ",
		"CMD_GET_RXRINGSZ",
		"CMD_SET_INTRQSZ",
		"CMD_SET_IFUP",
		"CMD_SET_IFDOWN",
		"CMD_SET_MTU",
		"CMD_GET_INTRCOALDELAYOFF",
		"CMD_SET_STATSINTVL",
		"CMD_SET_STATSDMA_OLD",
		"CMD_SET_PROMISC",
		"CMD_UNSET_PROMISC",
		"CMD_SET_LLADDR",
		"CMD_SET_FC",
		"CMD_UNSET_FC",
		"CMD_DMA_TEST",
		"CMD_SET_ALLMULTI",
		"CMD_UNSET_ALLMULTI",
		"CMD_SET_MCASTGROUP",
		"CMD_UNSET_MCASTGROUP",
		"CMD_UNSET_MCAST",
		"CMD_SET_STATSDMA",
		"CMD_UNALIGNED_DMA_TEST",
		"CMD_GET_UNALIGNED_STATUS"
	};
#endif

	mc->mc_cmd = htobe32(cmd);
	/* tell the firmware where to DMA its response */
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* 0xffffffff marks "no response yet" */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* poll for the response, 1ms at a time */
	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmds[cmd], i, result, data, data);

	/* a timeout leaves result at 0xffffffff, which also fails here */
	if (result != 0)
		return (-1);

	if (r != NULL)
		*r = data;
	return (0);
}
761
/*
 * Boot the firmware image previously written to SRAM: the boot command
 * tells the chip to copy length-8 bytes of the image (skipping its
 * first 8 bytes) to the boot offset and jump to it.  The chip signals
 * completion by DMAing 0xffffffff into the command buffer.  Busy-waits
 * up to ~200ms; returns 0 on success, 1 on timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* poll for the completion marker, 1ms at a time */
	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		/* 0xffffffff is endian-neutral, no byteswap needed */
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
805
/*
 * Enable or disable the firmware's dummy RDMA.  Completion is
 * signalled the same way as boot: 0xffffffff DMAed into the command
 * buffer.  Busy-waits up to ~20ms; returns 0 on success, 1 on timeout.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	/* poll for the completion marker, 1ms at a time */
	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
856
/*
 * ifmedia change callback.  The hardware only does 10G autoselect,
 * so there is nothing to reconfigure; always report success.
 */
int
myx_media_change(struct ifnet *ifp)
{
	return (0);
}
863
/*
 * ifmedia status callback: always report autoselect; when the
 * interface is running, derive validity and activity from the link
 * state the chip last wrote into the status block.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	u_int32_t		 sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	/* snapshot the chip's link state under the status lock */
	myx_sts_enter(sc);
	sts = sc->sc_sts->ms_linkstate;
	myx_sts_leave(sc);

	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	/* an up link is always full duplex with rx/tx flow control */
	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
890
891void
892myx_link_state(struct myx_softc *sc, u_int32_t sts)
893{
894	struct ifnet		*ifp = &sc->sc_ac.ac_if;
895	int			 link_state = LINK_STATE_DOWN;
896
897	if (betoh32(sts) == MYXSTS_LINKUP)
898		link_state = LINK_STATE_FULL_DUPLEX;
899	if (ifp->if_link_state != link_state) {
900		ifp->if_link_state = link_state;
901		if_link_state_change(ifp);
902		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
903		    IF_Gbps(10) : 0;
904	}
905}
906
/*
 * Transmit watchdog.  No timeout recovery is implemented, so this is
 * deliberately empty.
 */
void
myx_watchdog(struct ifnet *ifp)
{
}
912
/*
 * ioctl handler, run at splnet.  An ENETRESET from any case (or from
 * ether_ioctl) collapses into a single myx_iff() call at the end when
 * the interface is up and running.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *)data;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
#endif
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
967
968int
969myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
970{
971	struct if_rxring_info ifr[2];
972
973	memset(ifr, 0, sizeof(ifr));
974
975	ifr[0].ifr_size = MCLBYTES;
976	mtx_enter(&sc->sc_rx_ring_lock[0].mrl_mtx);
977	ifr[0].ifr_info = sc->sc_rx_ring[0];
978	mtx_leave(&sc->sc_rx_ring_lock[0].mrl_mtx);
979
980	ifr[1].ifr_size = 12 * 1024;
981	mtx_enter(&sc->sc_rx_ring_lock[1].mrl_mtx);
982	ifr[1].ifr_info = sc->sc_rx_ring[1];
983	mtx_leave(&sc->sc_rx_ring_lock[1].mrl_mtx);
984
985	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
986}
987
988void
989myx_up(struct myx_softc *sc)
990{
991	struct ifnet		*ifp = &sc->sc_ac.ac_if;
992	struct myx_buf		*mb;
993	struct myx_cmd		mc;
994	bus_dmamap_t		map;
995	size_t			size;
996	u_int			maxpkt;
997	u_int32_t		r;
998	int			i;
999
1000	memset(&mc, 0, sizeof(mc));
1001	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1002		printf("%s: failed to reset the device\n", DEVNAME(sc));
1003		return;
1004	}
1005
1006	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
1007	    64, MYXALIGN_CMD) != 0) {
1008		printf("%s: failed to allocate zero pad memory\n",
1009		    DEVNAME(sc));
1010		return;
1011	}
1012	memset(sc->sc_zerodma.mxm_kva, 0, 64);
1013	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1014	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1015
1016	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
1017	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
1018		printf("%s: failed to allocate pad DMA memory\n",
1019		    DEVNAME(sc));
1020		goto free_zero;
1021	}
1022	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1023	    sc->sc_paddma.mxm_map->dm_mapsize,
1024	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1025
1026	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
1027		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
1028		goto free_pad;
1029	}
1030
1031	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1032		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1033		goto free_pad;
1034	}
1035	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1036
1037	memset(&mc, 0, sizeof(mc));
1038	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1039		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1040		goto free_pad;
1041	}
1042	sc->sc_tx_ring_idx = 0;
1043	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1044	sc->sc_tx_free = sc->sc_tx_ring_count - 1;
1045	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1046	sc->sc_tx_count = 0;
1047	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1048	IFQ_SET_READY(&ifp->if_snd);
1049
1050	/* Allocate Interrupt Queue */
1051
1052	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1053	sc->sc_intrq_idx = 0;
1054
1055	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1056	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1057	    size, MYXALIGN_DATA) != 0) {
1058		goto free_pad;
1059	}
1060	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1061	map = sc->sc_intrq_dma.mxm_map;
1062	memset(sc->sc_intrq, 0, size);
1063	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1064	    BUS_DMASYNC_PREREAD);
1065
1066	memset(&mc, 0, sizeof(mc));
1067	mc.mc_data0 = htobe32(size);
1068	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1069		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1070		goto free_intrq;
1071	}
1072
1073	memset(&mc, 0, sizeof(mc));
1074	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1075	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1076	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1077		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1078		goto free_intrq;
1079	}
1080
1081	/*
1082	 * get interrupt offsets
1083	 */
1084
1085	memset(&mc, 0, sizeof(mc));
1086	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1087	    &sc->sc_irqclaimoff) != 0) {
1088		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1089		goto free_intrq;
1090	}
1091
1092	memset(&mc, 0, sizeof(mc));
1093	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1094	    &sc->sc_irqdeassertoff) != 0) {
1095		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1096		goto free_intrq;
1097	}
1098
1099	memset(&mc, 0, sizeof(mc));
1100	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1101	    &sc->sc_irqcoaloff) != 0) {
1102		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1103		goto free_intrq;
1104	}
1105
1106	/* Set an appropriate interrupt coalescing period */
1107	r = htobe32(MYX_IRQCOALDELAY);
1108	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1109
1110	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1111		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1112		goto free_intrq;
1113	}
1114
1115	memset(&mc, 0, sizeof(mc));
1116	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1117		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1118		goto free_intrq;
1119	}
1120
1121	memset(&mc, 0, sizeof(mc));
1122	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1123		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1124		goto free_intrq;
1125	}
1126
1127	memset(&mc, 0, sizeof(mc));
1128	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1129	    &sc->sc_tx_ring_offset) != 0) {
1130		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1131		goto free_intrq;
1132	}
1133
1134	memset(&mc, 0, sizeof(mc));
1135	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1136	    &sc->sc_rx_ring_offset[MYX_RXSMALL]) != 0) {
1137		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1138		goto free_intrq;
1139	}
1140
1141	memset(&mc, 0, sizeof(mc));
1142	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1143	    &sc->sc_rx_ring_offset[MYX_RXBIG]) != 0) {
1144		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1145		goto free_intrq;
1146	}
1147
1148	/* Allocate Interrupt Data */
1149	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1150	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1151		printf("%s: failed to allocate status DMA memory\n",
1152		    DEVNAME(sc));
1153		goto free_intrq;
1154	}
1155	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1156	map = sc->sc_sts_dma.mxm_map;
1157	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1158	    BUS_DMASYNC_PREREAD);
1159
1160	memset(&mc, 0, sizeof(mc));
1161	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1162	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1163	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1164	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1165		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1166		goto free_sts;
1167	}
1168
1169	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1170
1171	memset(&mc, 0, sizeof(mc));
1172	mc.mc_data0 = htobe32(maxpkt);
1173	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1174		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1175		goto free_sts;
1176	}
1177
1178	for (i = 0; i < sc->sc_tx_ring_count; i++) {
1179		mb = myx_buf_alloc(sc, maxpkt, sc->sc_tx_nsegs,
1180		    sc->sc_tx_boundary, sc->sc_tx_boundary);
1181		if (mb == NULL)
1182			goto free_tx_bufs;
1183
1184		myx_buf_put(&sc->sc_tx_buf_free, mb);
1185	}
1186
1187	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1188		mb = myx_buf_alloc(sc, MCLBYTES, 1, 4096, 4096);
1189		if (mb == NULL)
1190			goto free_rxsmall_bufs;
1191
1192		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXSMALL], mb);
1193	}
1194
1195	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1196		mb = myx_buf_alloc(sc, 12 * 1024, 1, 12 * 1024, 0);
1197		if (mb == NULL)
1198			goto free_rxbig_bufs;
1199
1200		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXBIG], mb);
1201	}
1202
1203	if_rxr_init(&sc->sc_rx_ring[MYX_RXBIG], 2, sc->sc_rx_ring_count - 2);
1204	if_rxr_init(&sc->sc_rx_ring[MYX_RXSMALL], 2, sc->sc_rx_ring_count - 2);
1205
1206	myx_rx_zero(sc, MYX_RXSMALL);
1207	if (myx_rx_fill(sc, MYX_RXSMALL) != 0) {
1208		printf("%s: failed to fill small rx ring\n", DEVNAME(sc));
1209		goto free_rxbig_bufs;
1210	}
1211
1212	myx_rx_zero(sc, MYX_RXBIG);
1213	if (myx_rx_fill(sc, MYX_RXBIG) != 0) {
1214		printf("%s: failed to fill big rx ring\n", DEVNAME(sc));
1215		goto free_rxsmall;
1216	}
1217
1218	memset(&mc, 0, sizeof(mc));
1219	mc.mc_data0 = htobe32(MCLBYTES - ETHER_ALIGN);
1220	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1221		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1222		goto free_rxbig;
1223	}
1224
1225	memset(&mc, 0, sizeof(mc));
1226	mc.mc_data0 = htobe32(16384);
1227	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1228		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1229		goto free_rxbig;
1230	}
1231
1232	mtx_enter(&sc->sc_sts_mtx);
1233	sc->sc_state = MYX_S_RUNNING;
1234	mtx_leave(&sc->sc_sts_mtx);
1235
1236	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1237		printf("%s: failed to start the device\n", DEVNAME(sc));
1238		goto free_rxbig;
1239	}
1240
1241	CLR(ifp->if_flags, IFF_OACTIVE);
1242	SET(ifp->if_flags, IFF_RUNNING);
1243	myx_iff(sc);
1244	myx_start(ifp);
1245
1246	return;
1247
1248free_rxbig:
1249	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
1250		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1251		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1252		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1253		m_freem(mb->mb_m);
1254		myx_buf_free(sc, mb);
1255	}
1256free_rxsmall:
1257	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
1258		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1259		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1260		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1261		m_freem(mb->mb_m);
1262		myx_buf_free(sc, mb);
1263	}
1264free_rxbig_bufs:
1265	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
1266		myx_buf_free(sc, mb);
1267free_rxsmall_bufs:
1268	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
1269		myx_buf_free(sc, mb);
1270free_tx_bufs:
1271	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
1272		myx_buf_free(sc, mb);
1273free_sts:
1274	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1275	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1276	myx_dmamem_free(sc, &sc->sc_sts_dma);
1277free_intrq:
1278	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1279	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1280	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1281free_pad:
1282	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1283	    sc->sc_paddma.mxm_map->dm_mapsize,
1284	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1285	myx_dmamem_free(sc, &sc->sc_paddma);
1286
1287	memset(&mc, 0, sizeof(mc));
1288	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1289		printf("%s: failed to reset the device\n", DEVNAME(sc));
1290	}
1291free_zero:
1292	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1293	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1294	myx_dmamem_free(sc, &sc->sc_zerodma);
1295}
1296
1297int
1298myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1299{
1300	struct myx_cmd		 mc;
1301
1302	memset(&mc, 0, sizeof(mc));
1303	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1304	    addr[2] << 8 | addr[3]);
1305	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1306
1307	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1308		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1309		return (-1);
1310	}
1311	return (0);
1312}
1313
1314void
1315myx_iff(struct myx_softc *sc)
1316{
1317	struct myx_cmd		mc;
1318	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1319	struct ether_multi	*enm;
1320	struct ether_multistep	step;
1321	u_int8_t *addr;
1322
1323	CLR(ifp->if_flags, IFF_ALLMULTI);
1324
1325	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1326	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1327		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1328		return;
1329	}
1330
1331	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1332		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1333		return;
1334	}
1335
1336	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1337		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1338		return;
1339	}
1340
1341	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1342	    sc->sc_ac.ac_multirangecnt > 0) {
1343		SET(ifp->if_flags, IFF_ALLMULTI);
1344		return;
1345	}
1346
1347	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1348	while (enm != NULL) {
1349		addr = enm->enm_addrlo;
1350
1351		memset(&mc, 0, sizeof(mc));
1352		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1353		    addr[2] << 8 | addr[3]);
1354		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1355		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1356			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1357			return;
1358		}
1359
1360		ETHER_NEXT_MULTI(step, enm);
1361	}
1362
1363	memset(&mc, 0, sizeof(mc));
1364	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1365		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1366		return;
1367	}
1368}
1369
/*
 * Bring the interface down: tell the firmware to stop, wait for the
 * interrupt handler to acknowledge the MYX_S_DOWN -> MYX_S_OFF
 * transition, then tear down the rx/tx buffers and DMA memory that
 * the up path allocated.
 */
void
myx_down(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	struct myx_buf		*mb;
	struct myx_cmd		 mc;
	int			 s;

	myx_sts_enter(sc);
	/* snapshot the link counter so the isr can spot a fresh change */
	sc->sc_linkdown = sts->ms_linkdown;
	sc->sc_state = MYX_S_DOWN;

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	/* sleep until myx_intr() moves the state to MYX_S_OFF */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	while (sc->sc_state != MYX_S_OFF)
		msleep(sts, &sc->sc_sts_mtx, 0, "myxdown", 0);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mtx_leave(&sc->sc_sts_mtx);

	timeout_del(&sc->sc_refill);

	/* report the link as gone */
	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);

	/* free rx buffers that were posted to the chip */
	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	/* and the spare rx buffers */
	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
		myx_buf_free(sc, mb);

	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
		myx_buf_free(sc, mb);

	/* tx buffers that still had packets in flight */
	while ((mb = myx_buf_get(&sc->sc_tx_buf_list)) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
		myx_buf_free(sc, mb);

	/* the sleep shizz above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1460
/*
 * Write the tx descriptors for segments 1..n-1 of a packet to the
 * NIC.  The descriptor for segment 0 is written by the caller so it
 * can be posted after the rest of the chain is visible to the chip.
 */
void
myx_write_txd_tail(struct myx_softc *sc, struct myx_buf *mb, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc		txd;
	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
	bus_dmamap_t			map = mb->mb_map;
	int				i;

	/* starts at 1: slot idx+0 is the first segment, left to the caller */
	for (i = 1; i < map->dm_nsegs; i++) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
		txd.tx_flags = flags;

		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}

	/*
	 * Pad runt frames: point an extra descriptor at the zero-filled
	 * DMA region for the missing bytes.  The loop above left i at
	 * dm_nsegs, so the pad lands in the slot after the last segment.
	 */
	if (map->dm_mapsize < 60) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(60 - map->dm_mapsize);
		txd.tx_flags = flags;

		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}
}
1493
/*
 * Transmit start routine.  Dequeues packets from the send queue, DMA
 * loads them and writes their descriptors to the NIC.  The first
 * descriptor of the whole batch is posted last, and its final word is
 * written separately after a barrier, so the firmware never sees a
 * partially written chain.
 */
void
myx_start(struct ifnet *ifp)
{
	struct myx_tx_desc		txd;
	SIMPLEQ_HEAD(, myx_buf)		list = SIMPLEQ_HEAD_INITIALIZER(list);
	struct myx_softc		*sc = ifp->if_softc;
	bus_dmamap_t			map;
	struct myx_buf			*mb, *firstmb;
	struct mbuf			*m;
	u_int32_t			offset = sc->sc_tx_ring_offset;
	u_int				idx, firstidx;
	u_int8_t			flags;

	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
	    IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/* stage as many packets as the ring has room for onto a local list */
	for (;;) {
		if (sc->sc_tx_free <= sc->sc_tx_nsegs) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		mb = myx_buf_get(&sc->sc_tx_buf_free);
		if (mb == NULL) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (myx_load_buf(sc, mb, m) != 0) {
			m_freem(m);
			myx_buf_put(&sc->sc_tx_buf_free, mb);
			ifp->if_oerrors++;
			break;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		mb->mb_m = m;

		map = mb->mb_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

		SIMPLEQ_INSERT_TAIL(&list, mb, mb_entry);

		/* runts (< 60 bytes) consume one extra pad descriptor */
		sc->sc_tx_free -= map->dm_nsegs +
		    (map->dm_mapsize < 60 ? 1 : 0);
	}

	/* post the first descriptor last */
	firstmb = SIMPLEQ_FIRST(&list);
	if (firstmb == NULL)
		return;

	SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
	myx_buf_put(&sc->sc_tx_buf_list, firstmb);

	/* skip the slots the first packet will occupy */
	idx = firstidx = sc->sc_tx_ring_idx;
	idx += firstmb->mb_map->dm_nsegs +
	    (firstmb->mb_map->dm_mapsize < 60 ? 1 : 0);
	idx %= sc->sc_tx_ring_count;

	/* write descriptors for every packet after the first */
	while ((mb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
		myx_buf_put(&sc->sc_tx_buf_list, mb);

		map = mb->mb_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, mb, flags, offset, idx);

		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		idx %= sc->sc_tx_ring_count;
	}
	sc->sc_tx_ring_idx = idx;

	/* go back and post first mb */
	map = firstmb->mb_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, firstmb, flags, offset, firstidx);

	/* write all of the first descriptor except its last word ... */
	myx_bus_space_write(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * firstidx, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* ... then the last word, which hands the chain to the chip */
	myx_bus_space_write(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * (firstidx + 1) - sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * firstidx, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);
}
1622
1623int
1624myx_load_buf(struct myx_softc *sc, struct myx_buf *mb, struct mbuf *m)
1625{
1626	bus_dma_tag_t			dmat = sc->sc_dmat;
1627	bus_dmamap_t			dmap = mb->mb_map;
1628
1629	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
1630	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
1631	case 0:
1632		break;
1633
1634	case EFBIG: /* mbuf chain is too fragmented */
1635		if (m_defrag(m, M_DONTWAIT) == 0 &&
1636		    bus_dmamap_load_mbuf(dmat, dmap, m,
1637		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
1638			break;
1639	default:
1640		return (1);
1641	}
1642
1643	mb->mb_m = m;
1644	return (0);
1645}
1646
/*
 * Interrupt handler.  Consumes the DMAed status block, acks the chip,
 * completes tx, refills rx, and hands MYX_S_DOWN -> MYX_S_OFF state
 * transitions back to myx_down() via wakeup().
 */
int
myx_intr(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state		 state = MYX_S_RUNNING;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 data, link = 0xffffffff;
	int			 refill = 0;
	u_int8_t		 valid = 0;
	int			 i;

	mtx_enter(&sc->sc_sts_mtx);
	if (sc->sc_state == MYX_S_OFF) {
		/* interface is down; nothing to do */
		mtx_leave(&sc->sc_sts_mtx);
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/* not our interrupt if the status block hasn't been updated */
	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		myx_sts_leave(sc);
		return (0);
	}

	/* legacy INTx needs an explicit deassert write */
	if (sc->sc_intx) {
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/* re-read until the chip stops re-DMAing fresh status blocks */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	if (sts->ms_statusupdated) {
		link = sts->ms_linkstate;

		/* a link counter change while going down means firmware
		 * has processed our IFDOWN */
		if (sc->sc_state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown)
			state = MYX_S_DOWN;
	}
	myx_sts_leave(sc);

	/* complete transmitted packets */
	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	/* claim the interrupt (two words) */
	data = htobe32(3);
	if (valid & 0x1) {
		refill |= myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (state == MYX_S_DOWN) {
		/* myx_down is waiting for us */
		mtx_enter(&sc->sc_sts_mtx);
		sc->sc_state = MYX_S_OFF;
		wakeup(sts);
		mtx_leave(&sc->sc_sts_mtx);

		return (1);
	}

	KERNEL_LOCK();
	if (link != 0xffffffff)
		myx_link_state(sc, link);

	if (ISSET(ifp->if_flags, IFF_OACTIVE)) {
		CLR(ifp->if_flags, IFF_OACTIVE);
		myx_start(ifp);
	}
	KERNEL_UNLOCK();

	/* refill rx rings; fall back to the timeout if buffers ran out */
	for (i = 0; i < 2; i++) {
		if (ISSET(refill, 1 << i)) {
			if (myx_rx_fill(sc, i) >= 0 &&
			    myx_bufs_empty(&sc->sc_rx_buf_list[i]))
				timeout_add(&sc->sc_refill, 0);
		}
	}

	return (1);
}
1743
1744void
1745myx_refill(void *xsc)
1746{
1747	struct myx_softc *sc = xsc;
1748	int i;
1749
1750	for (i = 0; i < 2; i++) {
1751		if (myx_rx_fill(sc, i) >= 0 &&
1752		    myx_bufs_empty(&sc->sc_rx_buf_list[i]))
1753			timeout_add(&sc->sc_refill, 1);
1754	}
1755}
1756
/*
 * Transmit completion.  Reclaims tx buffers until the local packet
 * counter catches up with the done count reported by the firmware,
 * then credits the freed descriptor slots back to the ring.
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_buf *mb;
	struct mbuf *m;
	bus_dmamap_t map;
	u_int free = 0;

	do {
		mb = myx_buf_get(&sc->sc_tx_buf_list);
		if (mb == NULL) {
			/* should not happen: fewer buffers than completions */
			printf("oh noes, no mb!\n");
			break;
		}

		m = mb->mb_m;
		map = mb->mb_map;

		/* runts held an extra pad descriptor slot */
		free += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

		KERNEL_LOCK();
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(m);
		ifp->if_opackets++;
		KERNEL_UNLOCK();

		myx_buf_put(&sc->sc_tx_buf_free, mb);
	} while (++sc->sc_tx_count != done_count);

	if (free) {
		KERNEL_LOCK();
		sc->sc_tx_free += free;
		KERNEL_UNLOCK();
	}
}
1796
/*
 * Receive completion.  Walks the interrupt queue of received-frame
 * lengths, passes the matching rx buffers up the stack, and returns a
 * bitmask of the rings (MYX_RXSMALL/MYX_RXBIG) that need refilling.
 */
int
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_buf *mb;
	struct mbuf *m;
	int ring;
	int rings = 0;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/* a zero length terminates the queue; clear each slot as we go */
	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* the length tells us which ring the frame landed on */
		ring = (len <= (MCLBYTES - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mb = myx_buf_get(&sc->sc_rx_buf_list[ring]);
		if (mb == NULL) {
			/* should not happen: completion without a buffer */
			printf("oh noes, no mb!\n");
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = mb->mb_m;
		/* chip wrote the frame ETHER_ALIGN bytes into the buffer */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		KERNEL_LOCK();
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);
		ifp->if_ipackets++;
		KERNEL_UNLOCK();

		myx_buf_put(&sc->sc_rx_buf_free[ring], mb);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* return consumed slots to the rxr accounting, flag rings to refill */
	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mtx_enter(&sc->sc_rx_ring_lock[ring].mrl_mtx);
		if_rxr_put(&sc->sc_rx_ring[ring], rxfree[ring]);
		mtx_leave(&sc->sc_rx_ring_lock[ring].mrl_mtx);

		SET(rings, 1 << ring);
	}

	return (rings);
}
1867
1868void
1869myx_rx_zero(struct myx_softc *sc, int ring)
1870{
1871	struct myx_rx_desc rxd;
1872	u_int32_t offset = sc->sc_rx_ring_offset[ring];
1873	int idx;
1874
1875	sc->sc_rx_ring_idx[ring] = 0;
1876
1877	memset(&rxd, 0xff, sizeof(rxd));
1878	for (idx = 0; idx < sc->sc_rx_ring_count; idx++) {
1879		myx_write(sc, offset + idx * sizeof(rxd),
1880		    &rxd, sizeof(rxd));
1881	}
1882}
1883
/*
 * Fill an rx ring with fresh buffers.  Only one filler runs at a time
 * (myx_ring_enter/leave); a racing caller just bumps the run count
 * and this loop repeats on its behalf.  The first new descriptor is
 * written last so the chip never sees a partially posted batch.
 * Returns -1 if another filler was already running, 0 if at least one
 * buffer was posted, 1 otherwise.
 */
int
myx_rx_fill(struct myx_softc *sc, int ring)
{
	struct myx_rx_desc rxd;
	struct myx_buf *mb, *firstmb;
	u_int32_t offset = sc->sc_rx_ring_offset[ring];
	u_int idx, firstidx, slots;
	int rv = 1;

	if (!myx_ring_enter(&sc->sc_rx_ring_lock[ring]))
		return (-1);

	do {
		/* how many ring slots are we allowed to fill? */
		mtx_enter(&sc->sc_rx_ring_lock[ring].mrl_mtx);
		slots = if_rxr_get(&sc->sc_rx_ring[ring], sc->sc_rx_ring_count);
		mtx_leave(&sc->sc_rx_ring_lock[ring].mrl_mtx);

		if (slots-- == 0)
			continue;

		firstmb = myx_buf_fill(sc, ring);
		if (firstmb == NULL)
			continue;

		rv = 0;
		myx_buf_put(&sc->sc_rx_buf_list[ring], firstmb);

		/* post descriptors after the first slot */
		firstidx = sc->sc_rx_ring_idx[ring];
		idx = firstidx + 1;
		idx %= sc->sc_rx_ring_count;

		while (slots > 0 && (mb = myx_buf_fill(sc, ring)) != NULL) {
			myx_buf_put(&sc->sc_rx_buf_list[ring], mb);

			rxd.rx_addr = htobe64(mb->mb_map->dm_segs[0].ds_addr);
			myx_bus_space_write(sc->sc_memt, sc->sc_memh,
			    offset + idx * sizeof(rxd), &rxd, sizeof(rxd));

			idx++;
			idx %= sc->sc_rx_ring_count;
			slots--;
		}

		/* make sure the first descriptor is seen after the others */
		if (idx != firstidx + 1) {
			bus_space_barrier(sc->sc_memt, sc->sc_memh,
			    offset, sizeof(rxd) * sc->sc_rx_ring_count,
			    BUS_SPACE_BARRIER_WRITE);
		}

		rxd.rx_addr = htobe64(firstmb->mb_map->dm_segs[0].ds_addr);
		myx_write(sc, offset + firstidx * sizeof(rxd),
		    &rxd, sizeof(rxd));

		/* return the slots we could not fill */
		sc->sc_rx_ring_idx[ring] = idx;
		mtx_enter(&sc->sc_rx_ring_lock[ring].mrl_mtx);
		if_rxr_put(&sc->sc_rx_ring[ring], slots);
		mtx_leave(&sc->sc_rx_ring_lock[ring].mrl_mtx);
	} while (!myx_ring_leave(&sc->sc_rx_ring_lock[ring]));

	return (rv);
}
1946
1947struct myx_buf *
1948myx_buf_fill(struct myx_softc *sc, int ring)
1949{
1950	static size_t sizes[2] = { MCLBYTES, 12 * 1024 };
1951	struct myx_buf *mb;
1952	struct mbuf *m;
1953	int rv;
1954
1955	KERNEL_LOCK();
1956	m = MCLGETI(NULL, M_DONTWAIT, NULL, sizes[ring]);
1957	KERNEL_UNLOCK();
1958	if (m == NULL)
1959		return (NULL);
1960	m->m_len = m->m_pkthdr.len = sizes[ring];
1961
1962	mb = myx_buf_get(&sc->sc_rx_buf_free[ring]);
1963	if (mb == NULL)
1964		goto mfree;
1965
1966	KERNEL_LOCK();
1967	rv = bus_dmamap_load_mbuf(sc->sc_dmat, mb->mb_map, m, BUS_DMA_NOWAIT);
1968	KERNEL_UNLOCK();
1969	if (rv != 0)
1970		goto put;
1971
1972	mb->mb_m = m;
1973	bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0, mb->mb_map->dm_mapsize,
1974	    BUS_DMASYNC_PREREAD);
1975
1976	return (mb);
1977
1978put:
1979	myx_buf_put(&sc->sc_rx_buf_free[ring], mb);
1980mfree:
1981	KERNEL_LOCK();
1982	m_freem(m);
1983	KERNEL_UNLOCK();
1984
1985	return (NULL);
1986}
1987
1988struct myx_buf *
1989myx_buf_alloc(struct myx_softc *sc, bus_size_t size, int nsegs,
1990    bus_size_t maxsegsz, bus_size_t boundary)
1991{
1992	struct myx_buf *mb;
1993
1994	mb = pool_get(myx_buf_pool, PR_WAITOK);
1995	if (mb == NULL)
1996		return (NULL);
1997
1998	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, boundary,
1999	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mb->mb_map) != 0) {
2000		pool_put(myx_buf_pool, mb);
2001		return (NULL);
2002	}
2003
2004	return (mb);
2005}
2006
2007void
2008myx_buf_free(struct myx_softc *sc, struct myx_buf *mb)
2009{
2010	bus_dmamap_destroy(sc->sc_dmat, mb->mb_map);
2011	pool_put(myx_buf_pool, mb);
2012}
2013
2014struct myx_buf *
2015myx_buf_get(struct myx_buf_list *mbl)
2016{
2017	struct myx_buf *mb;
2018
2019	mtx_enter(&mbl->mbl_mtx);
2020	mb = SIMPLEQ_FIRST(&mbl->mbl_q);
2021	if (mb != NULL)
2022		SIMPLEQ_REMOVE_HEAD(&mbl->mbl_q, mb_entry);
2023	mtx_leave(&mbl->mbl_mtx);
2024
2025	return (mb);
2026}
2027
2028int
2029myx_bufs_empty(struct myx_buf_list *mbl)
2030{
2031	int rv;
2032
2033	mtx_enter(&mbl->mbl_mtx);
2034	rv = SIMPLEQ_EMPTY(&mbl->mbl_q);
2035	mtx_leave(&mbl->mbl_mtx);
2036
2037	return (rv);
2038}
2039
2040void
2041myx_buf_put(struct myx_buf_list *mbl, struct myx_buf *mb)
2042{
2043	mtx_enter(&mbl->mbl_mtx);
2044	SIMPLEQ_INSERT_TAIL(&mbl->mbl_q, mb, mb_entry);
2045	mtx_leave(&mbl->mbl_mtx);
2046}
2047
2048void
2049myx_bufs_init(struct myx_buf_list *mbl)
2050{
2051	SIMPLEQ_INIT(&mbl->mbl_q);
2052	mtx_init(&mbl->mbl_mtx, IPL_NET);
2053}
2054
2055void
2056myx_ring_lock_init(struct myx_ring_lock *mrl)
2057{
2058	mtx_init(&mrl->mrl_mtx, IPL_NET);
2059	mrl->mrl_running = 0;
2060}
2061
2062int
2063myx_ring_enter(struct myx_ring_lock *mrl)
2064{
2065	int rv = 1;
2066
2067	mtx_enter(&mrl->mrl_mtx);
2068	if (++mrl->mrl_running > 1)
2069		rv = 0;
2070	mtx_leave(&mrl->mrl_mtx);
2071
2072	return (rv);
2073}
2074
2075int
2076myx_ring_leave(struct myx_ring_lock *mrl)
2077{
2078	int rv = 1;
2079
2080	mtx_enter(&mrl->mrl_mtx);
2081	if (--mrl->mrl_running > 0) {
2082		mrl->mrl_running = 1;
2083		rv = 0;
2084	}
2085	mtx_leave(&mrl->mrl_mtx);
2086
2087	return (rv);
2088}
2089