1/*	$OpenBSD: if_myx.c,v 1.52 2014/01/23 01:54:02 dlg Exp $	*/
2
3/*
4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/*
20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21 */
22
23#include "bpfilter.h"
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/sockio.h>
28#include <sys/mbuf.h>
29#include <sys/kernel.h>
30#include <sys/socket.h>
31#include <sys/malloc.h>
32#include <sys/timeout.h>
33#include <sys/proc.h>
34#include <sys/device.h>
35#include <sys/queue.h>
36
37#include <machine/bus.h>
38#include <machine/intr.h>
39
40#include <net/if.h>
41#include <net/if_dl.h>
42#include <net/if_media.h>
43#include <net/if_types.h>
44
45#if NBPFILTER > 0
46#include <net/bpf.h>
47#endif
48
49#ifdef INET
50#include <netinet/in.h>
51#include <netinet/if_ether.h>
52#endif
53
54#include <dev/pci/pcireg.h>
55#include <dev/pci/pcivar.h>
56#include <dev/pci/pcidevs.h>
57
58#include <dev/pci/if_myxreg.h>
59
60#ifdef MYX_DEBUG
61#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
62#define MYXDBG_CMD	(1<<1)	/* commands */
63#define MYXDBG_INTR	(1<<2)	/* interrupts */
64#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
65int myx_debug = MYXDBG_ALL;
66#define DPRINTF(_lvl, _arg...)	do {					\
67	if (myx_debug & (_lvl))						\
68		printf(_arg);						\
69} while (0)
70#else
71#define DPRINTF(_lvl, _arg...)
72#endif
73
74#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
75
76struct myx_dmamem {
77	bus_dmamap_t		 mxm_map;
78	bus_dma_segment_t	 mxm_seg;
79	int			 mxm_nsegs;
80	size_t			 mxm_size;
81	caddr_t			 mxm_kva;
82};
83
84struct myx_buf {
85	SIMPLEQ_ENTRY(myx_buf)	 mb_entry;
86	bus_dmamap_t		 mb_map;
87	struct mbuf		*mb_m;
88};
89
90struct myx_buf_list {
91	SIMPLEQ_HEAD(, myx_buf)	mbl_q;
92	struct mutex		mbl_mtx;
93};
94
95struct pool *myx_buf_pool;
96
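/*
 * mrl_running counts concurrent callers of myx_rx_fill: only the caller
 * that takes it from 0 to 1 does the work, later callers just bump the
 * count so the owner loops again (see myx_ring_enter/myx_ring_leave).
 */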
97struct myx_ring_lock {
98	struct mutex		mrl_mtx;
99	u_int			mrl_running;
100};
101
102struct myx_softc {
103	struct device		 sc_dev;
104	struct arpcom		 sc_ac;
105
106	pci_chipset_tag_t	 sc_pc;
107	pci_intr_handle_t	 sc_ih;
108	pcitag_t		 sc_tag;
109	u_int			 sc_function;
110
111	bus_dma_tag_t		 sc_dmat;
112	bus_space_tag_t		 sc_memt;
113	bus_space_handle_t	 sc_memh;
114	bus_size_t		 sc_mems;
115
116	struct myx_dmamem	 sc_zerodma;
117	struct myx_dmamem	 sc_cmddma;
118	struct myx_dmamem	 sc_paddma;
119
120	struct myx_dmamem	 sc_sts_dma;
121	volatile struct myx_status	*sc_sts;
122	struct mutex		 sc_sts_mtx;
123
124	int			 sc_intx;
125	void			*sc_irqh;
126	u_int32_t		 sc_irqcoaloff;
127	u_int32_t		 sc_irqclaimoff;
128	u_int32_t		 sc_irqdeassertoff;
129
130	struct myx_dmamem	 sc_intrq_dma;
131	struct myx_intrq_desc	*sc_intrq;
132	u_int			 sc_intrq_count;
133	u_int			 sc_intrq_idx;
134
135	u_int			 sc_rx_ring_count;
136	struct myx_ring_lock	 sc_rx_ring_lock[2];
137	u_int32_t		 sc_rx_ring_offset[2];
138	struct myx_buf_list	 sc_rx_buf_free[2];
139	struct myx_buf_list	 sc_rx_buf_list[2];
140	u_int			 sc_rx_ring_idx[2];
141#define  MYX_RXSMALL		 0
142#define  MYX_RXBIG		 1
143	struct timeout		 sc_refill;
144
145	bus_size_t		 sc_tx_boundary;
146	u_int			 sc_tx_ring_count;
147	struct myx_ring_lock	 sc_tx_ring_lock;
148	u_int32_t		 sc_tx_ring_offset;
149	u_int			 sc_tx_nsegs;
150	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
151	u_int			 sc_tx_free;
152	struct myx_buf_list	 sc_tx_buf_free;
153	struct myx_buf_list	 sc_tx_buf_list;
154	u_int			 sc_tx_ring_idx;
155
156	u_int8_t		 sc_lladdr[ETHER_ADDR_LEN];
157	struct ifmedia		 sc_media;
158
159	volatile u_int8_t	 sc_linkdown;
160};
161
162int	 myx_match(struct device *, void *, void *);
163void	 myx_attach(struct device *, struct device *, void *);
164int	 myx_query(struct myx_softc *, char *, size_t);
165u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
166void	 myx_attachhook(void *);
167int	 myx_loadfirmware(struct myx_softc *, const char *);
168int	 myx_probe_firmware(struct myx_softc *);
169
170void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
171void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
172
173#if defined(__LP64__)
174#define myx_bus_space_write bus_space_write_raw_region_8
175typedef u_int64_t myx_bus_t;
176#else
177#define myx_bus_space_write bus_space_write_raw_region_4
178typedef u_int32_t myx_bus_t;
179#endif
180
181int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
182int	 myx_boot(struct myx_softc *, u_int32_t);
183
184int	 myx_rdma(struct myx_softc *, u_int);
185int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
186	    bus_size_t, u_int align);
187void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
188int	 myx_media_change(struct ifnet *);
189void	 myx_media_status(struct ifnet *, struct ifmediareq *);
190void	 myx_link_state(struct myx_softc *, u_int32_t);
191void	 myx_watchdog(struct ifnet *);
192int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
193void	 myx_up(struct myx_softc *);
194void	 myx_iff(struct myx_softc *);
195void	 myx_down(struct myx_softc *);
196
197void	 myx_start(struct ifnet *);
198void	 myx_write_txd_tail(struct myx_softc *, struct myx_buf *, u_int8_t,
199	    u_int32_t, u_int);
200int	 myx_load_buf(struct myx_softc *, struct myx_buf *, struct mbuf *);
201int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
202int	 myx_intr(void *);
203int	 myx_rxeof(struct myx_softc *);
204void	 myx_txeof(struct myx_softc *, u_int32_t);
205
206struct myx_buf *	myx_buf_alloc(struct myx_softc *, bus_size_t, int,
207			    bus_size_t, bus_size_t);
208void			myx_buf_free(struct myx_softc *, struct myx_buf *);
209void			myx_bufs_init(struct myx_buf_list *);
210int			myx_bufs_empty(struct myx_buf_list *);
211struct myx_buf *	myx_buf_get(struct myx_buf_list *);
212void			myx_buf_put(struct myx_buf_list *, struct myx_buf *);
213struct myx_buf *	myx_buf_fill(struct myx_softc *, int);
214
215void			myx_rx_zero(struct myx_softc *, int);
216int			myx_rx_fill(struct myx_softc *, int);
217void			myx_refill(void *);
218
219void			myx_ring_lock_init(struct myx_ring_lock *);
220int			myx_ring_enter(struct myx_ring_lock *);
221int			myx_ring_leave(struct myx_ring_lock *);
222
223static inline void
224myx_sts_enter(struct myx_softc *sc)
225{
226	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
227
228	mtx_enter(&sc->sc_sts_mtx);
229	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
230	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
231}
232
233static inline void
234myx_sts_leave(struct myx_softc *sc)
235{
236	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
237
238	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
239	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
240	mtx_leave(&sc->sc_sts_mtx);
241}
242
243struct cfdriver myx_cd = {
244	NULL, "myx", DV_IFNET
245};
246struct cfattach myx_ca = {
247	sizeof(struct myx_softc), myx_match, myx_attach
248};
249
250const struct pci_matchid myx_devices[] = {
251	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
252	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
253};
254
255int
256myx_match(struct device *parent, void *match, void *aux)
257{
258	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
259}
260
261void
262myx_attach(struct device *parent, struct device *self, void *aux)
263{
264	struct myx_softc	*sc = (struct myx_softc *)self;
265	struct pci_attach_args	*pa = aux;
266	char			 part[32];
267	pcireg_t		 memtype;
268
269	sc->sc_pc = pa->pa_pc;
270	sc->sc_tag = pa->pa_tag;
271	sc->sc_dmat = pa->pa_dmat;
272	sc->sc_function = pa->pa_function;
273
274	myx_ring_lock_init(&sc->sc_rx_ring_lock[MYX_RXSMALL]);
275	myx_bufs_init(&sc->sc_rx_buf_free[MYX_RXSMALL]);
276	myx_bufs_init(&sc->sc_rx_buf_list[MYX_RXSMALL]);
277	myx_ring_lock_init(&sc->sc_rx_ring_lock[MYX_RXBIG]);
278	myx_bufs_init(&sc->sc_rx_buf_free[MYX_RXBIG]);
279	myx_bufs_init(&sc->sc_rx_buf_list[MYX_RXBIG]);
280
281	myx_ring_lock_init(&sc->sc_tx_ring_lock);
282	myx_bufs_init(&sc->sc_tx_buf_free);
283	myx_bufs_init(&sc->sc_tx_buf_list);
284
285	timeout_set(&sc->sc_refill, myx_refill, sc);
286
287	mtx_init(&sc->sc_sts_mtx, IPL_NET);
288
289
290	/* Map the PCI memory space */
291	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
292	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
293	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
294		printf(": unable to map register memory\n");
295		return;
296	}
297
298	/* Get board details (mac/part) */
299	memset(part, 0, sizeof(part));
300	if (myx_query(sc, part, sizeof(part)) != 0)
301		goto unmap;
302
303	/* Map the interrupt */
304	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
305		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
306			printf(": unable to map interrupt\n");
307			goto unmap;
308		}
309		sc->sc_intx = 1;
310	}
311
312	printf(": %s, model %s, address %s\n",
313	    pci_intr_string(pa->pa_pc, sc->sc_ih),
314	    part[0] == '\0' ? "(unknown)" : part,
315	    ether_sprintf(sc->sc_ac.ac_enaddr));
316
317	/* this is sort of racy: concurrent attaches could both see NULL */
318	if (myx_buf_pool == NULL) {
319		myx_buf_pool = malloc(sizeof(*myx_buf_pool), M_DEVBUF,
320		    M_WAITOK);
321		if (myx_buf_pool == NULL) {
322			printf("%s: unable to allocate buf pool\n",
323			    DEVNAME(sc));
324			goto unmap;
325		}
326		pool_init(myx_buf_pool, sizeof(struct myx_buf),
327		    0, 0, 0, "myxbufs", &pool_allocator_nointr);
328	}
329
330	if (mountroothook_establish(myx_attachhook, sc) == NULL) {
331		printf("%s: unable to establish mountroot hook\n", DEVNAME(sc));
332		goto unmap;
333	}
334
335	return;
336
337 unmap:
338	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
339	sc->sc_mems = 0;
340}
341
342u_int
343myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
344{
345	u_int		i, j;
346	u_int8_t	digit;
347
348	memset(lladdr, 0, ETHER_ADDR_LEN);
349	for (i = j = 0; i < maxlen && mac[i] != '\0'; i++) {
350		if (mac[i] >= '0' && mac[i] <= '9')
351			digit = mac[i] - '0';
352		else if (mac[i] >= 'A' && mac[i] <= 'F')
353			digit = mac[i] - 'A' + 10;
354		else if (mac[i] >= 'a' && mac[i] <= 'f')
355			digit = mac[i] - 'a' + 10;
356		else
357			continue;
358		if ((j & 1) == 0)
359			digit <<= 4;
360		lladdr[j++/2] |= digit;
361	}
362
363	return (i);
364}
365
366int
367myx_query(struct myx_softc *sc, char *part, size_t partlen)
368{
369	struct myx_gen_hdr hdr;
370	u_int32_t	offset;
371	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
372	u_int		i, len, maxlen;
373
374	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
375	offset = betoh32(offset);
376	if (offset + sizeof(hdr) > sc->sc_mems) {
377		printf(": header is outside register window\n");
378		return (1);
379	}
380
381	myx_read(sc, offset, &hdr, sizeof(hdr));
382	offset = betoh32(hdr.fw_specs);
383	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));
384
385	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);
386
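	/* the specs area holds NUL-separated KEY=value strings */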
387	for (i = 0; i < len; i++) {
388		maxlen = len - i;
389		if (strings[i] == '\0')
390			break;
391		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
392			i += 4;
393			i += myx_ether_aton(&strings[i],
394			    sc->sc_ac.ac_enaddr, maxlen);
395		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
396			i += 3;
397			i += strlcpy(part, &strings[i], min(maxlen, partlen));
398		}
399		for (; i < len; i++) {
400			if (strings[i] == '\0')
401				break;
402		}
403	}
404
405	return (0);
406}
407
408int
409myx_loadfirmware(struct myx_softc *sc, const char *filename)
410{
411	struct myx_gen_hdr	hdr;
412	u_int8_t		*fw;
413	size_t			fwlen;
414	u_int32_t		offset;
415	u_int			i, ret = 1;
416
417	if (loadfirmware(filename, &fw, &fwlen) != 0) {
418		printf("%s: could not load firmware %s\n", DEVNAME(sc),
419		    filename);
420		return (1);
421	}
422	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
423		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
424		goto err;
425	}
426
427	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
428	offset = betoh32(offset);
429	if ((offset + sizeof(hdr)) > fwlen) {
430		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
431		goto err;
432	}
433
434	memcpy(&hdr, fw + offset, sizeof(hdr));
435	DPRINTF(MYXDBG_INIT, "%s: "
436	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
437	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
438	    betoh32(hdr.fw_type), hdr.fw_version);
439
440	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
441	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
442		printf("%s: invalid firmware type 0x%x version %s\n",
443		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
444		goto err;
445	}
446
447	/* Write the firmware to the card's SRAM */
448	for (i = 0; i < fwlen; i += 256)
449		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));
450
451	if (myx_boot(sc, fwlen) != 0) {
452		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
453		goto err;
454	}
455
456	ret = 0;
457
458err:
459	free(fw, M_DEVBUF);
460	return (ret);
461}
462
463void
464myx_attachhook(void *arg)
465{
466	struct myx_softc	*sc = (struct myx_softc *)arg;
467	struct ifnet		*ifp = &sc->sc_ac.ac_if;
468	struct myx_cmd		 mc;
469
470	/* Allocate command DMA memory */
471	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
472	    MYXALIGN_CMD) != 0) {
473		printf("%s: failed to allocate command DMA memory\n",
474		    DEVNAME(sc));
475		return;
476	}
477
478	/* Try the firmware stored on disk */
479	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
480		/* error printed by myx_loadfirmware */
481		goto freecmd;
482	}
483
484	memset(&mc, 0, sizeof(mc));
485
486	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
487		printf("%s: failed to reset the device\n", DEVNAME(sc));
488		goto freecmd;
489	}
490
491	sc->sc_tx_boundary = 4096;
492
493	if (myx_probe_firmware(sc) != 0) {
494		printf("%s: error while selecting firmware\n", DEVNAME(sc));
495		goto freecmd;
496	}
497
498	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
499	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
500	if (sc->sc_irqh == NULL) {
501		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
502		goto freecmd;
503	}
504
505	ifp->if_softc = sc;
506	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
507	ifp->if_ioctl = myx_ioctl;
508	ifp->if_start = myx_start;
509	ifp->if_watchdog = myx_watchdog;
510	ifp->if_hardmtu = 9000;
511	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
512	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
513	IFQ_SET_READY(&ifp->if_snd);
514
515	ifp->if_capabilities = IFCAP_VLAN_MTU;
516#if 0
517	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
518	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
519	    IFCAP_CSUM_UDPv4;
520#endif
521
522	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
523	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
524	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
525
526	if_attach(ifp);
527	ether_ifattach(ifp);
528
529	return;
530
531freecmd:
532	myx_dmamem_free(sc, &sc->sc_cmddma);
533}
534
535int
536myx_probe_firmware(struct myx_softc *sc)
537{
538	struct myx_dmamem test;
539	bus_dmamap_t map;
540	struct myx_cmd mc;
541	pcireg_t csr;
542	int offset;
543	int width = 0;
544
545	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
546	    &offset, NULL)) {
547		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
548		    offset + PCI_PCIE_LCSR);
549		width = (csr >> 20) & 0x3f;
550
551		if (width <= 4) {
552			/*
553			 * if the link width is 4 or less we can use the
554			 * aligned firmware.
555			 */
556			return (0);
557		}
558	}
559
560	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
561		return (1);
562	map = test.mxm_map;
563
564	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
565	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
566
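	/*
	 * mc_data2 encodes the DMA test lengths: read size in the high
	 * 16 bits, write size in the low 16 bits.
	 */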
567	memset(&mc, 0, sizeof(mc));
568	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
569	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
570	mc.mc_data2 = htobe32(4096 * 0x10000);
571	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
572		printf("%s: DMA read test failed\n", DEVNAME(sc));
573		goto fail;
574	}
575
576	memset(&mc, 0, sizeof(mc));
577	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
578	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
579	mc.mc_data2 = htobe32(4096 * 0x1);
580	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
581		printf("%s: DMA write test failed\n", DEVNAME(sc));
582		goto fail;
583	}
584
585	memset(&mc, 0, sizeof(mc));
586	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
587	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
588	mc.mc_data2 = htobe32(4096 * 0x10001);
589	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
590		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
591		goto fail;
592	}
593
594	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
595	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
596	myx_dmamem_free(sc, &test);
597	return (0);
598
599fail:
600	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
601	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
602	myx_dmamem_free(sc, &test);
603
604	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
605		printf("%s: unable to load %s\n", DEVNAME(sc),
606		    MYXFW_UNALIGNED);
607		return (1);
608	}
609
610	sc->sc_tx_boundary = 2048;
611
612	printf("%s: using unaligned firmware\n", DEVNAME(sc));
613	return (0);
614}
615
616void
617myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
618{
619	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
620	    BUS_SPACE_BARRIER_READ);
621	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
622}
623
624void
625myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
626{
627	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
628	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
629	    BUS_SPACE_BARRIER_WRITE);
630}
631
632int
633myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
634    bus_size_t size, u_int align)
635{
636	mxm->mxm_size = size;
637
638	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
639	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
640	    &mxm->mxm_map) != 0)
641		return (1);
642	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
643	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
644	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
645		goto destroy;
646	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
647	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
648		goto free;
649	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
650	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
651		goto unmap;
652
653	return (0);
654 unmap:
655	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
656 free:
657	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
658 destroy:
659	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
660	return (1);
661}
662
663void
664myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
665{
666	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
667	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
668	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
669	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
670}
671
672int
673myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
674{
675	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
676	struct myx_response	*mr;
677	u_int			 i;
678	u_int32_t		 result, data;
679#ifdef MYX_DEBUG
680	static const char *cmds[MYXCMD_MAX] = {
681		"CMD_NONE",
682		"CMD_RESET",
683		"CMD_GET_VERSION",
684		"CMD_SET_INTRQDMA",
685		"CMD_SET_BIGBUFSZ",
686		"CMD_SET_SMALLBUFSZ",
687		"CMD_GET_TXRINGOFF",
688		"CMD_GET_RXSMALLRINGOFF",
689		"CMD_GET_RXBIGRINGOFF",
690		"CMD_GET_INTRACKOFF",
691		"CMD_GET_INTRDEASSERTOFF",
692		"CMD_GET_TXRINGSZ",
693		"CMD_GET_RXRINGSZ",
694		"CMD_SET_INTRQSZ",
695		"CMD_SET_IFUP",
696		"CMD_SET_IFDOWN",
697		"CMD_SET_MTU",
698		"CMD_GET_INTRCOALDELAYOFF",
699		"CMD_SET_STATSINTVL",
700		"CMD_SET_STATSDMA_OLD",
701		"CMD_SET_PROMISC",
702		"CMD_UNSET_PROMISC",
703		"CMD_SET_LLADDR",
704		"CMD_SET_FC",
705		"CMD_UNSET_FC",
706		"CMD_DMA_TEST",
707		"CMD_SET_ALLMULTI",
708		"CMD_UNSET_ALLMULTI",
709		"CMD_SET_MCASTGROUP",
710		"CMD_UNSET_MCASTGROUP",
711		"CMD_UNSET_MCAST",
712		"CMD_SET_STATSDMA",
713		"CMD_UNALIGNED_DMA_TEST",
714		"CMD_GET_UNALIGNED_STATUS"
715	};
716#endif
717
718	mc->mc_cmd = htobe32(cmd);
719	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
720	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
721
722	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
723	mr->mr_result = 0xffffffff;
724
725	/* Send command */
726	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
727	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
728	    BUS_DMASYNC_PREREAD);
729
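	/* poll the DMAed response for up to 20ms */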
730	for (i = 0; i < 20; i++) {
731		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
732		    BUS_DMASYNC_POSTREAD);
733		result = betoh32(mr->mr_result);
734		data = betoh32(mr->mr_data);
735
736		if (result != 0xffffffff)
737			break;
738
739		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
740		    BUS_DMASYNC_PREREAD);
741		delay(1000);
742	}
743
744	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
745	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
746	    cmds[cmd], i, result, data, data);
747
748	if (result != 0)
749		return (-1);
750
751	if (r != NULL)
752		*r = data;
753	return (0);
754}
755
756int
757myx_boot(struct myx_softc *sc, u_int32_t length)
758{
759	struct myx_bootcmd	 bc;
760	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
761	u_int32_t		*status;
762	u_int			 i, ret = 1;
763
764	memset(&bc, 0, sizeof(bc));
765	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
766	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
767	bc.bc_result = 0xffffffff;
768	bc.bc_offset = htobe32(MYX_FW_BOOT);
769	bc.bc_length = htobe32(length - 8);
770	bc.bc_copyto = htobe32(8);
771	bc.bc_jumpto = htobe32(0);
772
773	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
774	*status = 0;
775
776	/* Send command */
777	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
778	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
779	    BUS_DMASYNC_PREREAD);
780
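	/*
	 * the device signals completion by DMAing 0xffffffff into the
	 * status word; wait up to 200ms for that.
	 */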
781	for (i = 0; i < 200; i++) {
782		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
783		    BUS_DMASYNC_POSTREAD);
784		if (*status == 0xffffffff) {
785			ret = 0;
786			break;
787		}
788
789		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
790		    BUS_DMASYNC_PREREAD);
791		delay(1000);
792	}
793
794	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
795	    DEVNAME(sc), i, ret);
796
797	return (ret);
798}
799
800int
801myx_rdma(struct myx_softc *sc, u_int do_enable)
802{
803	struct myx_rdmacmd	 rc;
804	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
805	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
806	u_int32_t		*status;
807	int			 ret = 1;
808	u_int			 i;
809
810	/*
811 * A _dummy_ RDMA address must be set up. It also makes some
812 * PCI-E chipsets resend dropped messages.
813	 */
814	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
815	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
816	rc.rc_result = 0xffffffff;
817	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
818	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
819	rc.rc_enable = htobe32(do_enable);
820
821	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
822	*status = 0;
823
824	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
825	    BUS_DMASYNC_PREREAD);
826
827	/* Send command */
828	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));
829
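	/* as in myx_cmd, poll the DMAed status word for up to 20ms */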
830	for (i = 0; i < 20; i++) {
831		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
832		    BUS_DMASYNC_POSTREAD);
833
834		if (*status == 0xffffffff) {
835			ret = 0;
836			break;
837		}
838
839		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
840		    BUS_DMASYNC_PREREAD);
841		delay(1000);
842	}
843
844	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
845	    DEVNAME(sc), __func__,
846	    do_enable ? "enabled" : "disabled", i, betoh32(*status));
847
848	return (ret);
849}
850
851int
852myx_media_change(struct ifnet *ifp)
853{
854	/* ignore */
855	return (0);
856}
857
858void
859myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
860{
861	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
862	u_int32_t		 sts;
863
864	imr->ifm_active = IFM_ETHER | IFM_AUTO;
865	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
866		imr->ifm_status = 0;
867		return;
868	}
869
870	myx_sts_enter(sc);
871	sts = sc->sc_sts->ms_linkstate;
872	myx_sts_leave(sc);
873
874	myx_link_state(sc, sts);
875
876	imr->ifm_status = IFM_AVALID;
877	if (!LINK_STATE_IS_UP(ifp->if_link_state))
878		return;
879
880	imr->ifm_active |= IFM_FDX | IFM_FLOW |
881	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
882	imr->ifm_status |= IFM_ACTIVE;
883}
884
885void
886myx_link_state(struct myx_softc *sc, u_int32_t sts)
887{
888	struct ifnet		*ifp = &sc->sc_ac.ac_if;
889	int			 link_state = LINK_STATE_DOWN;
890
891	if (betoh32(sts) == MYXSTS_LINKUP)
892		link_state = LINK_STATE_FULL_DUPLEX;
893	if (ifp->if_link_state != link_state) {
894		ifp->if_link_state = link_state;
895		if_link_state_change(ifp);
896		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
897		    IF_Gbps(10) : 0;
898	}
899}
900
901void
902myx_watchdog(struct ifnet *ifp)
903{
904	return;
905}
906
907int
908myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
909{
910	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
911	struct ifaddr		*ifa = (struct ifaddr *)data;
912	struct ifreq		*ifr = (struct ifreq *)data;
913	int			 s, error = 0;
914
915	s = splnet();
916
917	switch (cmd) {
918	case SIOCSIFADDR:
919		ifp->if_flags |= IFF_UP;
920#ifdef INET
921		if (ifa->ifa_addr->sa_family == AF_INET)
922			arp_ifinit(&sc->sc_ac, ifa);
923#endif
924		/* FALLTHROUGH */
925
926	case SIOCSIFFLAGS:
927		if (ISSET(ifp->if_flags, IFF_UP)) {
928			if (ISSET(ifp->if_flags, IFF_RUNNING))
929				error = ENETRESET;
930			else
931				myx_up(sc);
932		} else {
933			if (ISSET(ifp->if_flags, IFF_RUNNING))
934				myx_down(sc);
935		}
936		break;
937
938	case SIOCGIFMEDIA:
939	case SIOCSIFMEDIA:
940		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
941		break;
942
943	default:
944		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
945	}
946
947	if (error == ENETRESET) {
948		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
949		    (IFF_UP | IFF_RUNNING))
950			myx_iff(sc);
951		error = 0;
952	}
953
954	splx(s);
955	return (error);
956}
957
958void
959myx_up(struct myx_softc *sc)
960{
961	struct ifnet		*ifp = &sc->sc_ac.ac_if;
962	struct myx_buf		*mb;
963	struct myx_cmd		mc;
964	bus_dmamap_t		map;
965	size_t			size;
966	u_int			maxpkt;
967	u_int32_t		r;
968	int			i;
969
970	memset(&mc, 0, sizeof(mc));
971	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
972		printf("%s: failed to reset the device\n", DEVNAME(sc));
973		return;
974	}
975
976	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
977	    64, MYXALIGN_CMD) != 0) {
978		printf("%s: failed to allocate zero pad memory\n",
979		    DEVNAME(sc));
980		return;
981	}
982	memset(sc->sc_zerodma.mxm_kva, 0, 64);
983	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
984	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
985
986	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
987	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
988		printf("%s: failed to allocate pad DMA memory\n",
989		    DEVNAME(sc));
990		goto free_zero;
991	}
992	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
993	    sc->sc_paddma.mxm_map->dm_mapsize,
994	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
995
996	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
997		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
998		goto free_pad;
999	}
1000
1001	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1002		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1003		goto free_pad;
1004	}
1005	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1006
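	/* set mbuf cluster watermarks for the small and big rx buffers */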
1007	m_clsetwms(ifp, MCLBYTES, 2, sc->sc_rx_ring_count - 2);
1008	m_clsetwms(ifp, 12 * 1024, 2, sc->sc_rx_ring_count - 2);
1009
1010	memset(&mc, 0, sizeof(mc));
1011	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1012		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1013		goto free_pad;
1014	}
1015	sc->sc_tx_ring_idx = 0;
1016	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1017	sc->sc_tx_free = sc->sc_tx_ring_count - 1;
1018	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1019	sc->sc_tx_count = 0;
1020	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1021	IFQ_SET_READY(&ifp->if_snd);
1022
1023	/* Allocate Interrupt Queue */
1024
1025	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1026	sc->sc_intrq_idx = 0;
1027
1028	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1029	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1030	    size, MYXALIGN_DATA) != 0) {
1031		goto free_pad;
1032	}
1033	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1034	map = sc->sc_intrq_dma.mxm_map;
1035	memset(sc->sc_intrq, 0, size);
1036	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1037	    BUS_DMASYNC_PREREAD);
1038
1039	memset(&mc, 0, sizeof(mc));
1040	mc.mc_data0 = htobe32(size);
1041	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1042		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1043		goto free_intrq;
1044	}
1045
1046	memset(&mc, 0, sizeof(mc));
1047	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1048	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1049	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1050		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1051		goto free_intrq;
1052	}
1053
1054	/*
1055	 * get interrupt offsets
1056	 */
1057
1058	memset(&mc, 0, sizeof(mc));
1059	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1060	    &sc->sc_irqclaimoff) != 0) {
1061		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1062		goto free_intrq;
1063	}
1064
1065	memset(&mc, 0, sizeof(mc));
1066	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1067	    &sc->sc_irqdeassertoff) != 0) {
1068		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1069		goto free_intrq;
1070	}
1071
1072	memset(&mc, 0, sizeof(mc));
1073	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1074	    &sc->sc_irqcoaloff) != 0) {
1075		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1076		goto free_intrq;
1077	}
1078
1079	/* Set an appropriate interrupt coalescing period */
1080	r = htobe32(MYX_IRQCOALDELAY);
1081	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1082
1083	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1084		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1085		goto free_intrq;
1086	}
1087
1088	memset(&mc, 0, sizeof(mc));
1089	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1090		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1091		goto free_intrq;
1092	}
1093
1094	memset(&mc, 0, sizeof(mc));
1095	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1096		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1097		goto free_intrq;
1098	}
1099
1100	memset(&mc, 0, sizeof(mc));
1101	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1102	    &sc->sc_tx_ring_offset) != 0) {
1103		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1104		goto free_intrq;
1105	}
1106
1107	memset(&mc, 0, sizeof(mc));
1108	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1109	    &sc->sc_rx_ring_offset[MYX_RXSMALL]) != 0) {
1110		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1111		goto free_intrq;
1112	}
1113
1114	memset(&mc, 0, sizeof(mc));
1115	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1116	    &sc->sc_rx_ring_offset[MYX_RXBIG]) != 0) {
1117		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1118		goto free_intrq;
1119	}
1120
1121	/* Allocate Interrupt Data */
1122	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1123	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1124		printf("%s: failed to allocate status DMA memory\n",
1125		    DEVNAME(sc));
1126		goto free_intrq;
1127	}
1128	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1129	map = sc->sc_sts_dma.mxm_map;
1130	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1131	    BUS_DMASYNC_PREREAD);
1132
1133	memset(&mc, 0, sizeof(mc));
1134	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1135	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1136	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1137	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1138		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1139		goto free_sts;
1140	}
1141
1142	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1143
1144	memset(&mc, 0, sizeof(mc));
1145	mc.mc_data0 = htobe32(maxpkt);
1146	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1147		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1148		goto free_sts;
1149	}
1150
1151	for (i = 0; i < sc->sc_tx_ring_count; i++) {
1152		mb = myx_buf_alloc(sc, maxpkt, sc->sc_tx_nsegs,
1153		    sc->sc_tx_boundary, sc->sc_tx_boundary);
1154		if (mb == NULL)
1155			goto free_tx_bufs;
1156
1157		myx_buf_put(&sc->sc_tx_buf_free, mb);
1158	}
1159
1160	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1161		mb = myx_buf_alloc(sc, MCLBYTES, 1, 4096, 4096);
1162		if (mb == NULL)
1163			goto free_rxsmall_bufs;
1164
1165		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXSMALL], mb);
1166	}
1167
1168	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1169		mb = myx_buf_alloc(sc, 12 * 1024, 1, 12 * 1024, 0);
1170		if (mb == NULL)
1171			goto free_rxbig_bufs;
1172
1173		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXBIG], mb);
1174	}
1175
1176	myx_rx_zero(sc, MYX_RXSMALL);
1177	if (myx_rx_fill(sc, MYX_RXSMALL) != 0) {
1178		printf("%s: failed to fill small rx ring\n", DEVNAME(sc));
1179		goto free_rxbig_bufs;
1180	}
1181
1182	myx_rx_zero(sc, MYX_RXBIG);
1183	if (myx_rx_fill(sc, MYX_RXBIG) != 0) {
1184		printf("%s: failed to fill big rx ring\n", DEVNAME(sc));
1185		goto free_rxsmall;
1186	}
1187
1188	memset(&mc, 0, sizeof(mc));
1189	mc.mc_data0 = htobe32(MCLBYTES - ETHER_ALIGN);
1190	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1191		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1192		goto free_rxbig;
1193	}
1194
1195	memset(&mc, 0, sizeof(mc));
1196	mc.mc_data0 = htobe32(16384);
1197	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1198		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1199		goto free_rxbig;
1200	}
1201
1202	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1203		printf("%s: failed to start the device\n", DEVNAME(sc));
1204		goto free_rxbig;
1205	}
1206
1207	CLR(ifp->if_flags, IFF_OACTIVE);
1208	SET(ifp->if_flags, IFF_RUNNING);
1209
1210	myx_iff(sc);
1211
1212	return;
1213
1214free_rxbig:
1215	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
1216		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1217		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1218		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1219		m_freem(mb->mb_m);
1220		myx_buf_free(sc, mb);
1221	}
1222free_rxsmall:
1223	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
1224		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1225		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1226		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1227		m_freem(mb->mb_m);
1228		myx_buf_free(sc, mb);
1229	}
1230free_rxbig_bufs:
1231	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
1232		myx_buf_free(sc, mb);
1233free_rxsmall_bufs:
1234	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
1235		myx_buf_free(sc, mb);
1236free_tx_bufs:
1237	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
1238		myx_buf_free(sc, mb);
1239free_sts:
1240	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1241	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1242	myx_dmamem_free(sc, &sc->sc_sts_dma);
1243free_intrq:
1244	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1245	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1246	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1247free_pad:
1248	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1249	    sc->sc_paddma.mxm_map->dm_mapsize,
1250	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1251	myx_dmamem_free(sc, &sc->sc_paddma);
1252
1253	memset(&mc, 0, sizeof(mc));
1254	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1255		printf("%s: failed to reset the device\n", DEVNAME(sc));
1256	}
1257free_zero:
1258	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1259	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1260	myx_dmamem_free(sc, &sc->sc_zerodma);
1261}
1262
1263int
1264myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1265{
1266	struct myx_cmd		 mc;
1267
1268	memset(&mc, 0, sizeof(mc));
1269	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3]);
1270	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1271
1272	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1273		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1274		return (-1);
1275	}
1276	return (0);
1277}
1278
1279void
1280myx_iff(struct myx_softc *sc)
1281{
1282	struct myx_cmd		mc;
1283	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1284	struct ether_multi	*enm;
1285	struct ether_multistep	step;
1286
1287	CLR(ifp->if_flags, IFF_ALLMULTI);
1288
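	/* zero mc so the unused data words don't carry stack garbage */
	memset(&mc, 0, sizeof(mc));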
1289	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1290	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1291		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1292		return;
1293	}
1294
1295	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1296		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1297		return;
1298	}
1299
1300	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1301		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1302		return;
1303	}
1304
1305	if (ISSET(ifp->if_flags, IFF_PROMISC) || sc->sc_ac.ac_multirangecnt > 0) {
1306		SET(ifp->if_flags, IFF_ALLMULTI);
1307		return;
1308	}
1309
1310	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1311	while (enm != NULL) {
1312		if (myx_setlladdr(sc, MYXCMD_SET_MCASTGROUP,
1313		    enm->enm_addrlo) != 0) {
1314			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1315			return;
1316		}
1317
1318		ETHER_NEXT_MULTI(step, enm);
1319	}
1320
1321	memset(&mc, 0, sizeof(mc));
1322	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1323		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1324		return;
1325	}
1326}
1327
1328void
1329myx_down(struct myx_softc *sc)
1330{
1331	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1332	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
1333	struct myx_buf		*mb;
1334	struct myx_cmd		 mc;
1335	int			 s;
1336
1337	myx_sts_enter(sc);
1338	sc->sc_linkdown = sc->sc_sts->ms_linkdown;
1339	myx_sts_leave(sc);
1340
1341	memset(&mc, 0, sizeof(mc));
1342	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);
1343
1344	myx_sts_enter(sc);
1345	/* watch sc_sts directly; myx_intr wakes us when ms_linkdown changes */
1346	while (sc->sc_linkdown == sc->sc_sts->ms_linkdown) {
1347		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1348		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1349
1350		msleep(sc->sc_sts, &sc->sc_sts_mtx, 0, "myxdown", 0);
1351
1352		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1353		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1354	}
1355	mtx_leave(&sc->sc_sts_mtx);
1356
1357	timeout_del(&sc->sc_refill);
1358
1359	s = splnet();
1360	CLR(ifp->if_flags, IFF_RUNNING);
1361
1362	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
1363		ifp->if_link_state = LINK_STATE_UNKNOWN;
1364		ifp->if_baudrate = 0;
1365		if_link_state_change(ifp);
1366	}
1367	splx(s);
1368
1369	memset(&mc, 0, sizeof(mc));
1370	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1371		printf("%s: failed to reset the device\n", DEVNAME(sc));
1372	}
1373
1374	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
1375
1376	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
1377		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1378		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1379		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1380		m_freem(mb->mb_m);
1381		myx_buf_free(sc, mb);
1382	}
1383
1384	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
1385		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1386		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1387		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1388		m_freem(mb->mb_m);
1389		myx_buf_free(sc, mb);
1390	}
1391
1392	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
1393		myx_buf_free(sc, mb);
1394
1395	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
1396		myx_buf_free(sc, mb);
1397
1398	while ((mb = myx_buf_get(&sc->sc_tx_buf_list)) != NULL) {
1399		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1400		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1401		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1402		m_freem(mb->mb_m);
1403		myx_buf_free(sc, mb);
1404	}
1405
1406	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
1407		myx_buf_free(sc, mb);
1408
1409	/* the msleep loop above already synced this dmamem */
1410	myx_dmamem_free(sc, &sc->sc_sts_dma);
1411
1412	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1413	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1414	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1415
1416	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1417	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1418	myx_dmamem_free(sc, &sc->sc_paddma);
1419
1420	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1421	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1422	myx_dmamem_free(sc, &sc->sc_zerodma);
1423}
1424
1425void
1426myx_write_txd_tail(struct myx_softc *sc, struct myx_buf *mb, u_int8_t flags,
1427    u_int32_t offset, u_int idx)
1428{
1429	struct myx_tx_desc		txd;
1430	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
1431	bus_dmamap_t			map = mb->mb_map;
1432	int				i;
1433
1434	for (i = 1; i < map->dm_nsegs; i++) {
1435		memset(&txd, 0, sizeof(txd));
1436		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
1437		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
1438		txd.tx_flags = flags;
1439
1440		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1441		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1442		    &txd, sizeof(txd));
1443	}
1444
1445	/* pad runt frames */
1446	if (map->dm_mapsize < 60) {
1447		memset(&txd, 0, sizeof(txd));
1448		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
1449		txd.tx_length = htobe16(60 - map->dm_mapsize);
1450		txd.tx_flags = flags;
1451
1452		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1453		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1454		    &txd, sizeof(txd));
1455	}
1456}
1457
1458void
1459myx_start(struct ifnet *ifp)
1460{
1461	struct myx_tx_desc		txd;
1462	SIMPLEQ_HEAD(, myx_buf)		list = SIMPLEQ_HEAD_INITIALIZER(list);
1463	struct myx_softc		*sc = ifp->if_softc;
1464	bus_dmamap_t			map;
1465	struct myx_buf			*mb, *firstmb;
1466	struct mbuf			*m;
1467	u_int32_t			offset = sc->sc_tx_ring_offset;
1468	u_int				idx, firstidx;
1469	u_int8_t			flags;
1470
1471	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
1472	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
1473	    IFQ_IS_EMPTY(&ifp->if_snd))
1474		return;
1475
1476	for (;;) {
1477		if (sc->sc_tx_free <= sc->sc_tx_nsegs) {
1478			SET(ifp->if_flags, IFF_OACTIVE);
1479			break;
1480		}
1481
1482		IFQ_POLL(&ifp->if_snd, m);
1483		if (m == NULL)
1484			break;
1485
1486		mb = myx_buf_get(&sc->sc_tx_buf_free);
1487		if (mb == NULL) {
1488			SET(ifp->if_flags, IFF_OACTIVE);
1489			break;
1490		}
1491
1492		IFQ_DEQUEUE(&ifp->if_snd, m);
1493		if (myx_load_buf(sc, mb, m) != 0) {
1494			m_freem(m);
1495			myx_buf_put(&sc->sc_tx_buf_free, mb);
1496			ifp->if_oerrors++;
1497			break;
1498		}
1499
1500#if NBPFILTER > 0
1501		if (ifp->if_bpf)
1502			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1503#endif
1504
1505		mb->mb_m = m;
1506
1507		map = mb->mb_map;
1508		bus_dmamap_sync(sc->sc_dmat, map, 0,
1509		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1510
1511		SIMPLEQ_INSERT_TAIL(&list, mb, mb_entry);
1512
1513		sc->sc_tx_free -= map->dm_nsegs +
1514		    (map->dm_mapsize < 60 ? 1 : 0);
1515	}
1516
1517	/* post the first descriptor last */
1518	firstmb = SIMPLEQ_FIRST(&list);
1519	if (firstmb == NULL)
1520		return;
1521
1522	SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
1523	myx_buf_put(&sc->sc_tx_buf_list, firstmb);
1524
1525	idx = firstidx = sc->sc_tx_ring_idx;
1526	idx += firstmb->mb_map->dm_nsegs +
1527	    (firstmb->mb_map->dm_mapsize < 60 ? 1 : 0);
1528	idx %= sc->sc_tx_ring_count;
1529
1530	while ((mb = SIMPLEQ_FIRST(&list)) != NULL) {
1531		SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
1532		myx_buf_put(&sc->sc_tx_buf_list, mb);
1533
1534		map = mb->mb_map;
1535
1536		flags = MYXTXD_FLAGS_NO_TSO;
1537		if (map->dm_mapsize < 1520)
1538			flags |= MYXTXD_FLAGS_SMALL;
1539
1540		memset(&txd, 0, sizeof(txd));
1541		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
1542		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
1543		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1544		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
1545		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1546		    offset + sizeof(txd) * idx, &txd, sizeof(txd));
1547
1548		myx_write_txd_tail(sc, mb, flags, offset, idx);
1549
1550		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1551		idx %= sc->sc_tx_ring_count;
1552	}
1553	sc->sc_tx_ring_idx = idx;
1554
1555	/* go back and post first mb */
1556	map = firstmb->mb_map;
1557
1558	flags = MYXTXD_FLAGS_NO_TSO;
1559	if (map->dm_mapsize < 1520)
1560		flags |= MYXTXD_FLAGS_SMALL;
1561
1562	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
1563	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
1564	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1565	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
1566
1567	/* make sure the first descriptor is seen after the others */
1568	myx_write_txd_tail(sc, firstmb, flags, offset, firstidx);
1569
1570	myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1571	    offset + sizeof(txd) * firstidx, &txd,
1572	    sizeof(txd) - sizeof(myx_bus_t));
1573
1574	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
1575	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);
1576
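	/*
	 * the last bus-word of the first descriptor is written only after
	 * a barrier, so the chip never sees a partially written descriptor.
	 */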
1577	myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1578	    offset + sizeof(txd) * (firstidx + 1) - sizeof(myx_bus_t),
1579	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
1580	    sizeof(myx_bus_t));
1581
1582	bus_space_barrier(sc->sc_memt, sc->sc_memh,
1583	    offset + sizeof(txd) * firstidx, sizeof(txd),
1584	    BUS_SPACE_BARRIER_WRITE);
1585}
1586
1587int
1588myx_load_buf(struct myx_softc *sc, struct myx_buf *mb, struct mbuf *m)
1589{
1590	bus_dma_tag_t			dmat = sc->sc_dmat;
1591	bus_dmamap_t			dmap = mb->mb_map;
1592
1593	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
1594	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
1595	case 0:
1596		break;
1597
1598	case EFBIG: /* mbuf chain is too fragmented */
1599		if (m_defrag(m, M_DONTWAIT) == 0 &&
1600		    bus_dmamap_load_mbuf(dmat, dmap, m,
1601		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
1602			break;
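		/* FALLTHROUGH */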
1603	default:
1604		return (1);
1605	}
1606
1607	mb->mb_m = m;
1608	return (0);
1609}
1610
1611int
1612myx_intr(void *arg)
1613{
1614	struct myx_softc	*sc = (struct myx_softc *)arg;
1615	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1616	volatile struct myx_status *sts = sc->sc_sts;
1617	u_int32_t		 data, link;
1618	int			 refill = 0;
1619	u_int8_t		 valid = 0;
1620	u_int			 if_flags;
1621	int			 i;
1622
1623	if_flags = ifp->if_flags;
1624	if (!ISSET(if_flags, IFF_RUNNING))
1625		return (0);
1626
1627	myx_sts_enter(sc);
1628	valid = sts->ms_isvalid;
1629	if (valid == 0x0) {
1630		myx_sts_leave(sc);
1631		return (0);
1632	}
1633	sts->ms_isvalid = 0;
1634
1635	if (sc->sc_intx) {
1636		data = htobe32(0);
1637		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
1638		    sc->sc_irqdeassertoff, &data, sizeof(data));
1639	}
1640
1641	if (!ISSET(if_flags, IFF_UP) &&
1642	    sc->sc_linkdown != sts->ms_linkdown) {
1643		/* myx_down is waiting for us */
1644		wakeup_one(sc->sc_sts);
1645	}
1646
1647	link = sts->ms_statusupdated ? sts->ms_linkstate : 0xffffffff;
1648
1649	do {
1650		data = betoh32(sts->ms_txdonecnt);
1651		myx_sts_leave(sc);
1652
1653		if (data != sc->sc_tx_count)
1654			myx_txeof(sc, data);
1655
1656		refill |= myx_rxeof(sc);
1657
1658		myx_sts_enter(sc);
1659	} while (sts->ms_isvalid);
1660	myx_sts_leave(sc);
1661
1662	if (link != 0xffffffff) {
1663		KERNEL_LOCK();
1664		myx_link_state(sc, link);
1665		KERNEL_UNLOCK();
1666	}
1667
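	/*
	 * acknowledge the handled events back to the NIC by writing to
	 * the two IRQ claim words.
	 */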
1668	data = htobe32(3);
1669	if (valid & 0x1) {
1670		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
1671		    sc->sc_irqclaimoff, &data, sizeof(data));
1672	}
1673	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
1674	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));
1675	bus_space_barrier(sc->sc_memt, sc->sc_memh,
1676	    sc->sc_irqclaimoff, sizeof(data) * 2, BUS_SPACE_BARRIER_WRITE);
1677
1678	if (ISSET(if_flags, IFF_OACTIVE)) {
1679		KERNEL_LOCK();
1680		CLR(ifp->if_flags, IFF_OACTIVE);
1681		myx_start(ifp);
1682		KERNEL_UNLOCK();
1683	}
1684
1685	for (i = 0; i < 2; i++) {
1686		if (ISSET(refill, 1 << i)) {
1687			if (myx_rx_fill(sc, i) >= 0 &&
1688			    myx_bufs_empty(&sc->sc_rx_buf_list[i]))
1689				timeout_add(&sc->sc_refill, 0);
1690		}
1691	}
1692
1693	return (1);
1694}
1695
1696void
1697myx_refill(void *xsc)
1698{
1699	struct myx_softc *sc = xsc;
1700	int i;
1701
1702	for (i = 0; i < 2; i++) {
1703		if (myx_rx_fill(sc, i) >= 0 &&
1704		    myx_bufs_empty(&sc->sc_rx_buf_list[i]))
1705			timeout_add(&sc->sc_refill, 1);
1706	}
1707}
1708
1709void
1710myx_txeof(struct myx_softc *sc, u_int32_t done_count)
1711{
1712	struct ifnet *ifp = &sc->sc_ac.ac_if;
1713	struct myx_buf *mb;
1714	struct mbuf *m;
1715	bus_dmamap_t map;
1716
1717	do {
1718		mb = myx_buf_get(&sc->sc_tx_buf_list);
1719		if (mb == NULL) {
1720			printf("%s: missing tx buffer\n", DEVNAME(sc));
1721			break;
1722		}
1723
1724		m = mb->mb_m;
1725		map = mb->mb_map;
1726
1727		sc->sc_tx_free += map->dm_nsegs;
1728		if (map->dm_mapsize < 60)
1729			sc->sc_tx_free += 1;
1730
1731		bus_dmamap_sync(sc->sc_dmat, map, 0,
1732		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1733
1734		KERNEL_LOCK();
1735		bus_dmamap_unload(sc->sc_dmat, map);
1736		m_freem(m);
1737		ifp->if_opackets++;
1738		KERNEL_UNLOCK();
1739
1740		myx_buf_put(&sc->sc_tx_buf_free, mb);
1741	} while (++sc->sc_tx_count != done_count);
1742}
1743
1744int
1745myx_rxeof(struct myx_softc *sc)
1746{
1747	static const struct myx_intrq_desc zerodesc = { 0, 0 };
1748	struct ifnet *ifp = &sc->sc_ac.ac_if;
1749	struct myx_buf *mb;
1750	struct mbuf *m;
1751	int ring;
1752	int rings = 0;
1753	u_int len;
1754
1755	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1756	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1757
1758	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
1759		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;
1760
1761		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
1762			sc->sc_intrq_idx = 0;
1763
1764		ring = (len <= (MCLBYTES - ETHER_ALIGN)) ?
1765		    MYX_RXSMALL : MYX_RXBIG;
1766
1767		mb = myx_buf_get(&sc->sc_rx_buf_list[ring]);
1768		if (mb == NULL) {
1769			printf("%s: missing rx buffer\n", DEVNAME(sc));
1770			break;
1771		}
1772
1773		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1774		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1775
1776		m = mb->mb_m;
1777		m->m_data += ETHER_ALIGN;
1778		m->m_pkthdr.rcvif = ifp;
1779		m->m_pkthdr.len = m->m_len = len;
1780
1781		KERNEL_LOCK();
1782		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1783#if NBPFILTER > 0
1784		if (ifp->if_bpf)
1785			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1786#endif
1787
1788		ether_input_mbuf(ifp, m);
1789		ifp->if_ipackets++;
1790		KERNEL_UNLOCK();
1791
1792		myx_buf_put(&sc->sc_rx_buf_free[ring], mb);
1793
1794		SET(rings, 1 << ring);
1795	}
1796
1797	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1798	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1799
1800	return (rings);
1801}
1802
1803void
1804myx_rx_zero(struct myx_softc *sc, int ring)
1805{
1806	struct myx_rx_desc rxd;
1807	u_int32_t offset = sc->sc_rx_ring_offset[ring];
1808	int idx;
1809
1810	sc->sc_rx_ring_idx[ring] = 0;
1811
1812	memset(&rxd, 0xff, sizeof(rxd));
1813	for (idx = 0; idx < sc->sc_rx_ring_count; idx++) {
1814		myx_write(sc, offset + idx * sizeof(rxd),
1815		    &rxd, sizeof(rxd));
1816	}
1817}
1818
1819int
1820myx_rx_fill(struct myx_softc *sc, int ring)
1821{
1822	struct myx_rx_desc rxd;
1823	struct myx_buf *mb, *firstmb;
1824	u_int32_t offset = sc->sc_rx_ring_offset[ring];
1825	u_int idx, firstidx;
1826	int rv = 1;
1827
1828	if (!myx_ring_enter(&sc->sc_rx_ring_lock[ring]))
1829		return (-1);
1830
1831	do {
1832		firstmb = myx_buf_fill(sc, ring);
1833		if (firstmb == NULL)
1834			continue;
1835
1836		rv = 0;
1837		myx_buf_put(&sc->sc_rx_buf_list[ring], firstmb);
1838
1839		firstidx = sc->sc_rx_ring_idx[ring];
1840		idx = firstidx + 1;
1841		idx %= sc->sc_rx_ring_count;
1842
1843		while ((mb = myx_buf_fill(sc, ring)) != NULL) {
1844			myx_buf_put(&sc->sc_rx_buf_list[ring], mb);
1845
1846			rxd.rx_addr = htobe64(mb->mb_map->dm_segs[0].ds_addr);
1847			myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1848			    offset + idx * sizeof(rxd), &rxd, sizeof(rxd));
1849
1850			idx++;
1851			idx %= sc->sc_rx_ring_count;
1852		}
1853
1854		/* make sure the first descriptor is seen after the others */
1855		if (idx != firstidx + 1) {
1856			bus_space_barrier(sc->sc_memt, sc->sc_memh,
1857			    offset, sizeof(rxd) * sc->sc_rx_ring_count,
1858			    BUS_SPACE_BARRIER_WRITE);
1859		}
1860
1861		rxd.rx_addr = htobe64(firstmb->mb_map->dm_segs[0].ds_addr);
1862		myx_write(sc, offset + firstidx * sizeof(rxd),
1863		    &rxd, sizeof(rxd));
1864
1865		sc->sc_rx_ring_idx[ring] = idx;
1866	} while (!myx_ring_leave(&sc->sc_rx_ring_lock[ring]));
1867
1868	return (rv);
1869}
1870
1871struct myx_buf *
1872myx_buf_fill(struct myx_softc *sc, int ring)
1873{
1874	static size_t sizes[2] = { MCLBYTES, 12 * 1024 };
1875	struct myx_buf *mb;
1876	struct mbuf *m;
1877	int rv;
1878
1879	KERNEL_LOCK();
1880	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_ac.ac_if, sizes[ring]);
1881	KERNEL_UNLOCK();
1882	if (m == NULL)
1883		return (NULL);
1884	m->m_len = m->m_pkthdr.len = sizes[ring];
1885
1886	mb = myx_buf_get(&sc->sc_rx_buf_free[ring]);
1887	if (mb == NULL)
1888		goto mfree;
1889
1890	KERNEL_LOCK();
1891	rv = bus_dmamap_load_mbuf(sc->sc_dmat, mb->mb_map, m, BUS_DMA_NOWAIT);
1892	KERNEL_UNLOCK();
1893	if (rv != 0)
1894		goto put;
1895
1896	mb->mb_m = m;
1897	bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0, mb->mb_map->dm_mapsize,
1898	    BUS_DMASYNC_PREREAD);
1899
1900	return (mb);
1901
1902put:
1903	myx_buf_put(&sc->sc_rx_buf_free[ring], mb);
1904mfree:
1905	KERNEL_LOCK();
1906	m_freem(m);
1907	KERNEL_UNLOCK();
1908
1909	return (NULL);
1910}
1911
1912struct myx_buf *
1913myx_buf_alloc(struct myx_softc *sc, bus_size_t size, int nsegs,
1914    bus_size_t maxsegsz, bus_size_t boundary)
1915{
1916	struct myx_buf *mb;
1917
1918	mb = pool_get(myx_buf_pool, PR_WAITOK);
1919	if (mb == NULL)
1920		return (NULL);
1921
1922	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, boundary,
1923	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mb->mb_map) != 0) {
1924		pool_put(myx_buf_pool, mb);
1925		return (NULL);
1926	}
1927
1928	return (mb);
1929}
1930
1931void
1932myx_buf_free(struct myx_softc *sc, struct myx_buf *mb)
1933{
1934	bus_dmamap_destroy(sc->sc_dmat, mb->mb_map);
1935	pool_put(myx_buf_pool, mb);
1936}
1937
1938struct myx_buf *
1939myx_buf_get(struct myx_buf_list *mbl)
1940{
1941	struct myx_buf *mb;
1942
1943	mtx_enter(&mbl->mbl_mtx);
1944	mb = SIMPLEQ_FIRST(&mbl->mbl_q);
1945	if (mb != NULL)
1946		SIMPLEQ_REMOVE_HEAD(&mbl->mbl_q, mb_entry);
1947	mtx_leave(&mbl->mbl_mtx);
1948
1949	return (mb);
1950}
1951
1952int
1953myx_bufs_empty(struct myx_buf_list *mbl)
1954{
1955	int rv;
1956
1957	mtx_enter(&mbl->mbl_mtx);
1958	rv = SIMPLEQ_EMPTY(&mbl->mbl_q);
1959	mtx_leave(&mbl->mbl_mtx);
1960
1961	return (rv);
1962}
1963
1964void
1965myx_buf_put(struct myx_buf_list *mbl, struct myx_buf *mb)
1966{
1967	mtx_enter(&mbl->mbl_mtx);
1968	SIMPLEQ_INSERT_TAIL(&mbl->mbl_q, mb, mb_entry);
1969	mtx_leave(&mbl->mbl_mtx);
1970}
1971
1972void
1973myx_bufs_init(struct myx_buf_list *mbl)
1974{
1975	SIMPLEQ_INIT(&mbl->mbl_q);
1976	mtx_init(&mbl->mbl_mtx, IPL_NET);
1977}
1978
1979void
1980myx_ring_lock_init(struct myx_ring_lock *mrl)
1981{
1982	mtx_init(&mrl->mrl_mtx, IPL_NET);
1983	mrl->mrl_running = 0;
1984}
1985
1986int
1987myx_ring_enter(struct myx_ring_lock *mrl)
1988{
1989	int rv = 1;
1990
1991	mtx_enter(&mrl->mrl_mtx);
1992	if (++mrl->mrl_running > 1)
1993		rv = 0;
1994	mtx_leave(&mrl->mrl_mtx);
1995
1996	return (rv);
1997}
1998
1999int
2000myx_ring_leave(struct myx_ring_lock *mrl)
2001{
2002	int rv = 1;
2003
2004	mtx_enter(&mrl->mrl_mtx);
2005	if (--mrl->mrl_running > 0) {
2006		mrl->mrl_running = 1;
2007		rv = 0;
2008	}
2009	mtx_leave(&mrl->mrl_mtx);
2010
2011	return (rv);
2012}
2013