if_myx.c revision 1.56
1/*	$OpenBSD: if_myx.c,v 1.56 2014/02/10 05:21:41 dlg Exp $	*/
2
3/*
4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/*
20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21 */
22
23#include "bpfilter.h"
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/sockio.h>
28#include <sys/mbuf.h>
29#include <sys/kernel.h>
30#include <sys/socket.h>
31#include <sys/malloc.h>
32#include <sys/timeout.h>
33#include <sys/proc.h>
34#include <sys/device.h>
35#include <sys/queue.h>
36
37#include <machine/bus.h>
38#include <machine/intr.h>
39
40#include <net/if.h>
41#include <net/if_dl.h>
42#include <net/if_media.h>
43#include <net/if_types.h>
44
45#if NBPFILTER > 0
46#include <net/bpf.h>
47#endif
48
49#ifdef INET
50#include <netinet/in.h>
51#include <netinet/if_ether.h>
52#endif
53
54#include <dev/pci/pcireg.h>
55#include <dev/pci/pcivar.h>
56#include <dev/pci/pcidevs.h>
57
58#include <dev/pci/if_myxreg.h>
59
60#ifdef MYX_DEBUG
61#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
62#define MYXDBG_CMD	(1<<1)	/* commands */
63#define MYXDBG_INTR	(1<<2)	/* interrupts */
64#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
65int myx_debug = MYXDBG_ALL;
66#define DPRINTF(_lvl, _arg...)	do {					\
67	if (myx_debug & (_lvl))						\
68		printf(_arg);						\
69} while (0)
70#else
71#define DPRINTF(_lvl, _arg...)
72#endif
73
74#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
75
76struct myx_dmamem {
77	bus_dmamap_t		 mxm_map;
78	bus_dma_segment_t	 mxm_seg;
79	int			 mxm_nsegs;
80	size_t			 mxm_size;
81	caddr_t			 mxm_kva;
82};
83
84struct myx_buf {
85	SIMPLEQ_ENTRY(myx_buf)	 mb_entry;
86	bus_dmamap_t		 mb_map;
87	struct mbuf		*mb_m;
88};
89
90struct myx_buf_list {
91	SIMPLEQ_HEAD(, myx_buf)	mbl_q;
92	struct mutex		mbl_mtx;
93};
94
95struct pool *myx_buf_pool;
96
97struct myx_ring_lock {
98	struct mutex		mrl_mtx;
99	u_int			mrl_running;
100};
101
102enum myx_state {
103	MYX_S_OFF = 0,
104	MYX_S_RUNNING,
105	MYX_S_DOWN
106};
107
108struct myx_softc {
109	struct device		 sc_dev;
110	struct arpcom		 sc_ac;
111
112	pci_chipset_tag_t	 sc_pc;
113	pci_intr_handle_t	 sc_ih;
114	pcitag_t		 sc_tag;
115
116	bus_dma_tag_t		 sc_dmat;
117	bus_space_tag_t		 sc_memt;
118	bus_space_handle_t	 sc_memh;
119	bus_size_t		 sc_mems;
120
121	struct myx_dmamem	 sc_zerodma;
122	struct myx_dmamem	 sc_cmddma;
123	struct myx_dmamem	 sc_paddma;
124
125	struct myx_dmamem	 sc_sts_dma;
126	volatile struct myx_status	*sc_sts;
127	struct mutex		 sc_sts_mtx;
128
129	int			 sc_intx;
130	void			*sc_irqh;
131	u_int32_t		 sc_irqcoaloff;
132	u_int32_t		 sc_irqclaimoff;
133	u_int32_t		 sc_irqdeassertoff;
134
135	struct myx_dmamem	 sc_intrq_dma;
136	struct myx_intrq_desc	*sc_intrq;
137	u_int			 sc_intrq_count;
138	u_int			 sc_intrq_idx;
139
140	u_int			 sc_rx_ring_count;
141	struct myx_ring_lock	 sc_rx_ring_lock[2];
142	u_int32_t		 sc_rx_ring_offset[2];
143	struct myx_buf_list	 sc_rx_buf_free[2];
144	struct myx_buf_list	 sc_rx_buf_list[2];
145	u_int			 sc_rx_ring_idx[2];
146#define  MYX_RXSMALL		 0
147#define  MYX_RXBIG		 1
148	struct timeout		 sc_refill;
149
150	bus_size_t		 sc_tx_boundary;
151	u_int			 sc_tx_ring_count;
152	struct myx_ring_lock	 sc_tx_ring_lock;
153	u_int32_t		 sc_tx_ring_offset;
154	u_int			 sc_tx_nsegs;
155	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
156	u_int			 sc_tx_free;
157	struct myx_buf_list	 sc_tx_buf_free;
158	struct myx_buf_list	 sc_tx_buf_list;
159	u_int			 sc_tx_ring_idx;
160
161	struct ifmedia		 sc_media;
162
163	volatile enum myx_state	 sc_state;
164	volatile u_int8_t	 sc_linkdown;
165};
166
167int	 myx_match(struct device *, void *, void *);
168void	 myx_attach(struct device *, struct device *, void *);
169int	 myx_query(struct myx_softc *sc, char *, size_t);
170u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
171void	 myx_attachhook(void *);
172int	 myx_loadfirmware(struct myx_softc *, const char *);
173int	 myx_probe_firmware(struct myx_softc *);
174
175void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
176void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
177
178#if defined(__LP64__)
179#define myx_bus_space_write bus_space_write_raw_region_8
180typedef u_int64_t myx_bus_t;
181#else
182#define myx_bus_space_write bus_space_write_raw_region_4
183typedef u_int32_t myx_bus_t;
184#endif
185
186int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
187int	 myx_boot(struct myx_softc *, u_int32_t);
188
189int	 myx_rdma(struct myx_softc *, u_int);
190int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
191	    bus_size_t, u_int align);
192void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
193int	 myx_media_change(struct ifnet *);
194void	 myx_media_status(struct ifnet *, struct ifmediareq *);
195void	 myx_link_state(struct myx_softc *, u_int32_t);
196void	 myx_watchdog(struct ifnet *);
197int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
198void	 myx_up(struct myx_softc *);
199void	 myx_iff(struct myx_softc *);
200void	 myx_down(struct myx_softc *);
201
202void	 myx_start(struct ifnet *);
203void	 myx_write_txd_tail(struct myx_softc *, struct myx_buf *, u_int8_t,
204	    u_int32_t, u_int);
205int	 myx_load_buf(struct myx_softc *, struct myx_buf *, struct mbuf *);
206int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
207int	 myx_intr(void *);
208int	 myx_rxeof(struct myx_softc *);
209void	 myx_txeof(struct myx_softc *, u_int32_t);
210
211struct myx_buf *	myx_buf_alloc(struct myx_softc *, bus_size_t, int,
212			    bus_size_t, bus_size_t);
213void			myx_buf_free(struct myx_softc *, struct myx_buf *);
214void			myx_bufs_init(struct myx_buf_list *);
215int			myx_bufs_empty(struct myx_buf_list *);
216struct myx_buf *	myx_buf_get(struct myx_buf_list *);
217void			myx_buf_put(struct myx_buf_list *, struct myx_buf *);
218struct myx_buf *	myx_buf_fill(struct myx_softc *, int);
219
220void			myx_rx_zero(struct myx_softc *, int);
221int			myx_rx_fill(struct myx_softc *, int);
222void			myx_refill(void *);
223
224void			myx_ring_lock_init(struct myx_ring_lock *);
225int			myx_ring_enter(struct myx_ring_lock *);
226int			myx_ring_leave(struct myx_ring_lock *);
227
228static inline void
229myx_sts_enter(struct myx_softc *sc)
230{
231	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
232
233	mtx_enter(&sc->sc_sts_mtx);
234	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
235	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
236}
237
238static inline void
239myx_sts_leave(struct myx_softc *sc)
240{
241	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
242
243	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
244	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
245	mtx_leave(&sc->sc_sts_mtx);
246}
247
248struct cfdriver myx_cd = {
249	NULL, "myx", DV_IFNET
250};
251struct cfattach myx_ca = {
252	sizeof(struct myx_softc), myx_match, myx_attach
253};
254
255const struct pci_matchid myx_devices[] = {
256	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
257	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
258};
259
260int
261myx_match(struct device *parent, void *match, void *aux)
262{
263	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
264}
265
266void
267myx_attach(struct device *parent, struct device *self, void *aux)
268{
269	struct myx_softc	*sc = (struct myx_softc *)self;
270	struct pci_attach_args	*pa = aux;
271	char			 part[32];
272	pcireg_t		 memtype;
273
274	sc->sc_pc = pa->pa_pc;
275	sc->sc_tag = pa->pa_tag;
276	sc->sc_dmat = pa->pa_dmat;
277
278	myx_ring_lock_init(&sc->sc_rx_ring_lock[MYX_RXSMALL]);
279	myx_bufs_init(&sc->sc_rx_buf_free[MYX_RXSMALL]);
280	myx_bufs_init(&sc->sc_rx_buf_list[MYX_RXSMALL]);
281	myx_ring_lock_init(&sc->sc_rx_ring_lock[MYX_RXBIG]);
282	myx_bufs_init(&sc->sc_rx_buf_free[MYX_RXBIG]);
283	myx_bufs_init(&sc->sc_rx_buf_list[MYX_RXBIG]);
284
285	myx_ring_lock_init(&sc->sc_tx_ring_lock);
286	myx_bufs_init(&sc->sc_tx_buf_free);
287	myx_bufs_init(&sc->sc_tx_buf_list);
288
289	timeout_set(&sc->sc_refill, myx_refill, sc);
290
291	mtx_init(&sc->sc_sts_mtx, IPL_NET);
292
293
294	/* Map the PCI memory space */
295	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
296	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
297	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
298		printf(": unable to map register memory\n");
299		return;
300	}
301
302	/* Get board details (mac/part) */
303	memset(part, 0, sizeof(part));
304	if (myx_query(sc, part, sizeof(part)) != 0)
305		goto unmap;
306
307	/* Map the interrupt */
308	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
309		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
310			printf(": unable to map interrupt\n");
311			goto unmap;
312		}
313		sc->sc_intx = 1;
314	}
315
316	printf(": %s, model %s, address %s\n",
317	    pci_intr_string(pa->pa_pc, sc->sc_ih),
318	    part[0] == '\0' ? "(unknown)" : part,
319	    ether_sprintf(sc->sc_ac.ac_enaddr));
320
321	/* this is sort of racy */
322	if (myx_buf_pool == NULL) {
323		myx_buf_pool = malloc(sizeof(*myx_buf_pool), M_DEVBUF,
324		    M_WAITOK);
325		if (myx_buf_pool == NULL) {
326			printf("%s: unable to allocate buf pool\n",
327			    DEVNAME(sc));
328			goto unmap;
329		}
330		pool_init(myx_buf_pool, sizeof(struct myx_buf),
331		    0, 0, 0, "myxbufs", &pool_allocator_nointr);
332	}
333
334	if (mountroothook_establish(myx_attachhook, sc) == NULL) {
335		printf("%s: unable to establish mountroot hook\n", DEVNAME(sc));
336		goto unmap;
337	}
338
339	return;
340
341 unmap:
342	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
343	sc->sc_mems = 0;
344}
345
346u_int
347myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
348{
349	u_int		i, j;
350	u_int8_t	digit;
351
352	memset(lladdr, 0, ETHER_ADDR_LEN);
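	/* pack hex digits into lladdr a nibble at a time; non-hex separators are skipped */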
353	for (i = j = 0; i < maxlen && mac[i] != '\0'; i++) {
354		if (mac[i] >= '0' && mac[i] <= '9')
355			digit = mac[i] - '0';
356		else if (mac[i] >= 'A' && mac[i] <= 'F')
357			digit = mac[i] - 'A' + 10;
358		else if (mac[i] >= 'a' && mac[i] <= 'f')
359			digit = mac[i] - 'a' + 10;
360		else
361			continue;
362		if ((j & 1) == 0)
363			digit <<= 4;
364		lladdr[j++/2] |= digit;
365	}
366
367	return (i);
368}
369
370int
371myx_query(struct myx_softc *sc, char *part, size_t partlen)
372{
373	struct myx_gen_hdr hdr;
374	u_int32_t	offset;
375	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
376	u_int		i, len, maxlen;
377
378	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
379	offset = betoh32(offset);
380	if (offset + sizeof(hdr) > sc->sc_mems) {
381		printf(": header is outside register window\n");
382		return (1);
383	}
384
385	myx_read(sc, offset, &hdr, sizeof(hdr));
386	offset = betoh32(hdr.fw_specs);
387	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));
388
389	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);
390
391	for (i = 0; i < len; i++) {
392		maxlen = len - i;
393		if (strings[i] == '\0')
394			break;
395		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
396			i += 4;
397			i += myx_ether_aton(&strings[i],
398			    sc->sc_ac.ac_enaddr, maxlen);
399		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
400			i += 3;
401			i += strlcpy(part, &strings[i], min(maxlen, partlen));
402		}
403		for (; i < len; i++) {
404			if (strings[i] == '\0')
405				break;
406		}
407	}
408
409	return (0);
410}
411
412int
413myx_loadfirmware(struct myx_softc *sc, const char *filename)
414{
415	struct myx_gen_hdr	hdr;
416	u_int8_t		*fw;
417	size_t			fwlen;
418	u_int32_t		offset;
419	u_int			i, ret = 1;
420
421	if (loadfirmware(filename, &fw, &fwlen) != 0) {
422		printf("%s: could not load firmware %s\n", DEVNAME(sc),
423		    filename);
424		return (1);
425	}
426	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
427		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
428		goto err;
429	}
430
431	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
432	offset = betoh32(offset);
433	if ((offset + sizeof(hdr)) > fwlen) {
434		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
435		goto err;
436	}
437
438	memcpy(&hdr, fw + offset, sizeof(hdr));
439	DPRINTF(MYXDBG_INIT, "%s: "
440	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
441	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
442	    betoh32(hdr.fw_type), hdr.fw_version);
443
444	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
445	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
446		printf("%s: invalid firmware type 0x%x version %s\n",
447		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
448		goto err;
449	}
450
451	/* Write the firmware to the card's SRAM */
452	for (i = 0; i < fwlen; i += 256)
453		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));
454
455	if (myx_boot(sc, fwlen) != 0) {
456		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
457		goto err;
458	}
459
460	ret = 0;
461
462err:
463	free(fw, M_DEVBUF);
464	return (ret);
465}
466
467void
468myx_attachhook(void *arg)
469{
470	struct myx_softc	*sc = (struct myx_softc *)arg;
471	struct ifnet		*ifp = &sc->sc_ac.ac_if;
472	struct myx_cmd		 mc;
473
474	/* Allocate command DMA memory */
475	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
476	    MYXALIGN_CMD) != 0) {
477		printf("%s: failed to allocate command DMA memory\n",
478		    DEVNAME(sc));
479		return;
480	}
481
482	/* Try the firmware stored on disk */
483	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
484		/* error printed by myx_loadfirmware */
485		goto freecmd;
486	}
487
488	memset(&mc, 0, sizeof(mc));
489
490	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
491		printf("%s: failed to reset the device\n", DEVNAME(sc));
492		goto freecmd;
493	}
494
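	/*
	 * sc_tx_boundary is used as both the maximum segment size and the
	 * boundary for tx DMA maps; myx_probe_firmware() drops it to 2048
	 * if the unaligned firmware has to be used.
	 */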
495	sc->sc_tx_boundary = 4096;
496
497	if (myx_probe_firmware(sc) != 0) {
498		printf("%s: error while selecting firmware\n", DEVNAME(sc));
499		goto freecmd;
500	}
501
502	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
503	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
504	if (sc->sc_irqh == NULL) {
505		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
506		goto freecmd;
507	}
508
509	ifp->if_softc = sc;
510	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
511	ifp->if_ioctl = myx_ioctl;
512	ifp->if_start = myx_start;
513	ifp->if_watchdog = myx_watchdog;
514	ifp->if_hardmtu = 9000;
515	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
516	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
517	IFQ_SET_READY(&ifp->if_snd);
518
519	ifp->if_capabilities = IFCAP_VLAN_MTU;
520#if 0
521	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
522	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
523	    IFCAP_CSUM_UDPv4;
524#endif
525
526	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
527	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
528	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
529
530	if_attach(ifp);
531	ether_ifattach(ifp);
532
533	return;
534
535freecmd:
536	myx_dmamem_free(sc, &sc->sc_cmddma);
537}
538
539int
540myx_probe_firmware(struct myx_softc *sc)
541{
542	struct myx_dmamem test;
543	bus_dmamap_t map;
544	struct myx_cmd mc;
545	pcireg_t csr;
546	int offset;
547	int width = 0;
548
549	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
550	    &offset, NULL)) {
551		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
552		    offset + PCI_PCIE_LCSR);
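		/* the negotiated link width sits in bits 20-25 of the link control/status register */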
553		width = (csr >> 20) & 0x3f;
554
555		if (width <= 4) {
556			/*
557			 * if the link width is 4 or less we can use the
558			 * aligned firmware.
559			 */
560			return (0);
561		}
562	}
563
564	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
565		return (1);
566	map = test.mxm_map;
567
568	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
569	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
570
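	/*
	 * The mc_data2 values below appear to pack the DMA test lengths
	 * into 16-bit halves: read size in the upper half, write size in
	 * the lower half, so 0x10000 scales a read test, 0x1 a write
	 * test and 0x10001 a combined read/write test.
	 */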
571	memset(&mc, 0, sizeof(mc));
572	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
573	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
574	mc.mc_data2 = htobe32(4096 * 0x10000);
575	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
576		printf("%s: DMA read test failed\n", DEVNAME(sc));
577		goto fail;
578	}
579
580	memset(&mc, 0, sizeof(mc));
581	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
582	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
583	mc.mc_data2 = htobe32(4096 * 0x1);
584	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
585		printf("%s: DMA write test failed\n", DEVNAME(sc));
586		goto fail;
587	}
588
589	memset(&mc, 0, sizeof(mc));
590	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
591	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
592	mc.mc_data2 = htobe32(4096 * 0x10001);
593	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
594		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
595		goto fail;
596	}
597
598	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
599	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
600	myx_dmamem_free(sc, &test);
601	return (0);
602
603fail:
604	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
605	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
606	myx_dmamem_free(sc, &test);
607
608	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
609		printf("%s: unable to load %s\n", DEVNAME(sc),
610		    MYXFW_UNALIGNED);
611		return (1);
612	}
613
614	sc->sc_tx_boundary = 2048;
615
616	printf("%s: using unaligned firmware\n", DEVNAME(sc));
617	return (0);
618}
619
620void
621myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
622{
623	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
624	    BUS_SPACE_BARRIER_READ);
625	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
626}
627
628void
629myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
630{
631	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
632	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
633	    BUS_SPACE_BARRIER_WRITE);
634}
635
636int
637myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
638    bus_size_t size, u_int align)
639{
640	mxm->mxm_size = size;
641
642	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
643	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
644	    &mxm->mxm_map) != 0)
645		return (1);
646	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
647	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
648	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
649		goto destroy;
650	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
651	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
652		goto free;
653	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
654	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
655		goto unmap;
656
657	return (0);
658 unmap:
659	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
660 free:
661	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
662 destroy:
663	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
664	return (1);
665}
666
667void
668myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
669{
670	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
671	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
672	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
673	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
674}
675
676int
677myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
678{
679	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
680	struct myx_response	*mr;
681	u_int			 i;
682	u_int32_t		 result, data;
683#ifdef MYX_DEBUG
684	static const char *cmds[MYXCMD_MAX] = {
685		"CMD_NONE",
686		"CMD_RESET",
687		"CMD_GET_VERSION",
688		"CMD_SET_INTRQDMA",
689		"CMD_SET_BIGBUFSZ",
690		"CMD_SET_SMALLBUFSZ",
691		"CMD_GET_TXRINGOFF",
692		"CMD_GET_RXSMALLRINGOFF",
693		"CMD_GET_RXBIGRINGOFF",
694		"CMD_GET_INTRACKOFF",
695		"CMD_GET_INTRDEASSERTOFF",
696		"CMD_GET_TXRINGSZ",
697		"CMD_GET_RXRINGSZ",
698		"CMD_SET_INTRQSZ",
699		"CMD_SET_IFUP",
700		"CMD_SET_IFDOWN",
701		"CMD_SET_MTU",
702		"CMD_GET_INTRCOALDELAYOFF",
703		"CMD_SET_STATSINTVL",
704		"CMD_SET_STATSDMA_OLD",
705		"CMD_SET_PROMISC",
706		"CMD_UNSET_PROMISC",
707		"CMD_SET_LLADDR",
708		"CMD_SET_FC",
709		"CMD_UNSET_FC",
710		"CMD_DMA_TEST",
711		"CMD_SET_ALLMULTI",
712		"CMD_UNSET_ALLMULTI",
713		"CMD_SET_MCASTGROUP",
714		"CMD_UNSET_MCASTGROUP",
715		"CMD_UNSET_MCAST",
716		"CMD_SET_STATSDMA",
717		"CMD_UNALIGNED_DMA_TEST",
718		"CMD_GET_UNALIGNED_STATUS"
719	};
720#endif
721
722	mc->mc_cmd = htobe32(cmd);
723	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
724	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
725
726	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
727	mr->mr_result = 0xffffffff;
728
729	/* Send command */
730	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
731	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
732	    BUS_DMASYNC_PREREAD);
733
734	for (i = 0; i < 20; i++) {
735		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
736		    BUS_DMASYNC_POSTREAD);
737		result = betoh32(mr->mr_result);
738		data = betoh32(mr->mr_data);
739
740		if (result != 0xffffffff)
741			break;
742
743		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
744		    BUS_DMASYNC_PREREAD);
745		delay(1000);
746	}
747
748	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
749	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
750	    cmds[cmd], i, result, data, data);
751
752	if (result != 0)
753		return (-1);
754
755	if (r != NULL)
756		*r = data;
757	return (0);
758}
759
760int
761myx_boot(struct myx_softc *sc, u_int32_t length)
762{
763	struct myx_bootcmd	 bc;
764	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
765	u_int32_t		*status;
766	u_int			 i, ret = 1;
767
768	memset(&bc, 0, sizeof(bc));
769	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
770	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
771	bc.bc_result = 0xffffffff;
772	bc.bc_offset = htobe32(MYX_FW_BOOT);
773	bc.bc_length = htobe32(length - 8);
774	bc.bc_copyto = htobe32(8);
775	bc.bc_jumpto = htobe32(0);
776
777	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
778	*status = 0;
779
780	/* Send command */
781	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
782	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
783	    BUS_DMASYNC_PREREAD);
784
785	for (i = 0; i < 200; i++) {
786		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
787		    BUS_DMASYNC_POSTREAD);
788		if (*status == 0xffffffff) {
789			ret = 0;
790			break;
791		}
792
793		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
794		    BUS_DMASYNC_PREREAD);
795		delay(1000);
796	}
797
798	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
799	    DEVNAME(sc), i, ret);
800
801	return (ret);
802}
803
804int
805myx_rdma(struct myx_softc *sc, u_int do_enable)
806{
807	struct myx_rdmacmd	 rc;
808	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
809	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
810	u_int32_t		*status;
811	int			 ret = 1;
812	u_int			 i;
813
814	/*
815	 * It is required to set up a _dummy_ RDMA address. It also makes
816	 * some PCI-E chipsets resend dropped messages.
817	 */
818	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
819	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
820	rc.rc_result = 0xffffffff;
821	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
822	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
823	rc.rc_enable = htobe32(do_enable);
824
825	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
826	*status = 0;
827
828	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
829	    BUS_DMASYNC_PREREAD);
830
831	/* Send command */
832	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));
833
834	for (i = 0; i < 20; i++) {
835		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
836		    BUS_DMASYNC_POSTREAD);
837
838		if (*status == 0xffffffff) {
839			ret = 0;
840			break;
841		}
842
843		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
844		    BUS_DMASYNC_PREREAD);
845		delay(1000);
846	}
847
848	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
849	    DEVNAME(sc), __func__,
850	    do_enable ? "enabled" : "disabled", i, betoh32(*status));
851
852	return (ret);
853}
854
855int
856myx_media_change(struct ifnet *ifp)
857{
858	/* ignore */
859	return (0);
860}
861
862void
863myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
864{
865	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
866	u_int32_t		 sts;
867
868	imr->ifm_active = IFM_ETHER | IFM_AUTO;
869	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
870		imr->ifm_status = 0;
871		return;
872	}
873
874	myx_sts_enter(sc);
875	sts = sc->sc_sts->ms_linkstate;
876	myx_sts_leave(sc);
877
878	myx_link_state(sc, sts);
879
880	imr->ifm_status = IFM_AVALID;
881	if (!LINK_STATE_IS_UP(ifp->if_link_state))
882		return;
883
884	imr->ifm_active |= IFM_FDX | IFM_FLOW |
885	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
886	imr->ifm_status |= IFM_ACTIVE;
887}
888
889void
890myx_link_state(struct myx_softc *sc, u_int32_t sts)
891{
892	struct ifnet		*ifp = &sc->sc_ac.ac_if;
893	int			 link_state = LINK_STATE_DOWN;
894
895	if (betoh32(sts) == MYXSTS_LINKUP)
896		link_state = LINK_STATE_FULL_DUPLEX;
897	if (ifp->if_link_state != link_state) {
898		ifp->if_link_state = link_state;
899		if_link_state_change(ifp);
900		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
901		    IF_Gbps(10) : 0;
902	}
903}
904
905void
906myx_watchdog(struct ifnet *ifp)
907{
908	return;
909}
910
911int
912myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
913{
914	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
915	struct ifaddr		*ifa = (struct ifaddr *)data;
916	struct ifreq		*ifr = (struct ifreq *)data;
917	int			 s, error = 0;
918
919	s = splnet();
920
921	switch (cmd) {
922	case SIOCSIFADDR:
923		ifp->if_flags |= IFF_UP;
924#ifdef INET
925		if (ifa->ifa_addr->sa_family == AF_INET)
926			arp_ifinit(&sc->sc_ac, ifa);
927#endif
928		/* FALLTHROUGH */
929
930	case SIOCSIFFLAGS:
931		if (ISSET(ifp->if_flags, IFF_UP)) {
932			if (ISSET(ifp->if_flags, IFF_RUNNING))
933				error = ENETRESET;
934			else
935				myx_up(sc);
936		} else {
937			if (ISSET(ifp->if_flags, IFF_RUNNING))
938				myx_down(sc);
939		}
940		break;
941
942	case SIOCGIFMEDIA:
943	case SIOCSIFMEDIA:
944		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
945		break;
946
947	default:
948		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
949	}
950
951	if (error == ENETRESET) {
952		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
953		    (IFF_UP | IFF_RUNNING))
954			myx_iff(sc);
955		error = 0;
956	}
957
958	splx(s);
959	return (error);
960}
961
962void
963myx_up(struct myx_softc *sc)
964{
965	struct ifnet		*ifp = &sc->sc_ac.ac_if;
966	struct myx_buf		*mb;
967	struct myx_cmd		mc;
968	bus_dmamap_t		map;
969	size_t			size;
970	u_int			maxpkt;
971	u_int32_t		r;
972	int			i;
973
974	memset(&mc, 0, sizeof(mc));
975	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
976		printf("%s: failed to reset the device\n", DEVNAME(sc));
977		return;
978	}
979
980	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
981	    64, MYXALIGN_CMD) != 0) {
982		printf("%s: failed to allocate zero pad memory\n",
983		    DEVNAME(sc));
984		return;
985	}
986	memset(sc->sc_zerodma.mxm_kva, 0, 64);
987	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
988	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
989
990	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
991	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
992		printf("%s: failed to allocate pad DMA memory\n",
993		    DEVNAME(sc));
994		goto free_zero;
995	}
996	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
997	    sc->sc_paddma.mxm_map->dm_mapsize,
998	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
999
1000	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
1001		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
1002		goto free_pad;
1003	}
1004
1005	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1006		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1007		goto free_pad;
1008	}
1009	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1010
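	/* set the mbuf cluster watermarks for both rx buffer sizes on this interface */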
1011	m_clsetwms(ifp, MCLBYTES, 2, sc->sc_rx_ring_count - 2);
1012	m_clsetwms(ifp, 12 * 1024, 2, sc->sc_rx_ring_count - 2);
1013
1014	memset(&mc, 0, sizeof(mc));
1015	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1016		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1017		goto free_pad;
1018	}
1019	sc->sc_tx_ring_idx = 0;
1020	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1021	sc->sc_tx_free = sc->sc_tx_ring_count - 1;
1022	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1023	sc->sc_tx_count = 0;
1024	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1025	IFQ_SET_READY(&ifp->if_snd);
1026
1027	/* Allocate Interrupt Queue */
1028
1029	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1030	sc->sc_intrq_idx = 0;
1031
1032	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1033	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1034	    size, MYXALIGN_DATA) != 0) {
1035		goto free_pad;
1036	}
1037	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1038	map = sc->sc_intrq_dma.mxm_map;
1039	memset(sc->sc_intrq, 0, size);
1040	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1041	    BUS_DMASYNC_PREREAD);
1042
1043	memset(&mc, 0, sizeof(mc));
1044	mc.mc_data0 = htobe32(size);
1045	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1046		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1047		goto free_intrq;
1048	}
1049
1050	memset(&mc, 0, sizeof(mc));
1051	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1052	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1053	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1054		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1055		goto free_intrq;
1056	}
1057
1058	/*
1059	 * get interrupt offsets
1060	 */
1061
1062	memset(&mc, 0, sizeof(mc));
1063	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1064	    &sc->sc_irqclaimoff) != 0) {
1065		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1066		goto free_intrq;
1067	}
1068
1069	memset(&mc, 0, sizeof(mc));
1070	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1071	    &sc->sc_irqdeassertoff) != 0) {
1072		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1073		goto free_intrq;
1074	}
1075
1076	memset(&mc, 0, sizeof(mc));
1077	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1078	    &sc->sc_irqcoaloff) != 0) {
1079		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1080		goto free_intrq;
1081	}
1082
1083	/* Set an appropriate interrupt coalescing period */
1084	r = htobe32(MYX_IRQCOALDELAY);
1085	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1086
1087	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1088		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1089		goto free_intrq;
1090	}
1091
1092	memset(&mc, 0, sizeof(mc));
1093	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1094		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1095		goto free_intrq;
1096	}
1097
1098	memset(&mc, 0, sizeof(mc));
1099	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1100		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1101		goto free_intrq;
1102	}
1103
1104	memset(&mc, 0, sizeof(mc));
1105	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1106	    &sc->sc_tx_ring_offset) != 0) {
1107		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1108		goto free_intrq;
1109	}
1110
1111	memset(&mc, 0, sizeof(mc));
1112	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1113	    &sc->sc_rx_ring_offset[MYX_RXSMALL]) != 0) {
1114		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1115		goto free_intrq;
1116	}
1117
1118	memset(&mc, 0, sizeof(mc));
1119	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1120	    &sc->sc_rx_ring_offset[MYX_RXBIG]) != 0) {
1121		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1122		goto free_intrq;
1123	}
1124
1125	/* Allocate Interrupt Data */
1126	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1127	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1128		printf("%s: failed to allocate status DMA memory\n",
1129		    DEVNAME(sc));
1130		goto free_intrq;
1131	}
1132	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1133	map = sc->sc_sts_dma.mxm_map;
1134	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1135	    BUS_DMASYNC_PREREAD);
1136
1137	memset(&mc, 0, sizeof(mc));
1138	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1139	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1140	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1141	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1142		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1143		goto free_sts;
1144	}
1145
1146	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1147
1148	memset(&mc, 0, sizeof(mc));
1149	mc.mc_data0 = htobe32(maxpkt);
1150	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1151		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1152		goto free_sts;
1153	}
1154
1155	for (i = 0; i < sc->sc_tx_ring_count; i++) {
1156		mb = myx_buf_alloc(sc, maxpkt, sc->sc_tx_nsegs,
1157		    sc->sc_tx_boundary, sc->sc_tx_boundary);
1158		if (mb == NULL)
1159			goto free_tx_bufs;
1160
1161		myx_buf_put(&sc->sc_tx_buf_free, mb);
1162	}
1163
1164	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1165		mb = myx_buf_alloc(sc, MCLBYTES, 1, 4096, 4096);
1166		if (mb == NULL)
1167			goto free_rxsmall_bufs;
1168
1169		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXSMALL], mb);
1170	}
1171
1172	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1173		mb = myx_buf_alloc(sc, 12 * 1024, 1, 12 * 1024, 0);
1174		if (mb == NULL)
1175			goto free_rxbig_bufs;
1176
1177		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXBIG], mb);
1178	}
1179
1180	myx_rx_zero(sc, MYX_RXSMALL);
1181	if (myx_rx_fill(sc, MYX_RXSMALL) != 0) {
1182		printf("%s: failed to fill small rx ring\n", DEVNAME(sc));
1183		goto free_rxbig_bufs;
1184	}
1185
1186	myx_rx_zero(sc, MYX_RXBIG);
1187	if (myx_rx_fill(sc, MYX_RXBIG) != 0) {
1188		printf("%s: failed to fill big rx ring\n", DEVNAME(sc));
1189		goto free_rxsmall;
1190	}
1191
1192	memset(&mc, 0, sizeof(mc));
1193	mc.mc_data0 = htobe32(MCLBYTES - ETHER_ALIGN);
1194	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1195		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1196		goto free_rxbig;
1197	}
1198
1199	memset(&mc, 0, sizeof(mc));
1200	mc.mc_data0 = htobe32(16384);
1201	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1202		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1203		goto free_rxbig;
1204	}
1205
1206	mtx_enter(&sc->sc_sts_mtx);
1207	sc->sc_state = MYX_S_RUNNING;
1208	mtx_leave(&sc->sc_sts_mtx);
1209
1210	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1211		printf("%s: failed to start the device\n", DEVNAME(sc));
1212		goto free_rxbig;
1213	}
1214
1215	CLR(ifp->if_flags, IFF_OACTIVE);
1216	SET(ifp->if_flags, IFF_RUNNING);
1217	myx_iff(sc);
1218	myx_start(ifp);
1219
1220	return;
1221
1222free_rxbig:
1223	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
1224		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1225		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1226		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1227		m_freem(mb->mb_m);
1228		myx_buf_free(sc, mb);
1229	}
1230free_rxsmall:
1231	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
1232		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1233		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1234		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1235		m_freem(mb->mb_m);
1236		myx_buf_free(sc, mb);
1237	}
1238free_rxbig_bufs:
1239	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
1240		myx_buf_free(sc, mb);
1241free_rxsmall_bufs:
1242	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
1243		myx_buf_free(sc, mb);
1244free_tx_bufs:
1245	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
1246		myx_buf_free(sc, mb);
1247free_sts:
1248	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1249	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1250	myx_dmamem_free(sc, &sc->sc_sts_dma);
1251free_intrq:
1252	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1253	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1254	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1255free_pad:
1256	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1257	    sc->sc_paddma.mxm_map->dm_mapsize,
1258	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1259	myx_dmamem_free(sc, &sc->sc_paddma);
1260
1261	memset(&mc, 0, sizeof(mc));
1262	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1263		printf("%s: failed to reset the device\n", DEVNAME(sc));
1264	}
1265free_zero:
1266	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1267	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1268	myx_dmamem_free(sc, &sc->sc_zerodma);
1269}
1270
1271int
1272myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1273{
1274	struct myx_cmd		 mc;
1275
1276	memset(&mc, 0, sizeof(mc));
1277	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1278	    addr[2] << 8 | addr[3]);
1279	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1280
1281	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1282		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1283		return (-1);
1284	}
1285	return (0);
1286}
1287
1288void
1289myx_iff(struct myx_softc *sc)
1290{
1291	struct myx_cmd		mc;
1292	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1293	struct ether_multi	*enm;
1294	struct ether_multistep	step;
1295	u_int8_t *addr;
1296
1297	CLR(ifp->if_flags, IFF_ALLMULTI);
1298
	memset(&mc, 0, sizeof(mc));
1299	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1300	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1301		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1302		return;
1303	}
1304
1305	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1306		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1307		return;
1308	}
1309
1310	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1311		printf("%s: failed to leave all mcast groups\n", DEVNAME(sc));
1312		return;
1313	}
1314
1315	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1316	    sc->sc_ac.ac_multirangecnt > 0) {
1317		SET(ifp->if_flags, IFF_ALLMULTI);
1318		return;
1319	}
1320
1321	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1322	while (enm != NULL) {
1323		addr = enm->enm_addrlo;
1324
1325		memset(&mc, 0, sizeof(mc));
1326		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1327		    addr[2] << 8 | addr[3]);
1328		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1329		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1330			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1331			return;
1332		}
1333
1334		ETHER_NEXT_MULTI(step, enm);
1335	}
1336
1337	memset(&mc, 0, sizeof(mc));
1338	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1339		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1340		return;
1341	}
1342}
1343
1344void
1345myx_down(struct myx_softc *sc)
1346{
1347	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1348	volatile struct myx_status *sts = sc->sc_sts;
1349	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
1350	struct myx_buf		*mb;
1351	struct myx_cmd		 mc;
1352	int			 s;
1353
1354	myx_sts_enter(sc);
1355	sc->sc_linkdown = sts->ms_linkdown;
1356	sc->sc_state = MYX_S_DOWN;
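	/*
	 * myx_intr() advances sc_state to MYX_S_OFF once it sees
	 * ms_linkdown change after the IFDOWN command below.
	 */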
1357
1358	memset(&mc, 0, sizeof(mc));
1359	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);
1360
1361	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1362	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1363	while (sc->sc_state != MYX_S_OFF)
1364		msleep(sts, &sc->sc_sts_mtx, 0, "myxdown", 0);
1365	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1366	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1367	mtx_leave(&sc->sc_sts_mtx);
1368
1369	timeout_del(&sc->sc_refill);
1370
1371	s = splnet();
1372	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
1373		ifp->if_link_state = LINK_STATE_UNKNOWN;
1374		ifp->if_baudrate = 0;
1375		if_link_state_change(ifp);
1376	}
1377	splx(s);
1378
1379	memset(&mc, 0, sizeof(mc));
1380	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1381		printf("%s: failed to reset the device\n", DEVNAME(sc));
1382	}
1383
1384	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
1385
1386	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
1387		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1388		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1389		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1390		m_freem(mb->mb_m);
1391		myx_buf_free(sc, mb);
1392	}
1393
1394	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
1395		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1396		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1397		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1398		m_freem(mb->mb_m);
1399		myx_buf_free(sc, mb);
1400	}
1401
1402	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
1403		myx_buf_free(sc, mb);
1404
1405	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
1406		myx_buf_free(sc, mb);
1407
1408	while ((mb = myx_buf_get(&sc->sc_tx_buf_list)) != NULL) {
1409		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1410		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1411		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1412		m_freem(mb->mb_m);
1413		myx_buf_free(sc, mb);
1414	}
1415
1416	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
1417		myx_buf_free(sc, mb);
1418
1419	/* the msleep() loop above already synced this dmamem */
1420	myx_dmamem_free(sc, &sc->sc_sts_dma);
1421
1422	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1423	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1424	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1425
1426	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1427	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1428	myx_dmamem_free(sc, &sc->sc_paddma);
1429
1430	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1431	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1432	myx_dmamem_free(sc, &sc->sc_zerodma);
1433}
1434
1435void
1436myx_write_txd_tail(struct myx_softc *sc, struct myx_buf *mb, u_int8_t flags,
1437    u_int32_t offset, u_int idx)
1438{
1439	struct myx_tx_desc		txd;
1440	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
1441	bus_dmamap_t			map = mb->mb_map;
1442	int				i;
1443
1444	for (i = 1; i < map->dm_nsegs; i++) {
1445		memset(&txd, 0, sizeof(txd));
1446		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
1447		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
1448		txd.tx_flags = flags;
1449
1450		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1451		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1452		    &txd, sizeof(txd));
1453	}
1454
1455	/* pad runt frames out to ETHER_MIN_LEN - ETHER_CRC_LEN (60 bytes) with the zero buffer */
1456	if (map->dm_mapsize < 60) {
1457		memset(&txd, 0, sizeof(txd));
1458		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
1459		txd.tx_length = htobe16(60 - map->dm_mapsize);
1460		txd.tx_flags = flags;
1461
1462		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1463		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1464		    &txd, sizeof(txd));
1465	}
1466}
1467
1468void
1469myx_start(struct ifnet *ifp)
1470{
1471	struct myx_tx_desc		txd;
1472	SIMPLEQ_HEAD(, myx_buf)		list = SIMPLEQ_HEAD_INITIALIZER(list);
1473	struct myx_softc		*sc = ifp->if_softc;
1474	bus_dmamap_t			map;
1475	struct myx_buf			*mb, *firstmb;
1476	struct mbuf			*m;
1477	u_int32_t			offset = sc->sc_tx_ring_offset;
1478	u_int				idx, firstidx;
1479	u_int8_t			flags;
1480
1481	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
1482	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
1483	    IFQ_IS_EMPTY(&ifp->if_snd))
1484		return;
1485
1486	for (;;) {
1487		if (sc->sc_tx_free <= sc->sc_tx_nsegs) {
1488			SET(ifp->if_flags, IFF_OACTIVE);
1489			break;
1490		}
1491
1492		IFQ_POLL(&ifp->if_snd, m);
1493		if (m == NULL)
1494			break;
1495
1496		mb = myx_buf_get(&sc->sc_tx_buf_free);
1497		if (mb == NULL) {
1498			SET(ifp->if_flags, IFF_OACTIVE);
1499			break;
1500		}
1501
1502		IFQ_DEQUEUE(&ifp->if_snd, m);
1503		if (myx_load_buf(sc, mb, m) != 0) {
1504			m_freem(m);
1505			myx_buf_put(&sc->sc_tx_buf_free, mb);
1506			ifp->if_oerrors++;
1507			break;
1508		}
1509
1510#if NBPFILTER > 0
1511		if (ifp->if_bpf)
1512			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1513#endif
1514
1515		mb->mb_m = m;
1516
1517		map = mb->mb_map;
1518		bus_dmamap_sync(sc->sc_dmat, map, 0,
1519		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1520
1521		SIMPLEQ_INSERT_TAIL(&list, mb, mb_entry);
1522
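		/* runt frames consume one extra descriptor for the zero pad */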
1523		sc->sc_tx_free -= map->dm_nsegs +
1524		    (map->dm_mapsize < 60 ? 1 : 0);
1525	}
1526
1527	/* post the first descriptor last */
1528	firstmb = SIMPLEQ_FIRST(&list);
1529	if (firstmb == NULL)
1530		return;
1531
1532	SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
1533	myx_buf_put(&sc->sc_tx_buf_list, firstmb);
1534
1535	idx = firstidx = sc->sc_tx_ring_idx;
1536	idx += firstmb->mb_map->dm_nsegs +
1537	    (firstmb->mb_map->dm_mapsize < 60 ? 1 : 0);
1538	idx %= sc->sc_tx_ring_count;
1539
1540	while ((mb = SIMPLEQ_FIRST(&list)) != NULL) {
1541		SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
1542		myx_buf_put(&sc->sc_tx_buf_list, mb);
1543
1544		map = mb->mb_map;
1545
1546		flags = MYXTXD_FLAGS_NO_TSO;
1547		if (map->dm_mapsize < 1520)
1548			flags |= MYXTXD_FLAGS_SMALL;
1549
1550		memset(&txd, 0, sizeof(txd));
1551		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
1552		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
1553		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1554		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
1555		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1556		    offset + sizeof(txd) * idx, &txd, sizeof(txd));
1557
1558		myx_write_txd_tail(sc, mb, flags, offset, idx);
1559
1560		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1561		idx %= sc->sc_tx_ring_count;
1562	}
1563	sc->sc_tx_ring_idx = idx;
1564
1565	/* go back and post first mb */
1566	map = firstmb->mb_map;
1567
1568	flags = MYXTXD_FLAGS_NO_TSO;
1569	if (map->dm_mapsize < 1520)
1570		flags |= MYXTXD_FLAGS_SMALL;
1571
	memset(&txd, 0, sizeof(txd));
1572	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
1573	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
1574	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1575	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
1576
1577	/* make sure the first descriptor is seen after the others */
1578	myx_write_txd_tail(sc, firstmb, flags, offset, firstidx);
1579
1580	myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1581	    offset + sizeof(txd) * firstidx, &txd,
1582	    sizeof(txd) - sizeof(myx_bus_t));
1583
1584	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
1585	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);
1586
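	/*
	 * the last word of the first descriptor is written separately so
	 * the chip only sees a completed first descriptor once all the
	 * other descriptor writes have been flushed out.
	 */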
1587	myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1588	    offset + sizeof(txd) * (firstidx + 1) - sizeof(myx_bus_t),
1589	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
1590	    sizeof(myx_bus_t));
1591
1592	bus_space_barrier(sc->sc_memt, sc->sc_memh,
1593	    offset + sizeof(txd) * firstidx, sizeof(txd),
1594	    BUS_SPACE_BARRIER_WRITE);
1595}
1596
1597int
1598myx_load_buf(struct myx_softc *sc, struct myx_buf *mb, struct mbuf *m)
1599{
1600	bus_dma_tag_t			dmat = sc->sc_dmat;
1601	bus_dmamap_t			dmap = mb->mb_map;
1602
1603	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
1604	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
1605	case 0:
1606		break;
1607
1608	case EFBIG: /* mbuf chain is too fragmented */
1609		if (m_defrag(m, M_DONTWAIT) == 0 &&
1610		    bus_dmamap_load_mbuf(dmat, dmap, m,
1611		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
1612			break;
1613	default:
1614		return (1);
1615	}
1616
1617	mb->mb_m = m;
1618	return (0);
1619}
1620
1621int
1622myx_intr(void *arg)
1623{
1624	struct myx_softc	*sc = (struct myx_softc *)arg;
1625	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1626	volatile struct myx_status *sts = sc->sc_sts;
1627	enum myx_state		 state = MYX_S_RUNNING;
1628	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
1629	u_int32_t		 data, link = 0xffffffff;
1630	int			 refill = 0;
1631	u_int8_t		 valid = 0;
1632	int			 i;
1633
1634	mtx_enter(&sc->sc_sts_mtx);
1635	if (sc->sc_state == MYX_S_OFF) {
1636		mtx_leave(&sc->sc_sts_mtx);
1637		return (0);
1638	}
1639
1640	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1641	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1642
1643	valid = sts->ms_isvalid;
1644	if (valid == 0x0) {
1645		myx_sts_leave(sc);
1646		return (0);
1647	}
1648
1649	if (sc->sc_intx) {
1650		data = htobe32(0);
1651		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
1652		    sc->sc_irqdeassertoff, &data, sizeof(data));
1653	}
1654	sts->ms_isvalid = 0;
1655
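	/* re-read ms_txdonecnt until the firmware stops rewriting the status block */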
1656	do {
1657		data = sts->ms_txdonecnt;
1658
1659		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1660		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
1661		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1662	} while (sts->ms_isvalid);
1663
1664	if (sts->ms_statusupdated) {
1665		link = sts->ms_linkstate;
1666
1667		if (sc->sc_state == MYX_S_DOWN &&
1668		    sc->sc_linkdown != sts->ms_linkdown)
1669			state = MYX_S_DOWN;
1670	}
1671	myx_sts_leave(sc);
1672
1673	data = betoh32(data);
1674	if (data != sc->sc_tx_count)
1675		myx_txeof(sc, data);
1676
1677	data = htobe32(3);
1678	if (valid & 0x1) {
1679		refill |= myx_rxeof(sc);
1680
1681		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
1682		    sc->sc_irqclaimoff, &data, sizeof(data));
1683	}
1684	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
1685	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));
1686	bus_space_barrier(sc->sc_memt, sc->sc_memh,
1687	    sc->sc_irqclaimoff, sizeof(data) * 2, BUS_SPACE_BARRIER_WRITE);
1688
1689	if (state == MYX_S_DOWN) {
1690		/* myx_down is waiting for us */
1691		mtx_enter(&sc->sc_sts_mtx);
1692		sc->sc_state = MYX_S_OFF;
1693		wakeup(sts);
1694		mtx_leave(&sc->sc_sts_mtx);
1695
1696		return (1);
1697	}
1698
1699	KERNEL_LOCK();
1700	if (link != 0xffffffff)
1701		myx_link_state(sc, link);
1702
1703	if (ISSET(ifp->if_flags, IFF_OACTIVE)) {
1704		CLR(ifp->if_flags, IFF_OACTIVE);
1705		myx_start(ifp);
1706	}
1707	KERNEL_UNLOCK();
1708
1709	for (i = 0; i < 2; i++) {
1710		if (ISSET(refill, 1 << i)) {
1711			if (myx_rx_fill(sc, i) >= 0 &&
1712			    myx_bufs_empty(&sc->sc_rx_buf_list[i]))
1713				timeout_add(&sc->sc_refill, 0);
1714		}
1715	}
1716
1717	return (1);
1718}
1719
1720void
1721myx_refill(void *xsc)
1722{
1723	struct myx_softc *sc = xsc;
1724	int i;
1725
1726	for (i = 0; i < 2; i++) {
1727		if (myx_rx_fill(sc, i) >= 0 &&
1728		    myx_bufs_empty(&sc->sc_rx_buf_list[i]))
1729			timeout_add(&sc->sc_refill, 1);
1730	}
1731}
1732
1733void
1734myx_txeof(struct myx_softc *sc, u_int32_t done_count)
1735{
1736	struct ifnet *ifp = &sc->sc_ac.ac_if;
1737	struct myx_buf *mb;
1738	struct mbuf *m;
1739	bus_dmamap_t map;
1740	u_int free = 0;
1741
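	/* sc_tx_count shadows the firmware's ms_txdonecnt; reclaim until they match */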
1742	do {
1743		mb = myx_buf_get(&sc->sc_tx_buf_list);
1744		if (mb == NULL) {
1745			printf("%s: tx buffer list unexpectedly empty\n", DEVNAME(sc));
1746			break;
1747		}
1748
1749		m = mb->mb_m;
1750		map = mb->mb_map;
1751
1752		free += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1753
1754		bus_dmamap_sync(sc->sc_dmat, map, 0,
1755		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1756
1757		KERNEL_LOCK();
1758		bus_dmamap_unload(sc->sc_dmat, map);
1759		m_freem(m);
1760		ifp->if_opackets++;
1761		KERNEL_UNLOCK();
1762
1763		myx_buf_put(&sc->sc_tx_buf_free, mb);
1764	} while (++sc->sc_tx_count != done_count);
1765
1766	if (free) {
1767		KERNEL_LOCK();
1768		sc->sc_tx_free += free;
1769		KERNEL_UNLOCK();
1770	}
1771}
1772
1773int
1774myx_rxeof(struct myx_softc *sc)
1775{
1776	static const struct myx_intrq_desc zerodesc = { 0, 0 };
1777	struct ifnet *ifp = &sc->sc_ac.ac_if;
1778	struct myx_buf *mb;
1779	struct mbuf *m;
1780	int ring;
1781	int rings = 0;
1782	u_int len;
1783
1784	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1785	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1786
1787	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
1788		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;
1789
1790		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
1791			sc->sc_intrq_idx = 0;
1792
1793		ring = (len <= (MCLBYTES - ETHER_ALIGN)) ?
1794		    MYX_RXSMALL : MYX_RXBIG;
1795
1796		mb = myx_buf_get(&sc->sc_rx_buf_list[ring]);
1797		if (mb == NULL) {
1798			printf("%s: rx buffer list unexpectedly empty\n", DEVNAME(sc));
1799			break;
1800		}
1801
1802		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1803		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1804
1805		m = mb->mb_m;
1806		m->m_data += ETHER_ALIGN;
1807		m->m_pkthdr.rcvif = ifp;
1808		m->m_pkthdr.len = m->m_len = len;
1809
1810		KERNEL_LOCK();
1811		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1812#if NBPFILTER > 0
1813		if (ifp->if_bpf)
1814			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1815#endif
1816
1817		ether_input_mbuf(ifp, m);
1818		ifp->if_ipackets++;
1819		KERNEL_UNLOCK();
1820
1821		myx_buf_put(&sc->sc_rx_buf_free[ring], mb);
1822
1823		SET(rings, 1 << ring);
1824	}
1825
1826	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1827	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1828
1829	return (rings);
1830}
1831
1832void
1833myx_rx_zero(struct myx_softc *sc, int ring)
1834{
1835	struct myx_rx_desc rxd;
1836	u_int32_t offset = sc->sc_rx_ring_offset[ring];
1837	int idx;
1838
1839	sc->sc_rx_ring_idx[ring] = 0;
1840
1841	memset(&rxd, 0xff, sizeof(rxd));
1842	for (idx = 0; idx < sc->sc_rx_ring_count; idx++) {
1843		myx_write(sc, offset + idx * sizeof(rxd),
1844		    &rxd, sizeof(rxd));
1845	}
1846}
1847
1848int
1849myx_rx_fill(struct myx_softc *sc, int ring)
1850{
1851	struct myx_rx_desc rxd;
1852	struct myx_buf *mb, *firstmb;
1853	u_int32_t offset = sc->sc_rx_ring_offset[ring];
1854	u_int idx, firstidx;
1855	int rv = 1;
1856
1857	if (!myx_ring_enter(&sc->sc_rx_ring_lock[ring]))
1858		return (-1);
1859
1860	do {
1861		firstmb = myx_buf_fill(sc, ring);
1862		if (firstmb == NULL)
1863			continue;
1864
1865		rv = 0;
1866		myx_buf_put(&sc->sc_rx_buf_list[ring], firstmb);
1867
1868		firstidx = sc->sc_rx_ring_idx[ring];
1869		idx = firstidx + 1;
1870		idx %= sc->sc_rx_ring_count;
1871
1872		while ((mb = myx_buf_fill(sc, ring)) != NULL) {
1873			myx_buf_put(&sc->sc_rx_buf_list[ring], mb);
1874
1875			rxd.rx_addr = htobe64(mb->mb_map->dm_segs[0].ds_addr);
1876			myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1877			    offset + idx * sizeof(rxd), &rxd, sizeof(rxd));
1878
1879			idx++;
1880			idx %= sc->sc_rx_ring_count;
1881		}
1882
1883		/* make sure the first descriptor is seen after the others */
1884		if (idx != firstidx + 1) {
1885			bus_space_barrier(sc->sc_memt, sc->sc_memh,
1886			    offset, sizeof(rxd) * sc->sc_rx_ring_count,
1887			    BUS_SPACE_BARRIER_WRITE);
1888		}
1889
1890		rxd.rx_addr = htobe64(firstmb->mb_map->dm_segs[0].ds_addr);
1891		myx_write(sc, offset + firstidx * sizeof(rxd),
1892		    &rxd, sizeof(rxd));
1893
1894		sc->sc_rx_ring_idx[ring] = idx;
1895	} while (!myx_ring_leave(&sc->sc_rx_ring_lock[ring]));
1896
1897	return (rv);
1898}
1899
1900struct myx_buf *
1901myx_buf_fill(struct myx_softc *sc, int ring)
1902{
1903	static size_t sizes[2] = { MCLBYTES, 12 * 1024 };
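	/* indexed by ring: MYX_RXSMALL takes MCLBYTES clusters, MYX_RXBIG 12k clusters */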
1904	struct myx_buf *mb;
1905	struct mbuf *m;
1906	int rv;
1907
1908	KERNEL_LOCK();
1909	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_ac.ac_if, sizes[ring]);
1910	KERNEL_UNLOCK();
1911	if (m == NULL)
1912		return (NULL);
1913	m->m_len = m->m_pkthdr.len = sizes[ring];
1914
1915	mb = myx_buf_get(&sc->sc_rx_buf_free[ring]);
1916	if (mb == NULL)
1917		goto mfree;
1918
1919	KERNEL_LOCK();
1920	rv = bus_dmamap_load_mbuf(sc->sc_dmat, mb->mb_map, m, BUS_DMA_NOWAIT);
1921	KERNEL_UNLOCK();
1922	if (rv != 0)
1923		goto put;
1924
1925	mb->mb_m = m;
1926	bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0, mb->mb_map->dm_mapsize,
1927	    BUS_DMASYNC_PREREAD);
1928
1929	return (mb);
1930
1931put:
1932	myx_buf_put(&sc->sc_rx_buf_free[ring], mb);
1933mfree:
1934	KERNEL_LOCK();
1935	m_freem(m);
1936	KERNEL_UNLOCK();
1937
1938	return (NULL);
1939}
1940
1941struct myx_buf *
1942myx_buf_alloc(struct myx_softc *sc, bus_size_t size, int nsegs,
1943    bus_size_t maxsegsz, bus_size_t boundary)
1944{
1945	struct myx_buf *mb;
1946
1947	mb = pool_get(myx_buf_pool, PR_WAITOK);
1948	if (mb == NULL)
1949		return (NULL);
1950
1951	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, boundary,
1952	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mb->mb_map) != 0) {
1953		pool_put(myx_buf_pool, mb);
1954		return (NULL);
1955	}
1956
1957	return (mb);
1958}
1959
1960void
1961myx_buf_free(struct myx_softc *sc, struct myx_buf *mb)
1962{
1963	bus_dmamap_destroy(sc->sc_dmat, mb->mb_map);
1964	pool_put(myx_buf_pool, mb);
1965}
1966
1967struct myx_buf *
1968myx_buf_get(struct myx_buf_list *mbl)
1969{
1970	struct myx_buf *mb;
1971
1972	mtx_enter(&mbl->mbl_mtx);
1973	mb = SIMPLEQ_FIRST(&mbl->mbl_q);
1974	if (mb != NULL)
1975		SIMPLEQ_REMOVE_HEAD(&mbl->mbl_q, mb_entry);
1976	mtx_leave(&mbl->mbl_mtx);
1977
1978	return (mb);
1979}
1980
1981int
1982myx_bufs_empty(struct myx_buf_list *mbl)
1983{
1984	int rv;
1985
1986	mtx_enter(&mbl->mbl_mtx);
1987	rv = SIMPLEQ_EMPTY(&mbl->mbl_q);
1988	mtx_leave(&mbl->mbl_mtx);
1989
1990	return (rv);
1991}
1992
1993void
1994myx_buf_put(struct myx_buf_list *mbl, struct myx_buf *mb)
1995{
1996	mtx_enter(&mbl->mbl_mtx);
1997	SIMPLEQ_INSERT_TAIL(&mbl->mbl_q, mb, mb_entry);
1998	mtx_leave(&mbl->mbl_mtx);
1999}
2000
2001void
2002myx_bufs_init(struct myx_buf_list *mbl)
2003{
2004	SIMPLEQ_INIT(&mbl->mbl_q);
2005	mtx_init(&mbl->mbl_mtx, IPL_NET);
2006}
2007
2008void
2009myx_ring_lock_init(struct myx_ring_lock *mrl)
2010{
2011	mtx_init(&mrl->mrl_mtx, IPL_NET);
2012	mrl->mrl_running = 0;
2013}
2014
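/*
 * myx_ring_enter()/myx_ring_leave() serialise ring refills: the first
 * caller gets in, later callers only bump mrl_running and back off,
 * and the winner keeps looping in myx_rx_fill() until leave() reports
 * that nobody else turned up in the meantime.
 */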
2015int
2016myx_ring_enter(struct myx_ring_lock *mrl)
2017{
2018	int rv = 1;
2019
2020	mtx_enter(&mrl->mrl_mtx);
2021	if (++mrl->mrl_running > 1)
2022		rv = 0;
2023	mtx_leave(&mrl->mrl_mtx);
2024
2025	return (rv);
2026}
2027
2028int
2029myx_ring_leave(struct myx_ring_lock *mrl)
2030{
2031	int rv = 1;
2032
2033	mtx_enter(&mrl->mrl_mtx);
2034	if (--mrl->mrl_running > 0) {
2035		mrl->mrl_running = 1;
2036		rv = 0;
2037	}
2038	mtx_leave(&mrl->mrl_mtx);
2039
2040	return (rv);
2041}
2042